diff --git a/src/components/overrides/PageTitle.astro b/src/components/overrides/PageTitle.astro
index 2cef29cdeb4aba..60ffa467065270 100644
--- a/src/components/overrides/PageTitle.astro
+++ b/src/components/overrides/PageTitle.astro
@@ -16,7 +16,6 @@ import { getEntry } from "astro:content";
const spotlightDetails = Astro.props.entry.data.spotlight;
const updated = Astro.props.entry.data.updated;
-const badge = Astro.props.entry.data.sidebar?.badge;
const summary = Astro.props.entry.data.summary;
const slug = Astro.props.entry.slug;
@@ -97,8 +96,6 @@ const hideBreadcrumbs = Astro.props.hideBreadcrumbs;
{!hideTitle && }
-{badge && }
-
{updated && }
{
diff --git a/src/components/overrides/Sidebar.astro b/src/components/overrides/Sidebar.astro
index c94f3bfae50d31..8141e7b96dc8a4 100644
--- a/src/components/overrides/Sidebar.astro
+++ b/src/components/overrides/Sidebar.astro
@@ -31,6 +31,28 @@ interface Group {
order: number;
}
+function inferBadgeVariant(badge: ComponentProps) {
+ if (badge.variant === "default") {
+ switch (badge.text) {
+ case "Beta": {
+ badge.variant = "caution";
+ break;
+ }
+ case "New": {
+ badge.variant = "note";
+ break;
+ }
+ case "Deprecated":
+ case "Legacy": {
+ badge.variant = "danger";
+ break;
+ }
+ }
+ }
+
+ return badge;
+}
+
const currentSection = slug?.split("/")[0];
let filtered = sidebar.filter(
@@ -78,7 +100,10 @@ async function handleGroup(group: Group): Promise {
group.label = frontmatter.sidebar.group?.label ?? frontmatter.title;
group.order = frontmatter.sidebar.order ?? Number.MAX_VALUE;
- group.badge = frontmatter.sidebar.group?.badge;
+
+ if (frontmatter.sidebar.group?.badge) {
+ group.badge = inferBadgeVariant(frontmatter.sidebar.group?.badge);
+ }
if (frontmatter.hideChildren) {
return {
@@ -129,6 +154,10 @@ async function handleLink(link: Link): Promise {
const frontmatter = page.data;
link.order = frontmatter.sidebar.order ?? Number.MAX_VALUE;
+ if (link.badge) {
+ link.badge = inferBadgeVariant(link.badge);
+ }
+
if (slug === currentSection) {
link.order = 0;
}
@@ -141,6 +170,7 @@ async function handleLink(link: Link): Promise {
badge: frontmatter.external_link.startsWith("/api")
? {
text: "API",
+ variant: "note",
}
: undefined,
};
diff --git a/src/content/changelogs-next/2024-12-11-hyperdrive-caching-at-edge.mdx b/src/content/changelogs-next/2024-12-11-hyperdrive-caching-at-edge.mdx
new file mode 100644
index 00000000000000..6cc85e45a894e8
--- /dev/null
+++ b/src/content/changelogs-next/2024-12-11-hyperdrive-caching-at-edge.mdx
@@ -0,0 +1,21 @@
+---
+title: Up to 10x faster cached queries for Hyperdrive
+description: Hyperdrive now caches queries in all Cloudflare locations decreasing cache hit latency by up to 90%
+products:
+ - hyperdrive
+date: 2024-12-11T18:00:00Z
+---
+
+Hyperdrive now caches queries in all Cloudflare locations, decreasing cache hit latency by up to 90%.
+
+When you make a query to your database and Hyperdrive has cached the query results, Hyperdrive will now return the results from the nearest cache. By caching data closer to your users, the latency for cache hits reduces by up to 90%.
+
+This reduction in cache hit latency is reflected in a reduction of the session duration for all queries (cached and uncached) from Cloudflare Workers to Hyperdrive, as illustrated below.
+
+
+
+_P50, P75, and P90 Hyperdrive session latency for all client connection sessions (both cached and uncached queries) for Hyperdrive configurations with caching enabled during the rollout period._
+
+This performance improvement is applied to all new and existing Hyperdrive configurations that have caching enabled.
+
+For more details on how Hyperdrive performs query caching, refer to the [Hyperdrive documentation](/hyperdrive/configuration/how-hyperdrive-works/#query-caching).
diff --git a/src/content/changelogs-next/2025-01-15-workflows-more-steps.mdx b/src/content/changelogs-next/2025-01-15-workflows-more-steps.mdx
new file mode 100644
index 00000000000000..dbdac3b8d98c06
--- /dev/null
+++ b/src/content/changelogs-next/2025-01-15-workflows-more-steps.mdx
@@ -0,0 +1,17 @@
+---
+title: Workflows
+description: More steps per Workflow + pause/resume now supported
+products:
+ - workflows
+date: 2025-01-15
+---
+
+[Workflows](/workflows/) (beta) now allows you to define up to 1024 [steps](/workflows/build/workers-api/#workflowstep). `sleep` steps do not count against this limit.
+
+We've also added:
+
+* `instanceId` as property to the [`WorkflowEvent`](/workflows/build/workers-api/#workflowevent) type, allowing you to retrieve the current instance ID from within a running Workflow instance
+* Improved queueing logic for Workflow instances beyond the current maximum concurrent instances, reducing the cases where instances are stuck in the queued state.
+* Support for [`pause` and `resume`](/workflows/build/workers-api/#pause) for Workflow instances in a queued state.
+
+We're continuing to work on increases to the number of concurrent Workflow instances, steps, and support for a new `waitForEvent` API over the coming weeks.
diff --git a/src/content/changelogs-next/2025-01-27-kv-increased-namespaces-limits.mdx b/src/content/changelogs-next/2025-01-27-kv-increased-namespaces-limits.mdx
new file mode 100644
index 00000000000000..c18885b2c68b2b
--- /dev/null
+++ b/src/content/changelogs-next/2025-01-27-kv-increased-namespaces-limits.mdx
@@ -0,0 +1,13 @@
+---
+title: Workers KV namespace limits increased to 1000
+description: You can now have up to 1000 Workers KV namespaces per account.
+products:
+ - kv
+date: 2025-01-28T14:00:00Z
+---
+
+You can now have up to 1000 Workers KV namespaces per account.
+
+Workers KV namespace limits were increased from 200 to 1000 for all accounts. Higher limits for Workers KV namespaces enable better organization of key-value data, such as by category, tenant, or environment.
+
+Consult the [Workers KV limits documentation](/kv/platform/limits/) for the rest of the limits. This increased limit is available for both the Free and Paid [Workers plans](/workers/platform/pricing/).
diff --git a/src/content/changelogs-next/2025-01-28-hyperdrive-automated-private-database-configuration.mdx b/src/content/changelogs-next/2025-01-28-hyperdrive-automated-private-database-configuration.mdx
new file mode 100644
index 00000000000000..0c54cf613b7820
--- /dev/null
+++ b/src/content/changelogs-next/2025-01-28-hyperdrive-automated-private-database-configuration.mdx
@@ -0,0 +1,17 @@
+---
+title: Automatic configuration for private databases on Hyperdrive
+description: Hyperdrive now automatically configures your Cloudflare Tunnel to connect to your private database.
+products:
+ - hyperdrive
+date: 2025-01-28T18:00:00Z
+---
+
+Hyperdrive now automatically configures your Cloudflare Tunnel to connect to your private database.
+
+
+
+When creating a Hyperdrive configuration for a private database, you only need to provide your database credentials and set up a Cloudflare Tunnel within the private network where your database is accessible. Hyperdrive will automatically create the Cloudflare Access, Service Token, and Policies needed to secure and restrict your Cloudflare Tunnel to the Hyperdrive configuration.
+
+To create a Hyperdrive for a private database, you can follow the [Hyperdrive documentation](/hyperdrive/configuration/connect-to-private-database/). You can still manually create the Cloudflare Access, Service Token, and Policies if you prefer.
+
+This feature is available from the Cloudflare dashboard.
diff --git a/src/content/changelogs-next/2025-01-28-nodejs-compat-improvements.mdx b/src/content/changelogs-next/2025-01-28-nodejs-compat-improvements.mdx
new file mode 100644
index 00000000000000..6d35bd073a7cb9
--- /dev/null
+++ b/src/content/changelogs-next/2025-01-28-nodejs-compat-improvements.mdx
@@ -0,0 +1,86 @@
+---
+title: Support for Node.js DNS, Net, and Timer APIs in Workers
+description: Node.js APIs from the node:dns, node:net, and node:timers modules are now available when using nodejs_compat.
+products:
+ - workers
+date: 2025-01-28T13:00:00Z
+---
+
+import { Render, PackageManagers, TypeScriptExample } from "~/components";
+
+When using a Worker with the [`nodejs_compat`](/workers/runtime-apis/nodejs/) compatibility flag enabled, you can now use the following Node.js APIs:
+
+- [`node:net`](/workers/runtime-apis/nodejs/net/)
+- [`node:dns`](/workers/runtime-apis/nodejs/dns/)
+- [`node:timers`](/workers/runtime-apis/nodejs/timers/)
+
+#### node:net
+
+You can use [`node:net`](https://nodejs.org/api/net.html) to create a direct connection to servers via TCP sockets
+with [`net.Socket`](https://nodejs.org/api/net.html#class-netsocket).
+
+
+```ts
+import net from "node:net";
+
+const exampleIP = "127.0.0.1";
+
+export default {
+ async fetch(req): Promise {
+ const socket = new net.Socket();
+ socket.connect(4000, exampleIP, function () {
+ console.log("Connected");
+ });
+
+ socket.write("Hello, Server!");
+ socket.end();
+
+ return new Response("Wrote to server", { status: 200 });
+ },
+} satisfies ExportedHandler;
+```
+
+
+Additionally, you can now use other APIs including [`net.BlockList`](https://nodejs.org/api/net.html#class-netblocklist) and
+[`net.SocketAddress`](https://nodejs.org/api/net.html#class-netsocketaddress).
+
+Note that [`net.Server`](https://nodejs.org/api/net.html#class-netserver) is not supported.
+
+#### node:dns
+
+You can use [`node:dns`](https://nodejs.org/api/dns.html) for name resolution via [DNS over HTTPS](/1.1.1.1/encryption/dns-over-https/) using
+[Cloudflare DNS](https://www.cloudflare.com/application-services/products/dns/) at 1.1.1.1.
+
+
+```ts
+import dns from 'node:dns';
+
+let response = await dns.promises.resolve('cloudflare.com', 'NS');
+```
+
+
+
+All `node:dns` functions are available, except `lookup`, `lookupService`, and `resolve` which throw "Not implemented" errors when called.
+
+#### node:timers
+
+You can use [`node:timers`](https://nodejs.org/api/timers.html) to schedule functions to be called at some future period of time.
+
+This includes [`setTimeout`](https://nodejs.org/api/timers.html#settimeoutcallback-delay-args) for calling a function after a delay,
+[`setInterval`](https://nodejs.org/api/timers.html#setintervalcallback-delay-args) for calling a function repeatedly,
+and [`setImmediate`](https://nodejs.org/api/timers.html#setimmediatecallback-args) for calling a function in the next iteration of the event loop.
+
+
+```ts
+import timers from "node:timers";
+
+console.log("first");
+timers.setTimeout(() => {
+ console.log("last");
+}, 10);
+
+timers.setTimeout(() => {
+ console.log("next");
+});
+```
+
diff --git a/src/content/changelogs/ai-gateway.yaml b/src/content/changelogs/ai-gateway.yaml
index 57ee466815836d..938312e4a49c74 100644
--- a/src/content/changelogs/ai-gateway.yaml
+++ b/src/content/changelogs/ai-gateway.yaml
@@ -5,11 +5,15 @@ productLink: "/ai-gateway/"
productArea: Developer platform
productAreaLink: /workers/platform/changelog/platform/
entries:
+ - publish_date: "2025-01-23"
+ title: Request timeouts for Universal gateways and fallback providers
+ description: |-
* Added [request timeouts](/ai-gateway/configuration/fallbacks/#request-timeouts) as a configuration option for fallback providers. This property triggers a fallback provider based on a predetermined response time (measured in milliseconds).
- publish_date: "2025-01-02"
title: DeepSeek
description: |-
* **Configuration**: Added [DeepSeek](/ai-gateway/providers/deepseek/) as a new provider.
-
+
- publish_date: "2024-12-17"
title: AI Gateway Dashboard
description: |-
diff --git a/src/content/changelogs/dlp.yaml b/src/content/changelogs/dlp.yaml
index bfb3dea1ccbba7..5c227bb3c75617 100644
--- a/src/content/changelogs/dlp.yaml
+++ b/src/content/changelogs/dlp.yaml
@@ -9,6 +9,10 @@ entries:
title: Source code confidence levels
description: |-
DLP now supports setting a confidence level for [source code profiles](/cloudflare-one/policies/data-loss-prevention/dlp-profiles/predefined-profiles/#source-code).
+ - publish_date: "2025-01-15"
+ title: Payload log match visibility
+ description: |-
+ When viewing decrypted payload log matches, DLP now provides more context by listing multiple DLP matches and the matching DLP profile.
- publish_date: "2024-11-25"
title: Profile confidence levels
description: |-
diff --git a/src/content/changelogs/dns.yaml b/src/content/changelogs/dns.yaml
index 5965fb8f7efd69..234a07eef9b52a 100644
--- a/src/content/changelogs/dns.yaml
+++ b/src/content/changelogs/dns.yaml
@@ -5,6 +5,12 @@ productLink: "/dns/"
productArea: Core platform
productAreaLink: /fundamentals/reference/changelog/performance/
entries:
+ - publish_date: "2025-01-27"
+ title: Zone IDs and names on individual DNS records
+ description: |-
+ Records returned by the API will no longer contain the `zone_id` and `zone_name` fields.
+ This change may take up to four weeks to fully roll out.
+ The affected fields were deprecated with an End of Life (EOL) date of November 30, 2024.
- publish_date: "2024-10-15"
title: Quote validation for TXT records added via dashboard
description: |-
diff --git a/src/content/changelogs/hyperdrive.yaml b/src/content/changelogs/hyperdrive.yaml
index 5c097c65fc298c..8d1bf39089e162 100644
--- a/src/content/changelogs/hyperdrive.yaml
+++ b/src/content/changelogs/hyperdrive.yaml
@@ -5,6 +5,14 @@ productLink: "/hyperdrive/"
productArea: Developer platform
productAreaLink: /workers/platform/changelog/platform/
entries:
+ - publish_date: "2025-01-28"
+ title: Hyperdrive automatically configures your Cloudflare Tunnel to connect to your private database.
+ description: |-
+ When creating a Hyperdrive configuration for a private database, you only need to provide your database credentials and set up a Cloudflare Tunnel within the private network where your database is accessible.
+
+ Hyperdrive will automatically create the Cloudflare Access, Service Token and Policies needed to secure and restrict your Cloudflare Tunnel to the Hyperdrive configuration.
+
+ Refer to [documentation on how to configure Hyperdrive to connect to a private database](/hyperdrive/configuration/connect-to-private-database/).
- publish_date: "2024-12-11"
title: Hyperdrive now caches queries in all Cloudflare locations decreasing cache hit latency by up to 90%
description: |-
diff --git a/src/content/changelogs/load-balancing.yaml b/src/content/changelogs/load-balancing.yaml
index c50774a9295e19..d45a2b7ba22ba0 100644
--- a/src/content/changelogs/load-balancing.yaml
+++ b/src/content/changelogs/load-balancing.yaml
@@ -5,6 +5,10 @@ productLink: "/load-balancing/"
productArea: Application performance
productAreaLink: /fundamentals/reference/changelog/performance/
entries:
+ - publish_date: "2025-01-24"
+ title: Update to Cloudflare Tunnel Steering
+ description: |-
+ Introduced changes to the resolution of proxied domains that are backed by Cloudflare Tunnels on the same zone. These changes correct how orange-clouded records are steered to Cloudflare Tunnels via Cloudflare Load Balancers.
- publish_date: "2025-01-16"
title: Update to Pool Health Monitoring
description: |-
diff --git a/src/content/changelogs/queues.yaml b/src/content/changelogs/queues.yaml
index 4af0d1031004bb..825169372613d8 100644
--- a/src/content/changelogs/queues.yaml
+++ b/src/content/changelogs/queues.yaml
@@ -64,7 +64,7 @@ entries:
description: |-
Queue consumers will soon automatically scale up concurrently as a queues' backlog grows in order to keep overall message processing latency down. Concurrency will be enabled on all existing queues by 2023-03-28.
- **To opt-out, or to configure a fixed maximum concurrency**, set `max_concurrency = 1` in your `wrangler.toml` file or via [the queues dashboard](https://dash.cloudflare.com/?to=/:account/queues).
+ **To opt-out, or to configure a fixed maximum concurrency**, set `max_concurrency = 1` in your `wrangler.toml / wrangler.json` file or via [the queues dashboard](https://dash.cloudflare.com/?to=/:account/queues).
**To opt-in, you do not need to take any action**: your consumer will begin to scale out as needed to keep up with your message backlog. It will scale back down as the backlog shrinks, and/or if a consumer starts to generate a higher rate of errors. To learn more about how consumers scale, refer to the [consumer concurrency](/queues/configuration/consumer-concurrency/) documentation.
- publish_date: "2023-03-02"
diff --git a/src/content/changelogs/rules.yaml b/src/content/changelogs/rules.yaml
index 8f4cd9686b7b24..1b55e3f79a5100 100644
--- a/src/content/changelogs/rules.yaml
+++ b/src/content/changelogs/rules.yaml
@@ -5,6 +5,10 @@ productLink: "/rules/"
productArea: Application performance
productAreaLink: /fundamentals/reference/changelog/performance/
entries:
+ - publish_date: "2025-01-29"
+ title: New Snippets code editor
+ description: |-
+ The new Snippets code editor is now live, allowing users to edit both Snippets code and rules from a single page. This update simplifies the workflow and introduces features such as code formatting, refactoring, and auto-complete. The updated editor is available within the Snippets interface.
- publish_date: "2025-01-09"
title: New Rules Overview page
description: |-
diff --git a/src/content/docs/1.1.1.1/faq.mdx b/src/content/docs/1.1.1.1/faq.mdx
index 34310c8cf34370..57569899c5f734 100644
--- a/src/content/docs/1.1.1.1/faq.mdx
+++ b/src/content/docs/1.1.1.1/faq.mdx
@@ -3,7 +3,7 @@ pcx_content_type: faq
title: FAQ
structured_data: true
sidebar:
- order: 8
+ order: 12
slug: 1.1.1.1/faq
---
diff --git a/src/content/docs/1.1.1.1/setup/index.mdx b/src/content/docs/1.1.1.1/setup/index.mdx
index cd0590d5144bfe..9b50e91e5ac0b7 100644
--- a/src/content/docs/1.1.1.1/setup/index.mdx
+++ b/src/content/docs/1.1.1.1/setup/index.mdx
@@ -8,6 +8,7 @@ head:
- tag: title
content: Set up Cloudflare 1.1.1.1 resolver
slug: 1.1.1.1/setup
+description: Learn how to set up Cloudflare's 1.1.1.1 DNS resolver for enhanced security and privacy. Protect against malware and adult content with easy configuration.
---
diff --git a/src/content/docs/1.1.1.1/terms-of-use.mdx b/src/content/docs/1.1.1.1/terms-of-use.mdx
index 070c4e9c2a07d4..2a9687a7bff29f 100644
--- a/src/content/docs/1.1.1.1/terms-of-use.mdx
+++ b/src/content/docs/1.1.1.1/terms-of-use.mdx
@@ -2,7 +2,7 @@
pcx_content_type: reference
title: Terms of use
sidebar:
- order: 7
+ order: 10
slug: 1.1.1.1/terms-of-use
---
diff --git a/src/content/docs/1.1.1.1/setup/reporting-issues.mdx b/src/content/docs/1.1.1.1/troubleshooting.mdx
similarity index 89%
rename from src/content/docs/1.1.1.1/setup/reporting-issues.mdx
rename to src/content/docs/1.1.1.1/troubleshooting.mdx
index 2499955ad0b1bf..34405246ec0db1 100644
--- a/src/content/docs/1.1.1.1/setup/reporting-issues.mdx
+++ b/src/content/docs/1.1.1.1/troubleshooting.mdx
@@ -1,18 +1,19 @@
---
pcx_content_type: troubleshooting
-title: Reporting Issues with Cloudflare's DNS Resolver
+title: Troubleshooting
+description: Learn how to diagnose and report issues with Cloudflare's DNS Resolver
sidebar:
- order: 3
+ order: 8
head:
- tag: title
- content: Reporting Issues with Cloudflare's DNS Resolver
+ content: Troubleshooting DNS Resolver
slug: 1.1.1.1/troubleshooting
---
import { Render } from "~/components"
-This guide will help you diagnose and resolve common issues with Cloudflare's DNS Resolver. Before proceeding with manual troubleshooting steps, you can use our [diagnostic tool](https://one.one.one.one/help/) to automatically gather relevant information.
+This guide will help you diagnose and resolve common issues with Cloudflare's DNS Resolver. Before proceeding with manual troubleshooting steps, you can [verify your connection](/1.1.1.1/check/) to automatically gather relevant information.
## Name resolution issues
@@ -136,4 +137,5 @@ If your traceroute fails at the first hop, the issue is likely hardware-related.
## Additional resources
- [1.1.1.1 DNS Resolver homepage](https://1.1.1.1)
-- [DNS-over-TLS documentation](/1.1.1.1/encryption/dns-over-tls/)
+- [DNS over TLS documentation](/1.1.1.1/encryption/dns-over-tls/)
+- [Diagnostic tool](https://one.one.one.one/help/)
diff --git a/src/content/docs/agents/capabilities/control-web-browsers.mdx b/src/content/docs/agents/capabilities/control-web-browsers.mdx
new file mode 100644
index 00000000000000..08618a97c854a2
--- /dev/null
+++ b/src/content/docs/agents/capabilities/control-web-browsers.mdx
@@ -0,0 +1,10 @@
+---
+pcx_content_type: navigation
+title: Control Web Browsers (Browser Rendering API)
+external_link: /browser-rendering/
+sidebar:
+ order: 1
+head: []
+description: The Workers Browser Rendering API allows developers to programmatically control and interact with a headless browser instance and create automation flows for their applications and products.
+
+---
diff --git a/src/content/docs/agents/capabilities/index.mdx b/src/content/docs/agents/capabilities/index.mdx
new file mode 100644
index 00000000000000..dfcca68ba21de9
--- /dev/null
+++ b/src/content/docs/agents/capabilities/index.mdx
@@ -0,0 +1,14 @@
+---
+pcx_content_type: reference
+title: Capabilities
+sidebar:
+ order: 2
+ group:
+ hideIndex: true
+---
+
+import { DirectoryListing } from "~/components";
+
+Capabilities
+
+
diff --git a/src/content/docs/agents/capabilities/run-models.mdx b/src/content/docs/agents/capabilities/run-models.mdx
new file mode 100644
index 00000000000000..86a14253478d37
--- /dev/null
+++ b/src/content/docs/agents/capabilities/run-models.mdx
@@ -0,0 +1,9 @@
+---
+pcx_content_type: navigation
+title: Run models (Workers AI)
+external_link: /workers-ai/
+sidebar:
+ order: 2
+head: []
+description: Run popular open-source AI models on Cloudflare's network.
+---
\ No newline at end of file
diff --git a/src/content/docs/agents/capabilities/send-email.mdx b/src/content/docs/agents/capabilities/send-email.mdx
new file mode 100644
index 00000000000000..b23feac138d75a
--- /dev/null
+++ b/src/content/docs/agents/capabilities/send-email.mdx
@@ -0,0 +1,11 @@
+---
+pcx_content_type: navigation
+title: Send Email
+external_link: /email-routing/email-workers/send-email-workers/
+sidebar:
+ order: 2
+head: []
+description: Send emails from your Worker for async updates to a user.
+---
+
+
diff --git a/src/content/docs/agents/capabilities/webrtc-realtime.mdx b/src/content/docs/agents/capabilities/webrtc-realtime.mdx
new file mode 100644
index 00000000000000..3acddf48f2e141
--- /dev/null
+++ b/src/content/docs/agents/capabilities/webrtc-realtime.mdx
@@ -0,0 +1,9 @@
+---
+pcx_content_type: navigation
+title: Realtime voice (WebRTC)
+external_link: /calls/
+sidebar:
+ order: 4
+head: []
+description: Build real-time serverless video, audio and data applications.
+---
\ No newline at end of file
diff --git a/src/content/docs/agents/index.mdx b/src/content/docs/agents/index.mdx
new file mode 100644
index 00000000000000..c396697639ca9f
--- /dev/null
+++ b/src/content/docs/agents/index.mdx
@@ -0,0 +1,529 @@
+---
+title: Build agents on Cloudflare
+type: overview
+pcx_content_type: overview
+sidebar:
+ order: 1
+head:
+ - tag: title
+ content: Agents
+---
+
+import {
+ CardGrid,
+ Description,
+ Feature,
+ LinkButton,
+ LinkTitleCard,
+ Plan,
+ RelatedProduct,
+ Render,
+ TabItem,
+ Tabs,
+} from "~/components";
+
+Build AI-powered agents that can autonomously perform tasks, persist state, browse the web, and communicate back to users in real-time over any channel.
+
+- **Non I/O bound pricing:** don't pay for long-running processes when your code is not executing. Cloudflare Workers is designed to scale down and [only charge you for CPU time](https://blog.cloudflare.com/workers-pricing-scale-to-zero/), as opposed to wall-clock time.
+- **Designed for durable execution:** [Durable Objects](/durable-objects/) and [Workflows](/workflows) are built for a programming model that enables guaranteed execution for async tasks like long-running deep thinking LLM calls, human-in-the-loop, or unreliable API calls.
+- **Scalable, and reliable, without compromising on performance:** by running on Cloudflare's network, agents can execute tasks close to the user without introducing latency for real-time experiences.
+
+## Start building
+
+
+
+
+Build agents that can execute complex tasks, progressively save state, and call out to _any_ third party API they need to using [Workflows](/workflows/). Send emails or [text messages](/workflows/examples/twilio/), [browse the web](/browser-rendering/), process and summarize documents, and/or query your database.
+
+```sh
+npm create cloudflare@latest workflows-starter -- --template "cloudflare/workflows-starter"
+cd workflows-starter
+npm i resend
+```
+
+```ts collapse={30-1000}
+import { WorkflowEntrypoint, WorkflowStep, WorkflowEvent } from 'cloudflare:workers';
+import { Resend } from 'resend';
+
+type Env = {
+ MY_WORKFLOW: Workflow;
+ RESEND_API_KEY: string;
+};
+
+type Params = {
+ email: string;
+ metadata: Record;
+};
+
+export class MyWorkflow extends WorkflowEntrypoint {
+ async run(event: WorkflowEvent, step: WorkflowStep) {
+
+ const files = await step.do('my first step', async () => {
+ // Fetch a list of files from $SOME_SERVICE
+ return {
+ files: [
+ 'doc_7392_rev3.pdf',
+ 'report_x29_final.pdf',
+ 'memo_2024_05_12.pdf',
+ 'file_089_update.pdf',
+ 'proj_alpha_v2.pdf',
+ 'data_analysis_q2.pdf',
+ 'notes_meeting_52.pdf',
+ 'summary_fy24_draft.pdf',
+ ],
+ };
+ });
+
+ const summaries = await step.do('summarize text', async () => {
+ const results = {};
+ for (const filename of files.files) {
+ const fileContent = await this.env.MY_BUCKET.get(filename);
+ if (!fileContent) continue;
+
+ const text = await fileContent.text();
+ const summary = await this.env.WORKERS_AI.run('@cf/meta/llama-3.2-3b-instruct', {
+ messages: [{
+ role: 'user',
+ content: `Please summarize the following text concisely: ${text}`
+ }]
+ });
+ results[filename] = summary.response;
+ }
+ return results;
+ });
+
+ await step.sleep('wait on something', '1 minute');
+
+ let summaryKey = await step.do(
+ 'store summaries in R2',
+ async () => {
+ const summaryKey = `summaries-${Date.now()}.json`;
+ await this.env.MY_BUCKET.put(summaryKey, JSON.stringify(summaries));
+ return summaryKey;
+ },
+ );
+
+ await step.do(
+ 'email summaries',
+ {
+ retries: {
+ limit: 3,
+ delay: '5 second',
+ backoff: 'exponential',
+ }
+ },
+ async () => {
+ const summaryText = Object.entries(summaries)
+ .map(([filename, summary]) => `${filename}:\n${summary}\n\n`)
+ .join('');
+
+ const resend = new Resend(this.env.RESEND_API_KEY);
+
+ await resend.emails.send({
+ from: 'notifications@yourdomain.com',
+ to: event.payload.email,
+ subject: 'Your Document Summaries',
+ text: summaryText,
+ });
+ }
+ );
+
+ return summaryKey;
+ }
+}
+
+export default {
+ async fetch(req: Request, env: Env): Promise {
+ let id = new URL(req.url).searchParams.get('instanceId');
+
+ if (id) {
+ let instance = await env.MY_WORKFLOW.get(id);
+ return Response.json({
+ status: await instance.status(),
+ });
+ }
+
+ let instance = await env.MY_WORKFLOW.create();
+ return Response.json({
+ id: instance.id,
+ details: await instance.status(),
+ });
+ },
+};
+```
+
+
+
+
+Use [Durable Objects](/durable-objects/) — stateful, serverless, long-running micro-servers — to ship interactive, real-time agents that can connect to the latest AI models.
+
+Stream responses over [WebSockets](/durable-objects/best-practices/websockets/), and don't time out while waiting for the latest chain-of-thought models — including `o1` or `deepseek-r1` — to respond.
+
+```sh
+npm i openai
+```
+
+```ts collapse={30-1000}
+import { DurableObject } from "cloudflare:workers";
+
+export interface Env {
+ DURABLE_AGENT: DurableObjectNamespace;
+ OPENAI_API_KEY: string;
+}
+
+export default {
+ async fetch(request: Request, env: Env, ctx: ExecutionContext) {
+ if (request.url.endsWith("/agent/chat")) {
+ const upgradeHeader = request.headers.get("Upgrade");
+ if (!upgradeHeader || upgradeHeader !== "websocket") {
+ return Response.json(
+ { error: "Durable Object expected Upgrade: websocket" },
+ { status: 426 }
+ );
+ }
+
+ const url = new URL(request.url);
+ const agentId = url.searchParams.get("id") || crypto.randomUUID();
+
+ let id = env.DURABLE_AGENT.idFromName(agentId);
+ let agent = env.DURABLE_AGENT.get(id);
+
+ return agent.fetch(request);
+ }
+
+ return Response.json({ message: "Bad Request" }, { status: 400 });
+ },
+};
+
+export class DurableAgent extends DurableObject {
+ constructor(private state: DurableObjectState, private env: Env) {
+ super(state, env);
+ }
+
+ async fetch(request: Request): Promise {
+ const webSocketPair = new WebSocketPair();
+ const [client, server] = Object.values(webSocketPair);
+
+ this.ctx.acceptWebSocket(server);
+
+ return new Response(null, {
+ status: 101,
+ webSocket: client,
+ });
+ }
+
+ async webSocketMessage(ws: WebSocket, message: ArrayBuffer | string) {
+ try {
+ const openai = new OpenAI({
+ apiKey: this.env.OPENAI_API_KEY,
+ timeout: 10 * 60 * 1000, // Don't let it think TOO long.
+ });
+
+ // Stream the response to immediately start sending chunks to the client,
+ // rather than buffering the entire response and making the user wait
+ const stream = await openai.chat.completions.create({
+ model: "o1",
+ messages: [{ role: "user", content: message.toString() }],
+ stream: true,
+ });
+
+ for await (const chunk of stream) {
+ const content = chunk.choices[0]?.delta?.content;
+ if (content) {
+ ws.send(content);
+ }
+ }
+ } catch (error) {
+ ws.send(
+ JSON.stringify({
+ error: "OpenAI request failed",
+ message: error.message,
+ })
+ );
+ }
+ }
+
+ async webSocketClose(ws: WebSocket, code: number, reason: string, wasClean: boolean) {
+ ws.close(code, "Durable Object is closing WebSocket");
+ }
+}
+```
+
+
+
+
+Use the [Browser Rendering API](/browser-rendering/) to allow your agents to search the web, take screenshots, and directly interact with websites.
+
+```sh
+npm install @cloudflare/puppeteer --save-dev
+```
+
+```ts collapse={30-1000}
+import puppeteer from "@cloudflare/puppeteer";
+
+interface Env {
+ MYBROWSER: Fetcher;
+ BROWSER_KV_DEMO: KVNamespace;
+}
+
+export default {
+ async fetch(request: Request, env: Env): Promise {
+ const { searchParams } = new URL(request.url);
+ const url = searchParams.get("url");
+
+ if (!url) {
+ return new Response("Please add an ?url=https://example.com/ parameter");
+ }
+
+ const normalizedUrl = new URL(url).toString();
+ let img = await env.BROWSER_KV_DEMO.get(normalizedUrl, { type: "arrayBuffer" });
+
+ if (img === null) {
+ const browser = await puppeteer.launch(env.MYBROWSER);
+ const page = await browser.newPage();
+ await page.goto(normalizedUrl);
+ img = await page.screenshot() as Buffer;
+
+ await env.BROWSER_KV_DEMO.put(normalizedUrl, img, {
+ expirationTtl: 60 * 60 * 24, // 24 hours
+ });
+
+ await browser.close();
+ }
+
+ return new Response(img, {
+ headers: {
+ "content-type": "image/jpeg",
+ },
+ });
+ },
+};
+```
+
+
+
+
+Use [AI Gateway](/ai-gateway/) to cache, log, retry and run [evals](/ai-gateway/evaluations/) (evaluations) for your agents, no matter where they're deployed.
+
+```py collapse={30-1000}
+from anthropic import Anthropic
+
+anthropic = Anthropic(
+ api_key="",
+ # Route, cache, fallback and log prompt-response pairs between your app
+ # and your AI model provider.
+ base_url="https://gateway.ai.cloudflare.com/v1/${accountId}/${gatewayId}/anthropic"
+)
+
+message = anthropic.messages.create(
+ model="claude-3-opus-20240229",
+ max_tokens=1000,
+ messages=[{
+ "role": "user",
+ "content": "Generate a Cloudflare Worker that returns a simple JSON payload based on a query param",
+ }]
+)
+
+print(message.content)
+```
+
+
+
+
+## Use your favorite AI framework
+
+Build agents using your favorite AI frameworks, and deploy it directly to [Cloudflare Workers](/workers/).
+
+
+
+
+Use [LangChain](https://js.langchain.com/docs/integrations/text_embedding/cloudflare_ai/) to build Retrieval-Augmented Generation (RAG) applications using [Workers AI](/workers-ai/) and [Vectorize](/vectorize/).
+
+Give your agents more context and the ability to search across content, reply to user queries, and expand their domain knowledge.
+
+```sh
+npm i @langchain/cloudflare hono
+```
+
+```ts collapse={30-1000}
+import {
+ CloudflareVectorizeStore,
+ CloudflareWorkersAIEmbeddings
+} from "@langchain/cloudflare";
+import { VectorizeIndex } from "@cloudflare/workers-types";
+import { Ai } from "@cloudflare/ai";
+import { Hono } from "hono";
+
+export interface Env {
+ VECTORIZE_INDEX: VectorizeIndex;
+ AI: Ai;
+}
+
+const app = new Hono<{ Bindings: Env }>();
+
+app.get("/", async (c) => {
+ const embeddings = new CloudflareWorkersAIEmbeddings({
+ binding: c.env.AI,
+ model: "@cf/baai/bge-small-en-v1.5",
+ });
+
+ const store = new CloudflareVectorizeStore(embeddings, {
+ index: c.env.VECTORIZE_INDEX,
+ });
+
+ const results = await store.similaritySearch("hello", 5);
+ return c.json(results);
+});
+
+app.post("/load", async (c) => {
+ const embeddings = new CloudflareWorkersAIEmbeddings({
+ binding: c.env.AI,
+ model: "@cf/baai/bge-small-en-v1.5",
+ });
+
+ const store = new CloudflareVectorizeStore(embeddings, {
+ index: c.env.VECTORIZE_INDEX,
+ });
+
+ const documents = [
+ { pageContent: "hello", metadata: {} },
+ { pageContent: "world", metadata: {} },
+ { pageContent: "hi", metadata: {} }
+ ];
+
+ await store.addDocuments(documents, {
+ ids: ["id1", "id2", "id3"]
+ });
+
+ return c.json({ success: true });
+});
+
+app.delete("/clear", async (c) => {
+ const embeddings = new CloudflareWorkersAIEmbeddings({
+ binding: c.env.AI,
+ model: "@cf/baai/bge-small-en-v1.5",
+ });
+
+ const store = new CloudflareVectorizeStore(embeddings, {
+ index: c.env.VECTORIZE_INDEX,
+ });
+
+ await store.delete({ ids: ["id1", "id2", "id3"] });
+ return c.json({ success: true });
+});
+
+export default app;
+```
+
+
+
+
+Ship faster with the [AI SDK](https://sdk.vercel.ai/docs/introduction): make it easier to generate text, tool call and/or get structured output from your AI models (and then deploy to [Workers](/workers/)).
+
+```sh
+npm i ai workers-ai-provider
+```
+
+```ts collapse={30-1000}
+import { createWorkersAI } from 'workers-ai-provider';
+import { streamText } from 'ai';
+
+type Env = {
+ AI: Ai;
+};
+
+export default {
+ async fetch(_: Request, env: Env) {
+ const workersai = createWorkersAI({ binding: env.AI });
+ const result = streamText({
+ model: workersai('@cf/meta/llama-3.2-3b-instruct'),
+ prompt: 'Write short essay on why you like Cloudflare Durable Objects.',
+ });
+
+ return result.toTextStreamResponse({
+ headers: {
+ 'Content-Type': 'text/x-unknown',
+ 'content-encoding': 'identity',
+ 'transfer-encoding': 'chunked',
+ },
+ });
+ },
+};
+```
+
+
+
+
+Use any model provider with OpenAI compatible endpoints, including [ChatGPT](https://platform.openai.com/docs/quickstart), [DeepSeek](https://api-docs.deepseek.com/) and [Workers AI](/workers-ai/configuration/open-ai-compatibility/), directly from Cloudflare Workers.
+
+```sh
+npm i openai
+```
+
+```ts collapse={30-1000}
+import OpenAI from "openai";
+
+export interface Env {
+ OPENAI_API_KEY: string;
+}
+
+export default {
+ async fetch(request: Request, env: Env) {
+ const url = new URL(request.url);
+ const prompt = url.searchParams.get('prompt') || "Make some robot noises";
+
+ const openai = new OpenAI({
+ apiKey: env.OPENAI_API_KEY
+ });
+
+ const chatCompletion = await openai.chat.completions.create({
+ messages: [{ role: "user", content: prompt }],
+ model: "gpt-3.5-turbo",
+ });
+
+ const embeddings = await openai.embeddings.create({
+ model: "text-embedding-ada-002",
+ input: "Cloudflare Agents documentation",
+ });
+
+ return new Response(JSON.stringify({ chatCompletion, embeddings }));
+ }
+}
+```
+
+
+
+
+***
+
+## All the products you need in one platform
+
+
+
+Observe and control your AI applications with caching, rate limiting, request retries, model fallback, and more.
+
+
+
+
+
+Build full-stack AI applications with Vectorize, Cloudflare’s vector database. Adding Vectorize enables you to perform tasks such as semantic search, recommendations, and anomaly detection, or to provide context and memory to an LLM.
+
+
+
+
+
+Run machine learning models, powered by serverless GPUs, on Cloudflare's global network.
+
+
+
+
+
+Build real-time serverless video, audio and data applications with WebRTC running on Cloudflare's network.
+
+
+
+
+
+Build stateful agents that guarantee executions, including automatic retries, persistent state that runs for minutes, hours, days, or weeks.
+
+
\ No newline at end of file
diff --git a/src/content/docs/agents/products/ai-gateway.mdx b/src/content/docs/agents/products/ai-gateway.mdx
new file mode 100644
index 00000000000000..152bb1879e435b
--- /dev/null
+++ b/src/content/docs/agents/products/ai-gateway.mdx
@@ -0,0 +1,11 @@
+---
+pcx_content_type: navigation
+title: AI Gateway
+external_link: /ai-gateway/
+sidebar:
+ order: 3
+head: []
+description: Observe and control your AI applications.
+---
+
+
diff --git a/src/content/docs/agents/products/durable-objects.mdx b/src/content/docs/agents/products/durable-objects.mdx
new file mode 100644
index 00000000000000..955c94ff44a712
--- /dev/null
+++ b/src/content/docs/agents/products/durable-objects.mdx
@@ -0,0 +1,11 @@
+---
+pcx_content_type: navigation
+title: Durable Objects
+external_link: /durable-objects/
+sidebar:
+ order: 6
+head: []
+description: Create collaborative applications, real-time chat, multiplayer games and more without needing to coordinate state or manage infrastructure.
+---
+
+
diff --git a/src/content/docs/agents/products/email-workers.mdx b/src/content/docs/agents/products/email-workers.mdx
new file mode 100644
index 00000000000000..f2a3ccc62285ae
--- /dev/null
+++ b/src/content/docs/agents/products/email-workers.mdx
@@ -0,0 +1,11 @@
+---
+pcx_content_type: navigation
+title: Email Workers
+external_link: /email-routing/email-workers/
+sidebar:
+ order: 4
+head: []
+description: Implement any logic you need to process your emails and create complex rules
+---
+
+
diff --git a/src/content/docs/agents/products/index.mdx b/src/content/docs/agents/products/index.mdx
new file mode 100644
index 00000000000000..0fce7832787300
--- /dev/null
+++ b/src/content/docs/agents/products/index.mdx
@@ -0,0 +1,14 @@
+---
+pcx_content_type: reference
+title: Products
+sidebar:
+ order: 3
+ group:
+ hideIndex: true
+---
+
+import { DirectoryListing } from "~/components";
+
+Build agents using these Cloudflare products:
+
+
diff --git a/src/content/docs/agents/products/real-time.mdx b/src/content/docs/agents/products/real-time.mdx
new file mode 100644
index 00000000000000..3d86980d7cc2e4
--- /dev/null
+++ b/src/content/docs/agents/products/real-time.mdx
@@ -0,0 +1,11 @@
+---
+pcx_content_type: navigation
+title: Real Time
+external_link: /calls/
+sidebar:
+ order: 10
+head: []
+description: Build real-time serverless video, audio and data applications.
+---
+
+
diff --git a/src/content/docs/agents/products/workers-ai.mdx b/src/content/docs/agents/products/workers-ai.mdx
new file mode 100644
index 00000000000000..fb54c8993df240
--- /dev/null
+++ b/src/content/docs/agents/products/workers-ai.mdx
@@ -0,0 +1,11 @@
+---
+pcx_content_type: navigation
+title: Workers AI
+external_link: /workers-ai/
+sidebar:
+ order: 2
+head: []
+description: Run machine learning models, powered by serverless GPUs, on Cloudflare's global network.
+---
+
+
diff --git a/src/content/docs/agents/products/workflows.mdx b/src/content/docs/agents/products/workflows.mdx
new file mode 100644
index 00000000000000..eeb7aca8b0d2bc
--- /dev/null
+++ b/src/content/docs/agents/products/workflows.mdx
@@ -0,0 +1,11 @@
+---
+pcx_content_type: navigation
+title: Workflows
+external_link: /workflows/
+sidebar:
+ order: 5
+head: []
+description: Build durable multi-step applications on Cloudflare Workers with Workflows.
+---
+
+
diff --git a/src/content/docs/agents/reference-architectures/index.mdx b/src/content/docs/agents/reference-architectures/index.mdx
new file mode 100644
index 00000000000000..e1dd3af47f3a47
--- /dev/null
+++ b/src/content/docs/agents/reference-architectures/index.mdx
@@ -0,0 +1,14 @@
+---
+pcx_content_type: reference
+title: Reference Architectures
+sidebar:
+ order: 1
+ group:
+ hideIndex: true
+---
+
+import { DirectoryListing } from "~/components";
+
+Example Reference Architectures
+
+
diff --git a/src/content/docs/agents/reference-architectures/rag.mdx b/src/content/docs/agents/reference-architectures/rag.mdx
new file mode 100644
index 00000000000000..7154404f2de659
--- /dev/null
+++ b/src/content/docs/agents/reference-architectures/rag.mdx
@@ -0,0 +1,9 @@
+---
+pcx_content_type: navigation
+title: Retrieval Augmented Generation
+external_link: /reference-architecture/diagrams/ai/ai-rag/
+sidebar:
+ order: 2
+head: []
+description: Build RAG architectures on Cloudflare
+---
\ No newline at end of file
diff --git a/src/content/docs/agents/reference-architectures/text-and-call.mdx b/src/content/docs/agents/reference-architectures/text-and-call.mdx
new file mode 100644
index 00000000000000..5471f0d78645de
--- /dev/null
+++ b/src/content/docs/agents/reference-architectures/text-and-call.mdx
@@ -0,0 +1,9 @@
+---
+pcx_content_type: navigation
+title: Send text messages from agents
+external_link: /workflows/examples/twilio/
+sidebar:
+ order: 3
+head: []
+description: Send text messages and accept phone calls from your agent
+---
\ No newline at end of file
diff --git a/src/content/docs/ai-gateway/configuration/fallbacks.mdx b/src/content/docs/ai-gateway/configuration/fallbacks.mdx
index 98fc7395ccb25f..fdbb454ead8605 100644
--- a/src/content/docs/ai-gateway/configuration/fallbacks.mdx
+++ b/src/content/docs/ai-gateway/configuration/fallbacks.mdx
@@ -9,11 +9,15 @@ import { Render } from "~/components";
Specify model or provider fallbacks with your [Universal endpoint](/ai-gateway/providers/universal/) to handle request failures and ensure reliability.
-Fallbacks are currently triggered only when a request encounters an error. We are working to expand fallback functionality to include time-based triggers, which will allow requests that exceed a predefined response time to timeout and fallback.
+Cloudflare can trigger your fallback provider in response to [request errors](#request-failures) or [predetermined request timeouts](#request-timeouts). The [response header `cf-aig-step`](#response-headercf-aig-step) indicates which step successfully processed the request.
-## Example
+## Request failures
-In the following example, a request first goes to the [Workers AI](/workers-ai/) Inference API. If the request fails, it falls back to OpenAI. The response header `cf-aig-step` indicates which provider successfully processed the request.
+By default, Cloudflare triggers your fallback if a model request returns an error.
+
+### Request failure example
+
+In the following example, a request first goes to the [Workers AI](/workers-ai/) Inference API. If the request fails, it falls back to OpenAI.
1. Sends a request to Workers AI Inference API.
2. If that request fails, proceeds to OpenAI.
@@ -32,6 +36,84 @@ You can add as many fallbacks as you need, just by adding another object in the
+---
+
+## Request timeouts
+
+If set, a request timeout triggers a fallback provider based on a predetermined response time (measured in milliseconds). This feature is helpful for latency-sensitive applications because your gateway does not have to wait for a [request error](#request-failures) before moving to a fallback provider.
+
+You can configure request timeouts by using one or more of the following properties, which are listed in order of priority:
+
+| Priority | Property |
+| -------- | ---------------------------------------------------------------------------------------------------------------------- |
+| 1 | `requestTimeout` (added as a universal attribute) |
+| 2 | `cf-aig-request-timeout` (header included at the [provider level](/ai-gateway/providers/universal/#payload-reference)) |
+| 3 | `cf-aig-request-timeout` (header included at the request level) |
+
+Your gateway follows this hierarchy to determine the timeout duration before implementing a fallback.
+
+### Request timeout example
+
+These request timeout values can interact to customize the behavior of your universal gateway.
+
+In this example, the request will try to answer `What is Cloudflare?` within 1000 milliseconds using the normal `@cf/meta/llama-3.1-8b-instruct` model. The `requestTimeout` property takes precedence over the `cf-aig-request-timeout` for `@cf/meta/llama-3.1-8b-instruct`.
+
+If that fails, then the gateway will timeout and move to the fallback `@cf/meta/llama-3.1-8b-instruct-fast` model. This model has 3000 milliseconds - determined by the request-level `cf-aig-request-timeout` value - to complete the request and provide an answer.
+
+```bash title="Request" collapse={36-50} {2,11,13-15}
+curl 'https://gateway.ai.cloudflare.com/v1/{account_id}/{gateway_id}' \
+ --header 'cf-aig-request-timeout: 3000' \
+ --header 'Content-Type: application/json' \
+ --data '[
+ {
+ "provider": "workers-ai",
+ "endpoint": "@cf/meta/llama-3.1-8b-instruct",
+ "headers": {
+ "Authorization": "Bearer {cloudflare_token}",
+ "Content-Type": "application/json",
+ "cf-aig-request-timeout": "2000"
+ },
+ "config": {
+ "requestTimeout": 1000
+ },
+ "query": {
+ "messages": [
+ {
+ "role": "system",
+ "content": "You are a friendly assistant"
+ },
+ {
+ "role": "user",
+ "content": "What is Cloduflare?"
+ }
+ ]
+ }
+ },
+ {
+ "provider": "workers-ai",
+ "endpoint": "@cf/meta/llama-3.1-8b-instruct-fast",
+ "headers": {
+ "Authorization": "Bearer {cloudflare_token}",
+ "Content-Type": "application/json"
+ },
+ "query": {
+ "messages": [
+ {
+ "role": "system",
+ "content": "You are a friendly assistant"
+ },
+ {
+ "role": "user",
+ "content": "What is Cloudflare?"
+ }
+ ]
+ }
+ }
+]'
+```
+
+---
+
## Response header(cf-aig-step)
When using the [Universal endpoint](/ai-gateway/providers/universal/) with fallbacks, the response header `cf-aig-step` indicates which model successfully processed the request by returning the step number. This header provides visibility into whether a fallback was triggered and which model ultimately processed the response.
diff --git a/src/content/docs/ai-gateway/evaluations/set-up-evaluations.mdx b/src/content/docs/ai-gateway/evaluations/set-up-evaluations.mdx
index ec7db10020eb14..8fa842320959af 100644
--- a/src/content/docs/ai-gateway/evaluations/set-up-evaluations.mdx
+++ b/src/content/docs/ai-gateway/evaluations/set-up-evaluations.mdx
@@ -24,6 +24,24 @@ Please keep in mind that datasets currently use `AND` joins, so there can only b
:::
+### List of available filters
+
+| Filter category | Filter options | Filter by description |
+| --------------- | ------------------------------------------------------------ | ----------------------------------------- |
+| Status | error, status | error type or status. |
+| Cache | cached, not cached | based on whether they were cached or not. |
+| Provider | specific providers | the selected AI provider. |
+| AI Models | specific models | the selected AI model. |
+| Cost | less than, greater than | cost, specifying a threshold. |
+| Request type | Universal, Workers AI Binding, WebSockets | the type of request. |
+| Tokens | Total tokens, Tokens In, Tokens Out | token count (less than or greater than). |
+| Duration | less than, greater than | request duration. |
+| Feedback | equals, does not equal (thumbs up, thumbs down, no feedback) | feedback type. |
+| Metadata Key | equals, does not equal | specific metadata keys. |
+| Metadata Value | equals, does not equal | specific metadata values. |
+| Log ID | equals, does not equal | a specific Log ID. |
+| Event ID | equals, does not equal | a specific Event ID. |
+
## 2. Select evaluators
After creating a dataset, choose the evaluation parameters:
diff --git a/src/content/docs/ai-gateway/integrations/agents.mdx b/src/content/docs/ai-gateway/integrations/agents.mdx
new file mode 100644
index 00000000000000..235f6ea5ebfd11
--- /dev/null
+++ b/src/content/docs/ai-gateway/integrations/agents.mdx
@@ -0,0 +1,10 @@
+---
+pcx_content_type: navigation
+title: Agents
+external_link: /agents/
+sidebar:
+ order: 10
+head: []
+description: Build AI-powered Agents on Cloudflare
+---
+
diff --git a/src/content/docs/ai-gateway/integrations/aig-workers-ai-binding.mdx b/src/content/docs/ai-gateway/integrations/aig-workers-ai-binding.mdx
index 056c35ebc79dc9..787608b9a2603a 100644
--- a/src/content/docs/ai-gateway/integrations/aig-workers-ai-binding.mdx
+++ b/src/content/docs/ai-gateway/integrations/aig-workers-ai-binding.mdx
@@ -4,7 +4,7 @@ pcx_content_type: tutorial
updated: 2024-10-17
---
-import { Render, PackageManagers } from "~/components";
+import { Render, PackageManagers, WranglerConfig } from "~/components";
This guide will walk you through setting up and deploying a Workers AI project. You will use [Workers](/workers/), an AI Gateway binding, and a large language model (LLM), to deploy your first AI-powered application on the Cloudflare global network.
@@ -35,7 +35,7 @@ Running `npm create cloudflare@latest` will prompt you to install the create-clo
This will create a new `hello-ai` directory. Your new `hello-ai` directory will include:
- A "Hello World" Worker at `src/index.ts`.
-- A `wrangler.toml` configuration file.
+- A `wrangler.json` configuration file.
Go to your application directory:
@@ -47,9 +47,7 @@ cd hello-ai
You must create an AI binding for your Worker to connect to Workers AI. Bindings allow your Workers to interact with resources, like Workers AI, on the Cloudflare Developer Platform.
-To bind Workers AI to your Worker, add the following to the end of your `wrangler.toml` file:
-
-import { WranglerConfig } from "~/components";
+To bind Workers AI to your Worker, add the following to the end of your `wrangler.json` file:
@@ -72,7 +70,7 @@ Update the `index.ts` file in your `hello-ai` application directory with the fol
```typescript title="src/index.ts" {78-81}
export interface Env {
- // If you set another name in wrangler.toml as the value for 'binding',
+ // If you set another name in the `wrangler.toml / wrangler.json` file as the value for 'binding',
// replace "AI" with the variable name you defined.
AI: Ai;
}
diff --git a/src/content/docs/ai-gateway/integrations/vercel-ai-sdk.mdx b/src/content/docs/ai-gateway/integrations/vercel-ai-sdk.mdx
index bd18fa2fb39a54..8931c75192fe63 100644
--- a/src/content/docs/ai-gateway/integrations/vercel-ai-sdk.mdx
+++ b/src/content/docs/ai-gateway/integrations/vercel-ai-sdk.mdx
@@ -35,6 +35,18 @@ const anthropic = createAnthropic({
});
```
+### Google AI Studio
+
+If you're using the Google AI Studio provider in AI SDK, you need to append `/v1beta` to your Google AI Studio-compatible AI Gateway URL to avoid errors. The `/v1beta` path is required because Google AI Studio's API includes this in its endpoint structure, and the AI SDK sets the model name separately. This ensures compatibility with Google's API versioning.
+
+```typescript
+import { createGoogleGenerativeAI } from '@ai-sdk/google';
+
+const google = createGoogleGenerativeAI({
+ baseURL: `https://gateway.ai.cloudflare.com/v1/{account_id}/{gateway_id}/google-ai-studio/v1beta`,
+});
+```
+
### Other providers
For other providers that are not listed above, you can follow a similar pattern by creating a custom instance for any AI provider, and passing your AI Gateway URL. For help finding your provider-specific AI Gateway URL, refer to the [Supported providers page](/ai-gateway/providers).
diff --git a/src/content/docs/ai-gateway/integrations/worker-binding-methods.mdx b/src/content/docs/ai-gateway/integrations/worker-binding-methods.mdx
new file mode 100644
index 00000000000000..237a7b0f9e2c8d
--- /dev/null
+++ b/src/content/docs/ai-gateway/integrations/worker-binding-methods.mdx
@@ -0,0 +1,122 @@
+---
+title: AI Gateway Binding Methods
+pcx_content_type: tutorial
+updated: 2025-01-28
+---
+
+import { Render, PackageManagers } from "~/components";
+
+This guide provides an overview of how to use the latest Cloudflare Workers AI Gateway binding methods. You will learn how to set up an AI Gateway binding, access new methods, and integrate them into your Workers.
+
+## Prerequisites
+
+- Ensure your Worker project is configured with an AI Gateway binding in your `wrangler.toml / wrangler.json` file.
+- Install and use the `@cloudflare/workers-types` library, version `4.20250124.3` or above.
+
+## 1. Add an AI Binding to your Worker
+
+To connect your Worker to Workers AI, add the following to your `wrangler.toml` file:
+
+import { WranglerConfig } from "~/components";
+
+
+
+```toml title="wrangler.toml"
+[ai]
+binding = "AI"
+```
+
+
+
+This configuration sets up the AI binding accessible in your Worker code as `env.AI`.
+
+## 2. Basic Usage with Workers AI + Gateway
+
+To perform an inference task using Workers AI and an AI Gateway, you can use the following code:
+
+```typescript title="src/index.ts"
+const resp = await env.AI.run("@cf/meta/llama-3.1-8b-instruct", {
+ prompt: "tell me a joke"
+}, {
+ gateway: {
+ id: "my-gateway"
+ }
+});
+```
+
+Additionally, you can access the latest request log ID with:
+
+```typescript
+const myLogId = env.AI.aiGatewayLogId;
+```
+
+## 3. Access the Gateway Binding
+
+You can access your AI Gateway binding using the following code:
+
+```typescript
+const gateway = env.AI.gateway("my-gateway");
+```
+
+Once you have the gateway instance, you can use the following methods:
+
+### 3.1. `patchLog`: Send Feedback
+
+The `patchLog` method allows you to send feedback, score, and metadata for a specific log ID. All object properties are optional, so you can include any combination of the parameters:
+
+```typescript
+gateway.patchLog('my-log-id', {
+ feedback: 1,
+ score: 100,
+ metadata: {
+ user: "123"
+ }
+});
+```
+
+- **Returns**: `Promise<void>` (Make sure to `await` the request.)
+- **Example Use Case**: Update a log entry with user feedback or additional metadata.
+
+### 3.2. `getLog`: Read Log Details
+
+The `getLog` method retrieves details of a specific log ID. It returns an object of type `Promise<AiGatewayLog>`. You can import the `AiGatewayLog` type from the `@cloudflare/workers-types` library.
+
+```typescript
+const log = await gateway.getLog("my-log-id");
+```
+
+- **Returns**: `Promise<AiGatewayLog>`
+- **Example Use Case**: Retrieve log information for debugging or analytics.
+
+### 3.3. `run`: Universal Requests
+
+The `run` method allows you to execute universal requests. Users can pass either a single universal request object or an array of them. This method supports all AI Gateway providers.
+
+Refer to the [Universal endpoint documentation](/ai-gateway/providers/universal/) for details about the available inputs.
+
+```typescript
+const resp = await gateway.run({
+ provider: "workers-ai",
+ endpoint: "@cf/meta/llama-3.1-8b-instruct",
+ headers: {
+ authorization: "Bearer my-api-token"
+ },
+ query: {
+ prompt: "tell me a joke"
+ }
+});
+```
+
+- **Returns**: `Promise<Response>`
+- **Example Use Case**: Perform a universal AI request to any supported provider.
+
+## Conclusion
+
+With the new AI Gateway binding methods, you can now:
+
+- Send feedback and update metadata with `patchLog`.
+- Retrieve detailed log information using `getLog`.
+- Execute universal requests to any AI Gateway provider with `run`.
+
+These methods offer greater flexibility and control over your AI integrations, empowering you to build more sophisticated applications on the Cloudflare Workers platform.
+
diff --git a/src/content/docs/ai-gateway/observability/logging/index.mdx b/src/content/docs/ai-gateway/observability/logging/index.mdx
index 32b577f7bdcf3c..599975e783fa8a 100644
--- a/src/content/docs/ai-gateway/observability/logging/index.mdx
+++ b/src/content/docs/ai-gateway/observability/logging/index.mdx
@@ -62,3 +62,21 @@ This setting is useful for maintaining continuous logging while staying within t
### Manual deletion
To manually delete logs, navigate to the Logs tab in the dashboard. Use the available filters such as status, cache, provider, cost, or any other options in the dropdown to refine the logs you wish to delete. Once filtered, select Delete logs to complete the action.
+
+See full list of available filters and their descriptions below:
+
+| Filter category | Filter options | Filter by description |
+| --------------- | ------------------------------------------------------------ | ----------------------------------------- |
+| Status | error, status | error type or status. |
+| Cache | cached, not cached | based on whether they were cached or not. |
+| Provider | specific providers | the selected AI provider. |
+| AI Models | specific models | the selected AI model. |
+| Cost | less than, greater than | cost, specifying a threshold. |
+| Request type | Universal, Workers AI Binding, WebSockets | the type of request. |
+| Tokens | Total tokens, Tokens In, Tokens Out | token count (less than or greater than). |
+| Duration | less than, greater than | request duration. |
+| Feedback | equals, does not equal (thumbs up, thumbs down, no feedback) | feedback type. |
+| Metadata Key | equals, does not equal | specific metadata keys. |
+| Metadata Value | equals, does not equal | specific metadata values. |
+| Log ID | equals, does not equal | a specific Log ID. |
+| Event ID | equals, does not equal | a specific Event ID. |
diff --git a/src/content/docs/ai-gateway/providers/universal.mdx b/src/content/docs/ai-gateway/providers/universal.mdx
index 09fe85e847a76c..9c8faedbd85c8a 100644
--- a/src/content/docs/ai-gateway/providers/universal.mdx
+++ b/src/content/docs/ai-gateway/providers/universal.mdx
@@ -16,18 +16,24 @@ https://gateway.ai.cloudflare.com/v1/{account_id}/{gateway_id}
AI Gateway offers multiple endpoints for each Gateway you create - one endpoint per provider, and one Universal Endpoint. The Universal Endpoint requires some adjusting to your schema, but supports additional features. Some of these features are, for example, retrying a request if it fails the first time, or configuring a [fallback model/provider](/ai-gateway/configuration/fallbacks/).
+## Payload reference
+
You can use the Universal endpoint to contact every provider. The payload is expecting an array of messages, and each message is an object with the following parameters:
-- `provider` : the name of the provider you would like to direct this message to. Can be OpenAI, workers-ai, or any of our supported providers.
+- `provider` : the name of the provider you would like to direct this message to. Can be OpenAI, Workers AI, or any of our supported providers.
- `endpoint`: the pathname of the provider API you’re trying to reach. For example, on OpenAI it can be `chat/completions`, and for Workers AI this might be [`@cf/meta/llama-3.1-8b-instruct`](/workers-ai/models/llama-3.1-8b-instruct/). See more in the sections that are specific to [each provider](/ai-gateway/providers/).
-- `authorization`: the content of the Authorization HTTP Header that should be used when contacting this provider. This usually starts with “Token” or “Bearer”.
+- `headers`:
+ - `Authorization`: the content of the Authorization HTTP Header that should be used when contacting this provider. This usually starts with "Token" or "Bearer".
+ - Any other custom header in a model's [configuration](/ai-gateway/configuration/), such as [Caching](/ai-gateway/configuration/caching/) or [Custom Metadata](/ai-gateway/configuration/custom-metadata/).
- `query`: the payload as the provider expects it in their official API.
## cURL example
+The following example shows a simple setup with a primary model and a [fallback](/ai-gateway/configuration/fallbacks/) option.
+
-The above will send a request to Workers AI Inference API, if it fails it will proceed to OpenAI. You can add as many fallbacks as you need, just by adding another JSON in the array.
+The above will send a request to Workers AI Inference API, if it fails it will proceed to OpenAI. You can add as many [fallbacks](/ai-gateway/configuration/fallbacks/) as you need, just by adding another JSON in the array.
## WebSockets API
diff --git a/src/content/docs/ai-gateway/providers/workersai.mdx b/src/content/docs/ai-gateway/providers/workersai.mdx
index 79422005b577ac..5d8cc4334f4097 100644
--- a/src/content/docs/ai-gateway/providers/workersai.mdx
+++ b/src/content/docs/ai-gateway/providers/workersai.mdx
@@ -114,6 +114,6 @@ Workers AI supports the following parameters for AI gateways:
- `id` string
- Name of your existing [AI Gateway](/ai-gateway/get-started/#create-gateway). Must be in the same account as your Worker.
- `skipCache` boolean(default: false)
- - Controls whether the request should [skip the cache](/ai-gateway/configuration/caching/#skip-cache-cf-skip-cache).
+ - Controls whether the request should [skip the cache](/ai-gateway/configuration/caching/#skip-cache-cf-aig-skip-cache).
- `cacheTtl` number
- - Controls the [Cache TTL](/ai-gateway/configuration/caching/#cache-ttl-cf-cache-ttl).
+ - Controls the [Cache TTL](/ai-gateway/configuration/caching/#cache-ttl-cf-aig-cache-ttl).
diff --git a/src/content/docs/analytics/account-and-zone-analytics/zone-analytics.mdx b/src/content/docs/analytics/account-and-zone-analytics/zone-analytics.mdx
index a7ab91f8332507..446c72e72e28cb 100644
--- a/src/content/docs/analytics/account-and-zone-analytics/zone-analytics.mdx
+++ b/src/content/docs/analytics/account-and-zone-analytics/zone-analytics.mdx
@@ -89,21 +89,6 @@ The metrics aggregated under this tab span multiple Cloudflare services. The p
* **Origin Performance (Argo)** (add-on service) - Displays metrics related to response time between the Cloudflare edge network and origin servers for the last 48 hours. For additional details, refer to [Argo Analytics](/argo-smart-routing/analytics/).
* **Overview** - Displays a set of pie charts for: **Client HTTP Version Used**, **Bandwidth Saved**, and **Content Type Breakdown**. If available, the expandable **Details** link display a table with numerical data.
-### DNS
-
-:::note[New DNS analytics]
-The **Analytics** > **DNS** tab will be deprecated soon.
-
-To access the new analytics dashboard, go to **DNS** > **Analytics**. Refer to [Analytics and logs](/dns/additional-options/analytics/) for details.
-:::
-
-The DNS tab presents statistics for DNS queries. Note that metrics are available as long as Cloudflare is the site’s authoritative DNS server, even if the site is not proxied by Cloudflare. Therefore, DNS metrics are not offered for sites with a [CNAME Setup](/dns/zone-setups/partial-setup/).
-
-The metrics panels available under the DNS tab may include:
-
-* **DNS Queries** - Displays area charts and data tables for DNS record metrics. For free plans, this section includes *Queries by Response Code* and, for Pro and above, this section also includes *Queries by Record Type*. Records that return an *NXDOMAIN* response (dns record doesn’t exist) are also considered. For zones in Enterprise plan, you can also filter by one or several DNS records by entering record names (for example, [www.example.com](http://www.example.com)) in the dropdown near the top.
-* **DNS Queries by Data Center** - Displays DNS query distribution across Cloudflare’s data centers. Metrics appear as interactive maps and data tables, and include statistics for *DNS Traffic*, *NXDOMAIN*, and *NOERROR*. This section is available to zones in Pro plan or above.
-
### Workers
This panel features metrics for Cloudflare Workers. To learn more, read [Cloudflare analytics with Workers](/analytics/account-and-zone-analytics/analytics-with-workers/).
diff --git a/src/content/docs/analytics/analytics-engine/get-started.mdx b/src/content/docs/analytics/analytics-engine/get-started.mdx
index 10e30f6c323c90..a37dd476312dce 100644
--- a/src/content/docs/analytics/analytics-engine/get-started.mdx
+++ b/src/content/docs/analytics/analytics-engine/get-started.mdx
@@ -9,13 +9,11 @@ head:
---
-import { DirectoryListing } from "~/components"
+import { DirectoryListing, WranglerConfig } from "~/components"
## 1. Name your dataset and add it to your Worker
-Add the following to your `wrangler.toml` file to create a [binding](/workers/runtime-apis/bindings/) to a Workers Analytics Engine dataset. A dataset is like a table in SQL: the rows and columns should have consistent meaning.
-
-import { WranglerConfig } from "~/components";
+Add the following to your `wrangler.toml / wrangler.json` file to create a [binding](/workers/runtime-apis/bindings/) to a Workers Analytics Engine dataset. A dataset is like a table in SQL: the rows and columns should have consistent meaning.
diff --git a/src/content/docs/analytics/analytics-engine/worker-querying.mdx b/src/content/docs/analytics/analytics-engine/worker-querying.mdx
index e6e37857cb2355..b45aaeaade6738 100644
--- a/src/content/docs/analytics/analytics-engine/worker-querying.mdx
+++ b/src/content/docs/analytics/analytics-engine/worker-querying.mdx
@@ -8,6 +8,9 @@ head:
content: Querying Workers Analytics Engine from a Worker
---
+
+import { WranglerConfig } from "~/components";
+
If you want to access Analytics Engine data from within a Worker you can use `fetch` to access the SQL API. The API can return JSON data that is easy to interact with in JavaScript.
## Authentication
@@ -46,9 +49,7 @@ The following is a sample Worker which executes a query against a dataset of wea
First the environment variables are set up with the account ID and API token.
-The account ID is set in `wrangler.toml`:
-
-import { WranglerConfig } from "~/components";
+The account ID is set in the `wrangler.toml / wrangler.json` file:
diff --git a/src/content/docs/analytics/graphql-api/getting-started/explore-graphql-schema.mdx b/src/content/docs/analytics/graphql-api/getting-started/explore-graphql-schema.mdx
index 358b8b08c4c8f0..ccfd40a5bfffd9 100644
--- a/src/content/docs/analytics/graphql-api/getting-started/explore-graphql-schema.mdx
+++ b/src/content/docs/analytics/graphql-api/getting-started/explore-graphql-schema.mdx
@@ -6,7 +6,7 @@ sidebar:
---
-Many GraphQL clients are support browsing GraphQL schema by taking care of
+Many GraphQL clients support browsing the GraphQL schema by taking care of
[introspection][1]. In this page, we will cover GraphiQL and Altair clients.
[GraphiQL][2] and [Altair][3] are open-source GraphQL clients that provide a
diff --git a/src/content/docs/analytics/network-analytics/configure/displayed-data.mdx b/src/content/docs/analytics/network-analytics/configure/displayed-data.mdx
index cd357de5698983..253a06b964231b 100644
--- a/src/content/docs/analytics/network-analytics/configure/displayed-data.mdx
+++ b/src/content/docs/analytics/network-analytics/configure/displayed-data.mdx
@@ -67,3 +67,18 @@ Note that some filters will not be added to the new Magic Firewall rule definiti
Enable the **Show annotations** toggle to show or hide annotations for advertised/withdrawn IP prefix events in the **Network Analytics** view. Select each annotation to get more details.

+
+## View logged or monitored traffic
+
+[Network DDoS managed rules](/ddos-protection/managed-rulesets/network/) and [Advanced DDoS Protection systems](/ddos-protection/advanced-ddos-systems/overview/) provide a `log` or `monitoring` mode that does not drop traffic. These `log` and `monitoring` mode events are based on **Verdict** and **Outcome**/**Action** fields.
+
+To filter for these traffic events:
+
+1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com/) and select your account.
+2. Go to **Analytics & Logs** > **Network Analytics** > **DDoS managed rules**.
+3. Select **Add filter**.
+ - Set `Verdict equals drop`.
+ - Set `Action equals pass`.
+4. Select **Apply**.
+
+By setting `verdict` to `drop` and `outcome` as `pass`, we are filtering for traffic that was marked as a detection (that is, verdict was `drop`) but was not dropped (for example, outcome was `pass`).
\ No newline at end of file
diff --git a/src/content/docs/analytics/types-of-analytics.mdx b/src/content/docs/analytics/types-of-analytics.mdx
index 6499fb517de7ea..3be09c4af5b47f 100644
--- a/src/content/docs/analytics/types-of-analytics.mdx
+++ b/src/content/docs/analytics/types-of-analytics.mdx
@@ -42,7 +42,6 @@ Data available under the **Analytics & Logs** section includes:
* **Security** - Total Threats, Top Crawlers/Bots, Rate Limiting, Total Threats Stopped.
* **Performance** - Origin Performance, Bandwidth Saved.
* **Edge Reachability** - [Last mile insights](/network-error-logging/) for Enterprise customers.
-* **DNS** - DNS Queries by Response Code, Record Type, and Cloudflare Data Center. [Available metrics](/analytics/account-and-zone-analytics/zone-analytics/#dns) vary according to the zone plan. For information on the new DNS analytics refer to [Analytics and logs](/dns/additional-options/analytics/).
* **Workers** - [Detailed information](/workers/observability/metrics-and-analytics/) related to your Workers per zone, and Workers KV per account.
* **Logs** - [Detailed logs](/logs/) of the metadata generated by Cloudflare products for Enterprise customers.
* **Instant logs** - [Live stream of traffic](/logs/instant-logs/) for your domain. Enterprise customers can access this live stream from the Cloudflare dashboard or from a command-line interface (CLI).
diff --git a/src/content/docs/api-shield/security/api-discovery.mdx b/src/content/docs/api-shield/security/api-discovery.mdx
index 44382ffbb5144b..e4cc45a2e5f162 100644
--- a/src/content/docs/api-shield/security/api-discovery.mdx
+++ b/src/content/docs/api-shield/security/api-discovery.mdx
@@ -55,7 +55,7 @@ By adding endpoints to Endpoint Management, you will unlock further [security](/
To restore any errantly ignored endpoints, you can filter by **Ignored** and select **Restore**.
-Check back regularly for new API Discovery results. A badge with the number of endpoints needing review will show in the API Shield dashboard.
+API Discovery is an ongoing process. Check back regularly for new API Discovery results. A badge with the number of endpoints needing review will show in the API Shield dashboard. You may see the quantities in the **Needs Review** and **Ignored** metrics change over time. As your actual API or traffic patterns to your APIs change, API Discovery results that are not saved can disappear.
:::note
diff --git a/src/content/docs/bots/reference/javascript-detections.mdx b/src/content/docs/bots/reference/javascript-detections.mdx
index 5f77af3553144f..cec48aeb22b45c 100644
--- a/src/content/docs/bots/reference/javascript-detections.mdx
+++ b/src/content/docs/bots/reference/javascript-detections.mdx
@@ -24,6 +24,8 @@ For more details on how to set up bot protection, see [Get started](/bots/get-st
+
+
## Limitations
### If you enabled Bot Management before June 2020
diff --git a/src/content/docs/browser-rendering/get-started/browser-rendering-with-DO.mdx b/src/content/docs/browser-rendering/get-started/browser-rendering-with-DO.mdx
index 299e8aff7227fe..6a40018be18649 100644
--- a/src/content/docs/browser-rendering/get-started/browser-rendering-with-DO.mdx
+++ b/src/content/docs/browser-rendering/get-started/browser-rendering-with-DO.mdx
@@ -14,7 +14,7 @@ sidebar:
order: 2
---
-import { Render, PackageManagers } from "~/components";
+import { Render, PackageManagers, WranglerConfig } from "~/components";
By following this guide, you will create a Worker that uses the Browser Rendering API along with [Durable Objects](/durable-objects/) to take screenshots from web pages and store them in [R2](/r2/).
@@ -69,13 +69,11 @@ wrangler r2 bucket list
After running the `list` command, you will see all bucket names, including the ones you have just created.
-## 5. Configure `wrangler.toml`
+## 5. Configure your Wrangler configuration file
-Configure your `browser-worker` project's [`wrangler.toml`](/workers/wrangler/configuration/) file by adding a browser [binding](/workers/runtime-apis/bindings/) and a [Node.js compatibility flag](/workers/configuration/compatibility-flags/#nodejs-compatibility-flag). Browser bindings allow for communication between a Worker and a headless browser which allows you to do actions such as taking a screenshot, generating a PDF and more.
+Configure your `browser-worker` project's [`wrangler.toml / wrangler.json` file](/workers/wrangler/configuration/) by adding a browser [binding](/workers/runtime-apis/bindings/) and a [Node.js compatibility flag](/workers/configuration/compatibility-flags/#nodejs-compatibility-flag). Browser bindings allow for communication between a Worker and a headless browser which allows you to do actions such as taking a screenshot, generating a PDF and more.
-Update your `wrangler.toml` configuration file with the Browser Rendering API binding, the R2 bucket you created and a Durable Object:
-
-import { WranglerConfig } from "~/components";
+Update your `wrangler.toml / wrangler.json` file with the Browser Rendering API binding, the R2 bucket you created and a Durable Object:
diff --git a/src/content/docs/browser-rendering/get-started/reuse-sessions.mdx b/src/content/docs/browser-rendering/get-started/reuse-sessions.mdx
index 68fc77f791b703..f6637b5678fb5f 100644
--- a/src/content/docs/browser-rendering/get-started/reuse-sessions.mdx
+++ b/src/content/docs/browser-rendering/get-started/reuse-sessions.mdx
@@ -5,7 +5,7 @@ sidebar:
order: 3
---
-import { Render, PackageManagers } from "~/components";
+import { Render, PackageManagers, WranglerConfig } from "~/components";
The best way to improve the performance of your browser rendering worker is to reuse sessions. One way to do that is via [Durable Objects](/browser-rendering/get-started/browser-rendering-with-do/), which allows you to keep a long running connection from a worker to a browser. Another way is to keep the browser open after you've finished with it, and connect to that session each time you have a new request.
@@ -35,15 +35,16 @@ Create a new Worker project named `browser-worker` by running:
## 2. Install Puppeteer
-In your `browser-worker` directory, install Cloudflare’s [fork of Puppeteer](/browser-rendering/platform/puppeteer/):
+In your `browser-worker` directory, install Cloudflare's [fork of Puppeteer](/browser-rendering/platform/puppeteer/):
```sh
npm install @cloudflare/puppeteer --save-dev
```
-## 3. Configure `wrangler.toml`
+## 3. Configure the `wrangler.toml / wrangler.json` file
-```
+
+```toml
name = "browser-worker"
main = "src/index.ts"
compatibility_date = "2023-03-14"
@@ -51,6 +52,7 @@ compatibility_flags = [ "nodejs_compat" ]
browser = { binding = "MYBROWSER" }
```
+
## 4. Code
diff --git a/src/content/docs/browser-rendering/get-started/screenshots.mdx b/src/content/docs/browser-rendering/get-started/screenshots.mdx
index e9e6f30a3b4dcb..4f565bd92a6822 100644
--- a/src/content/docs/browser-rendering/get-started/screenshots.mdx
+++ b/src/content/docs/browser-rendering/get-started/screenshots.mdx
@@ -5,7 +5,7 @@ sidebar:
order: 1
---
-import { Render, TabItem, Tabs, PackageManagers } from "~/components";
+import { Render, TabItem, Tabs, PackageManagers, WranglerConfig } from "~/components";
By following this guide, you will create a Worker that uses the Browser Rendering API to take screenshots from web pages. This is a common use case for browser automation.
@@ -56,17 +56,15 @@ npx wrangler kv:namespace create BROWSER_KV_DEMO --preview
Take note of the IDs for the next step.
-## 4. Configure `wrangler.toml`
+## 4. Configure the `wrangler.toml / wrangler.json` file
-Configure your `browser-worker` project's [`wrangler.toml`](/workers/wrangler/configuration/) file by adding a browser [binding](/workers/runtime-apis/bindings/) and a [Node.js compatibility flag](/workers/configuration/compatibility-flags/#nodejs-compatibility-flag). Bindings allow your Workers to interact with resources on the Cloudflare developer platform. Your browser `binding` name is set by you, this guide uses the name `MYBROWSER`. Browser bindings allow for communication between a Worker and a headless browser which allows you to do actions such as taking a screenshot, generating a PDF and more.
+Configure your `browser-worker` project's [`wrangler.toml / wrangler.json` file](/workers/wrangler/configuration/) by adding a browser [binding](/workers/runtime-apis/bindings/) and a [Node.js compatibility flag](/workers/configuration/compatibility-flags/#nodejs-compatibility-flag). Bindings allow your Workers to interact with resources on the Cloudflare developer platform. Your browser `binding` name is set by you, this guide uses the name `MYBROWSER`. Browser bindings allow for communication between a Worker and a headless browser which allows you to do actions such as taking a screenshot, generating a PDF and more.
-Update your `wrangler.toml` configuration file with the Browser Rendering API binding and the KV namespaces you created:
-
-import { WranglerConfig } from "~/components";
+Update your `wrangler.toml / wrangler.json` file with the Browser Rendering API binding and the KV namespaces you created:
-```toml
+```toml title="wrangler.toml"
name = "browser-worker"
main = "src/index.js"
compatibility_date = "2023-03-14"
diff --git a/src/content/docs/browser-rendering/how-to/ai.mdx b/src/content/docs/browser-rendering/how-to/ai.mdx
index d95ab64425c9df..7bf88493d1a0cd 100644
--- a/src/content/docs/browser-rendering/how-to/ai.mdx
+++ b/src/content/docs/browser-rendering/how-to/ai.mdx
@@ -4,9 +4,9 @@ sidebar:
order: 2
---
-import { Aside } from "~/components";
+import { Aside, WranglerConfig } from "~/components";
-The ability to browse websites can be crucial when building workflows with AI. Here, we provide an example where we use Browser Rendering to visit
+The ability to browse websites can be crucial when building workflows with AI. Here, we provide an example where we use Browser Rendering to visit
`https://news.ycombinator.com/` and then, using a machine learning model available in [Workers AI](/workers-ai/), extract the first post as JSON with a specified schema.
## Prerequisites
@@ -30,16 +30,20 @@ npm i zod
npm i zod-to-json-schema
```
-3. Activate the nodejs compatibility flag and add your Browser Rendering binding to your new `wrangler.toml` configuration:
+3. Activate the nodejs compatibility flag and add your Browser Rendering binding to your new Wrangler configuration:
+
```toml
compatibility_flags = [ "nodejs_compat" ]
```
+
+
```toml
[browser]
binding = "MY_BROWSER"
```
+
4. In order to use [Workers AI](/workers-ai/), you need to get your [Account ID and API token](/workers-ai/get-started/rest-api/#1-get-api-token-and-account-id).
Once you have those, create a [`.dev.vars`](/workers/configuration/environment-variables/#add-environment-variables-via-wrangler) file and set them there:
diff --git a/src/content/docs/browser-rendering/how-to/pdf-generation.mdx b/src/content/docs/browser-rendering/how-to/pdf-generation.mdx
index c5ea765a3556dc..22a40c370f7163 100644
--- a/src/content/docs/browser-rendering/how-to/pdf-generation.mdx
+++ b/src/content/docs/browser-rendering/how-to/pdf-generation.mdx
@@ -5,7 +5,7 @@ sidebar:
order: 1
---
-import { Aside } from "~/components";
+import { Aside, WranglerConfig } from "~/components";
As seen in the [Getting Started guide](/browser-rendering/get-started/screenshots/), Browser Rendering can be used to generate screenshots for any given URL. Alongside screenshots, you can also generate full PDF documents for a given webpage, and can also provide the webpage markup and style ourselves.
@@ -23,11 +23,13 @@ npm create cloudflare@latest -- browser-worker
npm install @cloudflare/puppeteer --save-dev
```
-3. Add your Browser Rendering binding to your new `wrangler.toml` configuration:
+3. Add your Browser Rendering binding to your new Wrangler configuration:
+
```toml
browser = { binding = "BROWSER" }
```
+
4. Replace the contents of `src/index.ts` (or `src/index.js` for JavaScript projects) with the following skeleton script:
diff --git a/src/content/docs/browser-rendering/platform/wrangler.mdx b/src/content/docs/browser-rendering/platform/wrangler.mdx
index f94f7bad96e7f6..91cd57c36cdca8 100644
--- a/src/content/docs/browser-rendering/platform/wrangler.mdx
+++ b/src/content/docs/browser-rendering/platform/wrangler.mdx
@@ -19,7 +19,7 @@ To install Wrangler, refer to [Install and Update Wrangler](/workers/wrangler/in
[Bindings](/workers/runtime-apis/bindings/) allow your Workers to interact with resources on the Cloudflare developer platform. A browser binding will provide your Worker with an authenticated endpoint to interact with a dedicated Chromium browser instance.
-To deploy a Browser Rendering Worker, you must declare a [browser binding](/workers/runtime-apis/bindings/) in your Worker's `wrangler.toml` configuration file.
+To deploy a Browser Rendering Worker, you must declare a [browser binding](/workers/runtime-apis/bindings/) in your Worker's Wrangler configuration file.
diff --git a/src/content/docs/byoip/address-maps/index.mdx b/src/content/docs/byoip/address-maps/index.mdx
index 67997a8f8d7e9d..0fb1d1e254b3af 100644
--- a/src/content/docs/byoip/address-maps/index.mdx
+++ b/src/content/docs/byoip/address-maps/index.mdx
@@ -27,7 +27,7 @@ For zones using [Cloudflare's authoritative DNS](/dns/), Cloudflare typically re
Address maps do not change [how Cloudflare reaches the configured origin](/fundamentals/concepts/how-cloudflare-works/#how-cloudflare-works-as-a-reverse-proxy). The IP addresses defined on the **DNS** > **Records** under your zone continue to instruct Cloudflare how to reach the origin.
:::caution
-Depending on whether you use static IPs or BYOIP, the process to [create an address map](/byoip/address-maps/setup/#create-address-maps) is different.
+Depending on whether you use static IPs or BYOIP, the process to [create an address map](/byoip/address-maps/setup/) is different.
:::
### Static IPs or BYOIP
@@ -44,4 +44,4 @@ Some customers may only proxy zones through BYOIP addresses, and are prohibited
It is still possible to create more specific zone-level address maps with specific BYOIPs, but DNS will fall back to the account-wide address map without one.
-To specify different addresses for certain zones, [create a new address map](/byoip/address-maps/setup/#create-address-maps).
\ No newline at end of file
+To specify different addresses for certain zones, [create a new address map](/byoip/address-maps/setup/).
\ No newline at end of file
diff --git a/src/content/docs/byoip/address-maps/setup.mdx b/src/content/docs/byoip/address-maps/setup.mdx
index 9acfb13cc58486..c4ea246f0cd92e 100644
--- a/src/content/docs/byoip/address-maps/setup.mdx
+++ b/src/content/docs/byoip/address-maps/setup.mdx
@@ -10,6 +10,10 @@ import { GlossaryTooltip } from "~/components";
Consider the sections below to learn how to set up address maps.
+:::note
+There is **no expected downtime** when setting up or updating your address maps.
+:::
+
## Create address maps
To avoid any errors if you have [static IPs](/byoip/concepts/static-ips/), Cloudflare creates an address map during the static IP onboarding process, meaning you cannot create a new address map with your static IPs. You may only edit the Cloudflare-created map and add or edit your zones within the existing map.
diff --git a/src/content/docs/byoip/concepts/irr-entries/best-practices.mdx b/src/content/docs/byoip/concepts/irr-entries/best-practices.mdx
index 3b8563b4b059fe..17958789de3adb 100644
--- a/src/content/docs/byoip/concepts/irr-entries/best-practices.mdx
+++ b/src/content/docs/byoip/concepts/irr-entries/best-practices.mdx
@@ -81,10 +81,6 @@ Add or update IRR entries when they meet any of these criteria:
* The entry is incomplete or inaccurate — for example, when the route object does not show the correct origin.
* The entry is complete but requires updating — for example, when they correspond to supernets but need to correspond to subnets used in Magic Transit.
-You are strongly encouraged to verify IRR entries for the exact prefixes you will use to onboard with Cloudflare.
-
-IRR entries for less specific prefixes are acceptable as long as you understand and accept the following risk: if you modify your IRR entries in the future (for example, by changing your ASN) and the IRR entry for the supernet no longer matches the prefix or origin mapping in your Magic Transit configuration, the prefix will have reduced reachability due to networks Cloudflare peers with automatically filtering the prefix. Having specific IRR entries helps minimize (but not entirely remove) this risk.
-
### IRR entry verification methods
To verify your prefix and ASN route, use the tools and methods outlined on the table below:
diff --git a/src/content/docs/byoip/get-started.mdx b/src/content/docs/byoip/get-started.mdx
index eedc04a990e4be..0c0da10e4ce13d 100644
--- a/src/content/docs/byoip/get-started.mdx
+++ b/src/content/docs/byoip/get-started.mdx
@@ -11,22 +11,23 @@ import { GlossaryTooltip } from "~/components"
To bring your own IPs, you must work with your account team to understand everything you need to ensure a smooth transition during the onboarding process.
:::note
-
-BYOIP is ingress only.
+BYOIP is ingress only.
:::
-## Overview
-
Cloudflare requires a service-specific configuration for your prefixes, as well as some requirements common to all BYOIP customers regardless of service type. These requirements are common to all products compatible with BYOIP, such as [Magic Transit](/magic-transit/), [Spectrum](/spectrum/), and [CDN services](/cache/).
## Prerequisites
There are two major prerequisites before Cloudflare can begin onboarding your IP space.
-1. You must verify your [Internet Routing Registry (IRR)](/byoip/concepts/irr-entries/) records are up to date with the correct prefix or ASN information.
-2. Cloudflare must receive a [Letter of Agency (LOA)](/byoip/concepts/loa/) to announce your prefixes, which we will share with our transit partners as evidence that we are allowed to announce the route.
+1. Cloudflare must receive a [Letter of Agency (LOA)](/byoip/concepts/loa/) to announce your prefixes, which we will share with our transit partners as evidence that we are allowed to announce the route.
+2. You must verify that your [Internet Routing Registry (IRR)](/byoip/concepts/irr-entries/) records are up to date and contain:
+ - `route` or `route6` objects matching the exact prefixes you want to onboard
+ - `origin` matching the correct ASN you want to onboard
-Optionally, if you use the Resource Public Key Infrastructure (RPKI) protocol to sign your routes, Cloudflare can help with this as well. Contact your account team if you are interested in using RPKI.
+:::caution[RPKI validation]
+You are not required to use Resource Public Key Infrastructure (RPKI). However, if you do, make sure your ROAs are accurate. You can use [Cloudflare's RPKI Portal](https://rpki.cloudflare.com/?view=validator) and a second source such as [Routinator](https://rpki-validator.ripe.net/ui/) to double check your prefixes.
+:::
After onboarding, [Border Gateway Protocol (BGP)](https://www.cloudflare.com/learning/security/glossary/what-is-bgp/) announcements for customer prefixes can be controlled with the [Dynamic Advertisement](/byoip/concepts/dynamic-advertisement/) API or via the Cloudflare dashboard.
@@ -47,5 +48,5 @@ To protect your network using a Cloudflare IP address, contact your account mana
:::note
-When you use a Cloudflare-managed IP space, you do not need to provide a Letter of Agency (LOA) and advertise your prefixes that are associated with bringing your own IP.
+When you use a Cloudflare-managed IP space, you do not need to provide a Letter of Agency (LOA) and advertise your prefixes that are associated with bringing your own IP.
:::
diff --git a/src/content/docs/byoip/index.mdx b/src/content/docs/byoip/index.mdx
index deba21519c58b6..48c1eeeadf7fd8 100644
--- a/src/content/docs/byoip/index.mdx
+++ b/src/content/docs/byoip/index.mdx
@@ -12,11 +12,6 @@ import { LinkButton, Plan } from "~/components";
-With **Bringing Your Own IPs** (BYOIP), Cloudflare announces your IPs in all our locations. Use your IPs with Magic Transit, Spectrum, CDN services, or Gateway DNS.
+With **Bringing Your Own IPs** (BYOIP), Cloudflare announces your IPs in all our locations. Use your IPs with [Magic Transit](/magic-transit/), [Spectrum](/spectrum/), [CDN services](/cache/), or [Gateway DNS](/cloudflare-one/policies/gateway/dns-policies/).
-BYOIP is compatible with [Magic Transit](/magic-transit/), [Spectrum](/spectrum/), [CDN services](/cache/), and [Gateway DNS](/cloudflare-one/policies/gateway/dns-policies/).
-
-{" "}
-
- Get started
-
+Learn how to [get started](/byoip/get-started/).
diff --git a/src/content/docs/cache/concepts/revalidation.mdx b/src/content/docs/cache/concepts/revalidation.mdx
index e59f12a7bba79b..2ad5b0591a9461 100644
--- a/src/content/docs/cache/concepts/revalidation.mdx
+++ b/src/content/docs/cache/concepts/revalidation.mdx
@@ -6,9 +6,11 @@ pcx_content_type: concept
import { GlossaryTooltip } from "~/components"
-Revalidation is a caching mechanism that involves checking the [freshness](/cache/concepts/retention-vs-freshness/) of cached data before serving it to a client or user. If a cached object is no longer [fresh](/cache/concepts/retention-vs-freshness/#freshness-ttl) and Cloudflare receives a request for it, a request is made to the origin to revalidate the object in the Cloudflare cache.
+Revalidation is a caching mechanism that checks the [freshness](/cache/concepts/retention-vs-freshness/#freshness-ttl) of cached data before serving it to users. If a cached object is no longer fresh and Cloudflare receives a request for it, the system makes a request to the origin to revalidate the object in the Cloudflare cache. By using headers like `If-Modified-Since` and `ETag`, Cloudflare validates content without fully re-fetching it. When these headers are missing, Smart Edge Revalidation generates a `Last-Modified` header, ensuring efficient updates and delivery of fresh content while reducing origin traffic.
-## Example scenarios
+## Revalidation towards origin
+
+For stale (expired TTL) content, Cloudflare will send a revalidation request to the origin. If the stale content is still valid, Cloudflare will set a new TTL. If the content is expired, then the origin will provide new fresh content to replace the old.
Consider the following example scenarios.
@@ -19,3 +21,7 @@ One-thousand (1,000) requests arrive simultaneously at Cloudflare's network, and
### Example 2
One-thousand (1,000) requests arrive simultaneously at a single Cloudflare data center, and the requested asset is not in Cloudflare's cache (a cache miss). These requests will use a cache lock to communicate with your origin. This means that only the first request will go to origin to fetch the asset. The remaining 999 requests wait for the first request to fetch the data, after which the response is [streamed](https://blog.cloudflare.com/introducing-concurrent-streaming-acceleration/) to all the waiting requests. The cache lock ensures that Cloudflare only sends the origin one request at a time for a given asset from a location in Cloudflare's network, preventing the origin from getting too much traffic.
+
+## Smart revalidation towards users
+
+When both [`Last-Modified`](https://datatracker.ietf.org/doc/html/rfc7232?cf_history_state=%7B%22guid%22%3A%22C255D9FF78CD46CDA4F76812EA68C350%22%2C%22historyId%22%3A15%2C%22targetId%22%3A%226C8153BAEF7BC0C5A331E28F8BCF1ABA%22%7D#section-2.2) and [`Etag`](https://datatracker.ietf.org/doc/html/rfc7232?cf_history_state=%7B%22guid%22%3A%22C255D9FF78CD46CDA4F76812EA68C350%22%2C%22historyId%22%3A13%2C%22targetId%22%3A%226C8153BAEF7BC0C5A331E28F8BCF1ABA%22%7D#section-2.3) headers are absent from the origin server response, Smart Edge Revalidation will use the time the object was cached on Cloudflare's global network as the `Last-Modified` header value. When a browser sends a revalidation request to Cloudflare using `If-Modified-Since` or `If-None-Match`, our global network can answer those revalidation questions using the `Last-Modified` header generated from Smart Edge Revalidation. In this way, our global network can ensure efficient revalidation even if the headers are not sent from the origin.
\ No newline at end of file
diff --git a/src/content/docs/cache/how-to/cache-keys.mdx b/src/content/docs/cache/how-to/cache-keys.mdx
index d8c13121cbceb6..b9114660b5e201 100644
--- a/src/content/docs/cache/how-to/cache-keys.mdx
+++ b/src/content/docs/cache/how-to/cache-keys.mdx
@@ -119,8 +119,8 @@ Additionally, you cannot include the following headers:
Host determines which host header to include in the Cache Key.
-* If `resolved: false`, Cloudflare includes the `Host` header in the HTTP request sent to the origin.
-* If `resolved: true`, Cloudflare includes the `Host` header that was resolved to get the `origin IP` for the request. In this scenario, the `Host` header may be different from the header actually sent if the [Cloudflare Resolve Override](/rules/page-rules/how-to/override-url-or-ip-address/) feature is used.
+* If `Use original host` (`resolved: false` in the API), Cloudflare includes the `Host` header in the HTTP request sent to the origin.
+* If `Resolved host` (`resolved: true` in the API), Cloudflare includes the `Host` header that was resolved to get the `origin IP` for the request. The `Host` header may be different from the header actually sent if the [Cloudflare Resolve Override](/rules/page-rules/how-to/override-url-or-ip-address/) feature is used.
### Cookie
diff --git a/src/content/docs/cache/how-to/cache-rules/examples/custom-cache-key.mdx b/src/content/docs/cache/how-to/cache-rules/examples/custom-cache-key.mdx
index 748e3d400b267b..103f8469d5ae39 100644
--- a/src/content/docs/cache/how-to/cache-rules/examples/custom-cache-key.mdx
+++ b/src/content/docs/cache/how-to/cache-rules/examples/custom-cache-key.mdx
@@ -27,3 +27,5 @@ import { Example, Render } from "~/components"
* **Query string**: All query string parameters
+
+Refer to [cache keys](/cache/how-to/cache-keys/) for more information on possible settings when configuring a custom cache key.
diff --git a/src/content/docs/calls/turn/faq.mdx b/src/content/docs/calls/turn/faq.mdx
index 9228b2f110c274..545d0a25e8f97c 100644
--- a/src/content/docs/calls/turn/faq.mdx
+++ b/src/content/docs/calls/turn/faq.mdx
@@ -3,16 +3,12 @@ pcx_content_type: get-started
title: FAQ
sidebar:
order: 20
-
---
-
## General
### What is Cloudflare Calls TURN pricing? How exactly is it calculated?
-
-
Cloudflare TURN pricing is based on the data sent from the Cloudflare edge to the TURN client, as described in [RFC 8656 Figure 1](https://datatracker.ietf.org/doc/html/rfc8656#fig-turn-model). This means data sent from the TURN server to the TURN client and captures all data, including TURN overhead, following successful authentication.
Pricing for Cloudflare Calls Serverless SFU and TURN services is $0.05 per GB of data used.
@@ -27,211 +23,109 @@ Traffic between Cloudflare Calls TURN and Cloudflare Calls SFU or Cloudflare Str
-
### Is Calls TURN HIPAA/GDPR/FedRAMP compliant?
-
-
-
-
Please view Cloudflare's [certifications and compliance resources](https://www.cloudflare.com/trust-hub/compliance-resources/) and contact your Cloudflare enterprise account manager for more information.
### Is Calls TURN end-to-end encrypted?
-
-
-
-
-
-
TURN protocol, [RFC 8656](https://datatracker.ietf.org/doc/html/rfc8656), does not discuss encryption beyond wrapper protocols such as TURN over TLS. If you are using TURN with WebRTC will encrypt data at the WebRTC level.
-
-
### What regions does Cloudflare Calls TURN operate at?
-
-
-
-
Cloudflare Calls TURN server runs on [Cloudflare's global network](https://www.cloudflare.com/network) - a growing global network of thousands of machines distributed across hundreds of locations, with the notable exception of the Cloudflare's [China Network](/china-network/).
-
-
### Does Cloudflare Calls TURN use the Cloudflare Backbone or is there any "magic" Cloudflare do to speed connection up?
-
-
-
-
Cloudflare Calls TURN allocations are homed in the nearest available Cloudflare data center to the TURN client via anycast routing. If both ends of a connection are using Cloudflare Calls TURN, Cloudflare will be able to control the routing and, if possible, route TURN packets through the Cloudflare backbone.
-
-
### What is the difference between Cloudflare Calls TURN with a enterprise plan vs self-serve (pay with your credit card) plans?
-
-
-
-
There is no performance or feature level difference for Cloudflare Calls TURN service in enterprise or self-serve plans, however those on [enterprise plans](https://www.cloudflare.com/enterprise/) will get the benefit of priority support, predictable flat-rate pricing and SLA guarantees.
-
### Does Cloudflare Calls TURN run in the Cloudflare China Network?
-
-
-
-
-
Cloudflare's [China Network](/china-network/) does not participate in serving Calls traffic and TURN traffic from China will connect to Cloudflare locations outside of China.
-
-
-
### How long does it take for TURN activity to be available in analytics?
-
-
-
TURN usage shows up in analytics in 30 seconds.
-
## Technical
### I need to allowlist (whitelist) Cloudflare Calls TURN IP addresses. Which IP addresses should I use?
-
-
-
-
-
Cloudflare Calls TURN is easy to use by IT administrators who have strict firewalls because it requires very few IP addresses to be allowlisted compared to other providers. You must allowlist both IPv6 and IPv4 addresses.
Please allowlist the following IP addresses:
-* `2a06:98c1:3200::1/128`
-* `2606:4700:48::1/128`
-* `141.101.90.1/32`
-* `162.159.207.1/32`
+- `2a06:98c1:3200::1/128`
+- `2606:4700:48::1/128`
+- `141.101.90.1/32`
+- `162.159.207.1/32`
:::caution[Watch for IP changes]
-
Cloudflare tries to, but cannot guarantee that the IP addresses used for the TURN service won't change. If you are allowlisting IP addresses and do not have a enterprise contract, you must set up alerting that detects changes the DNS response from `turn.cloudflare.com` (A and AAAA records) and update the hardcoded IP address(es) accordingly within 14 days of the DNS change.
For more details about static IPs, guarantees and other arrangements please discuss with your enterprise account team.
Your enterprise team will be able to provide additional addresses to allowlist as future backup to achieve address diversity while still keeping a short list of IPs.
-
:::
-
### I would like to hardcode IP addresses used for TURN in my application to save a DNS lookup
-
-
-
-
-
Although this is not recommended, we understand there is a very small set of circumstances where hardcoding IP addresses might be useful. In this case, you must set up alerting that detects changes the DNS response from `turn.cloudflare.com` (A and AAAA records) and update the hardcoded IP address(es) accordingly within 14 days of the DNS change. Note that this DNS response could return more than one IP address. In addition, you must set up a failover to a DNS query if there is a problem connecting to the hardcoded IP address. Cloudflare tries to, but cannot guarantee that the IP address used for the TURN service won't change unless this is in your enterprise contract. For more details about static IPs, guarantees and other arrangements please discuss with your enterprise account team.
+### I see that TURN IPs are published above. Do you also publish IPs for STUN?
+The TURN service at `turn.cloudflare.com` will also respond to binding requests ("STUN requests").
### Does Cloudflare Calls TURN support the expired IETF RFC draft "draft-uberti-behave-turn-rest-00"?
-
-
-
-
The Cloudflare Calls credential generation function returns a JSON structure similar to the [expired RFC draft "draft-uberti-behave-turn-rest-00"](https://datatracker.ietf.org/doc/html/draft-uberti-behave-turn-rest-00), but it does not include the TTL value. If you need a response in this format, you can modify the JSON from the Cloudflare Calls credential generation endpoint to the required format in your backend server or Cloudflare Workers.
-
-
### I am observing packet loss when using Cloudflare Calls TURN - how can I debug this?
-
-
-
-
Packet loss is normal in UDP and can happen occasionally even on reliable connections. However, if you observe systematic packet loss, consider the following:
-* Are you sending or receiving data at a high rate (>50-100Mbps) from a single TURN client? Calls TURN might be dropping packets to signal you to slow down.
-* Are you sending or receiving large amounts of data with very small packet sizes (high packet rate > 5-10kpps) from a single TURN client? Cloudflare Calls might be dropping packets.
-* Are you sending packets to new unique addresses at a high rate resembling to [port scanning](https://en.wikipedia.org/wiki/Port_scanner) behavior?
-
-
+- Are you sending or receiving data at a high rate (>50-100Mbps) from a single TURN client? Calls TURN might be dropping packets to signal you to slow down.
+- Are you sending or receiving large amounts of data with very small packet sizes (high packet rate > 5-10kpps) from a single TURN client? Cloudflare Calls might be dropping packets.
+- Are you sending packets to new unique addresses at a high rate, resembling [port scanning](https://en.wikipedia.org/wiki/Port_scanner) behavior?
### I plan to use Calls TURN at scale. What is the rate at which I can issue credentials?
-
-
-
-
There is no defined limit for credential issuance. Start at 500 credentials/sec and scale up linearly. Ensure you use more than 50% of the issued credentials.
-
### What is the maximum value I can use for TURN credential expiry time?
-
You can set a expiration time for a credential up to 48 hours in the future. If you need your TURN allocation to last longer than this, you will need to [update](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/setConfiguration) the TURN credentials.
-
-
### Does Calls TURN support IPv6?
-
-
-
Yes. Cloudflare Calls is available over both IPv4 and IPv6 for TURN Client to TURN server communication, however it does not issue relay addresses in IPv6 as described in [RFC 6156](https://datatracker.ietf.org/doc/html/rfc6156).
-
-
### Does Calls TURN issue IPv6 relay addresses?
-
-
-
-
No. Calls TURN will not respect `REQUESTED-ADDRESS-FAMILY` STUN attribute if specified and will issue IPv4 addresses only.
-
### Does Calls TURN support TCP relaying?
-
-
-
-
-
No. Calls does not implement [RFC6062](https://datatracker.ietf.org/doc/html/rfc6062) and will not respect `REQUESTED-TRANSPORT` STUN attribute.
-
-
-
### I am unable to make CreatePermission or ChannelBind requests with certain IP addresses. Why is that?
-
-
-
Cloudflare Calls denies CreatePermission or ChannelBind requests if private IP ranges (e.g loopback addresses, linklocal unicast or multicast blocks) or IP addresses that are part of [BYOIP](/byoip/) are used.
If you are a Cloudflare BYOIP customer and wish to connect to your BYOIP ranges with Calls TURN, please reach out to your account manager for further details.
-
-
### When I send packets to relayed address without using TURN, the packets don't arrive
-
-
-
-
Cloudflare Calls denies CreatePermission or ChannelBind requests if private IP ranges (e.g loopback addresses, linklocal unicast or multicast blocks) or IP addresses that are part of [BYOIP](/byoip/) are used.
If you are a Cloudflare BYOIP customer and wish to connect to your BYOIP ranges with Calls TURN, please reach out to your account manager for further details.
-
-
### What will happen if TURN credentials expire while the TURN allocation is in use?
-Cloudflare Calls will immediately stop billing and recording usage for analytics. After a short delay, the connection will be disconnected.
\ No newline at end of file
+Cloudflare Calls will immediately stop billing and recording usage for analytics. After a short delay, the connection will be disconnected.
diff --git a/src/content/docs/cloudflare-for-platforms/workers-for-platforms/configuration/outbound-workers.mdx b/src/content/docs/cloudflare-for-platforms/workers-for-platforms/configuration/outbound-workers.mdx
index 7eec498aa71076..d3d86e065bef2b 100644
--- a/src/content/docs/cloudflare-for-platforms/workers-for-platforms/configuration/outbound-workers.mdx
+++ b/src/content/docs/cloudflare-for-platforms/workers-for-platforms/configuration/outbound-workers.mdx
@@ -4,7 +4,9 @@ title: Outbound Workers
---
-Outbound Workers sit between your customer’s Workers and the public Internet. They give you visibility into all outgoing `fetch()` requests from user Workers.
+import { WranglerConfig } from "~/components";
+
+Outbound Workers sit between your customer's Workers and the public Internet. They give you visibility into all outgoing `fetch()` requests from user Workers.

@@ -25,8 +27,6 @@ To use Outbound Workers:
Make sure that you have `wrangler@3.3.0` or later [installed](/workers/wrangler/install-and-update/).
-import { WranglerConfig } from "~/components";
-
```toml
diff --git a/src/content/docs/cloudflare-for-platforms/workers-for-platforms/get-started/configuration.mdx b/src/content/docs/cloudflare-for-platforms/workers-for-platforms/get-started/configuration.mdx
index cfd397c1a1ccbc..7750a40cacd968 100644
--- a/src/content/docs/cloudflare-for-platforms/workers-for-platforms/get-started/configuration.mdx
+++ b/src/content/docs/cloudflare-for-platforms/workers-for-platforms/get-started/configuration.mdx
@@ -5,7 +5,7 @@ sidebar:
order: 1
---
-import { Render, PackageManagers } from "~/components";
+import { Render, PackageManagers, WranglerConfig } from "~/components";
## Prerequisites:
@@ -113,9 +113,7 @@ Change to your project's directory:
cd my-dispatcher
```
-Open the `wrangler.toml` file in your project directory, and add the dispatch namespace binding:
-
-import { WranglerConfig } from "~/components";
+Open the Wrangler file in your project directory, and add the dispatch namespace binding:
diff --git a/src/content/docs/cloudflare-for-platforms/workers-for-platforms/get-started/developing-with-wrangler.mdx b/src/content/docs/cloudflare-for-platforms/workers-for-platforms/get-started/developing-with-wrangler.mdx
index 76665dbd288b85..5b886dfa8728cc 100644
--- a/src/content/docs/cloudflare-for-platforms/workers-for-platforms/get-started/developing-with-wrangler.mdx
+++ b/src/content/docs/cloudflare-for-platforms/workers-for-platforms/get-started/developing-with-wrangler.mdx
@@ -5,7 +5,7 @@ sidebar:
order: 3
---
-import { Render, PackageManagers } from "~/components";
+import { Render, PackageManagers, WranglerConfig } from "~/components";
To test your [Dispatch Worker](/cloudflare-for-platforms/workers-for-platforms/reference/how-workers-for-platforms-works/#dynamic-dispatch-worker), [user Worker](/cloudflare-for-platforms/workers-for-platforms/reference/how-workers-for-platforms-works/#user-workers) and [Outbound Worker](/cloudflare-for-platforms/workers-for-platforms/configuration/outbound-workers/) before deploying to production, you can use [Wrangler](/workers/wrangler) for development and testing.
@@ -52,9 +52,7 @@ export default {
};
```
-Update the `wrangler.toml` file for customer-worker-1 and add the dispatch namespace:
-
-import { WranglerConfig } from "~/components";
+Update the Wrangler file for customer-worker-1 and add the dispatch namespace:
@@ -111,7 +109,7 @@ export default {
};
```
-Update the `wrangler.toml` file for dispatch-worker and add the dispatch namespace binding:
+Update the Wrangler file for dispatch-worker and add the dispatch namespace binding:
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/adobe-sign-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/adobe-sign-saas.mdx
index a9bc1d27d64562..069e4a022cd5e6 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/adobe-sign-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/adobe-sign-saas.mdx
@@ -44,9 +44,8 @@ This guide covers how to configure [Adobe Acrobat Sign](https://helpx.adobe.com/
* **Entity ID**: Entity ID/SAML Audience from Adobe Acrobat Sign SAML SSO configuration.
* **Assertion Consumer Service URL**: Assertion Consumer URL from Adobe Acrobat Sign SAML SSO configuration.
* **Name ID format**: *Email*
-2. Select **Save configuration**.
-3. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-4. Select **Done**.
+2. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+3. Save the application.
## 4. Test the integration and finalize configuration
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/area-1.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/area-1.mdx
index 9eaf8545005737..c1bc57ec8483f3 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/area-1.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/area-1.mdx
@@ -32,21 +32,11 @@ sidebar:
| **Assertion Consumer Service URL** | `https://horizon.area1security.com/api/users/saml` |
| **Name ID Format** | *Email* |
-6. (Optional) Configure [App Launcher settings](/cloudflare-one/applications/app-launcher/) for the application.
+6. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-7. Choose the **Identity providers** you want to enable for your application.
+7. Save the application.
-8. Turn on **Instant Auth** if you are selecting only one login method for your application, and would like your end users to skip the identity provider selection step.
-
-9. Select **Next**.
-
-## 2. Add an Access policy
-
-1. To control who can access your application, [create an Access policy](/cloudflare-one/policies/access/).
-
-2. Select **Next**.
-
-## 3. Configure SSO for Area 1
+## 2. Configure SSO for Area 1
Finally, you will need to configure Area 1 to allow users to log in through Cloudflare Access.
@@ -74,6 +64,4 @@ Finally, you will need to configure Area 1 to allow users to log in through Clou
7. Select **Update Settings**.
-8. In Zero Trust, select **Done**.
-
-Your application will appear on the **Applications** page. If you added the application to your App Launcher, you can test the integration by going to `.cloudflareaccess.com`.
+If you added the application to your App Launcher, you can test the integration by going to `.cloudflareaccess.com`.
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/asana-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/asana-saas.mdx
index aeebf64cf0a0e9..9a57671a43568c 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/asana-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/asana-saas.mdx
@@ -26,9 +26,8 @@ This guide covers how to configure [Asana](https://help.asana.com/hc/en-us/artic
* **Assertion Consumer Service URL**: `https://app.asana.com/-/saml/consume`
* **Name ID format**: *Email*
7. Copy the **SSO endpoint** and **Public key**.
-8. Select **Save configuration**.
-9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-10. Select **Done**.
+8. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+9. Save the application.
## 2. Add a SAML SSO provider to Asana
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/atlassian-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/atlassian-saas.mdx
index 9d148d90c5e0c4..474bca09a18b6b 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/atlassian-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/atlassian-saas.mdx
@@ -24,7 +24,7 @@ This guide covers how to configure [Atlassian Cloud](https://support.atlassian.c
4. For the authentication protocol, select **SAML**.
5. Select **Add application**.
6. Copy the **Access Entity ID or Issuer**, **Public key**, and **SSO endpoint**.
-7. Keep this window open without selecting **Select configuration**. You will finish this configuration in step [4. Finish adding a SaaS application to Cloudflare Zero Trust](#4-finish-adding-a-saas-application-to-cloudflare-zero-trust).
+7. Keep this window open. You will finish this configuration in step [4. Finish adding a SaaS application to Cloudflare Zero Trust](#4-finish-adding-a-saas-application-to-cloudflare-zero-trust).
## 2. Create a x.509 certificate
@@ -38,13 +38,9 @@ This guide covers how to configure [Atlassian Cloud](https://support.atlassian.c
3. For **Directory name**, enter your desired name. For example, you could enter `Cloudflare Access`.
4. Select **Add** > **Set up SAML single sign-on** > **Next**.
-:::note
-
-
-This screen will advise you to create an authentication policy before proceeding. You will do this in step [5. Create an application policy to test integration](#5-create-an-authentication-policy-to-test-integration).
-
-
-:::
+ :::note
+ This screen will advise you to create an authentication policy before proceeding. You will do this in step [5. Create an application policy to test integration](#5-create-an-authentication-policy-to-test-integration).
+ :::
5. Fill in the following fields:
* **Identity provider Entity ID**: Access Entity ID or Issuer from application configuration in Cloudflare Zero Trust.
@@ -60,11 +56,10 @@ This screen will advise you to create an authentication policy before proceeding
1. In your open Zero Trust window, fill in the following fields:
* **Entity ID**: Service provider entity URL from Atlassian Cloud SAML SSO set-up.
- * **Assertion Consumer Service URL**: Service provider assertion comsumer service URL from Atlassian Cloud SAML SSO set-up.
+ * **Assertion Consumer Service URL**: Service provider assertion consumer service URL from Atlassian Cloud SAML SSO set-up.
* **Name ID format**: *Email*
-2. Select **Save configuration**.
-3. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-4. Select **Done**.
+2. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+3. Save the application.
## 5. Create an authentication policy to test integration
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/aws-sso-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/aws-sso-saas.mdx
index f988b586729eb1..7fcc3506ce72ad 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/aws-sso-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/aws-sso-saas.mdx
@@ -40,9 +40,8 @@ Next, we will obtain **Identity provider metadata** from Zero Trust.
1. Copy the **SAML Metadata endpoint**.
2. In a separate browser window, go to the SAML Metadata endpoint (`https://.cloudflareaccess.com/cdn-cgi/access/sso/saml/xxx/saml-metadata`).
3. Save the page as `access_saml_metadata.xml`.
-9. Save your SaaS application configuration.
-10. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-11. Select **Done**.
+9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+10. Save the application.
## 3. Complete AWS configuration
@@ -60,7 +59,7 @@ Access for SaaS does not currently support [SCIM provisioning](/cloudflare-one/i
1. Users are created in both your identity provider and AWS.
2. Users have matching usernames in your identity provider and AWS.
-3. Usernames are email addresses. This is the only format AWS supports with third-party SSO providers.
+3. Usernames are email addresses. This is the only format AWS supports with third-party SSO providers.
:::
## 4. Test the integration
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/braintree-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/braintree-saas.mdx
index 865b76f7000c5c..7e7c6f78c3f590 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/braintree-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/braintree-saas.mdx
@@ -26,9 +26,8 @@ This guide covers how to configure [Braintree](https://developer.paypal.com/brai
* **Assertion Consumer Service URL**: `https://www.placeholder.com`
* **Name ID format**: *Email*
7. Copy the **SSO endpoint** and **Public key**.
-8. Select **Save configuration**.
-9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-10. Select **Done**.
+8. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+9. Save the application.
## 2. Enable SSO Configuration in Braintree
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/coupa-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/coupa-saas.mdx
index 371a1c310c04c2..9f8386faf11b65 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/coupa-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/coupa-saas.mdx
@@ -28,9 +28,8 @@ This guide covers how to configure [Coupa](https://compass.coupa.com/en-us/produ
* **Name ID format**: *Email*
7. Copy the **Access Entity ID or Issuer** and **SAML Metadata Endpoint**.
8. In **Default relay state**, enter `https://.coupahost.com/sessions/saml_post`.
-9. Select **Save configuration**.
-10. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-11. Select **Done**.
+9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+10. Save the application.
## 2. Download the metadata file
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/digicert-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/digicert-saas.mdx
index d5986d5bef4214..92ccbb9c8d2567 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/digicert-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/digicert-saas.mdx
@@ -27,9 +27,8 @@ This guide covers how to configure [Digicert](https://docs.digicert.com/en/certc
* **Assertion Consumer Service URL**: `https://www.digicert.com/account/sso/`
* **Name ID format**: *Email*
7. Copy the **SAML Metadata endpoint**.
-8. Select **Save configuration**.
-9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-10. Select **Done**.
+8. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+9. Save the application.
## 2. Add a SAML SSO provider in Digicert
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/docusign-access.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/docusign-access.mdx
index ec871675a13d0d..0bab2519bf5390 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/docusign-access.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/docusign-access.mdx
@@ -45,20 +45,17 @@ This guide covers how to configure [Docusign](https://support.docusign.com/s/doc
7. Set an Access policy (for example, create a policy based on _Emails ending in @example.com_).
-8. Copy and save SSO Endpoint, Entity ID and Public Key.
+8. Copy and save the **SSO Endpoint**, **Entity ID** and **Public Key**.
- :::note
+9. Transform the **Public Key** into a fingerprint:
- The Public key must be transformed into a fingerprint. To do that:
+ 1. Copy the **Public Key** Value.
-9. Copy the Public Key Value.
+ 2. Paste the **Public Key** into VIM or another code editor.
-10. Paste the Public Key into VIM or another code editor.
+ 3. Wrap the value in `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`.
-11. Wrap the value in `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`.
-
-12. Set the file extension to `.crt` and save.
- :::
+ 4. Set the file extension to `.crt` and save.
## 2. Configure your DocuSign SSO instance
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/dropbox-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/dropbox-saas.mdx
index 0e86f0db764ff2..209fbcc30cf62b 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/dropbox-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/dropbox-saas.mdx
@@ -26,9 +26,8 @@ This guide covers how to configure [Dropbox](https://help.dropbox.com/security/s
* **Assertion Consumer Service URL**: `https://www.dropbox.com/saml_login`
* **Name ID format**: *Email*
7. Copy the **SSO endpoint** and **Public key**.
-8. Select **Save configuration**.
-9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-10. Select **Done**.
+8. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+9. Save the application.
## 2. Create a certificate file
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/generic-oidc-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/generic-oidc-saas.mdx
index 26ab8a83e176ca..947d924f17f740 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/generic-oidc-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/generic-oidc-saas.mdx
@@ -62,24 +62,22 @@ Some SaaS applications provide the Redirect URL after you [configure the SSO pro
| Key endpoint | Returns the current public keys used to [verify the Access JWT](/cloudflare-one/identity/authorization-cookie/validating-json/) `https://.cloudflareaccess.com/cdn-cgi/access/sso/oidc//jwks` |
| User info endpoint | Returns all user claims in JSON format `https://.cloudflareaccess.com/cdn-cgi/access/sso/oidc//userinfo` |
-11. (Optional) Configure [App Launcher settings](/cloudflare-one/applications/app-launcher/) by turning on **Enable App in App Launcher** and, in **App Launcher URL**, entering the URL that users should be sent to when they select the tile.
+11. Add [Access policies](/cloudflare-one/policies/access/) to control who can connect to your application. All Access applications are deny by default -- a user must match an Allow policy before they are granted access.
-12.
+12.
-13.
+13. Select **Next**.
-14. Select **Save configuration**.
+14. (Optional) Configure [App Launcher settings](/cloudflare-one/applications/app-launcher/) for the application.
-## 3. Add an Access policy
+15.
-1. To control who can access the SaaS application, [create an Access policy](/cloudflare-one/policies/access/).
+16. Select **Save application**.
-2. Select **Done**.
-
-## 4. Configure SSO in your SaaS application
+## 3. Configure SSO in your SaaS application
Next, configure your SaaS application to require users to log in through Cloudflare Access. Refer to your SaaS application documentation for instructions on how to configure a third-party OIDC SSO provider.
-## 5. Test the integration
+## 4. Test the integration
Open an incognito browser window and go to the SaaS application's login URL. You will be redirected to the Cloudflare Access login screen and prompted to sign in with your identity provider.
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/generic-saml-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/generic-saml-saas.mdx
index 446dfec843ade6..5a207f5178194f 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/generic-saml-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/generic-saml-saas.mdx
@@ -48,19 +48,17 @@ Obtain the following URLs from your SaaS application account:
If you are using Okta, Microsoft Entra ID (formerly Azure AD), Google Workspace, or GitHub as your IdP, Access will automatically send a SAML attribute titled `groups` with all of the user's associated groups as attribute values.
:::
-11. (Optional) Configure [App Launcher settings](/cloudflare-one/applications/app-launcher/) for the application.
+11. Add [Access policies](/cloudflare-one/policies/access/) to control who can connect to your application. All Access applications are deny by default -- a user must match an Allow policy before they are granted access.
-12.
+12.
-13.
+13. Select **Next**.
-14. Select **Save configuration**.
+14. (Optional) Configure [App Launcher settings](/cloudflare-one/applications/app-launcher/) for the application.
-## 2. Add an Access policy
+15.
-1. To control who can access the SaaS application, [create an Access policy](/cloudflare-one/policies/access/).
-
-2. Select **Done**.
+16. Select **Save application**.
## 3. Configure SSO in your SaaS application
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/github-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/github-saas.mdx
index dd8ca0ea731c72..18a5682af1ae73 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/github-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/github-saas.mdx
@@ -27,9 +27,8 @@ This guide covers how to configure [GitHub Enterprise Cloud](https://docs.github
* **Assertion Consumer Service URL**: `https://github.com/orgs//saml/consume`
* **Name ID format**: *Email*
7. Copy the **SSO endpoint**, **Access Entity ID or Issuer**, and **Public key**.
-8. Select **Save configuration**.
-9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-10. Select **Done**.
+8. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+9. Save the application.
## 2. Create a x.509 certificate
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/google-cloud-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/google-cloud-saas.mdx
index f3d0505feb2c46..9ae073c28f7242 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/google-cloud-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/google-cloud-saas.mdx
@@ -19,7 +19,7 @@ When configuring Google Cloud with Access, the following limitations apply:
- The integration of Access as a single sign-on provider for your Google Cloud account does not work for Google super admins. It will work for other users.
:::
-## Prerequistes
+## Prerequisites
- An [identity provider](/cloudflare-one/identity/idp-integration/) configured in Cloudflare Zero Trust
- Admin access to a Google Workspace account
@@ -37,9 +37,8 @@ When configuring Google Cloud with Access, the following limitations apply:
- **Assertion Consumer Service URL**: `https://www.google.com/a//acs`
- **Name ID format**: _Email_
7. Copy the **SSO endpoint**, **Access Entity ID or Issuer**, and **Public key**.
-8. Select **Save configuration**.
-9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-10. Select **Done**.
+8. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+9. Save the application.
## 2. Create a x.509 certificate
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/google-workspace-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/google-workspace-saas.mdx
index b25f2a480d568e..9268be6fd8a860 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/google-workspace-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/google-workspace-saas.mdx
@@ -16,7 +16,7 @@ The integration of Access as a single sign-on provider for your Google Workspace
:::
-## Prerequistes
+## Prerequisites
- An [identity provider](/cloudflare-one/identity/idp-integration/) configured in Cloudflare Zero Trust
- Admin access to a Google Workspace account
@@ -38,9 +38,11 @@ The integration of Access as a single sign-on provider for your Google Workspace
When you put your Google Workspace behind Access, users will not be able to log in using [Google](/cloudflare-one/identity/idp-integration/google/) or [Google Workspace](/cloudflare-one/identity/idp-integration/gsuite/) as an identity provider.
:::
-4. On the next page, [create an Access policy](/cloudflare-one/policies/access/) for your application. For example, you could allow users with an `@your_domain.com` email address.
+4. [Create an Access policy](/cloudflare-one/policies/access/) for your application. For example, you could allow users with an `@your_domain.com` email address.
-5. On the next page, you will see your **SSO endpoint**, **Access Entity ID or Issuer**, and **Public key**. These values will be used to configure Google Workspace.
+5. Copy the **SSO endpoint**, **Access Entity ID or Issuer**, and **Public key**. These values will be used to configure Google Workspace.
+
+6. Save the application.
## 2. Create a certificate from your public key
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/grafana-cloud-saas-oidc.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/grafana-cloud-saas-oidc.mdx
index 11ef1b207a94e4..2fc1b65b3e195e 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/grafana-cloud-saas-oidc.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/grafana-cloud-saas-oidc.mdx
@@ -25,10 +25,9 @@ This guide covers how to configure [Grafana Cloud](https://grafana.com/docs/graf
7. In **Redirect URLs**, enter `https:///login/generic_oauth`.
8. (Optional) Enable [Proof of Key Exchange (PKCE)](https://www.oauth.com/oauth2-servers/pkce/) if the protocol is supported by your IdP. PKCE will be performed on all login attempts.
9. Copy the **Client secret**, **Client ID**, **Token endpoint**, and **Authorization endpoint**.
-10. Select **Save configuration**.
-11. (Optional) configure [App Launcher settings](/cloudflare-one/applications/app-launcher/) by turning on **Enable App in App Launcher** and, in **App Launcher URL**, entering `https:///login`.
-12. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-13. Select **Done**.
+10. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+11. (Optional) In **Experience settings**, configure [App Launcher settings](/cloudflare-one/applications/app-launcher/) by turning on **Enable App in App Launcher** and, in **App Launcher URL**, entering `https:///login`.
+12. Save the application.
## 2. Add a SSO provider to Grafana Cloud
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/grafana-saas-oidc.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/grafana-saas-oidc.mdx
index 89cf2235b94f7a..48b3e8ba1e387e 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/grafana-saas-oidc.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/grafana-saas-oidc.mdx
@@ -15,11 +15,7 @@ This guide covers how to configure [Grafana](https://grafana.com/docs/grafana/la
* Admin access to a Grafana account
:::note
-
-
You can also configure OIDC SSO for Grafana using a [configuration file](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-generic-oauth-authentication-client-using-the-grafana-configuration-file) instead of using Grafana's user interface (UI), as documented in this guide.
-
-
:::
## 1. Add a SaaS application to Cloudflare Zero Trust
@@ -33,10 +29,9 @@ You can also configure OIDC SSO for Grafana using a [configuration file](https:/
7. In **Redirect URLs**, enter `https:///login/generic_oauth`.
8. (Optional) Enable [Proof of Key Exchange (PKCE)](https://www.oauth.com/oauth2-servers/pkce/) if the protocol is supported by your IdP. PKCE will be performed on all login attempts.
9. Copy the **Client secret**, **Client ID**, **Token endpoint**, and **Authorization endpoint**.
-10. Select **Save configuration**.
-11. (Optional) configure [App Launcher settings](/cloudflare-one/applications/app-launcher/) by turning on **Enable App in App Launcher** and, in **App Launcher URL**, entering `https:///login`.
-12. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-13. Select **Done**.
+10. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+11. (Optional) In **Experience settings**, configure [App Launcher settings](/cloudflare-one/applications/app-launcher/) by turning on **Enable App in App Launcher** and, in **App Launcher URL**, entering `https:///login`.
+12. Save the application.
## 2. Add a SSO provider to Grafana
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/greenhouse-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/greenhouse-saas.mdx
index d3ea8b77af42be..fb43009202bd5a 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/greenhouse-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/greenhouse-saas.mdx
@@ -22,7 +22,7 @@ This guide covers how to configure [Greenhouse Recruiting](https://support.green
4. For the authentication protocol, select **SAML**.
5. Select **Add application**.
6. Copy the **SAML Metadata endpoint**.
-7. Keep this window open without selecting **Select configuration**. You will finish this configuration in step [4. Finish adding a SaaS application to Cloudflare Zero Trust](#4-finish-adding-a-saas-application-to-cloudflare-zero-trust).
+7. Keep this window open. You will finish this configuration in step [4. Finish adding a SaaS application to Cloudflare Zero Trust](#4-finish-adding-a-saas-application-to-cloudflare-zero-trust).
## 2. Download the metadata file
@@ -43,9 +43,8 @@ This guide covers how to configure [Greenhouse Recruiting](https://support.green
* **Entity ID**: `greenhouse.io`
* **Assertion Consumer Service URL**: SSO Assertion Consumer URL from SSO configuration in Greenhouse Recruiting.
* **Name ID format**: *Email*
-2. Select **Save configuration**.
-3. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-4. Select **Done**.
+2. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+3. Save the application.
## 5. Test the integration and finalize configuration
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/hubspot-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/hubspot-saas.mdx
index 7057d4c0496c5d..7128843f1b69c9 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/hubspot-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/hubspot-saas.mdx
@@ -30,19 +30,21 @@ This guide covers how to configure [Hubspot](https://knowledge.hubspot.com/accou
| Hubspot values | Cloudflare values |
| -------------- | ------------------------------ |
- | Audience URI | EntityID |
+ | Audience URI | Entity ID |
| Sign On URL | Assertion Consumer Service URL |
4. Set **NameID** to *Email*.
5. Add any desired [Access policies](/cloudflare-one/policies/access/) to your application.
-6. Copy SSO endpoint and Access Entity ID.
+6. Copy the **SSO endpoint** and **Access Entity ID**.
-## 3. Create the certificate
+7. Save the application.
-1. Wrap the certificate in `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`.
-2. Paste the certificate contents into the Certificate field.
+## 3. Create a x.509 certificate
+
+1. Paste the **Public key** into a text editor.
+2. Wrap the certificate in `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`.
## 4. Finalize Hubspot configuration
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/ironclad-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/ironclad-saas.mdx
index ba4a825556d518..a0319e2ffffdf5 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/ironclad-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/ironclad-saas.mdx
@@ -22,7 +22,7 @@ This guide covers how to configure [Ironclad](https://support.ironcladapp.com/hc
4. For the authentication protocol, select **SAML**.
5. Select **Add application**.
6. Copy the **SSO Endpoint** and **Public key**.
-7. Keep this window open without selecting **Select configuration**. You will finish this configuration in step [3. Finish adding a SaaS application to Cloudflare Zero Trust](#3-finish-adding-a-saas-application-to-cloudflare-zero-trust).
+7. Keep this window open. You will finish this configuration in step [3. Finish adding a SaaS application to Cloudflare Zero Trust](#3-finish-adding-a-saas-application-to-cloudflare-zero-trust).
## 2. Add a SAML SSO provider to Ironclad
@@ -40,9 +40,8 @@ This guide covers how to configure [Ironclad](https://support.ironcladapp.com/hc
* **Entity ID**: `ironcladapp.com`
* **Assertion Consumer Service URL**: Callback from Ironclad SAML SSO set-up.
* **Name ID format**: *Email*
-2. Select **Save configuration**.
-3. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-4. Select **Done**.
+2. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+3. Save the application.
## 4. Add a test user to Ironclad and test the integration
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/jamf-pro-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/jamf-pro-saas.mdx
index 61d857fa2ccef3..55ef40c805ac63 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/jamf-pro-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/jamf-pro-saas.mdx
@@ -33,9 +33,8 @@ This guide covers how to configure [Jamf Pro](https://learn.jamf.com/en-US/bundl
* **Assertion Consumer Service URL**: Assertion Consumer Service value from Jamf Pro metadata file.
* **Name ID format**: *Email*
7. Copy the **SAML Metadata endpoint**.
-8. Select **Save configuration**.
-9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-10. Select **Done**.
+8. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+9. Save the application.
## 3. Edit Access SAML Metadata
@@ -57,11 +56,7 @@ This guide covers how to configure [Jamf Pro](https://learn.jamf.com/en-US/bundl
5. Turn on **Single Sign On**.
:::note
-
-
The Failover Login URL located on this page can be used to log in if your SSO does not work.
-
-
:::
## 5. Test the Integration
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/miro-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/miro-saas.mdx
index 32dd9de72fd8b9..88d9f890fad49a 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/miro-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/miro-saas.mdx
@@ -29,9 +29,8 @@ This guide covers how to configure [Miro](https://help.miro.com/hc/articles/3600
* **Assertion Consumer Service URL**: `https://miro.com/sso/saml`
* **Name ID format**: *Email*
7. Copy the **SSO endpoint** and **Public key**.
-8. Select **Save configuration**.
-9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-10. Select **Done**.
+8. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+9. Save the application.
## 2. Add a SAML SSO provider to Miro
@@ -52,9 +51,5 @@ This guide covers how to configure [Miro](https://help.miro.com/hc/articles/3600
In the Miro SAML/SSO configuration page, select **Test SSO Configuration**. You will be redirected to the Cloudflare Access login screen and prompted to sign in with your identity provider. If the login is successful, you will receive a **SSO configuration test was successful** message.
:::note
-
-
When testing the integration, you do not have to use an email from a domain you have configured for SSO or a user configured in Miro. The only requirement is that the user is already configured in your identity provider.
-
-
:::
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/pagerduty-saml-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/pagerduty-saml-saas.mdx
index 5149743a46b4ab..abfa66b10f6760 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/pagerduty-saml-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/pagerduty-saml-saas.mdx
@@ -26,9 +26,8 @@ This guide covers how to configure [PagerDuty](https://support.pagerduty.com/doc
* **Assertion Consumer Service URL**: ` https://.pagerduty.com/sso/saml/consume`
* **Name ID format**: *Email*
7. Copy the **SSO endpoint** and **Public key**.
-8. Select **Save configuration**.
-9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-10. Select **Done**.
+8. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+9. Save the application.
## 2. Create a x.509 certificate
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/pingboard-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/pingboard-saas.mdx
index b55b2ae5cd90b2..fc202fc9863e9b 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/pingboard-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/pingboard-saas.mdx
@@ -26,9 +26,8 @@ This guide covers how to configure [Pingboard](https://support.pingboard.com/hc/
* **Assertion Consumer Service URL**: `https://sso-demo.pingboard.com/auth/saml/consume`
* **Name ID format**: *Email*
7. Copy the **SAML Metadata endpoint**.
-8. Select **Save configuration**.
-9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-10. Select **Done**.
+8. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+9. Save the application.
## 2. Add a SAML SSO provider to Pingboard
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/salesforce-saas-oidc.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/salesforce-saas-oidc.mdx
index a6c13e2e5bb2dd..4514b2af287171 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/salesforce-saas-oidc.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/salesforce-saas-oidc.mdx
@@ -32,10 +32,9 @@ This guide covers how to configure [Salesforce](https://help.salesforce.com/s/ar
* **Authorization endpoint**
* **Token endpoint**
* **User info endpoint**
-10. (Optional) configure [App Launcher settings](/cloudflare-one/applications/app-launcher/) by turning on **Enable App in App Launcher** and, in **App Launcher URL**, entering `https://.my.salesforce.com`.
-11. Select **Save configuration**.
-12. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-13. Select **Done**.
+10. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+11. (Optional) In **Experience settings**, configure [App Launcher settings](/cloudflare-one/applications/app-launcher/) by turning on **Enable App in App Launcher** and, in **App Launcher URL**, entering `https://.my.salesforce.com`.
+12. Save the application.
## 2. Add a SSO provider to Salesforce
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/salesforce-saas-saml.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/salesforce-saas-saml.mdx
index cf23bc7d0dde6a..4811a20700c52e 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/salesforce-saas-saml.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/salesforce-saas-saml.mdx
@@ -29,17 +29,12 @@ This guide covers how to configure [Salesforce](https://help.salesforce.com/s/ar
* **Name ID format**: *Email*
:::note
-
-
If you are unsure of which URL to use in the **Entity ID** and **Assertion Consumer Service URL** fields, you can check your Salesforce account's metadata. In Salesforce, go to the **Single Sign-On Settings** page and select **Download Metadata**. In this file, you will find the correct URLs to use.
-
-
:::
7. Copy the **SSO endpoint**, **Public key**, and **Access Entity ID or Issuer**.
-8. Select **Save configuration**.
-9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-10. Select **Done**.
+8. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+9. Save the application.
## 2. Create a certificate file
@@ -58,11 +53,11 @@ If you are unsure of which URL to use in the **Entity ID** and **Assertion Consu
* **Issuer:** Paste the Access Entity ID or Issuer from application configuration in Cloudflare Zero Trust.
* **Identity Provider Certificate**: Upload the `.crt` certificate file from [2. Create a certificate file](#2-create-a-certificate-file).
* **Entity ID**: `https://.my.salesforce.com`
- * **SAML Identity type:** If the user's Salesforce username is their email address, select *Assertion contains the User's Salesforce username*. Otherwise, select *Assertion contains the Federation ID from the User object* and make sure the user's Federation ID matches their email address.
+ * **SAML Identity type:** If the user's Salesforce username is their email address, select *Assertion contains the User's Salesforce username*. Otherwise, select *Assertion contains the Federation ID from the User object* and make sure the user's Federation ID matches their email address.
1. In the **Quick Find** box, enter `users` and select **Users**.
2. Select the user.
- 3. Verify that the user's **Federation ID** matches the email address used to authenticate to Cloudflare Access.
+ 3. Verify that the user's **Federation ID** matches the email address used to authenticate to Cloudflare Access.
* **Identity Provider Login URL**: SSO endpoint provided in Cloudflare Zero Trust for this application.
5. Select **Save**.
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/servicenow-saas-oidc.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/servicenow-saas-oidc.mdx
index b309ff64f2a757..be4aafe79e4a5c 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/servicenow-saas-oidc.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/servicenow-saas-oidc.mdx
@@ -25,11 +25,9 @@ This guide covers how to configure [ServiceNow](https://docs.servicenow.com/bund
7. In **Redirect URLs**, enter `https://.service-now.com/navpage.do`.
8. (Optional) Enable [Proof of Key Exchange (PKCE)](https://www.oauth.com/oauth2-servers/pkce/) if the protocol is supported by your IdP. PKCE will be performed on all login attempts.
9. Copy the **Client secret** and **Client ID**.
-10. Select **Save configuration**.
-11. (Optional) configure [App Launcher settings](/cloudflare-one/applications/app-launcher/) by turning on **Enable App in App Launcher** and, in **App Launcher URL**, entering `https://.service-now.com`.
-12. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-13. Select **Done**.
-
+10. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+11. (Optional) In **Experience settings**, configure [App Launcher settings](/cloudflare-one/applications/app-launcher/) by turning on **Enable App in App Launcher** and, in **App Launcher URL**, entering `https://.service-now.com`.
+12. Save the application.
## 2. Add the Multiple Provider Single Sign-On Installer Plugin to ServiceNow
1. In ServiceNow, select **All**.
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/servicenow-saas-saml.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/servicenow-saas-saml.mdx
index bd42669af865b3..aa3f6132d9e871 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/servicenow-saas-saml.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/servicenow-saas-saml.mdx
@@ -26,9 +26,8 @@ This guide covers how to configure [ServiceNow](https://docs.servicenow.com/bund
* **Assertion Consumer Service URL**: `https://.service-now.com/navpage.do`
* **Name ID format**: *Email*
7. Copy the **SAML Metadata endpoint**.
-8. Select **Save configuration**.
-9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-10. Select **Done**.
+8. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+9. Save the application.
## 2. Add the Multiple Provider Single Sign-On Installer Plugin to ServiceNow
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/slack-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/slack-saas.mdx
index 32ec7f8b6a4e82..1953a1ad2ff80b 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/slack-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/slack-saas.mdx
@@ -28,9 +28,8 @@ This guide covers how to configure [Slack](https://slack.com/help/articles/20377
* **Assertion Consumer Service URL**: `https://.slack.com/sso/saml`
* **Name ID format**: The format expected by Slack, usually *Email*
7. Copy the **SSO endpoint**, **Access Entity ID or Issuer**, and **Public key**.
-8. Select **Save configuration**.
-9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-10. Select **Done**.
+8. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+9. Save the application.
## 2. Create a x.509 certificate
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/smartsheet-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/smartsheet-saas.mdx
index 30dd398d30c41d..d984dba2d6ddeb 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/smartsheet-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/smartsheet-saas.mdx
@@ -16,11 +16,7 @@ This guide covers how to configure [Smartsheet](https://help.smartsheet.com/arti
* A [domain](https://help.smartsheet.com/articles/2483051-domain-management) verified in Smartsheet
:::note
-
-
In Smartsheet, SSO is configured for a domain. If you have multiple plans using the same domain, the SSO configuration will apply to all Smartsheet users in that domain, regardless of their plan type.
-
-
:::
## 1. Add a SaaS application to Cloudflare Zero Trust
@@ -35,9 +31,8 @@ In Smartsheet, SSO is configured for a domain. If you have multiple plans using
* **Assertion Consumer Service URL**: `https://saml.authn.smartsheet.com/saml2/idpresponse`
* **Name ID format**: *Unique ID*
7. Copy the **SAML Metadata endpoint**.
-8. Select **Save configuration**.
-9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-10. Select **Done**.
+8. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+9. Save the application.
## 2. Create and test a SAML SSO provider in Smartsheet
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/sparkpost-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/sparkpost-saas.mdx
index b23d116cbc35aa..ddb29f7fa09262 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/sparkpost-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/sparkpost-saas.mdx
@@ -32,9 +32,8 @@ This guide covers how to configure [SparkPost or SparkPost EU](https://support.s
* `https:///api/v1/users/saml/consume` for SparkPost accounts with dedicated tenants
* **Name ID format**: *Email*
7. Copy the **SAML Metadata endpoint**.
-8. Select **Save configuration**.
-9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-10. Select **Done**.
+8. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+9. Save the application.
## 2. Download the metadata file
@@ -57,5 +56,5 @@ This guide covers how to configure [SparkPost or SparkPost EU](https://support.s
:::note
-The SparkPost SSO login link is `https://app.sparkpost.com/auth/sso`. Alternatively, you can go to the usual sign in page and select **Log in with Single Sign-On**.
+The SparkPost SSO login link is `https://app.sparkpost.com/auth/sso`. Alternatively, you can go to the usual sign in page and select **Log in with Single Sign-On**.
:::
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/tableau-saml-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/tableau-saml-saas.mdx
index a6a062a938cfab..60b45d0ec550c4 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/tableau-saml-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/tableau-saml-saas.mdx
@@ -22,7 +22,7 @@ This guide covers how to configure [Tableau Cloud](https://help.tableau.com/curr
4. For the authentication protocol, select **SAML**.
5. Select **Add application**.
6. Copy the **SAML Metadata endpoint**.
-7. Keep this window open without selecting **Select configuration**. You will finish this configuration in step [4. Finish adding a SaaS application to Cloudflare Zero Trust](#4-finish-adding-a-saas-application-to-cloudflare-zero-trust).
+7. Keep this window open. You will finish this configuration in step [4. Finish adding a SaaS application to Cloudflare Zero Trust](#4-finish-adding-a-saas-application-to-cloudflare-zero-trust).
## 2. Download the metadata file
@@ -34,7 +34,7 @@ This guide covers how to configure [Tableau Cloud](https://help.tableau.com/curr
1. In Tableau Cloud, go to **Settings** > **Authentication**.
2. Turn on **Enable an additional authentication method**. For **select authentication type**, select *SAML*.
3. Under **1. Get Tableau Cloud metadata**, copy the **Tableau Cloud entity ID** and **Tableau Cloud ACS URL**.
-4. Under **4. Upload metatdata to Tableau**, select **Choose a file**, and upload the `.xml` file created in step [2. Download the metadata file](#2-download-the-metadata-file)
+4. Under **4. Upload metadata to Tableau**, select **Choose a file**, and upload the `.xml` file created in step [2. Download the metadata file](#2-download-the-metadata-file)
5. Under **5. Map attributes**, turn on **Full name**. For **Name (full name)**, enter `name`.
6. (Optional) Choose whether users who are accessing embedded views will **Authenticate in a separate pop-up window** or **Authenticate using an inline frame**.
7. Select **Save Changes**.
@@ -45,9 +45,8 @@ This guide covers how to configure [Tableau Cloud](https://help.tableau.com/curr
* **Entity ID**: Tableau Cloud entity ID from Tableau Cloud SAML SSO set-up.
* **Assertion Consumer Service URL**: Tableau Cloud ACS URL from Tableau Cloud SAML SSO set-up.
* **Name ID format**: *Email*
-2. Select **Save configuration**.
-3. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-4. Select **Done**.
+2. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+3. Save the application.
## 5. Test the integration and set default authentication type
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/workday-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/workday-saas.mdx
index f4ca54c9e05c94..90cf78c84f459f 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/workday-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/workday-saas.mdx
@@ -26,9 +26,8 @@ This guide covers how to configure [Workday](https://doc.workday.com/admin-guide
* **Assertion Consumer Service URL**: `https://.myworkday.com//login-saml.flex` for a production account or `https://-impl.myworkday.com//login-saml.flex` for a preview sandbox account
* **Name ID format**: *Email*
7. Copy the **SSO endpoint**, **Access Entity ID or Issuer**, and **Public key**.
-8. Select **Save configuration**.
-9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-10. Select **Done**.
+8. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+9. Save the application.
## 2. Download the metadata file
@@ -59,11 +58,7 @@ This guide covers how to configure [Workday](https://doc.workday.com/admin-guide
## 4. Test the integration
:::note
-
-
If you encounter a situation where one or more users get locked out of Workday, the user can use this backup URL provided by Workday to sign in with their username and password: `https:///login.flex?redirect=n`.
-
-
:::
1. In Workday, create an [authentication rule](https://doc.workday.com/admin-guide/en-us/authentication-and-security/authentication/authentication-policies/dan1370796466772.html).
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/zendesk-sso-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/zendesk-sso-saas.mdx
index 24b4a5d019b150..2173b827f5c507 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/zendesk-sso-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/zendesk-sso-saas.mdx
@@ -40,23 +40,27 @@ This guide covers how to configure [Zendesk](https://support.zendesk.com/hc/en-u
6. To determine who can access Zendesk, [create an Access policy](/cloudflare-one/policies/access/).
-7. Copy the values from the Cloudflare IdP fields and add them to the following Zendesk fields:
+7. Copy the **SSO Endpoint** and **Public Key**.
- | Cloudflare IdP field | Zendesk field |
- | ------------------------------------------- | --------------------------- |
- | **SSO Endpoint** | **SAML SSO URL** |
- | **Public Key** (transformed to fingerprint) | **Certificate Fingerprint** |
+8. Transform the public key into a fingerprint:
+
+ 1. Open a [fingerprint calculator](https://www.samltool.com/fingerprint.php).
- To transform the public key into a fingerprint, use a [fingerprint calculator](https://www.samltool.com/fingerprint.php):
+ 2. Paste the **Public Key** into **X.509 cert**.
- 1. Copy the public key value and paste it into **X.509 cert**.
+ 3. Wrap the value with `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`.
- 2. Wrap the value with `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`.
+ 4. Set **Algorithm** to _SHA256_ and select **Calculate Fingerprint**.
- 3. Set **Algorithm** to _SHA256_ and select **Calculate Fingerprint**.
+ 5. Copy the **Formatted FingerPrint** value.
- 4. Copy the **Formatted FingerPrint** value.
+9. Add the Cloudflare values to the following Zendesk fields:
+
+ | Cloudflare IdP field | Zendesk field |
+ | ------------------------------------------- | --------------------------- |
+ | **SSO Endpoint** | **SAML SSO URL** |
+ | **Public Key** (transformed to fingerprint) | **Certificate Fingerprint** |
-8. Go to `https://.zendesk.com/admin/security/staff_members` and enable **External Authentication** > **Single Sign On**.
+10. Go to `https://.zendesk.com/admin/security/staff_members` and enable **External Authentication** > **Single Sign On**.
Users should now be able to log in to Zendesk if their Email address exists in the Zendesk user list.
diff --git a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/zoom-saas.mdx b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/zoom-saas.mdx
index cbbc5749464a1f..41f72e814d5dc0 100644
--- a/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/zoom-saas.mdx
+++ b/src/content/docs/cloudflare-one/applications/configure-apps/saas-apps/zoom-saas.mdx
@@ -28,9 +28,8 @@ This guide covers how to configure [Zoom](https://support.zoom.com/hc/en/article
* **Assertion Consumer Service URL**: `https://.zoom.us/saml/SSO`
* **Name ID format**: *Email*
7. Copy the **Access Entity ID or Issuer**, **Public key**, and **SSO endpoint**.
-8. Select **Save configuration**.
-9. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
-10. Select **Done**.
+8. Configure [Access policies](/cloudflare-one/policies/access/) for the application.
+9. Save the application.
## 2. Add a SAML SSO provider in Zoom
diff --git a/src/content/docs/cloudflare-one/connections/connect-devices/agentless/pac-files.mdx b/src/content/docs/cloudflare-one/connections/connect-devices/agentless/pac-files.mdx
index d294ca9815035a..d4a492130a71c7 100644
--- a/src/content/docs/cloudflare-one/connections/connect-devices/agentless/pac-files.mdx
+++ b/src/content/docs/cloudflare-one/connections/connect-devices/agentless/pac-files.mdx
@@ -13,7 +13,7 @@ Only available on Enterprise plans.
You can apply Gateway HTTP and DNS policies at the browser level by configuring a Proxy Auto-Configuration (PAC) file.
-
+
When end users visit a website, their browser will send the request to a Cloudflare proxy server associated with your account to be filtered by Gateway. Note that Gateway [cannot filter every type of HTTP traffic](#limitations) proxied using PAC files.
@@ -215,19 +215,17 @@ To get the domain of a proxy endpoint:
--header "Authorization: Bearer "
```
- ```json {10} output
+ ```json {8} output
{
- "success": true,
- "result": {
- "id": "ed35569b41ce4d1facfe683550f54086",
- "created_at": "2014-01-01T05:20:00.12345Z",
- "ips": [
- "192.0.2.1/32"
- ],
- "name": "DevOps team",
- "subdomain": "oli3n9zkz5.proxy.cloudflare-gateway.com",
- "updated_at": "2014-01-01T05:20:00.12345Z"
- }
+ "success": true,
+ "result": {
+ "id": "ed35569b41ce4d1facfe683550f54086",
+ "created_at": "2014-01-01T05:20:00.12345Z",
+ "ips": ["192.0.2.1/32"],
+ "name": "DevOps team",
+ "subdomain": "oli3n9zkz5.proxy.cloudflare-gateway.com",
+ "updated_at": "2014-01-01T05:20:00.12345Z"
+ }
}
```
diff --git a/src/content/docs/cloudflare-one/connections/connect-devices/user-side-certificates/automated-deployment.mdx b/src/content/docs/cloudflare-one/connections/connect-devices/user-side-certificates/automated-deployment.mdx
index 47e4aa6b23a896..bd22817eee97ae 100644
--- a/src/content/docs/cloudflare-one/connections/connect-devices/user-side-certificates/automated-deployment.mdx
+++ b/src/content/docs/cloudflare-one/connections/connect-devices/user-side-certificates/automated-deployment.mdx
@@ -6,7 +6,7 @@ sidebar:
head: []
description: Automatically deploy a root certificate on desktop devices.
banner:
- content: The default global Cloudflare root certificate will expire on 2025-02-02. If you installed the default Cloudflare certificate before 2024-10-17, you must generate a new certificate and activate it for your Zero Trust organization to avoid inspection errors.
+ content: The default global Cloudflare root certificate will expire on 2025-02-02. If you installed the default Cloudflare certificate before 2024-10-17, you must generate a new certificate and activate it for your Zero Trust organization to avoid inspection errors.
---
import { Details } from "~/components";
diff --git a/src/content/docs/cloudflare-one/connections/connect-devices/user-side-certificates/custom-certificate.mdx b/src/content/docs/cloudflare-one/connections/connect-devices/user-side-certificates/custom-certificate.mdx
index 3fdfecc1201967..9cdc0840bac358 100644
--- a/src/content/docs/cloudflare-one/connections/connect-devices/user-side-certificates/custom-certificate.mdx
+++ b/src/content/docs/cloudflare-one/connections/connect-devices/user-side-certificates/custom-certificate.mdx
@@ -7,7 +7,7 @@ head: []
description: Configure WARP to use a custom root certificate instead of the
Cloudflare certificate.
banner:
- content: The default global Cloudflare root certificate will expire on 2025-02-02. If you installed the default Cloudflare certificate before 2024-10-17, you must generate a new certificate and activate it for your Zero Trust organization to avoid inspection errors.
+ content: The default global Cloudflare root certificate will expire on 2025-02-02. If you installed the default Cloudflare certificate before 2024-10-17, you must generate a new certificate and activate it for your Zero Trust organization to avoid inspection errors.
---
import { Render, Tabs, TabItem } from "~/components";
diff --git a/src/content/docs/cloudflare-one/connections/connect-devices/user-side-certificates/index.mdx b/src/content/docs/cloudflare-one/connections/connect-devices/user-side-certificates/index.mdx
index da1c6488b6e986..3818f7eed063fd 100644
--- a/src/content/docs/cloudflare-one/connections/connect-devices/user-side-certificates/index.mdx
+++ b/src/content/docs/cloudflare-one/connections/connect-devices/user-side-certificates/index.mdx
@@ -11,7 +11,7 @@ import { Tabs, TabItem } from "~/components";
Advanced security features such as [HTTPS traffic inspection](/cloudflare-one/policies/gateway/http-policies/tls-decryption/), [Data Loss Prevention](/cloudflare-one/policies/data-loss-prevention/), [anti-virus scanning](/cloudflare-one/policies/gateway/http-policies/antivirus-scanning/), [Access for Infrastructure](/cloudflare-one/applications/non-http/infrastructure-apps/), and [Browser Isolation](/cloudflare-one/policies/browser-isolation/) require users to install and trust a root certificate on their device. You can either install the certificate provided by Cloudflare (default option), or generate your own custom certificate and upload it to Cloudflare.
-Gateway [generates a unique root CA](#generate-a-cloudflare-root-certificate) for each Zero Trust account and deploys it across the Cloudflare global network. Alternatively, Enterprise users can upload and deploy their own [custom certificate](/cloudflare-one/connections/connect-devices/user-side-certificates/custom-certificate/).
+Zero Trust [generates a unique root CA](#generate-a-cloudflare-root-certificate) for each account and deploys it across the Cloudflare global network. Alternatively, Enterprise users can upload and deploy their own [custom certificate](/cloudflare-one/connections/connect-devices/user-side-certificates/custom-certificate/).
## Certificate status
@@ -121,4 +121,4 @@ curl --request PUT \
-You can set multiple certificates to **Available**, but you can only turn on one certificate for use in inspection at a time. Setting a certificate as **In-Use** will set any other turned on certificates as **Available** and prevent them from being used for inspection until turned on again.
+You can set multiple certificates to **Available**, but you can only turn on one certificate for use in inspection at a time. Setting a certificate as **In-Use** will set any other in-use certificates as **Available** only and prevent them from being used for inspection until turned on again.
diff --git a/src/content/docs/cloudflare-one/connections/connect-devices/user-side-certificates/manual-deployment.mdx b/src/content/docs/cloudflare-one/connections/connect-devices/user-side-certificates/manual-deployment.mdx
index 3d87f75d714dcb..e10de82710a346 100644
--- a/src/content/docs/cloudflare-one/connections/connect-devices/user-side-certificates/manual-deployment.mdx
+++ b/src/content/docs/cloudflare-one/connections/connect-devices/user-side-certificates/manual-deployment.mdx
@@ -7,23 +7,23 @@ head: []
description: Manually add a Cloudflare certificate to mobile devices and
individual applications.
banner:
- content: The default global Cloudflare root certificate will expire on 2025-02-02. If you installed the default Cloudflare certificate before 2024-10-17, you must generate a new certificate and activate it for your Zero Trust organization to avoid inspection errors.
+ content: The default global Cloudflare root certificate will expire on 2025-02-02. If you installed the default Cloudflare certificate before 2024-10-17, you must generate a new certificate and activate it for your Zero Trust organization to avoid inspection errors.
---
import { Details, Render, TabItem, Tabs } from "~/components";
:::note
-
This procedure is only required to enable specific Cloudflare Zero Trust features, and should only be done at the direction of your IT department. This procedure is not required to enable the WARP client for consumers.
-
:::
If your device does not support [certificate installation via WARP](/cloudflare-one/connections/connect-devices/user-side-certificates/automated-deployment/), you can manually install a Cloudflare certificate. You must add the certificate to both the [system keychain](#add-the-certificate-to-operating-systems) and to [individual application stores](#add-the-certificate-to-applications). These steps must be performed on each new device that is to be subject to HTTP filtering.
+Zero Trust will only inspect traffic using installed certificates set to [**Available** and **In-Use**](/cloudflare-one/connections/connect-devices/user-side-certificates/#activate-a-root-certificate).
+
## Download the Cloudflare root certificate
:::note[Download limitation]
-You can only download certificates from the Zero Trust dashboard.
+You can only download Cloudflare-generated certificates from the Zero Trust dashboard or with WARP.
:::
First, [generate](/cloudflare-one/connections/connect-devices/user-side-certificates/#generate-a-cloudflare-root-certificate) and download a Cloudflare certificate. The certificate is available in both `.pem` and `.crt` file format. Certain applications require the certificate to be in a specific file type, so ensure you download the most appropriate file for your use case.
@@ -33,6 +33,8 @@ First, [generate](/cloudflare-one/connections/connect-devices/user-side-certific
3. Select the certificate you want to download.
4. Depending on which format you want, choose **Download .pem** and/or **Download .crt**.
+Alternatively, you can download and install a certificate [using WARP](/cloudflare-one/connections/connect-devices/user-side-certificates/automated-deployment/#install-a-certificate-using-warp). WARP will add the certificates to the device's system certificate store in `installed_certs/.pem`.
+
### Verify the downloaded certificate
To verify your download, use a terminal to check that the downloaded certificate's hash matches the thumbprint listed under **Certificate thumbprint**. For example:
@@ -715,20 +717,22 @@ To trust a Cloudflare root certificate in the Google Drive desktop application,
-1. In the Finder menu bar, go to **Go** > **Go to Folder**. Enter `/Applications/Google Drive.app/Contents/Resources`.
+1. In a terminal, copy the contents of the Google Drive certificate file to a new certificate file in a permanent location, such as your Documents folder. For example:
-2. Find `roots.pem` and copy it to a permanent location, such as your Documents folder.
+ ```sh
+ cat /Applications/"Google Drive.app"/Contents/Resources/roots.pem > ~/Documents/gdrivecerts.pem
+ ```
-3. Append the contents of `cloudflare.pem` to the end of `roots.pem`.
+2. Append the contents of the downloaded certificate to the end of the new file. For example:
```sh
- cat ~/Downloads/certificate.pem >> path/to/roots.pem
+ cat ~/Downloads/certificate.pem >> ~/Documents/gdrivecerts.pem
```
-4. Apply the newly created root certificate to your Google Drive application.
+3. Apply the newly created root certificate to your Google Drive application. For example:
```sh
- sudo defaults write /Library/Preferences/com.google.drivefs.settings TrustedRootCertsFile -string "path/to/roots.pem"
+ sudo defaults write /Library/Preferences/com.google.drivefs.settings TrustedRootCertsFile "/Users/$(whoami)/Documents/gdrivecerts.pem"
```
You can verify the update with the following command.
@@ -741,26 +745,28 @@ defaults read /Library/Preferences/com.google.drivefs.settings
-1. In File Explorer, go to `\Program Files\Google\Drive File Stream\\config\`.
+1. In an administrator PowerShell terminal, copy the contents of the Google Drive certificate file to a new certificate file in a permanent location, such as your Documents folder. For example:
-2. Find `roots.pem` and copy it to a permanent location, such as your Documents folder.
+ ```powershell
+ Get-Content "C:\Program Files\Google\Drive File Stream\roots.pem" | Set-Content "$HOME\Documents\gdrivecerts.pem"
+ ```
-3. Append the contents of `cloudflare.pem` to the end of `roots.pem`.
+2. Append the contents of the downloaded certificate to the end of the new file. For example:
```powershell
- cat ~\Downloads\certificate.pem >> path\to\roots.pem
+ Get-Content "$HOME\Downloads\certificate.pem" | Add-Content "$HOME\Documents\gdrivecerts.pem"
```
-4. Update the Google Drive registry key.
+3. Apply the newly created root certificate to your Google Drive application. For example:
```powershell
- reg ADD "HKEY_LOCAL_MACHINE\Software\Google\DriveFS" /v TrustedRootCertsFile /t REG_SZ /d "path\to\roots.pem"
+ Set-ItemProperty -Path "HKLM:\SOFTWARE\Google\DriveFS" -Name "TrustedRootCertsFile" -Value "$HOME\Documents\gdrivecerts.pem"
```
You can verify the update with the following command.
```powershell
-reg QUERY "HKEY_LOCAL_MACHINE\Software\Google\DriveFS" /v TrustedRootCertsFile"
+Get-ItemProperty -Path "HKLM:\SOFTWARE\Google\DriveFS" | Select-Object TrustedRootCertsFile
```
diff --git a/src/content/docs/cloudflare-one/connections/connect-devices/warp/configure-warp/warp-settings/index.mdx b/src/content/docs/cloudflare-one/connections/connect-devices/warp/configure-warp/warp-settings/index.mdx
index 49244e86c2680d..d9c49d6389c327 100644
--- a/src/content/docs/cloudflare-one/connections/connect-devices/warp/configure-warp/warp-settings/index.mdx
+++ b/src/content/docs/cloudflare-one/connections/connect-devices/warp/configure-warp/warp-settings/index.mdx
@@ -199,7 +199,7 @@ We recommend keeping this set to a very low value — usually just enough time f
**Value:**
- `0`: Allow the switch to stay in the off position indefinitely until the user turns it back on.
-- `1` to `86400`: Turn switch back on automatically after the specified number of seconds.
+- `1` to `1440`: Turn switch back on automatically after the specified number of minutes.
### Support URL
diff --git a/src/content/docs/cloudflare-one/connections/connect-devices/warp/deployment/mdm-deployment/parameters.mdx b/src/content/docs/cloudflare-one/connections/connect-devices/warp/deployment/mdm-deployment/parameters.mdx
index 574bf7272035f8..bee0cef29bb157 100644
--- a/src/content/docs/cloudflare-one/connections/connect-devices/warp/deployment/mdm-deployment/parameters.mdx
+++ b/src/content/docs/cloudflare-one/connections/connect-devices/warp/deployment/mdm-deployment/parameters.mdx
@@ -75,14 +75,14 @@ Requires the `auth_client_id` parameter.
### `auto_connect`
-If switch has been turned off by user, the client will automatically turn itself back on after the specified number of seconds. We recommend keeping this set to a very low value — usually just enough time for a user to log in to hotel or airport Wi-Fi. If any value is specified for `auto_connect` the default state of the WARP client will always be Connected (for example, after the initial install or a reboot).
+If switch has been turned off by user, the client will automatically turn itself back on after the specified number of minutes. We recommend keeping this set to a very low value — usually just enough time for a user to log in to hotel or airport Wi-Fi. If any value is specified for `auto_connect` the default state of the WARP client will always be Connected (for example, after the initial install or a reboot).
**Value Type:** `integer`
**Value:**
* `0` — Allow the switch to stay in the off position indefinitely until the user turns it back on.
-* `1` to `86400` — Turn switch back on automatically after the specified number of seconds.
+* `1` to `1440` — Turn switch back on automatically after the specified number of minutes.
:::note
@@ -249,4 +249,4 @@ An optional property. `is_browser` will help the Cloudflare One Agent applicatio
**Value Type**: `boolean`
-**Value**: If the value is `true`, identifies the application defined in `app_identifier` as a browser. The default value is `false` and `is_browser` is an optional property.
+**Value**: If the value is `true`, identifies the application defined in `app_identifier` as a browser. The default value is `false` and `is_browser` is an optional property.
\ No newline at end of file
diff --git a/src/content/docs/cloudflare-one/connections/connect-devices/warp/deployment/mdm-deployment/partners/fleet.mdx b/src/content/docs/cloudflare-one/connections/connect-devices/warp/deployment/mdm-deployment/partners/fleet.mdx
index 6039f4a5871403..962a2e4285a6c9 100644
--- a/src/content/docs/cloudflare-one/connections/connect-devices/warp/deployment/mdm-deployment/partners/fleet.mdx
+++ b/src/content/docs/cloudflare-one/connections/connect-devices/warp/deployment/mdm-deployment/partners/fleet.mdx
@@ -133,7 +133,7 @@ echo -e "\n organization\n your-team-name\
curl -fsSL https://pkg.cloudflareclient.com/pubkey.gpg | sudo gpg --yes --dearmor --output /usr/share/keyrings/cloudflare-warp-archive-keyring.gpg
# Add this repo to your apt repositories
-echo "deb [signed-by=/usr/share/keyrings/cloudflare-warp-archive-keyring.gpg] https://pkg.cloudflareclient.com/ $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/cloudflare-client.list
+echo "deb [signed-by=/usr/share/keyrings/cloudflare-warp-archive-keyring.gpg] https://pkg.cloudflareclient.com/ any main" | sudo tee /etc/apt/sources.list.d/cloudflare-client.list
# Install
sudo apt-get -y update && sudo apt-get -y install cloudflare-warp
diff --git a/src/content/docs/cloudflare-one/connections/connect-devices/warp/download-warp/beta-releases.mdx b/src/content/docs/cloudflare-one/connections/connect-devices/warp/download-warp/beta-releases.mdx
index 74d65b2f96d1cf..19e1e60942939a 100644
--- a/src/content/docs/cloudflare-one/connections/connect-devices/warp/download-warp/beta-releases.mdx
+++ b/src/content/docs/cloudflare-one/connections/connect-devices/warp/download-warp/beta-releases.mdx
@@ -5,18 +5,22 @@ sidebar:
order: 3
---
-import { Render, Details, WARPReleases } from "~/components";
+import { Render, Details, LinkButton, WARPReleases } from "~/components";
-Cloudflare tests new WARP features and improvements in an unstable beta release before adding them to the stable release. To get early access to new features, download the latest beta client from the links below.
+Cloudflare tests new WARP features and improvements in an unstable beta release before adding them to the [stable release](/cloudflare-one/connections/connect-devices/warp/download-warp/). Beta releases are not recommended for production environments. To get early access to new features, download the latest beta client from the links below.
## Windows
+Download latest beta release
+
## macOS
+Download latest beta release
+
\ No newline at end of file
diff --git a/src/content/docs/cloudflare-one/connections/connect-devices/warp/download-warp/index.mdx b/src/content/docs/cloudflare-one/connections/connect-devices/warp/download-warp/index.mdx
index 58c7c915d3c6ee..3776777d2dc760 100644
--- a/src/content/docs/cloudflare-one/connections/connect-devices/warp/download-warp/index.mdx
+++ b/src/content/docs/cloudflare-one/connections/connect-devices/warp/download-warp/index.mdx
@@ -6,24 +6,32 @@ sidebar:
label: Stable releases
---
-import { Render, Details, WARPReleases } from "~/components";
+import { Render, Details, LinkButton, WARPReleases } from "~/components";
-Download the WARP client from one of the following links after checking requirements.
+This page contains the stable WARP client releases currently supported by Cloudflare. We recommend using stable releases for production environments. You can download stable releases from the links below after checking requirements.
+
+Cloudflare also offers an unstable beta release track with the latest features and improvements. To preview new features before they are available in a stable release, refer to the [beta release page](/cloudflare-one/connections/connect-devices/warp/download-warp/beta-releases/).
## Windows
+Download latest stable release
+
## macOS
+Download latest stable release
+
## Linux
+Package repository
+
diff --git a/src/content/docs/cloudflare-one/connections/connect-devices/warp/download-warp/update-warp.mdx b/src/content/docs/cloudflare-one/connections/connect-devices/warp/download-warp/update-warp.mdx
index aa9f3af6d3628c..18d5e154e8835e 100644
--- a/src/content/docs/cloudflare-one/connections/connect-devices/warp/download-warp/update-warp.mdx
+++ b/src/content/docs/cloudflare-one/connections/connect-devices/warp/download-warp/update-warp.mdx
@@ -18,7 +18,7 @@ We also recognize that there is a cost associated for a business to go through a
Cloudflare does not operate on a major-release upgrade cycle; all releases for the WARP client are incremental. With this in mind, you should choose which releases make the most sense for your business.
-Cloudflare publishes release notes for WARP in the official [download repositories](/cloudflare-one/connections/connect-devices/warp/download-warp/) and in the [WARP changelog](/cloudflare-one/changelog/warp/). You can look at the release notes to determine whether there is an explicit reason for updating to the latest release.
+Cloudflare publishes release notes for WARP on the [Downloads page](/cloudflare-one/connections/connect-devices/warp/download-warp/) and in the [WARP changelog](/cloudflare-one/changelog/warp/). You can look at the release notes to determine whether there is an explicit reason for updating to the latest release.
### Support lifecycle
diff --git a/src/content/docs/cloudflare-one/connections/connect-devices/warp/troubleshooting/known-limitations.mdx b/src/content/docs/cloudflare-one/connections/connect-devices/warp/troubleshooting/known-limitations.mdx
index a04dff999b5c07..01f5f0a1ad4a18 100644
--- a/src/content/docs/cloudflare-one/connections/connect-devices/warp/troubleshooting/known-limitations.mdx
+++ b/src/content/docs/cloudflare-one/connections/connect-devices/warp/troubleshooting/known-limitations.mdx
@@ -80,7 +80,7 @@ Cisco Meraki devices have a bug where WARP traffic can sometimes be identified a
## Windows Teredo
-The [Windows Teredo](https://learn.microsoft.com/en-us/windows/win32/teredo/about-teredo) interface conflicts with the WARP client. Since Teredo and WARP will fight for control over IPv6 traffic routing, you must disable Terado on your Windows device. This allows the WARP client to provide IPv6 connectivity on the device.
+The [Windows Teredo](https://learn.microsoft.com/en-us/windows/win32/teredo/about-teredo) interface conflicts with the WARP client. Since Teredo and WARP will fight for control over IPv6 traffic routing, you must disable Teredo on your Windows device. This allows the WARP client to provide IPv6 connectivity on the device.
## Docker on Linux with bridged networking
@@ -101,3 +101,7 @@ docker network create -o "com.docker.network.driver.mtu=1420" my-docker-network
```
The MTU value should be set to the MTU of your host's default interface minus 80 bytes for the WARP protocol overhead. Most MTUs are 1500, therefore 1420 should work for most people.
+
+## Windows 10 in Microsoft 365 Cloud PC is not supported
+
+Use of the WARP client in a Microsoft 365 Windows 10 Cloud PC is not supported. To work around this limitation, use Windows 11.
diff --git a/src/content/docs/cloudflare-one/connections/connect-networks/get-started/create-remote-tunnel.mdx b/src/content/docs/cloudflare-one/connections/connect-networks/get-started/create-remote-tunnel.mdx
index 290a4e33f4263b..e3f91cbe5ff87a 100644
--- a/src/content/docs/cloudflare-one/connections/connect-networks/get-started/create-remote-tunnel.mdx
+++ b/src/content/docs/cloudflare-one/connections/connect-networks/get-started/create-remote-tunnel.mdx
@@ -26,6 +26,8 @@ Follow these steps to connect an application through your tunnel. If you are loo
+If you add a multi-level subdomain (more than one level of subdomain), you must [order an Advanced Certificate for the hostname](/cloudflare-one/faq/troubleshooting/#i-see-this-site-cant-provide-a-secure-connection).
+
The application is now publicly available on the Internet. To allow or block specific users, [create an Access application](/cloudflare-one/applications/configure-apps/self-hosted-public-app/).
## 2b. Connect a network
@@ -43,4 +45,3 @@ To configure Zero Trust policies and connect as a user, refer to [Connect privat
After saving the tunnel, you will be redirected to the **Tunnels** page. Look for your new tunnel to be listed along with its active connector.

-
diff --git a/src/content/docs/cloudflare-one/faq/getting-started-faq.mdx b/src/content/docs/cloudflare-one/faq/getting-started-faq.mdx
index d5aa0ed421f458..24978f770020d5 100644
--- a/src/content/docs/cloudflare-one/faq/getting-started-faq.mdx
+++ b/src/content/docs/cloudflare-one/faq/getting-started-faq.mdx
@@ -14,9 +14,9 @@ description: Review FAQs about getting started with Cloudflare Zero Trust.
You can sign up today at [this link](https://dash.cloudflare.com/sign-up/teams). Follow the onboarding steps, choose a team name and a payment plan, and start protecting your network in just a few minutes.
-## What's a team domain/team name?
+## What is a team domain/team name?
-Your team domain is a unique subdomain assigned to your Cloudflare account; for example, `.cloudflareaccess.com`. Setting up a team domain is an essential step in your Zero Trust configuration. This is where your users will find the apps you have secured behind Cloudflare Zero Trust — displayed in the [App Launcher](/cloudflare-one/applications/app-launcher/) — and will be able to make login requests to them. The customizable portion of your team domain is called **team name**. You can view your team name and team domain in Zero Trust under **Settings** > **Custom Pages**.
+Your team domain is a unique subdomain assigned to your Cloudflare account, for example, `.cloudflareaccess.com`. [Setting up a team domain](/cloudflare-one/setup/#create-a-zero-trust-organization) is an essential step in your Zero Trust configuration. This is where your users will find the apps you have secured behind Cloudflare Zero Trust — displayed in the [App Launcher](/cloudflare-one/applications/app-launcher/) — and will be able to make login requests to them. The customizable portion of your team domain is called **team name**. You can view your team name and team domain in Zero Trust under **Settings** > **Custom Pages**.
| team name | team domain |
| ---------------- | --------------------------------------- |
@@ -29,6 +29,20 @@ You can change your team name at any time, unless you have the Cloudflare dashbo
If you change your team name, you need to update your organization's identity providers (IdPs) and the WARP client to reflect the new team name in order to avoid any mismatch errors.
:::
+### Why is my old team name still showing up on the Login page and App Launcher?
+
+After changing your team name, you will need to check your Block page, Login page, and App Launcher settings to make sure the new team name is reflected.
+
+To verify that your team name change is successfully rendering on the Block page, Login page, and App Launcher:
+
+1. In [Zero Trust](https://one.dash.cloudflare.com/), go to **Settings** > **Custom Pages**.
+2. Find the **Block page** and **Login page** > select **Customize** next to the page you would like to review first.
+3. Review that the value in **Your Organization's name** matches your new team name.
+4. If the desired name is not already displayed, change the value to your desired team name and select **Save**.
+5. Check both pages (**Block page** and **Login page**) to set **Your Organization's name** as your desired team name.
+
+The App Launcher will display the same team name set on the Login page, so you do not need to update the **Your Organization's name** field in the App Launcher page.
+
## How do I change my subscription plan?
To make changes to your subscription, visit the Billing section under Account in [Zero Trust](https://one.dash.cloudflare.com/). You can change or cancel your subscription at any time. Just remember - if you downgrade your plan during a billing cycle, your downgraded pricing will apply in the next billing cycle. If you upgrade during a billing cycle, you will be billed for the upgraded plan at the moment you select it.
diff --git a/src/content/docs/cloudflare-one/faq/troubleshooting.mdx b/src/content/docs/cloudflare-one/faq/troubleshooting.mdx
index 16d9c86888c408..7a5dad59db6e3f 100644
--- a/src/content/docs/cloudflare-one/faq/troubleshooting.mdx
+++ b/src/content/docs/cloudflare-one/faq/troubleshooting.mdx
@@ -180,3 +180,9 @@ If you need to unblock port `25`, contact your account team.
This issue can occur when communicating with an origin that partially supports HTTP/2. In these scenarios, the connection from Gateway to the website starts using HTTP/2 but requests a downgrade to HTTP/1.1 for some requests. For example, servers such as [Microsoft Internet Information Services (IIS)](https://learn.microsoft.com/iis/get-started/whats-new-in-iis-10/http2-on-iis#when-is-http2-not-supported) do not support authentication over HTTP/2. When errors occur, the website may send back a `RST_STREAM` frame with the error code `HTTP_1_1_REQUIRED`, which indicates that the browser should retry the request over HTTP/1.1. Gateway translates any received upstream `RST_STREAM` frames to a pseudo socket close, so this appears as a `502 Bad Gateway` exception page. The browser will not indicate why it failed.
Gateway does not support this downgrade mechanism. When receiving the `HTTP_1_1_REQUIRED` error code, Gateway will not reissue requests over HTTP/1.1. To make the connection from Gateway to the website successfully, you will need to disable HTTP/2 at the origin.
+
+## I see `This site can't provide a secure connection.`
+
+If you see an error with the title `This site can't provide a secure connection` and a subtitle of ` uses an unsupported protocol`, you must [order an Advanced Certificate](/ssl/edge-certificates/advanced-certificate-manager/manage-certificates/#create-a-certificate).
+
+If you added a [multi-level subdomain](/cloudflare-one/connections/connect-networks/get-started/create-remote-tunnel/#2a-connect-an-application) (more than one level of subdomain), you must [order an Advanced Certificate for the hostname](/cloudflare-one/connections/connect-networks/get-started/create-remote-tunnel/#2a-connect-an-application) as Cloudflare's Universal certificate will not cover the public hostname by default.
diff --git a/src/content/docs/cloudflare-one/identity/devices/service-providers/index.mdx b/src/content/docs/cloudflare-one/identity/devices/service-providers/index.mdx
index 82025299f5f43c..035b4475a8a4d3 100644
--- a/src/content/docs/cloudflare-one/identity/devices/service-providers/index.mdx
+++ b/src/content/docs/cloudflare-one/identity/devices/service-providers/index.mdx
@@ -23,5 +23,6 @@ Service-to-service integrations allow the WARP client to get device posture data
| [Kolide](/cloudflare-one/identity/devices/service-providers/kolide/) | ✅ | ✅ | ✅ | ❌ | ❌ |
| [Microsoft Endpoint Manager](/cloudflare-one/identity/devices/service-providers/microsoft/) | ✅ | ✅ | ❌ | ❌ | ❌ |
| [SentinelOne](/cloudflare-one/identity/devices/service-providers/sentinelone/) | ✅ | ✅ | ✅ | ❌ | ❌ |
+| [Tanium](/cloudflare-one/identity/devices/service-providers/taniums2s/) | ✅ | ✅ | ✅ | ❌ | ❌ |
| [Uptycs](/cloudflare-one/identity/devices/service-providers/uptycs/) | ✅ | ✅ | ✅ | ❌ | ❌ |
| [Workspace ONE](/cloudflare-one/identity/devices/service-providers/workspace-one/) | ✅ | ✅ | ✅ | ❌ | ❌ |
diff --git a/src/content/docs/cloudflare-one/identity/idp-integration/okta.mdx b/src/content/docs/cloudflare-one/identity/idp-integration/okta.mdx
index 58fceda3eef086..051bf61ca19ae8 100644
--- a/src/content/docs/cloudflare-one/identity/idp-integration/okta.mdx
+++ b/src/content/docs/cloudflare-one/identity/idp-integration/okta.mdx
@@ -40,7 +40,7 @@ Additionally, you can configure Okta to use risk information from Zero Trust [us
9. Set the **Groups claim filter** to _Matches regex_ and its value to `.*`.
:::note
- Groups managed outside of Okta (for example, Microsoft Entra ID or Google groups) may require different regex values. For more information, refer to the [Okta documentation](https://support.okta.com/help/s/article/Why-isnt-my-Groups-claim-returning-Active-Directory-groups).
+ Groups managed outside of Okta (for example, Microsoft Entra ID or Google groups) may require different regex values. For more information, refer to the Okta documentation on [Groups Claims](https://support.okta.com/help/s/article/Why-isnt-my-Groups-claim-returning-Active-Directory-groups) and [OpenID Connect Claims](https://support.okta.com/help/s/article/Can-we-retrieve-both-Active-Directory-and-Okta-groups-in-OpenID-Connect-claims).
:::
10. In the **General** tab, copy the **Client ID** and **Client secret**.
@@ -94,10 +94,7 @@ If you would like to only maintain one Okta app instance, Okta does support SAML
### 1. Enable SCIM in Zero Trust
-
+
### 2. Configure SCIM in Okta
@@ -135,17 +132,17 @@ If you would like to only maintain one Okta app instance, Okta does support SAML
13. Select **Save** to complete the configuration.
-14. In the **Assignments** tab, add the users you want to synchronize with Cloudflare Access. You can add users in batches by assigning a group. If a user is removed from the application assignment via a either direct user assignment or removed from the group that was assigned to the app, this will trigger a deprovisioning event from Okta to Cloudflare.
+14. In the **Assignments** tab, add the users you want to synchronize with Cloudflare Access. You can add users in batches by assigning a group. If a user is removed from the application assignment, either via a direct user assignment or by being removed from the group that was assigned to the app, this will trigger a deprovisioning event from Okta to Cloudflare.
15. In the **Push Groups** tab, add the Okta groups you want to synchronize with Cloudflare Access. These groups will display in the Access policy builder and are the group memberships that will be added and removed upon membership change in Okta.
- :::note
- Groups in this SCIM app Push Groups integration should match the groups in your base [OIDC app integration](/cloudflare-one/identity/idp-integration/okta/#set-up-okta-as-an-oidc-provider). Because SCIM group membership updates will overwrite any groups in a user's identity, assigning the same groups to each app ensures consistent policy evaluation.
- :::
+ :::note
+ Groups in this SCIM app Push Groups integration should match the groups in your base [OIDC app integration](/cloudflare-one/identity/idp-integration/okta/#set-up-okta-as-an-oidc-provider). Because SCIM group membership updates will overwrite any groups in a user's identity, assigning the same groups to each app ensures consistent policy evaluation.
+ :::
To verify the integration, select **View Logs** in the Okta SCIM application.
-
+
## Example API Configuration
diff --git a/src/content/docs/cloudflare-one/implementation-guides/index.mdx b/src/content/docs/cloudflare-one/implementation-guides/index.mdx
index 456ff33bd365cf..db0d2a648681ab 100644
--- a/src/content/docs/cloudflare-one/implementation-guides/index.mdx
+++ b/src/content/docs/cloudflare-one/implementation-guides/index.mdx
@@ -3,12 +3,30 @@ pcx_content_type: navigation
title: Implementation guides
sidebar:
order: 3
- group:
- hideIndex: true
head: []
description: View implementation guides for Cloudflare Zero Trust.
---
-import { DirectoryListing } from "~/components";
+import { CardGrid, LinkTitleCard } from "~/components";
-
+Implementation guides cover deployment steps and best practices for specific Cloudflare One use cases.
+
+
+
+
+ Provide your users and networks with a secure, performant, and flexible path to the Internet.
+
+
+
+ Give users secure, auditable network and application access.
+
+
+
+ Secure access to internal web applications without a device client.
+
+
+
+ Use Cloudflare's Email Security to protect your Microsoft 365 email inbox from phishing and malware attacks.
+
+
+
\ No newline at end of file
diff --git a/src/content/docs/cloudflare-one/insights/dex/ip-visibility.mdx b/src/content/docs/cloudflare-one/insights/dex/ip-visibility.mdx
new file mode 100644
index 00000000000000..0a3ebd8cacf525
--- /dev/null
+++ b/src/content/docs/cloudflare-one/insights/dex/ip-visibility.mdx
@@ -0,0 +1,61 @@
+---
+pcx_content_type: reference
+title: IP visibility
+sidebar:
+ order: 7
+---
+
+import { Render } from "~/components";
+
+DEX's IP visibility gives administrators insight into three different IP types per device:
+
+1. **Device**: The private IP address of an end-user device.
+2. **ISP**: The public IP assigned by the ISP that the end-user device is being routed through.
+3. **Gateway**: The router's private IP (the router the end-user device is connected to).
+
+:::note
+
+The ISP IP is only visible to users with the [Zero Trust PII role](/cloudflare-one/roles-permissions/#cloudflare-zero-trust-pii).
+
+:::
+
+DEX's IP visibility supports both IPv6 and IPv4 addresses.
+
+IP information is crucial for IT administrators to accurately troubleshoot network issues and identify user locations. IT administrators face challenges like:
+
+- Pinpointing the exact location of a user experiencing issues ("AP 87 is bad.")
+- Identifying network access control policy violations ("NAC Policies is not applied properly.")
+- Troubleshooting firewall restrictions ("Firewall on VLAN 93 is blocking.")
+- Resolving Layer 2 and DHCP related problems.
+- Indirectly determining user identity and device location.
+
+## View a device's IP information
+
+To view IP information for a user device:
+
+1. In [Zero Trust](https://one.dash.cloudflare.com/), go to **My team** > **Devices**.
+2. Select a device, then select **View details**.
+3. Under **Details**, scroll down to **IP details**.
+4. Review the IP details for your selected device's most recent session.
+
+## View a device's IP history
+
+DEX's IP visibility allows you to review an event log of a device's IP history for the last seven days. To view a device's IP history:
+
+1. In [Zero Trust](https://one.dash.cloudflare.com/), go to **My team** > **Devices**.
+2. Select a device > **View details** > under **Details**, scroll down to **IP details**.
+3. Select **View device history**.
+4. View the device's IP history and status from the last seven days.
+5. Select a time to view more information about the device at that time.
+
+Refer to [Available metrics](/cloudflare-one/insights/dex/fleet-status/#available-metrics) to review **Status** and **Mode** descriptions.
+
+## Troubleshoot with IP visibility
+
+While IP visibility allows you to inspect a device's IP information, use [DEX's live analytics](/cloudflare-one/insights/dex/fleet-status/#available-metrics) to review which Cloudflare data center the device is connected to. When traffic leaves a WARP-connected end-user device, it will hit a [Cloudflare data center](/support/troubleshooting/general-troubleshooting/gathering-information-for-troubleshooting-sites/#identify-the-cloudflare-data-center-serving-your-request).
+
+To find which Cloudflare data center a device is connected to:
+
+1. Follow the steps listed in [View IP information](#view-a-devices-ip-history) to find a device's IP information.
+2. Select **Device Monitoring** above **Device event log**.
+3. Find **Colo** in the **Device details** table to review which Cloudflare data center your selected device's egress traffic is connected to.
diff --git a/src/content/docs/cloudflare-one/insights/email-monitoring/email-details.mdx b/src/content/docs/cloudflare-one/insights/email-monitoring/email-details.mdx
new file mode 100644
index 00000000000000..0a7d5ef58876c2
--- /dev/null
+++ b/src/content/docs/cloudflare-one/insights/email-monitoring/email-details.mdx
@@ -0,0 +1,47 @@
+---
+title: Email details
+pcx_content_type: reference
+sidebar:
+ order: 6
+---
+
+Email Security shows you the following email detail information:
+
+- Details
+- Action log
+- Raw message
+- Mail trace
+
+### Details
+
+Email Security displays the following details:
+
+1. **Threat type**: Threat type of the email, for example, [credential harvester](/cloudflare-one/email-security/reference/how-es-detects-phish/#credential-harvesters), and [IP-based spam](/cloudflare-one/email-security/reference/how-es-detects-phish/#ip-based-spam).
+2. **Validation**: Email validation methods [SPF](https://www.cloudflare.com/learning/dns/dns-records/dns-spf-record/), [DKIM](https://www.cloudflare.com/learning/dns/dns-records/dns-dkim-record/), [DMARC](https://www.cloudflare.com/learning/dns/dns-records/dns-dmarc-record/).
+3. **Sender details**: Information includes:
+ - IP address
+ - Registered domain
+ - Autonomous sys number: This number identifies your [autonomous system (AS)](https://www.cloudflare.com/en-gb/learning/network-layer/what-is-an-autonomous-system/).
+ - Autonomous sys name: This name identifies your autonomous system (AS).
+ - Country
+4. **Links identified**: A list of malicious links identified by Email Security.
+5. **Reasons for disposition**: Description of why the email was deemed as malicious, suspicious, or spam.
+
+### Action log
+
+Action log allows you to review post-delivery actions performed on your selected message. The action log displays:
+
+- **Date**: Date when the post-delivery action was performed.
+- **Activity**: The activity taken on an email. For example, moving the email to the trash folder, releasing a quarantined email, and more.
+
+### Raw message
+
+Raw message allows you to view the raw details of the message. You can also choose to download the email message. To download the message, select **Download .EML**.
+
+### Mail trace
+
+Mail trace allows you to track the path your selected message took from the sender to the recipient. Mail trace displays:
+
+- **Date**: The date and time when the mail was tracked.
+- **Type**: An email can be inbound (an email sent to you from another email address), or outbound (an email sent from your email address).
+- **Activity**: The activity taken on an email. For example, moving the email to the trash folder, releasing a quarantined email, and more.
\ No newline at end of file
diff --git a/src/content/docs/cloudflare-one/insights/email-monitoring/search-email.mdx b/src/content/docs/cloudflare-one/insights/email-monitoring/search-email.mdx
index c58d792a8e9daf..0d0d29c739252c 100644
--- a/src/content/docs/cloudflare-one/insights/email-monitoring/search-email.mdx
+++ b/src/content/docs/cloudflare-one/insights/email-monitoring/search-email.mdx
@@ -133,7 +133,7 @@ To view status and actions for each email:
1. On the **Investigation** page, select the three dots.
2. Selecting the three dots will show you the following options:
- If the email is quarantined:
- - **View details**: Refer to [Email details](/cloudflare-one/roles-permissions/#email-details) to learn more.
+ - **View details**: Refer to [Email details](/cloudflare-one/insights/email-monitoring/email-details/) to learn more.
- **View similar emails**: Find similar emails based on the `value_edf_hash` (Electronic Detection Fingerprint hash).
- **Release**: Email Security will no longer quarantine your chosen messages.
- **Reclassify**: Choose the dispositions of your messages if they are incorrect. Refer to [Reclassify messages](/cloudflare-one/insights/email-monitoring/search-email/#reclassify-messages) to learn more.
diff --git a/src/content/docs/cloudflare-one/policies/access/external-evaluation.mdx b/src/content/docs/cloudflare-one/policies/access/external-evaluation.mdx
index b5ed268f0a3221..b5e3e1b6a0637e 100644
--- a/src/content/docs/cloudflare-one/policies/access/external-evaluation.mdx
+++ b/src/content/docs/cloudflare-one/policies/access/external-evaluation.mdx
@@ -5,7 +5,7 @@ sidebar:
order: 4
---
-import { GlossaryTooltip, Example } from "~/components";
+import { GlossaryTooltip, Example, WranglerConfig } from "~/components";
With Cloudflare Access, you can create Allow or Block policies which evaluate the user based on custom criteria. This is done by adding an **External Evaluation** rule to your policy. The **External Evaluation** selector requires two values:
@@ -53,12 +53,14 @@ You can set up External Evaluation rules using any API service, but to get start
id = "YOUR_KV_NAMESPACE_ID"
```
-4. Open `wrangler.toml` in a text editor and insert the following:
+4. Open the `wrangler.toml / wrangler.json` file in an editor and insert the following:
- `[[kv_namespaces]]`: Add the output generated in the previous step.
- ``: your Cloudflare Zero Trust team name.
- ```txt
+
+
+ ```toml
name = "my-worker"
workers_dev = true
compatibility_date = "2024-08-06"
@@ -73,6 +75,7 @@ You can set up External Evaluation rules using any API service, but to get start
TEAM_DOMAIN=".cloudflareaccess.com"
DEBUG=false
```
+
### 2. Program your business logic
@@ -136,7 +139,7 @@ To debug your External Evaluation rule:
cd my-worker
```
-2. Open `wrangler.toml` in a text editor and set the `debug` variable to `TRUE`.
+2. Open the `wrangler.toml / wrangler.json` file in an editor and set the `debug` variable to `TRUE`.
3. Deploy your changes.
diff --git a/src/content/docs/cloudflare-one/policies/access/policy-management.mdx b/src/content/docs/cloudflare-one/policies/access/policy-management.mdx
index c2bf24263b1cf4..f24c4ae0f0295d 100644
--- a/src/content/docs/cloudflare-one/policies/access/policy-management.mdx
+++ b/src/content/docs/cloudflare-one/policies/access/policy-management.mdx
@@ -75,7 +75,7 @@ To migrate legacy policies to reusable policies:
1. [Create a reusable policy](#create-a-policy) that will replace the legacy policy.
2. Go to the Access application associated with the legacy policy.
3. Add the reusable policy to the application and remove the legacy policy.
-4. Repeat these steps for each legacy policy. If you have duplicate legacy policies, you can replace them with a single reuseable policy.
+4. Repeat these steps for each legacy policy. If you have duplicate legacy policies, you can replace them with a single reusable policy.
### Convert a legacy policy
diff --git a/src/content/docs/cloudflare-one/policies/browser-isolation/isolation-policies.mdx b/src/content/docs/cloudflare-one/policies/browser-isolation/isolation-policies.mdx
index 3d6779a8d0e011..bc8feefac6f359 100644
--- a/src/content/docs/cloudflare-one/policies/browser-isolation/isolation-policies.mdx
+++ b/src/content/docs/cloudflare-one/policies/browser-isolation/isolation-policies.mdx
@@ -43,53 +43,79 @@ You can choose to disable isolation for certain destinations or categories. The
## Policy settings
-The following optional settings appear in the Gateway HTTP policy builder when you select the _Isolate_ action. Enable these settings to [prevent data loss](https://blog.cloudflare.com/data-protection-browser/) when users interact with untrusted websites in the remote browser.
-
-### Disable copy / paste
-
-Prohibits users from copying and pasting content between a remote web page and their local machine.
+The following optional settings appear in the Gateway HTTP policy builder when you select the _Isolate_ action. Configure these settings to [prevent data loss](https://blog.cloudflare.com/data-protection-browser/) when users interact with untrusted websites in the remote browser.
+
+### Copy (from remote to client)
+
+```mermaid
+ flowchart LR
+ subgraph remotebrowser[Remote browser]
+ siteA["Isolated
+ website"]--Data-->remoteclip["Remote
+ clipboard"]
+ end
+ subgraph client[Client]
+ localclip["Local
+ clipboard"]
+ end
+ remoteclip-->localclip
+```
-### Disable printing
+- _Allow_: (Default) Users can copy content from an isolated website to their local clipboard.
+- _Allow only within isolated browser_: Users can only copy content from an isolated website to the remote clipboard. Users cannot copy content out of the remote browser to the local clipboard. You can use this setting alongside [**Paste (from client to remote)**: _Allow only within isolated browser_](/cloudflare-one/policies/browser-isolation/isolation-policies/#paste-from-client-to-remote) to only allow copy-pasting between isolated websites.
+- _Do not allow_: Prohibits users from copying content from an isolated website.
+
+### Paste (from client to remote)
+
+```mermaid
+ flowchart LR
+ subgraph client[Client]
+ localclip["Local
+ clipboard"]
+ end
+ subgraph remotebrowser[Remote browser]
+ remoteclip["Remote
+ clipboard"]-->siteA["Isolated
+ website"]
+ end
+ localclip--Data-->remoteclip
+```
-Prohibits users from printing remote web pages to their local machine.
+- _Allow_: (Default) Users can paste content from their local clipboard to an isolated website.
+- _Allow only within isolated browser_: Users can only paste content from the remote clipboard to an isolated website. Users cannot paste content from their local clipboard to the remote browser. You can use this setting alongside [**Copy (from remote to client)**: _Allow only within isolated browser_](/cloudflare-one/policies/browser-isolation/isolation-policies/#copy-from-remote-to-client) to only allow copy-pasting between isolated websites.
+- _Do not allow_: Prohibits users from pasting content into an isolated website.
-### Disable keyboard
+### File downloads
-Prohibits users from performing keyboard input into the remote web page.
+- _Allow_: (Default) User can download files from an isolated website to their local machine.
+- _Do not allow_: Prohibits users from downloading files from an isolated website to their local machine.
:::note
-
-Mouse input remains available (to allow users to browse a website by following hyperlinks and scrolling). This does not prevent user input into third-party virtual keyboards within a remote web page.
+This option does not prevent files from being downloaded into the remote browser. To prevent files being downloaded into the remote browser, use HTTP Policies to block by [Download Mime Type](/cloudflare-one/policies/gateway/http-policies/#download-and-upload-mime-type).
:::
-### Disable upload
+### File uploads
-Prohibits users from uploading files from their local machine into a remote web page.
+- _Allow_: (Default) Users can upload files from their local machine into an isolated website.
+- _Do not allow_: Prohibits users from uploading files from their local machine into an isolated website.
:::note
-
This option does not prevent files being uploaded to websites from third-party cloud file managers or files downloaded into the remote browser download bar from other isolated websites. To prevent files being uploaded from the remote browser into an isolated website, use HTTP Policies to block by [Upload Mime Type](/cloudflare-one/policies/gateway/http-policies/#download-and-upload-mime-type).
:::
-### Disable download
+### Keyboard
-Prohibits users from exporting files from the remote browser to their local machine.
+- _Allow_: (Default) Users can perform keyboard inputs into an isolated website.
+- _Do not allow_: Prohibits users from performing keyboard inputs into an isolated website.
:::note
-
-This option does not prevent files from being downloaded into the remote browser. To prevent files being downloaded into the remote browser, use HTTP Policies to block by [Download Mime Type](/cloudflare-one/policies/gateway/http-policies/#download-and-upload-mime-type).
+Mouse input remains available to allow users to browse a website by following hyperlinks and scrolling. This does not prevent user input into third-party virtual keyboards within an isolated website.
:::
-### Disable clipboard redirection
+### Printing
-Prevents copying isolated content from the remote browser to their local clipboard and pasting content from their local clipboard into isolated pages.
-
-:::note
-
-This option does not prevent clipboard interactions between isolated websites. Use [Disable copy / paste](/cloudflare-one/policies/browser-isolation/isolation-policies/#disable-copy--paste) to prohibit clipboard use on sensitive isolated applications.
-
-Disable copy / paste and Disable clipboard redirection are mutually exclusive and cannot be used in conjunction with each other.
-:::
+- _Allow_: (Default) Users can print isolated web pages to their local machine.
+- _Do not allow_: Prohibits users from printing isolated web pages to their local machine.
## Common policies
diff --git a/src/content/docs/cloudflare-one/policies/gateway/egress-policies/index.mdx b/src/content/docs/cloudflare-one/policies/gateway/egress-policies/index.mdx
index c67a7317731f6c..e1074b01a0df81 100644
--- a/src/content/docs/cloudflare-one/policies/gateway/egress-policies/index.mdx
+++ b/src/content/docs/cloudflare-one/policies/gateway/egress-policies/index.mdx
@@ -16,6 +16,8 @@ When your users connect to the Internet through Cloudflare Gateway, by default t
Egress policies allow you to control which dedicated egress IP is used and when, based on attributes such as identity, IP address, and geolocation. Traffic that does not match an egress policy will default to using the most performant dedicated egress IP.
+Cloudflare does not publish WARP egress IP ranges. WARP egress IPs are not documented at [Cloudflare's IP Ranges](https://cloudflare.com/ips). To obtain a dedicated WARP egress IP, contact your account team.
+
## Force IP version
To control whether only IPv4 or IPv6 is used to egress, ensure you are [filtering DNS traffic](/cloudflare-one/policies/gateway/initial-setup/dns/), then create a DNS policy to [block AAAA or A records](/cloudflare-one/policies/gateway/dns-policies/common-policies/#control-ip-version).
diff --git a/src/content/docs/cloudflare-one/policies/gateway/proxy.mdx b/src/content/docs/cloudflare-one/policies/gateway/proxy.mdx
index 47802fdb426e9f..499d43e77d773d 100644
--- a/src/content/docs/cloudflare-one/policies/gateway/proxy.mdx
+++ b/src/content/docs/cloudflare-one/policies/gateway/proxy.mdx
@@ -11,7 +11,38 @@ You can forward [HTTP](/cloudflare-one/policies/gateway/initial-setup/http/) and
The Gateway proxy is required for filtering HTTP and network traffic via the WARP client in Gateway with WARP mode. To proxy HTTP traffic without deploying the WARP client, you can configure [PAC files](/cloudflare-one/connections/connect-devices/agentless/pac-files/) on your devices.
-## Proxy protocols
+## Proxy algorithm
+
+Gateway uses the [Happy Eyeballs algorithm](https://datatracker.ietf.org/doc/html/rfc6555) to proxy traffic in the following order:
+
+1. The user's browser initiates the TCP handshake by sending Gateway a TCP SYN segment.
+2. Gateway sends a SYN segment to the origin server.
+3. If the origin server sends a SYN-ACK segment back, Gateway establishes distinct TCP connections between the user and Gateway and between Gateway and the origin server.
+4. Gateway inspects and filters traffic received from the user.
+5. If the traffic passes inspection, Gateway proxies traffic bidirectionally between the user and the origin server.
+
+```mermaid
+flowchart TD
+ %% Accessibility
+ accTitle: How Gateway proxy works
+ accDescr: Flowchart describing how the Gateway proxy uses the Happy Eyeballs algorithm to establish TCP connections and proxy user traffic.
+
+ %% Flowchart
+ A[User's device sends TCP SYN to Gateway] --> B[Gateway sends TCP SYN to origin server]
+ B --> C{{Origin server responds with TCP SYN-ACK?}}
+ C -->|Yes| E[TCP handshakes completed]
+ C -->|No| D[Connection fails]
+ E --> F{{Connection allowed?}}
+ F -->|Yes| G[Gateway proxies traffic bidirectionally]
+ F -->|No| H[Connection blocked by firewall policies]
+
+ %% Styling
+ style D stroke:#D50000
+ style G stroke:#00C853
+ style H stroke:#D50000
+```
+
+## Supported protocols
Gateway supports proxying TCP, UDP, and ICMP traffic.
diff --git a/src/content/docs/cloudflare-one/roles-permissions.mdx b/src/content/docs/cloudflare-one/roles-permissions.mdx
index ace1c4c58e0e1c..84341ff57a2563 100644
--- a/src/content/docs/cloudflare-one/roles-permissions.mdx
+++ b/src/content/docs/cloudflare-one/roles-permissions.mdx
@@ -30,47 +30,4 @@ The Cloudflare Zero Trust PII role should be considered an add-on role, to be co
:::note
The Cloudflare Zero Trust PII role does not apply to Access audit logs. PII is always visible in Access logs.
-:::
-
-## Email details
-
-Email Security shows you the following email detail information:
-
-- Details
-- Action log
-- Raw message
-- Mail trace
-
-### Details
-
-Email Security displays the following details:
-
-1. **Threat type**: Threat type of the email, for example, [credential harvester](/cloudflare-one/email-security/reference/how-es-detects-phish/#credential-harvesters), and [IP-based spam](/cloudflare-one/email-security/reference/how-es-detects-phish/#ip-based-spam).
-2. **Validation**: Email validation methods [SPF](https://www.cloudflare.com/en-gb/learning/dns/dns-records/dns-spf-record/), [DKIM](https://www.cloudflare.com/en-gb/learning/dns/dns-records/dns-dkim-record/), [DMARC](https://www.cloudflare.com/en-gb/learning/dns/dns-records/dns-dmarc-record/).
-3. **Sender details**: Information include:
- - IP address
- - Registered domain
- - Autonomous sys number: This number identifies your [autonomous system (AS)](https://www.cloudflare.com/en-gb/learning/network-layer/what-is-an-autonomous-system/).
- - Autonomous sys name: This name identifies your autonomous system (AS).
- - Country
-4. **Links identified**: A list of malicious links identified by Email Security.
-5. **Reasons for disposition**: Description of why the email was deemed as malicious, suspicious, or spam.
-
-### Action log
-
-Action log allows you to review post-delivery actions performed on your selected message. The action log displays:
-
-- **Date**: Date when the post-delivery action was performed.
-- **Activity**: The activity taken on an email. For example, moving the email to the trash folder, releasing a quarantined email, and more.
-
-### Raw message
-
-Raw message allows you to view the raw details of the message. You can also choose to download the email message. To download the message, select **Download .EML**.
-
-### Mail trace
-
-Mail trace allows you to track the path your selected message took from the sender to the recipient. Mail trace displays:
-
-- **Date**: The date and time when the mail was tracked.
-- **Type**: An email can be inbound (email sent to you from another email), or outbound (emails sent from your email address).
-- **Activity**: The activity taken on an email. For example, moving the email to the trash folder, releasing a quarantined email, and more.
\ No newline at end of file
+:::
\ No newline at end of file
diff --git a/src/content/docs/cloudflare-one/tutorials/entra-id-risky-users.mdx b/src/content/docs/cloudflare-one/tutorials/entra-id-risky-users.mdx
index 6e7590add20715..515c05a805c65d 100644
--- a/src/content/docs/cloudflare-one/tutorials/entra-id-risky-users.mdx
+++ b/src/content/docs/cloudflare-one/tutorials/entra-id-risky-users.mdx
@@ -80,7 +80,7 @@ To get started quickly, deploy our example Cloudflare Workers script by followin
cd risky-users
```
-3. Modify `wrangler.toml` to include the following values:
+3. Modify the `wrangler.toml / wrangler.json` file to include the following values:
- ``: your Cloudflare [account ID](/fundamentals/setup/find-account-and-zone-ids/).
- ``: your Entra ID **Directory (tenant) ID**, obtained when [setting up Entra ID as an identity provider](#1-set-up-entra-id-as-an-identity-provider).
diff --git a/src/content/docs/cloudflare-one/tutorials/extend-sso-with-workers.mdx b/src/content/docs/cloudflare-one/tutorials/extend-sso-with-workers.mdx
index 0e9e33d380cc27..d917b8aa44e9af 100644
--- a/src/content/docs/cloudflare-one/tutorials/extend-sso-with-workers.mdx
+++ b/src/content/docs/cloudflare-one/tutorials/extend-sso-with-workers.mdx
@@ -6,7 +6,7 @@ pcx_content_type: tutorial
title: Send SSO attributes to Access-protected origins with Workers
---
-import { Render, GlossaryTooltip, PackageManagers } from "~/components"
+import { Render, GlossaryTooltip, PackageManagers, WranglerConfig } from "~/components"
This tutorial will walk you through extending the single-sign-on (SSO) capabilities of [Cloudflare Access](/cloudflare-one/policies/access/) with our serverless computing platform, [Cloudflare Workers](/workers/). Specifically, this guide will demonstrate how to modify requests sent to your secured origin to include additional information from the Cloudflare Access authentication event.
@@ -185,9 +185,7 @@ Below is an example of a user identity that includes the `disk_encryption` and `
## 3. Route the Worker to your application
-In `wrangler.toml`, [set up a route](/workers/configuration/routing/routes/) that maps the Worker to your Access application domain:
-
-import { WranglerConfig } from "~/components";
+In the `wrangler.toml / wrangler.json` file, [set up a route](/workers/configuration/routing/routes/) that maps the Worker to your Access application domain:
diff --git a/src/content/docs/constellation/platform/client-api.mdx b/src/content/docs/constellation/platform/client-api.mdx
index 329dc99b644207..550376d7dccaa8 100644
--- a/src/content/docs/constellation/platform/client-api.mdx
+++ b/src/content/docs/constellation/platform/client-api.mdx
@@ -213,7 +213,7 @@ const session = new InferenceSession(
);
```
-* **env.PROJECT** is the project binding defined in your `wrangler.toml` configuration.
+* **env.PROJECT** is the project binding defined in your Wrangler configuration.
* **0ae7bd14...** is the model ID inside the project. Use Wrangler to list the models and their IDs in a project.
#### async session.run()
diff --git a/src/content/docs/d1/best-practices/local-development.mdx b/src/content/docs/d1/best-practices/local-development.mdx
index 5d3d38b1c15986..98e2ed772a364c 100644
--- a/src/content/docs/d1/best-practices/local-development.mdx
+++ b/src/content/docs/d1/best-practices/local-development.mdx
@@ -5,6 +5,8 @@ sidebar:
order: 8
---
+import { WranglerConfig } from "~/components";
+
D1 has fully-featured support for local development, running the same version of D1 as Cloudflare runs globally. Local development uses [Wrangler](/workers/wrangler/install-and-update/), the command-line interface for Workers, to manage local development sessions and state.
## Start a local development session
@@ -52,13 +54,11 @@ To start a local development session:
[b] open a browser, [d] open Devtools, [l] turn off local mode, [c] clear console, [x] to exit
```
-In this example, the Worker has access to local-only D1 database. The corresponding D1 binding in your `wrangler.toml` configuration file would resemble the following:
-
-import { WranglerConfig } from "~/components";
+In this example, the Worker has access to local-only D1 database. The corresponding D1 binding in your `wrangler.toml / wrangler.json` file would resemble the following:
-```toml title="wrangler.toml"
+```toml
[[d1_databases]]
binding = "DB"
database_name = "test-db"
@@ -73,21 +73,19 @@ Refer to the [`wrangler dev` documentation](/workers/wrangler/commands/#dev) to
## Develop locally with Pages
-You can only develop against a _local_ D1 database when using [Cloudflare Pages](/pages/) by creating a minimal `wrangler.toml` in the root of your Pages project. This can be useful when creating schemas, seeding data or otherwise managing a D1 database directly, without adding to your application logic.
+You can only develop against a _local_ D1 database when using [Cloudflare Pages](/pages/) by creating a minimal `wrangler.toml / wrangler.json` file in the root of your Pages project. This can be useful when creating schemas, seeding data or otherwise managing a D1 database directly, without adding to your application logic.
:::caution[Local development for remote databases]
It is currently not possible to develop against a _remote_ D1 database when using [Cloudflare Pages](/pages/).
:::
-Your `wrangler.toml` should resemble the following:
-
-
+Your `wrangler.toml / wrangler.json` file should resemble the following:
```toml
-# If you are only using Pages + D1, you only need the below in your wrangler.toml to interact with D1 locally.
+# If you are only using Pages + D1, you only need the below in your Wrangler config file to interact with D1 locally.
[[d1_databases]]
binding = "DB" # Should match preview_database_id
database_name = "YOUR_DATABASE_NAME"
@@ -160,11 +158,9 @@ console.log(results);
### `unstable_dev`
-Wrangler exposes an [`unstable_dev()`](/workers/wrangler/api/) that allows you to run a local HTTP server for testing Workers and D1. Run [migrations](/d1/reference/migrations/) against a local database by setting a `preview_database_id` in your `wrangler.toml` configuration.
-
-Given the below `wrangler.toml` configuration:
-
+Wrangler exposes an [`unstable_dev()`](/workers/wrangler/api/) that allows you to run a local HTTP server for testing Workers and D1. Run [migrations](/d1/reference/migrations/) against a local database by setting a `preview_database_id` in your Wrangler configuration.
+Given the below Wrangler configuration:
diff --git a/src/content/docs/d1/configuration/environments.mdx b/src/content/docs/d1/configuration/environments.mdx
index 2f4a62c1e3e49a..18d9d451d4943d 100644
--- a/src/content/docs/d1/configuration/environments.mdx
+++ b/src/content/docs/d1/configuration/environments.mdx
@@ -6,11 +6,11 @@ sidebar:
---
-[Environments](/workers/wrangler/environments/) are different contexts that your code runs in. Cloudflare Developer Platform allows you to create and manage different environments. Through environments, you can deploy the same project to multiple places under multiple names.
+import { WranglerConfig } from "~/components";
-To specify different D1 databases for different environments, use the following syntax in your `wrangler.toml` file:
+[Environments](/workers/wrangler/environments/) are different contexts that your code runs in. Cloudflare Developer Platform allows you to create and manage different environments. Through environments, you can deploy the same project to multiple places under multiple names.
-import { WranglerConfig } from "~/components";
+To specify different D1 databases for different environments, use the following syntax in your Wrangler file:
@@ -32,11 +32,9 @@ d1_databases = [
In the code above, the `staging` environment is using a different database (`DATABASE_NAME_1`) than the `production` environment (`DATABASE_NAME_2`).
-## Anatomy of `wrangler.toml` file
-
-If you need to specify different D1 databases for different environments, your `wrangler.toml` may contain bindings that resemble the following:
-
+## Anatomy of Wrangler file
+If you need to specify different D1 databases for different environments, your `wrangler.toml / wrangler.json` file may contain bindings that resemble the following:
diff --git a/src/content/docs/d1/examples/d1-and-hono.mdx b/src/content/docs/d1/examples/d1-and-hono.mdx
index 9a2784c5f6e405..a77e6abc9dfed8 100644
--- a/src/content/docs/d1/examples/d1-and-hono.mdx
+++ b/src/content/docs/d1/examples/d1-and-hono.mdx
@@ -18,14 +18,14 @@ Hono is a fast web framework for building API-first applications, and it include
When using Workers:
-* Ensure you have configured [`wrangler.toml`](/d1/get-started/#3-bind-your-worker-to-your-d1-database) to bind your D1 database to your Worker.
+* Ensure you have configured your [`wrangler.toml / wrangler.json` file](/d1/get-started/#3-bind-your-worker-to-your-d1-database) to bind your D1 database to your Worker.
* You can access your D1 databases via Hono's [`Context`](https://hono.dev/api/context) parameter: [bindings](https://hono.dev/getting-started/cloudflare-workers#bindings) are exposed on `context.env`. If you configured a [binding](/pages/functions/bindings/#d1-databases) named `DB`, then you would access [D1 Workers Binding API](/d1/worker-api/prepared-statements/) methods via `c.env.DB`.
* Refer to the Hono documentation for [Cloudflare Workers](https://hono.dev/getting-started/cloudflare-workers).
If you are using [Pages Functions](/pages/functions/):
1. Bind a D1 database to your [Pages Function](/pages/functions/bindings/#d1-databases).
-2. Pass the `--d1 BINDING_NAME=DATABASE_ID` flag to `wrangler dev` when developing locally. `BINDING_NAME` should match what call in your code, and `DATABASE_ID` should match the `database_id` defined in your wrangler.toml: for example, `--d1 DB=xxxx-xxxx-xxxx-xxxx-xxxx`.
+2. Pass the `--d1 BINDING_NAME=DATABASE_ID` flag to `wrangler dev` when developing locally. `BINDING_NAME` should match what you call it in your code, and `DATABASE_ID` should match the `database_id` defined in your `wrangler.toml / wrangler.json` file: for example, `--d1 DB=xxxx-xxxx-xxxx-xxxx-xxxx`.
3. Refer to the Hono guide for [Cloudflare Pages](https://hono.dev/getting-started/cloudflare-pages).
The following examples show how to access a D1 database bound to `DB` from both a Workers script and a Pages Function:
diff --git a/src/content/docs/d1/examples/d1-and-remix.mdx b/src/content/docs/d1/examples/d1-and-remix.mdx
index 3cf86ffd579eef..bd706040910ea8 100644
--- a/src/content/docs/d1/examples/d1-and-remix.mdx
+++ b/src/content/docs/d1/examples/d1-and-remix.mdx
@@ -20,7 +20,7 @@ To set up a new Remix site on Cloudflare Pages that can query D1:
1. **Refer to [the Remix guide](/pages/framework-guides/deploy-a-remix-site/)**.
2. Bind a D1 database to your [Pages Function](/pages/functions/bindings/#d1-databases).
-3. Pass the `--d1 BINDING_NAME=DATABASE_ID` flag to `wrangler dev` when developing locally. `BINDING_NAME` should match what call in your code, and `DATABASE_ID` should match the `database_id` defined in your wrangler.toml: for example, `--d1 DB=xxxx-xxxx-xxxx-xxxx-xxxx`.
+3. Pass the `--d1 BINDING_NAME=DATABASE_ID` flag to `wrangler dev` when developing locally. `BINDING_NAME` should match what you call it in your code, and `DATABASE_ID` should match the `database_id` defined in your `wrangler.toml / wrangler.json` file: for example, `--d1 DB=xxxx-xxxx-xxxx-xxxx-xxxx`.
The following example shows you how to define a Remix [`loader`](https://remix.run/docs/en/main/route/loader) that has a binding to a D1 database.
diff --git a/src/content/docs/d1/examples/d1-and-sveltekit.mdx b/src/content/docs/d1/examples/d1-and-sveltekit.mdx
index 12b48a3785dba5..e2b9885736907b 100644
--- a/src/content/docs/d1/examples/d1-and-sveltekit.mdx
+++ b/src/content/docs/d1/examples/d1-and-sveltekit.mdx
@@ -22,7 +22,7 @@ To set up a new SvelteKit site on Cloudflare Pages that can query D1:
1. **Refer to [the SvelteKit guide](/pages/framework-guides/deploy-a-svelte-kit-site/) and Svelte's [Cloudflare adapter](https://kit.svelte.dev/docs/adapter-cloudflare)**.
2. Install the Cloudflare adapter within your SvelteKit project: `npm i -D @sveltejs/adapter-cloudflare`.
3. Bind a D1 database [to your Pages Function](/pages/functions/bindings/#d1-databases).
-4. Pass the `--d1 BINDING_NAME=DATABASE_ID` flag to `wrangler dev` when developing locally. `BINDING_NAME` should match what call in your code, and `DATABASE_ID` should match the `database_id` defined in your wrangler.toml: for example, `--d1 DB=xxxx-xxxx-xxxx-xxxx-xxxx`.
+4. Pass the `--d1 BINDING_NAME=DATABASE_ID` flag to `wrangler dev` when developing locally. `BINDING_NAME` should match what you call it in your code, and `DATABASE_ID` should match the `database_id` defined in your `wrangler.toml / wrangler.json` file: for example, `--d1 DB=xxxx-xxxx-xxxx-xxxx-xxxx`.
The following example shows you how to create a server endpoint configured to query D1.
diff --git a/src/content/docs/d1/examples/query-d1-from-python-workers.mdx b/src/content/docs/d1/examples/query-d1-from-python-workers.mdx
index e5a2f198f6358d..a135c34270a1dd 100644
--- a/src/content/docs/d1/examples/query-d1-from-python-workers.mdx
+++ b/src/content/docs/d1/examples/query-d1-from-python-workers.mdx
@@ -11,6 +11,8 @@ sidebar:
description: Learn how to query D1 from a Python Worker
---
+import { WranglerConfig } from "~/components";
+
The Cloudflare Workers platform supports [multiple languages](/workers/languages/), including TypeScript, JavaScript, Rust and Python. This guide shows you how to query a D1 database from [Python](/workers/languages/python/) and deploy your application globally.
:::note
@@ -31,7 +33,7 @@ If you are new to Cloudflare Workers, refer to the [Get started guide](/workers/
## Query from Python
-This example assumes you have an existing D1 database. To allow your Python Worker to query your database, you first need to create a [binding](/workers/runtime-apis/bindings/) between your Worker and your D1 database and define this in your `wrangler.toml` configuration file.
+This example assumes you have an existing D1 database. To allow your Python Worker to query your database, you first need to create a [binding](/workers/runtime-apis/bindings/) between your Worker and your D1 database and define this in your `wrangler.toml / wrangler.json` file.
You will need the `database_name` and `database_id` for a D1 database. You can use the `wrangler` CLI to create a new database or fetch the ID for an existing database as follows:
@@ -55,9 +57,7 @@ npx wrangler d1 info some-existing-db
### 1. Configure bindings
-In your `wrangler.toml` file, create a new `[[d1_databases]]` configuration block and set `database_name` and `database_id` to the name and id (respectively) of the D1 database you want to query:
-
-import { WranglerConfig } from "~/components";
+In your Wrangler file, create a new `[[d1_databases]]` configuration block and set `database_name` and `database_id` to the name and id (respectively) of the D1 database you want to query:
@@ -79,7 +79,7 @@ The value of `binding` is how you will refer to your database from within your W
### 2. Create your Python Worker
-To create a Python Worker, create an empty file at `src/entry.py`, matching the value of `main` in your `wrangler.toml` file with the contents below:
+To create a Python Worker, create an empty file at `src/entry.py`, matching the value of `main` in your Wrangler file with the contents below:
```python
from js import Response
@@ -94,7 +94,7 @@ async def on_fetch(request, env):
```
-The value of `binding` in your `wrangler.toml` file exactly must match the name of the variable in your Python code. This example refers to the database via a `DB` binding, and query this binding via `await env.DB.prepare(...)`.
+The value of `binding` in your Wrangler file must exactly match the name of the variable in your Python code. This example refers to the database via a `DB` binding, and queries this binding via `await env.DB.prepare(...)`.
You can then deploy your Python Worker directly:
@@ -119,8 +119,8 @@ Your Worker will be available at `https://python-and-d1.YOUR_SUBDOMAIN.workers.d
If you receive an error deploying:
-- Make sure you have configured your `wrangler.toml` with the `database_id` and `database_name` of a valid D1 database.
-- Ensure `compatibility_flags = ["python_workers"]` is set in your `wrangler.toml`, which is required for Python.
+- Make sure you have configured your `wrangler.toml / wrangler.json` file with the `database_id` and `database_name` of a valid D1 database.
+- Ensure `compatibility_flags = ["python_workers"]` is set in your `wrangler.toml / wrangler.json` file, which is required for Python.
- Review the [list of error codes](/workers/observability/errors/), and ensure your code does not throw an uncaught exception.
## Next steps
diff --git a/src/content/docs/d1/get-started.mdx b/src/content/docs/d1/get-started.mdx
index a2477f988e6575..db4a97528d73e4 100644
--- a/src/content/docs/d1/get-started.mdx
+++ b/src/content/docs/d1/get-started.mdx
@@ -19,7 +19,7 @@ import {
This guide instructs you through:
-- Creating your first database using D1, Cloudflare’s native serverless SQL database.
+- Creating your first database using D1, Cloudflare's native serverless SQL database.
- Creating a schema and querying your database via the command-line.
- Connecting a [Cloudflare Worker](/workers/) to your D1 database to query your D1 database programmatically.
@@ -67,13 +67,13 @@ Create a new Worker as the means to query your database.
- testconfig.json
- vitest.config.mts
- worker-configuration.d.ts
- - **wrangler.toml**
+ - **wrangler.json**
Your new `d1-tutorial` directory includes:
- A `"Hello World"` [Worker](/workers/get-started/guide/#3-write-code) in `index.ts`.
- - A [`wrangler.toml`](/workers/wrangler/configuration/) configuration file. `wrangler.toml` is how your `d1-tutorial` Worker accesses your D1 database.
+ - A [`wrangler.toml / wrangler.json` file](/workers/wrangler/configuration/). This file is how your `d1-tutorial` Worker accesses your D1 database.
@@ -170,12 +170,12 @@ To bind your D1 database to your Worker:
-You create bindings by updating your `wrangler.toml` file.
+You create bindings by updating your Wrangler file.
1. Copy the lines obtained from [step 2](/d1/get-started/#2-create-a-database) from your terminal.
-2. Add them to the end of your `wrangler.toml` file.
+2. Add them to the end of your Wrangler file.
@@ -230,7 +230,7 @@ You create bindings by adding them to the Worker you have created.
-With `wrangler.toml` configured properly, set up your database. Use the example `schema.sql` file below to initialize your database.
+After correctly preparing your `wrangler.toml / wrangler.json` file, set up your database. Use the example `schema.sql` file below to initialize your database.
1. Copy the following code and save it as a `schema.sql` file in the `d1-tutorial` Worker directory you created in step 1:
@@ -309,7 +309,7 @@ After you have set up your database, run an SQL query from within your Worker.
```typescript
export interface Env {
- // If you set another name in wrangler.toml as the value for 'binding',
+ // If you set another name in the Wrangler config file as the value for 'binding',
// replace "DB" with the variable name you defined.
DB: D1Database;
}
@@ -338,7 +338,7 @@ After you have set up your database, run an SQL query from within your Worker.
In the code above, you:
- 1. Define a binding to your D1 database in your TypeScript code. This binding matches the `binding` value you set in `wrangler.toml` under `[[d1_databases]]`.
+ 1. Define a binding to your D1 database in your TypeScript code. This binding matches the `binding` value you set in the `wrangler.toml / wrangler.json` file under `[[d1_databases]]`.
2. Query your database using `env.DB.prepare` to issue a [prepared query](/d1/worker-api/d1-database/#prepare) with a placeholder (the `?` in the query).
3. Call `bind()` to safely and securely bind a value to that placeholder. In a real application, you would allow a user to define the `CompanyName` they want to list results for. Using `bind()` prevents users from executing arbitrary SQL (known as "SQL injection") against your application and deleting or otherwise modifying your database.
4. Execute the query by calling `all()` to return all rows (or none, if the query returns none).
diff --git a/src/content/docs/d1/reference/migrations.mdx b/src/content/docs/d1/reference/migrations.mdx
index ca152a6bc53a5d..363b0df10c0e31 100644
--- a/src/content/docs/d1/reference/migrations.mdx
+++ b/src/content/docs/d1/reference/migrations.mdx
@@ -6,6 +6,8 @@ sidebar:
---
+import { WranglerConfig } from "~/components";
+
Database migrations are a way of versioning your database. Each migration is stored as an `.sql` file in your `migrations` folder. The `migrations` folder is created in your project directory when you create your first migration. This enables you to store and track changes throughout database development.
## Features
@@ -22,9 +24,7 @@ Every migration file in the `migrations` folder has a specified version number i
By default, migrations are created in the `migrations/` folder in your Worker project directory. Creating migrations will keep a record of applied migrations in the `d1_migrations` table found in your database.
-This location and table name can be customized in your `wrangler.toml` file, inside the D1 binding.
-
-import { WranglerConfig } from "~/components";
+This location and table name can be customized in your Wrangler file, inside the D1 binding.
diff --git a/src/content/docs/d1/tutorials/build-a-comments-api/index.mdx b/src/content/docs/d1/tutorials/build-a-comments-api/index.mdx
index 012173de452f1a..aa97a6c69c271a 100644
--- a/src/content/docs/d1/tutorials/build-a-comments-api/index.mdx
+++ b/src/content/docs/d1/tutorials/build-a-comments-api/index.mdx
@@ -13,7 +13,7 @@ languages:
- SQL
---
-import { Render, PackageManagers, Stream } from "~/components";
+import { Render, PackageManagers, Stream, WranglerConfig } from "~/components";
In this tutorial, you will learn how to use D1 to add comments to a static blog site. To do this, you will construct a new D1 database, and build a JSON API that allows the creation and retrieval of comments.
@@ -88,9 +88,7 @@ You will now create a D1 database. In Wrangler v2, there is support for the `wra
npx wrangler d1 create d1-example
```
-Reference your created database in your Worker code by creating a [binding](/workers/runtime-apis/bindings/) inside of your `wrangler.toml` file, Wrangler's configuration file. Bindings allow us to access Cloudflare resources, like D1 databases, KV namespaces, and R2 buckets, using a variable name in code. In `wrangler.toml`, set up the binding `DB` and connect it to the `database_name` and `database_id`:
-
-import { WranglerConfig } from "~/components";
+Reference your created database in your Worker code by creating a [binding](/workers/runtime-apis/bindings/) inside of your Wrangler configuration file. Bindings allow us to access Cloudflare resources, like D1 databases, KV namespaces, and R2 buckets, using a variable name in code. In the `wrangler.toml / wrangler.json` file, set up the binding `DB` and connect it to the `database_name` and `database_id`:
@@ -103,7 +101,7 @@ database_id = "4e1c28a9-90e4-41da-8b4b-6cf36e5abb29"
-With your binding configured in your `wrangler.toml` file, you can interact with your database from the command line, and inside your Workers function.
+With your binding configured in your Wrangler file, you can interact with your database from the command line, and inside your Workers function.
## 4. Interact with D1
@@ -149,7 +147,7 @@ npx wrangler d1 execute d1-example --remote --file schemas/schema.sql
## 5. Execute SQL
-In earlier steps, you created a SQL database and populated it with initial data. Now, you will add a route to your Workers function to retrieve data from that database. Based on your `wrangler.toml` configuration in previous steps, your D1 database is now accessible via the `DB` binding. In your code, use the binding to prepare SQL statements and execute them, for example, to retrieve comments:
+In earlier steps, you created a SQL database and populated it with initial data. Now, you will add a route to your Workers function to retrieve data from that database. Based on your Wrangler configuration in previous steps, your D1 database is now accessible via the `DB` binding. In your code, use the binding to prepare SQL statements and execute them, for example, to retrieve comments:
```js
app.get("/api/posts/:slug/comments", async (c) => {
@@ -205,7 +203,7 @@ With your application ready for deployment, use Wrangler to build and deploy you
Begin by running `wrangler whoami` to confirm that you are logged in to your Cloudflare account. If you are not logged in, Wrangler will prompt you to login, creating an API key that you can use to make authenticated requests automatically from your local machine.
-After you have logged in, confirm that your `wrangler.toml` file is configured similarly to what is seen below. You can change the `name` field to a project name of your choice:
+After you have logged in, confirm that your Wrangler file is configured similarly to what is seen below. You can change the `name` field to a project name of your choice:
diff --git a/src/content/docs/d1/tutorials/build-a-staff-directory-app/index.mdx b/src/content/docs/d1/tutorials/build-a-staff-directory-app/index.mdx
index 486bc38f373ea1..ae25e1d87afd4a 100644
--- a/src/content/docs/d1/tutorials/build-a-staff-directory-app/index.mdx
+++ b/src/content/docs/d1/tutorials/build-a-staff-directory-app/index.mdx
@@ -13,6 +13,8 @@ languages:
- SQL
---
+import { WranglerConfig } from "~/components";
+
In this tutorial, you will learn how to use D1 to build a staff directory. This application will allow users to access information about an organization's employees and give admins the ability to add new employees directly within the app.
To do this, you will first need to set up a [D1 database](/d1/get-started/) to manage data seamlessly, then you will develop and deploy your application using the [HonoX Framework](https://github.com/honojs/honox) and [Cloudflare Pages](/pages).
@@ -68,11 +70,9 @@ To create a database for your project, use the Cloudflare CLI tool, [Wrangler](/
npx wrangler d1 create staff-directory
```
-After creating your database, you will need to set up a [binding](/workers/runtime-apis/bindings/) in the Wrangler configuration file to integrate your database with your application.
+After creating your database, you will need to set up a [binding](/workers/runtime-apis/bindings/) in the `wrangler.toml / wrangler.json` file to integrate your database with your application.
-This binding enables your application to interact with Cloudflare resources such as D1 databases, KV namespaces, and R2 buckets. To configure this, create a `wrangler.toml` file in your project's root directory and input the basic setup information:
-
-import { WranglerConfig } from "~/components";
+This binding enables your application to interact with Cloudflare resources such as D1 databases, KV namespaces, and R2 buckets. To configure this, create a Wrangler file in your project's root directory and input the basic setup information:
@@ -83,7 +83,7 @@ compatibility_date = "2023-12-01"
-Next, add the database binding details to your `wrangler.toml` file. This involves specifying a binding name (in this case, `DB`), which will be used to reference the database within your application, along with the `database_name` and `database_id` provided when you created the database:
+Next, add the database binding details to your Wrangler file. This involves specifying a binding name (in this case, `DB`), which will be used to reference the database within your application, along with the `database_name` and `database_id` provided when you created the database:
@@ -173,7 +173,7 @@ To execute the schema locally and seed data into your local directory, pass the
## 5. Create SQL statements
-After setting up your D1 database and configuring the `wrangler.toml` file as outlined in previous steps, your database is accessible in your code through the `DB` binding. This allows you to directly interact with the database by preparing and executing SQL statements. In the following step, you will learn how to use this binding to perform common database operations such as retrieving data and inserting new records.
+After setting up your D1 database and configuring the Wrangler file as outlined in previous steps, your database is accessible in your code through the `DB` binding. This allows you to directly interact with the database by preparing and executing SQL statements. In the following step, you will learn how to use this binding to perform common database operations such as retrieving data and inserting new records.
### Retrieve data from database
@@ -377,7 +377,7 @@ Use the `wrangler r2 bucket create` command to create a bucket:
wrangler r2 bucket create employee-avatars
```
-Once the bucket is created, add the R2 bucket binding to your `wrangler.toml` file:
+Once the bucket is created, add the R2 bucket binding to your Wrangler file:
@@ -428,7 +428,7 @@ if (imageFile instanceof File) {
With your application ready for deployment, you can use Wrangler to build and deploy your project to the Cloudflare Network. Ensure you are logged in to your Cloudflare account by running the `wrangler whoami` command. If you are not logged in, Wrangler will prompt you to login by creating an API key that you can use to make authenticated requests automatically from your computer.
-After successful login, confirm that your `wrangler.toml` file is configured similarly to the code block below:
+After successful login, confirm that your Wrangler file is configured similarly to the code block below:
diff --git a/src/content/docs/d1/tutorials/build-an-api-to-access-d1/index.mdx b/src/content/docs/d1/tutorials/build-an-api-to-access-d1/index.mdx
index bb92a93b867cce..d69f1becdea36b 100644
--- a/src/content/docs/d1/tutorials/build-an-api-to-access-d1/index.mdx
+++ b/src/content/docs/d1/tutorials/build-an-api-to-access-d1/index.mdx
@@ -220,7 +220,7 @@ Make a note of the displayed `database_name` and `database_id`. You will use thi
## 7. Add a binding
-1. From your `d1-http` folder, open the `wrangler.toml` file, Wrangler's configuration file.
+1. From your `d1-http` folder, open the Wrangler configuration file.
2. Add the following binding in the file. Make sure that the `database_name` and the `database_id` are correct.
diff --git a/src/content/docs/d1/tutorials/d1-and-prisma-orm/index.mdx b/src/content/docs/d1/tutorials/d1-and-prisma-orm/index.mdx
index 7542b338e88da6..19c11b7aca284b 100644
--- a/src/content/docs/d1/tutorials/d1-and-prisma-orm/index.mdx
+++ b/src/content/docs/d1/tutorials/d1-and-prisma-orm/index.mdx
@@ -11,6 +11,8 @@ languages:
- SQL
---
+import { WranglerConfig } from "~/components";
+
## What is Prisma ORM?
[Prisma ORM](https://www.prisma.io/orm) is a next-generation JavaScript and TypeScript ORM that unlocks a new level of developer experience when working with databases thanks to its intuitive data model, automated migrations, type-safety and auto-completion.
@@ -112,9 +114,7 @@ database_id = "__YOUR_D1_DATABASE_ID__"
You now have a D1 database in your Cloudflare account with a binding to your Cloudflare Worker.
-Copy the last part of the command output and paste it into your `wrangler.toml` file. It should look similar to this:
-
-import { WranglerConfig } from "~/components";
+Copy the last part of the command output and paste it into your Wrangler file. It should look similar to this:
diff --git a/src/content/docs/d1/worker-api/d1-database.mdx b/src/content/docs/d1/worker-api/d1-database.mdx
index 4054166f9375da..993939f1f33ce1 100644
--- a/src/content/docs/d1/worker-api/d1-database.mdx
+++ b/src/content/docs/d1/worker-api/d1-database.mdx
@@ -11,7 +11,7 @@ To interact with your D1 database from your Worker, you need to access it throug
```js
async fetch(request, env) {
- // D1 database is 'env.DB', where "DB" is the binding name from the Wrangler.toml file.
+ // D1 database is 'env.DB', where "DB" is the binding name from the `wrangler.toml / wrangler.json` file.
}
```
diff --git a/src/content/docs/data-localization/how-to/index.mdx b/src/content/docs/data-localization/how-to/index.mdx
index 73e9ef59490a67..9bf6b48d10307a 100644
--- a/src/content/docs/data-localization/how-to/index.mdx
+++ b/src/content/docs/data-localization/how-to/index.mdx
@@ -3,10 +3,9 @@ title: Configuration guides
pcx_content_type: navigation
sidebar:
order: 7
-
---
-import { DirectoryListing } from "~/components"
+import { DirectoryListing } from "~/components";
Learn how to use Cloudflare products with the Data Localization Suite.
@@ -24,6 +23,6 @@ curl -X GET -I https:/// 2>&1 | grep cf-ray
curl -s https:///cdn-cgi/trace | grep "colo="
```
-The first command will return a three-letter IATA code in the [CF-Ray](/fundamentals/reference/http-request-headers/#cf-ray) header, indicating the Cloudflare data center location of processing and/or TLS termination. The second command will directly return the three-letter IATA code.
+The first command will return a three-letter IATA code in the [Cf-Ray](/fundamentals/reference/http-headers/#cf-ray) header, indicating the Cloudflare data center location of processing and/or TLS termination. The second command will directly return the three-letter IATA code.
For example, when a hostname is configured to use the region European Union (EU), the three-letter IATA code will always return a data center inside of the EU.
diff --git a/src/content/docs/ddos-protection/advanced-ddos-systems/concepts.mdx b/src/content/docs/ddos-protection/advanced-ddos-systems/concepts.mdx
index 48a4783509e09f..64a73c5fdb9277 100644
--- a/src/content/docs/ddos-protection/advanced-ddos-systems/concepts.mdx
+++ b/src/content/docs/ddos-protection/advanced-ddos-systems/concepts.mdx
@@ -58,7 +58,8 @@ Besides defining rules with one of the above scopes, you must also select the [p
The Advanced TCP Protection system constantly learns your TCP connections to mitigate DDoS attacks. Advanced TCP Protection rules can have one of the following execution modes: monitoring, mitigation (enabled), or disabled.
- **Monitoring**
- - In this mode, Advanced TCP Protection will not impact any packets. Instead, the protection system will learn your legitimate TCP connections and show you what it would have mitigated. Check Network Analytics to visualize what actions Advanced TCP Protection would have taken on incoming packets, according to the current configuration.
+ - In this mode, Advanced TCP Protection will not impact any packets. Instead, the protection system will learn your legitimate TCP connections and show you what it would have mitigated. Check Network Analytics to visualize what actions Advanced TCP Protection would have taken on incoming packets, according to the current configuration.
+
- **Mitigation (Enabled)**
- In this mode, Advanced TCP Protection will learn your legitimate TCP connections and perform mitigation actions on incoming TCP DDoS attacks based on the rule configuration (burst and rate sensitivity) and your [allowlist](/ddos-protection/advanced-ddos-systems/concepts/#allowlist).
@@ -99,7 +100,9 @@ The default rate sensitivity and recommended setting is _Low_. You should only i
## Filter
- The filter expression can reference source and destination IP addresses and ports. Each system component (SYN flood protection and out-of-state TCP protection) should have one or more [rules](#rule), but filters are optional.
+
+
+The filter expression can reference source and destination IP addresses and ports. Each system component (SYN flood protection and out-of-state TCP protection) should have one or more [rules](#rule), but filters are optional.
Each system component has its own filters. You can configure a filter for each execution mode:
diff --git a/src/content/docs/ddos-protection/managed-rulesets/network/override-parameters.mdx b/src/content/docs/ddos-protection/managed-rulesets/network/override-parameters.mdx
index 74e719712975c6..f50027b67b3426 100644
--- a/src/content/docs/ddos-protection/managed-rulesets/network/override-parameters.mdx
+++ b/src/content/docs/ddos-protection/managed-rulesets/network/override-parameters.mdx
@@ -26,7 +26,7 @@ The action performed for packets that match specific rules of Cloudflare's DDoS
- **Log**
- API value: `"log"`.
- - Only available on Enterprise plans. Logs requests that match the expression of a rule detecting network layer DDoS attacks. Recommended for validating a rule before committing to a more severe action.
+ - Only available on Enterprise plans. Logs requests that match the expression of a rule detecting network layer DDoS attacks. Recommended for validating a rule before committing to a more severe action.
- **Block**
- API value: `"block"`.
diff --git a/src/content/docs/developer-spotlight/index.mdx b/src/content/docs/developer-spotlight/index.mdx
index 2c39852e98fa30..f4c3f6617b81a3 100644
--- a/src/content/docs/developer-spotlight/index.mdx
+++ b/src/content/docs/developer-spotlight/index.mdx
@@ -13,6 +13,13 @@ Applications are currently open until Thursday, the 24th of October 2024. To app
## View latest contributions
+
+ By Mackenly Jones
+
+
-
-```sh
-pnpm create cloudflare@latest
-```
-
-
-
-```sh
-npm create cloudflare@latest
-```
-
-
-
-```sh
-yarn create cloudflare@latest
-```
-
-
+
In this tutorial, the Worker will be named `cms-sitemap`.
@@ -90,15 +72,13 @@ yarn add @sanity/client
-## Configure wrangler.toml
-
-A default `wrangler.toml` was generated in the previous step.
+## Configure Wrangler
-The `wrangler.toml` file is a configuration file used to specify project settings and deployment configurations in a structured format.
+A default `wrangler.json` was generated in the previous step.
-For this tutorial your `wrangler.toml` should be similar to the following:
+The Wrangler file is a configuration file used to specify project settings and deployment configurations in a structured format.
-import { WranglerConfig } from "~/components";
+For this tutorial your `wrangler.toml / wrangler.json` file should be similar to the following:
@@ -122,7 +102,7 @@ You must update the `[vars]` section to match your needs. See the inline comment
:::caution
-Secrets do not belong in `wrangler.toml`. If you need to add secrets, use `.dev.vars` for local secrets and the `wranger secret put` command for deploying secrets. For more information, refer to [Secrets](/workers/configuration/secrets/).
+Secrets do not belong in `wrangler.toml / wrangler.json` files. If you need to add secrets, use `.dev.vars` for local secrets and the `wrangler secret put` command for deploying secrets. For more information, refer to [Secrets](/workers/configuration/secrets/).
:::
@@ -145,7 +125,7 @@ Paste the following code into the existing `index.ts|js` file:
* - Open a browser tab at http://localhost:8787/ to see your worker in action
* - Run `npm run deploy` to publish your worker
*
- * Bind resources to your worker in `wrangler.toml`. After adding bindings, a type definition for the
+ * Bind resources to your worker in the Wrangler config file. After adding bindings, a type definition for the
* `Env` object can be regenerated with `npm run cf-typegen`.
*
* Learn more at https://developers.cloudflare.com/workers/
diff --git a/src/content/docs/developer-spotlight/tutorials/creating-a-recommendation-api.mdx b/src/content/docs/developer-spotlight/tutorials/creating-a-recommendation-api.mdx
index 4de7150c02e40b..17fd82faf6b456 100644
--- a/src/content/docs/developer-spotlight/tutorials/creating-a-recommendation-api.mdx
+++ b/src/content/docs/developer-spotlight/tutorials/creating-a-recommendation-api.mdx
@@ -22,7 +22,13 @@ sidebar:
order: 2
---
-import { Render, TabItem, Tabs } from "~/components";
+import {
+ Render,
+ TabItem,
+ Tabs,
+ PackageManagers,
+ WranglerConfig,
+} from "~/components";
E-commerce and media sites often work on increasing the average transaction value to boost profitability. One of the strategies to increase the average transaction value is "cross-selling," which involves recommending related products. Cloudflare offers a range of products designed to build mechanisms for retrieving data related to the products users are viewing or requesting. In this tutorial, you will experience developing functionalities necessary for cross-selling by creating APIs for related product searches and product recommendations.
@@ -61,25 +67,11 @@ First, let's create a Cloudflare Workers project.
To efficiently create and manage multiple APIs, let's use [`Hono`](https://hono.dev). Hono is an open-source application framework released by a Cloudflare Developer Advocate. It is lightweight and allows for the creation of multiple API paths, as well as efficient request and response handling.
Open your command line interface (CLI) and run the following command:
-
-
-```sh
-npm create cloudflare@latest cross-sell-api -- --framework hono
-```
-
-
-
-```sh
-yarn create cloudflare@latest cross-sell-api -- --framework hono
-```
-
-
-
-```sh
-pnpm create cloudflare@latest cross-sell-api -- --framework hono
-```
-
-
+
If this is your first time running the `C3` command, you will be asked whether you want to install it. Confirm that the package name for installation is `create-cloudflare` and answer `y`.
@@ -173,13 +165,13 @@ Let's start implementing step-by-step.
### Bind Workers AI and Vectorize to your Worker
-This API requires the use of Workers AI and Vectorize. To use these resources from a Worker, you will need to first create the resources then [bind](/workers/runtime-apis/bindings/#what-is-a-binding) them to a Worker. First, let's create a Vectorize index with Wrangler using the command `wrangler vectorize create {index_name} --dimensions={number_of_dimensions} --metric={similarity_metric}`. The values for `dimensions` and `metric` depend on the type of [Text Embedding Model](/workers-ai/models/#text-embeddings) you are using for data vectorization (Embedding). For example, if you are using the `bge-large-en-v1.5` model, the command is:
+This API requires the use of Workers AI and Vectorize. To use these resources from a Worker, you will need to first create the resources then [bind](/workers/runtime-apis/bindings/#what-is-a-binding) them to a Worker. First, let's create a Vectorize index with Wrangler using the command `wrangler vectorize create {index_name} --dimensions={number_of_dimensions} --metric={similarity_metric}`. The values for `dimensions` and `metric` depend on the type of [Text Embedding Model](/workers-ai/models/) you are using for data vectorization (Embedding). For example, if you are using the `bge-large-en-v1.5` model, the command is:
```sh
npx wrangler vectorize create stripe-products --dimensions=1024 --metric=cosine
```
-When this command executes successfully, you will see a message like the following. It provides the items you need to add to `wrangler.toml` to bind the Vectorize index with your Worker application. Copy the three lines starting with `[[vectorize]]`.
+When this command executes successfully, you will see a message like the following. It provides the items you need to add to the `wrangler.toml / wrangler.json` file to bind the Vectorize index with your Worker application. Copy the three lines starting with `[[vectorize]]`.
```sh
✅ Successfully created a new Vectorize index: 'stripe-products'
@@ -190,9 +182,7 @@ binding = "VECTORIZE_INDEX"
index_name = "stripe-products"
```
-To use the created Vectorize index from your Worker, let's add the binding. Open `wrangler.toml` and add the copied lines.
-
-import { WranglerConfig } from "~/components";
+To use the created Vectorize index from your Worker, let's add the binding. Open the `wrangler.toml / wrangler.json` file and add the copied lines.
@@ -208,9 +198,7 @@ index_name = "stripe-products"
-Additionally, let's add the configuration to use Workers AI in `wrangler.toml`.
-
-
+Additionally, let's add the configuration to use Workers AI in the `wrangler.toml / wrangler.json` file.
diff --git a/src/content/docs/developer-spotlight/tutorials/custom-access-control-for-files.mdx b/src/content/docs/developer-spotlight/tutorials/custom-access-control-for-files.mdx
index 1edce6b17cd6dc..040332d4b8b92e 100644
--- a/src/content/docs/developer-spotlight/tutorials/custom-access-control-for-files.mdx
+++ b/src/content/docs/developer-spotlight/tutorials/custom-access-control-for-files.mdx
@@ -17,7 +17,7 @@ spotlight:
author_bio_source: GitHub
---
-import { Render, PackageManagers } from "~/components";
+import { Render, PackageManagers, WranglerConfig } from "~/components";
This tutorial gives you an overview on how to create a TypeScript-based Cloudflare Worker which allows you to control file access based on a simple username and password authentication. To achieve this, we will use a [D1 database](/d1/) for user management and an [R2 bucket](/r2/) for file storage.
@@ -70,9 +70,7 @@ Replace `` with the name you want to use for your database.
After the database is successfully created, you will see the data for the binding displayed as an output.
The binding declaration will start with `[[d1_databases]]` and contain the binding name, database name and ID.
-To use the database in your worker, you will need to add the binding to your `wrangler.toml` file, by copying the declaration and pasting it into the wrangler file, as shown in the example below.
-
-import { WranglerConfig } from "~/components";
+To use the database in your worker, you will need to add the binding to your Wrangler file by copying the declaration and pasting it into the file, as shown in the example below.
@@ -96,7 +94,7 @@ npx wrangler r2 bucket create
```
This works similar to the D1 database creation, where you will need to replace `` with the name you want to use for your bucket.
-To do this, go to the `wrangler.toml` file again and then add the following lines:
+To do this, go to the Wrangler file again and then add the following lines:
@@ -112,7 +110,7 @@ bucket_name = ""
Now that you have prepared the Wrangler configuration, you should update the `worker-configuration.d.ts` file to include the new bindings.
This file will then provide TypeScript with the correct type definitions for the bindings, which allows for type checking and code completion in your editor.
-You could either update it manually or run the following command in the directory of your project to update it automatically based on the wrangler configuration file (recommended).
+You could either update it manually or run the following command in the directory of your project to update it automatically based on the `wrangler.toml / wrangler.json` file (recommended).
```sh
npm run cf-typegen
diff --git a/src/content/docs/developer-spotlight/tutorials/fullstack-authentication-with-next-js-and-cloudflare-d1.mdx b/src/content/docs/developer-spotlight/tutorials/fullstack-authentication-with-next-js-and-cloudflare-d1.mdx
new file mode 100644
index 00000000000000..228132595a5190
--- /dev/null
+++ b/src/content/docs/developer-spotlight/tutorials/fullstack-authentication-with-next-js-and-cloudflare-d1.mdx
@@ -0,0 +1,451 @@
+---
+updated: 2025-01-13
+difficulty: Intermediate
+content_type: 📝 Tutorial
+pcx_content_type: tutorial
+title: Set Up Fullstack Authentication with Next.js, Auth.js, and Cloudflare D1
+products:
+ - Workers
+ - D1
+languages:
+ - TypeScript
+spotlight:
+ author: Mackenly Jones
+ author_bio_link: https://github.com/mackenly
+ author_bio_source: GitHub
+---
+
+import {
+ Render,
+ PackageManagers,
+ Type,
+ TypeScriptExample,
+ FileTree,
+} from "~/components";
+
+In this tutorial, you will build a [Next.js app](/workers/frameworks/framework-guides/nextjs/) with authentication powered by Auth.js, Resend, and [Cloudflare D1](/d1/).
+
+Before continuing, make sure you have a Cloudflare account and have installed and [authenticated Wrangler](https://developers.cloudflare.com/workers/wrangler/commands/#login). Some experience with HTML, CSS, and JavaScript/TypeScript is helpful but not required. In this tutorial, you will learn:
+
+- How to create a Next.js application and run it on Cloudflare Workers
+- How to bind a Cloudflare D1 database to your Next.js app and use it to store authentication data
+- How to use Auth.js to add serverless fullstack authentication to your Next.js app
+
+You can find the finished code for this project on [GitHub](https://github.com/mackenly/auth-js-d1-example).
+
+## Prerequisites
+
+
+
+3. Create or login to a [Resend account](https://resend.com/signup) and get an [API key](https://resend.com/docs/dashboard/api-keys/introduction#add-api-key).
+4. [Install and authenticate Wrangler](/workers/wrangler/install-and-update/).
+
+## 1. Create a Next.js app using Workers
+
+From within the repository or directory where you want to create your project run:
+
+
+
+
+
+This will create a new Next.js project using [OpenNext](https://opennext.js.org/cloudflare) that will run in a Worker using [Workers Static Assets](/workers/frameworks/framework-guides/nextjs/#static-assets).
+
+Before we get started, open your project's `tsconfig.json` file and add the following to the `compilerOptions` object to allow for top level await needed to let our application get the Cloudflare context:
+
+```json title="tsconfig.json"
+{
+ "compilerOptions": {
+ "target": "ES2022"
+ }
+}
+```
+
+Throughout this tutorial, we'll add several values to Cloudflare Secrets. For [local development](/workers/configuration/secrets/#local-development-with-secrets), add those same values to a file in the top level of your project called `.dev.vars` and make sure it is not committed into version control. This will let you work with Secret values locally. Go ahead and copy and paste the following into `.dev.vars` for now and replace the values as we go.
+
+```sh title=".dev.vars"
+AUTH_SECRET = ""
+AUTH_RESEND_KEY = ""
+AUTH_EMAIL_FROM = "onboarding@resend.dev"
+AUTH_URL = "http://localhost:8787/"
+```
+
+:::note[Manually set URL]
+Within the Workers environment, the `AUTH_URL` doesn't always get picked up automatically by Auth.js, which is why we're specifying it manually here (we'll need to do the same for prod later).
+:::
+
+## 2. Install Auth.js
+
+Following the [installation instructions](https://authjs.dev/getting-started/installation?framework=Next.js) from Auth.js, begin by installing Auth.js:
+
+
+
+Now run the following to generate an `AUTH_SECRET`:
+
+```sh
+npx auth secret
+```
+
+Now, deviating from the standard Auth.js setup, locate your generated secret (likely in a file named `.env.local`) and [add the secret to your Workers application](/workers/configuration/secrets/#adding-secrets-to-your-project) by running the following and completing the steps to add a secret's value that we just generated:
+
+```sh
+npx wrangler secret put AUTH_SECRET
+```
+
+After adding the secret, update your `.dev.vars` file to include an `AUTH_SECRET` value (this secret should be different from the one you generated earlier for security purposes):
+
+```sh title=".dev.vars"
+# ...
+AUTH_SECRET = ""
+# ...
+```
+
+Next, go into the newly generated `env.d.ts` file and add the following to the interface:
+
+```ts title="env.d.ts"
+interface CloudflareEnv {
+ AUTH_SECRET: string;
+}
+```
+
+## 3. Install Cloudflare D1 Adapter
+
+Now, install the Auth.js D1 adapter by running:
+
+
+
+Create a D1 database using the following command:
+
+```sh title="Create D1 database"
+npx wrangler d1 create auth-js-d1-example-db
+```
+
+When finished you should see instructions to add the database binding to your `wrangler.toml`. Example binding:
+
+```toml title="wrangler.toml"
+[[d1_databases]]
+binding = "DB"
+database_name = "auth-js-d1-example-db"
+database_id = ""
+```
+
+Now, within your `env.d.ts`, add your D1 binding, like:
+
+```ts title="env.d.ts"
+interface CloudflareEnv {
+ DB: D1Database;
+ AUTH_SECRET: string;
+}
+```
+
+## 4. Configure Credentials Provider
+
+Auth.js provides integrations for many different [credential providers](https://authjs.dev/getting-started/authentication) such as Google, GitHub, etc. For this tutorial we're going to use [Resend for magic links](https://authjs.dev/getting-started/authentication/email). You should have already created a Resend account and have an [API key](https://resend.com/docs/dashboard/api-keys/introduction#add-api-key).
+
+Using either a [Resend verified domain email address](https://resend.com/docs/dashboard/domains/introduction) or `onboarding@resend.dev`, add a new Secret to your Worker containing the email your magic links will come from:
+
+```sh title="Add Resend email to secrets"
+npx wrangler secret put AUTH_EMAIL_FROM
+```
+
+Next, ensure the `AUTH_EMAIL_FROM` environment variable is updated in your `.dev.vars` file with the email you just added as a secret:
+
+```sh title=".dev.vars"
+# ...
+AUTH_EMAIL_FROM = "onboarding@resend.dev"
+# ...
+```
+
+Now [create a Resend API key](https://resend.com/docs/dashboard/api-keys/introduction) with `Sending access` and add it to your Worker's Secrets:
+
+```sh title="Add Resend API key to secrets"
+npx wrangler secret put AUTH_RESEND_KEY
+```
+
+As with previous secrets, update your `.dev.vars` file with the new secret value for `AUTH_RESEND_KEY` to use in local development:
+
+```sh title=".dev.vars"
+# ...
+AUTH_RESEND_KEY = ""
+# ...
+```
+
+After adding both of those Secrets, your `env.d.ts` should now include the following:
+
+```ts title="env.d.ts"
+interface CloudflareEnv {
+ DB: D1Database;
+ AUTH_SECRET: string;
+ AUTH_RESEND_KEY: string;
+ AUTH_EMAIL_FROM: string;
+}
+```
+
+Credential providers and database adapters are provided to Auth.js through a configuration file called `auth.ts`. Create a file within your `src/app/` directory called `auth.ts` with the following contents:
+
+
+```ts
+import NextAuth from "next-auth";
+import { NextAuthResult } from "next-auth";
+import { D1Adapter } from "@auth/d1-adapter";
+import Resend from "next-auth/providers/resend";
+import { getCloudflareContext } from "@opennextjs/cloudflare";
+
+const authResult = async (): Promise<NextAuthResult> => {
+ return NextAuth({
+ providers: [
+ Resend({
+ apiKey: (await getCloudflareContext()).env.AUTH_RESEND_KEY,
+ from: (await getCloudflareContext()).env.AUTH_EMAIL_FROM,
+ }),
+ ],
+ adapter: D1Adapter((await getCloudflareContext()).env.DB),
+ });
+};
+
+export const { handlers, signIn, signOut, auth } = await authResult();
+```
+
+
+Now, lets add the route handler and middleware used to authenticate and persist sessions.
+
+Create a new directory structure and route handler within `src/app/api/auth/[...nextauth]` called `route.ts`. The file should contain:
+
+
+```ts
+import { handlers } from "../../../auth";
+
+export const { GET, POST } = handlers;
+```
+
+
+Now, within the `src/` directory, create a `middleware.ts` file to persist session data containing the following:
+
+
+```ts
+export { auth as middleware } from "./app/auth";
+```
+
+
+## 5. Create Database Tables
+
+The D1 adapter requires that tables be created within your database. It [recommends](https://authjs.dev/getting-started/adapters/d1#migrations) using the exported `up()` method to complete this. Within `src/app/api/` create a directory called `setup` containing a file called `route.ts`. Within this route handler, add the following code:
+
+
+```ts
+import type { NextRequest } from 'next/server';
+import { up } from "@auth/d1-adapter";
+import { getCloudflareContext } from "@opennextjs/cloudflare";
+
+export async function GET(request: NextRequest) {
+ try {
+ await up((await getCloudflareContext()).env.DB)
+ } catch (e: any) {
+ console.log(e.cause.message, e.message)
+ }
+ return new Response('Migration completed');
+}
+
+```
+
+
+You'll need to run this once on your production database to create the necessary tables. If you're following along with this tutorial, we'll run it together in a few steps.
+
+:::note[Clean up]
+Running this multiple times won't hurt anything since the tables are only created if they do not already exist, but it's a good idea to remove this route from your production code once you've run it since you won't need it anymore.
+:::
+
+Before we go further, make sure you've created all of the necessary files:
+
+- src/
+ - app/
+ - api/
+ - auth/
+ - [...nextauth]/
+ - route.ts
+ - setup/
+ - route.ts
+ - auth.ts
+ - page.ts
+ - middleware.ts
+- env.d.ts
+- wrangler.toml
+
+
+## 6. Build Sign-in Interface
+We've completed the backend steps for our application. Now, we need a way to sign in. First, let's install [shadcn](https://ui.shadcn.com/):
+```sh title="Install shadcn"
+npx shadcn@latest init -d
+```
+
+Next, run the following to add a few components:
+```sh title="Add components"
+npx shadcn@latest add button input card avatar label
+```
+
+To make it easy, we've provided a basic sign-in interface for you below that you can copy into your app. You will likely want to customize this to fit your needs, but for now, this will let you sign in, see your account details, and update your user's name.
+
+Replace the contents of `page.ts` from within the `app/` directory with the following:
+
+```ts title="src/app/page.ts"
+import { redirect } from 'next/navigation';
+import { signIn, signOut, auth } from './auth';
+import { updateRecord } from '@auth/d1-adapter';
+import { getCloudflareContext } from '@opennextjs/cloudflare';
+import { Button } from '@/components/ui/button';
+import { Input } from '@/components/ui/input';
+import { Card, CardContent, CardDescription, CardHeader, CardTitle, CardFooter } from '@/components/ui/card';
+import { Avatar, AvatarFallback, AvatarImage } from '@/components/ui/avatar';
+import { Label } from '@/components/ui/label';
+
+async function updateName(formData: FormData): Promise<void> {
+ 'use server';
+ const session = await auth();
+ if (!session?.user?.id) {
+ return;
+ }
+ const name = formData.get('name') as string;
+ if (!name) {
+ return;
+ }
+ const query = `UPDATE users SET name = $1 WHERE id = $2`;
+ await updateRecord((await getCloudflareContext()).env.DB, query, [name, session.user.id]);
+ redirect('/');
+}
+
+export default async function Home() {
+ const session = await auth();
+ return (
+
+
+
+ {session ? 'User Profile' : 'Login'}
+
+ {session ? 'Manage your account' : 'Welcome to the auth-js-d1-example demo'}
+
+
+
+ {session ? (
+
+
+
+
+ {session.user?.name?.[0] || 'U'}
+
+
+
{session.user?.name || 'No name set'}
+
{session.user?.email}
+
+
+
+
User ID: {session.user?.id}
+
+
+
+ ) : (
+
+ )}
+
+ {session && (
+
+
+
+ )}
+
+
+ );
+}
+```
+
+## 7. Preview and Deploy
+
+Now, it's time to preview our app. Run the following to preview your application:
+
+
+
+:::caution[Windows support]
+OpenNext has [limited Windows support](https://opennext.js.org/cloudflare#windows-support) and recommends using WSL2 if developing on Windows.
+:::
+
+You should see our login form. But wait, we're not done yet. Remember to create your database tables by visiting `/api/setup`. You should see `Migration completed`. This means your database is ready to go.
+
+Navigate back to your application's homepage. Enter your email and sign in (use the same email as your Resend account if you used the `onboarding@resend.dev` address). You should receive an email in your inbox (check spam). Follow the link to sign in. If everything is configured correctly, you should now see a basic user profile letting you update your name and sign out.
+
+Now let's deploy our application to production. From within the project's directory run:
+
+
+
+This will build and deploy your application as a Worker. Note that you may need to select which account you want to deploy your Worker to. After your app is deployed, Wrangler should give you the URL on which it was deployed. It might look something like this: `https://auth-js-d1-example.example.workers.dev`. Add your URL to your Worker using:
+
+```sh title="Add URL to secrets"
+npx wrangler secret put AUTH_URL
+```
+
+After the changes are deployed, you should now be able to access and try out your new application.
+
+You have successfully created, configured, and deployed a fullstack Next.js application with authentication powered by Auth.js, Resend, and Cloudflare D1.
+
+## Related resources
+
+To build more with Workers, refer to [Tutorials](/workers/tutorials/).
+
+Find more information about the tools and services used in this tutorial at:
+
+- [Auth.js](https://authjs.dev/getting-started)
+- [Resend](https://resend.com/)
+- [Cloudflare D1](/d1/)
+
+If you have any questions, need assistance, or would like to share your project, join the Cloudflare Developer community on [Discord](https://discord.cloudflare.com) to connect with other developers and the Cloudflare team.
diff --git a/src/content/docs/dns/additional-options/dns-zone-defaults.mdx b/src/content/docs/dns/additional-options/dns-zone-defaults.mdx
index ab41eb6128202a..ac1ba3ed3c228a 100644
--- a/src/content/docs/dns/additional-options/dns-zone-defaults.mdx
+++ b/src/content/docs/dns/additional-options/dns-zone-defaults.mdx
@@ -1,12 +1,11 @@
---
pcx_content_type: how-to
-title: Zone defaults
+title: Configure DNS zone defaults
sidebar:
order: 3
+ label: Zone defaults
---
-# Configure DNS zone defaults
-
While there are default values for DNS settings that Cloudflare applies to all new zones, Enterprise accounts have the option to configure their own DNS zone defaults according to their preference.
:::caution
@@ -16,7 +15,7 @@ DNS zone defaults are only applied at the moment a new zone is created and will
## Steps
1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com/login) and select your account.
-2. Go to **Manage Account** > **Configurations** > **DNS Settings**.
+2. Go to **Manage Account** > **Configurations** > **DNS Settings**. If these options are not displayed on your Cloudflare dashboard, you may need to reach out to your account team to have them added.
3. For **DNS zone defaults**, select **Configure defaults**.
The values you select for the listed settings will be automatically applied to new zones as you add them to your Cloudflare account.
@@ -35,4 +34,4 @@ For secondary zones:
- [Secondary DNS override](/dns/zone-setups/zone-transfers/cloudflare-as-secondary/proxy-traffic/): Enable the options to use Cloudflare [proxy](/dns/manage-dns-records/reference/proxied-dns-records/) and add `CNAME` records at your zone apex.
- Multi-provider DNS does not apply as a setting for secondary zones, as this is already a required behavior for this setup. `SOA` record and the `NS` record TTL are defined on your external DNS provider and only transferred into Cloudflare.
\ No newline at end of file
+ Multi-provider DNS does not apply as a setting for secondary zones, as this is already a required behavior for this setup. `SOA` record and the `NS` record TTL are defined on your external DNS provider and only transferred into Cloudflare.
diff --git a/src/content/docs/durable-objects/api/namespace.mdx b/src/content/docs/durable-objects/api/namespace.mdx
index 62462968882252..bafef513d287c3 100644
--- a/src/content/docs/durable-objects/api/namespace.mdx
+++ b/src/content/docs/durable-objects/api/namespace.mdx
@@ -11,7 +11,7 @@ import { Render, Tabs, TabItem, GlossaryTooltip } from "~/components";
A Durable Object namespace is a set of Durable Objects that are backed by the same Durable Object class. There is only one Durable Object namespace per class. A Durable Object namespace can contain any number of Durable Objects.
-The `DurableObjectNamespace` interface is used to obtain a reference to new or existing Durable Objects. The interface is accessible from the fetch handler on a Cloudflare Worker via the `env` parameter, which is the standard interface when referencing bindings declared in `wrangler.toml`.
+The `DurableObjectNamespace` interface is used to obtain a reference to new or existing Durable Objects. The interface is accessible from the fetch handler on a Cloudflare Worker via the `env` parameter, which is the standard interface when referencing bindings declared in the `wrangler.toml / wrangler.json` file.
This interface defines several [methods](/durable-objects/api/namespace/#methods) that can be used to create an ID for a Durable Object. Note that creating an ID for a Durable Object does not create the Durable Object. The Durable Object is created lazily after calling [`DurableObjectNamespace::get`](/durable-objects/api/namespace/#get) to create a [`DurableObjectStub`](/durable-objects/api/stub) from a `DurableObjectId`. This ensures that objects are not constructed until they are actually accessed.
diff --git a/src/content/docs/durable-objects/api/webgpu.mdx b/src/content/docs/durable-objects/api/webgpu.mdx
index eea471fb178acb..505513cddb5d39 100644
--- a/src/content/docs/durable-objects/api/webgpu.mdx
+++ b/src/content/docs/durable-objects/api/webgpu.mdx
@@ -15,7 +15,7 @@ The [WebGPU API](https://developer.mozilla.org/en-US/docs/Web/API/WebGPU_API) al
The WebGPU API is only accessible from within [Durable Objects](/durable-objects/). You cannot use the WebGPU API from within Workers.
-To use the WebGPU API in local development, enable the `experimental` and `webgpu` [compatibility flags](/workers/configuration/compatibility-flags/) in the [`wrangler.toml` configuration file](/workers/wrangler/configuration/) of your Durable Object.
+To use the WebGPU API in local development, enable the `experimental` and `webgpu` [compatibility flags](/workers/configuration/compatibility-flags/) in the [`wrangler.toml / wrangler.json` file](/workers/wrangler/configuration/) of your Durable Object.
```
compatibility_flags = ["experimental", "webgpu"]
diff --git a/src/content/docs/durable-objects/best-practices/access-durable-objects-storage.mdx b/src/content/docs/durable-objects/best-practices/access-durable-objects-storage.mdx
index b93175cbb23eff..e291ac0d8e2dda 100644
--- a/src/content/docs/durable-objects/best-practices/access-durable-objects-storage.mdx
+++ b/src/content/docs/durable-objects/best-practices/access-durable-objects-storage.mdx
@@ -6,7 +6,7 @@ sidebar:
---
-import { Render, GlossaryTooltip } from "~/components";
+import { Render, GlossaryTooltip, WranglerConfig } from "~/components";
Durable Objects are a powerful compute API that provides a compute with storage building block. Each Durable Object has its own private, transactional and strongly consistent storage. Durable Objects Storage API provides access to a Durable Object's attached storage.
@@ -56,9 +56,7 @@ The new beta version of Durable Objects is available where each Durable Object h
:::
-To allow a new Durable Object class to use SQLite storage backend, use `new_sqlite_classes` on the migration in your Worker's `wrangler.toml` file:
-
-import { WranglerConfig } from "~/components";
+To allow a new Durable Object class to use SQLite storage backend, use `new_sqlite_classes` on the migration in your Worker's Wrangler file:
diff --git a/src/content/docs/durable-objects/best-practices/websockets.mdx b/src/content/docs/durable-objects/best-practices/websockets.mdx
index 23c4f900bd6b00..7d84e282125789 100644
--- a/src/content/docs/durable-objects/best-practices/websockets.mdx
+++ b/src/content/docs/durable-objects/best-practices/websockets.mdx
@@ -217,7 +217,7 @@ export class WebSocketServer extends DurableObject {
-To execute this code, configure your `wrangler.toml` file to include a Durable Object [binding](/durable-objects/get-started/walkthrough/#5-configure-durable-object-bindings) and [migration](/durable-objects/reference/durable-objects-migrations/) based on the namespace and class name chosen previously.
+To execute this code, configure your Wrangler file to include a Durable Object [binding](/durable-objects/get-started/tutorial/#5-configure-durable-object-bindings) and [migration](/durable-objects/reference/durable-objects-migrations/) based on the namespace and class name chosen previously.
```toml title="wrangler.toml"
name = "websocket-server"
@@ -355,7 +355,7 @@ export class WebSocketHibernationServer extends DurableObject {
-Similar to the WebSocket Standard API example, to execute this code, configure your `wrangler.toml` file to include a Durable Object [binding](/durable-objects/get-started/walkthrough/#5-configure-durable-object-bindings) and [migration](/durable-objects/reference/durable-objects-migrations/) based on the namespace and class name chosen previously.
+Similar to the WebSocket Standard API example, to execute this code, configure your Wrangler file to include a Durable Object [binding](/durable-objects/get-started/tutorial/#5-configure-durable-object-bindings) and [migration](/durable-objects/reference/durable-objects-migrations/) based on the namespace and class name chosen previously.
```toml title="wrangler.toml"
name = "websocket-hibernation-server"
diff --git a/src/content/docs/durable-objects/examples/agents.mdx b/src/content/docs/durable-objects/examples/agents.mdx
new file mode 100644
index 00000000000000..2ba4fdef824cfc
--- /dev/null
+++ b/src/content/docs/durable-objects/examples/agents.mdx
@@ -0,0 +1,9 @@
+---
+pcx_content_type: navigation
+title: Agents
+external_link: /agents/
+sidebar:
+ order: 10
+head: []
+description: Build AI-powered Agents on Cloudflare
+---
\ No newline at end of file
diff --git a/src/content/docs/durable-objects/examples/alarms-api.mdx b/src/content/docs/durable-objects/examples/alarms-api.mdx
index 55d86de038fa92..d813dfe402eb92 100644
--- a/src/content/docs/durable-objects/examples/alarms-api.mdx
+++ b/src/content/docs/durable-objects/examples/alarms-api.mdx
@@ -13,7 +13,7 @@ description: Use the Durable Objects Alarms API to batch requests to a Durable O
---
-import { GlossaryTooltip } from "~/components";
+import { GlossaryTooltip, WranglerConfig } from "~/components";
This example implements an `alarm()` handler that wakes the Durable Object once every 10 seconds to batch requests to a single Durable Object. The `alarm()` handler will delay processing until there is enough work in the queue.
@@ -74,9 +74,7 @@ export class Batcher extends DurableObject {
The `alarm()` handler will be called once every 10 seconds. If an unexpected error terminates the Durable Object, the `alarm()` handler will be re-instantiated on another machine. Following a short delay, the `alarm()` handler will run from the beginning on the other machine.
-Finally, configure your `wrangler.toml` file to include a Durable Object [binding](/durable-objects/get-started/walkthrough/#5-configure-durable-object-bindings) and [migration](/durable-objects/reference/durable-objects-migrations/) based on the namespace and class name chosen previously.
-
-import { WranglerConfig } from "~/components";
+Finally, configure your Wrangler file to include a Durable Object [binding](/durable-objects/get-started/tutorial/#5-configure-durable-object-bindings) and [migration](/durable-objects/reference/durable-objects-migrations/) based on the namespace and class name chosen previously.
diff --git a/src/content/docs/durable-objects/examples/build-a-counter.mdx b/src/content/docs/durable-objects/examples/build-a-counter.mdx
index 220c43fd73e082..f8be0b9c308651 100644
--- a/src/content/docs/durable-objects/examples/build-a-counter.mdx
+++ b/src/content/docs/durable-objects/examples/build-a-counter.mdx
@@ -11,7 +11,7 @@ description: Build a counter using Durable Objects and Workers with RPC methods.
---
-import { TabItem, Tabs } from "~/components"
+import { TabItem, Tabs, WranglerConfig } from "~/components"
This example shows how to build a counter using Durable Objects and Workers with [RPC methods](/workers/runtime-apis/rpc) that can print, increment, and decrement a `name` provided by the URL query string parameter, for example, `?name=A`.
@@ -170,9 +170,7 @@ export class Counter extends DurableObject {
-Finally, configure your `wrangler.toml` file to include a Durable Object [binding](/durable-objects/get-started/walkthrough/#5-configure-durable-object-bindings) and [migration](/durable-objects/reference/durable-objects-migrations/) based on the namespace and class name chosen previously.
-
-import { WranglerConfig } from "~/components";
+Finally, configure your Wrangler file to include a Durable Object [binding](/durable-objects/get-started/tutorial/#5-configure-durable-object-bindings) and [migration](/durable-objects/reference/durable-objects-migrations/) based on the namespace and class name chosen previously.
diff --git a/src/content/docs/durable-objects/examples/build-a-rate-limiter.mdx b/src/content/docs/durable-objects/examples/build-a-rate-limiter.mdx
index c18155010e889c..3cd21b778894ec 100644
--- a/src/content/docs/durable-objects/examples/build-a-rate-limiter.mdx
+++ b/src/content/docs/durable-objects/examples/build-a-rate-limiter.mdx
@@ -11,7 +11,7 @@ description: Build a rate limiter using Durable Objects and Workers.
---
-import { TabItem, Tabs, GlossaryTooltip } from "~/components"
+import { TabItem, Tabs, GlossaryTooltip, WranglerConfig } from "~/components"
This example shows how to build a rate limiter using Durable Objects and Workers that can be used to protect upstream resources, including third-party APIs that your application relies on and/or services that may be costly for you to invoke.
@@ -264,9 +264,7 @@ export class RateLimiter extends DurableObject {
-Finally, configure your `wrangler.toml` file to include a Durable Object [binding](/durable-objects/get-started/walkthrough/#5-configure-durable-object-bindings) and [migration](/durable-objects/reference/durable-objects-migrations/) based on the namespace and class name chosen previously.
-
-import { WranglerConfig } from "~/components";
+Finally, configure your Wrangler file to include a Durable Object [binding](/durable-objects/get-started/tutorial/#5-configure-durable-object-bindings) and [migration](/durable-objects/reference/durable-objects-migrations/) based on the namespace and class name chosen previously.
diff --git a/src/content/docs/durable-objects/examples/durable-object-in-memory-state.mdx b/src/content/docs/durable-objects/examples/durable-object-in-memory-state.mdx
index ec4c4f0e202cb1..0f6230e6bec26a 100644
--- a/src/content/docs/durable-objects/examples/durable-object-in-memory-state.mdx
+++ b/src/content/docs/durable-objects/examples/durable-object-in-memory-state.mdx
@@ -13,6 +13,8 @@ description: Create a Durable Object that stores the last location it was
---
+import { WranglerConfig } from "~/components";
+
This example shows you how Durable Objects are stateful, meaning in-memory state can be retained between requests. After a brief period of inactivity, the Durable Object will be evicted, and all in-memory state will be lost. The next request will reconstruct the object, but instead of showing the city of the previous request, it will display a message indicating that the object has been reinitialized. If you need your applications state to survive eviction, write the state to storage by using the [Storage API](/durable-objects/api/storage-api/), or by storing your data elsewhere.
```js
@@ -68,9 +70,7 @@ New Location: ${request.cf.city}`);
}
```
-Finally, configure your `wrangler.toml` file to include a Durable Object [binding](/durable-objects/get-started/walkthrough/#5-configure-durable-object-bindings) and [migration](/durable-objects/reference/durable-objects-migrations/) based on the namespace and class name chosen previously.
-
-import { WranglerConfig } from "~/components";
+Finally, configure your Wrangler file to include a Durable Object [binding](/durable-objects/get-started/tutorial/#5-configure-durable-object-bindings) and [migration](/durable-objects/reference/durable-objects-migrations/) based on the namespace and class name chosen previously.
diff --git a/src/content/docs/durable-objects/examples/durable-object-ttl.mdx b/src/content/docs/durable-objects/examples/durable-object-ttl.mdx
index e01f04819fc885..8495b826fc1a92 100644
--- a/src/content/docs/durable-objects/examples/durable-object-ttl.mdx
+++ b/src/content/docs/durable-objects/examples/durable-object-ttl.mdx
@@ -12,7 +12,7 @@ sidebar:
description: Use the Durable Objects Alarms API to implement a Time To Live (TTL) for Durable Object instances.
---
-import { TabItem, Tabs, GlossaryTooltip } from "~/components";
+import { TabItem, Tabs, GlossaryTooltip, WranglerConfig } from "~/components";
A common feature request for Durable Objects is a Time To Live (TTL) for Durable Object instances. Durable Objects give developers the tools to implement a custom TTL in only a few lines of code. This example demonstrates how to implement a TTL making use of `alarms`. While this TTL will be extended upon every new request to the Durable Object, this can be customized based on a particular use case.
@@ -100,9 +100,7 @@ export default {
-To test and deploy this example, configure your `wrangler.toml` file to include a Durable Object [binding](/durable-objects/get-started/walkthrough/#5-configure-durable-object-bindings) and [migration](/durable-objects/reference/durable-objects-migrations/) based on the namespace and class name chosen previously.
-
-import { WranglerConfig } from "~/components";
+To test and deploy this example, configure your Wrangler file to include a Durable Object [binding](/durable-objects/get-started/tutorial/#5-configure-durable-object-bindings) and [migration](/durable-objects/reference/durable-objects-migrations/) based on the namespace and class name chosen previously.
diff --git a/src/content/docs/durable-objects/examples/use-kv-from-durable-objects.mdx b/src/content/docs/durable-objects/examples/use-kv-from-durable-objects.mdx
index 742b8e024122f8..4ed2f79340b3ea 100644
--- a/src/content/docs/durable-objects/examples/use-kv-from-durable-objects.mdx
+++ b/src/content/docs/durable-objects/examples/use-kv-from-durable-objects.mdx
@@ -11,19 +11,17 @@ description: Read and write to/from KV within a Durable Object
---
-import { GlossaryTooltip } from "~/components";
+import { GlossaryTooltip, WranglerConfig } from "~/components";
The following Worker script shows you how to configure a Durable Object to read from and/or write to a [Workers KV namespace](/kv/concepts/how-kv-works/). This is useful when using a Durable Object to coordinate between multiple clients, and allows you to serialize writes to KV and/or broadcast a single read from KV to hundreds or thousands of clients connected to a single Durable Object [using WebSockets](/durable-objects/best-practices/websockets/).
Prerequisites:
* A [KV namespace](/kv/api/) created via the Cloudflare dashboard or the [wrangler CLI](/workers/wrangler/install-and-update/).
-* A [configured binding](/kv/concepts/kv-bindings/) for the `kv_namespace` in the Cloudflare dashboard or `wrangler.toml` file.
+* A [configured binding](/kv/concepts/kv-bindings/) for the `kv_namespace` in the Cloudflare dashboard or Wrangler file.
* A [Durable Object namespace binding](/workers/wrangler/configuration/#durable-objects).
-Configure your `wrangler.toml` file as follows:
-
-import { WranglerConfig } from "~/components";
+Configure your Wrangler file as follows:
diff --git a/src/content/docs/durable-objects/examples/websocket-hibernation-server.mdx b/src/content/docs/durable-objects/examples/websocket-hibernation-server.mdx
index 40699a52402af0..927ce9c5d46c62 100644
--- a/src/content/docs/durable-objects/examples/websocket-hibernation-server.mdx
+++ b/src/content/docs/durable-objects/examples/websocket-hibernation-server.mdx
@@ -13,7 +13,7 @@ description: Build a WebSocket server using WebSocket Hibernation on Durable
---
-import { TabItem, Tabs } from "~/components"
+import { TabItem, Tabs, WranglerConfig } from "~/components"
This example is similar to the [Build a WebSocket server](/durable-objects/examples/websocket-server/) example, but uses the WebSocket Hibernation API. The WebSocket Hibernation API should be preferred for WebSocket server applications built on Durable Objects, since it significantly decreases duration charge, and provides additional features that pair well with WebSocket applications. For more information, refer to [Use Durable Objects with WebSockets](/durable-objects/best-practices/websockets/).
@@ -177,9 +177,7 @@ export class WebSocketHibernationServer extends DurableObject {
-Finally, configure your `wrangler.toml` file to include a Durable Object [binding](/durable-objects/get-started/walkthrough/#5-configure-durable-object-bindings) and [migration](/durable-objects/reference/durable-objects-migrations/) based on the namespace and class name chosen previously.
-
-import { WranglerConfig } from "~/components";
+Finally, configure your Wrangler file to include a Durable Object [binding](/durable-objects/get-started/tutorial/#5-configure-durable-object-bindings) and [migration](/durable-objects/reference/durable-objects-migrations/) based on the namespace and class name chosen previously.
diff --git a/src/content/docs/durable-objects/examples/websocket-server.mdx b/src/content/docs/durable-objects/examples/websocket-server.mdx
index ded0ede2bdb865..80047bbe15e170 100644
--- a/src/content/docs/durable-objects/examples/websocket-server.mdx
+++ b/src/content/docs/durable-objects/examples/websocket-server.mdx
@@ -11,7 +11,7 @@ description: Build a WebSocket server using Durable Objects and Workers.
---
-import { TabItem, Tabs, GlossaryTooltip } from "~/components"
+import { TabItem, Tabs, GlossaryTooltip, WranglerConfig } from "~/components"
This example shows how to build a WebSocket server using Durable Objects and Workers. The example exposes an endpoint to create a new WebSocket connection. This WebSocket connection echos any message while including the total number of WebSocket connections currently established. For more information, refer to [Use Durable Objects with WebSockets](/durable-objects/best-practices/websockets/).
@@ -187,9 +187,7 @@ export class WebSocketServer extends DurableObject {
-Finally, configure your `wrangler.toml` file to include a Durable Object [binding](/durable-objects/get-started/walkthrough/#5-configure-durable-object-bindings) and [migration](/durable-objects/reference/durable-objects-migrations/) based on the namespace and class name chosen previously.
-
-import { WranglerConfig } from "~/components";
+Finally, configure your Wrangler file to include a Durable Object [binding](/durable-objects/get-started/tutorial/#5-configure-durable-object-bindings) and [migration](/durable-objects/reference/durable-objects-migrations/) based on the namespace and class name chosen previously.
diff --git a/src/content/docs/durable-objects/get-started/tutorial-with-sql-api.mdx b/src/content/docs/durable-objects/get-started/tutorial-with-sql-api.mdx
index 03c2985921c92c..1527e77108a4ee 100644
--- a/src/content/docs/durable-objects/get-started/tutorial-with-sql-api.mdx
+++ b/src/content/docs/durable-objects/get-started/tutorial-with-sql-api.mdx
@@ -5,7 +5,7 @@ sidebar:
order: 2
---
-import { Render, TabItem, Tabs, PackageManagers } from "~/components";
+import { Render, TabItem, Tabs, PackageManagers, WranglerConfig } from "~/components";
This guide will instruct you through:
@@ -56,7 +56,7 @@ Running `create cloudflare@latest` will install [Wrangler](/workers/wrangler/ins
}}
/>
-This will create a new directory, which will include either a `src/index.js` or `src/index.ts` file to write your code and a [`wrangler.toml`](/workers/wrangler/configuration/) configuration file.
+This will create a new directory, which will include either a `src/index.js` or `src/index.ts` file to write your code and a [`wrangler.json`](/workers/wrangler/configuration/) configuration file.
Move into your new directory:
@@ -201,9 +201,7 @@ Refer to [Access a Durable Object from a Worker](/durable-objects/best-practices
## 5. Configure Durable Object bindings
-[Bindings](/workers/runtime-apis/bindings/) allow your Workers to interact with resources on the Cloudflare developer platform. The Durable Object bindings in your Worker project's `wrangler.toml` will include a binding name (for this guide, use `MY_DURABLE_OBJECT`) and the class name (`MyDurableObject`).
-
-import { WranglerConfig } from "~/components";
+[Bindings](/workers/runtime-apis/bindings/) allow your Workers to interact with resources on the Cloudflare developer platform. The Durable Object bindings in your Worker project's `wrangler.toml / wrangler.json` file will include a binding name (for this guide, use `MY_DURABLE_OBJECT`) and the class name (`MyDurableObject`).
@@ -225,9 +223,9 @@ The `[[durable_objects.bindings]]` section contains the following fields:
A migration is a mapping process from a class name to a runtime state. You perform a migration when creating a new Durable Object class, or when renaming, deleting or transferring an existing Durable Object class.
-Migrations are performed through the `[[migrations]]` configurations key in your `wrangler.toml` file.
+Migrations are performed through the `[[migrations]]` configurations key in your Wrangler file.
-The Durable Object migration to create a new Durable Object class with SQLite storage backend will look like the following in your Worker's `wrangler.toml` file:
+The Durable Object migration to create a new Durable Object class with SQLite storage backend will look like the following in your Worker's Wrangler file:
diff --git a/src/content/docs/durable-objects/get-started/walkthrough.mdx b/src/content/docs/durable-objects/get-started/tutorial.mdx
similarity index 93%
rename from src/content/docs/durable-objects/get-started/walkthrough.mdx
rename to src/content/docs/durable-objects/get-started/tutorial.mdx
index 25eb0723df1ac2..a8221ba79ffd3f 100644
--- a/src/content/docs/durable-objects/get-started/walkthrough.mdx
+++ b/src/content/docs/durable-objects/get-started/tutorial.mdx
@@ -1,11 +1,11 @@
---
-title: Walkthrough
+title: Tutorial
pcx_content_type: get-started
sidebar:
order: 1
---
-import { Render, TabItem, Tabs, PackageManagers } from "~/components";
+import { Render, TabItem, Tabs, PackageManagers, WranglerConfig } from "~/components";
This guide will instruct you through:
@@ -49,7 +49,7 @@ Running `create cloudflare@latest` will install [Wrangler](/workers/wrangler/ins
}}
/>
-This will create a new directory, which will include either a `src/index.js` or `src/index.ts` file to write your code and a [`wrangler.toml`](/workers/wrangler/configuration/) configuration file.
+This will create a new directory, which will include either a `src/index.js` or `src/index.ts` file to write your code and a [`wrangler.json`](/workers/wrangler/configuration/) configuration file.
Move into your new directory:
@@ -174,9 +174,7 @@ export default {
## 5. Configure Durable Object bindings
-To allow a Worker to invoke methods on a Durable Object, the Worker must have a [Durable Object binding](/workers/runtime-apis/bindings/) in the project's [`wrangler.toml`](/workers/wrangler/configuration/#durable-objects) file. The binding is configured to use a particular Durable Object class.
-
-import { WranglerConfig } from "~/components";
+To allow a Worker to invoke methods on a Durable Object, the Worker must have a [Durable Object binding](/workers/runtime-apis/bindings/) in the project's [`wrangler.toml / wrangler.json` file](/workers/wrangler/configuration/#durable-objects). The binding is configured to use a particular Durable Object class.
@@ -201,9 +199,9 @@ Refer to [Wrangler Configuration](/workers/wrangler/configuration/#durable-objec
A migration is a mapping process from a class name to a runtime state. You perform a migration when creating a new Durable Object class, or when renaming, deleting or transferring an existing Durable Object class.
-Migrations are performed through the `[[migrations]]` configurations key in your `wrangler.toml` file.
+Migrations are performed through the `[[migrations]]` configurations key in your Wrangler file.
-The Durable Object migration to create a new Durable Object class will look like the following in your Worker's `wrangler.toml` file:
+The Durable Object migration to create a new Durable Object class will look like the following in your Worker's Wrangler file:
@@ -227,7 +225,7 @@ New beta version of Durable Objects is available where each Durable Object has a
A Durable Object class can only have a single storage type, which cannot be changed after the Durable Object class is created.
-To configure SQL storage and API, replace `new_classes` with `new_sqlite_classes` in your Worker's `wrangler.toml` file:
+To configure SQL storage and API, replace `new_classes` with `new_sqlite_classes` in your Worker's Wrangler file:
diff --git a/src/content/docs/durable-objects/index.mdx b/src/content/docs/durable-objects/index.mdx
index 6dc38a695bd902..31e68050326598 100644
--- a/src/content/docs/durable-objects/index.mdx
+++ b/src/content/docs/durable-objects/index.mdx
@@ -32,7 +32,7 @@ A Durable Object is a special kind of [Worker](/workers/). Like a Worker, it is
Thus, Durable Objects enable **stateful** serverless applications.
-Get started
+Get started
:::note[SQLite in Durable Objects Beta]
diff --git a/src/content/docs/durable-objects/observability/troubleshooting.mdx b/src/content/docs/durable-objects/observability/troubleshooting.mdx
index 78a126ce25b8d7..d10e6f180dd7d5 100644
--- a/src/content/docs/durable-objects/observability/troubleshooting.mdx
+++ b/src/content/docs/durable-objects/observability/troubleshooting.mdx
@@ -17,11 +17,11 @@ The `wrangler dev` command opens a tunnel from your local development environmen
### No event handlers were registered. This script does nothing.
-In your `wrangler.toml` file, make sure the `dir` and `main` entries point to the correct file containing your Worker code, and that the file extension is `.mjs` instead of `.js` if using ES modules syntax.
+In your Wrangler file, make sure the `dir` and `main` entries point to the correct file containing your Worker code, and that the file extension is `.mjs` instead of `.js` if using ES modules syntax.
### Cannot apply `--delete-class` migration to class.
-When deleting a migration using `npx wrangler deploy --delete-class `, you may encounter this error: `"Cannot apply --delete-class migration to class without also removing the binding that references it"`. You should remove the corresponding binding under `[durable_objects]` in `wrangler.toml` before attempting to apply `--delete-class` again.
+When deleting a migration using `npx wrangler deploy --delete-class `, you may encounter this error: `"Cannot apply --delete-class migration to class without also removing the binding that references it"`. You should remove the corresponding binding under `[durable_objects]` in the `wrangler.toml / wrangler.json` file before attempting to apply `--delete-class` again.
### Durable Object is overloaded.
diff --git a/src/content/docs/durable-objects/reference/durable-objects-migrations.mdx b/src/content/docs/durable-objects/reference/durable-objects-migrations.mdx
index 50909cb778c961..e11c4e28abb269 100644
--- a/src/content/docs/durable-objects/reference/durable-objects-migrations.mdx
+++ b/src/content/docs/durable-objects/reference/durable-objects-migrations.mdx
@@ -6,7 +6,7 @@ sidebar:
---
-import { GlossaryTooltip } from "~/components";
+import { GlossaryTooltip, WranglerConfig } from "~/components";
A migration is a mapping process from a class name to a runtime state.
@@ -41,7 +41,7 @@ The destination class (the class that stored Durable Objects are being transferr
After a rename or transfer migration, requests to the destination Durable Object class will have access to the source Durable Object's stored data.
-After a migration, any existing bindings to the original Durable Object class (for example, from other Workers) will automatically forward to the updated destination class. However, any Workers bound to the updated Durable Object class must update their Durable Object binding configuration in the `wrangler.toml` file for their next deployment.
+After a migration, any existing bindings to the original Durable Object class (for example, from other Workers) will automatically forward to the updated destination class. However, any Workers bound to the updated Durable Object class must update their Durable Object binding configuration in the Wrangler file for their next deployment.
:::
@@ -56,22 +56,20 @@ Running a delete migration will delete all Durable Objects associated with the d
:::
-### Durable Object migrations in `wrangler.toml`
+### Durable Object migrations in the Wrangler configuration file
-Migrations are performed through the `[[migrations]]` configurations key in your `wrangler.toml` file.
+Migrations are performed through the `[[migrations]]` configurations key in your Wrangler file.
Migrations require a migration tag, which is defined by the `tag` property in each migration entry.
Migration tags are treated like unique names and are used to determine which migrations have already been applied. Once a given Worker code has a migration tag set on it, all future Worker code deployments must include a migration tag.
-The migration list is an ordered array of tables, specified as a top-level key in your `wrangler.toml` file. The migration list is inherited by all environments and cannot be overridden by a specific environment.
+The migration list is an ordered array of tables, specified as a top-level key in your Wrangler file. The migration list is inherited by all environments and cannot be overridden by a specific environment.
All migrations are applied at deployment. Each migration can only be applied once per [environment](/durable-objects/reference/environments/).
To illustrate an example migrations workflow, the `DurableObjectExample` class can be initially defined with:
-import { WranglerConfig } from "~/components";
-
```toml
@@ -117,7 +115,7 @@ The new beta version of Durable Objects is available where each Durable Object h
:::
-To allow a new Durable Object class to use a SQLite storage backend, use `new_sqlite_classes` on the migration in your Worker's `wrangler.toml` file:
+To allow a new Durable Object class to use a SQLite storage backend, use `new_sqlite_classes` on the migration in your Worker's Wrangler file:
diff --git a/src/content/docs/durable-objects/reference/environments.mdx b/src/content/docs/durable-objects/reference/environments.mdx
index efbbefee13f4b6..95e05c25c346a3 100644
--- a/src/content/docs/durable-objects/reference/environments.mdx
+++ b/src/content/docs/durable-objects/reference/environments.mdx
@@ -6,14 +6,14 @@ sidebar:
---
+import { WranglerConfig } from "~/components";
+
[Wrangler](/workers/wrangler/install-and-update/) allows you to deploy the same Worker application with different configuration for each [environment](/workers/wrangler/environments/).
If you are using Wrangler environments, you must specify any [Durable Object bindings](/workers/runtime-apis/bindings/) you wish to use on a per-environment basis.
Durable Object bindings are not inherited. For example, you can define an environment named `staging` as below:
-import { WranglerConfig } from "~/components";
-
```toml
diff --git a/src/content/docs/durable-objects/tutorials/build-a-seat-booking-app/index.mdx b/src/content/docs/durable-objects/tutorials/build-a-seat-booking-app/index.mdx
index 6e8cccb0c72d8c..bded7ea0797ab0 100644
--- a/src/content/docs/durable-objects/tutorials/build-a-seat-booking-app/index.mdx
+++ b/src/content/docs/durable-objects/tutorials/build-a-seat-booking-app/index.mdx
@@ -11,7 +11,7 @@ languages:
- SQL
---
-import { Render, PackageManagers, Details } from "~/components";
+import { Render, PackageManagers, Details, WranglerConfig } from "~/components";
In this tutorial, you will learn how to build a seat reservation app using Durable Objects. This app will allow users to book a seat for a flight. The app will be written in TypeScript and will use the new [SQLite storage backend in Durable Object](/durable-objects/best-practices/access-durable-objects-storage/#sqlite-storage-backend) to store the data.
@@ -256,13 +256,11 @@ The frontend of the application is a simple HTML page that allows users to selec
- It also uses a WebSocket connection to receive updates about the available seats.
- When a user clicks on a seat, the `bookSeat()` function is called that prompts the user to enter their name and then makes a `POST` request to the `/book-seat` endpoint.
-4. Update the bindings in `wrangler.toml` to configure `assets` to serve the `public` directory.
-
-import { WranglerConfig } from "~/components";
+4. Update the bindings in the `wrangler.toml / wrangler.json` file to configure `assets` to serve the `public` directory.
-```toml title="wrangler.toml"
+```toml
[assets]
directory = "public"
```
@@ -283,15 +281,13 @@ npm run dev
## 3. Create table for each flight
-The application already has the binding for the Durable Objects class configured in `wrangler.toml`. If you update the name of the Durable Objects class in `src/index.ts`, make sure to also update the binding in `wrangler.toml`.
-
-1. Update the binding to use the SQLite storage in Durable Objects. In `wrangler.toml`, replace `new_classes=["Flight"]` with `new_sqlite_classes=["Flight"]`, `name = "FLIGHT"` with `name = "FLIGHT"`, and `class_name = "MyDurableObject"` with `class_name = "Flight"`. Your `wrangler.toml` should look like this:
-
+The application already has the binding for the Durable Objects class configured in the `wrangler.toml / wrangler.json` file. If you update the name of the Durable Objects class in `src/index.ts`, make sure to also update the binding in the `wrangler.toml / wrangler.json` file.
+1. Update the binding to use the SQLite storage in Durable Objects. In the `wrangler.toml / wrangler.json` file, replace `new_classes=["Flight"]` with `new_sqlite_classes=["Flight"]`, `name = "FLIGHT"` with `name = "FLIGHT"`, and `class_name = "MyDurableObject"` with `class_name = "Flight"`. Your `wrangler.toml / wrangler.json` file should look like this:
-```toml {9} title="wrangler.toml"
+```toml {9}
[[durable_objects.bindings]]
name = "FLIGHT"
class_name = "Flight"
diff --git a/src/content/docs/email-routing/email-workers/send-email-workers.mdx b/src/content/docs/email-routing/email-workers/send-email-workers.mdx
index df307bbf3ae848..76791896858b53 100644
--- a/src/content/docs/email-routing/email-workers/send-email-workers.mdx
+++ b/src/content/docs/email-routing/email-workers/send-email-workers.mdx
@@ -6,12 +6,10 @@ sidebar:
---
-import { Render } from "~/components"
+import { Render, WranglerConfig } from "~/components"
-import { WranglerConfig } from "~/components";
-
```toml
diff --git a/src/content/docs/email-security/reporting/search/available-parameters.mdx b/src/content/docs/email-security/reporting/search/available-parameters.mdx
index e498706cf3bfa2..ca6f6b939009f6 100644
--- a/src/content/docs/email-security/reporting/search/available-parameters.mdx
+++ b/src/content/docs/email-security/reporting/search/available-parameters.mdx
@@ -49,3 +49,9 @@ For disposition-specific s
For Email Security Horizon Enterprise customers, detections search would index for a period of 12 months and rotate over to a rolling 12-month period.
For Email Security Horizon Advantage customers, detections search would index for three months and rotate over to a rolling 3-month period.
+
+## Scope of data retained
+
+For messages that are not detected, the body of the message itself is not retained. Only metadata such as the sender, recipient, subject, message_id, and delivery log will be retained. It is also possible to view messages as a preview image.
+
+For detections, full messages are retained, including attachments, in addition to the metadata described above. The raw message including attachments can be downloaded as an `.eml` file.
diff --git a/src/content/docs/fundamentals/api/how-to/create-via-api.mdx b/src/content/docs/fundamentals/api/how-to/create-via-api.mdx
index 06c1fda17b2428..b3ea27fb775a72 100644
--- a/src/content/docs/fundamentals/api/how-to/create-via-api.mdx
+++ b/src/content/docs/fundamentals/api/how-to/create-via-api.mdx
@@ -6,7 +6,7 @@ sidebar:
---
-import { Render } from "~/components"
+import { Render, Tabs, TabItem } from "~/components"
Generate new API tokens on the fly via the API. Before you can do this, you must create an API token in the Cloudflare dashboard that can create subsequent tokens.
@@ -24,7 +24,7 @@ Cloudflare also recommends limiting the use of the token via client IP address f
## Creating API tokens with the API
-Once you create an API token that can create other tokens, you can now use it in the API. Refer to the [API schema docs](/api/resources/user/subresources/tokens/methods/create/) for more information.
+You can create a user owned token or account owned token to use with the API. Refer to the [user owned token](/api/resources/user/subresources/tokens/methods/create/) or the [account owned token](/api/resources/accounts/subresources/tokens/methods/create/) API schema docs for more information.
To create a token:
@@ -130,6 +130,93 @@ Each parameter in the `in` and `not_in` objects must be in CIDR notation. For ex
Combine the previous information to create a token as in the following example:
+
+
+```bash
+curl "https://api.cloudflare.com/client/v4/accounts/{account_id}/tokens" \
+--header "Authorization: Bearer " \
+--header "Content-Type: application/json" \
+--data '{
+ "name": "readonly token",
+ "policies": [
+ {
+ "effect": "allow",
+ "resources": {
+ "com.cloudflare.api.account.zone.eb78d65290b24279ba6f44721b3ea3c4": "*",
+ "com.cloudflare.api.account.zone.22b1de5f1c0e4b3ea97bb1e963b06a43": "*"
+ },
+ "permission_groups": [
+ {
+ "id": "c8fed203ed3043cba015a93ad1616f1f",
+ "name": "Zone Read"
+ },
+ {
+ "id": "82e64a83756745bbbb1c9c2701bf816b",
+ "name": "DNS Read"
+ }
+ ]
+ }
+ ],
+ "not_before": "2020-04-01T05:20:00Z",
+ "expires_on": "2020-04-10T00:00:00Z",
+ "condition": {
+ "request.ip": {
+ "in": [
+ "199.27.128.0/21",
+ "2400:cb00::/32"
+ ],
+ "not_in": [
+ "199.27.128.1/32"
+ ]
+ }
+ }
+}'
+```
+
+
+```bash
+curl "https://api.cloudflare.com/client/v4/user/tokens" \
+--header "Authorization: Bearer " \
+--header "Content-Type: application/json" \
+--data '{
+ "name": "readonly token",
+ "policies": [
+ {
+ "effect": "allow",
+ "resources": {
+ "com.cloudflare.api.account.zone.eb78d65290b24279ba6f44721b3ea3c4": "*",
+ "com.cloudflare.api.account.zone.22b1de5f1c0e4b3ea97bb1e963b06a43": "*"
+ },
+ "permission_groups": [
+ {
+ "id": "c8fed203ed3043cba015a93ad1616f1f",
+ "name": "Zone Read"
+ },
+ {
+ "id": "82e64a83756745bbbb1c9c2701bf816b",
+ "name": "DNS Read"
+ }
+ ]
+ }
+ ],
+ "not_before": "2020-04-01T05:20:00Z",
+ "expires_on": "2020-04-10T00:00:00Z",
+ "condition": {
+ "request.ip": {
+ "in": [
+ "199.27.128.0/21",
+ "2400:cb00::/32"
+ ],
+ "not_in": [
+ "199.27.128.1/32"
+ ]
+ }
+ }
+}'
+```
+
+
+
```bash
curl "https://api.cloudflare.com/client/v4/user/tokens" \
--header "Authorization: Bearer " \
diff --git a/src/content/docs/fundamentals/api/how-to/make-api-calls.mdx b/src/content/docs/fundamentals/api/how-to/make-api-calls.mdx
index 846c34713bcbb4..3ecbcf57c07778 100644
--- a/src/content/docs/fundamentals/api/how-to/make-api-calls.mdx
+++ b/src/content/docs/fundamentals/api/how-to/make-api-calls.mdx
@@ -5,27 +5,31 @@ sidebar:
order: 2
---
+import { TabItem, Tabs } from "~/components";
+
Once you [create your API token](/fundamentals/api/get-started/create-token/), all API requests are authorized in the same way. Cloudflare uses the [RFC standard](https://tools.ietf.org/html/rfc6750#section-2.1) `Authorization: Bearer ` interface. An example request is shown below.
{/* */}
```bash
-curl "https://api.cloudflare.com/client/v4/zones/{zone_id}" \
+curl "https://api.cloudflare.com/client/v4/zones/$ZONE_ID" \
--header "Authorization: Bearer YQSn-xWAQiiEh9qM58wZNnyQS7FUdoqGIUAbrh7T"
```
Never send or store your API token secret in plaintext. Also be sure not to check it into code repositories, especially public ones.
+Consider defining [environment variables](#environment-variables) for the zone or account ID, as well as for authentication credentials (for example, the API token).
+
To format JSON output for readability in the command line, you can use a tool like `jq`, a command-line JSON processor. For more information on obtaining and installing `jq`, refer to [Download jq](https://stedolan.github.io/jq/download/).
The following example will format the curl JSON output using `jq`:
```bash
-curl "https://api.cloudflare.com/client/v4/zones/{zone_id}" \
---header "Authorization: Bearer " | jq .
+curl "https://api.cloudflare.com/client/v4/zones/$ZONE_ID" \
+--header "Authorization: Bearer $CLOUDFLARE_API_TOKEN" | jq .
```
-## Using Cloudflare’s APIs
+## Using Cloudflare's APIs
Every Cloudflare API element is fixed to a version number. The latest version is Version 4. The stable base URL for all Version 4 HTTPS endpoints is: `https://api.cloudflare.com/client/v4/`
@@ -39,14 +43,15 @@ For specific guidance on making API calls, refer to the following resources:
Several Cloudflare endpoints have optional query parameters to filter incoming results, such as [List Zones](/api/resources/zones/methods/list/).
-When adding those query parameters, make sure you enclose the URL in quotes `'` (just like the header values), or the API call might error.
+When adding those query parameters, make sure you enclose the URL in double quotes `""` (just like the header values), or the API call might error.
-```bash "'"
- curl 'https://api.cloudflare.com/client/v4/zones?account.id=' \
- --header 'Authorization: Bearer ' \
- --header 'Content-Type: application/json'
+```bash '"'
+curl "https://api.cloudflare.com/client/v4/zones?account.id=$ACCOUNT_ID" \
+--header "Authorization: Bearer $CLOUDFLARE_API_TOKEN"
```
+You can enclose strings using either single quotes (`''`) or double quotes (`""`). However, using single quotes prevents variable substitution in shells like `bash`. In the previous example, this would mean that the `$ACCOUNT_ID` and `$CLOUDFLARE_API_TOKEN` [environment variables](#environment-variables) would not be replaced with their values.
+
### Pagination
Sometimes there will be too many results to display via the default page size, for example you might receive the following:
@@ -63,7 +68,7 @@ There are two query parameter options, which can be combined to paginate across
- `page=x` enables you to select a specific page.
- `per_page=xx` enables you to adjust the number of results displayed on a page. If you select too many, you may get a timeout.
-An example might be `https://api.cloudflare.com/client/v4/zones/zone-identifier/dns_records?per_page=100&page=2`.
+An example might be `https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records?per_page=100&page=2`.
Other options are:
@@ -78,7 +83,7 @@ Recent versions of Windows 10 and 11 [already include the curl tool](https://cur
### Using a Command Prompt window
-To use the Cloudflare API with curl on a Command Prompt window, you must use double quotes (`"`) as string delimiters instead of single quotes (`'`).
+To use the Cloudflare API with curl on a Command Prompt window, you must use double quotes (`"`) as string delimiters.
A typical `PATCH` request will be similar to the following:
@@ -110,10 +115,10 @@ PowerShell has specific cmdlets (`Invoke-RestMethod` and `ConvertFrom-Json`) for
The following example uses the `Invoke-RestMethod` cmdlet:
```powershell
-Invoke-RestMethod -URI 'https://api.cloudflare.com/client/v4/zones/{zone_id}/ssl/certificate_packs?ssl_status=all' -Method 'GET' -ContentType 'application/json' -Headers @{'X-Auth-Email'='';'X-Auth-Key'=''}
+Invoke-RestMethod -URI "https://api.cloudflare.com/client/v4/zones/$Env:ZONE_ID/ssl/certificate_packs?ssl_status=all" -Method 'GET' -Headers @{'X-Auth-Email'=$Env:CLOUDFLARE_EMAIL;'X-Auth-Key'=$Env:CLOUDFLARE_API_KEY}
```
-```txt title="Example output"
+```txt output
result : {@{id=78411cfa-5727-4dc1-8d4a-773d01f17c7c; type=universal; hosts=System.Object[];
primary_certificate=c173c8a1-9724-4e96-a748-2c4494186098; status=active; certificates=System.Object[];
created_on=2022-12-09T23:11:06.010263Z; validity_days=90; validation_method=txt;
@@ -124,13 +129,15 @@ errors : {}
messages : {}
```
+The command assumes that the environment variables `ZONE_ID`, `CLOUDFLARE_EMAIL`, and `CLOUDFLARE_API_KEY` have been previously defined. For more information, refer to [Environment variables](#environment-variables).
+
By default, the output will only contain the first level of the JSON object hierarchy (in the above example, the content of objects such as `hosts` and `certificates` is not shown). To show additional levels and format the output like the `jq` tool, you can use the `ConvertFrom-Json` cmdlet specifying the desired maximum depth (by default, `2`):
```powershell
-Invoke-RestMethod -URI 'https://api.cloudflare.com/client/v4/zones/{zone_id}/ssl/certificate_packs?ssl_status=all' -Method 'GET' -ContentType 'application/json' -Headers @{'X-Auth-Email'='';'X-Auth-Key'=''} | ConvertTo-Json -Depth 5
+Invoke-RestMethod -URI "https://api.cloudflare.com/client/v4/zones/$Env:ZONE_ID/ssl/certificate_packs?ssl_status=all" -Method 'GET' -Headers @{'X-Auth-Email'=$Env:CLOUDFLARE_EMAIL;'X-Auth-Key'=$Env:CLOUDFLARE_API_KEY} | ConvertTo-Json -Depth 5
```
-```json title="Example output"
+```json output
{
"result": [
{
@@ -174,7 +181,7 @@ You can also use the curl tool in PowerShell. However, in PowerShell `curl` is a
A typical `PATCH` request with curl will be similar to the following:
```powershell
-curl.exe --request PATCH "https://api.cloudflare.com/client/v4/user/invites/{id}" --header "Authorization: Bearer " --data '{\"status\": \"accepted\"}'
+curl.exe --request PATCH "https://api.cloudflare.com/client/v4/user/invites/{id}" --header "Authorization: Bearer $Env:CLOUDFLARE_API_TOKEN" --data '{\"status\": \"accepted\"}'
```
To escape a double quote (`"`) character in a request body (specified with `-d` or `--data`), prepend it with another double quote (`"`) or a backslash (`\`). You must escape double quotes even when using single quotes (`'`) as string delimiters.
@@ -184,7 +191,98 @@ To break a single command in two or more lines, use a backtick (`` ` ``) charact
```powershell
curl.exe --request PATCH `
"https://api.cloudflare.com/client/v4/user/invites/{id}" `
---header "X-Auth-Email: " `
---header "X-Auth-Key: " `
+--header "X-Auth-Email: $Env:CLOUDFLARE_EMAIL" `
+--header "X-Auth-Key: $Env:CLOUDFLARE_API_KEY" `
--data '{\"status\": \"accepted\"}'
```
+
+## Environment variables
+
+You can define environment variables for values that repeat between commands, such as the zone or account ID. The lifetime of an environment variable can be the current shell session, all future sessions of the current user, or even all future sessions of all users on the machine where you are defining them.
+
+You can also use environment variables for keeping authentication credentials (API token, API key, and email) and reusing them in different commands. However, make sure you define these values in the smallest possible scope (either the current shell session only or all new sessions for the current user).
+
+The procedure for setting and referencing environment variables depends on your platform and shell.
+
+### Define an environment variable
+
+
+
+To define a `ZONE_ID` environment variable for the current shell session, run the following command:
+
+```sh
+export ZONE_ID='f2ea6707005a4da1af1b431202e96ac5'
+```
+
+To define the variable for all new shell sessions for the current user, add the command above at the end of your shell configuration file (for example, `~/.bashrc` for the `bash` shell and `~/.zshrc` for the `zsh` shell).
+
+
+
+To define a `ZONE_ID` environment variable for the current PowerShell session, run the following command:
+
+```powershell
+$Env:ZONE_ID='f2ea6707005a4da1af1b431202e96ac5'
+```
+
+To define the environment variable for all new PowerShell sessions of the current user, set the variable in your PowerShell profile. You can get the path to your PowerShell profile by running `echo $PROFILE`.
+
+Alternatively, set the variable for all new PowerShell sessions of the current user using the `SetEnvironmentVariable()` method of the `System.Environment` class. For example:
+
+```powershell
+[Environment]::SetEnvironmentVariable("ZONE_ID", "f2ea6707005a4da1af1b431202e96ac5", "User")
+```
+
+Running this command will not affect the current session. You will need to close and start a new PowerShell session.
+
+
+
+To define a `ZONE_ID` environment variable for the current Command Prompt session, run the following command:
+
+```txt frame="terminal"
+set ZONE_ID=f2ea6707005a4da1af1b431202e96ac5
+```
+
+To define an environment variable for all future Command Prompt sessions of the current user, run the following command:
+
+```txt frame="terminal"
+setx ZONE_ID f2ea6707005a4da1af1b431202e96ac5
+```
+
+Running this command will not affect the current window. You will need to either run the `set` command or close and start a new Command Prompt window.
+
+
+
+### Reference an environment variable
+
+
+
+When referencing an environment variable in a command, add a `$` prefix to the variable name (for example, `$ZONE_ID`). Make sure that the full string referencing the variable is either unquoted (if it does not contain spaces) or enclosed in double quotes (`""`).
+
+For example:
+
+```sh
+curl "https://api.cloudflare.com/client/v4/zones/$ZONE_ID" \
+--header "Authorization: Bearer $CLOUDFLARE_API_TOKEN"
+```
+
+
+
+When referencing an environment variable in a command, add an `$Env:` prefix to the variable name (for example, `$Env:ZONE_ID`). Make sure that the full string referencing the variable is either unquoted or enclosed in double quotes (`""`).
+
+For example:
+
+```powershell
+Invoke-RestMethod -URI "https://api.cloudflare.com/client/v4/zones/$Env:ZONE_ID" -Method 'GET' -Headers @{'Authorization'="Bearer $Env:CLOUDFLARE_API_TOKEN"}
+```
+
+
+
+When referencing an environment variable in a command, enclose the variable name in `%` characters (for example, `%ZONE_ID%`).
+
+For example:
+
+```txt frame="terminal"
+curl "https://api.cloudflare.com/client/v4/zones/%ZONE_ID%" --header "Authorization: Bearer %CLOUDFLARE_API_TOKEN%"
+```
+
+
diff --git a/src/content/docs/fundamentals/reference/cloudflare-ray-id.mdx b/src/content/docs/fundamentals/reference/cloudflare-ray-id.mdx
index 875274deed3eef..c513f61574d7f1 100644
--- a/src/content/docs/fundamentals/reference/cloudflare-ray-id.mdx
+++ b/src/content/docs/fundamentals/reference/cloudflare-ray-id.mdx
@@ -1,7 +1,6 @@
---
pcx_content_type: reference
title: Cloudflare Ray ID
-
---
A **Cloudflare Ray ID** is an identifier given to every request that goes through Cloudflare.
@@ -10,7 +9,7 @@ Ray IDs are particularly useful when evaluating Security Events for patterns or
:::caution
-Ray IDs are not guaranteed to be unique for every request. In some situations, different requests may have the same Ray ID.
+Ray IDs are not guaranteed to be unique for every request. In some situations, different requests may have the same Ray ID.
:::
## Look up Ray IDs
@@ -33,4 +32,4 @@ Enterprise customers can enable Ray ID as a field in their [Cloudflare Logs](/lo
### Server logs
-For more details about sending Ray IDs to your server logs, refer to the [CF-Ray](/fundamentals/reference/http-request-headers/#cf-ray) header.
+For more details about sending Ray IDs to your server logs, refer to the [Cf-Ray](/fundamentals/reference/http-headers/#cf-ray) header.
diff --git a/src/content/docs/fundamentals/reference/http-request-headers.mdx b/src/content/docs/fundamentals/reference/http-headers.mdx
similarity index 80%
rename from src/content/docs/fundamentals/reference/http-request-headers.mdx
rename to src/content/docs/fundamentals/reference/http-headers.mdx
index c820ec5847abed..522a5b22e044b4 100644
--- a/src/content/docs/fundamentals/reference/http-request-headers.mdx
+++ b/src/content/docs/fundamentals/reference/http-headers.mdx
@@ -1,33 +1,34 @@
---
pcx_content_type: reference
-title: Cloudflare HTTP request headers
+title: Cloudflare HTTP headers
---
import { Render } from "~/components";
+## Request headers
+
Cloudflare passes all HTTP request headers to your origin web server and adds additional headers as specified below.
:::note
-
Cloudflare may remove HTTP request headers with names considered invalid [according to NGINX](https://nginx.org/en/docs/http/ngx_http_core_module.html#ignore_invalid_headers) — for example, header names containing a `.` (dot) character.
:::
-## Accept-Encoding
+### Accept-Encoding
For incoming requests, the value of this header will always be set to `accept-encoding: br, gzip`. If the client set a different value, such as `accept-encoding: deflate`, it will be overwritten and the original value will be available in `request.cf.clientAcceptEncoding`.
-## CF-Connecting-IP
+### CF-Connecting-IP
`CF-Connecting-IP` provides the client IP address connecting to Cloudflare to the origin web server.
This header will only be sent on the traffic from Cloudflare's edge to your origin web server.
-For guidance on logging your visitor’s original IP address, refer to [Restoring original visitor IPs](/support/troubleshooting/restoring-visitor-ips/restoring-original-visitor-ips/).
+For guidance on logging your visitor's original IP address, refer to [Restoring original visitor IPs](/support/troubleshooting/restoring-visitor-ips/restoring-original-visitor-ips/).
Alternatively, if you do not wish to receive the `CF-Connecting-IP` header or any HTTP header that may contain the visitor's IP address, [enable the **Remove visitor IP headers** Managed Transform](/rules/transform/managed-transforms/configure/).
-### CF-Connecting-IP in Worker subrequests
+#### CF-Connecting-IP in Worker subrequests
-In same-zone Worker subrequests, the value of `CF-Connecting-IP` reflects the value of `x-real-ip` (the client’s IP). `x-real-ip` can be altered by the user in their Worker script.
+In same-zone Worker subrequests, the value of `CF-Connecting-IP` reflects the value of `x-real-ip` (the client's IP). `x-real-ip` can be altered by the user in their Worker script.
In cross-zone subrequests from one Cloudflare zone to another Cloudflare zone, the `CF-Connecting-IP` value will be set to the Worker client IP address `'2a06:98c0:3600::103'` for security reasons.
@@ -35,21 +36,21 @@ For Worker subrequests destined for a non-Cloudflare customer zone, the `CF-Conn
When no Worker subrequest is triggered, `cf-connecting-ip` reflects the client's IP address and the `x-real-ip` header is stripped.
-## CF-Connecting-IPv6
+### CF-Connecting-IPv6
Cloudflare provides [free IPv6 support](/network/ipv6-compatibility/) to all domains without requiring additional configuration or hardware. To support migrating to IPv6, Cloudflare's [Pseudo IPv4](/network/pseudo-ipv4/) provides an IPv6 to IPv4 translation service for all Cloudflare domains.
-## CF-EW-Via
+### CF-EW-Via
This header is used for loop detection, similar to the `CDN-Loop` [header](https://blog.cloudflare.com/preventing-request-loops-using-cdn-loop/).
-## CF-Pseudo-IPv4
+### CF-Pseudo-IPv4
If [Pseudo IPv4](/network/pseudo-ipv4/) is set to `Add Header` - Cloudflare automatically adds the `CF-Pseudo-IPv4` header with a Class E IPv4 address hashed from the original IPv6 address.
-## True-Client-IP (Enterprise plan only)
+### True-Client-IP (Enterprise plan only)
`True-Client-IP` provides the original client IP address to the origin web server. `True-Client-IP` is only available on an Enterprise plan. In the example below, `203.0.113.1` is the original visitor IP address. For example: `True-Client-IP: 203.0.113.1`
@@ -60,12 +61,10 @@ To add a `True-Client-IP` HTTP header to requests, [enable the **Add "True-Clien
Alternatively, if you do not wish to receive the `True-Client-IP` header or any HTTP header that may contain the visitor's IP address, [enable the **Remove visitor IP headers** Managed Transform](/rules/transform/managed-transforms/configure/).
:::caution
-
If you are using Cloudflare in a stacked CDN and authenticating HTTP requests based on the IP address value in the `True-Client-IP` header, you must add a `True-Client-IP` header to your requests. If you do not add this header, its value can be spoofed to any value.
-
:::
-## X-Forwarded-For
+### X-Forwarded-For
`X-Forwarded-For` maintains proxy server and original visitor IP addresses. If there was no existing `X-Forwarded-For`header in the request sent to Cloudflare, `X-Forwarded-For` has an identical value to the `CF-Connecting-IP` header.
@@ -76,28 +75,26 @@ If, on the other hand, an `X-Forwarded-For` header was already present in the re
If you do not wish to receive the visitor's IP address in the `X-Forwarded-For` header, or any HTTP header that may contain the visitor's IP address, [enable the **Remove visitor IP headers** Managed Transform](/rules/transform/managed-transforms/configure/).
:::note
-
To restore the original visitor IP address at your origin web server, Cloudflare recommends that your logs or applications look at `CF-Connecting-IP` or `True-Client-IP` instead of `X-Forwarded-For`. `CF-Connecting-IP` and `True-Client-IP` both have a consistent format containing only one IP address.
-
:::
-## X-Forwarded-Proto
+### X-Forwarded-Proto
`X-Forwarded-Proto` is used to identify the protocol (HTTP or HTTPS) that Cloudflare uses to connect to origin web server. By default, it is `http`. Certain [encryption mode](/ssl/origin-configuration/ssl-modes/) may change this header to `https` if the connection is encrypted.
For incoming requests, the value of this header will be set to the protocol the client used (`http` or `https`). If the client set a different value, it will be overwritten.
-## CF-RAY
+### Cf-Ray
-The `CF-ray` header (otherwise known as a [Ray ID](/fundamentals/reference/cloudflare-ray-id/)) is a hashed value that encodes information about the data center and the visitor’s request. For example: `CF-RAY: 230b030023ae2822-SJC`.
+The `Cf-Ray` header (otherwise known as a [Ray ID](/fundamentals/reference/cloudflare-ray-id/)) is a hashed value that encodes information about the data center and the visitor's request. For example: `Cf-Ray: 230b030023ae2822-SJC`.
-Add the [`CF-Ray` header to your origin web server logs](/support/troubleshooting/general-troubleshooting/gathering-information-for-troubleshooting-sites/#add-the-cf-ray-header-to-your-logs) to match requests proxied to Cloudflare to requests in your server logs.
+Add the [`Cf-Ray` header to your origin web server logs](/support/troubleshooting/general-troubleshooting/gathering-information-for-troubleshooting-sites/#add-the-cf-ray-header-to-your-logs) to match requests proxied to Cloudflare to requests in your server logs.
Enterprise customers can also see all requests via [Cloudflare Logs](/logs/).
-## CF-IPCountry
+### CF-IPCountry
-The `CF-IPCountry` header contains a two-character country code of the originating visitor’s country.
+The `CF-IPCountry` header contains a two-character country code of the originating visitor's country.
Besides the [ISO-3166-1 alpha-2 codes](https://www.iso.org/iso-3166-country-codes.html), Cloudflare uses the following special country codes:
@@ -107,44 +104,67 @@ Besides the [ISO-3166-1 alpha-2 codes](https://www.iso.org/iso-3166-country-code
To add this header to requests, along with other HTTP headers with location information for the visitor's IP address, [enable the **Add visitor location headers** Managed Transform](/rules/transform/managed-transforms/configure/).
:::note
-
The `CF-IPCountry` header is removed from requests made from a Worker to an origin that is not proxied behind Cloudflare.
-
:::
-## CF-Visitor
+### CF-Visitor
-Currently, this header is a JSON object, containing only one key called “scheme”. The header will be either HTTP or HTTPS, and it is only relevant if you need to enable Flexible SSL in your Cloudflare settings. For example: `CF-Visitor: { \"scheme\":\"https\"}`.
+Currently, this header is a JSON object, containing only one key called `scheme`. The header will be either HTTP or HTTPS, and it is only relevant if you need to enable Flexible SSL in your Cloudflare settings. For example: `CF-Visitor: { \"scheme\":\"https\"}`.
-## CDN-Loop
+### CDN-Loop
-`CDN-Loop` allows Cloudflare to specify how many times a request can enter Cloudflare's network before it is blocked as a looping request. For example: `CDN-Loop: cloudflare`
+`CDN-Loop` allows Cloudflare to specify how many times a request can enter Cloudflare's network before it is blocked as a looping request. For example: `CDN-Loop: cloudflare`.
-## CF-Worker
+### CF-Worker
-The `CF-Worker` request header is added to an edge Worker subrequest that identifies the host that spawned the subrequest. This is useful when you want to protect yourself against cross-zone worker subrequests. For example: `CF-Worker: example.com`.
+The `CF-Worker` request header is added to an edge Worker subrequest that identifies the host that spawned the subrequest. This is useful when you want to protect yourself against cross-zone Worker subrequests. For example: `CF-Worker: example.com`.
You can add `CF-Worker` header on server logs similar to the way you add the [`CF-RAY`](/support/troubleshooting/general-troubleshooting/gathering-information-for-troubleshooting-sites/#add-the-cf-ray-header-to-your-logs) header. To do that, add `$http_cf_worker` in the log format file: `log_format cf_custom "CF-Worker:$http_cf_worker"'`
`CF-Worker` is added to all Worker subrequests sent via `fetch()`. It is set to the name of the zone which owns the Worker making the subrequest. For example, a Worker script on route for `foo.example.com/*` from `example.com` will have all subrequests with the header:
-`CF-Worker`: `example.com`
+```txt
+CF-Worker: example.com
+```
The intended purpose of this header is to provide a means for recipients (for example, origins, load balancers, other Workers) to recognize, filter, and route traffic generated by Workers on specific zones.
:::note
-
-When configuring WAF custom rules, do not match on this header. These rules are applied before Cloudflare adds the `CF-Worker` header. Instead, use the [`cf.worker.upstream_zone`](/ruleset-engine/rules-language/fields/reference/cf.worker.upstream_zone/) dynamic field, which contains the same value and exists for the same purpose.
-
+When configuring WAF custom rules, do not match on this header. These rules are applied before Cloudflare adds the `CF-Worker` header. Instead, use the [`cf.worker.upstream_zone`](/ruleset-engine/rules-language/fields/reference/cf.worker.upstream_zone/) field, which contains the same value and exists for the same purpose.
:::
-## Connection
+### Connection
For incoming requests, the value of this header will always be set to `Keep-Alive`. If the client set a different value, such as `close`, it will be overwritten. Note that is also the case when the client uses HTTP/2 or HTTP/3 to connect.
-## Considerations for Spectrum
+### Considerations for Spectrum
When using Spectrum with a TCP application, these headers are not visible at the origin as they are HTTP headers. If you wish to utilize these in your application, there are two options:
- Use an HTTP or HTTPS Spectrum app instead of TCP
- Use the [Proxy Protocol feature](/spectrum/how-to/enable-proxy-protocol/)
+
+## Response headers
+
+Cloudflare will remove some HTTP headers from the response sent back to the visitor and add some Cloudflare-specific HTTP headers.
+
+### Removed response headers
+
+Cloudflare passes all HTTP headers in the response from the origin server back to the visitor with the exception of the following headers:
+
+- `X-Accel-Buffering`
+- `X-Accel-Charset`
+- `X-Accel-Limit-Rate`
+- `X-Accel-Redirect`
+
+### Added response headers
+
+Cloudflare adds the HTTP headers specified below to the response sent to the visitor.
+
+#### Cf-Ray
+
+The `Cf-Ray` value returned to the visitor will be the same `Cf-Ray` value that was sent to the origin server.
+
+#### Cf-Cache-Status
+
+A list of all possible `Cf-Cache-Status` values is contained in [Cloudflare cache responses](/cache/concepts/cache-responses/).
diff --git a/src/content/docs/fundamentals/setup/manage-members/roles.mdx b/src/content/docs/fundamentals/setup/manage-members/roles.mdx
index f4d2eb54cdff95..e36f7984058d52 100644
--- a/src/content/docs/fundamentals/setup/manage-members/roles.mdx
+++ b/src/content/docs/fundamentals/setup/manage-members/roles.mdx
@@ -39,11 +39,11 @@ Account-scoped roles apply across an entire Cloudflare account, and through all
| Cloudflare Zero Trust Read Only | Can access [Cloudflare for Zero Trust](/cloudflare-one/) read only mode. |
| Cloudflare Zero Trust Reporting | Can access [Cloudflare for Zero Trust](/cloudflare-one/) reporting data. |
| DNS | Can edit [DNS records](/dns/manage-dns-records/). |
-| Email Configuration Admin | Grants write access to all of CES, [CASB](/cloudflare-one/applications/casb/), [DLP](/cloudflare-one/policies/data-loss-prevention/), [Gateway](/cloudflare-one/policies/gateway/), and [Tunnels](/cloudflare-one/connections/connect-networks/), except Mail Preview, Raw Email, on-demand reports, actions on emails, and Submissions, Submission Transparency (Requires Cloudflare Zero Trust PII). |
-| Email Integration Admin | Grants write access to CES account integration only, [CASB](/cloudflare-one/applications/casb/), [DLP](/cloudflare-one/policies/data-loss-prevention/), [Gateway](/cloudflare-one/policies/gateway/), and [Tunnels](/cloudflare-one/connections/connect-networks/). |
-| Email Security Analyst | Grants write access to all of CES, except Settings which is read only (Requires Cloudflare Zero Trust PII). |
-| Email Security Read Only | Grants read access to all of CES, but cannot see Raw Email, take action on emails, or make Submissions (Requires Cloudflare Zero Trust PII). |
-| Email Security Reporting | Grants read access to CES Home, PhishGuard, and Submission Transparency. |
+| Email Configuration Admin | Grants write access to all of Email Security, [CASB](/cloudflare-one/applications/casb/), [DLP](/cloudflare-one/policies/data-loss-prevention/), [Gateway](/cloudflare-one/policies/gateway/), and [Tunnels](/cloudflare-one/connections/connect-networks/), except Mail Preview, Raw Email, on-demand reports, actions on emails, and Submissions, Submission Transparency (Requires Cloudflare Zero Trust PII). |
+| Email Integration Admin | Grants write access to Email Security account integration only, [CASB](/cloudflare-one/applications/casb/), [DLP](/cloudflare-one/policies/data-loss-prevention/), [Gateway](/cloudflare-one/policies/gateway/), and [Tunnels](/cloudflare-one/connections/connect-networks/). |
+| Email Security Analyst | Grants write access to all of Email Security, except Settings which is read only (Requires Cloudflare Zero Trust PII). |
+| Email Security Read Only | Grants read access to all of Email Security, but cannot see Raw Email, take action on emails, or make Submissions (Requires Cloudflare Zero Trust PII). |
+| Email Security Reporting | Grants read access to Email Security Home, PhishGuard, and Submission Transparency. |
| Firewall | Can edit [WAF](/waf/), [IP Access rules](/waf/tools/ip-access-rules/), [Zone Lockdown](/waf/tools/zone-lockdown/) settings, and [Cache Rules](/cache/how-to/cache-rules/). |
| Load Balancer | Can edit [Load Balancers](/load-balancing/), Pools, Origins, and Health Checks. |
| Log Share | Can edit [Log Share](/logs/) configuration. |
@@ -51,8 +51,8 @@ Account-scoped roles apply across an entire Cloudflare account, and through all
| Magic Network Monitoring | Can view and edit [MNM configuration](/magic-network-monitoring/). |
| Magic Network Monitoring Admin | Can view, edit, create, and delete [MNM configuration](/magic-network-monitoring/). |
| Magic Network Monitoring Read-Only | Can view [MNM configuration](/magic-network-monitoring/). |
-| Network Services Write (Magic) | Grants write access to network configurations for Magic services. |
-| Network Services Read (Magic) | Grants read access to network configurations for Magic services. |
+| Network Services Write (Magic) | Grants write access to network configurations for Magic services. Magic Tunnel health checks require the Analytics role for non-admin users. |
+| Network Services Read (Magic) | Grants read access to network configurations for Magic services. Magic Tunnel health checks require the Analytics role for non-admin users. |
| Minimal Account Access | Can view account, and nothing else. |
| Page Shield | Grants write access to [Page Shield](/page-shield/) across the whole account. |
| Page Shield Read | Grants read access to [Page Shield](/page-shield/) across the whole account. |
diff --git a/src/content/docs/hyperdrive/configuration/connect-to-postgres.mdx b/src/content/docs/hyperdrive/configuration/connect-to-postgres.mdx
index ec3c878aac486f..33d68e9adee0dc 100644
--- a/src/content/docs/hyperdrive/configuration/connect-to-postgres.mdx
+++ b/src/content/docs/hyperdrive/configuration/connect-to-postgres.mdx
@@ -5,7 +5,7 @@ sidebar:
order: 3
---
-import { TabItem, Tabs, Render } from "~/components";
+import { TabItem, Tabs, Render, WranglerConfig } from "~/components";
Hyperdrive supports PostgreSQL and PostgreSQL-compatible databases, [popular drivers](#supported-drivers) and Object Relational Mapper (ORM) libraries that use those drivers.
@@ -26,9 +26,7 @@ When using wrangler, replace the placeholder value provided to `--connection-str
npx wrangler hyperdrive create my-first-hyperdrive --connection-string="postgres://user:password@database.host.example.com:5432/databasenamehere"
```
-The command above will output the ID of your Hyperdrive, which you will need to set in the `wrangler.toml` configuration file for your Workers project:
-
-import { WranglerConfig } from "~/components";
+The command above will output the ID of your Hyperdrive, which you will need to set in the `wrangler.toml / wrangler.json` file for your Workers project:
@@ -52,18 +50,24 @@ Refer to the [Examples documentation](/hyperdrive/examples/) for step-by-step gu
Hyperdrive uses Workers [TCP socket support](/workers/runtime-apis/tcp-sockets/#connect) to support TCP connections to databases. The following table lists the supported database drivers and the minimum version that works with Hyperdrive:
-| Driver | Documentation | Minimum Version Required | Notes |
-| ----------------------------- | ---------------------------------------------------------------------------- | ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| Postgres.js (**recommended**) | [Postgres.js documentation](https://github.com/porsager/postgres) | `postgres@3.4.4` | Supported in both Workers & Pages. |
-| node-postgres - `pg` | [node-postgres - `pg` documentation](https://node-postgres.com/) | `pg@8.13.0` | `8.11.4` introduced a bug with URL parsing and will not work. `8.11.5` fixes this. Requires `compatibility_flags = ["nodejs_compat"]` and `compatibility_date = "2024-09-23"` - refer to [Node.js compatibility](/workers/runtime-apis/nodejs). Requires wrangler `3.78.7` or later. |
-| Drizzle | [Drizzle documentation](https://orm.drizzle.team/) | `0.26.2`^ | |
-| Kysely | [Kysely documentation](https://kysely.dev/) | `0.26.3`^ | |
-| [rust-postgres](https://github.com/sfackler/rust-postgres) | [rust-postgres documentation](https://docs.rs/postgres/latest/postgres/) | `v0.19.8` | Use the [`query_typed`](https://docs.rs/postgres/latest/postgres/struct.Client.html#method.query_typed) method for best performance. |
+| Driver | Documentation | Minimum Version Required | Notes |
+| ---------------------------------------------------------- | ------------------------------------------------------------------------ | ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| Postgres.js (**recommended**) | [Postgres.js documentation](https://github.com/porsager/postgres) | `postgres@3.4.4` | Supported in both Workers & Pages. |
+| node-postgres - `pg` | [node-postgres - `pg` documentation](https://node-postgres.com/) | `pg@8.13.0` | `8.11.4` introduced a bug with URL parsing and will not work. `8.11.5` fixes this. Requires `compatibility_flags = ["nodejs_compat"]` and `compatibility_date = "2024-09-23"` - refer to [Node.js compatibility](/workers/runtime-apis/nodejs). Requires wrangler `3.78.7` or later. |
+| Drizzle | [Drizzle documentation](https://orm.drizzle.team/) | `0.26.2`^ | |
+| Kysely | [Kysely documentation](https://kysely.dev/) | `0.26.3`^ | |
+| [rust-postgres](https://github.com/sfackler/rust-postgres) | [rust-postgres documentation](https://docs.rs/postgres/latest/postgres/) | `v0.19.8` | Use the [`query_typed`](https://docs.rs/postgres/latest/postgres/struct.Client.html#method.query_typed) method for best performance. |
^ _The marked libraries use `node-postgres` as a dependency._
Other drivers and ORMs not listed may also be supported: this list is not exhaustive.
+### Database drivers and Node.js compatibility
+
+[Node.js compatibility](/workers/runtime-apis/nodejs/) is required for database drivers, including Postgres.js, and needs to be configured for your Workers project.
+
+
+
## Supported TLS (SSL) modes
Hyperdrive supports the following [PostgreSQL TLS (SSL)](https://www.postgresql.org/docs/current/libpq-ssl.html) connection modes when connecting to your origin database:
@@ -104,7 +108,7 @@ Install the `node-postgres` driver:
npm install pg
```
-**Ensure you have `compatibility_flags` and `compatibility_date` set in your `wrangler.toml` configuration file** as shown below:
+**Ensure you have `compatibility_flags` and `compatibility_date` set in your `wrangler.toml / wrangler.json` file** as shown below:
@@ -114,7 +118,7 @@ Create a new `Client` instance and pass the Hyperdrive parameters:
import { Client } from "pg";
export interface Env {
- // If you set another name in wrangler.toml as the value for 'binding',
+ // If you set another name in the `wrangler.toml / wrangler.json` file as the value for 'binding',
// replace "HYPERDRIVE" with the variable name you defined.
HYPERDRIVE: Hyperdrive;
}
diff --git a/src/content/docs/hyperdrive/configuration/connect-to-private-database.mdx b/src/content/docs/hyperdrive/configuration/connect-to-private-database.mdx
index fc34cd4eec2e6a..a442d96979dc5c 100644
--- a/src/content/docs/hyperdrive/configuration/connect-to-private-database.mdx
+++ b/src/content/docs/hyperdrive/configuration/connect-to-private-database.mdx
@@ -18,6 +18,12 @@ When your database is isolated within a private network (such as a [virtual priv
- [Cloudflare Tunnel](/cloudflare-one/connections/connect-networks/) is used to establish the secure tunnel connection.
- [Cloudflare Access](/cloudflare-one/policies/access/) is used to restrict access to your tunnel such that only specific Hyperdrive configurations can access it.
+A request from the Cloudflare Worker to the origin database goes through Hyperdrive, Cloudflare Access, and the Cloudflare Tunnel established by `cloudflared`. `cloudflared` must be running in the private network in which your database is accessible.
+
+The Cloudflare Tunnel will establish an outbound bidirectional connection from your private network to Cloudflare. Cloudflare Access will secure your Cloudflare Tunnel to be only accessible by your Hyperdrive configuration.
+
+
+
:::caution[Warning]
@@ -44,7 +50,7 @@ Your tunnel must be configured to use a public hostname so that Hyperdrive can r
1. In the **Public Hostnames** tab, choose a **Domain** and specify any subdomain or path information. This will be used in your Hyperdrive configuration to route to this tunnel.
-2. In the **Service** section, specify **Type** `TCP` and the URL and configured port of your database, such as `localhost:5432`. This address will be used by the tunnel to route requests to your database.
+2. In the **Service** section, specify **Type** `TCP` and the URL and configured port of your database, such as `localhost:5432` or `my-database-host.database-provider.com:5432`. This address will be used by the tunnel to route requests to your database.
3. Select **Save tunnel**.
@@ -52,7 +58,29 @@ Your tunnel must be configured to use a public hostname so that Hyperdrive can r
If you are setting up the tunnel through the CLI instead ([locally-managed tunnel](/cloudflare-one/connections/connect-networks/configure-tunnels/local-management/)), you will have to complete these steps manually. Follow the Cloudflare Zero Trust documentation to [add a public hostname to your tunnel](/cloudflare-one/connections/connect-networks/routing-to-tunnel/dns/) and [configure the public hostname to route to the address of your database](/cloudflare-one/connections/connect-networks/configure-tunnels/local-management/configuration-file/).
:::
-## 2. Create a service token
+## 2. Create and configure Hyperdrive to connect to the Cloudflare Tunnel
+
+To restrict access to the Cloudflare Tunnel to Hyperdrive, a [Cloudflare Access application](/cloudflare-one/applications/) must be configured with a [Policy](/cloudflare-one/policies/) that requires requests to contain a valid [Service Auth token](/cloudflare-one/policies/access/#service-auth).
+
+The Cloudflare dashboard can automatically create and configure the underlying [Cloudflare Access application](/cloudflare-one/applications/), [Service Auth token](/cloudflare-one/policies/access/#service-auth), and [Policy](/cloudflare-one/policies/) on your behalf. Alternatively, you can manually create the Access application and configure the Policies.
+
+
+
+### 2.1 Create a Hyperdrive configuration in the Cloudflare dashboard
+
+Create a Hyperdrive configuration in the Cloudflare dashboard to automatically configure Hyperdrive to connect to your Cloudflare Tunnel.
+
+1. In the [Cloudflare dashboard](https://dash.cloudflare.com/?to=/:account/workers/hyperdrive), navigate to **Storage & Databases > Hyperdrive** and click **Create configuration**.
+2. Select **Private database**.
+3. In the **Networking details** section, select the tunnel you are connecting to.
+4. In the **Networking details** section, select the hostname associated with the tunnel. If there is no hostname for your database, return to step [1.2. Connect your database using a public hostname](/hyperdrive/configuration/connect-to-private-database/#12-connect-your-database-using-a-public-hostname).
+5. In the **Access Service Authentication Token** section, select **Create new (automatic)**.
+6. In the **Access Application** section, select **Create new (automatic)**.
+7. In the **Database connection details** section, enter the database **name**, **user**, and **password**.
+
+
+
+### 2.2 Create a service token

The service token will be used to restrict requests to the tunnel, and is needed for the next step.

@@ -72,7 +100,7 @@ The service token will be used to restrict requests to the tunnel, and is needed

This is the only time Cloudflare Access will display the Client Secret. If you lose the Client Secret, you must regenerate the service token.

:::

-## 3. Create an Access application to secure the tunnel
+### 2.3 Create an Access application to secure the tunnel

[Cloudflare Access](/cloudflare-one/policies/access/) will be used to verify that requests to the tunnel originate from Hyperdrive using the service token created above.

@@ -110,7 +138,7 @@ The service token will be used to restrict requests to the tunnel, and is needed

17. Save the application.

-## 4. Create a Hyperdrive configuration
+### 2.4 Create a Hyperdrive configuration
To create a Hyperdrive configuration for your private database, you'll need to specify the Access application and Cloudflare Tunnel information upon creation.
@@ -152,7 +180,9 @@ In addition, it will also set the Access Client ID and the Access Client Secret
When creating the Hyperdrive configuration for the private database, you must enter the `access-client-id` and the `access-client-secret`, and omit the `port`. Hyperdrive will route database messages to the public hostname of the tunnel, and the tunnel will rely on its service configuration (as configured in [1.2. Connect your database using a public hostname](#12-connect-your-database-using-a-public-hostname)) to route requests to the database within your private network.
:::
-## 5. Query your Hyperdrive configuration from a Worker (optional)
+
+
+## 3. Query your Hyperdrive configuration from a Worker (optional)
To test your Hyperdrive configuration to the database using Cloudflare Tunnel and Access, use the Hyperdrive configuration ID in your Worker and deploy it.
@@ -177,4 +207,5 @@ If you successfully receive the list of `pg_tables` from your database when you
## Troubleshooting
If you encounter issues when setting up your Hyperdrive configuration with tunnels to a private database, consider these common solutions, in addition to [general troubleshooting steps](/hyperdrive/observability/troubleshooting/) for Hyperdrive:
-* Ensure your database is configured to use TLS (SSL). Hyperdrive requires TLS (SSL) to connect.
+
+- Ensure your database is configured to use TLS (SSL). Hyperdrive requires TLS (SSL) to connect.
diff --git a/src/content/docs/hyperdrive/configuration/local-development.mdx b/src/content/docs/hyperdrive/configuration/local-development.mdx
index ad50bd38e7e49b..7f566e156f8bcc 100644
--- a/src/content/docs/hyperdrive/configuration/local-development.mdx
+++ b/src/content/docs/hyperdrive/configuration/local-development.mdx
@@ -5,6 +5,8 @@ sidebar:
order: 6
---
+import { WranglerConfig } from "~/components";
+
Hyperdrive can be used when developing and testing your Workers locally by connecting to any local database instance running on your machine directly. Local development uses [Wrangler](/workers/wrangler/install-and-update/), the command-line interface for Workers, to manage local development sessions and state.
## Configure local development
@@ -19,10 +21,10 @@ If you are new to Hyperdrive and/or Cloudflare Workers, refer to [Hyperdrive tut
To specify a database to connect to when developing locally, you can:
-- **Recommended** Create a `WRANGLER_HYPERDRIVE_LOCAL_CONNECTION_STRING_` environmental variable with the connection string of your database. `` is the name of the binding assigned to your Hyperdrive in your `wrangler.toml` or Pages configuration. This allows you to avoid committing potentially sensitive credentials to source control in your `wrangler.toml`, if your test/development database is not ephemeral. If you have configured multiple Hyperdrive bindings, replace `` with the unique binding name for each.
-- Set `localConnectionString` in `wrangler.toml`.
+- **Recommended** Create a `WRANGLER_HYPERDRIVE_LOCAL_CONNECTION_STRING_` environmental variable with the connection string of your database. `` is the name of the binding assigned to your Hyperdrive in your `wrangler.toml / wrangler.json` file or Pages configuration. This allows you to avoid committing potentially sensitive credentials to source control in your `wrangler.toml / wrangler.json` file, if your test/development database is not ephemeral. If you have configured multiple Hyperdrive bindings, replace `` with the unique binding name for each.
+- Set `localConnectionString` in the `wrangler.toml / wrangler.json` file.
-If both the `WRANGLER_HYPERDRIVE_LOCAL_CONNECTION_STRING_` environmental variable and `localConnectionString` in `wrangler.toml` are set, `wrangler dev` will use the environmental variable instead. Use `unset WRANGLER_HYPERDRIVE_LOCAL_CONNECTION_STRING_` to unset any existing environmental variables.
+If both the `WRANGLER_HYPERDRIVE_LOCAL_CONNECTION_STRING_` environmental variable and `localConnectionString` in the `wrangler.toml / wrangler.json` file are set, `wrangler dev` will use the environmental variable instead. Use `unset WRANGLER_HYPERDRIVE_LOCAL_CONNECTION_STRING_` to unset any existing environmental variables.
For example, to use the environmental variable, export the environmental variable before running `wrangler dev`:
@@ -33,9 +35,7 @@ export WRANGLER_HYPERDRIVE_LOCAL_CONNECTION_STRING_TEST_DB="postgres://user:pass
npx wrangler dev
```
-To configure a `localConnectionString` in `wrangler.toml`, ensure your Hyperdrive bindings have a `localConnectionString` property set:
-
-import { WranglerConfig } from "~/components";
+To configure a `localConnectionString` in the `wrangler.toml / wrangler.json` file, ensure your Hyperdrive bindings have a `localConnectionString` property set:
diff --git a/src/content/docs/hyperdrive/configuration/rotate-credentials.mdx b/src/content/docs/hyperdrive/configuration/rotate-credentials.mdx
index 971ff24d1b5d52..0975fa258d06b5 100644
--- a/src/content/docs/hyperdrive/configuration/rotate-credentials.mdx
+++ b/src/content/docs/hyperdrive/configuration/rotate-credentials.mdx
@@ -5,7 +5,7 @@ sidebar:
order: 8
---
-import { TabItem, Tabs, Render } from "~/components";
+import { TabItem, Tabs, Render, WranglerConfig } from "~/components";
You can change the connection information and credentials of your Hyperdrive configuration in one of two ways:
@@ -23,9 +23,7 @@ To create a Hyperdrive configuration that connects to an existing PostgreSQL dat
npx wrangler hyperdrive create my-updated-hyperdrive --connection-string=""
```
-The command above will output the ID of your Hyperdrive. Set this ID in the `wrangler.toml` configuration file for your Workers project:
-
-import { WranglerConfig } from "~/components";
+The command above will output the ID of your Hyperdrive. Set this ID in the `wrangler.toml / wrangler.json` file for your Workers project:
diff --git a/src/content/docs/hyperdrive/get-started.mdx b/src/content/docs/hyperdrive/get-started.mdx
index c172656857ce97..1a2287e1e9ee72 100644
--- a/src/content/docs/hyperdrive/get-started.mdx
+++ b/src/content/docs/hyperdrive/get-started.mdx
@@ -72,25 +72,13 @@ Create a new project named `hyperdrive-tutorial` by running:
This will create a new `hyperdrive-tutorial` directory. Your new `hyperdrive-tutorial` directory will include:
- A `"Hello World"` [Worker](/workers/get-started/guide/#3-write-code) at `src/index.ts`.
-- A [`wrangler.toml`](/workers/wrangler/configuration/) configuration file. `wrangler.toml` is how your `hyperdrive-tutorial` Worker will connect to Hyperdrive.
+- A [`wrangler.json`](/workers/wrangler/configuration/) configuration file. `wrangler.json` is how your `hyperdrive-tutorial` Worker will connect to Hyperdrive.
-:::note
-
-Note that the `wrangler.toml` file contains the following option:
-
-import { WranglerConfig } from "~/components";
-
-
+### Enable Node.js compatibility
-```toml title="wrangler.toml"
-compatibility_flags = [ "nodejs_compat" ]
-```
-
-
+[Node.js compatibility](/workers/runtime-apis/nodejs/) is required for database drivers, including Postgres.js, and needs to be configured for your Workers project.
-This enables the Node.js compatibility mode which is required for database drivers, including Postgres.js.
-
-:::
+
## 3. Connect Hyperdrive to a database
@@ -112,7 +100,7 @@ cd hyperdrive-tutorial
:::note
-Support for the new `hyperdrive` commands in the wrangler CLI requires a wrangler version of `3.10.0` or later. You can use `npx wrangler@latest` to always ensure you are using the latest version of wrangler.
+Support for the new `hyperdrive` commands in the wrangler CLI requires a wrangler version of `3.10.0` or later. You can use `npx wrangler@latest` to always ensure you are using the latest version of Wrangler.
:::
@@ -194,7 +182,7 @@ Populate your `index.ts` file with the following code:
import postgres from "postgres";
export interface Env {
- // If you set another name in wrangler.toml as the value for 'binding',
+ // If you set another name in the Wrangler config file as the value for 'binding',
// replace "HYPERDRIVE" with the variable name you defined.
HYPERDRIVE: Hyperdrive;
}
diff --git a/src/content/docs/hyperdrive/observability/troubleshooting.mdx b/src/content/docs/hyperdrive/observability/troubleshooting.mdx
index 8254816b8ce6d8..fb12ad92baa7aa 100644
--- a/src/content/docs/hyperdrive/observability/troubleshooting.mdx
+++ b/src/content/docs/hyperdrive/observability/troubleshooting.mdx
@@ -3,7 +3,6 @@ pcx_content_type: concept
title: Troubleshoot and debug
sidebar:
order: 10
-
---
Troubleshoot and debug errors commonly associated with connecting to a database with Hyperdrive.
@@ -43,6 +42,12 @@ Hyperdrive may also encounter `ErrorResponse` wire protocol messages sent by you
| `Failed to acquire a connection from the pool.` | Hyperdrive timed out while waiting for a connection to your database, or cannot connect at all. | If you are seeing this error intermittently, your Hyperdrive pool is being exhausted because too many connections are being held open for too long by your worker. This can be caused by a myriad of different issues, but long-running queries/transactions are a common offender. |
| `Server connection attempt failed: connection_refused` | Hyperdrive is unable to create new connections to your origin database. | A network firewall or access control list (ACL) is likely rejecting requests from Hyperdrive. Ensure you have allowed connections from the public Internet. Sometimes, this can be caused by your database host provider refusing incoming connections when you go over your connection limit. |
+### Node errors
+
+| Error Message | Details | Recommended fixes |
+| ------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
+| `Uncaught Error: No such module "node:"` | Your Cloudflare Workers project or a library that it imports is trying to access a Node module that is not available. | Enable [Node.js compatibility](/workers/runtime-apis/nodejs/) for your Cloudflare Workers project to maximize compatibility. |
+
### Improve performance
Having query traffic written as transactions can limit performance. This is because in the case of a transaction, the connection must be held for the duration of the transaction, which limits connection multiplexing. If there are multiple queries per transaction, this can be particularly impactful on connection multiplexing. Where possible, we recommend not wrapping queries in transactions to allow the connections to be shared more aggressively.
diff --git a/src/content/docs/hyperdrive/platform/limits.mdx b/src/content/docs/hyperdrive/platform/limits.mdx
index eda9a473a24db6..07a3cfa0439ee2 100644
--- a/src/content/docs/hyperdrive/platform/limits.mdx
+++ b/src/content/docs/hyperdrive/platform/limits.mdx
@@ -17,21 +17,18 @@ The following limits apply to Hyperdrive configuration, connections, and queries
| Maximum query (statement) duration | 60 seconds |
| Maximum username length | 63 characters (bytes) [^1] |
| Maximum database name length | 63 characters (bytes) [^1] |
-| Maximum origin database connections per region | 10-20 |
-| Maximum potential origin database connections | 10 \* number of regions serving traffic (approx. \~80 - 100 connections) [^2] |
+| Maximum potential origin database connections | approx. \~100 connections [^2] |
:::note
-
Hyperdrive does not have a hard limit on the number of concurrent *client* connections made from your Workers.
As many hosted databases have limits on the number of unique connections they can manage, Hyperdrive attempts to keep number of concurrent pooled connections to your origin database lower.
-
:::
[^1]: This is a limit enforced by PostgreSQL. Some database providers may enforce smaller limits.
-[^2]: Hyperdrive maintains semi-regional connection pools to balance between latency, reliability and overall load on your origin database.
+[^2]: Hyperdrive is a distributed system, so it is possible for a client to be unable to reach an existing pool. In this scenario, a new pool will be established, with its own allocation of connections. This favors availability over strictly enforcing limits, but does mean that it is possible in edge cases to overshoot the normal connection limit.
:::note
You can request adjustments to limits that conflict with your project goals by contacting Cloudflare. Not all limits can be increased. To request an increase, submit a [Limit Increase Request](https://forms.gle/ukpeZVLWLnKeixDu7) and we will contact you with next steps.
-:::
+:::
\ No newline at end of file
diff --git a/src/content/docs/hyperdrive/tutorials/serverless-timeseries-api-with-timescale/index.mdx b/src/content/docs/hyperdrive/tutorials/serverless-timeseries-api-with-timescale/index.mdx
index 523cb957f4a3ac..bd3d0c12ffc8ee 100644
--- a/src/content/docs/hyperdrive/tutorials/serverless-timeseries-api-with-timescale/index.mdx
+++ b/src/content/docs/hyperdrive/tutorials/serverless-timeseries-api-with-timescale/index.mdx
@@ -13,7 +13,7 @@ languages:
- SQL
---
-import { Render, PackageManagers } from "~/components";
+import { Render, PackageManagers, WranglerConfig } from "~/components";
In this tutorial, you will learn to build an API on Workers which will ingest and query time-series data stored in [Timescale](https://www.timescale.com/) (they make PostgreSQL faster in the cloud).
@@ -140,9 +140,7 @@ Hyperdrive will attempt to connect to your database with the provided credential
:::
-This command outputs your Hyperdrive ID. You can now bind your Hyperdrive configuration to your Worker in your `wrangler.toml` configuration by replacing the content with the following:
-
-import { WranglerConfig } from "~/components";
+This command outputs your Hyperdrive ID. You can now bind your Hyperdrive configuration to your Worker in your Wrangler configuration by replacing the content with the following:
diff --git a/src/content/docs/images/index.mdx b/src/content/docs/images/index.mdx
index e73544be71eabd..a199fafffe78d8 100644
--- a/src/content/docs/images/index.mdx
+++ b/src/content/docs/images/index.mdx
@@ -1,6 +1,7 @@
---
title: Overview
pcx_content_type: overview
+description: Streamline your image infrastructure with Cloudflare Images. Store, transform, and deliver images efficiently using Cloudflare's global network.
sidebar:
order: 1
head:
diff --git a/src/content/docs/images/pricing.mdx b/src/content/docs/images/pricing.mdx
index 997e060f1470ae..a1323da7e132c1 100644
--- a/src/content/docs/images/pricing.mdx
+++ b/src/content/docs/images/pricing.mdx
@@ -24,7 +24,7 @@ On the Free plan, you can request up to 5,000 unique transformations each month
Once you exceed 5,000 unique transformations:
- Existing transformations in cache will continue to be served as expected.
-- New transformations will return a `9423` error. If your source image is from the same domain where the transformation is served, then you can use the [`onerror` parameter](/images/transform-images/transform-via-url/#onerror) to redirect to the original image.
+- New transformations will return a `9422` error. If your source image is from the same domain where the transformation is served, then you can use the [`onerror` parameter](/images/transform-images/transform-via-url/#onerror) to redirect to the original image.
- You will not be charged for exceeding the limits in the Free plan.
To request more than 5,000 unique transformations each month, you can purchase an Images Paid plan.
diff --git a/src/content/docs/images/reference/troubleshooting.mdx b/src/content/docs/images/reference/troubleshooting.mdx
index e9e8bda31cdbe3..5891255cadf231 100644
--- a/src/content/docs/images/reference/troubleshooting.mdx
+++ b/src/content/docs/images/reference/troubleshooting.mdx
@@ -40,7 +40,7 @@ When resizing fails, the response body contains an error message explaining the
* 9504, 9505, & 9510 — The origin server could not be contacted because the origin server may be down or overloaded. Try again later.
* 9523 — The `/cdn-cgi/image/` resizing service could not perform resizing. This may happen when an image has invalid format. Use correctly formatted image and try again.
* 9524 — The `/cdn-cgi/image/` resizing service could not perform resizing. This may happen when an image URL is intercepted by a Worker. As an alternative you can [resize within the Worker](/images/transform-images/transform-via-workers/). This can also happen when using a `pages.dev` URL of a [Cloudflare Pages](/pages/) project. In that case, you can use a [Custom Domain](/pages/configuration/custom-domains/) instead.
-* 9511 — The image format is not supported. Refer to [Supported formats and limitations](/images/transform-images/) to learn about supported input and output formats.
+* 9520 — The image format is not supported. Refer to [Supported formats and limitations](/images/transform-images/) to learn about supported input and output formats.
* 9522 — The image exceeded the processing limit. This may happen briefly after purging an entire zone or when files with very large dimensions are requested. If the problem persists, contact support.
* 9422, 9424, 9516, 9517, 9518, 9522 & 9523 — Internal errors. Please contact support if you encounter these errors.
diff --git a/src/content/docs/kv/concepts/kv-bindings.mdx b/src/content/docs/kv/concepts/kv-bindings.mdx
index 8ef9fac5ee0949..f0998ba208a3ea 100644
--- a/src/content/docs/kv/concepts/kv-bindings.mdx
+++ b/src/content/docs/kv/concepts/kv-bindings.mdx
@@ -5,6 +5,8 @@ sidebar:
order: 7
---
+import { WranglerConfig } from "~/components";
+
KV [bindings](/workers/runtime-apis/bindings/) allow for communication between a Worker and a KV namespace.
Configure KV bindings in the [wrangler.toml file](/workers/wrangler/configuration/).
@@ -21,9 +23,7 @@ A KV namespace will have a name you choose (for example, `My tasks`), and an ass
To execute your Worker, define the binding.
-In the following example, the binding is called `TODO`. In the `kv_namespaces` portion of your `wrangler.toml` file, add:
-
-import { WranglerConfig } from "~/components";
+In the following example, the binding is called `TODO`. In the `kv_namespaces` portion of your Wrangler file, add:
@@ -58,7 +58,7 @@ export default {
When you use Wrangler to develop locally with the `wrangler dev` command, Wrangler will default to using a local version of KV to avoid interfering with any of your live production data in KV. This means that reading keys that you have not written locally will return `null`.
-To have `wrangler dev` connect to your Workers KV namespace running on Cloudflare's global network, call `wrangler dev --remote` instead. This will use the `preview_id` of the KV binding configuration in the `wrangler.toml` file. This is how a `wrangler.toml` file looks with the `preview_id` specified.
+To have `wrangler dev` connect to your Workers KV namespace running on Cloudflare's global network, call `wrangler dev --remote` instead. This will use the `preview_id` of the KV binding configuration in the Wrangler file. This is how a Wrangler file looks with the `preview_id` specified.
@@ -78,7 +78,7 @@ kv_namespaces = [
## Access KV from Durable Objects and Workers using ES modules format
-[Durable Objects](/durable-objects/) use ES modules format. Instead of a global variable, bindings are available as properties of the `env` parameter [passed to the constructor](/durable-objects/get-started/walkthrough/#3-write-a-durable-object-class).
+[Durable Objects](/durable-objects/) use ES modules format. Instead of a global variable, bindings are available as properties of the `env` parameter [passed to the constructor](/durable-objects/get-started/tutorial/#3-write-a-durable-object-class).
An example might look like:
diff --git a/src/content/docs/kv/concepts/kv-namespaces.mdx b/src/content/docs/kv/concepts/kv-namespaces.mdx
index 57d4a6c4fea6ba..42939b6ba116e4 100644
--- a/src/content/docs/kv/concepts/kv-namespaces.mdx
+++ b/src/content/docs/kv/concepts/kv-namespaces.mdx
@@ -5,7 +5,7 @@ sidebar:
order: 7
---
-import { Type, MetaInfo } from "~/components";
+import { Type, MetaInfo, WranglerConfig } from "~/components";
A KV namespace is a key-value database replicated to Cloudflare’s global network.
@@ -35,8 +35,6 @@ To bind KV namespaces to your Worker, assign an array of the below object to the
Example:
-import { WranglerConfig } from "~/components";
-
```toml title="wrangler.toml"
diff --git a/src/content/docs/kv/examples/workers-kv-to-serve-assets.mdx b/src/content/docs/kv/examples/workers-kv-to-serve-assets.mdx
index 7fa1702a33f362..ba78758a47484f 100644
--- a/src/content/docs/kv/examples/workers-kv-to-serve-assets.mdx
+++ b/src/content/docs/kv/examples/workers-kv-to-serve-assets.mdx
@@ -80,7 +80,7 @@ To create a KV store via Wrangler:
id = ""
```
-2. In your `wrangler.toml` file, add the following with the values generated in the terminal:
+2. In your Wrangler file, add the following with the values generated in the terminal:
```bash {3} title="wrangler.toml"
[[kv_namespaces]]
@@ -90,7 +90,7 @@ To create a KV store via Wrangler:
The [KV binding](/kv/concepts/kv-bindings/) `assets` is how your Worker will interact with the [KV namespace](/kv/concepts/kv-namespaces/). This binding will be provided as a runtime variable within your Workers code by the Workers runtime.
- We'll also create a preview KV namespace. It is recommended to create a separate KV namespace when developing locally to avoid making changes to the production namespace. When developing locally against remote resources, the Wrangler CLI will only use the namespace specified by `preview_id` in the KV namespace configuration of the `wrangler.toml` file.
+ We'll also create a preview KV namespace. It is recommended to create a separate KV namespace when developing locally to avoid making changes to the production namespace. When developing locally against remote resources, the Wrangler CLI will only use the namespace specified by `preview_id` in the KV namespace configuration of the Wrangler file.
3. In your terminal, run the following command:
@@ -113,7 +113,7 @@ To create a KV store via Wrangler:
preview_id = ""
```
-4. In your `wrangler.toml` file, add the additional preview_id below kv_namespaces with the values generated in the terminal:
+4. In your Wrangler file, add the additional preview_id below kv_namespaces with the values generated in the terminal:
```bash {4} title="wrangler.toml"
[[kv_namespaces]]
@@ -141,7 +141,7 @@ npx wrangler kv key put index.html --path index.html --binding assets --preview
npx wrangler kv key put index.html --path index.html --binding assets --preview
```
-This will create a KV pair with the filename as key and the file content as value, within the our production and preview namespaces specified by your binding in your `wrangler.toml` file.
+This will create a KV pair with the filename as key and the file content as value, within our production and preview namespaces specified by your binding in your Wrangler file.
## 4. Serve static assets from KV from your Worker application
@@ -377,7 +377,7 @@ Run `wrangler deploy` to deploy your Workers project to Cloudflare with the bind
npx wrangler deploy
```
-Wrangler will automatically set your KV binding to use the production KV namespace set in our `wrangler.toml` file with the KV namespace id. Throughout this example, we uploaded our assets to both the preview and the production KV namespaces.
+Wrangler will automatically set your KV binding to use the production KV namespace set in our Wrangler file with the KV namespace id. Throughout this example, we uploaded our assets to both the preview and the production KV namespaces.
We can now verify that our project is properly working by accessing our Workers default hostname and accessing `..dev/index.html` or `..dev/hello-world` to see our deployed Worker in action, generating responses from the values in our KV store.
diff --git a/src/content/docs/kv/get-started.mdx b/src/content/docs/kv/get-started.mdx
index 07864f5ec22060..e3e22e1a1852fd 100644
--- a/src/content/docs/kv/get-started.mdx
+++ b/src/content/docs/kv/get-started.mdx
@@ -61,13 +61,13 @@ Create a new Worker to read and write to your KV namespace.
- testconfig.json
- vitest.config.mts
- worker-configuration.d.ts
- - **wrangler.toml**
+ - **wrangler.json**
Your new `kv-tutorial` directory includes:
- A `"Hello World"` [Worker](/workers/get-started/guide/#3-write-code) in `index.ts`.
- - A [`wrangler.toml`](/workers/wrangler/configuration/) configuration file. `wrangler.toml` is how your `kv-tutorial` Worker accesses your kv database.
+ - A [`wrangler.json`](/workers/wrangler/configuration/) configuration file. `wrangler.json` is how your `kv-tutorial` Worker accesses your kv database.
2. Change into the directory you just created for your Worker project:
@@ -119,7 +119,7 @@ To create a KV namespace via Wrangler:
npx wrangler kv namespace create
```
- The `npx wrangler kv namespace create ` subcommand takes a new binding name as its argument. A KV namespace is created using a concatenation of your Worker’s name (from your `wrangler.toml` file) and the binding name you provide. A `BINDING_ID` is randomly generated for you.
+ The `npx wrangler kv namespace create ` subcommand takes a new binding name as its argument. A KV namespace is created using a concatenation of your Worker’s name (from your Wrangler file) and the binding name you provide. A `BINDING_ID` is randomly generated for you.
For this tutorial, use the binding name `BINDING_NAME`.
@@ -161,7 +161,7 @@ To bind your KV namespace to your Worker:
-1. In your `wrangler.toml` file, add the following with the values generated in your terminal from [step 2](/kv/get-started/#2-create-a-kv-namespace):
+1. In your Wrangler file, add the following with the values generated in your terminal from [step 2](/kv/get-started/#2-create-a-kv-namespace):
@@ -182,7 +182,7 @@ To bind your KV namespace to your Worker:
:::note[Bindings]
-A binding is how your Worker interacts with external resources such as [KV namespaces](/kv/concepts/kv-namespaces/). A binding is a runtime variable that the Workers runtime provides to your code. You can declare a variable name in your `wrangler.toml` file that binds to these resources at runtime, and interact with them through this variable. Every binding's variable name and behavior is determined by you when deploying the Worker.
+A binding is how your Worker interacts with external resources such as [KV namespaces](/kv/concepts/kv-namespaces/). A binding is a runtime variable that the Workers runtime provides to your code. You can declare a variable name in your Wrangler file that binds to these resources at runtime, and interact with them through this variable. Every binding's variable name and behavior is determined by you when deploying the Worker.
Refer to [Environment](/kv/reference/environments/) for more information.
@@ -315,7 +315,7 @@ You can view key-value pairs directly from the dashboard.
When using [`wrangler dev`](/workers/wrangler/commands/#dev) to develop locally, Wrangler defaults to using a local version of KV to avoid interfering with any of your live production data in KV. This means that reading keys that you have not written locally returns null.
-To have `wrangler dev` connect to your Workers KV namespace running on Cloudflare's global network, call `wrangler dev --remote` instead. This uses the `preview_id` of the KV binding configuration in the `wrangler.toml` file. Refer to the [KV binding docs](/kv/concepts/kv-bindings/#use-kv-bindings-when-developing-locally) for more information.
+To have `wrangler dev` connect to your Workers KV namespace running on Cloudflare's global network, call `wrangler dev --remote` instead. This uses the `preview_id` of the KV binding configuration in the Wrangler file. Refer to the [KV binding docs](/kv/concepts/kv-bindings/#use-kv-bindings-when-developing-locally) for more information.
:::
diff --git a/src/content/docs/kv/platform/limits.mdx b/src/content/docs/kv/platform/limits.mdx
index d2f3d3e2a072d6..085a4c2e5f302d 100644
--- a/src/content/docs/kv/platform/limits.mdx
+++ b/src/content/docs/kv/platform/limits.mdx
@@ -16,7 +16,7 @@ import { Render } from "~/components"
| Writes to different keys | 1,000 writes per day | Unlimited |
| Writes to same key | 1 per second | 1 per second |
| Operations/worker invocation | 1000 | 1000 |
-| Namespaces | 200 | 200 |
+| Namespaces | 1000 | 1000 |
| Storage/account | 1 GB | Unlimited |
| Storage/namespace | 1 GB | Unlimited |
| Keys/namespace | Unlimited | Unlimited |
diff --git a/src/content/docs/kv/reference/environments.mdx b/src/content/docs/kv/reference/environments.mdx
index 4b4f6d58e9e626..8eef01fdb0e69c 100644
--- a/src/content/docs/kv/reference/environments.mdx
+++ b/src/content/docs/kv/reference/environments.mdx
@@ -5,11 +5,11 @@ sidebar:
order: 3
---
-KV namespaces can be used with [environments](/workers/wrangler/environments/). This is useful when you have code in your Worker that refers to a KV binding like `MY_KV`, and you want to have these bindings point to different KV namespaces (for example, one for staging and one for production).
+import { WranglerConfig } from "~/components";
-The following code in the `wrangler.toml` file shows you how to have two environments that have two different KV namespaces but the same binding name:
+KV namespaces can be used with [environments](/workers/wrangler/environments/). This is useful when you have code in your Worker that refers to a KV binding like `MY_KV`, and you want to have these bindings point to different KV namespaces (for example, one for staging and one for production).
-import { WranglerConfig } from "~/components";
+The following code in the Wrangler file shows you how to have two environments that have two different KV namespaces but the same binding name:
@@ -52,7 +52,7 @@ Most `kv` subcommands also allow you to specify an environment with the optional
Specifying an environment with the optional `--env` flag allows you to publish Workers running the same code but with different KV namespaces.
-For example, you could use separate staging and production KV namespaces for KV data in your `wrangler.toml` file:
+For example, you could use separate staging and production KV namespaces for KV data in your Wrangler file:
@@ -78,9 +78,9 @@ kv_namespaces = [
-With the `wrangler.toml` file above, you can specify `--env production` when you want to perform a KV action on the KV namespace `MY_KV` under `env.production`.
+With the Wrangler file above, you can specify `--env production` when you want to perform a KV action on the KV namespace `MY_KV` under `env.production`.
-For example, with the `wrangler.toml` file above, you can get a value out of a production KV instance with:
+For example, with the Wrangler file above, you can get a value out of a production KV instance with:
```sh
wrangler kv key get --binding "MY_KV" --env=production ""
diff --git a/src/content/docs/learning-paths/secure-internet-traffic/build-http-policies/tls-inspection.mdx b/src/content/docs/learning-paths/secure-internet-traffic/build-http-policies/tls-inspection.mdx
index 54a28595a39586..b793700bb0ec3e 100644
--- a/src/content/docs/learning-paths/secure-internet-traffic/build-http-policies/tls-inspection.mdx
+++ b/src/content/docs/learning-paths/secure-internet-traffic/build-http-policies/tls-inspection.mdx
@@ -38,7 +38,7 @@ To turn on TLS inspection for your Zero Trust organization:
### 3. Determine the certificate used for inspection
-TLS inspection requires a trusted private root certificate to be able to inspect and filter encrypted traffic. A [Cloudflare root certificate](/cloudflare-one/connections/connect-devices/user-side-certificates/automated-deployment/) is a simple and common solution that is usually appropriate for testing or proof-of-concept conditions when deployed to your devices.
+TLS inspection requires a trusted private root certificate to be able to inspect and filter encrypted traffic. A [Cloudflare root certificate](/cloudflare-one/connections/connect-devices/user-side-certificates/automated-deployment/) is a simple and common solution that is usually appropriate for testing or proof-of-concept conditions when deployed to your devices. You can [generate a Cloudflare certificate](/cloudflare-one/connections/connect-devices/user-side-certificates/#generate-a-cloudflare-root-certificate) in Zero Trust.
Alternatively, if you already have a root CA that you use for other inspection or trust applications, we recommend [using your own certificate](/cloudflare-one/connections/connect-devices/user-side-certificates/custom-certificate/). A few reasons for this include:
@@ -46,6 +46,8 @@ Alternatively, if you already have a root CA that you use for other inspection o
- If external services like Git workflows or CLI tools rely on an existing certificate store, presenting the same certificate in inspection is far less likely to interrupt their traffic flow, although these are things that you may wish to exempt from inspection.
- If you are using [WARP Connector](/cloudflare-one/connections/connect-networks/private-net/warp-connector/) or a [Magic WAN](/magic-wan/) IPsec/GRE tunnel to on-ramp traffic to Cloudflare, devices behind those tunnels will not be able to use HTTP policies that require TLS inspection unless they have a certificate that matches your organization's certificate of choice. Your network infrastructure most likely already has your own device certificates deployed, so using your own existing public key infrastructure for inspection will simplify protection.
+Once you generate a Cloudflare certificate or upload a custom certificate, you will need to set it as **Available** to deploy it across the Cloudflare network and as **In-Use** to use it for inspection. For more information, refer to [Activate a root certificate](/cloudflare-one/connections/connect-devices/user-side-certificates/#activate-a-root-certificate).
+
### 4. Build a baseline Do Not Inspect policy
Do you want to inspect all traffic by default, or do you only want to inspect explicit destinations? We recommend that you build a Gateway list of applications and endpoints to exclude from inspection and add the list as an OR operator in addition to our existing Do Not Inspect application group. For example:
diff --git a/src/content/docs/learning-paths/workers/get-started/c3-and-wrangler.mdx b/src/content/docs/learning-paths/workers/get-started/c3-and-wrangler.mdx
index 64125e6c313051..49ab962c55310a 100644
--- a/src/content/docs/learning-paths/workers/get-started/c3-and-wrangler.mdx
+++ b/src/content/docs/learning-paths/workers/get-started/c3-and-wrangler.mdx
@@ -35,9 +35,9 @@ When you run C3 to create your project, C3 will install the latest version of Wr
## Source of truth
-If you are building your Worker on the Cloudflare dashboard, you will set up your project configuration (such as environment variables, bindings, and routes) through the dashboard. If you are building your project programmatically using C3 and Wrangler, you will rely on a [`wrangler.toml`](/workers/wrangler/configuration/) file to configure your Worker.
+If you are building your Worker on the Cloudflare dashboard, you will set up your project configuration (such as environment variables, bindings, and routes) through the dashboard. If you are building your project programmatically using C3 and Wrangler, you will rely on a [`wrangler.toml / wrangler.json` file](/workers/wrangler/configuration/) to configure your Worker.
-Cloudflare recommends choosing and using one [source of truth](/workers/wrangler/configuration/#source-of-truth), the dashboard or `wrangler.toml`, to avoid errors in your project.
+Cloudflare recommends choosing and using one [source of truth](/workers/wrangler/configuration/#source-of-truth) — the dashboard or the Wrangler configuration file (`wrangler.json` or `wrangler.toml`) — to avoid errors in your project.
## Summary
diff --git a/src/content/docs/learning-paths/workers/get-started/first-worker.mdx b/src/content/docs/learning-paths/workers/get-started/first-worker.mdx
index b179eb3177fcbc..f43daf62364bfe 100644
--- a/src/content/docs/learning-paths/workers/get-started/first-worker.mdx
+++ b/src/content/docs/learning-paths/workers/get-started/first-worker.mdx
@@ -53,7 +53,7 @@ Refer to [How to run Wrangler commands](/workers/wrangler/commands/#how-to-run-w
In your Worker project directory, C3 has generated the following:
-1. `wrangler.toml`: Your [Wrangler](/workers/wrangler/configuration/#sample-wrangler-configuration) configuration file.
+1. `wrangler.json`: Your [Wrangler](/workers/wrangler/configuration/#sample-wrangler-configuration) configuration file.
2. `index.js` (in `/src`): A minimal `'Hello World!'` Worker written in [ES module](/workers/reference/migrate-to-module-workers/) syntax.
3. `package.json`: A minimal Node dependencies configuration file.
4. `package-lock.json`: Refer to [`npm` documentation on `package-lock.json`](https://docs.npmjs.com/cli/v9/configuring-npm/package-lock-json).
diff --git a/src/content/docs/load-balancing/load-balancers/dns-records.mdx b/src/content/docs/load-balancing/load-balancers/dns-records.mdx
index fc5e7c8ed57828..2acb51acde41fc 100644
--- a/src/content/docs/load-balancing/load-balancers/dns-records.mdx
+++ b/src/content/docs/load-balancing/load-balancers/dns-records.mdx
@@ -60,7 +60,7 @@ If you already have an existing `A`, `AAAA`, or `CNAME` record, be aware that th
## SSL/TLS coverage
-Due to internal limitations, Cloudflare [Universal SSL certificates](/ssl/edge-certificates/universal-ssl/) do not cover load balancing hostnames by default. This behavior will be corrected in the future.
+Due to internal limitations, on [Partial (CNAME) setup](/dns/zone-setups/partial-setup/) the Cloudflare [Universal SSL certificates](/ssl/edge-certificates/universal-ssl/) do not cover load balancing hostnames by default. This behavior will be corrected in the future.
As a current workaround for a domain or first-level subdomain (`lb.example.com`), create a [proxied `CNAME`/`A`/`AAAA` record](/dns/manage-dns-records/how-to/create-dns-records/) for that hostname.
diff --git a/src/content/docs/load-balancing/monitors/index.mdx b/src/content/docs/load-balancing/monitors/index.mdx
index 7c45ffddeff40e..1a5f5ed0449289 100644
--- a/src/content/docs/load-balancing/monitors/index.mdx
+++ b/src/content/docs/load-balancing/monitors/index.mdx
@@ -78,7 +78,7 @@ The Cloudflare API supports the following commands for monitors. Examples are gi
The following table summarizes the different types of monitors available in Cloudflare Load Balancing, their monitoring types, and how each health check process evaluates the success criteria to determine endpoint health:
-| Monitor type | Monitoring type | Description | Health check process | Sucess critera |
+| Monitor type | Monitoring type | Description | Health check process | Success criteria |
| ------------ | -------------- | ----------- | -------------------- | -------------- |
| HTTP/HTTPS | Public and private | Used for HTTP and HTTPS endpoints with specific protocol attributes. | The probe is configured with settings and success criteria such as Method, Simulate Zone, Follow Redirects, Request Headers, and Response Body. The probe then evaluates the configured success criteria using the HTTP protocol. Throughout the configured timeout period, the TCP connection is kept active using [keep-alives](/fundamentals/reference/tcp-connections/#tcp-connections-and-keep-alives), even if no response is received. | Success is based on meeting the configured HTTP success criteria. No response within the configured timeout and retries is considered unhealthy. |
| TCP | Public and private | Checks TCP connectivity by attempting to open a connection to the endpoint. | The monitor sends a TCP SYN message to the specified port. A successful health check requires receiving a SYN/ACK message to establish the connection. The connection is closed by sending a FIN or RST packet, or by receiving a FIN packet from the endpoint. | Failure to establish a TCP connection within the configured timeout and retries is considered unhealthy. |
diff --git a/src/content/docs/logs/faq/common-calculations.mdx b/src/content/docs/logs/faq/common-calculations.mdx
index b8c2e54601f16b..f199813807313c 100644
--- a/src/content/docs/logs/faq/common-calculations.mdx
+++ b/src/content/docs/logs/faq/common-calculations.mdx
@@ -1,6 +1,6 @@
---
pcx_content_type: faq
-title: Common calculations
+title: Common calculations FAQ
structured_data: true
sidebar:
order: 4
diff --git a/src/content/docs/logs/faq/instant-logs.mdx b/src/content/docs/logs/faq/instant-logs.mdx
index 13d7e77beaa519..f8d25230cbde4e 100644
--- a/src/content/docs/logs/faq/instant-logs.mdx
+++ b/src/content/docs/logs/faq/instant-logs.mdx
@@ -1,6 +1,6 @@
---
pcx_content_type: faq
-title: Instant Logs
+title: Instant Logs FAQ
structured_data: true
sidebar:
order: 5
diff --git a/src/content/docs/logs/faq/logpull-api.mdx b/src/content/docs/logs/faq/logpull-api.mdx
index 37c7790653bc62..19df58b94e0286 100644
--- a/src/content/docs/logs/faq/logpull-api.mdx
+++ b/src/content/docs/logs/faq/logpull-api.mdx
@@ -1,6 +1,6 @@
---
pcx_content_type: faq
-title: Logpull API
+title: Logpull API FAQ
structured_data: true
sidebar:
order: 3
diff --git a/src/content/docs/logs/faq/logpush.mdx b/src/content/docs/logs/faq/logpush.mdx
index 14629cc22a462d..150714a318d265 100644
--- a/src/content/docs/logs/faq/logpush.mdx
+++ b/src/content/docs/logs/faq/logpush.mdx
@@ -1,6 +1,6 @@
---
pcx_content_type: faq
-title: Logpush
+title: Logpush FAQ
structured_data: true
sidebar:
order: 2
diff --git a/src/content/docs/logs/get-started/permissions.mdx b/src/content/docs/logs/get-started/permissions.mdx
index 763139878782c5..b3a010a033b17b 100644
--- a/src/content/docs/logs/get-started/permissions.mdx
+++ b/src/content/docs/logs/get-started/permissions.mdx
@@ -10,9 +10,9 @@ Below is a description of the available permissions for tokens and roles as they
## Tokens
-* **Logs: Read** - Grants read access to logs using Logpull or Instant Logs.
+- **Logs: Read** - Grants read access to logs using Logpull or Instant Logs.
-* **Logs: Write** - Grants read and write access to Logpull and Logpush, and read access to Instant Logs.
+- **Logs: Write** - Grants read and write access to Logpull and Logpush, and read access to Instant Logs. Note that all Logpush API operations require **Logs: Write** permission because Logpush jobs contain sensitive information.
:::note[Note]
diff --git a/src/content/docs/magic-transit/index.mdx b/src/content/docs/magic-transit/index.mdx
index 06cdfc1a892f83..7ed8a46a187851 100644
--- a/src/content/docs/magic-transit/index.mdx
+++ b/src/content/docs/magic-transit/index.mdx
@@ -37,6 +37,10 @@ Magic Transit steers traffic along tunnel routes based on priorities you define
Use Cloudflare-owned IP addresses if you want to protect a smaller network and do not meet Magic Transit's `/24` prefix length requirements.
+
+Use BGP peering between your networks and Cloudflare to automate the process of adding or removing networks and subnets, and take advantage of failure detection and session recovery features.
+
+
---
## Related products
diff --git a/src/content/docs/magic-transit/reference/tunnel-health-checks.mdx b/src/content/docs/magic-transit/reference/tunnel-health-checks.mdx
index f54c61af202e8b..4ac972925b9cb0 100644
--- a/src/content/docs/magic-transit/reference/tunnel-health-checks.mdx
+++ b/src/content/docs/magic-transit/reference/tunnel-health-checks.mdx
@@ -11,11 +11,11 @@ import { Render } from "~/components";
diff --git a/src/content/docs/magic-wan/configuration/manually/third-party/azure/azure-virtual-wan.mdx b/src/content/docs/magic-wan/configuration/manually/third-party/azure/azure-virtual-wan.mdx
new file mode 100644
index 00000000000000..4000a904d96f94
--- /dev/null
+++ b/src/content/docs/magic-wan/configuration/manually/third-party/azure/azure-virtual-wan.mdx
@@ -0,0 +1,144 @@
+---
+pcx_content_type: integration-guide
+title: Microsoft Azure Virtual WAN
+---
+
+This tutorial provides information on how to connect Magic WAN to a Microsoft Azure Virtual WAN hub.
+
+## Prerequisites
+
+You will need to have an existing Resource group, Virtual Network, and Virtual Machine created in your Azure account. Refer to [Microsoft's documentation](https://learn.microsoft.com/en-us/azure/virtual-network/) to learn more on how to create these.
+
+## Start Azure configuration
+
+### 1. Create a Virtual WAN
+
+To connect one or more VNets to Magic WAN via a Virtual WAN hub, you first need to create a Virtual WAN (vWAN) resource representing your Azure network. If you already have a vWAN that you wish to connect to Magic WAN, continue to the next step. Refer to [Microsoft's documentation](https://learn.microsoft.com/en-us/azure/virtual-wan/virtual-wan-site-to-site-portal#openvwan) to learn more.
+
+1. In the Azure portal, go to your **Virtual WANs** page.
+2. Select the option to create a **Virtual WAN**.
+3. Create a Virtual WAN with the **Type** set to **Standard**.
+
+### 2. Create a Virtual WAN Hub
+
+Using traditional hub and spoke terminology, a Virtual WAN Hub deployed within a vWAN is the hub to which your VNet(s) and Magic WAN attach as spokes. The vWAN hub deployed in this step will contain a VPN Gateway for connecting to Magic WAN.
+
+1. Create a **Virtual WAN Hub**.
+2. In **Basics**:
+ 1. Select your resource group as well as your desired region, capacity, and hub routing preference. Microsoft recommends using the default hub routing preference of **ExpressRoute** unless you have a specific need to change this setting. Refer to [Microsoft's documentation](https://learn.microsoft.com/en-us/azure/virtual-wan/about-virtual-hub-routing-preference) to learn more about Azure hub routing preferences.
+ 2. Configure the **Hub Private Address Space**. Choose an [address space with a subnet mask of `/24` or greater](https://learn.microsoft.com/en-us/azure/virtual-wan/virtual-wan-site-to-site-portal#hub) that does not overlap with the address spaces of any VNets you wish to attach to the vWAN Hub, nor with any of your Magic WAN sites.
+3. In **Site to Site**:
+ 1. In **Do you want to create a Site to site (VPN gateway)?** select **Yes**.
+ 2. Select your desired **Gateway scale units** and **Routing Preference**. Refer to [Microsoft's documentation](https://learn.microsoft.com/en-us/azure/virtual-network/ip-services/routing-preference-overview#routing-via-microsoft-global-network) to learn more about Azure routing preferences.
+4. Select **Create**. Note that the deployment time for the vWAN Hub and VPN Gateway may take 30 minutes or more.
+5. After the VPN Gateway has finished provisioning, go to **Virtual WAN** > **Hubs** > **Your vHub** > **Connectivity** > **VPN (Site to site)**.
+6. In the **Essentials** dropdown select the VPN Gateway listed.
+7. Select the JSON View for the VPN Gateway and take note of the JSON attributes at the paths `properties.ipConfigurations[0].publicIpAddress` and `properties.ipConfigurations[1].publicIpAddress`. These will be the customer endpoints needed when configuring IPsec tunnels for Magic WAN.
+
+### 3. Create a VPN site
+
+A VPN site represents the remote site your Azure vWAN can reach through a VPN connection. This is typically an on-premises location. In this case, the VPN site represents Magic WAN.
+
+1. Go to **Virtual WAN** > **VPN sites** > **Create site**.
+2. In **Basics**:
+ 1. Configure your desired region and name.
+ 2. Configure the **Device vendor** as Cloudflare.
+ 3. In **Private address space**, specify the address range(s) you wish to access from your vWAN through Magic WAN. This could include other private networks connected to your Magic WAN, or a default route (`0.0.0.0/0`) if you want Internet egress traffic to traverse Magic WAN (that is, to be scanned by Cloudflare Gateway). The address space can be modified after VPN site creation.
+3. In **Links**:
+ 1. Configure a single link. Provide a name, speed (in Mbps), and provider name (here, enter `Cloudflare`) for your link. For the **Link IP address**, enter your Cloudflare anycast address. The **BGP address** and **ASN** fields should be left empty. BGP is not supported at the time of writing this tutorial.
+4. Select **Create**.
+
+### 4. Configure VPN site for Magic IPsec tunnel health checks
+
+Magic WAN uses [Tunnel Health Checks](/magic-wan/reference/tunnel-health-checks/) to monitor whether a tunnel is available.
+
+Tunnel health checks make use of ICMP probes sent from the Cloudflare side of the Magic IPsec tunnel to the remote endpoint (Azure). Probes are sent from the tunnel's interface address, which you specify in two places:
+
+- **Cloudflare Dashboard:** In your Magic IPsec tunnel configuration as the address of the virtual tunnel interface (VTI) (so that Cloudflare knows what address to send probes from). Cloudflare requires this address in CIDR notation with a `/31` netmask.
+- **Azure Portal:** In your VPN site's address space (so that Azure routes probe responses back over the tunnel). Azure requires this address in CIDR notation with a `/32` netmask.
+
+Cloudflare recommends that you select a unique `/31` subnet ([RFC 1918 — Address Allocation for Private Internets](https://datatracker.ietf.org/doc/html/rfc1918)) for each IPsec tunnel which is treated as a Point-to-Point Link and provides the ideal addressing scheme to satisfy both requirements.
+
+Example:
+
+- Select `169.254.251.137/31` as your unique Point-to-Point Link subnet.
+- In the Cloudflare dashboard, set `169.254.251.137/31` as your tunnel's **IPv4 Interface address**. (Refer to [Configure Magic WAN](#configure-magic-wan) below.)
+- In the Azure portal, add `169.254.251.137/32` to your VPN site's **Private address space**.
+
+:::note
+It is important to ensure the subnet selected for the Interface Address does not overlap with any other subnet.
+
+You should also refer to RFC 3021 for more information on using 31-bit prefixes on [IPv4 Point-to-Point Links](https://datatracker.ietf.org/doc/html/rfc3021).
+:::
+
+To configure the Address Space for the Local Network Gateway to support Tunnel Health Checks:
+
+1. Go to **Virtual WAN** > **VPN sites** > **Your VPN Site** > **Edit site** to edit the VPN site configured in the previous section.
+2. Update the **Private address space** to include two `/32` subnets in CIDR notation as described above. When using Azure VPN Gateways with vWAN Hubs, a single VPN Gateway Connection maps to two Magic WAN IPsec Tunnels. For this reason, we need to select two unique `/31` subnets, one for each Cloudflare IPsec Tunnel. The upper address of each `/31` is then added to the VPN Site's Private address space as a `/32` subnet.
+3. Select **Confirm**.
+
+### 5. Create a Virtual Network Connection
+
+To connect your existing VNet to your newly created vHub:
+
+1. Go to **Virtual WAN** > **Virtual network connections** and select **Add connection**.
+2. Configure the connection to connect the desired VNet to the vHub created above.
+3. Ensure that within the connection's **Routing configuration**:
+   1. **Propagate to none** is set to **No**.
+   2. **Bypass Next Hop IP for workloads within this VNet** is set to **No**.
+   3. **Propagate static route** is set to **Yes**.
+4. Select **Create**.
+
+## Configure Magic WAN
+
+When connecting your Azure vHub VPN Gateway to Magic WAN, two Cloudflare tunnels are required to map to the single Azure VPN Gateway Connection created above. This is because Azure VPN Gateways are deployed with two public IP addresses.
+
+1. Create an [IPsec tunnel](/magic-wan/configuration/manually/how-to/configure-tunnels/#add-tunnels) in the Cloudflare dashboard.
+2. Make sure you have the following settings:
+ 1. **Interface address**: Add the upper IP address within the first `/31` subnet selected in step 4 of the Start Azure Configuration section. Refer to [Tunnel endpoints](/magic-wan/configuration/manually/how-to/configure-tunnels/) for more details.
+ 2. **Customer endpoint**: The first public IP associated with your Azure VPN Gateway. For example, `40.xxx.xxx.xxx`.
+ 3. **Cloudflare endpoint**: Use the Cloudflare anycast address you have received from your account team. This will also be the IP address corresponding to the VPN Site in Azure. For example, `162.xxx.xxx.xxx`.
+ 4. **Health check rate**: Medium (default).
+ 5. **Health check type**: Reply (default).
+ 6. **Health check direction**: Bidirectional (default).
+ 7. **Health check target**: Custom; enter the customer endpoint.
+ 8. **Add pre-shared key later**: Select this option to create a PSK that will be used later in Azure.
+ 9. **Replay protection**: **Enable**.
+3. Edit the tunnel. Generate a new pre-shared key and copy the key to a safe location.
+4. Create static routes for your Azure Virtual Network subnets, specifying the newly created tunnel as the next hop.
+5. Create the second IPsec tunnel in the Cloudflare dashboard. Copy the configuration of the first tunnel with the following exceptions:
+ 1. **Interface address**: Add the upper IP address within the **second** `/31` subnet selected in step 4 of the Start Azure Configuration section.
+ 2. **Customer endpoint**: The **second** Public IP associated with your Azure VPN Gateway.
+ 3. **Health check target**: Enter the new customer endpoint as a custom target.
+ 4. **Use my own pre-shared key**: Select this option and enter the key generated for the first tunnel.
+6. Create static routes for your Azure Virtual Network subnets, specifying the newly created tunnel as the next hop. To use one tunnel as primary and the other as backup, give the primary tunnel's route a lower priority. To ECMP load balance across both tunnels, assign both routes the same priority.
+
+## Finish Azure Configuration
+
+### 1. Create an IPsec VPN Gateway Connection
+
+To create a **VPN Gateway Connection**:
+
+1. Go to **Virtual WAN** > **Hubs** > **Your vHub** > **Connectivity** > **VPN (Site to site)** and remove the default filter **Hub association: Connected** to display the **VPN Site** created above.
+2. Check the box next to your VPN Site and select **Connect VPN sites**.
+
+Choose the following settings when creating your VPN Connection:
+
+1. **PSK**: Provide the PSK generated by Cloudflare for your Magic WAN Tunnels.
+2. **Protocol**: *IKEv2*
+3. **IPsec**: *Custom*
+ 1. **IPsec SA lifetime in seconds**: 28800
+ 2. **IKE Phase 1**
+ 1. **Encryption**: *AES256*
+ 2. **Integrity/PRF**: *SHA256*
+ 3. **DH Group**: *ECP384*
+   3. **IKE Phase 2 (IPsec)**
+ 1. **IPsec Encryption**: *AES256*
+ 2. **IPsec Integrity**: *SHA256*
+ 3. **PFS Group**: *ECP384*
+ 4. **Propagate Default Route:** **Disable**
+ 5. **Use policy based traffic selector**: **Disable**
+ 6. **Connection mode**: **Initiator Only**
+ 7. **Configure traffic selector?**: **Disabled**
+
+4. Select **Connect**.
\ No newline at end of file
diff --git a/src/content/docs/magic-wan/configuration/manually/third-party/azure.mdx b/src/content/docs/magic-wan/configuration/manually/third-party/azure/azure-vpn-gateway.mdx
similarity index 99%
rename from src/content/docs/magic-wan/configuration/manually/third-party/azure.mdx
rename to src/content/docs/magic-wan/configuration/manually/third-party/azure/azure-vpn-gateway.mdx
index bf72ed89c695fc..1cc173cee69f62 100644
--- a/src/content/docs/magic-wan/configuration/manually/third-party/azure.mdx
+++ b/src/content/docs/magic-wan/configuration/manually/third-party/azure/azure-vpn-gateway.mdx
@@ -1,6 +1,6 @@
---
pcx_content_type: integration-guide
-title: Microsoft Azure
+title: Microsoft Azure VPN Gateway
---
This tutorial provides information on how to connect Cloudflare Magic WAN to your Azure Virtual Network, using the Azure Virtual Network Gateway.
diff --git a/src/content/docs/magic-wan/configuration/manually/third-party/azure/index.mdx b/src/content/docs/magic-wan/configuration/manually/third-party/azure/index.mdx
new file mode 100644
index 00000000000000..9edcfc5ab59e91
--- /dev/null
+++ b/src/content/docs/magic-wan/configuration/manually/third-party/azure/index.mdx
@@ -0,0 +1,13 @@
+---
+pcx_content_type: integration-guide
+title: Microsoft Azure
+sidebar:
+ group:
+ hideIndex: true
+---
+
+import { DirectoryListing } from "~/components"
+
+Microsoft Azure integration guides currently available:
+
+<DirectoryListing />
\ No newline at end of file
diff --git a/src/content/docs/magic-wan/index.mdx b/src/content/docs/magic-wan/index.mdx
index ba703fdc82c6ae..9eab27fa2ead77 100644
--- a/src/content/docs/magic-wan/index.mdx
+++ b/src/content/docs/magic-wan/index.mdx
@@ -74,6 +74,10 @@ Learn how to [get started](/magic-wan/get-started/).
Learn how you can use Magic WAN with other Cloudflare Zero Trust products.
+
+Use BGP peering between your networks and Cloudflare to automate the process of adding or removing networks and subnets, and take advantage of failure detection and session recovery features.
+
+
---
## Related products
diff --git a/src/content/docs/magic-wan/reference/tunnel-health-checks.mdx b/src/content/docs/magic-wan/reference/tunnel-health-checks.mdx
index 9166ee5811bd5a..ab59f0fba6890d 100644
--- a/src/content/docs/magic-wan/reference/tunnel-health-checks.mdx
+++ b/src/content/docs/magic-wan/reference/tunnel-health-checks.mdx
@@ -12,14 +12,11 @@ import { Render } from "~/components";
file="tunnel-health/tunnel-health-checks"
product="magic-transit"
params={{
- healthCheckFrequencyURL:
- "/magic-wan/configuration/common-settings/tunnel-health-checks/",
+ addTunnels: "/magic-wan/configuration/manually/how-to/configure-tunnels/#add-tunnels",
+ changeHealthCheckRate: "/magic-wan/configuration/common-settings/tunnel-health-checks/",
+ probeHealth: "#health-state-and-prioritization",
productName: "Magic WAN",
- onboardingURL:
- "/magic-wan/configuration/manually/how-to/configure-static-routes/",
- configureTunnelEndpointsURL:
- "/magic-wan/configuration/manually/how-to/configure-tunnels/",
- urlChangeHealthCheckType:
- "/magic-wan/configuration/manually/how-to/configure-tunnels/#add-tunnels",
+ staticRoutes: "/magic-wan/configuration/manually/how-to/configure-static-routes/",
+ tunnelEndpoints: "/magic-wan/configuration/manually/how-to/configure-tunnels/"
}}
/>
diff --git a/src/content/docs/network/ip-geolocation.mdx b/src/content/docs/network/ip-geolocation.mdx
index 3fa8a683d947c7..3a2dd8a5222f3d 100644
--- a/src/content/docs/network/ip-geolocation.mdx
+++ b/src/content/docs/network/ip-geolocation.mdx
@@ -6,7 +6,7 @@ title: IP geolocation
import { FeatureTable, TabItem, Tabs } from "~/components";
-IP geolocation adds the [`CF-IPCountry` header](/fundamentals/reference/http-request-headers/#cf-ipcountry) to all requests to your origin server.
+IP geolocation adds the [`CF-IPCountry` header](/fundamentals/reference/http-headers/#cf-ipcountry) to all requests to your origin server.
Cloudflare automatically updates its IP geolocation database using MaxMind and other data sources, typically twice a week.
@@ -36,7 +36,7 @@ To enable **IP Geolocation** with the API, send a [`PATCH`](/api/resources/zones
:::note
-In order to use this data, you will need to then retrieve it from the [`CF-IPCountry` header](/fundamentals/reference/http-request-headers/#cf-ipcountry).
+In order to use this data, you will need to then retrieve it from the [`CF-IPCountry` header](/fundamentals/reference/http-headers/#cf-ipcountry).
:::
diff --git a/src/content/docs/network/true-client-ip-header.mdx b/src/content/docs/network/true-client-ip-header.mdx
index 329ce81f983e34..ed0297246c9ae8 100644
--- a/src/content/docs/network/true-client-ip-header.mdx
+++ b/src/content/docs/network/true-client-ip-header.mdx
@@ -2,12 +2,11 @@
pcx_content_type: troubleshooting
source: https://support.cloudflare.com/hc/en-us/articles/206776727-Understanding-the-True-Client-IP-Header
title: Understanding the True-Client-IP Header
-
---
-import { FeatureTable } from "~/components"
+import { FeatureTable } from "~/components";
-Enabling the True-Client-IP Header adds the [`True-Client-IP` header](/fundamentals/reference/http-request-headers/#true-client-ip-enterprise-plan-only) to all requests to your origin server, which includes the end user’s IP address.
+Enabling the True-Client-IP Header adds the [`True-Client-IP` header](/fundamentals/reference/http-headers/#true-client-ip-enterprise-plan-only) to all requests to your origin server, which includes the end user's IP address.
## Availability
@@ -18,17 +17,13 @@ Enabling the True-Client-IP Header adds the [`True-Client-IP` header](/fundament
The recommended procedure to access client IP information is to [enable the **Add "True-Client-IP" header** Managed Transform](/rules/transform/managed-transforms/reference/#add-true-client-ip-header).
:::note
-
-
-In order to use this data, you will need to then retrieve it from the [`True-Client-IP` header](/fundamentals/reference/http-request-headers/#cf-ipcountry).
-
-
+To use this data, you will need to then retrieve it from the [`True-Client-IP` header](/fundamentals/reference/http-headers/#true-client-ip-enterprise-plan-only).
:::
## Additional resources
For additional guidance on using True-Client-IP Header with Cloudflare, refer to the following resources:
-* [Available Managed Transforms](/rules/transform/managed-transforms/reference/#add-true-client-ip-header)
-* [HTTP request headers](/fundamentals/reference/http-request-headers/#true-client-ip-enterprise-plan-only)
-* [Restoring original visitor IPs](/support/troubleshooting/restoring-visitor-ips/restoring-original-visitor-ips/)
+- [Available Managed Transforms](/rules/transform/managed-transforms/reference/#add-true-client-ip-header)
+- [Cloudflare HTTP headers](/fundamentals/reference/http-headers/#true-client-ip-enterprise-plan-only)
+- [Restoring original visitor IPs](/support/troubleshooting/restoring-visitor-ips/restoring-original-visitor-ips/)
diff --git a/src/content/docs/page-shield/how-it-works/index.mdx b/src/content/docs/page-shield/how-it-works/index.mdx
index c170aad8bc79b4..db472942975511 100644
--- a/src/content/docs/page-shield/how-it-works/index.mdx
+++ b/src/content/docs/page-shield/how-it-works/index.mdx
@@ -12,7 +12,7 @@ description: Page Shield tracks resources (such as scripts) loaded by your
import { GlossaryTooltip } from "~/components";
-Page Shield helps manage resources loaded by your website visitors, including scripts, their connections, and cookies. It can trigger alert notifications when resources change or are considered malicious.
+Page Shield helps manage resources loaded by your website visitors, including scripts, their connections, and [cookies](https://www.cloudflare.com/learning/privacy/what-are-cookies/). It can trigger alert notifications when resources change or are considered malicious.
Enabling Page Shield adds a Content Security Policy (CSP) deployed with a [report-only directive](/page-shield/reference/csp-header/) to collect information from the browser. This allows Cloudflare to provide you with a list of all scripts running on your application and the connections they make to third-party endpoints. Page Shield also monitors ingress and egress traffic for cookies, either set by origin servers or by the visitor's browser.
diff --git a/src/content/docs/pages/configuration/build-caching.mdx b/src/content/docs/pages/configuration/build-caching.mdx
index 669755637cbc83..f2a15dac4433e9 100644
--- a/src/content/docs/pages/configuration/build-caching.mdx
+++ b/src/content/docs/pages/configuration/build-caching.mdx
@@ -3,36 +3,17 @@ pcx_content_type: concept
title: Build caching
---
-Improve Pages build times by turning on build caching to restore dependencies and build output between builds. The first build to occur after enabling build caching on your Pages project will save to cache. Every subsequent build will restore from cache unless configured otherwise.
+Improve Pages build times by caching dependencies and build output between builds with a project-wide shared cache.
-## Requirements
+The first build to occur after enabling build caching on your Pages project will save to cache. Every subsequent build will restore from cache unless configured otherwise.
-Build caching requires the [V2 build system](/pages/configuration/build-image/#v2-build-system) or later. To update from V1, refer to the [V2 build system migration instructions](/pages/configuration/build-image/#v1-to-v2-migration).
-
-## Configuration
-
-### Enable build caching
-
-To enable build caching in the Cloudflare dashboard:
-
-1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com) and select your account.
-2. In Account Home, select **Workers & Pages**.
-3. In **Overview**, select your Pages project.
-4. Go to **Settings** > **Build** > **Build cache** and select **Enable**.
+## About build cache
-### Clear cache
-
-The build cache can be cleared for a project if needed, such as when debugging build issues. To clear the build cache:
-
-1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com) and select your account.
-2. In Account Home, select **Workers & Pages**.
-3. In **Overview**, select your Pages project.
-4. Go to **Settings** > **Build** > **Build cache**.
-5. Select **Clear Cache** to clear the build cache.
+When enabled, the build cache will automatically detect and cache data from each build. Refer to [Frameworks](/pages/configuration/build-caching/#frameworks) to review what directories are automatically saved and restored from the build cache.
-## How build caching works
+### Requirements
-When enabled, the build cache will automatically detect and cache data from each build. Refer to [Frameworks](/pages/configuration/build-caching/#frameworks) to review what directories are automatically saved and restored from the build cache.
+Build caching requires the [V2 build system](/pages/configuration/build-image/#v2-build-system) or later. To update from V1, refer to the [V2 build system migration instructions](/pages/configuration/build-image/#v1-to-v2-migration).
### Package managers
@@ -47,7 +28,9 @@ Pages will cache the global cache directories of the following package managers:
### Frameworks
-Caching the build output from frameworks can speed up subsequent build times. The build cache supports the following frameworks:
+Some frameworks provide a cache directory that is typically populated by the framework with intermediate build outputs or dependencies during build time. Pages will automatically detect the framework you are using and cache this directory for reuse in subsequent builds.
+
+The following frameworks support build output caching:
| Framework | Directories cached |
| ---------- | --------------------------------------------- |
@@ -58,9 +41,27 @@ Caching the build output from frameworks can speed up subsequent build times. Th
| Next.js | `.next/cache` |
| Nuxt | `node_modules/.cache/nuxt` |
-## Limits
+### Limits
The following limits are imposed for build caching:
- **Retention**: Cache is purged seven days after its last read date. Unread cache artifacts are purged seven days after creation.
- **Storage**: Every project is allocated 10 GB. If the project cache exceeds this limit, the project will automatically start deleting artifacts that were read least recently.
+
+## Enable build cache
+
+To enable build caching:
+
+1. Navigate to [Workers & Pages Overview](https://dash.cloudflare.com) on the Dashboard.
+2. Find your Pages project.
+3. Go to **Settings** > **Build** > **Build cache**.
+4. Select **Enable** to turn on build caching.
+
+## Clear build cache
+
+The build cache can be cleared for a project if needed, such as when debugging build issues. To clear the build cache:
+
+1. Navigate to [Workers & Pages Overview](https://dash.cloudflare.com) on the Dashboard.
+2. Find your Pages project.
+3. Go to **Settings** > **Build** > **Build cache**.
+4. Select **Clear Cache** to clear the build cache.
diff --git a/src/content/docs/pages/configuration/git-integration.mdx b/src/content/docs/pages/configuration/git-integration.mdx
deleted file mode 100644
index 132cc91f444f48..00000000000000
--- a/src/content/docs/pages/configuration/git-integration.mdx
+++ /dev/null
@@ -1,128 +0,0 @@
----
-pcx_content_type: concept
-title: Git integration
----
-
-Cloudflare supports connecting Cloudflare Pages to your GitHub and GitLab repositories to look for new changes to your project. Pages does not currently support self-hosted instances of GitHub or GitLab.
-
-## Custom branches
-
-Suppose you have a custom Git workflow that uses specific branches to represent your project's production build. In that case, you can specify a custom branch when creating (or managing an existing) project in the Pages dashboard by going to **Settings** > **Builds & deployments** > **Configure Production deployments**. To change the production branch, click the **production branch** dropdown menu and choose any other branch.
-
-You can also use [preview deployments](/pages/configuration/preview-deployments/) to preview how the new version of your project looks before merging into `production`. In addition, Pages allows you to configure which of your preview branches are built and deployed by using [branch build controls](/pages/configuration/branch-build-controls/).
-
-To configure this in your Pages project go to **Settings** > **Builds & deployments** > **Configure preview deployment** and select **Custom branches**. Here you can specify branches you wish to include and exclude from automatic deployments in the provided configuration fields. To learn more refer to the [branch build controls](/pages/configuration/branch-build-controls/) documentation.
-
-## Skipping a specific build via a commit message
-
-Without any configuration required, you can choose to skip a deployment on an adhoc basis. By adding the `[CI Skip]`, `[CI-Skip]`, `[Skip CI]`, `[Skip-CI]`, or `[CF-Pages-Skip]` flag as a prefix in your commit message, and Pages will omit that deployment. The prefixes are not case sensitive.
-
-## Organizational access
-
-You can deploy projects to Cloudflare Pages from your open-source team, company, or side project on both GitHub and GitLab.
-
-### GitHub
-
-When authorizing Cloudflare Pages to access a GitHub account, you can specify access to your individual account or an organization that you belong to on GitHub. In order to be able to add the Cloudflare Pages installation to that organization, your user account must be an owner or have the appropriate role within the organization (that is, the GitHub Apps Manager role). More information on these roles can be seen on [GitHub's documentation](https://docs.github.com/en/organizations/managing-peoples-access-to-your-organization-with-roles/roles-in-an-organization#github-app-managers).
-
-:::caution[GitHub security consideration]
-
-A GitHub account should only point to one Cloudflare account, however, this is not enforced. If you are setting up Cloudflare with GitHub for your organization, Cloudflare recommends that you limit the scope of the application to only the repositories you intend to build with Pages at the time that you set up your project. You can modify these permissions as you build more applications.
-
-:::
-
-### GitLab
-
-By authorizing Cloudflare Pages to access your GitLab account, you will automatically allow access to organizations, groups, and namespaces your GitLab account can access for use by Cloudflare Pages. Managing access to these organizations and groups is handled by GitLab.
-
-## Removing access
-
-### GitHub
-
-You can remove Cloudflare Pages' access to your GitHub account by viewing the [**Applications** page](https://github.com/settings/installations) on GitHub. The GitHub App is named Cloudflare Workers and Pages, and it is shared between Workers and Pages projects.
-
-Note that removing access to GitHub will disable new builds for Workers and Pages, though the last build of your site will continue to be hosted via Cloudflare Pages.
-
-### GitLab
-
-You can remove Cloudflare Pages' access to your GitLab account by navigating to **User Settings** > **Applications** > **Authorized Applications**. Find the applications called Cloudflare Pages and select the **Revoke** button to revoke access.
-
-Note that the GitLab application Cloudflare Pages is shared between Workers and Pages projects, and removing access to GitLab will disable new builds for Workers and Pages, though the last build of your site will continue to be hosted via Cloudflare Pages.
-
-## Reinstall a Git installation
-
-When encountering Git integration related issues, one potential troubleshooting step is attempting to uninstall and reinstall the GitHub or GitLab application associated with the Cloudflare Pages installation. The process for each Git provider is provided below.
-
-### GitHub
-
-1. Go to the installation settings page on GitHub:
- 1. `https://github.com/settings/installations` for individual accounts.
- 2. `https://github.com/organizations//settings/installations` for organizational accounts.
-2. If the Cloudflare Workers and Pages installation is there, click **Configure**, and click **Uninstall "Cloudflare Workers and Pages"**. If there is no "Cloudflare Workers and Pages" installation there, then you don't need to do anything.
-3. Go back to the **Workers & Pages** overview page at `https://dash.cloudflare.com/[YOUR_ACCOUNT_ID]/workers-and-pages`. Click **Create application** > **Pages** > **Connect to Git**
-4. Click the **+ Add account** button, click the GitHub account you want to add, and then click **Install & Authorize**.
-5. You should be redirected to the create project page with your GitHub account or organization in the account list.
-6. Attempt to make a new deployment with your project which was previously broken.
-
-### GitLab
-
-1. Go to your application settings page on GitLab located here: [https://gitlab.com/-/profile/applications](https://gitlab.com/-/profile/applications)
-2. Click the "Revoke" button on your Cloudflare Pages installation if it exists.
-3. Go back to the **Workers & Pages** overview page at `https://dash.cloudflare.com/[YOUR_ACCOUNT_ID]/workers-and-pages`. Click **Create application** > **Pages** > **Connect to Git**
-4. Select the **GitLab** tab at the top, click the **+ Add account** button, click the GitLab account you want to add, and then click **Authorize** on the modal titled "Authorize Cloudflare Pages to use your account?".
-5. You will be redirected to the create project page with your GitLab account or organization in the account list.
-6. Attempt to make a new deployment with your project which was previously broken.
-
-## Troubleshooting
-
-### Project Creation
-
-#### `This repository is being used for a Cloudflare Pages project on a different Cloudflare account.`
-
-Using the same Github/Gitlab repository across separate Cloudflare accounts is disallowed. To use the repository for a Pages project in that Cloudflare account, you should delete any Pages projects using the repository in other Cloudflare accounts.
-
-### Deployments
-
-If you run into any issues related to deployments or failing, check your project dashboard to see if there are any SCM installation warnings listed as shown in the screenshot below.
-
-
-
-To resolve any errors displayed in the Cloudflare Pages dashboard, follow the steps listed below.
-
-#### `This project is disconnected from your Git account, this may cause deployments to fail.`
-
-To resolve this issue, follow the steps provided above in the [Reinstalling a Git installation section](/pages/configuration/git-integration/#reinstall-a-git-installation) for the applicable SCM provider. If the issue persists even after uninstalling and reinstalling, contact support.
-
-#### `Cloudflare Pages is not properly installed on your Git account, this may cause deployments to fail.`
-
-To resolve this issue, follow the steps provided above in the [Reinstalling a Git installation section](/pages/configuration/git-integration/#reinstall-a-git-installation) for the applicable SCM provider. If the issue persists even after uninstalling and reinstalling, contact support.
-
-#### `The Cloudflare Pages installation has been suspended, this may cause deployments to fail.`
-
-Go to your GitHub installation settings:
-
-- `https://github.com/settings/installations` for individual accounts
-- `https://github.com/organizations//settings/installations` for organizational accounts
-
-Click **Configure** on the Cloudflare Pages application. Scroll down to the bottom of the page and click **Unsuspend** to allow Cloudflare Pages to make future deployments.
-
-#### `The project is linked to a repository that no longer exists, this may cause deployments to fail.`
-
-You may have deleted or transferred the repository associated with this Cloudflare Pages project. For a deleted repository, you will need to create a new Cloudflare Pages project with a repository that has not been deleted. For a transferred repository, you can either transfer the repository back to the original Git account or you will need to create a new Cloudflare Pages project with the transferred repository.
-
-#### `The repository cannot be accessed, this may cause deployments to fail.`
-
-You may have excluded this repository from your installation's repository access settings. Go to your GitHub installation settings:
-
-- `https://github.com/settings/installations` for individual accounts
-- `https://github.com/organizations//settings/installations` for organizational accounts
-
-Click **Configure** on the Cloudflare Pages application. Under **Repository access**, ensure that the repository associated with your Cloudflare Pages project is included in the list.
-
-#### `There is an internal issue with your Cloudflare Pages Git installation.`
-
-This is an internal error in the Cloudflare Pages SCM system. You can attempt to [reinstall your Git installation](/pages/configuration/git-integration/#reinstall-a-git-installation), but if the issue persists, [contact support](/support/contacting-cloudflare-support/).
-
-## Related resources
-
-- [Branch build controls](/pages/configuration/branch-build-controls/#production-branch-control) - Control which environments and branches you would like to automatically deploy to.
diff --git a/src/content/docs/pages/configuration/git-integration/github-integration.mdx b/src/content/docs/pages/configuration/git-integration/github-integration.mdx
new file mode 100644
index 00000000000000..c21b2d1ae0505d
--- /dev/null
+++ b/src/content/docs/pages/configuration/git-integration/github-integration.mdx
@@ -0,0 +1,91 @@
+---
+pcx_content_type: concept
+title: GitHub integration
+---
+
+You can connect each Cloudflare Pages project to a GitHub repository, and Cloudflare will automatically deploy your code every time you push a change to a branch.
+
+## Features
+
+Beyond automatic deployments, the Cloudflare GitHub integration lets you monitor, manage, and preview deployments directly in GitHub, keeping you informed without leaving your workflow.
+
+### Custom branches
+
+Pages will default to setting your [production environment](/pages/configuration/branch-build-controls/#production-branch-control) to the branch you first push. If a branch other than the default branch (e.g. `main`) represents your project's production branch, then go to **Settings** > **Builds** > **Branch control**, change the production branch by clicking the **Production branch** dropdown menu and choose any other branch.
+
+You can also use [preview deployments](/pages/configuration/preview-deployments/) to preview versions of your project before merging your production branch, and deploying to production. Pages allows you to configure which of your preview branches are automatically deployed using [branch build controls](/pages/configuration/branch-build-controls/). To configure, go to **Settings** > **Builds** > **Branch control** and select an option under **Preview branch**. Use [**Custom branches**](/pages/configuration/branch-build-controls/) to specify branches you wish to include or exclude from automatic preview deployments.
+
+### Preview URLs
+
+Every time you open a new pull request on your GitHub repository, Cloudflare Pages will create a unique preview URL, which will stay updated as you continue to push new commits to the branch. Note that preview URLs will not be created for pull requests created from forks of your repository. Learn more in [Preview Deployments](/pages/configuration/preview-deployments/).
+
+
+
+### Skipping a build via a commit message
+
+Without any configuration required, you can choose to skip a deployment on an ad hoc basis. By adding the `[CI Skip]`, `[CI-Skip]`, `[Skip CI]`, `[Skip-CI]`, or `[CF-Pages-Skip]` flag as a prefix in your commit message, Pages will omit that deployment. The prefixes are not case sensitive.
+
+### Check runs
+
+If you have one or multiple projects connected to a repository (i.e. a [monorepo](/pages/configuration/monorepos/)), you can check on the status of each build within GitHub via [GitHub check runs](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/collaborating-on-repositories-with-code-quality-features/about-status-checks#checks).
+
+You can see the checks by selecting the status icon next to a commit within your GitHub repository. In the example below, you can select the green check mark to see the results of the check run.
+
+
+
+Check runs will appear like the following in your repository.
+
+
+
+If a build skips for any reason (i.e. CI Skip, build watch paths, or branch deployment controls), the check run/commit status will not appear.
+
+## Manage access
+
+You can deploy projects to Cloudflare Workers from your company or side project on GitHub using the [Cloudflare Workers & Pages GitHub App](https://github.com/apps/cloudflare-workers-and-pages).
+
+### Organizational access
+
+You can deploy projects to Cloudflare Pages from your company or side project on both GitHub and GitLab.
+
+When authorizing Cloudflare Pages to access a GitHub account, you can specify access to your individual account or an organization that you belong to on GitHub. In order to be able to add the Cloudflare Pages installation to that organization, your user account must be an owner or have the appropriate role within the organization (that is, the GitHub Apps Manager role). More information on these roles can be seen on [GitHub's documentation](https://docs.github.com/en/organizations/managing-peoples-access-to-your-organization-with-roles/roles-in-an-organization#github-app-managers).
+
+:::caution[GitHub security consideration]
+
+A GitHub account should only point to one Cloudflare account. If you are setting up Cloudflare with GitHub for your organization, Cloudflare recommends that you limit the scope of the application to only the repositories you intend to build with Pages. To modify these permissions, go to the [Applications page](https://github.com/settings/installations) on GitHub and select **Switch settings context** to access your GitHub organization settings. Then, select **Cloudflare Workers & Pages** > For **Repository access**, select **Only select repositories** > select your repositories.
+
+:::
+
+### Remove access
+
+You can remove Cloudflare Pages' access to your GitHub repository or account by going to the [Applications page](https://github.com/settings/installations) on GitHub (if you are in an organization, select Switch settings context to access your GitHub organization settings). The GitHub App is named Cloudflare Workers and Pages, and it is shared between Workers and Pages projects.
+
+#### Remove Cloudflare access to a GitHub repository
+
+To remove access to an individual GitHub repository, you can navigate to **Repository access**. Select the **Only select repositories** option, and configure which repositories you would like Cloudflare to have access to.
+
+
+
+#### Remove Cloudflare access to the entire GitHub account
+
+To remove Cloudflare Workers and Pages access to your entire Git account, you can navigate to **Uninstall "Cloudflare Workers and Pages"**, then select **Uninstall**. Removing access to the Cloudflare Workers and Pages app will revoke Cloudflare's access to _all repositories_ from that GitHub account. If you want to only disable automatic builds and deployments, follow the [Disable Build](/workers/ci-cd/builds/#disabling-builds) instructions.
+
+Note that removing access to GitHub will disable new builds for Workers and Pages projects that were connected to those repositories, though your previous deployments will continue to be hosted by Cloudflare Workers.
+
+### Reinstall the Cloudflare GitHub app
+
+If you see errors where Cloudflare Pages cannot access your git repository, you should attempt to uninstall and reinstall the GitHub application associated with the Cloudflare Pages installation.
+
+1. Go to the installation settings page on GitHub:
+ - Navigate to **Settings > Builds** for the Pages project and select **Manage** under Git Repository.
+ - Alternatively, visit these links to find the Cloudflare Workers and Pages installation and select **Configure**:
+
+| | |
+| ---------------- | ---------------------------------------------------------------------------------- |
+| **Individual** | `https://github.com/settings/installations` |
+| **Organization** | `https://github.com/organizations/<organization>/settings/installations` |
+
+2. In the Cloudflare Workers and Pages GitHub App settings page, navigate to **Uninstall "Cloudflare Workers and Pages"** and select **Uninstall**.
+3. Go back to the [**Workers & Pages** overview](https://dash.cloudflare.com) page. Select **Create application** > **Pages** > **Connect to Git**.
+4. Select the **+ Add account** button, select the GitHub account you want to add, and then select **Install & Authorize**.
+5. You should be redirected to the create project page with your GitHub account or organization in the account list.
+6. Attempt to make a new deployment with your project which was previously broken.
diff --git a/src/content/docs/pages/configuration/git-integration/gitlab-integration.mdx b/src/content/docs/pages/configuration/git-integration/gitlab-integration.mdx
new file mode 100644
index 00000000000000..8a829d389418d2
--- /dev/null
+++ b/src/content/docs/pages/configuration/git-integration/gitlab-integration.mdx
@@ -0,0 +1,61 @@
+---
+pcx_content_type: concept
+title: GitLab integration
+---
+
+You can connect each Cloudflare Pages project to a GitLab repository, and Cloudflare will automatically deploy your code every time you push a change to a branch.
+
+## Features
+
+Beyond automatic deployments, the Cloudflare GitLab integration lets you monitor, manage, and preview deployments directly in GitLab, keeping you informed without leaving your workflow.
+
+### Custom branches
+
+Pages will default to setting your [production environment](/pages/configuration/branch-build-controls/#production-branch-control) to the branch you first push. If a branch other than the default branch (e.g. `main`) represents your project's production branch, then go to **Settings** > **Builds** > **Branch control**, change the production branch by clicking the **Production branch** dropdown menu and choose any other branch.
+
+You can also use [preview deployments](/pages/configuration/preview-deployments/) to preview versions of your project before merging your production branch, and deploying to production. Pages allows you to configure which of your preview branches are automatically deployed using [branch build controls](/pages/configuration/branch-build-controls/). To configure, go to **Settings** > **Builds** > **Branch control** and select an option under **Preview branch**. Use [**Custom branches**](/pages/configuration/branch-build-controls/) to specify branches you wish to include or exclude from automatic preview deployments.
+
+### Skipping a specific build via a commit message
+
+Without any configuration required, you can choose to skip a deployment on an ad hoc basis. By adding the `[CI Skip]`, `[CI-Skip]`, `[Skip CI]`, `[Skip-CI]`, or `[CF-Pages-Skip]` flag as a prefix in your commit message, Pages will omit that deployment. The prefixes are not case sensitive.
+
+### Check runs and preview URLs
+
+If you have one or multiple projects connected to a repository (i.e. a [monorepo](/workers/ci-cd/builds/advanced-setups/#monorepos)), you can check on the status of each build within GitLab via [GitLab commit status](https://docs.gitlab.com/ee/user/project/merge_requests/status_checks.html).
+
+You can see the statuses by selecting the status icon next to a commit or by going to **Build** > **Pipelines** within your GitLab repository. In the example below, you can select the green check mark to see the results of the check run.
+
+
+
+Check runs will appear like the following in your repository. You can select one of the statuses to view the [preview URL](/pages/configuration/preview-deployments/) for that deployment.
+
+
+
+If a build skips for any reason (i.e. CI Skip, build watch paths, or branch deployment controls), the check run/commit status will not appear.
+
+## Manage access
+
+You can deploy projects to Cloudflare Pages from your company or side project on GitLab using the Cloudflare Pages app.
+
+### Organizational access
+
+You can deploy projects to Cloudflare Pages from your company or side project on both GitHub and GitLab.
+
+When you authorize Cloudflare Pages to access your GitLab account, you automatically give Cloudflare Pages access to organizations, groups, and namespaces accessed by your GitLab account. Managing access to these organizations and groups is handled by GitLab.
+
+### Remove access
+
+You can remove Cloudflare Workers' access to your GitLab account by navigating to the [Authorized Applications page](https://gitlab.com/-/profile/applications) on GitLab. Find the application called Cloudflare Workers and select the **Revoke** button to revoke access.
+
+Note that the GitLab application Cloudflare Workers is shared between Workers and Pages projects, and removing access to GitLab will disable new builds for Workers and Pages, though your previous deployments will continue to be hosted by Cloudflare Pages.
+
+### Reinstall the Cloudflare GitLab app
+
+When encountering Git integration related issues, one potential troubleshooting step is attempting to uninstall and reinstall the GitHub or GitLab application associated with the Cloudflare Pages installation.
+
+1. Go to your application settings page on GitLab located here: [https://gitlab.com/-/profile/applications](https://gitlab.com/-/profile/applications)
+2. Select the **Revoke** button on your Cloudflare Pages installation if it exists.
+3. Go back to the **Workers & Pages** overview page at `https://dash.cloudflare.com/[YOUR_ACCOUNT_ID]/workers-and-pages`. Select **Create application** > **Pages** > **Connect to Git**.
+4. Select the **GitLab** tab at the top, select the **+ Add account** button, select the GitLab account you want to add, and then select **Authorize** on the modal titled "Authorize Cloudflare Pages to use your account?".
+5. You will be redirected to the create project page with your GitLab account or organization in the account list.
+6. Attempt to make a new deployment with your project which was previously broken.
diff --git a/src/content/docs/pages/configuration/git-integration/index.mdx b/src/content/docs/pages/configuration/git-integration/index.mdx
new file mode 100644
index 00000000000000..24ede8576e9dc4
--- /dev/null
+++ b/src/content/docs/pages/configuration/git-integration/index.mdx
@@ -0,0 +1,57 @@
+---
+pcx_content_type: concept
+title: Git integration
+---
+
+You can connect each Cloudflare Pages project to a [GitHub](/pages/configuration/git-integration/github-integration) or [GitLab](/pages/configuration/git-integration/gitlab-integration) repository, and Cloudflare will automatically deploy your code every time you push a change to a branch.
+
+:::note
+Cloudflare Workers now also supports Git integrations to automatically build and deploy Workers from your connected Git repository. Learn more in [Workers Builds](/workers/ci-cd/builds/).
+:::
+
+When you connect a git repository to your Cloudflare Pages project, Cloudflare will also:
+
+- **Preview deployments for custom branches**, generating preview URLs for a commit to any branch in the repository without affecting your production deployment.
+- **Preview URLs in pull requests** (PRs) to the repository.
+- **Build and deployment status checks** within the Git repository.
+- **Skipping builds using a commit message**.
+
+These features allow you to manage your deployments directly within GitHub or GitLab without leaving your team's regular development workflow.
+
+:::caution[You cannot switch to Direct Upload later]
+If you deploy using the Git integration, you cannot switch to [Direct Upload](/pages/get-started/direct-upload/) later. However, if you already use a Git-integrated project and do not want to trigger deployments every time you push a commit, you can [disable automatic deployments](/pages/configuration/git-integration/#disable-automatic-deployments) on all branches. Then, you can use Wrangler to deploy directly to your Pages projects and make changes to your Git repository without automatically triggering a build.
+
+:::
+
+## Supported Git providers
+
+Cloudflare supports connecting Cloudflare Pages to your GitHub and GitLab repositories. Pages does not currently support connecting self-hosted instances of GitHub or GitLab.
+
+If you are using a different Git provider (e.g. Bitbucket) or a self-hosted instance, you can start with a Direct Upload project and deploy using a CI/CD provider (e.g. GitHub Actions) with [Wrangler CLI](/pages/how-to/use-direct-upload-with-continuous-integration/).
+
+## Add a Git integration
+
+If you do not have a Git account linked to your Cloudflare account, you will be prompted to set up an installation to GitHub or GitLab when [connecting to Git](/pages/get-started/git-integration/) for the first time, or when adding a new Git account. Follow the prompts and authorize the Cloudflare Git integration.
+
+You can check the following pages to see if your Git integration has been installed:
+
+- [GitHub Applications page](https://github.com/settings/installations) (if you're in an organization, select **Switch settings context** to access your GitHub organization settings)
+- [GitLab Authorized Applications page](https://gitlab.com/-/profile/applications)
+
+For details on providing access to organization accounts, see the [GitHub](/pages/configuration/git-integration/github-integration/#organizational-access) and [GitLab](/pages/configuration/git-integration/gitlab-integration/#organizational-access) guides.
+
+## Manage a Git integration
+
+You can manage the Git installation associated with your repository connection by navigating to the Pages project, then going to **Settings** > **Builds** and selecting **Manage** under **Git Repository**.
+
+This can be useful for managing repository access or troubleshooting installation issues by reinstalling. For more details, see the [GitHub](/pages/configuration/git-integration/github-integration/#manage-access) and [GitLab](/pages/configuration/git-integration/gitlab-integration/#manage-access) guides.
+
+## Disable automatic deployments
+
+If you are using a Git-integrated project and do not want to trigger deployments every time you push a commit, you can use [branch control](/pages/configuration/branch-build-controls/) to disable/pause builds:
+
+1. Go to the **Settings** of your **Pages project** in the [Cloudflare dashboard](https://dash.cloudflare.com).
+2. Navigate to **Build** > edit **Branch control** > turn off **Enable automatic production branch deployments**.
+3. You can also change your Preview branch to **None (Disable automatic branch deployments)** to pause automatic preview deployments.
+
+Then, you can use Wrangler to deploy directly to your Pages project and make changes to your Git repository without automatically triggering a build.
diff --git a/src/content/docs/pages/configuration/git-integration/troubleshooting.mdx b/src/content/docs/pages/configuration/git-integration/troubleshooting.mdx
new file mode 100644
index 00000000000000..5da836dddb7e2e
--- /dev/null
+++ b/src/content/docs/pages/configuration/git-integration/troubleshooting.mdx
@@ -0,0 +1,58 @@
+---
+pcx_content_type: concept
+title: Troubleshooting builds
+---
+
+If your git integration is experiencing issues, you may find the following banners in the Deployment page of your Pages project.
+
+## Project creation
+
+#### `This repository is being used for a Cloudflare Pages project on a different Cloudflare account.`
+
+Using the same GitHub/GitLab repository across separate Cloudflare accounts is disallowed. To use the repository for a Pages project in that Cloudflare account, you should delete any Pages projects using the repository in other Cloudflare accounts.
+
+## Deployments
+
+If you run into any issues related to deployments failing, check your project dashboard to see if there are any SCM installation warnings listed as shown in the screenshot below.
+
+
+
+To resolve any errors displayed in the Cloudflare Pages dashboard, follow the steps listed below.
+
+#### `This project is disconnected from your Git account, this may cause deployments to fail.`
+
+To resolve this issue, follow the reinstall steps for the applicable SCM provider ([GitHub](/pages/configuration/git-integration/github-integration/#reinstall-the-cloudflare-github-app) or [GitLab](/pages/configuration/git-integration/gitlab-integration/#reinstall-the-cloudflare-gitlab-app)). If the issue persists even after uninstalling and reinstalling, contact support.
+
+#### `Cloudflare Pages is not properly installed on your Git account, this may cause deployments to fail.`
+
+To resolve this issue, follow the reinstall steps for the applicable SCM provider ([GitHub](/pages/configuration/git-integration/github-integration/#reinstall-the-cloudflare-github-app) or [GitLab](/pages/configuration/git-integration/gitlab-integration/#reinstall-the-cloudflare-gitlab-app)). If the issue persists even after uninstalling and reinstalling, contact support.
+
+#### `The Cloudflare Pages installation has been suspended, this may cause deployments to fail.`
+
+Go to your GitHub installation settings:
+
+- `https://github.com/settings/installations` for individual accounts
+- `https://github.com/organizations/<YOUR_ORG>/settings/installations` for organizational accounts
+
+Click **Configure** on the Cloudflare Pages application. Scroll down to the bottom of the page and click **Unsuspend** to allow Cloudflare Pages to make future deployments.
+
+#### `The project is linked to a repository that no longer exists, this may cause deployments to fail.`
+
+You may have deleted or transferred the repository associated with this Cloudflare Pages project. For a deleted repository, you will need to create a new Cloudflare Pages project with a repository that has not been deleted. For a transferred repository, you can either transfer the repository back to the original Git account or you will need to create a new Cloudflare Pages project with the transferred repository.
+
+#### `The repository cannot be accessed, this may cause deployments to fail.`
+
+You may have excluded this repository from your installation's repository access settings. Go to your GitHub installation settings:
+
+- `https://github.com/settings/installations` for individual accounts
+- `https://github.com/organizations/<YOUR_ORG>/settings/installations` for organizational accounts
+
+Click **Configure** on the Cloudflare Pages application. Under **Repository access**, ensure that the repository associated with your Cloudflare Pages project is included in the list.
+
+#### `There is an internal issue with your Cloudflare Pages Git installation.`
+
+This is an internal error in the Cloudflare Pages SCM system. You can attempt to reinstall your Git installation ([GitHub](/pages/configuration/git-integration/github-integration/#reinstall-the-cloudflare-github-app) or [GitLab](/pages/configuration/git-integration/gitlab-integration/#reinstall-the-cloudflare-gitlab-app)), but if the issue persists, [contact support](/support/contacting-cloudflare-support/).
+
+#### `GitHub/GitLab is having an incident and push events to Cloudflare are operating in a degraded state. Check their status page for more details.`
+
+This indicates that GitHub or GitLab may be experiencing an incident affecting push events to Cloudflare. It is recommended to monitor their status page ([GitHub](https://www.githubstatus.com/), [GitLab](https://status.gitlab.com/)) for updates and try deploying again later.
diff --git a/src/content/docs/pages/configuration/preview-deployments.mdx b/src/content/docs/pages/configuration/preview-deployments.mdx
index 808e740d76fb62..e989051a614792 100644
--- a/src/content/docs/pages/configuration/preview-deployments.mdx
+++ b/src/content/docs/pages/configuration/preview-deployments.mdx
@@ -11,6 +11,8 @@ Preview deployments allow you to preview new versions of your project without de
Every time you open a new pull request on your GitHub repository, Cloudflare Pages will create a unique preview URL, which will stay updated as you continue to push new commits to the branch. This is only true when pull requests originate from the repository itself.
+
+
For example, if you have a repository called `user-example` connected to Pages, this will give you a `user-example.pages.dev` subdomain. If `main` is your default branch, then any commits to the `main` branch will update your `user-example.pages.dev` content, as well as any [custom domains](/pages/configuration/custom-domains/) attached to the project.

diff --git a/src/content/docs/pages/framework-guides/deploy-a-nuxt-site.mdx b/src/content/docs/pages/framework-guides/deploy-a-nuxt-site.mdx
index f26026ac689fef..49d97532ca141a 100644
--- a/src/content/docs/pages/framework-guides/deploy-a-nuxt-site.mdx
+++ b/src/content/docs/pages/framework-guides/deploy-a-nuxt-site.mdx
@@ -107,7 +107,7 @@ export default defineNuxtConfig({
});
```
-This module is powered by the [`getPlatformProxy` helper function](/workers/wrangler/api#getplatformproxy). `getPlatformProxy` will automatically detect any bindings defined in your project's `wrangler.toml` file and emulate those bindings in local development. Review [Wrangler configuration information on bindings](/workers/wrangler/configuration/#bindings) for more information on how to configure bindings in `wrangler.toml`.
+This module is powered by the [`getPlatformProxy` helper function](/workers/wrangler/api#getplatformproxy). `getPlatformProxy` will automatically detect any bindings defined in your project's Wrangler file and emulate those bindings in local development. Review [Wrangler configuration information on bindings](/workers/wrangler/configuration/#bindings) for more information on how to configure bindings in the `wrangler.toml / wrangler.json` file.
:::note
diff --git a/src/content/docs/pages/framework-guides/deploy-a-remix-site.mdx b/src/content/docs/pages/framework-guides/deploy-a-remix-site.mdx
index c140334ae10cd2..0cdf9866f6a49c 100644
--- a/src/content/docs/pages/framework-guides/deploy-a-remix-site.mdx
+++ b/src/content/docs/pages/framework-guides/deploy-a-remix-site.mdx
@@ -3,7 +3,7 @@ pcx_content_type: how-to
title: Remix
---
-import { PagesBuildPreset, Render, PackageManagers } from "~/components";
+import { PagesBuildPreset, Render, PackageManagers, WranglerConfig } from "~/components";
[Remix](https://remix.run/) is a framework that is focused on fully utilizing the power of the web. Like Cloudflare Workers, it uses modern JavaScript APIs, and it places emphasis on web fundamentals such as meaningful HTTP status codes, caching and optimizing for both usability and performance.
@@ -82,9 +82,9 @@ A [binding](/pages/functions/bindings/) allows your application to interact with
Remix uses Wrangler's [`getPlatformProxy`](/workers/wrangler/api/#getplatformproxy) to simulate the Cloudflare environment locally. You configure `getPlatformProxy` in your project's `vite.config.ts` file via [`cloudflareDevProxyVitePlugin`](https://remix.run/docs/en/main/future/vite#cloudflare-proxy).
-To bind resources in local development, you need to configure the bindings in the `wrangler.toml` file. Refer to [Bindings](/workers/wrangler/configuration/#bindings) to learn more.
+To bind resources in local development, you need to configure the bindings in the Wrangler file. Refer to [Bindings](/workers/wrangler/configuration/#bindings) to learn more.
-Once you have configured the bindings in the `wrangler.toml` file, the proxies are then available within `context.cloudflare` in your `loader` or `action` functions:
+Once you have configured the bindings in the Wrangler file, the proxies are then available within `context.cloudflare` in your `loader` or `action` functions:
```typescript
export const loader = ({ context }: LoaderFunctionArgs) => {
@@ -96,7 +96,7 @@ export const loader = ({ context }: LoaderFunctionArgs) => {
:::note[Correcting the env type]
-You may have noticed that `context.cloudflare.env` is not typed correctly when you add additional bindings in `wrangler.toml`.
+You may have noticed that `context.cloudflare.env` is not typed correctly when you add additional bindings in the `wrangler.toml / wrangler.json` file.
To fix this, run `npm run typegen` to generate the missing types. This will update the `Env` interface defined in `worker-configuration.d.ts`.
After running the command, you can access the bindings in your `loader` or `action` using `context.cloudflare.env` as shown above.
@@ -113,9 +113,7 @@ Once you have configured the bindings in the Cloudflare dashboard, the proxies a
As an example, you will bind and query a D1 database in a Remix application.
1. Create a D1 database. Refer to the [D1 documentation](/d1/) to learn more.
-2. Configure bindings for your D1 database in the `wrangler.toml` file:
-
-import { WranglerConfig } from "~/components";
+2. Configure bindings for your D1 database in the Wrangler file:
diff --git a/src/content/docs/pages/framework-guides/deploy-an-analog-site.mdx b/src/content/docs/pages/framework-guides/deploy-an-analog-site.mdx
index bf19694e41f427..a87a9547ba4373 100644
--- a/src/content/docs/pages/framework-guides/deploy-an-analog-site.mdx
+++ b/src/content/docs/pages/framework-guides/deploy-an-analog-site.mdx
@@ -102,15 +102,15 @@ export default defineNitroPlugin((nitroApp: NitroApp) => {
});
```
-In the code above, the `getPlatformProxy` helper function will automatically detect any bindings defined in your project's `wrangler.toml` file and emulate those bindings in local development. Review [Wrangler configuration information on bindings](/workers/wrangler/configuration/#bindings) for more information on how to configure bindings in `wrangler.toml`.
+In the code above, the `getPlatformProxy` helper function will automatically detect any bindings defined in your project's Wrangler file and emulate those bindings in local development. You may wish to refer to [Wrangler configuration information on bindings](/workers/wrangler/configuration/#bindings).
-A new type definition for the `Env` type (used by `context.cloudflare.env`) can be generated from `wrangler.toml` with the following command:
+A new type definition for the `Env` type (used by `context.cloudflare.env`) can be generated from the `wrangler.toml / wrangler.json` file with the following command:
```sh
npm run cf-typegen
```
-This should be done any time you add new bindings to `wrangler.toml`.
+This should be done any time you add new bindings to your Wrangler configuration.
### Setup bindings in deployed applications
diff --git a/src/content/docs/pages/framework-guides/nextjs/ssr/advanced.mdx b/src/content/docs/pages/framework-guides/nextjs/ssr/advanced.mdx
index 7de369abf105bc..c43dced68f8795 100644
--- a/src/content/docs/pages/framework-guides/nextjs/ssr/advanced.mdx
+++ b/src/content/docs/pages/framework-guides/nextjs/ssr/advanced.mdx
@@ -32,7 +32,7 @@ export default {
} as ExportedHandler<{ ASSETS: Fetcher }>;
```
-This looks like a Worker — but it does not need its own `wrangler.toml` file. You can think of it purely as code that `@cloudflare/next-on-pages` will then use to wrap the output of the build that is deployed to your Cloudflare Pages project.
+This looks like a Worker — but it does not need its own Wrangler file. You can think of it purely as code that `@cloudflare/next-on-pages` will then use to wrap the output of the build that is deployed to your Cloudflare Pages project.
2. Pass the entrypoint argument to the next-on-pages CLI with the path to your handler.
diff --git a/src/content/docs/pages/framework-guides/nextjs/ssr/bindings.mdx b/src/content/docs/pages/framework-guides/nextjs/ssr/bindings.mdx
index 3d5e9372f782a5..79ecc4ac1f77c7 100644
--- a/src/content/docs/pages/framework-guides/nextjs/ssr/bindings.mdx
+++ b/src/content/docs/pages/framework-guides/nextjs/ssr/bindings.mdx
@@ -26,7 +26,7 @@ export async function GET(request) {
}
```
-Add bindings to your Pages project by [adding them to your `wrangler.toml` configuration file](/pages/functions/wrangler-configuration/).
+Add bindings to your Pages project by [adding them to your `wrangler.toml / wrangler.json` file](/pages/functions/wrangler-configuration/).
## TypeScript type declarations for bindings
diff --git a/src/content/docs/pages/framework-guides/nextjs/ssr/get-started.mdx b/src/content/docs/pages/framework-guides/nextjs/ssr/get-started.mdx
index f1c76217a886f4..87d20e861dce35 100644
--- a/src/content/docs/pages/framework-guides/nextjs/ssr/get-started.mdx
+++ b/src/content/docs/pages/framework-guides/nextjs/ssr/get-started.mdx
@@ -9,7 +9,7 @@ head:
description: Deploy a full-stack Next.js app to Cloudflare Pages
---
-import { PackageManagers } from "~/components";
+import { PackageManagers, WranglerConfig } from "~/components";
Learn how to deploy full-stack (SSR) Next.js apps to Cloudflare Pages.
@@ -43,11 +43,9 @@ First, install [@cloudflare/next-on-pages](https://github.com/cloudflare/next-on
npm install --save-dev @cloudflare/next-on-pages
```
-### 2. Add `wrangler.toml` file
+### 2. Add Wrangler file
-Then, add a [`wrangler.toml`](/pages/functions/wrangler-configuration/) file to the root directory of your Next.js app:
-
-import { WranglerConfig } from "~/components";
+Then, add a [`wrangler.toml / wrangler.json` file](/pages/functions/wrangler-configuration/) to the root directory of your Next.js app:
diff --git a/src/content/docs/pages/functions/bindings.mdx b/src/content/docs/pages/functions/bindings.mdx
index df5f95d32f7420..976cc4dcc29fe5 100644
--- a/src/content/docs/pages/functions/bindings.mdx
+++ b/src/content/docs/pages/functions/bindings.mdx
@@ -5,7 +5,7 @@ sidebar:
order: 7
---
-import { Render, TabItem, Tabs } from "~/components";
+import { Render, TabItem, Tabs, WranglerConfig } from "~/components";
A [binding](/workers/runtime-apis/bindings/) enables your Pages Functions to interact with resources on the Cloudflare developer platform. Use bindings to integrate your Pages Functions with Cloudflare resources like [KV](/kv/concepts/how-kv-works/), [Durable Objects](/durable-objects/), [R2](/r2/), and [D1](/d1/). You can set bindings for both production and preview environments.
@@ -21,7 +21,7 @@ Pages Functions only support a subset of all [bindings](/workers/runtime-apis/bi
[Workers KV](/kv/concepts/kv-namespaces/) is Cloudflare's key-value storage solution.
-To bind your KV namespace to your Pages Function, you can configure a KV namespace binding in [`wrangler.toml`](/pages/functions/wrangler-configuration/#kv-namespaces) or the Cloudflare dashboard.
+To bind your KV namespace to your Pages Function, you can configure a KV namespace binding in the [`wrangler.toml / wrangler.json` file](/pages/functions/wrangler-configuration/#kv-namespaces) or the Cloudflare dashboard.
To configure a KV namespace binding via the Cloudflare dashboard:
@@ -63,7 +63,7 @@ export const onRequest: PagesFunction = async (context) => {
You can interact with your KV namespace bindings locally in one of two ways:
-- Configure your Pages project's `wrangler.toml` file and run [`npx wrangler pages dev`](/workers/wrangler/commands/#dev-1).
+- Configure your Pages project's Wrangler file and run [`npx wrangler pages dev`](/workers/wrangler/commands/#dev-1).
- Pass arguments to `wrangler pages dev` directly.
To interact with your KV namespace binding locally by passing arguments to the Wrangler CLI, add `-k ` or `--kv=` to the `wrangler pages dev` command. For example, if your KV namespace is bound your Function via the `TODO_LIST` binding, access the KV namespace in local development by running:
@@ -80,7 +80,7 @@ npx wrangler pages dev --kv=TODO_LIST
-To bind your Durable Object to your Pages Function, you can configure a Durable Object binding in [`wrangler.toml`](/pages/functions/wrangler-configuration/#kv-namespaces) or the Cloudflare dashboard.
+To bind your Durable Object to your Pages Function, you can configure a Durable Object binding in the [`wrangler.toml / wrangler.json` file](/pages/functions/wrangler-configuration/#kv-namespaces) or the Cloudflare dashboard.
To configure a Durable Object binding via the Cloudflare dashboard:
@@ -128,7 +128,7 @@ export const onRequestGet: PagesFunction = async (context) => {
You can interact with your Durable Object bindings locally in one of two ways:
-- Configure your Pages project's `wrangler.toml` file and run [`npx wrangler pages dev`](/workers/wrangler/commands/#dev-1).
+- Configure your Pages project's Wrangler file and run [`npx wrangler pages dev`](/workers/wrangler/commands/#dev-1).
- Pass arguments to `wrangler pages dev` directly.
While developing locally, to interact with a Durable Object namespace, run `wrangler dev` in the directory of the Worker exporting the Durable Object. In another terminal, run `wrangler pages dev` in the directory of your Pages project.
@@ -143,7 +143,7 @@ For example, if your Worker is called `do-worker` and it declares a Durable Obje
[R2](/r2/) is Cloudflare's blob storage solution that allows developers to store large amounts of unstructured data without the egress fees.
-To bind your R2 bucket to your Pages Function, you can configure a R2 bucket binding in [`wrangler.toml`](/pages/functions/wrangler-configuration/#r2-buckets) or the Cloudflare dashboard.
+To bind your R2 bucket to your Pages Function, you can configure a R2 bucket binding in the [`wrangler.toml / wrangler.json` file](/pages/functions/wrangler-configuration/#r2-buckets) or the Cloudflare dashboard.
To configure a R2 bucket binding via the Cloudflare dashboard:
@@ -191,7 +191,7 @@ export const onRequest: PagesFunction = async (context) => {
You can interact with your R2 bucket bindings locally in one of two ways:
-- Configure your Pages project's `wrangler.toml` file and run [`npx wrangler pages dev`](/workers/wrangler/commands/#dev-1).
+- Configure your Pages project's Wrangler file and run [`npx wrangler pages dev`](/workers/wrangler/commands/#dev-1).
- Pass arguments to `wrangler pages dev` directly.
:::note
@@ -214,7 +214,7 @@ Interact with this binding by using `context.env` (for example, `context.env.BUC
[D1](/d1/) is Cloudflare’s native serverless database.
-To bind your D1 database to your Pages Function, you can configure a D1 database binding in [`wrangler.toml`](/pages/functions/wrangler-configuration/#d1-databases) or the Cloudflare dashboard.
+To bind your D1 database to your Pages Function, you can configure a D1 database binding in the [`wrangler.toml / wrangler.json` file](/pages/functions/wrangler-configuration/#d1-databases) or the Cloudflare dashboard.
To configure a D1 database binding via the Cloudflare dashboard:
@@ -262,12 +262,12 @@ export const onRequest: PagesFunction = async (context) => {
You can interact with your D1 database bindings locally in one of two ways:
-- Configure your Pages project's `wrangler.toml` file and run [`npx wrangler pages dev`](/workers/wrangler/commands/#dev-1).
+- Configure your Pages project's Wrangler file and run [`npx wrangler pages dev`](/workers/wrangler/commands/#dev-1).
- Pass arguments to `wrangler pages dev` directly.
To interact with a D1 database via the Wrangler CLI while [developing locally](/d1/best-practices/local-development/#develop-locally-with-pages), add `--d1 =` to the `wrangler pages dev` command.
-If your D1 database is bound to your Pages Function via the `NORTHWIND_DB` binding and the `database_id` in your `wrangler.toml` file is `xxxx-xxxx-xxxx-xxxx-xxxx`, access this database in local development by running:
+If your D1 database is bound to your Pages Function via the `NORTHWIND_DB` binding and the `database_id` in your Wrangler file is `xxxx-xxxx-xxxx-xxxx-xxxx`, access this database in local development by running:
```sh
npx wrangler pages dev --d1 NORTHWIND_DB=xxxx-xxxx-xxxx-xxxx-xxxx
@@ -289,7 +289,7 @@ Refer to the [D1 Workers Binding API documentation](/d1/worker-api/) for the API
[Vectorize](/vectorize/) is Cloudflare’s native vector database.
-To bind your Vectorize index to your Pages Function, you can configure a Vectorize index binding in [`wrangler.toml`](/pages/functions/wrangler-configuration/#vectorize-indexes) or the Cloudflare dashboard.
+To bind your Vectorize index to your Pages Function, you can configure a Vectorize index binding in the [`wrangler.toml / wrangler.json` file](/pages/functions/wrangler-configuration/#vectorize-indexes) or the Cloudflare dashboard.
To configure a Vectorize index binding via the Cloudflare dashboard:
@@ -426,7 +426,7 @@ export const onRequest: PagesFunction = async (context) => {
[Workers AI](/workers-ai/) allows you to run machine learning models, powered by serverless GPUs, on Cloudflare’s global network.
-To bind Workers AI to your Pages Function, you can configure a Workers AI binding in [`wrangler.toml`](/pages/functions/wrangler-configuration/#workers-ai) or the Cloudflare dashboard.
+To bind Workers AI to your Pages Function, you can configure a Workers AI binding in the [`wrangler.toml / wrangler.json` file](/pages/functions/wrangler-configuration/#workers-ai) or the Cloudflare dashboard.
When developing locally using Wrangler, you can define an AI binding using the `--ai` flag. Start Wrangler in development mode by running [`wrangler pages dev --ai AI`](/workers/wrangler/commands/#dev) to expose the `context.env.AI` binding.
@@ -485,7 +485,7 @@ export const onRequest: PagesFunction = async (context) => {
You can interact with your Workers AI bindings locally in one of two ways:
-- Configure your Pages project's `wrangler.toml` file and run [`npx wrangler pages dev`](/workers/wrangler/commands/#dev-1).
+- Configure your Pages project's Wrangler file and run [`npx wrangler pages dev`](/workers/wrangler/commands/#dev-1).
- Pass arguments to `wrangler pages dev` directly.
To interact with a Workers AI binding via the Wrangler CLI while developing locally, run:
@@ -500,7 +500,7 @@ npx wrangler pages dev --ai=
[Service bindings](/workers/runtime-apis/bindings/service-bindings/) enable you to call a Worker from within your Pages Function.
-To bind your Pages Function to a Worker, configure a Service binding in your Pages Function using [`wrangler.toml`](/pages/functions/wrangler-configuration/#service-bindings) or the Cloudflare dashboard.
+To bind your Pages Function to a Worker, configure a Service binding in your Pages Function using the [`wrangler.toml / wrangler.json` file](/pages/functions/wrangler-configuration/#service-bindings) or the Cloudflare dashboard.
To configure a Service binding via the Cloudflare dashboard:
@@ -540,7 +540,7 @@ export const onRequest: PagesFunction = async (context) => {
You can interact with your Service bindings locally in one of two ways:
-- Configure your Pages project's `wrangler.toml` file and run [`npx wrangler pages dev`](/workers/wrangler/commands/#dev-1).
+- Configure your Pages project's Wrangler file and run [`npx wrangler pages dev`](/workers/wrangler/commands/#dev-1).
- Pass arguments to `wrangler pages dev` directly.
To interact with a [Service binding](/workers/runtime-apis/bindings/service-bindings/) while developing locally, run the Worker you want to bind to via `wrangler dev` and in parallel, run `wrangler pages dev` with `--service =` where `SCRIPT_NAME` indicates the name of the Worker. For example, if your Worker is called `my-worker`, connect with this Worker by running it via `npx wrangler dev` (in the Worker's directory) alongside `npx wrangler pages dev --service MY_SERVICE=my-worker` (in the Pages' directory). Interact with this binding by using `context.env` (for example, `context.env.MY_SERVICE`).
@@ -555,7 +555,7 @@ For example, to develop locally, if your Worker is called `my-worker`, run `npx
[Queue Producers](/queues/configuration/javascript-apis/#producer) enable you to send messages into a queue within your Pages Function.
-To bind a queue to your Pages Function, configure a queue producer binding in your Pages Function using [`wrangler.toml`](/pages/functions/wrangler-configuration/#queues-producers) or the Cloudflare dashboard:
+To bind a queue to your Pages Function, configure a queue producer binding in your Pages Function using the [`wrangler.toml / wrangler.json` file](/pages/functions/wrangler-configuration/#queues-producers) or the Cloudflare dashboard:
To configure a queue producer binding via the Cloudflare dashboard:
@@ -613,8 +613,6 @@ If using a queue producer binding with a Pages Function, you will be able to sen
PostgreSQL drivers like [`Postgres.js`](https://github.com/porsager/postgres) depend on Node.js APIs. Pages Functions with Hyperdrive bindings must be [deployed with Node.js compatibility](/workers/runtime-apis/nodejs).
-import { WranglerConfig } from "~/components";
-
```toml title="wrangler.toml"
@@ -628,7 +626,7 @@ compatibility_date = "2024-09-23"
[Hyperdrive](/hyperdrive/) is a service for connecting to your existing databases from Cloudflare Workers and Pages Functions.
-To bind your Hyperdrive config to your Pages Function, you can configure a Hyperdrive binding in [`wrangler.toml`](/pages/functions/wrangler-configuration/#hyperdrive) or the Cloudflare dashboard.
+To bind your Hyperdrive config to your Pages Function, you can configure a Hyperdrive binding in the [`wrangler.toml / wrangler.json` file](/pages/functions/wrangler-configuration/#hyperdrive) or the Cloudflare dashboard.
To configure a Hyperdrive binding via the Cloudflare dashboard:
1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com) and select your account.
@@ -693,13 +691,13 @@ export const onRequest: PagesFunction = async (context) => {
### Interact with your Hyperdrive binding locally
-To interact with your Hyperdrive binding locally, you must provide a local connection string to your database that your Pages project will connect to directly. You can set an environment variable `WRANGLER_HYPERDRIVE_LOCAL_CONNECTION_STRING_` with the connection string of the database, or use the `wrangler.toml` file to configure your Hyperdrive binding with a `localConnectionString` as specified in [Hyperdrive documentation for local development](/hyperdrive/configuration/local-development/). Then, run [`npx wrangler pages dev `](/workers/wrangler/commands/#dev-1).
+To interact with your Hyperdrive binding locally, you must provide a local connection string to your database that your Pages project will connect to directly. You can set an environment variable `WRANGLER_HYPERDRIVE_LOCAL_CONNECTION_STRING_` with the connection string of the database, or use the Wrangler file to configure your Hyperdrive binding with a `localConnectionString` as specified in [Hyperdrive documentation for local development](/hyperdrive/configuration/local-development/). Then, run [`npx wrangler pages dev `](/workers/wrangler/commands/#dev-1).
## Analytics Engine
The [Analytics Engine](/analytics/analytics-engine/) binding enables you to write analytics within your Pages Function.
-To bind an Analytics Engine dataset to your Pages Function, you must configure an Analytics Engine binding using [`wrangler.toml`](/pages/functions/wrangler-configuration/#analytics-engine-datasets) or the Cloudflare dashboard:
+To bind an Analytics Engine dataset to your Pages Function, you must configure an Analytics Engine binding using the [`wrangler.toml / wrangler.json` file](/pages/functions/wrangler-configuration/#analytics-engine-datasets) or the Cloudflare dashboard:
To configure an Analytics Engine binding via the Cloudflare dashboard:
@@ -759,7 +757,7 @@ You cannot use an Analytics Engine binding locally.
An [environment variable](/workers/configuration/environment-variables/) is an injected value that can be accessed by your Functions. Environment variables are a type of binding that allow you to attach text strings or JSON values to your Pages Function. It is stored as plain text. Set your environment variables directly within the Cloudflare dashboard for both your production and preview environments at runtime and build-time.
-To add environment variables to your Pages project, you can use [`wrangler.toml`](/pages/functions/wrangler-configuration/#environment-variables) or the Cloudflare dashboard.
+To add environment variables to your Pages project, you can use the [`wrangler.toml / wrangler.json` file](/pages/functions/wrangler-configuration/#environment-variables) or the Cloudflare dashboard.
To configure an environment variable via the Cloudflare dashboard:
@@ -805,7 +803,7 @@ export const onRequest: PagesFunction = async (context) => {
You can interact with your environment variables locally in one of two ways:
-- Configure your Pages project's `wrangler.toml` file and running `npx wrangler pages dev`.
+- Configure your Pages project's Wrangler file and run `npx wrangler pages dev`.
- Pass arguments to [`wrangler pages dev`](/workers/wrangler/commands/#dev-1) directly.
To interact with your environment variables locally via the Wrangler CLI, add `--binding==` to the `wrangler pages dev` command:
diff --git a/src/content/docs/pages/functions/local-development.mdx b/src/content/docs/pages/functions/local-development.mdx
index 87b149c7a25aaf..8c430386201310 100644
--- a/src/content/docs/pages/functions/local-development.mdx
+++ b/src/content/docs/pages/functions/local-development.mdx
@@ -25,13 +25,13 @@ This will then start serving your Pages project. You can press `b` to open the b
:::note
-If you have a [`wrangler.toml`](/pages/functions/wrangler-configuration/) file configured for your Pages project, you can run [`wrangler pages dev`](/workers/wrangler/commands/#dev-1) without specifying a directory.
+If you have a [`wrangler.toml / wrangler.json` file](/pages/functions/wrangler-configuration/) configured for your Pages project, you can run [`wrangler pages dev`](/workers/wrangler/commands/#dev-1) without specifying a directory.
:::
### HTTPS support
-To serve your local development server over HTTPS with a self-signed certificate, you can [set `local_protocol` via `wrangler.toml`](/pages/functions/wrangler-configuration/#local-development-settings) or you can pass the `--local-protocol=https` argument to [`wrangler pages dev`](/workers/wrangler/commands/#dev-1):
+To serve your local development server over HTTPS with a self-signed certificate, you can [set `local_protocol` via the `wrangler.toml / wrangler.json` file](/pages/functions/wrangler-configuration/#local-development-settings) or you can pass the `--local-protocol=https` argument to [`wrangler pages dev`](/workers/wrangler/commands/#dev-1):
```sh
npx wrangler pages dev --local-protocol=https
@@ -41,6 +41,6 @@ npx wrangler pages dev --local-protocol=https
To attach a binding to local development, refer to [Bindings](/pages/functions/bindings/) and find the Cloudflare Developer Platform resource you would like to work with.
-## Additional configuration via `wrangler.toml`
+## Additional Wrangler configuration
-If you are using a `wrangler.toml` configuration file in your project, you can set up dev server values like: `port`, `local protocol`, `ip`, and `port`. For more information, read about [configuring local development settings](/pages/functions/wrangler-configuration/#local-development-settings).
+If you are using a Wrangler configuration file in your project, you can set up dev server values like: `port`, `local protocol`, and `ip`. For more information, read about [configuring local development settings](/pages/functions/wrangler-configuration/#local-development-settings).
diff --git a/src/content/docs/pages/functions/source-maps.mdx b/src/content/docs/pages/functions/source-maps.mdx
index e88463b8466777..1878e223f979ab 100644
--- a/src/content/docs/pages/functions/source-maps.mdx
+++ b/src/content/docs/pages/functions/source-maps.mdx
@@ -9,7 +9,7 @@ sidebar:
---
-import { Render } from "~/components"
+import { Render, WranglerConfig } from "~/components"
@@ -23,9 +23,7 @@ Support for uploading source maps for Pages is available now in open beta. Minim
## Source Maps
-To enable source maps, provide the `--upload-source-maps` flag to [`wrangler pages deploy`](/workers/wrangler/commands/#deploy-1) or add the following to your Pages application's [`wrangler.toml`](/pages/functions/wrangler-configuration/) file if you are using the Pages build environment:
-
-import { WranglerConfig } from "~/components";
+To enable source maps, provide the `--upload-source-maps` flag to [`wrangler pages deploy`](/workers/wrangler/commands/#deploy-1) or add the following to your Pages application's [`wrangler.toml / wrangler.json` file](/pages/functions/wrangler-configuration/) if you are using the Pages build environment:
diff --git a/src/content/docs/pages/functions/wrangler-configuration.mdx b/src/content/docs/pages/functions/wrangler-configuration.mdx
index ed66149b387e69..460b0cd99dcc03 100644
--- a/src/content/docs/pages/functions/wrangler-configuration.mdx
+++ b/src/content/docs/pages/functions/wrangler-configuration.mdx
@@ -5,36 +5,34 @@ sidebar:
order: 6
---
-import { Render, TabItem, Tabs, Type, MetaInfo } from "~/components";
+import { Render, TabItem, Tabs, Type, MetaInfo, WranglerConfig } from "~/components";
:::caution
-If your project contains an existing `wrangler.toml` file that you [previously used for local development](/pages/functions/local-development/), make sure you verify that it matches your project settings in the Cloudflare dashboard before opting-in to deploy your Pages project with `wrangler.toml`. Instead of writing your `wrangler.toml` file by hand, Cloudflare recommends using `npx wrangler pages download config` to download your current project settings into a `wrangler.toml` file.
+If your project contains an existing Wrangler file that you [previously used for local development](/pages/functions/local-development/), make sure you verify that it matches your project settings in the Cloudflare dashboard before opting-in to deploy your Pages project with the `wrangler.toml / wrangler.json` file. Instead of writing your Wrangler file by hand, Cloudflare recommends using `npx wrangler pages download config` to download your current project settings into a Wrangler file.
:::
-Pages Functions can be configured two ways, either via the [Cloudflare dashboard](https://dash.cloudflare.com) or `wrangler.toml`, a configuration file used to customize the development and deployment setup for [Workers](/workers/) and Pages Functions.
+Pages Functions can be configured two ways, either via the [Cloudflare dashboard](https://dash.cloudflare.com) or the `wrangler.toml / wrangler.json` file, a file used to customize the development and deployment setup for [Workers](/workers/) and Pages Functions.
-This page serves as a reference on how to configure your Pages project via `wrangler.toml`.
+This page serves as a reference on how to configure your Pages project via the `wrangler.toml / wrangler.json` file.
-If using `wrangler.toml`, you must treat your `wrangler.toml` as the [source of truth](/pages/functions/wrangler-configuration/#source-of-truth) for your Pages project configuration.
+If using a `wrangler.toml / wrangler.json` file, you must treat your file as the [source of truth](/pages/functions/wrangler-configuration/#source-of-truth) for your Pages project configuration.
-:::note[Configuration via `wrangler.toml` is in open beta.]
+:::note[Configuration via the `wrangler.toml / wrangler.json` file is in open beta.]
Cloudflare welcomes your feedback. Join the #functions channel in the [Cloudflare Developers Discord](https://discord.com/invite/cloudflaredev) to report bugs and request features.
:::
-Using `wrangler.toml` to configure your Pages project allows you to:
+Using the `wrangler.toml / wrangler.json` file to configure your Pages project allows you to:
- **Store your configuration file in source control:** Keep your configuration in your repository alongside the rest of your code.
- **Edit your configuration via your code editor:** Remove the need to switch back and forth between interfaces.
- **Write configuration that is shared across environments:** Define configuration like [bindings](/pages/functions/bindings/) for local development, preview and production in one file.
- **Ensure better access control:** By using a configuration file in your project repository, you can control who has access to make changes without giving access to your Cloudflare dashboard.
-## Example `wrangler.toml` file
-
-import { WranglerConfig } from "~/components";
+## Example Wrangler file
@@ -61,23 +59,21 @@ API_KEY = "1234567asdf"
### V2 build system
-Pages Functions configuration via `wrangler.toml` requires the [V2 build system](/pages/configuration/build-image/#v2-build-system) or later. To update from V1, refer to the [V2 build system migration instructions](/pages/configuration/build-image/#v1-to-v2-migration).
+Pages Functions configuration via the `wrangler.toml / wrangler.json` file requires the [V2 build system](/pages/configuration/build-image/#v2-build-system) or later. To update from V1, refer to the [V2 build system migration instructions](/pages/configuration/build-image/#v1-to-v2-migration).
### Wrangler
-You must have Wrangler version 3.45.0 or higher to use `wrangler.toml` for your Pages project's configuration. To check your Wrangler version, update Wrangler or install Wrangler, refer to [Install/Update Wrangler](/workers/wrangler/install-and-update/).
+You must have Wrangler version 3.45.0 or higher to use a `wrangler.toml / wrangler.json` file for your Pages project's configuration. To check your Wrangler version, update Wrangler or install Wrangler, refer to [Install/Update Wrangler](/workers/wrangler/install-and-update/).
## Migrate from dashboard configuration
-The migration instructions for Pages projects that do not have a `wrangler.toml` file currently are different than those for Pages projects with an existing `wrangler.toml` file. Read the instructions based on your situation carefully to avoid errors in production.
-
-### Projects with existing `wrangler.toml` file
-
-Before you could use `wrangler.toml` to define your preview and production configuration, it was possible to use `wrangler.toml` to define which [bindings](/pages/functions/bindings/) should be available to your Pages project in local development.
+The migration instructions for Pages projects that do not have a Wrangler file currently are different than those for Pages projects with an existing Wrangler file. Read the instructions based on your situation carefully to avoid errors in production.
-If you have been using `wrangler.toml` for local development, you may already have a file in your Pages project that looks like this:
+### Projects with existing Wrangler file
+Before you could use the `wrangler.toml / wrangler.json` file to define your preview and production configuration, it was possible to use the file to define which [bindings](/pages/functions/bindings/) should be available to your Pages project in local development.
+If you have been using a `wrangler.toml / wrangler.json` file for local development, you may already have a file in your Pages project that looks like this:
@@ -89,24 +85,24 @@ id = ""
-If you would like to use your existing `wrangler.toml` file for your Pages project configuration, you must:
+If you would like to use your existing Wrangler file for your Pages project configuration, you must:
1. Add the `pages_build_output_dir` key with the appropriate value of your [build output directory](/pages/configuration/build-configuration/#build-commands-and-directories) (for example, `pages_build_output_dir = "./dist"`.)
-2. Review your existing `wrangler.toml` configuration carefully to make sure it aligns with your desired project configuration before deploying.
+2. Review your existing Wrangler configuration carefully to make sure it aligns with your desired project configuration before deploying.
-If you add the `pages_build_output_dir` key to `wrangler.toml` and deploy your Pages project, Pages will use whatever configuration was defined for local use, which is very likely to be non-production. Do not deploy until you are confident that your `wrangler.toml` is ready for production use.
+If you add the `pages_build_output_dir` key to your `wrangler.toml / wrangler.json` file and deploy your Pages project, Pages will use whatever configuration was defined for local use, which is very likely to be non-production. Do not deploy until you are confident that your `wrangler.toml / wrangler.json` file is ready for production use.
:::caution[Overwriting configuration]
-Running [`wrangler pages download config`](/pages/functions/wrangler-configuration/#projects-without-existing-wranglertoml-file) will overwrite your existing `wrangler.toml` file with a generated `wrangler.toml` file based on your Cloudflare dashboard configuration. Run this command only if you want to discard your previous `wrangler.toml` file that you used for local development and start over with configuration pulled from the Cloudflare dashboard.
+Running [`wrangler pages download config`](/pages/functions/wrangler-configuration/#projects-without-existing-wranglertoml-file) will overwrite your existing Wrangler file with a generated Wrangler file based on your Cloudflare dashboard configuration. Run this command only if you want to discard your previous Wrangler file that you used for local development and start over with configuration pulled from the Cloudflare dashboard.
:::
-You can continue to use your `wrangler.toml` file for local development without migrating it for production use by not adding a `pages_build_output_dir` key. If you do not add a `pages_build_output_dir` key and run `wrangler pages deploy`, you will see a warning message telling you that fields are missing and that the file will continue to be used for local development only.
+You can continue to use your Wrangler file for local development without migrating it for production use by not adding a `pages_build_output_dir` key. If you do not add a `pages_build_output_dir` key and run `wrangler pages deploy`, you will see a warning message telling you that fields are missing and that the file will continue to be used for local development only.
-### Projects without existing `wrangler.toml` file
+### Projects without existing Wrangler file
-If you have an existing Pages project with configuration set up via the Cloudflare dashboard and do not have an existing `wrangler.toml` file in your Project, run the `wrangler pages download config` command in your Pages project directory. The `wrangler pages download config` command will download your existing Cloudflare dashboard configuration and generate a valid `wrangler.toml` file in your Pages project directory.
+If you have an existing Pages project with configuration set up via the Cloudflare dashboard and do not have an existing Wrangler file in your Project, run the `wrangler pages download config` command in your Pages project directory. The `wrangler pages download config` command will download your existing Cloudflare dashboard configuration and generate a valid Wrangler file in your Pages project directory.
@@ -128,32 +124,32 @@ pnpm wrangler pages download config
-Review your generated `wrangler.toml` file. To start using `wrangler.toml` for your Pages project's configuration, create a new deployment, via [Git integration](/pages/get-started/git-integration/) or [Direct Upload](/pages/get-started/direct-upload/).
+Review your generated Wrangler file. To start using the `wrangler.toml / wrangler.json` file for your Pages project's configuration, create a new deployment, via [Git integration](/pages/get-started/git-integration/) or [Direct Upload](/pages/get-started/direct-upload/).
### Handling compatibility dates set to "Latest"
In the Cloudflare dashboard, you can set compatibility dates for preview deployments to "Latest". This will ensure your project is always using the latest compatibility date without the need to explicitly set it yourself.
-If you download a `wrangler.toml` from a project configured with "Latest" using the `wrangler pages download` command, your `wrangler.toml` will have the latest compatibility date available at the time you downloaded the configuration file. Wrangler does not support the "Latest" functionality like the dashboard. Compatibility dates must be explicitly set when using `wrangler.toml`.
+If you download a `wrangler.toml / wrangler.json` file from a project configured with "Latest" using the `wrangler pages download` command, your `wrangler.toml / wrangler.json` file will have the latest compatibility date available at the time you downloaded the configuration file. Wrangler does not support the "Latest" functionality like the dashboard. Compatibility dates must be explicitly set when using a `wrangler.toml / wrangler.json` file.
Refer to [this guide](/workers/configuration/compatibility-dates/) for more information on what compatibility dates are and how they work.
-## Differences using `wrangler.toml` for Pages Functions and Workers
+## Differences using a Wrangler configuration file for Pages Functions and Workers
-If you have used [Workers](/workers), you may already be familiar with [`wrangler.toml`](/workers/wrangler/configuration/). There are a few key differences to be aware of when using `wrangler.toml` with your Pages Functions project:
+If you have used [Workers](/workers), you may already be familiar with the [`wrangler.toml / wrangler.json` file](/workers/wrangler/configuration/). There are a few key differences to be aware of when using this file with your Pages Functions project:
-- The configuration fields **do not match exactly** between Pages Functions `wrangler.toml` file and the Workers equivalent. For example, configuration keys like `main`, which are Workers specific, do not apply to a Pages Function's `wrangler.toml`. Some functionality supported by Workers, such as [module aliasing](/workers/wrangler/configuration/#module-aliasing) cannot yet be used by Cloudflare Pages projects.
-- The Pages `wrangler.toml` introduces a new key, `pages_build_output_dir`, which is only used for Pages projects.
+- The configuration fields **do not match exactly** between Pages Functions Wrangler file and the Workers equivalent. For example, configuration keys like `main`, which are Workers specific, do not apply to a Pages Function's `wrangler.toml / wrangler.json` file. Some functionality supported by Workers, such as [module aliasing](/workers/wrangler/configuration/#module-aliasing) cannot yet be used by Cloudflare Pages projects.
+- The Pages' `wrangler.toml / wrangler.json` file introduces a new key, `pages_build_output_dir`, which is only used for Pages projects.
- The concept of [environments](/pages/functions/wrangler-configuration/#configure-environments) and configuration inheritance in this file **is not** the same as Workers.
- This file becomes the [source of truth](/pages/functions/wrangler-configuration/#source-of-truth) when used, meaning that you **can not edit the same fields in the dashboard** once you are using this file.
## Configure environments
-With `wrangler.toml` you can quickly set configuration across your local environment, preview deployments, and production.
+With a `wrangler.toml / wrangler.json` file, you can quickly set configuration across your local environment, preview deployments, and production.
### Local development
-`wrangler.toml` works locally when using `wrangler pages dev`. This means that you can test out configuration changes quickly without a need to login to the Cloudflare dashboard. Refer to the following config file for an example:
+The `wrangler.toml / wrangler.json` file works locally when using `wrangler pages dev`. This means that you can test out configuration changes quickly without a need to login to the Cloudflare dashboard. Refer to the following config file for an example:
@@ -172,7 +168,7 @@ id = ""
-This `wrangler.toml` configuration file adds the `nodejs_compat` compatibility flag and a KV namespace binding to your Pages project. Running `wrangler pages dev` in a Pages project directory with this `wrangler.toml` configuration file will apply the `nodejs_compat` compatibility flag locally, and expose the `KV` binding in your Pages Function code at `context.env.KV`.
+This `wrangler.toml / wrangler.json` file adds the `nodejs_compat` compatibility flag and a KV namespace binding to your Pages project. Running `wrangler pages dev` in a Pages project directory with this `wrangler.toml / wrangler.json` file will apply the `nodejs_compat` compatibility flag locally, and expose the `KV` binding in your Pages Function code at `context.env.KV`.
:::note
@@ -182,7 +178,7 @@ For a full list of configuration keys, refer to [inheritable keys](#inheritable-
### Production and preview deployments
-Once you are ready to deploy your project, you can set the configuration for production and preview deployments by creating a new deployment containing a `wrangler.toml` file.
+Once you are ready to deploy your project, you can set the configuration for production and preview deployments by creating a new deployment containing a Wrangler file.
:::note
@@ -220,7 +216,7 @@ Unlike [Workers Environments](/workers/wrangler/configuration/#environments), `p
:::
-Refer to the following `wrangler.toml` configuration file for an example of how to override preview deployment configuration:
+Refer to the following `wrangler.toml / wrangler.json` file for an example of how to override preview deployment configuration:
@@ -439,20 +435,18 @@ When using Wrangler in the default local development mode, files will be written
:::
-- Configure D1 database bindings via your [`wrangler.toml` file](/workers/wrangler/configuration/#d1-databases) the same way they are configured with Cloudflare Workers.
+- Configure D1 database bindings via your [Wrangler file](/workers/wrangler/configuration/#d1-databases) the same way they are configured with Cloudflare Workers.
- Interact with your [D1 Database binding](/pages/functions/bindings/#d1-databases).
### Durable Objects
[Durable Objects](/durable-objects/) provide low-latency coordination and consistent storage for the Workers platform.
-- Configure Durable Object namespace bindings via your [`wrangler.toml` file](/workers/wrangler/configuration/#durable-objects) the same way they are configured with Cloudflare Workers.
+- Configure Durable Object namespace bindings via your [Wrangler file](/workers/wrangler/configuration/#durable-objects) the same way they are configured with Cloudflare Workers.
:::caution
- Durable Object bindings configured in
-a Pages project's `wrangler.toml` require the `script_name` key. For Workers,
-the `script_name` key is optional.
+ Durable Object bindings configured in a Pages project's `wrangler.toml / wrangler.json` file require the `script_name` key. For Workers, the `script_name` key is optional.
:::
@@ -462,14 +456,14 @@ the `script_name` key is optional.
[Environment variables](/workers/configuration/environment-variables/) are a type of binding that allow you to attach text strings or JSON values to your Pages Function.
-- Configure environment variables via your [`wrangler.toml` file](/workers/wrangler/configuration/#environment-variables) the same way they are configured with Cloudflare Workers.
+- Configure environment variables via your [Wrangler file](/workers/wrangler/configuration/#environment-variables) the same way they are configured with Cloudflare Workers.
- Interact with your [environment variables](/pages/functions/bindings/#environment-variables).
### Hyperdrive
[Hyperdrive](/hyperdrive/) bindings allow you to interact with and query any Postgres database from within a Pages Function.
-- Configure Hyperdrive bindings via your [`wrangler.toml` file](/workers/wrangler/configuration/#hyperdrive) the same way they are configured with Cloudflare Workers.
+- Configure Hyperdrive bindings via your [Wrangler file](/workers/wrangler/configuration/#hyperdrive) the same way they are configured with Cloudflare Workers.
### KV namespaces
@@ -481,7 +475,7 @@ When using Wrangler in the default local development mode, files will be written
:::
-- Configure KV namespace bindings via your [`wrangler.toml` file](/workers/wrangler/configuration/#kv-namespaces) the same way they are configured with Cloudflare Workers.
+- Configure KV namespace bindings via your [Wrangler file](/workers/wrangler/configuration/#kv-namespaces) the same way they are configured with Cloudflare Workers.
- Interact with your [KV namespace binding](/pages/functions/bindings/#kv-namespaces).
### Queues Producers
@@ -494,7 +488,7 @@ You cannot currently configure a [queues consumer](/queues/reference/how-queues-
:::
-- Configure Queues Producer bindings via your [`wrangler.toml` file](/workers/wrangler/configuration/#queues) the same way they are configured with Cloudflare Workers.
+- Configure Queues Producer bindings via your [Wrangler file](/workers/wrangler/configuration/#queues) the same way they are configured with Cloudflare Workers.
- Interact with your [Queues Producer binding](/pages/functions/bindings/#queue-producers).
### R2 buckets
@@ -507,27 +501,27 @@ When using Wrangler in the default local development mode, files will be written
:::
-- Configure R2 bucket bindings via your [`wrangler.toml` file](/workers/wrangler/configuration/#r2-buckets) the same way they are configured with Cloudflare Workers.
+- Configure R2 bucket bindings via your [Wrangler file](/workers/wrangler/configuration/#r2-buckets) the same way they are configured with Cloudflare Workers.
- Interact with your [R2 bucket bindings](/pages/functions/bindings/#r2-buckets).
### Vectorize indexes
A [Vectorize index](/vectorize/) allows you to insert and query vector embeddings for semantic search, classification and other vector search use-cases.
-- Configure Vectorize bindings via your [`wrangler.toml` file](/workers/wrangler/configuration/#vectorize-indexes) the same way they are configured with Cloudflare Workers.
+- Configure Vectorize bindings via your [Wrangler file](/workers/wrangler/configuration/#vectorize-indexes) the same way they are configured with Cloudflare Workers.
### Service bindings
A service binding allows you to call a Worker from within your Pages Function. Binding a Pages Function to a Worker allows you to send HTTP requests to the Worker without those requests going over the Internet. The request immediately invokes the downstream Worker, reducing latency as compared to a request to a third-party service. Refer to [About Service bindings](/workers/runtime-apis/bindings/service-bindings/).
-- Configure service bindings via your [`wrangler.toml` file](/workers/wrangler/configuration/#service-bindings) the same way they are configured with Cloudflare Workers.
+- Configure service bindings via your [Wrangler file](/workers/wrangler/configuration/#service-bindings) the same way they are configured with Cloudflare Workers.
- Interact with your [service bindings](/pages/functions/bindings/#service-bindings).
### Analytics Engine Datasets
[Workers Analytics Engine](/analytics/analytics-engine/) provides analytics, observability and data logging from Pages Functions. Write data points within your Pages Function binding then query the data using the [SQL API](/analytics/analytics-engine/sql-api/).
-- Configure Analytics Engine Dataset bindings via your [`wrangler.toml` file](/workers/wrangler/configuration/#analytics-engine-datasets) the same way they are configured with Cloudflare Workers.
+- Configure Analytics Engine Dataset bindings via your [Wrangler file](/workers/wrangler/configuration/#analytics-engine-datasets) the same way they are configured with Cloudflare Workers.
- Interact with your [Analytics Engine Dataset](/pages/functions/bindings/#analytics-engine).
### Workers AI
@@ -538,7 +532,7 @@ A service binding allows you to call a Worker from within your Pages Function. B
Unlike other bindings, this binding is limited to one AI binding per Pages Function project.
-- Configure Workers AI bindings via your [`wrangler.toml` file](/workers/wrangler/configuration/#workers-ai) the same way they are configured with Cloudflare Workers.
+- Configure Workers AI bindings via your [Wrangler file](/workers/wrangler/configuration/#workers-ai) the same way they are configured with Cloudflare Workers.
- Interact with your [Workers AI binding](/pages/functions/bindings/#workers-ai).
## Local development settings
@@ -547,6 +541,6 @@ The local development settings that you can configure are the same for Pages Fun
## Source of truth
-When used in your Pages Functions projects, your `wrangler.toml` file is the source of truth. You will be able to see, but not edit, the same fields when you log into the Cloudflare dashboard.
+When used in your Pages Functions projects, your Wrangler file is the source of truth. You will be able to see, but not edit, the same fields when you log into the Cloudflare dashboard.
-If you decide that you don't want to use `wrangler.toml` for configuration, you can safely delete it and create a new deployment. Configuration values from your last deployment will still apply and you will be able to edit them from the dashboard.
+If you decide that you do not want to use a `wrangler.toml / wrangler.json` file for configuration, you can safely delete it and create a new deployment. Configuration values from your last deployment will still apply and you will be able to edit them from the dashboard.
diff --git a/src/content/docs/pages/get-started/c3.mdx b/src/content/docs/pages/get-started/c3.mdx
index 15d1cb90f1ca2a..d77d3c148b1063 100644
--- a/src/content/docs/pages/get-started/c3.mdx
+++ b/src/content/docs/pages/get-started/c3.mdx
@@ -10,7 +10,14 @@ description: Use C3 (`create-cloudflare` CLI) to set up and deploy new
deployment.
---
-import { Render, TabItem, Tabs, Type, MetaInfo, PackageManagers } from "~/components";
+import {
+ Render,
+ TabItem,
+ Tabs,
+ Type,
+ MetaInfo,
+ PackageManagers,
+} from "~/components";
Cloudflare provides a CLI command for creating new Workers and Pages projects — `npm create cloudflare`, powered by the [`create-cloudflare` package](https://www.npmjs.com/package/create-cloudflare).
@@ -73,7 +80,7 @@ npm create cloudflare@latest [--] [] [OPTIONS] [-- ]
```sh
-yarn create cloudflare@latest [--] [] [OPTIONS] [-- ]
+yarn create cloudflare [--] [] [OPTIONS] [-- ]
```
@@ -163,8 +170,8 @@ bun create cloudflare@latest [--] [] [OPTIONS] [-- ]
At a minimum, templates must contain the following:
- `package.json`
- - `wrangler.toml`
- - `src/` containing a worker script referenced from `wrangler.toml`
+ - `wrangler.json`
+ - `src/` containing a worker script referenced from `wrangler.json`
See the [templates folder](https://github.com/cloudflare/workers-sdk/tree/main/packages/create-cloudflare/templates) of this repo for more examples.
@@ -229,7 +236,7 @@ Cloudflare collects anonymous usage data to improve `create-cloudflare` over tim
You can opt-out if you do not wish to share any information.
@@ -243,15 +250,15 @@ export CREATE_CLOUDFLARE_TELEMETRY_DISABLED=1
You can check the status of telemetry collection at any time.
You can always re-enable telemetry collection.
diff --git a/src/content/docs/pages/get-started/direct-upload.mdx b/src/content/docs/pages/get-started/direct-upload.mdx
index 73fc8e4ce6cf4f..2634e821ff69b0 100644
--- a/src/content/docs/pages/get-started/direct-upload.mdx
+++ b/src/content/docs/pages/get-started/direct-upload.mdx
@@ -10,7 +10,13 @@ description: Upload your prebuilt assets to Pages and deploy them via the
import { Render } from "~/components";
-Direct Upload enables you to upload your prebuilt assets to Pages and deploy them to the Cloudflare global network. This guide will instruct you how to upload your assets using Wrangler or the drag and drop method.
+Direct Upload enables you to upload your prebuilt assets to Pages and deploy them to the Cloudflare global network. You should choose Direct Upload over Git integration if you want to [integrate your own build platform](/pages/how-to/use-direct-upload-with-continuous-integration/) or upload from your local computer.
+
+This guide will instruct you how to upload your assets using Wrangler or the drag and drop method.
+
+:::caution[You cannot switch to Git integration later]
+If you choose Direct Upload, you cannot switch to [Git integration](/pages/get-started/git-integration/) later. You will have to create a new project with Git integration to use automatic deployments.
+:::
## Prerequisites
@@ -141,4 +147,4 @@ If using the drag and drop method, a red warning symbol will appear next to an a
Drag and drop deployments made from the Cloudflare dashboard do not currently support compiling a `functions` folder of [Pages Functions](/pages/functions/). To deploy a `functions` folder, you must use Wrangler. When deploying a project using Wrangler, if a `functions` folder exists where the command is run, that `functions` folder will be uploaded with the project.
-However, note that a `_worker.js` file is supported by both Wrangler and drag and drop deployments made from the dashboard.
\ No newline at end of file
+However, note that a `_worker.js` file is supported by both Wrangler and drag and drop deployments made from the dashboard.
diff --git a/src/content/docs/pages/get-started/git-integration.mdx b/src/content/docs/pages/get-started/git-integration.mdx
index 85e7ba1ee544d2..c8e53242c2cc4c 100644
--- a/src/content/docs/pages/get-started/git-integration.mdx
+++ b/src/content/docs/pages/get-started/git-integration.mdx
@@ -5,12 +5,16 @@ head:
- tag: title
content: Git integration guide
description: Connect your Git provider to Pages.
-
---
-import { Details, Render } from "~/components"
+import { Details, Render } from "~/components";
+
+In this guide, you will get started with Cloudflare Pages and deploy your first website to the Pages platform through Git integration. The Git integration enables automatic builds and deployments every time you push a change to your connected [GitHub](/pages/configuration/git-integration/github-integration/) or [GitLab](/pages/configuration/git-integration/gitlab-integration/) repository.
+
+:::caution[You cannot switch to Direct Upload later]
+If you deploy using the Git integration, you cannot switch to [Direct Upload](/pages/get-started/direct-upload/) later. However, if you already use a Git-integrated project and do not want to trigger deployments every time you push a commit, you can [disable automatic deployments](/pages/configuration/git-integration/#disable-automatic-deployments) on all branches. Then, you can use Wrangler to deploy directly to your Pages projects and make changes to your Git repository without automatically triggering a build.
-In this guide, you will get started with Cloudflare Pages, and deploy your first website to the Pages platform through Git integration.
+:::
## Connect your Git provider to Pages
diff --git a/src/content/docs/pages/how-to/add-custom-http-headers.mdx b/src/content/docs/pages/how-to/add-custom-http-headers.mdx
index d696423ccb2f04..a5e50cfe196fce 100644
--- a/src/content/docs/pages/how-to/add-custom-http-headers.mdx
+++ b/src/content/docs/pages/how-to/add-custom-http-headers.mdx
@@ -3,6 +3,8 @@ pcx_content_type: how-to
title: Add custom HTTP headers
---
+import { WranglerConfig } from "~/components";
+
:::note
Cloudflare provides HTTP header customization for Pages projects by adding a `_headers` file to your project. Refer to the [documentation](/pages/configuration/headers/) for more information.
@@ -64,9 +66,7 @@ cd custom-headers-example
npm install
```
-To operate your Workers function alongside your Pages application, deploy it to the same custom domain as your Pages application. To do this, update the `wrangler.toml` file in your project with your account and zone details:
-
-import { WranglerConfig } from "~/components";
+To operate your Workers function alongside your Pages application, deploy it to the same custom domain as your Pages application. To do this, update the Wrangler file in your project with your account and zone details:
@@ -83,7 +83,7 @@ zone_id = "FILL-IN-YOUR-ZONE-ID"
If you do not know how to find your Account ID and Zone ID, refer to [our guide](/fundamentals/setup/find-account-and-zone-ids/).
-Once you have configured your `wrangler.toml`, run `npx wrangler deploy` in your terminal to deploy your Worker:
+Once you have configured your `wrangler.toml / wrangler.json` file, run `npx wrangler deploy` in your terminal to deploy your Worker:
```sh
npx wrangler deploy
diff --git a/src/content/docs/pages/migrations/migrating-from-workers/index.mdx b/src/content/docs/pages/migrations/migrating-from-workers/index.mdx
index 64cded1f056b5a..ef891f3d21e1fc 100644
--- a/src/content/docs/pages/migrations/migrating-from-workers/index.mdx
+++ b/src/content/docs/pages/migrations/migrating-from-workers/index.mdx
@@ -17,7 +17,7 @@ You may already have a reasonably complex Worker and/or it would be tedious to s
:::note
-When using a `_worker.js` file, the entire `/functions` directory is ignored – this includes its routing and middleware characteristics. Instead, the `_worker.js` file is deployed as is and must be written using the [Module Worker syntax](/workers/reference/migrate-to-module-workers/).
+When using a `_worker.js` file, the entire `/functions` directory is ignored - this includes its routing and middleware characteristics. Instead, the `_worker.js` file is deployed as is and must be written using the [Module Worker syntax](/workers/reference/migrate-to-module-workers/).
:::
@@ -29,10 +29,10 @@ By migrating to Cloudflare Pages, you will be able to access features like [prev
Workers Sites projects consist of the following pieces:
1. An application built with a [static site tool](/pages/how-to/) or a static collection of HTML, CSS and JavaScript files.
-2. If using a static site tool, a build directory (called `bucket` in `wrangler.toml`) where the static project builds your HTML, CSS, and JavaScript files.
+2. If using a static site tool, a build directory (called `bucket` in the `wrangler.toml / wrangler.json` file) where the static project builds your HTML, CSS, and JavaScript files.
3. A Worker application for serving that build directory. For most projects, this is likely to be the `workers-site` directory.
-When moving to Cloudflare Pages, remove the Workers application and any associated `wrangler.toml` configuration files or build output. Instead, note and record your `build` command (if you have one), and the `bucket` field, or build directory, from the `wrangler.toml` file in your project's directory.
+When moving to Cloudflare Pages, remove the Workers application and any associated `wrangler.toml / wrangler.json` files or build output. Instead, note and record your `build` command (if you have one), and the `bucket` field, or build directory, from the Wrangler file in your project's directory.
## Migrate headers and redirects
diff --git a/src/content/docs/pages/platform/known-issues.mdx b/src/content/docs/pages/platform/known-issues.mdx
index e15b0e556bfaf7..97aa558d274f9b 100644
--- a/src/content/docs/pages/platform/known-issues.mdx
+++ b/src/content/docs/pages/platform/known-issues.mdx
@@ -19,7 +19,7 @@ Here are some known bugs and issues with Cloudflare Pages:
## Git configuration
-- After you have selected a GitHub/GitLab repository for your Pages application, it cannot be changed. Delete your Pages project and create a new one pointing at a different repository if you need to update it.
+- If you deploy using the Git integration, you cannot switch to Direct Upload later. However, if you already use a Git-integrated project and do not want to trigger deployments every time you push a commit, you can [disable/pause automatic deployments](/pages/configuration/git-integration/#disable-automatic-deployments). Alternatively, you can delete your Pages project and create a new one pointing at a different repository if you need to update it.
## Build configuration
diff --git a/src/content/docs/pages/tutorials/localize-a-website/index.mdx b/src/content/docs/pages/tutorials/localize-a-website/index.mdx
index 2a7aad8fffcf35..ea94ce012754cf 100644
--- a/src/content/docs/pages/tutorials/localize-a-website/index.mdx
+++ b/src/content/docs/pages/tutorials/localize-a-website/index.mdx
@@ -10,7 +10,7 @@ tags:
- HTMLRewriter
---
-import { Render, PackageManagers } from "~/components";
+import { Render, PackageManagers, WranglerConfig } from "~/components";
In this tutorial, you will build an example internationalization and localization engine (commonly referred to as **i18n** and **l10n**) for your application, serve the content of your site, and automatically translate the content based on your visitors’ location in the world.
@@ -258,9 +258,7 @@ export async function onRequest(context) {
Your i18n tool built on Cloudflare Pages is complete and it is time to deploy it to your domain.
-To deploy your application to a `*.pages.dev` subdomain, you need to specify a directory of static assets to serve, configure the `pages_build_output_dir` in your project’s `wrangler.toml` file and set the value to `./public`:
-
-import { WranglerConfig } from "~/components";
+To deploy your application to a `*.pages.dev` subdomain, you need to specify a directory of static assets to serve, configure the `pages_build_output_dir` in your project’s Wrangler file and set the value to `./public`:
diff --git a/src/content/docs/pages/tutorials/use-r2-as-static-asset-storage-for-pages/index.mdx b/src/content/docs/pages/tutorials/use-r2-as-static-asset-storage-for-pages/index.mdx
index 83353424c94221..f016a6357a08c0 100644
--- a/src/content/docs/pages/tutorials/use-r2-as-static-asset-storage-for-pages/index.mdx
+++ b/src/content/docs/pages/tutorials/use-r2-as-static-asset-storage-for-pages/index.mdx
@@ -12,6 +12,8 @@ languages:
- JavaScript
---
+import { WranglerConfig } from "~/components";
+
This tutorial will teach you how to use [R2](/r2/) as a static asset storage bucket for your [Pages](/pages/) app. This is especially helpful if you're hitting the [file limit](/pages/platform/limits/#files) or the [max file size limit](/pages/platform/limits/#file-size) on Pages.
To illustrate how this is done, we will use R2 as a static asset storage for a fictional cat blog.
@@ -65,11 +67,9 @@ npx wrangler r2 object put / -f
## Bind R2 to Pages
-To bind the R2 bucket we have created to the cat blog, we need to update `wrangler.toml`.
+To bind the R2 bucket we have created to the cat blog, we need to update the Wrangler configuration.
-Open `wrangler.toml`, and add the following binding to the file. `bucket_name` should be the exact name of the bucket created earlier, while `binding` can be any custom name referring to the R2 resource:
-
-import { WranglerConfig } from "~/components";
+Open the `wrangler.toml / wrangler.json` file, and add the following binding to the file. `bucket_name` should be the exact name of the bucket created earlier, while `binding` can be any custom name referring to the R2 resource:
@@ -86,7 +86,7 @@ bucket_name = "cat-media"
Note: The keyword `ASSETS` is reserved and cannot be used as a resource binding.
:::
-Save `wrangler.toml` and we are ready to move on to the last step.
+Save the `wrangler.toml / wrangler.json` file, and we are ready to move on to the last step.
Alternatively, you can add a binding to your Pages project on the dashboard by navigating to the project’s _Settings_ tab > _Functions_ > _R2 bucket bindings_.
diff --git a/src/content/docs/pub-sub/learning/integrate-workers.mdx b/src/content/docs/pub-sub/learning/integrate-workers.mdx
index bd2399073fcc58..3ba068f47cbcb0 100644
--- a/src/content/docs/pub-sub/learning/integrate-workers.mdx
+++ b/src/content/docs/pub-sub/learning/integrate-workers.mdx
@@ -5,6 +5,8 @@ sidebar:
order: 2
---
+import { WranglerConfig } from "~/components";
+
One of the most powerful features of Pub/Sub is the ability to connect [Cloudflare Workers](/workers/) — powerful serverless functions that run on the edge — and filter, aggregate and mutate every message published to that broker. Workers can also mirror those messages to other sources, including writing to [Cloudflare R2 storage](/r2/), external databases, or other cloud services beyond Cloudflare, making it easy to persist or analyze incoming message payloads and data at scale.
There are three ways to integrate a Worker with Pub/Sub:
@@ -80,7 +82,7 @@ You should receive a success response that resembles the example below, with the
]
```
-Copy the array of public keys into your `wrangler.toml` as an environmental variable:
+Copy the array of public keys into your `wrangler.toml / wrangler.json` file as an environment variable:
:::note
@@ -88,8 +90,6 @@ Your public keys will be unique to your own Pub/Sub Broker: you should ensure yo
:::
-import { WranglerConfig } from "~/components";
-
```toml
diff --git a/src/content/docs/queues/configuration/batching-retries.mdx b/src/content/docs/queues/configuration/batching-retries.mdx
index 361cd9afcdce53..b09220c49f5300 100644
--- a/src/content/docs/queues/configuration/batching-retries.mdx
+++ b/src/content/docs/queues/configuration/batching-retries.mdx
@@ -5,6 +5,8 @@ sidebar:
order: 2
---
+import { WranglerConfig } from "~/components";
+
## Batching
When configuring a [consumer Worker](/queues/reference/how-queues-works#consumers) for a queue, you can also define how messages are batched as they are delivered.
@@ -194,9 +196,7 @@ npx wrangler@latest queues consumer worker add $QUEUE-NAME $WORKER_SCRIPT_NAME -
npx wrangler@latest queues consumer http add $QUEUE-NAME --retry-delay-secs=60
```
-Delays can also be configured in [`wrangler.toml`](/workers/wrangler/configuration/#queues) with the `delivery_delay` setting for producers (when sending) and/or the `retry_delay` (when retrying) per-consumer:
-
-import { WranglerConfig } from "~/components";
+Delays can also be configured in the [`wrangler.toml / wrangler.json` file](/workers/wrangler/configuration/#queues) with the `delivery_delay` setting for producers (when sending) and/or the `retry_delay` (when retrying) per-consumer:
@@ -213,7 +213,7 @@ import { WranglerConfig } from "~/components";
-If you use both the `wrangler` CLI and `wrangler.toml` to change the settings associated with a queue or a queue consumer, the most recent configuration change will take effect.
+If you use both the `wrangler` CLI and the `wrangler.toml / wrangler.json` file to change the settings associated with a queue or a queue consumer, the most recent configuration change will take effect.
Refer to the [Queues REST API documentation](/api/resources/queues/subresources/consumers/methods/get/) to learn how to configure message delays and retry delays programmatically.
diff --git a/src/content/docs/queues/configuration/configure-queues.mdx b/src/content/docs/queues/configuration/configure-queues.mdx
index 6030d3d0654cfa..d34bc6c1227253 100644
--- a/src/content/docs/queues/configuration/configure-queues.mdx
+++ b/src/content/docs/queues/configuration/configure-queues.mdx
@@ -9,16 +9,18 @@ head:
---
+import { WranglerConfig } from "~/components";
+
Cloudflare Queues can be configured using [Wrangler](/workers/wrangler/install-and-update/), the command-line interface for Cloudflare's Developer Platform, which includes [Workers](/workers/), [R2](/r2/), and other developer products.
-Each Worker has a `wrangler.toml` configuration file that specifies environment variables, triggers, and resources, such as a Queue. To enable Worker-to-resource communication, you must set up a [binding](/workers/runtime-apis/bindings/) in your Worker project's `wrangler.toml` file.
+Each Worker has a `wrangler.toml / wrangler.json` file that specifies environment variables, triggers, and resources, such as a Queue. To enable Worker-to-resource communication, you must set up a [binding](/workers/runtime-apis/bindings/) in your Worker project's Wrangler file.
Use the options below to configure your queue.
:::note
-Below are options for Queues, refer to the Wrangler documentation for a full reference of [`wrangler.toml`](/workers/wrangler/configuration/).
+Below are options for Queues, refer to the Wrangler documentation for a full reference of the [`wrangler.toml / wrangler.json` file](/workers/wrangler/configuration/).
:::
@@ -27,9 +29,7 @@ Below are options for Queues, refer to the Wrangler documentation for a full ref
A producer is a [Cloudflare Worker](/workers/) that writes to one or more queues. A producer can accept messages over HTTP, asynchronously write messages when handling requests, and/or write to a queue from within a [Durable Object](/durable-objects/). Any Worker can write to a queue.
-To produce to a queue, set up a binding in your `wrangler.toml` file. These options should be used when a Worker wants to send messages to a queue.
-
-import { WranglerConfig } from "~/components";
+To produce to a queue, set up a binding in your Wrangler file. These options should be used when a Worker wants to send messages to a queue.
@@ -57,7 +57,7 @@ import { WranglerConfig } from "~/components";
## Workers
-To consume messages from one or more queues, set up a binding in your `wrangler.toml` file. These options should be used when a Worker wants to receive messages from a queue.
+To consume messages from one or more queues, set up a binding in your Wrangler file. These options should be used when a Worker wants to receive messages from a queue.
diff --git a/src/content/docs/queues/configuration/consumer-concurrency.mdx b/src/content/docs/queues/configuration/consumer-concurrency.mdx
index e23d881d8ddfce..add633b7286c56 100644
--- a/src/content/docs/queues/configuration/consumer-concurrency.mdx
+++ b/src/content/docs/queues/configuration/consumer-concurrency.mdx
@@ -5,6 +5,8 @@ sidebar:
order: 5
---
+import { WranglerConfig } from "~/components";
+
Consumer concurrency allows a [consumer Worker](/queues/reference/how-queues-works/#consumers) processing messages from a queue to automatically scale out horizontally to keep up with the rate that messages are being written to a queue.
In many systems, the rate at which you write messages to a queue can easily exceed the rate at which a single consumer can read and process those same messages. This is often because your consumer might be parsing message contents, writing to storage or a database, or making third-party (upstream) API calls.
@@ -58,7 +60,7 @@ If you have a workflow that is limited by an upstream API and/or system, you may
You can configure the concurrency of your consumer Worker in two ways:
1. Set concurrency settings in the Cloudflare dashboard
-2. Set concurrency settings via `wrangler.toml`
+2. Set concurrency settings via the `wrangler.toml / wrangler.json` file
### Set concurrency settings in the Cloudflare dashboard
@@ -74,7 +76,7 @@ To remove a fixed maximum value, select **auto (recommended)**.
Note that if you are writing messages to a queue faster than you can process them, messages may eventually reach the [maximum retention period](/queues/platform/limits/) set for that queue. Individual messages that reach that limit will expire from the queue and be deleted.
-### Set concurrency settings via `wrangler.toml`
+### Set concurrency settings in the `wrangler.toml / wrangler.json` file
:::note
@@ -82,9 +84,7 @@ Ensure you are using the latest version of [wrangler](/workers/wrangler/install-
:::
-To set a fixed maximum number of concurrent consumer invocations for a given queue, configure a `max_concurrency` in your `wrangler.toml` file:
-
-import { WranglerConfig } from "~/components";
+To set a fixed maximum number of concurrent consumer invocations for a given queue, configure a `max_concurrency` in your Wrangler file:
diff --git a/src/content/docs/queues/configuration/dead-letter-queues.mdx b/src/content/docs/queues/configuration/dead-letter-queues.mdx
index cdfe6d16253c84..847a079ed92c4f 100644
--- a/src/content/docs/queues/configuration/dead-letter-queues.mdx
+++ b/src/content/docs/queues/configuration/dead-letter-queues.mdx
@@ -5,14 +5,14 @@ sidebar:
order: 3
---
+import { WranglerConfig } from "~/components";
+
A Dead Letter Queue (DLQ) is a common concept in a messaging system, and represents where messages are sent when a delivery failure occurs with a consumer after `max_retries` is reached. A Dead Letter Queue is like any other queue, and can be produced to and consumed from independently.
With Cloudflare Queues, a Dead Letter Queue is defined within your [consumer configuration](/queues/configuration/configure-queues/). Messages are delivered to the DLQ when they reach the configured retry limit for the consumer. Without a DLQ configured, messages that reach the retry limit are deleted permanently.
For example, the following consumer configuration would send messages to our DLQ named `"my-other-queue"` after retrying delivery (by default, 3 times):
-import { WranglerConfig } from "~/components";
-
```toml
diff --git a/src/content/docs/queues/configuration/pull-consumers.mdx b/src/content/docs/queues/configuration/pull-consumers.mdx
index 6fce655387eed7..ca58cd2572b3e1 100644
--- a/src/content/docs/queues/configuration/pull-consumers.mdx
+++ b/src/content/docs/queues/configuration/pull-consumers.mdx
@@ -8,6 +8,8 @@ head:
content: Cloudflare Queues - Pull consumers
---
+import { WranglerConfig } from "~/components";
+
A pull-based consumer allows you to pull from a queue over HTTP from any environment and/or programming language outside of Cloudflare Workers. A pull-based consumer can be useful when your message consumption rate is limited by upstream infrastructure or long-running tasks.
## How to choose between push or pull consumer
@@ -34,13 +36,11 @@ To configure a pull-based consumer and receive messages from a queue, you need t
## 1. Enable HTTP pull
-You can enable HTTP pull or change a queue from push-based to pull-based via `wrangler.toml`, the `wrangler` CLI, or via the [Cloudflare dashboard](https://dash.cloudflare.com/).
+You can enable HTTP pull or change a queue from push-based to pull-based via the `wrangler.toml / wrangler.json` file, the `wrangler` CLI, or via the [Cloudflare dashboard](https://dash.cloudflare.com/).
-### wrangler.toml
+### Wrangler configuration file
-A HTTP consumer can be configured in `wrangler.toml` by setting `type = "http_pull"` in the consumer configuration:
-
-import { WranglerConfig } from "~/components";
+An HTTP consumer can be configured in the `wrangler.toml / wrangler.json` file by setting `type = "http_pull"` in the consumer configuration:
@@ -75,7 +75,7 @@ wrangler queues consumer worker remove $QUEUE-NAME $SCRIPT_NAME
:::note
-If you remove the Worker consumer with `wrangler` but do not delete the `[[queues.consumer]]` configuration from `wrangler.toml`, subsequent deployments of your Worker will fail when they attempt to add a conflicting consumer configuration.
+If you remove the Worker consumer with `wrangler` but do not delete the `[[queues.consumer]]` configuration from the `wrangler.toml / wrangler.json` file, subsequent deployments of your Worker will fail when they attempt to add a conflicting consumer configuration.
Ensure you remove the consumer configuration first.
diff --git a/src/content/docs/queues/examples/publish-to-a-queue-over-http.mdx b/src/content/docs/queues/examples/publish-to-a-queue-over-http.mdx
index 7753c6f9c7225c..e56b154f54ee1c 100644
--- a/src/content/docs/queues/examples/publish-to-a-queue-over-http.mdx
+++ b/src/content/docs/queues/examples/publish-to-a-queue-over-http.mdx
@@ -10,6 +10,8 @@ head:
description: Publish to a Queue directly via HTTP and Workers.
---
+import { WranglerConfig } from "~/components";
+
The following example shows you how to publish messages to a queue from any HTTP client, using a shared secret to securely authenticate the client.
This allows you to write to a Queue from any service or programming language that support HTTP, including Go, Rust, Python or even a Bash script.
@@ -17,11 +19,9 @@ This allows you to write to a Queue from any service or programming language tha
### Prerequisites
- A [queue created](/queues/get-started/#3-create-a-queue) via the [Cloudflare dashboard](https://dash.cloudflare.com) or the [wrangler CLI](/workers/wrangler/install-and-update/).
-- A [configured **producer** binding](/queues/configuration/configure-queues/#producer) in the Cloudflare dashboard or `wrangler.toml` file.
+- A [configured **producer** binding](/queues/configuration/configure-queues/#producer) in the Cloudflare dashboard or Wrangler file.
-Configure your `wrangler.toml` file as follows:
-
-import { WranglerConfig } from "~/components";
+Configure your Wrangler file as follows:
diff --git a/src/content/docs/queues/examples/send-errors-to-r2.mdx b/src/content/docs/queues/examples/send-errors-to-r2.mdx
index c8f3aa93d71cab..c74a0f060358b7 100644
--- a/src/content/docs/queues/examples/send-errors-to-r2.mdx
+++ b/src/content/docs/queues/examples/send-errors-to-r2.mdx
@@ -11,10 +11,10 @@ description: Example of how to use Queues to batch data and store it in an R2 bu
---
-The following Worker will catch JavaScript errors and send them to a queue. The same Worker will receive those errors in batches and store them to a log file in an R2 bucket.
-
import { WranglerConfig } from "~/components";
+The following Worker will catch JavaScript errors and send them to a queue. The same Worker will receive those errors in batches and store them to a log file in an R2 bucket.
+
```toml
diff --git a/src/content/docs/queues/examples/use-queues-with-durable-objects.mdx b/src/content/docs/queues/examples/use-queues-with-durable-objects.mdx
index 79ccb57415ebfc..b40279302a55b7 100644
--- a/src/content/docs/queues/examples/use-queues-with-durable-objects.mdx
+++ b/src/content/docs/queues/examples/use-queues-with-durable-objects.mdx
@@ -10,17 +10,17 @@ head:
description: Publish to a queue from within a Durable Object.
---
+import { WranglerConfig } from "~/components";
+
The following example shows you how to write a Worker script to publish to [Cloudflare Queues](/queues/) from within a [Durable Object](/durable-objects/).
Prerequisites:
- A [queue created](/queues/get-started/#3-create-a-queue) via the Cloudflare dashboard or the [wrangler CLI](/workers/wrangler/install-and-update/).
-- A [configured **producer** binding](/queues/configuration/configure-queues/#producer) in the Cloudflare dashboard or `wrangler.toml` file.
+- A [configured **producer** binding](/queues/configuration/configure-queues/#producer) in the Cloudflare dashboard or Wrangler file.
- A [Durable Object namespace binding](/workers/wrangler/configuration/#durable-objects).
-Configure your `wrangler.toml` file as follows:
-
-import { WranglerConfig } from "~/components";
+Configure your Wrangler file as follows:
diff --git a/src/content/docs/queues/get-started.mdx b/src/content/docs/queues/get-started.mdx
index d04392af57517c..f1bd0fd93e310e 100644
--- a/src/content/docs/queues/get-started.mdx
+++ b/src/content/docs/queues/get-started.mdx
@@ -8,7 +8,7 @@ head:
content: Get started
---
-import { Render, PackageManagers } from "~/components";
+import { Render, PackageManagers, WranglerConfig } from "~/components";
Cloudflare Queues is a flexible messaging queue that allows you to queue messages for asynchronous processing. By following this guide, you will create your first queue, a Worker to publish messages to that queue, and a consumer Worker to consume messages from that queue.
@@ -40,7 +40,7 @@ To create a producer Worker, run:
}}
/>
-This will create a new directory, which will include both a `src/index.ts` Worker script, and a [`wrangler.toml`](/workers/wrangler/configuration/) configuration file. After you create your Worker, you will create a Queue to access.
+This will create a new directory, which will include both a `src/index.ts` Worker script, and a [`wrangler.json`](/workers/wrangler/configuration/) configuration file. After you create your Worker, you will create a Queue to access.
Move into the newly created directory:
@@ -68,9 +68,7 @@ You cannot change your queue name after you have set it. After you create your q
To expose your queue to the code inside your Worker, you need to connect your queue to your Worker by creating a binding. [Bindings](/workers/runtime-apis/bindings/) allow your Worker to access resources, such as Queues, on the Cloudflare developer platform.
-To create a binding, open your newly generated `wrangler.toml` configuration file and add the following:
-
-import { WranglerConfig } from "~/components";
+To create a binding, open your newly generated `wrangler.toml / wrangler.json` file and add the following:
@@ -108,7 +106,7 @@ export default {
} satisfies ExportedHandler;
```
-Replace `MY_QUEUE` with the name you have set for your binding from your `wrangler.toml`.
+Replace `MY_QUEUE` with the name you have set for your binding from your `wrangler.toml / wrangler.json` file.
Also add the queue to `Env` interface in `index.ts`.
@@ -124,7 +122,7 @@ In a production application, you would likely use a [`try...catch`](https://deve
### Publish your producer Worker
-With your `wrangler.toml` file and `index.ts` file configured, you are ready to publish your producer Worker. To publish your producer Worker, run:
+With your Wrangler file and `index.ts` file configured, you are ready to publish your producer Worker. To publish your producer Worker, run:
```sh
npx wrangler deploy
@@ -174,7 +172,7 @@ export default {
} satisfies ExportedHandler;
```
-Replace `MY_QUEUE` with the name you have set for your binding from your `wrangler.toml`.
+Replace `MY_QUEUE` with the name you have set for your binding from your `wrangler.toml / wrangler.json` file.
Every time messages are published to the queue, your consumer Worker's `queue` handler (`async queue`) is called and it is passed one or more messages.
@@ -188,7 +186,7 @@ After you have configured your consumer Worker, you are ready to connect it to y
Each queue can only have one consumer Worker connected to it. If you try to connect multiple consumers to the same queue, you will encounter an error when attempting to publish that Worker.
-To connect your queue to your consumer Worker, open your `wrangler.toml` file and add this to the bottom:
+To connect your queue to your consumer Worker, open your Wrangler file and add this to the bottom:
@@ -215,7 +213,7 @@ In your consumer Worker, you are using queues to auto batch messages using the `
### Publish your consumer Worker
-With your `wrangler.toml` file and `index.ts` file configured, publish your consumer Worker by running:
+With your Wrangler file and `index.ts` file configured, publish your consumer Worker by running:
```sh
npx wrangler deploy
@@ -241,7 +239,7 @@ With `wrangler tail` running, your consumer Worker will start logging the reques
If you refresh less than 10 times, it may take a few seconds for the messages to appear because batch timeout is configured for 10 seconds. After 10 seconds, messages should arrive in your terminal.
-If you get errors when you refresh, check that the queue name you created in [step 2](/queues/get-started/#2-create-a-queue) and the queue you referenced in your `wrangler.toml` file is the same. You should ensure that your producer Worker is returning `Success` and is not returning an error.
+If you get errors when you refresh, check that the queue name you created in [step 2](/queues/get-started/#2-create-a-queue) and the queue you referenced in your Wrangler file is the same. You should ensure that your producer Worker is returning `Success` and is not returning an error.
By completing this guide, you have now created a queue, a producer Worker that publishes messages to that queue, and a consumer Worker that consumes those messages from it.
diff --git a/src/content/docs/queues/observability/metrics.mdx b/src/content/docs/queues/observability/metrics.mdx
index 1da117cb3f9223..7ac5708c569624 100644
--- a/src/content/docs/queues/observability/metrics.mdx
+++ b/src/content/docs/queues/observability/metrics.mdx
@@ -6,7 +6,7 @@ sidebar:
---
-You can view the metrics for a Queue on your account via the [Cloudflare dashboard](https://dash.cloudflare.com). Navigate to **Workers** > **Queues** > **your Queue** and under the **Metrics** tab you'll be able to view line charts describing the number of messages processed by final outcome, the number of messages in the backlog, and other important indicators.
+You can view the metrics for a Queue on your account via the [Cloudflare dashboard](https://dash.cloudflare.com). Navigate to **Storage & Databases** > **Queues** > **your Queue** and under the **Metrics** tab you'll be able to view line charts describing the number of messages processed by final outcome, the number of messages in the backlog, and other important indicators.
The metrics displayed in the Cloudflare dashboard charts are all pulled from Cloudflare's GraphQL Analytics API. You can access the metrics programmatically.
diff --git a/src/content/docs/queues/reference/how-queues-works.mdx b/src/content/docs/queues/reference/how-queues-works.mdx
index e2b01267e6ee4f..f9ffc010101814 100644
--- a/src/content/docs/queues/reference/how-queues-works.mdx
+++ b/src/content/docs/queues/reference/how-queues-works.mdx
@@ -6,6 +6,8 @@ sidebar:
---
+import { WranglerConfig } from "~/components";
+
Cloudflare Queues is a flexible messaging queue that allows you to queue messages for asynchronous processing. Message queues are great at decoupling components of applications, like the checkout and order fulfillment services for an e-commerce site. Decoupled services are easier to reason about, deploy, and implement, allowing you to ship features that delight your customers without worrying about synchronizing complex deployments. Queues also allow you to batch and buffer calls to downstream services and APIs.
There are four major concepts to understand with Queues:
@@ -135,9 +137,7 @@ export default {
};
```
-You then connect that consumer to a queue with `wrangler queues consumer ` or by defining a `[[queues.consumers]]` configuration in your `wrangler.toml` manually:
-
-import { WranglerConfig } from "~/components";
+You then connect that consumer to a queue with `wrangler queues consumer ` or by defining a `[[queues.consumers]]` configuration in your `wrangler.toml / wrangler.json` file manually:
@@ -190,7 +190,7 @@ export default {
### Remove a consumer
-To remove a queue from your project, run `wrangler queues consumer remove ` and then remove the desired queue below the `[[queues.consumers]]` in `wrangler.toml` file.
+To remove a queue from your project, run `wrangler queues consumer remove ` and then remove the desired queue below the `[[queues.consumers]]` in the Wrangler file.
### Pull consumers
diff --git a/src/content/docs/queues/tutorials/handle-rate-limits/index.mdx b/src/content/docs/queues/tutorials/handle-rate-limits/index.mdx
index ab05871bfbe181..897469ac2462f2 100644
--- a/src/content/docs/queues/tutorials/handle-rate-limits/index.mdx
+++ b/src/content/docs/queues/tutorials/handle-rate-limits/index.mdx
@@ -18,7 +18,7 @@ head:
description: Example of how to use Queues to handle rate limits of external APIs.
---
-import { Render, PackageManagers } from "~/components";
+import { Render, PackageManagers, WranglerConfig } from "~/components";
This tutorial explains how to use Queues to handle rate limits of external APIs by building an application that sends email notifications using [Resend](https://www.resend.com/). However, you can use this pattern to handle rate limits of any external API.
@@ -73,11 +73,9 @@ Creating queue rate-limit-queue.
Created queue rate-limit-queue.
```
-### Add Queue bindings to `wrangler.toml`
+### Add Queue bindings to your `wrangler.toml / wrangler.json` file
-In your `wrangler.toml` file, add the following:
-
-import { WranglerConfig } from "~/components";
+In your Wrangler file, add the following:
@@ -97,7 +95,7 @@ max_retries = 3
Including a `max_batch_size` of two for the consumer queue is important because the Resend API has a default rate limit of two requests per second. This batch size allows the queue to process messages in batches of two. If the batch size is less than two, the queue will wait for 10 seconds to collect the next message. If no more messages are available, the queue will process the message in the batch. For more information, refer to the [Batching, Retries and Delays documentation](/queues/configuration/batching-retries).
-Your final `wrangler.toml` file should look similar to the example below.
+Your final Wrangler file should look similar to the example below.
diff --git a/src/content/docs/queues/tutorials/web-crawler-with-browser-rendering/index.mdx b/src/content/docs/queues/tutorials/web-crawler-with-browser-rendering/index.mdx
index e364e5a2bbaaee..ba1ba58ec58332 100644
--- a/src/content/docs/queues/tutorials/web-crawler-with-browser-rendering/index.mdx
+++ b/src/content/docs/queues/tutorials/web-crawler-with-browser-rendering/index.mdx
@@ -19,7 +19,7 @@ head:
description: Example of how to use Queues and Browser Rendering to power a web crawler.
---
-import { Render, PackageManagers } from "~/components";
+import { Render, PackageManagers, WranglerConfig } from "~/components";
This tutorial explains how to build and deploy a web crawler with Queues, [Browser Rendering](/browser-rendering/), and [Puppeteer](/browser-rendering/platform/puppeteer/).
@@ -82,11 +82,9 @@ binding = "crawler_screenshots"
id = ""
```
-### Add KV bindings to wrangler.toml
+### Add KV bindings to the `wrangler.toml / wrangler.json` file
-Then, in your `wrangler.toml` file, add the following with the values generated in the terminal:
-
-import { WranglerConfig } from "~/components";
+Then, in your Wrangler file, add the following with the values generated in the terminal:
@@ -137,7 +135,7 @@ Created queue queues-web-crawler.
### Add Queue bindings to wrangler.toml
-Then, in your `wrangler.toml` file, add the following:
+Then, in your Wrangler file, add the following:
@@ -157,7 +155,7 @@ binding = "CRAWLER_QUEUE"
Adding the `max_batch_timeout` of 60 seconds to the consumer queue is important because Browser Rendering has a limit of two new browsers per minute per account. This timeout waits up to a minute before collecting queue messages into a batch. The Worker will then remain under this browser invocation limit.
-Your final `wrangler.toml` file should look similar to the one below.
+Your final Wrangler file should look similar to the one below.
diff --git a/src/content/docs/r2/api/s3/presigned-urls.mdx b/src/content/docs/r2/api/s3/presigned-urls.mdx
index 99fd02874e46a2..d089be4156fa9b 100644
--- a/src/content/docs/r2/api/s3/presigned-urls.mdx
+++ b/src/content/docs/r2/api/s3/presigned-urls.mdx
@@ -68,9 +68,9 @@ A valid alternative design to presigned URLs is to use a Worker with a [binding]
:::note[Bindings]
-A binding is how your Worker interacts with external resources such as [KV Namespaces](/kv/concepts/kv-namespaces/), [Durable Objects](/durable-objects/), or [R2 Buckets](/r2/buckets/). A binding is a runtime variable that the Workers runtime provides to your code. You can declare a variable name in your `wrangler.toml` file that will be bound to these resources at runtime, and interact with them through this variable. Every binding's variable name and behavior is determined by you when deploying the Worker. Refer to [Environment Variables](/workers/configuration/environment-variables/) for more information.
+A binding is how your Worker interacts with external resources such as [KV Namespaces](/kv/concepts/kv-namespaces/), [Durable Objects](/durable-objects/), or [R2 Buckets](/r2/buckets/). A binding is a runtime variable that the Workers runtime provides to your code. You can declare a variable name in your Wrangler file that will be bound to these resources at runtime, and interact with them through this variable. Every binding's variable name and behavior is determined by you when deploying the Worker. Refer to [Environment Variables](/workers/configuration/environment-variables/) for more information.
-A binding is defined in the `wrangler.toml` file of your Worker project's directory.
+A binding is defined in the Wrangler file of your Worker project's directory.
:::
@@ -131,7 +131,7 @@ export default {
} satisfies ExportedHandler;
```
-Notice the total absence of any configuration or token secrets present in the Worker code. Instead, you would create a `wrangler.toml` [binding](/r2/api/workers/workers-api-usage/#4-bind-your-bucket-to-a-worker) to whatever bucket represents the bucket you will upload to. Additionally, authorization is handled in-line with the upload which can reduce latency.
+Notice the total absence of any configuration or token secrets present in the Worker code. Instead, you would create a `wrangler.toml / wrangler.json` file [binding](/r2/api/workers/workers-api-usage/#4-bind-your-bucket-to-a-worker) to whatever bucket represents the bucket you will upload to. Additionally, authorization is handled in-line with the upload which can reduce latency.
In some cases, Workers lets you implement certain functionality more easily. For example, if you wanted to offer a write-once guarantee so that users can only upload to a path once, with pre-signed URLs, you would need to sign specific headers and require the sender to send them. You can modify the previous Worker to sign additional headers:
diff --git a/src/content/docs/r2/api/workers/workers-api-reference.mdx b/src/content/docs/r2/api/workers/workers-api-reference.mdx
index ce0539010e6bd5..ac28aeb841de53 100644
--- a/src/content/docs/r2/api/workers/workers-api-reference.mdx
+++ b/src/content/docs/r2/api/workers/workers-api-reference.mdx
@@ -3,7 +3,7 @@ pcx_content_type: reference
title: Workers API reference
---
-import { Type, MetaInfo } from "~/components";
+import { Type, MetaInfo, WranglerConfig } from "~/components";
The in-Worker R2 API is accessed by binding an R2 bucket to a [Worker](/workers). The Worker you write can expose external access to buckets via a route or manipulate R2 objects internally.
@@ -17,15 +17,13 @@ R2 organizes the data you store, called objects, into containers, called buckets
:::note[Bindings]
-A binding is how your Worker interacts with external resources such as [KV Namespaces](/kv/concepts/kv-namespaces/), [Durable Objects](/durable-objects/), or [R2 Buckets](/r2/buckets/). A binding is a runtime variable that the Workers runtime provides to your code. You can declare a variable name in your `wrangler.toml` file that will be bound to these resources at runtime, and interact with them through this variable. Every binding's variable name and behavior is determined by you when deploying the Worker. Refer to [Environment Variables](/workers/configuration/environment-variables/) for more information.
+A binding is how your Worker interacts with external resources such as [KV Namespaces](/kv/concepts/kv-namespaces/), [Durable Objects](/durable-objects/), or [R2 Buckets](/r2/buckets/). A binding is a runtime variable that the Workers runtime provides to your code. You can declare a variable name in your Wrangler file that will be bound to these resources at runtime, and interact with them through this variable. Every binding's variable name and behavior is determined by you when deploying the Worker. Refer to [Environment Variables](/workers/configuration/environment-variables/) for more information.
-A binding is defined in the `wrangler.toml` file of your Worker project's directory.
+A binding is defined in the Wrangler file of your Worker project's directory.
:::
-To bind your R2 bucket to your Worker, add the following to your `wrangler.toml` file. Update the `binding` property to a valid JavaScript variable identifier and `bucket_name` to the name of your R2 bucket:
-
-import { WranglerConfig } from "~/components";
+To bind your R2 bucket to your Worker, add the following to your Wrangler file. Update the `binding` property to a valid JavaScript variable identifier and `bucket_name` to the name of your R2 bucket:
@@ -361,7 +359,7 @@ Only a single hashing algorithm can be specified at once.
- Note that there is a limit on the total amount of data that a single `list` operation can return. If you request data, you may receive fewer than `limit` results in your response to accommodate metadata.
- - The [compatibility date](/workers/configuration/compatibility-dates/) must be set to `2022-08-04` or later in your `wrangler.toml` file. If not, then the `r2_list_honor_include` compatibility flag must be set. Otherwise it is treated as `include: ['httpMetadata', 'customMetadata']` regardless of what the `include` option provided actually is.
+ - The [compatibility date](/workers/configuration/compatibility-dates/) must be set to `2022-08-04` or later in your Wrangler file. If not, then the `r2_list_honor_include` compatibility flag must be set. Otherwise it is treated as `include: ['httpMetadata', 'customMetadata']` regardless of what the `include` option provided actually is.
This means applications must be careful to avoid comparing the amount of returned objects against your `limit`. Instead, use the `truncated` property to determine if the `list` request has more data to be returned.
diff --git a/src/content/docs/r2/api/workers/workers-api-usage.mdx b/src/content/docs/r2/api/workers/workers-api-usage.mdx
index 7ca5d9c8d1102c..2c40d0b32a88f7 100644
--- a/src/content/docs/r2/api/workers/workers-api-usage.mdx
+++ b/src/content/docs/r2/api/workers/workers-api-usage.mdx
@@ -8,7 +8,7 @@ head:
content: Use R2 from Workers
---
-import { Render, PackageManagers } from "~/components";
+import { Render, PackageManagers, WranglerConfig } from "~/components";
## 1. Create a new application with C3
@@ -56,15 +56,13 @@ You will need to bind your bucket to a Worker.
:::note[Bindings]
-A binding is how your Worker interacts with external resources such as [KV Namespaces](/kv/concepts/kv-namespaces/), [Durable Objects](/durable-objects/), or [R2 Buckets](/r2/buckets/). A binding is a runtime variable that the Workers runtime provides to your code. You can declare a variable name in your `wrangler.toml` file that will be bound to these resources at runtime, and interact with them through this variable. Every binding's variable name and behavior is determined by you when deploying the Worker. Refer to the [Environment Variables](/workers/configuration/environment-variables/) documentation for more information.
+A binding is how your Worker interacts with external resources such as [KV Namespaces](/kv/concepts/kv-namespaces/), [Durable Objects](/durable-objects/), or [R2 Buckets](/r2/buckets/). A binding is a runtime variable that the Workers runtime provides to your code. You can declare a variable name in your Wrangler file that will be bound to these resources at runtime, and interact with them through this variable. Every binding's variable name and behavior is determined by you when deploying the Worker. Refer to the [Environment Variables](/workers/configuration/environment-variables/) documentation for more information.
-A binding is defined in the `wrangler.toml` file of your Worker project's directory.
+A binding is defined in the Wrangler file of your Worker project's directory.
:::
-To bind your R2 bucket to your Worker, add the following to your `wrangler.toml` file. Update the `binding` property to a valid JavaScript variable identifier and `bucket_name` to the `` you used to create your bucket in [step 2](#2-create-your-bucket):
-
-import { WranglerConfig } from "~/components";
+To bind your R2 bucket to your Worker, add the following to your Wrangler file. Update the `binding` property to a valid JavaScript variable identifier and `bucket_name` to the `` you used to create your bucket in [step 2](#2-create-your-bucket):
diff --git a/src/content/docs/r2/buckets/create-buckets.mdx b/src/content/docs/r2/buckets/create-buckets.mdx
index b89386f86a6759..c747815d03c8d5 100644
--- a/src/content/docs/r2/buckets/create-buckets.mdx
+++ b/src/content/docs/r2/buckets/create-buckets.mdx
@@ -25,7 +25,8 @@ wrangler r2 bucket create your-bucket-name
:::note
-Bucket names can only contain lowercase letters (a-z), numbers (0-9), and hyphens (-).
+- Bucket names can only contain lowercase letters (a-z), numbers (0-9), and hyphens (-).
+- Bucket names cannot begin or end with a hyphen.
The placeholder text is only for the example.
diff --git a/src/content/docs/r2/examples/terraform-aws.mdx b/src/content/docs/r2/examples/terraform-aws.mdx
index 30a53f1a9c262d..1d6be7b07b313d 100644
--- a/src/content/docs/r2/examples/terraform-aws.mdx
+++ b/src/content/docs/r2/examples/terraform-aws.mdx
@@ -18,7 +18,12 @@ For using only the Cloudflare provider, see [Terraform](/r2/examples/terraform/)
:::
-With [`terraform`](https://developer.hashicorp.com/terraform/downloads) installed, create `main.tf` and copy the content below replacing with your Account ID and R2 credentials.
+With [`terraform`](https://developer.hashicorp.com/terraform/downloads) installed:
+
+1. Create a `main.tf` file, or edit your existing Terraform configuration.
+2. Populate the endpoint URL at `endpoints.s3` with your [Cloudflare account ID](/fundamentals/setup/find-account-and-zone-ids/).
+3. Populate `access_key` and `secret_key` with the corresponding [R2 API credentials](/r2/api/s3/tokens/).
+4. Ensure that `skip_region_validation = true`, `skip_requesting_account_id = true`, and `skip_credentials_validation = true` are set in the provider configuration.
```hcl
terraform {
@@ -36,6 +41,8 @@ provider "aws" {
access_key =
secret_key =
+ # Required for R2.
+ # These options disable S3-specific validation on the client (Terraform) side.
skip_credentials_validation = true
skip_region_validation = true
skip_requesting_account_id = true
diff --git a/src/content/docs/r2/index.mdx b/src/content/docs/r2/index.mdx
index fbbd70e7d36c26..bb561e69c93f8f 100644
--- a/src/content/docs/r2/index.mdx
+++ b/src/content/docs/r2/index.mdx
@@ -4,6 +4,7 @@ type: overview
pcx_content_type: overview
sidebar:
order: 1
+description: Cloudflare R2 is a cost-effective, scalable object storage solution for cloud-native apps, web content, and data lakes without egress fees.
head:
- tag: title
content: Cloudflare R2
diff --git a/src/content/docs/r2/reference/data-location.mdx b/src/content/docs/r2/reference/data-location.mdx
index 950f7a9fc4c9a5..4342a004ad4a1f 100644
--- a/src/content/docs/r2/reference/data-location.mdx
+++ b/src/content/docs/r2/reference/data-location.mdx
@@ -5,6 +5,8 @@ sidebar:
order: 7
---
+import { WranglerConfig } from "~/components";
+
Learn how the location of data stored in R2 is determined and about the different available inputs that control the physical location where objects in your buckets are stored.
## Automatic (recommended)
@@ -77,9 +79,7 @@ Use Jurisdictional Restrictions when you need to ensure data is stored and proce
### Using jurisdictions from Workers
-To access R2 buckets that belong to a jurisdiction from [Workers](/workers/), you will need to specify the jurisdiction as well as the bucket name as part of your [bindings](/r2/api/workers/workers-api-usage/#3-bind-your-bucket-to-a-worker) in your `wrangler.toml`:
-
-import { WranglerConfig } from "~/components";
+To access R2 buckets that belong to a jurisdiction from [Workers](/workers/), you will need to specify the jurisdiction as well as the bucket name as part of your [bindings](/r2/api/workers/workers-api-usage/#3-bind-your-bucket-to-a-worker) in your `wrangler.toml / wrangler.json` file:
diff --git a/src/content/docs/r2/reference/partners/index.mdx b/src/content/docs/r2/reference/partners/index.mdx
new file mode 100644
index 00000000000000..5b80dfa05c1246
--- /dev/null
+++ b/src/content/docs/r2/reference/partners/index.mdx
@@ -0,0 +1,5 @@
+---
+title: Partners
+sidebar:
+ hidden: true
+---
diff --git a/src/content/docs/r2/reference/partners/snowflake-regions.mdx b/src/content/docs/r2/reference/partners/snowflake-regions.mdx
new file mode 100644
index 00000000000000..ba76a5d1d51a5d
--- /dev/null
+++ b/src/content/docs/r2/reference/partners/snowflake-regions.mdx
@@ -0,0 +1,81 @@
+---
+title: Snowflake
+sidebar:
+ hidden: true
+---
+
+import { Render } from "~/components";
+
+This page details which R2 location or jurisdiction is recommended based on your Snowflake region.
+
+You have the following inputs to control the physical location where objects in your R2 buckets are stored (for more information refer to [data location](/r2/reference/data-location/)):
+
+- [**Location hints**](/r2/reference/data-location/#location-hints): Specify a geographical area (for example, Asia-Pacific or Western Europe). R2 makes a best effort to place your bucket in or near that location to optimize performance. You can confirm bucket placement after creation by navigating to the **Settings** tab of your bucket and referring to the **Bucket details** section.
+- [**Jurisdictions**](/r2/reference/data-location/#jurisdictional-restrictions): Enforce that data is both stored and processed within a specific jurisdiction (for example, the EU or FedRAMP environment). Use jurisdictions when you need to ensure data is stored and processed within a jurisdiction to meet data residency requirements, including local regulations such as the [GDPR](https://gdpr-info.eu/) or [FedRAMP](https://blog.cloudflare.com/cloudflare-achieves-fedramp-authorization/).
+
+## North and South America (Commercial)
+
+| Snowflake region name | Cloud | Region ID | Recommended R2 location |
+| ------------------------- | ----- | ---------------- | ----------------------- |
+| Canada (Central) | AWS | `ca-central-1` | Location hint: `enam` |
+| South America (Sao Paulo) | AWS | `sa-east-1` | Location hint: `enam` |
+| US West (Oregon) | AWS | `us-west-2` | Location hint: `wnam` |
+| US East (Ohio) | AWS | `us-east-2` | Location hint: `enam` |
+| US East (N. Virginia) | AWS | `us-east-1` | Location hint: `enam` |
+| US Central1 (Iowa) | GCP | `us-central1` | Location hint: `enam` |
+| US East4 (N. Virginia) | GCP | `us-east4` | Location hint: `enam` |
+| Canada Central (Toronto) | Azure | `canadacentral` | Location hint: `enam` |
+| Central US (Iowa) | Azure | `centralus` | Location hint: `enam` |
+| East US 2 (Virginia) | Azure | `eastus2` | Location hint: `enam` |
+| South Central US (Texas) | Azure | `southcentralus` | Location hint: `enam` |
+| West US 2 (Washington) | Azure | `westus2` | Location hint: `wnam` |
+
+## U.S. Government
+
+| Snowflake region name | Cloud | Region ID | Recommended R2 location |
+| --------------------- | ----- | --------------- | ----------------------- |
+| US Gov East 1 | AWS | `us-gov-east-1` | Jurisdiction: `fedramp` |
+| US Gov West 1 | AWS | `us-gov-west-1` | Jurisdiction: `fedramp` |
+| US Gov Virginia | Azure | `usgovvirginia` | Jurisdiction: `fedramp` |
+
+:::note
+
+Cloudflare Enterprise customers may contact their account team or [Cloudflare Support](/support/contacting-cloudflare-support/) to get access to the FedRAMP jurisdiction.
+:::
+
+## Europe and Middle East
+
+| Snowflake region name | Cloud | Region ID | Recommended R2 location |
+| ----------------------------- | ----- | ------------------ | ----------------------------------------- |
+| EU (Frankfurt) | AWS | `eu-central-1` | Jurisdiction: `eu` or hint: `weur`/`eeur` |
+| EU (Zurich) | AWS | `eu-central-2` | Jurisdiction: `eu` or hint: `weur`/`eeur` |
+| EU (Stockholm) | AWS | `eu-north-1` | Jurisdiction: `eu` or hint: `weur`/`eeur` |
+| EU (Ireland) | AWS | `eu-west-1` | Jurisdiction: `eu` or hint: `weur`/`eeur` |
+| Europe (London) | AWS | `eu-west-2` | Jurisdiction: `eu` or hint: `weur`/`eeur` |
+| EU (Paris) | AWS | `eu-west-3` | Jurisdiction: `eu` or hint: `weur`/`eeur` |
+| Middle East Central2 (Dammam) | GCP | `me-central2` | Location hint: `weur`/`eeur` |
+| Europe West2 (London) | GCP | `europe-west-2` | Jurisdiction: `eu` or hint: `weur`/`eeur` |
+| Europe West3 (Frankfurt) | GCP | `europe-west-3` | Jurisdiction: `eu` or hint: `weur`/`eeur` |
+| Europe West4 (Netherlands) | GCP | `europe-west-4` | Jurisdiction: `eu` or hint: `weur`/`eeur` |
+| North Europe (Ireland) | Azure | `northeurope` | Jurisdiction: `eu` or hint: `weur`/`eeur` |
+| Switzerland North (Zurich) | Azure | `switzerlandnorth` | Jurisdiction: `eu` or hint: `weur`/`eeur` |
+| West Europe (Netherlands) | Azure | `westeurope` | Jurisdiction: `eu` or hint: `weur`/`eeur` |
+| UAE North (Dubai) | Azure | `uaenorth` | Location hint: `weur`/`eeur` |
+| UK South (London) | Azure | `uksouth` | Jurisdiction: `eu` or hint: `weur`/`eeur` |
+
+## Asia Pacific and China
+
+| Snowflake region name | Cloud | Region ID | Recommended R2 location |
+| -------------------------------- | ----- | ---------------- | ----------------------- |
+| Asia Pacific (Tokyo) | AWS | `ap-northeast-1` | Location hint: `apac` |
+| Asia Pacific (Seoul) | AWS | `ap-northeast-2` | Location hint: `apac` |
+| Asia Pacific (Osaka) | AWS | `ap-northeast-3` | Location hint: `apac` |
+| Asia Pacific (Mumbai) | AWS | `ap-south-1` | Location hint: `apac` |
+| Asia Pacific (Singapore) | AWS | `ap-southeast-1` | Location hint: `apac` |
+| Asia Pacific (Sydney) | AWS | `ap-southeast-2` | Location hint: `oc` |
+| Asia Pacific (Jakarta) | AWS | `ap-southeast-3` | Location hint: `apac` |
+| China (Ningxia) | AWS | `cn-northwest-1` | Location hint: `apac` |
+| Australia East (New South Wales) | Azure | `australiaeast` | Location hint: `oc` |
+| Central India (Pune) | Azure | `centralindia` | Location hint: `apac` |
+| Japan East (Tokyo) | Azure | `japaneast` | Location hint: `apac` |
+| Southeast Asia (Singapore) | Azure | `southeastasia` | Location hint: `apac` |
diff --git a/src/content/docs/r2/tutorials/summarize-pdf.mdx b/src/content/docs/r2/tutorials/summarize-pdf.mdx
index 61dfdd9edc7be7..79aed17692a561 100644
--- a/src/content/docs/r2/tutorials/summarize-pdf.mdx
+++ b/src/content/docs/r2/tutorials/summarize-pdf.mdx
@@ -12,7 +12,7 @@ languages:
- TypeScript
---
-import { Render, PackageManagers, Details } from "~/components";
+import { Render, PackageManagers, Details, WranglerConfig } from "~/components";
In this tutorial, you will learn how to use [event notifications](/r2/buckets/event-notifications/) to process a PDF file when it is uploaded to an R2 bucket. You will use [Workers AI](/workers-ai/) to summarize the PDF and store the summary as a text file in the same bucket.
@@ -61,9 +61,7 @@ cd pdf-summarizer
## 2. Create the front-end
-Using Static Assets, you can serve the front-end of your application from your Worker. To use Static Assets, you need to add the required bindings to your `wrangler.toml` file.
-
-import { WranglerConfig } from "~/components";
+Using Static Assets, you can serve the front-end of your application from your Worker. To use Static Assets, you need to add the required bindings to your Wrangler file.
@@ -237,7 +235,7 @@ When you open the URL in your browser, you will see that there is a file upload
## 3. Handle file upload
-To handle the file upload, you will first need to add the R2 binding. In the `wrangler.toml` file, add the following code:
+To handle the file upload, you will first need to add the R2 binding. In the Wrangler file, add the following code:
@@ -305,7 +303,7 @@ Event notifications capture changes to data in your R2 bucket. You will need to
npx wrangler queues create pdf-summarizer
```
-Add the binding to the `wrangler.toml` file:
+Add the binding to the Wrangler file:
@@ -387,7 +385,7 @@ The above code does the following:
## 7. Use Workers AI to summarize the content
-To use Workers AI, you will need to add the Workers AI binding to the `wrangler.toml` file. The `wrangler.toml` file should contain the following code:
+To use Workers AI, you will need to add the Workers AI binding to the Wrangler file. The Wrangler file should contain the following code:
diff --git a/src/content/docs/r2/tutorials/upload-logs-event-notifications.mdx b/src/content/docs/r2/tutorials/upload-logs-event-notifications.mdx
index b537755019a9c0..85d6763ecd902a 100644
--- a/src/content/docs/r2/tutorials/upload-logs-event-notifications.mdx
+++ b/src/content/docs/r2/tutorials/upload-logs-event-notifications.mdx
@@ -11,7 +11,7 @@ languages:
- TypeScript
---
-import { Render, PackageManagers } from "~/components";
+import { Render, PackageManagers, WranglerConfig } from "~/components";
This example provides a step-by-step guide on using [event notifications](/r2/buckets/event-notifications/) to capture and store R2 upload logs in a separate bucket.
@@ -85,9 +85,7 @@ cd consumer-worker
## 5. Configure your Worker
-In your Worker project's [`wrangler.toml`](/workers/wrangler/configuration/) file, add a [queue consumer](/workers/wrangler/configuration/#queues) and [R2 bucket binding](/workers/wrangler/configuration/#r2-buckets). The queues consumer bindings will register your Worker as a consumer of your future event notifications and the R2 bucket bindings will allow your Worker to access your R2 bucket.
-
-import { WranglerConfig } from "~/components";
+In your Worker project's [`wrangler.toml / wrangler.json` file](/workers/wrangler/configuration/), add a [queue consumer](/workers/wrangler/configuration/#queues) and [R2 bucket binding](/workers/wrangler/configuration/#r2-buckets). The queues consumer bindings will register your Worker as a consumer of your future event notifications and the R2 bucket bindings will allow your Worker to access your R2 bucket.
diff --git a/src/content/docs/radar/investigate/bgp-anomalies.mdx b/src/content/docs/radar/investigate/bgp-anomalies.mdx
index c5d1f629fbd641..3f537a2ab7eb95 100644
--- a/src/content/docs/radar/investigate/bgp-anomalies.mdx
+++ b/src/content/docs/radar/investigate/bgp-anomalies.mdx
@@ -7,7 +7,7 @@ sidebar:
text: Beta
---
-import { Render, PackageManagers } from "~/components";
+import { Render, PackageManagers, WranglerConfig } from "~/components";
To access Cloudflare Radar BGP Anomaly Detection results, you will first need to create an API token that includes a `Account:Radar` permission. All the following examples should work with a free-tier Cloudflare account.
@@ -198,11 +198,9 @@ To start developing your Worker, `cd` into your new project directory:
cd hijack-alerts
```
-In your `wrangler.toml` file, change the default checking frequency (once per hour) to what you like. Here is an example
+In your Wrangler file, change the default checking frequency (once per hour) to what you like. Here is an example
of configuring the workers to run the script every five minutes.
-import { WranglerConfig } from "~/components";
-
```toml
@@ -372,7 +370,7 @@ The last step is to deploy the application with command `npx wrangler deploy` an
If you have [Email Routing][email-routing] enabled for your domain, you can also send email alerts directly from Workers. Refer to [Send emails from Workers][email-workers-tutorial] to learn more.
-For this alert to work, you will need to configure the proper email bindings in the [`wrangler.toml`][wrangler-send-email] file.
+For this alert to work, you will need to configure the proper email bindings in the [`wrangler.toml / wrangler.json` file][wrangler-send-email].
diff --git a/src/content/docs/reference-architecture/diagrams/ai/ai-asset-creation.mdx b/src/content/docs/reference-architecture/diagrams/ai/ai-asset-creation.mdx
index 1223203503a6fc..198999947776b9 100644
--- a/src/content/docs/reference-architecture/diagrams/ai/ai-asset-creation.mdx
+++ b/src/content/docs/reference-architecture/diagrams/ai/ai-asset-creation.mdx
@@ -34,13 +34,13 @@ Example uses of such compositions of AI models can be employed to generation vis

1. **Client upload**: Send POST request with content to API endpoint.
-2. **Prompt generation**: Generate prompt for later-stage text-to-image model by calling [Workers AI](/workers-ai/) [text generation models](/workers-ai/models/#text-generation) with content as input.
+2. **Prompt generation**: Generate prompt for later-stage text-to-image model by calling [Workers AI](/workers-ai/) [text generation models](/workers-ai/models/) with content as input.
3. **Safety check**: Check for compliance with safety guidelines by calling [Workers AI](/workers-ai/) [text classification models](/workers-ai/models/#text-classification) with the previously generated prompt as input.
4. **Image generation**: Generate image by calling [Workers AI](/workers-ai/) [text-to-image models](/workers-ai/models/#text-to-image) previously generated prompt.
## Related resources
- [Community project: content-based asset creation demo](https://auto-asset.pages.dev/)
-- [Workers AI: Text generation models](/workers-ai/models/#text-generation)
+- [Workers AI: Text generation models](/workers-ai/models/)
- [Workers AI: Text-to-image models](/workers-ai/models/#text-to-image)
- [Workers AI: llamaguard-7b-awq](/workers-ai/models/llamaguard-7b-awq/)
diff --git a/src/content/docs/rules/reference/page-rules-migration.mdx b/src/content/docs/rules/reference/page-rules-migration.mdx
index f67d13639cef85..2c4254dde1b964 100644
--- a/src/content/docs/rules/reference/page-rules-migration.mdx
+++ b/src/content/docs/rules/reference/page-rules-migration.mdx
@@ -145,7 +145,7 @@ You configured a Page Rule to perform an automatic redirect from HTTP to HTTPS f
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a single redirect |
| ---------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -188,7 +188,7 @@ You configured a Page Rule turning on Automatic HTTPS Rewrites for all subdomain
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a configuration rule |
| -------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -234,7 +234,7 @@ You configured a Page Rule adjusting browser cache TTL to one day for all subdom
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a cache rule |
| ------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
@@ -277,7 +277,7 @@ You configured a Page Rule turning on Browser Integrity Check for all subdomains
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a configuration rule |
| ------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -321,7 +321,7 @@ You configured a Page Rule turning on Bypass Cache on Cookie for all subdomains
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a cache rule |
| ---------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -367,7 +367,7 @@ You configured a Page Rule turning on Cache By Device Type for all subdomains of
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a cache rule |
| ------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
@@ -412,7 +412,7 @@ You configured a Page Rule turning on Cache Deception Armor for all subdomains o
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a cache rule |
| -------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -456,7 +456,7 @@ You configured a Page Rule turning on caching of all assets for all subdomains o
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a cache rule |
| ----------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -500,7 +500,7 @@ You configured a Page Rule turning on caching for responses that contained cooki
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a cache rule |
| -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -552,7 +552,7 @@ You configured a Page Rule turning on caching of every response with status code
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a cache rule |
| ------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -598,7 +598,7 @@ You configured a Page Rule setting a custom cache key for all query string param
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a cache rule |
| -------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -639,7 +639,7 @@ You configured a Page Rule turning off Cloudflare Apps (deprecated) for all subd
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a configuration rule |
| -------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -688,7 +688,7 @@ You configured a Page Rule with **Disable Performance** (deprecated) for all sub
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a configuration rule |
| ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -767,7 +767,7 @@ You configured a Page Rule turning off [Zaraz](/zaraz/) for all subdomains of `e
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a configuration rule |
| ---------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
@@ -814,7 +814,7 @@ You configured a Page Rule adjusting Edge Cache TTL for all subdomains of `examp
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a cache rule |
| ---------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
@@ -857,7 +857,7 @@ You configured a Page Rule turning off [Email Obfuscation](/waf/tools/scrape-shi
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a configuration rule |
| ------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -910,7 +910,7 @@ You configured a Page Rule permanently redirecting `www.example.com` to `example
}}
/>
-
+
| Page Rules configuration | Migrate to a single redirect |
| --------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -961,7 +961,7 @@ You configured a Page Rule permanently redirecting `example.com/old-path` to `ex
}}
/>
-
+
| Page Rules configuration | Migrate to a single redirect |
| ----------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -1004,7 +1004,7 @@ You configured a Page Rule changing the `Host` HTTP header to `example.saas-prov
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to an origin rule |
| ------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -1030,7 +1030,7 @@ You configured a Page Rule adding a `CF-IPCountry` HTTP header, for all requests
2. Turn off your existing Page Rule and validate the behavior of the Managed Transform.
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a Managed Transform |
| -------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -1073,7 +1073,7 @@ You configured a Page Rule turning off Mirage for all subdomains of `example.com
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a configuration rule |
| -------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -1116,7 +1116,7 @@ You configured a Page Rule turning off Opportunistic Encryption for all subdomai
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a configuration rule |
| -------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -1162,7 +1162,7 @@ You configured a Page Rule turning off Origin Cache Control for all subdomains o
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a cache rule |
| ------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
@@ -1208,7 +1208,7 @@ You configured a Page Rule turning on Origin Error Page Pass-thru for all subdom
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a cache rule |
| -------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -1251,7 +1251,7 @@ You configured a Page Rule turning off [Polish](/images/polish/) for all subdoma
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a configuration rule |
| -------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -1297,7 +1297,7 @@ You configured a Page Rule turning on Query String Sort for all subdomains of `e
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a cache rule |
| ------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -1339,7 +1339,7 @@ You configured a Page Rule changing the origin to `example.saas-provider.com`, f
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to an origin rule |
| ---------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -1385,7 +1385,7 @@ You configured a Page Rule turning on byte-for-byte equivalency checks for all s
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a cache rule |
| ------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -1428,7 +1428,7 @@ You configured a Page Rule turning off Rocket Loader for all subdomains of `exam
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a configuration rule |
| ---------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
@@ -1471,7 +1471,7 @@ You configured a Page Rule setting Security Level to _I'm Under Attack_ for all
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a configuration rule |
| ------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -1497,7 +1497,7 @@ You configured a Page Rule adding a `True-Client-IP` HTTP header for all request
2. Turn off your existing Page Rule and validate the behavior of the Managed Transform.
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a Managed Transform |
| -------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -1540,7 +1540,7 @@ You configured a Page Rule setting SSL to _Strict_ for all subdomains of `exampl
3. If your tests succeed, delete the existing Page Rule.
-
+
| Page Rules configuration | Migrate to a configuration rule |
| -------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
diff --git a/src/content/docs/rules/snippets/create-api.mdx b/src/content/docs/rules/snippets/create-api.mdx
index ea776b1f1d2134..e0a823242d0140 100644
--- a/src/content/docs/rules/snippets/create-api.mdx
+++ b/src/content/docs/rules/snippets/create-api.mdx
@@ -58,6 +58,8 @@ curl --request PUT https://api.cloudflare.com/client/v4/zones/{zone_id}/snippets
--form metadata='{"main_module":"example.js"}'
```
+The name of a snippet can only contain the characters `a-z`, `0-9`, and `_` (underscore). The name must be unique in the context of the zone. You cannot change the snippet name after creating the snippet.
+
The required body parameters are:
- `files`: The file with your JavaScript code.
@@ -78,6 +80,8 @@ To make this example work, save your JavaScript code in a file named `example.js
}
```
+To deploy a new snippet, you must [create a snippet rule](#createupdatedelete-snippet-rules). The expression of the snippet rule defines when the snippet code will run.
+
### Create/update/delete snippet rules
:::caution
diff --git a/src/content/docs/rules/snippets/create-dashboard.mdx b/src/content/docs/rules/snippets/create-dashboard.mdx
index 759f9c5c33736d..3e2d9d007dbe18 100644
--- a/src/content/docs/rules/snippets/create-dashboard.mdx
+++ b/src/content/docs/rules/snippets/create-dashboard.mdx
@@ -9,30 +9,20 @@ head:
content: Create a snippet in the dashboard
---
-The snippet creation wizard will guide you through the following steps:
-
-1. Define snippet code and test your snippet.
-2. Create snippet rule. The rule expression defines for which requests the snippet code will run.
-3. Review your snippet configuration and deploy the snippet.
-
-## Procedure
-
1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com/), and select your account and website.
2. Go to **Rules** > **Snippets**, and select **Create a Snippet**.
-3. Enter a descriptive name for the snippet.
-
-4. Enter the snippet's JavaScript code. You can test how your snippet will handle incoming requests using the **HTTP** and **Preview** tabs.
+3. In **Snippet name**, enter a descriptive name for the snippet. You cannot change the name after creating the snippet.
-5. Select **Continue to add Snippet rule**.
+4. Enter the snippet's JavaScript code in the code editor. You can test how your snippet will handle incoming requests using the **HTTP** and **Preview** tabs.
-6. Enter a descriptive name for the snippet rule. The snippet rule specifies for which requests the snippet code will run.
+5. Select **Snippet rule** to configure when the snippet will run.
-7. Under **When incoming requests match**, select if you wish to run the snippet for all incoming requests or only for requests that match a custom filter expression.
+6. Under **Run this Snippet if incoming requests match**, select if you wish to run the snippet only for requests that match a custom filter expression or for all incoming requests.
-8. (Optional) To define a custom expression, use the Expression Builder (specifying one or more values for **Field**, **Operator**, and **Value**) or manually enter an expression using the Expression Editor. For more information, refer to [Edit expressions in the dashboard](/ruleset-engine/rules-language/expressions/edit-expressions/).
+7. (Optional) To define a custom expression, use the Expression Builder (specifying one or more values for **Field**, **Operator**, and **Value**) or manually enter an expression using the Expression Editor. For more information, refer to [Edit expressions in the dashboard](/ruleset-engine/rules-language/expressions/edit-expressions/).
-9. Select **Continue to create Snippet**.
+8. Select **Done**.
-10. Review the snippet code and the snippet rule that defines when the snippet will run. To save your snippet and deploy a rule that enables it, select **Save and deploy Snippet**. If you are not ready to deploy your snippet, select **Save and enable later**.
+9. To deploy your snippet, select **Deploy**. If you are not ready to deploy your snippet, open the dropdown next to **Deploy** and select **Save as Draft**.
diff --git a/src/content/docs/rules/snippets/create-terraform.mdx b/src/content/docs/rules/snippets/create-terraform.mdx
index 5a6a46a680151b..a911725349c34c 100644
--- a/src/content/docs/rules/snippets/create-terraform.mdx
+++ b/src/content/docs/rules/snippets/create-terraform.mdx
@@ -40,7 +40,7 @@ resource "cloudflare_snippet_rules" "cookie_snippet_rule" {
}
```
-The name of a snippet can only contain the characters `a-z`, `0-9`, and `_` (underscore). The name must be unique in the context of the zone.
+The name of a snippet can only contain the characters `a-z`, `0-9`, and `_` (underscore). The name must be unique in the context of the zone. You cannot change the snippet name after creating the snippet.
All `snippet_name` values in the `cloudflare_snippet_rules` resource must match the names of existing snippets.
diff --git a/src/content/docs/rules/snippets/index.mdx b/src/content/docs/rules/snippets/index.mdx
index f78465a0d71b4b..6ac0f2b4bf8afe 100644
--- a/src/content/docs/rules/snippets/index.mdx
+++ b/src/content/docs/rules/snippets/index.mdx
@@ -26,7 +26,7 @@ To create and deploy a Snippet, you need to define the following elements:
- **Code snippet**: JavaScript code to be executed during the request-handling process.
- **Snippet rule**: A [filter expression](/ruleset-engine/rules-language/expressions/) that determines which requests the Snippet will be applied to. Each Snippet can only be associated with one Snippet Rule.
-For more information, refer to the [How it works](/rules/snippets/how-it-works/) and [Create in the dashboard](/rules/snippets/create-dashboard/) sections.
+For more information, refer to the [How it works](/rules/snippets/how-it-works/) and [Create a snippet in the dashboard](/rules/snippets/create-dashboard/) sections.
## Templates
diff --git a/src/content/docs/ruleset-engine/reference/phases-list.mdx b/src/content/docs/ruleset-engine/reference/phases-list.mdx
index 000c5af17b2ec8..8b11f8f9aa3d0d 100644
--- a/src/content/docs/ruleset-engine/reference/phases-list.mdx
+++ b/src/content/docs/ruleset-engine/reference/phases-list.mdx
@@ -3,18 +3,15 @@ title: Phases list
pcx_content_type: reference
sidebar:
order: 1
-
---
-import { Render } from "~/components"
+import { Render } from "~/components";
The following tables list the [phases](/ruleset-engine/about/phases/) of Cloudflare products powered by the Ruleset Engine, in the order those phases are executed. Some products such as the Cloudflare Web Application Firewall have more than one associated phase.
## Network layer
-Network-layer phases apply to packets received on the Cloudflare global network.
-
-
+[Network-layer](https://www.cloudflare.com/learning/ddos/glossary/open-systems-interconnection-model-osi/) phases apply to packets received on the Cloudflare global network.
| Phase name | Used in product/feature |
| ---------------- | ------------------------------------------------------------------------------------------------ |
@@ -23,43 +20,38 @@ Network-layer phases apply to packets received on the Cloudflare global network.
| `mt_managed` | [Magic Firewall managed rulesets](/magic-firewall/how-to/enable-managed-rulesets/) |
| `mt_ids_managed` | [Magic Firewall Intrusion Detection System (IDS)](/magic-firewall/about/ids/) |
-
-
## Application layer
-Application-layer phases apply to requests received on the Cloudflare global network.
+[Application-layer](https://www.cloudflare.com/learning/ddos/what-is-layer-7/) phases apply to requests received on the Cloudflare global network.
### Request phases
The phases execute in the order they appear in the table.
-
-
-| Phase name | Used in product/feature |
-| ----------------------------------- | ------------------------------------------------------------------------------------------------------ |
-| `http_request_sanitize` | [URL normalization](/rules/normalization/) |
-| `http_request_dynamic_redirect` | [Single Redirects](/rules/url-forwarding/single-redirects/) |
-| `http_request_transform` | [Rewrite URL Rules](/rules/transform/url-rewrite/) |
-| *N/A* (internal phase) | [Waiting Room Rules](/waiting-room/additional-options/waiting-room-rules/) |
-| `http_config_settings` | [Configuration Rules](/rules/configuration-rules/) |
-| `http_request_origin` | [Origin Rules](/rules/origin-rules/) |
-| `ddos_l7`\* | [HTTP DDoS Attack Protection](/ddos-protection/managed-rulesets/http/) |
-| `http_request_api_gateway` | [API Gateway](/api-shield/api-gateway/) |
-| `http_request_firewall_custom` | [Custom rules (Web Application Firewall)](/waf/custom-rules/) |
-| `http_ratelimit` | [Rate limiting rules (WAF)](/waf/rate-limiting-rules/) |
-| *N/A* (internal phase) | [API Shield](/api-shield/) |
-| `http_request_firewall_managed` | [WAF Managed Rules](/waf/managed-rules/) |
-| `http_request_sbfm` | [Super Bot Fight Mode](/bots/get-started/pro/) |
-| *N/A* (internal phase) | [Cloudflare Access](/cloudflare-one/policies/access/) |
-| `http_request_redirect` | [Bulk Redirects](/rules/url-forwarding/bulk-redirects/) |
-| *N/A* (internal phase) | [Managed Transforms](/rules/transform/managed-transforms/) |
-| `http_request_late_transform` | [HTTP Request Header Modification Rules](/rules/transform/request-header-modification/) |
-| `http_request_cache_settings` | [Cache Rules](/cache/how-to/cache-rules/) |
-| `http_request_snippets` | [Snippets](/rules/snippets/) |
-| `http_request_cloud_connector` | [Cloud Connector](/rules/cloud-connector/) |
-
-
-\* *This phase is for configuration purposes only — the corresponding rules will not be executed at this stage in the request handling process.*
+| Phase name | Used in product/feature |
+| ------------------------------- | --------------------------------------------------------------------------------------- |
+| `http_request_sanitize` | [URL normalization](/rules/normalization/) |
+| `http_request_dynamic_redirect` | [Single Redirects](/rules/url-forwarding/single-redirects/) |
+| `http_request_transform` | [Rewrite URL Rules](/rules/transform/url-rewrite/) |
+| _N/A_ (internal phase) | [Waiting Room Rules](/waiting-room/additional-options/waiting-room-rules/) |
+| `http_config_settings` | [Configuration Rules](/rules/configuration-rules/) |
+| `http_request_origin` | [Origin Rules](/rules/origin-rules/) |
+| `ddos_l7`\* | [HTTP DDoS Attack Protection](/ddos-protection/managed-rulesets/http/) |
+| `http_request_api_gateway` | [API Gateway](/api-shield/api-gateway/) |
+| `http_request_firewall_custom` | [Custom rules (Web Application Firewall)](/waf/custom-rules/) |
+| `http_ratelimit` | [Rate limiting rules (WAF)](/waf/rate-limiting-rules/) |
+| _N/A_ (internal phase) | [API Shield](/api-shield/) |
+| `http_request_firewall_managed` | [WAF Managed Rules](/waf/managed-rules/) |
+| `http_request_sbfm` | [Super Bot Fight Mode](/bots/get-started/pro/) |
+| _N/A_ (internal phase) | [Cloudflare Access](/cloudflare-one/policies/access/) |
+| `http_request_redirect` | [Bulk Redirects](/rules/url-forwarding/bulk-redirects/) |
+| _N/A_ (internal phase) | [Managed Transforms](/rules/transform/managed-transforms/) |
+| `http_request_late_transform` | [HTTP Request Header Modification Rules](/rules/transform/request-header-modification/) |
+| `http_request_cache_settings` | [Cache Rules](/cache/how-to/cache-rules/) |
+| `http_request_snippets` | [Snippets](/rules/snippets/) |
+| `http_request_cloud_connector` | [Cloud Connector](/rules/cloud-connector/) |
+
+\* _This phase is for configuration purposes only — the corresponding rules will not be executed at this stage in the request handling process._
@@ -67,14 +59,12 @@ The phases execute in the order they appear in the table.
The phases execute in the order they appear in the table.
-
-
-| Phase name | Used in product/feature |
-| --------------------------------- | ---------------------------------------------------------------------------------------------------- |
-| `http_custom_errors` | [Custom Error Responses](/rules/custom-error-responses/) |
-| *N/A* (internal phase) | [Managed Transforms](/rules/transform/managed-transforms/) |
-| `http_response_headers_transform` | [HTTP Response Header Modification Rules](/rules/transform/response-header-modification/) |
-| `http_ratelimit` | [Rate limiting rules](/waf/rate-limiting-rules/) (when they use response information) |
-| `http_response_compression` | [Compression Rules](/rules/compression-rules/) |
-| `http_response_firewall_managed` | [Cloudflare Sensitive Data Detection](/waf/managed-rules/) (Data Loss Prevention) |
-| `http_log_custom_fields` | [Logpush custom fields](/logs/reference/custom-fields/) |
\ No newline at end of file
+| Phase name | Used in product/feature |
+| --------------------------------- | ----------------------------------------------------------------------------------------- |
+| `http_custom_errors` | [Custom Error Responses](/rules/custom-error-responses/) |
+| _N/A_ (internal phase) | [Managed Transforms](/rules/transform/managed-transforms/) |
+| `http_response_headers_transform` | [HTTP Response Header Modification Rules](/rules/transform/response-header-modification/) |
+| `http_ratelimit` | [Rate limiting rules](/waf/rate-limiting-rules/) (when they use response information) |
+| `http_response_compression` | [Compression Rules](/rules/compression-rules/) |
+| `http_response_firewall_managed` | [Cloudflare Sensitive Data Detection](/waf/managed-rules/) (Data Loss Prevention) |
+| `http_log_custom_fields` | [Logpush custom fields](/logs/reference/custom-fields/) |
diff --git a/src/content/docs/ruleset-engine/rules-language/functions.mdx b/src/content/docs/ruleset-engine/rules-language/functions.mdx
index cfa47bd5d8a236..433f9006f3b23e 100644
--- a/src/content/docs/ruleset-engine/rules-language/functions.mdx
+++ b/src/content/docs/ruleset-engine/rules-language/functions.mdx
@@ -347,6 +347,9 @@ url_decode("%2520", "r") will return " "
// Using url_decode() with the any() function:
any(url_decode(http.request.body.form.values[*])[*] contains "an xss attack")
+
+// Using the u option to match a specific alphabet
+url_decode(http.request.uri.path) matches "(?u)\p{Hangul}+"
```
### `uuidv4`
diff --git a/src/content/docs/ssl/edge-certificates/changing-dcv-method/methods/delegated-dcv.mdx b/src/content/docs/ssl/edge-certificates/changing-dcv-method/methods/delegated-dcv.mdx
index 742626e2f4474d..0128edc487b72d 100644
--- a/src/content/docs/ssl/edge-certificates/changing-dcv-method/methods/delegated-dcv.mdx
+++ b/src/content/docs/ssl/edge-certificates/changing-dcv-method/methods/delegated-dcv.mdx
@@ -35,7 +35,7 @@ You should use Delegated DCV when all of the following conditions are true:
:::note[Delegated DCV and origin certificates]
-As explained in the [announcement blog post](https://blog.cloudflare.com/introducing-dcv-delegation/), currently, you can only delegate DCV to one provider at a time. If you also issue publicly trusted certificates for the same hostname for your [origin server](/ssl/concepts/#origin-certificate), this will no longer be possible. You can use [Cloudflare Origin CA certificates](/ssl/origin-configuration/origin-ca/) instead.
+As explained in the [announcement blog post](https://blog.cloudflare.com/introducing-dcv-delegation/), currently, you can only delegate DCV to one provider at a time. If you also issue publicly trusted certificates for the same hostname for your [origin server](/ssl/concepts/#origin-certificate), this will no longer be possible. You can use [Cloudflare origin CA certificates](/ssl/origin-configuration/origin-ca/) instead.
:::
## Setup
diff --git a/src/content/docs/ssl/origin-configuration/authenticated-origin-pull/set-up/zone-level.mdx b/src/content/docs/ssl/origin-configuration/authenticated-origin-pull/set-up/zone-level.mdx
index e5ca768855d785..966af24685ffb2 100644
--- a/src/content/docs/ssl/origin-configuration/authenticated-origin-pull/set-up/zone-level.mdx
+++ b/src/content/docs/ssl/origin-configuration/authenticated-origin-pull/set-up/zone-level.mdx
@@ -28,7 +28,7 @@ If you need a different AOP certificate to apply to different custom hostnames,
First, upload a certificate to your origin.
-To use a Cloudflare certificate (which uses a specific CA), [download the .PEM file](/ssl/static/authenticated_origin_pull_ca.pem) and upload it to your origin. This certificate is **not** the same as the Cloudflare Origin CA certificate and will not appear on your Dashboard.
+To use a Cloudflare certificate (which uses a specific CA), [download the .PEM file](/ssl/static/authenticated_origin_pull_ca.pem) and upload it to your origin. This certificate is **not** the same as the [Cloudflare origin CA certificate](/ssl/origin-configuration/origin-ca/) and will not appear on your Dashboard.
To use a custom certificate, follow the API instructions to [upload a custom certificate to Cloudflare](/ssl/edge-certificates/custom-certificates/uploading/#upload-a-custom-certificate), but use the [`origin_tls_client_auth` endpoint](/api/resources/origin_tls_client_auth/methods/create/). Then, upload the certificate to your origin.
diff --git a/src/content/docs/ssl/origin-configuration/origin-ca.mdx b/src/content/docs/ssl/origin-configuration/origin-ca/index.mdx
similarity index 85%
rename from src/content/docs/ssl/origin-configuration/origin-ca.mdx
rename to src/content/docs/ssl/origin-configuration/origin-ca/index.mdx
index d64ce987c620d2..f0f6f9171b172f 100644
--- a/src/content/docs/ssl/origin-configuration/origin-ca.mdx
+++ b/src/content/docs/ssl/origin-configuration/origin-ca/index.mdx
@@ -1,24 +1,23 @@
---
-title: Origin CA certificates
+title: Cloudflare origin CA
pcx_content_type: how-to
sidebar:
order: 3
+ label: Setup
head: []
-description: Origin Certificate Authority (CA) certificates allow you to encrypt
- traffic between Cloudflare and your origin web server, and reduce origin
- bandwidth consumption.
+description: Encrypt traffic between Cloudflare and your origin web server and reduce origin bandwidth consumption.
---
-import { FeatureTable } from "~/components"
+import { FeatureTable, GlossaryTooltip, Render } from "~/components"
-Use Origin Certificate Authority (CA) certificates to encrypt traffic between Cloudflare and your origin web server and reduce origin bandwidth consumption. Once deployed, these certificates are compatible with [Strict SSL mode](/ssl/origin-configuration/ssl-modes/full-strict/).
+If your origin only receives traffic from proxied records, use Cloudflare origin CA certificates to encrypt traffic between Cloudflare and your origin web server and reduce bandwidth consumption. Once deployed, these certificates are compatible with [Strict SSL mode](/ssl/origin-configuration/ssl-modes/full-strict/).
-For more background information on Origin CA certificates, refer to the [introductory blog post](https://blog.cloudflare.com/cloudflare-ca-encryption-origin/).
+For more background information on origin CA certificates, refer to the [introductory blog post](https://blog.cloudflare.com/cloudflare-ca-encryption-origin/).
:::note
-Using Cloudflare Origin CA certificates do not prevent you from using [delegated DCV](/ssl/edge-certificates/changing-dcv-method/methods/delegated-dcv/).
+Using Cloudflare origin CA certificates does not prevent you from using [delegated DCV](/ssl/edge-certificates/changing-dcv-method/methods/delegated-dcv/).
:::
## Availability
@@ -89,6 +88,10 @@ If all your origin hosts are protected by Origin CA certificates or publicly tru
If you have origin hosts that are not protected by certificates, set the **SSL/TLS encryption** mode for a specific application to **Full (strict)** by using a [Page Rule](/rules/page-rules/).
+:::caution
+
+:::
+
## Revoke an Origin CA certificate
If you misplace your key material or do not want a certificate to be trusted, you may want to revoke your certificate. You cannot undo this process.
@@ -114,7 +117,7 @@ Some origin web servers require upload of the Cloudflare Origin CA root certific
### Hostname and wildcard coverage
-Certificates may be generated with up to 200 individual Subject Alternative Names (SANs). A SAN can take the form of a fully-qualified domain name (`www.example.com`) or a wildcard (`*.example.com`). You cannot use IP addresses as SANs on Cloudflare Origin CA certificates.
+Certificates may be generated with up to 200 individual Subject Alternative Names (SANs). A SAN can take the form of a fully-qualified domain name (`www.example.com`) or a wildcard (`*.example.com`). You cannot use IP addresses as SANs on Cloudflare origin CA certificates.
Wildcards may only cover one level, but can be used multiple times on the same certificate for broader coverage (for example, `*.example.com` and `*.secure.example.com` may co-exist).
@@ -131,4 +134,4 @@ To automate processes involving Origin CA certificates, use the following API ca
## Troubleshooting
-Site visitors may see untrusted certificate errors if you pause or disable Cloudflare on subdomains that use Origin CA certificates. These certificates only encrypt traffic between Cloudflare and your origin server, not traffic from client browsers to your origin.
+If you find `NET::ERR_CERT_AUTHORITY_INVALID` or other issues after setting up Cloudflare origin CA, refer to [troubleshooting](/ssl/origin-configuration/origin-ca/troubleshooting/).
diff --git a/src/content/docs/ssl/origin-configuration/origin-ca/troubleshooting.mdx b/src/content/docs/ssl/origin-configuration/origin-ca/troubleshooting.mdx
new file mode 100644
index 00000000000000..c0aa6232c89f68
--- /dev/null
+++ b/src/content/docs/ssl/origin-configuration/origin-ca/troubleshooting.mdx
@@ -0,0 +1,45 @@
+---
+title: Troubleshooting Cloudflare origin CA
+pcx_content_type: troubleshooting
+description: Troubleshoot issues like NET::ERR_CERT_AUTHORITY_INVALID when using Cloudflare origin CA.
+sidebar:
+ order: 2
+ label: Troubleshooting
+---
+
+import { GlossaryTooltip, Render } from "~/components";
+
+Consider the following common issues and troubleshooting steps when using [Cloudflare origin CA](/ssl/origin-configuration/origin-ca/).
+
+## NET::ERR_CERT_AUTHORITY_INVALID
+
+### Cause
+
+
+This also means that SSL Labs or similar SSL validators are expected to flag the certificate as invalid.
+
+### Solutions
+
+- Make sure the [proxy status](/dns/manage-dns-records/reference/proxied-dns-records/) of your DNS records and any [page rules](/rules/page-rules/) (if any) are set up correctly. If so, you can try to turn proxying off and then on again and wait a few minutes.
+- If you must have direct connections between clients and your origin server, consider installing a publicly trusted certificate at your origin instead. This process is done outside of Cloudflare, where you should issue the certificate directly from a certificate authority (CA) of your choice. You can still use Full (strict) [encryption mode](/ssl/origin-configuration/ssl-modes/), as long as the CA is listed on the [Cloudflare trust store](https://github.com/cloudflare/cfssl_trust).
+
+## The issuer of this certificate could not be found
+
+### Cause
+Some origin web servers require that you upload the Cloudflare origin CA root certificate or certificate chain.
+
+### Solution
+Use the following links to download either an ECC or an RSA version and upload to your origin web server:
+
+* [Cloudflare Origin ECC PEM](/ssl/static/origin_ca_ecc_root.pem) (do not use with Apache cPanel)
+* [Cloudflare Origin RSA PEM](/ssl/static/origin_ca_rsa_root.pem)
+
+## The certificate is not trusted in all web browsers
+
+### Cause
+Apache cPanel requires that you upload the Cloudflare origin CA root certificate or certificate chain.
+
+### Solution
+Use the following link to download an RSA version of the root certificate and upload it to your origin web server:
+
+* [Cloudflare Origin RSA PEM](/ssl/static/origin_ca_rsa_root.pem)
\ No newline at end of file
diff --git a/src/content/docs/style-guide/components/badges.mdx b/src/content/docs/style-guide/components/badges.mdx
index 657dcf0052b31c..c806adadf26866 100644
--- a/src/content/docs/style-guide/components/badges.mdx
+++ b/src/content/docs/style-guide/components/badges.mdx
@@ -6,21 +6,19 @@ import { Badge } from "~/components";
Badges are a built-in component provided by [Starlight](https://starlight.astro.build/guides/components/#badges). Use them to indicate a product is in beta, for example.
-:::note
-
-For guidance on using badges inline with other text, refer to [Inline badges](/style-guide/components/inline-badge/).
-
-:::
-
## Component
+To adopt this styling in a React component, apply the `sl-badge` class to a `span` element.
+
```mdx live
import { Badge } from "~/components";
-
-
-
-
+
+
+
+
+
+
```
## Sidebar
diff --git a/src/content/docs/style-guide/components/inline-badge.mdx b/src/content/docs/style-guide/components/inline-badge.mdx
index 57dc07ef721782..af6375f6a0a168 100644
--- a/src/content/docs/style-guide/components/inline-badge.mdx
+++ b/src/content/docs/style-guide/components/inline-badge.mdx
@@ -12,7 +12,9 @@ Guidelines:
- For instructions related to the feature (such as instructions on turning the feature on or off), you may mention again it's in beta, and also include "(beta)" in the side nav.
:::
-## Example
+## Component
+
+To adopt this styling in a React component, apply the `sl-badge` class to a `span` element.
```mdx live
import { InlineBadge } from '~/components';
@@ -27,7 +29,7 @@ import { InlineBadge } from '~/components';
### Legacy
-### Custom
+### Default
```
## Inputs
diff --git a/src/content/docs/style-guide/documentation-content-strategy/content-types/changelog.mdx b/src/content/docs/style-guide/documentation-content-strategy/content-types/changelog.mdx
index 3ec3c91722d564..b57eb9d457f4a7 100644
--- a/src/content/docs/style-guide/documentation-content-strategy/content-types/changelog.mdx
+++ b/src/content/docs/style-guide/documentation-content-strategy/content-types/changelog.mdx
@@ -117,7 +117,7 @@ entries:
description: |-
Queue consumers will soon automatically scale up concurrently as a queue's backlog grows in order to keep overall message processing latency down. Concurrency will be enabled on all existing queues by 2023-03-28.
- **To opt-out, or to configure a fixed maximum concurrency**, set `max_concurrency = 1` in your `wrangler.toml` file or via [the queues dashboard](https://dash.cloudflare.com/?to=/:account/queues).
+ **To opt-out, or to configure a fixed maximum concurrency**, set `max_concurrency = 1` in your Wrangler file or via [the queues dashboard](https://dash.cloudflare.com/?to=/:account/queues).
**To opt-in, you do not need to take any action**: your consumer will begin to scale out as needed to keep up with your message backlog. It will scale back down as the backlog shrinks, and/or if a consumer starts to generate a higher rate of errors. To learn more about how consumers scale, refer to the [consumer concurrency](/queues/learning/consumer-concurrency/) documentation.
- publish_date: "2023-03-02"
diff --git a/src/content/docs/style-guide/formatting/code-conventions-and-format.mdx b/src/content/docs/style-guide/formatting/code-conventions-and-format.mdx
index 5450485a3fd28c..4a6065f78b675b 100644
--- a/src/content/docs/style-guide/formatting/code-conventions-and-format.mdx
+++ b/src/content/docs/style-guide/formatting/code-conventions-and-format.mdx
@@ -107,7 +107,7 @@ Text in this font denotes text or characters that you should enter from the keyb
| Enum (enumerator) names (depending on language) | `type ContentTypeMapElem` |
| Environment variable names | `` |
| Element names, including angle brackets (XML and HTML). | `