diff --git a/public/_redirects b/public/_redirects index bd240a9497794f..28a02d31279c38 100644 --- a/public/_redirects +++ b/public/_redirects @@ -279,23 +279,30 @@ # D1 /d1/client-api/ /d1/worker-api/ 301 /d1/build-with-d1/d1-client-api/ /d1/worker-api/ 301 -/d1/build-with-d1/import-data/ /d1/best-practices/import-export-data/ 301 -/d1/build-with-d1/ /d1/best-practices/ 301 -/d1/build-with-d1/import-export-data/ /d1/best-practices/import-export-data/ 301 -/d1/build-with-d1/use-indexes/ /d1/best-practices/use-indexes/ 301 -/d1/build-with-d1/remote-development/ /d1/best-practices/remote-development/ 301 -/d1/build-with-d1/local-development/ /d1/best-practices/local-development/ 301 +/d1/build-with-d1/import-data/ /d1/features/import-export-data/ 301 +/d1/build-with-d1/ /d1/features/ 301 +/d1/build-with-d1/import-export-data/ /d1/features/import-export-data/ 301 +/d1/build-with-d1/use-indexes/ /d1/features/use-indexes/ 301 +/d1/build-with-d1/remote-development/ /d1/features/remote-development/ 301 +/d1/build-with-d1/local-development/ /d1/features/local-development/ 301 /d1/build-with-d1/foreign-keys/ /d1/sql-api/foreign-keys/ 301 /d1/build-with-d1/query-json/ /d1/sql-api/query-json/ 301 -/d1/build-with-d1/use-d1-from-pages/ /d1/best-practices/use-d1-from-pages/ 301 +/d1/build-with-d1/use-d1-from-pages/ /d1/features/use-d1-from-pages/ 301 +/d1/best-practices/ /d1/features/ 301 +/d1/best-practices/import-export-data/ /d1/features/import-export-data/ 301 +/d1/best-practices/local-development/ /d1/features/local-development/ 301 +/d1/best-practices/query-d1/ /d1/features/query-d1/ 301 +/d1/best-practices/remote-development/ /d1/features/remote-development/ 301 +/d1/best-practices/use-d1-from-pages/ /d1/features/use-d1-from-pages/ 301 +/d1/best-practices/use-indexes/ /d1/features/use-indexes/ 301 /d1/learning/using-d1-from-pages/ /pages/functions/bindings/#d1-databases 301 /d1/learning/debug-d1/ /d1/observability/debug-d1/ 301 -/d1/learning/using-indexes/ 
/d1/best-practices/use-indexes/ 301 +/d1/learning/using-indexes/ /d1/features/use-indexes/ 301 /d1/learning/querying-json/ /d1/sql-api/query-json/ 301 -/d1/learning/importing-data/ /d1/best-practices/import-export-data/ 301 +/d1/learning/importing-data/ /d1/features/import-export-data/ 301 /d1/learning/generated-columns/ /d1/reference/generated-columns/ 301 -/d1/learning/local-development/ /d1/best-practices/local-development/ 301 -/d1/learning/remote-development/ /d1/best-practices/remote-development/ 301 +/d1/learning/local-development/ /d1/features/local-development/ 301 +/d1/learning/remote-development/ /d1/features/remote-development/ 301 /d1/learning/data-location/ /d1/configuration/data-location/ 301 /d1/migrations/ /d1/reference/migrations/ 301 /d1/platform/wrangler-commands/ /workers/wrangler/commands/#d1 301 @@ -310,20 +317,20 @@ /d1/reference/environments/ /d1/configuration/environments/ 301 /d1/reference/metrics-analytics/ /d1/observability/metrics-analytics/ 301 /d1/reference/wrangler-commands/ /d1/wrangler-commands/ 301 -/d1/how-to/ /d1/best-practices/ 301 -/d1/how-to/query-databases/ /d1/best-practices/query-d1/ 301 -/d1/how-to/using-indexes/ /d1/best-practices/use-indexes/ 301 +/d1/how-to/ /d1/features/ 301 +/d1/how-to/query-databases/ /d1/features/query-d1/ 301 +/d1/how-to/using-indexes/ /d1/features/use-indexes/ 301 /d1/how-to/querying-json/ /d1/sql-api/query-json/ 301 -/d1/how-to/importing-data/ /d1/best-practices/import-export-data/ 301 +/d1/how-to/importing-data/ /d1/features/import-export-data/ 301 /d1/how-to/generated-columns/ /d1/reference/generated-columns/ 301 -/d1/build-databases/ /d1/best-practices/ 301 -/d1/build-databases/query-databases/ /d1/best-practices/query-d1/ 301 -/d1/build-databases/use-indexes/ /d1/best-practices/use-indexes/ 301 -/d1/build-databases/import-data/ /d1/best-practices/import-export-data/ 301 +/d1/build-databases/ /d1/features/ 301 +/d1/build-databases/query-databases/ /d1/features/query-d1/ 301 
+/d1/build-databases/use-indexes/ /d1/features/use-indexes/ 301 +/d1/build-databases/import-data/ /d1/features/import-export-data/ 301 /d1/build-databases/client-api/ /d1/worker-api/ 301 /d1/reference/query-json/ /d1/sql-api/query-json/ 301 -/d1/configuration/local-development/ /d1/best-practices/local-development/ 301 -/d1/configuration/remote-development/ /d1/best-practices/remote-development/ 301 +/d1/configuration/local-development/ /d1/features/local-development/ 301 +/d1/configuration/remote-development/ /d1/features/remote-development/ 301 /d1/reference/database-commands/ /d1/reference/sql-statements/ 301 /d1/reference/sql-statements/ /d1/sql-api/sql-statements/ 301 diff --git a/public/images/d1/d1-read-replication-concept.png b/public/images/d1/d1-read-replication-concept.png new file mode 100644 index 00000000000000..9eeb2639d79e29 Binary files /dev/null and b/public/images/d1/d1-read-replication-concept.png differ diff --git a/src/content/apps/index.yaml b/src/content/apps/index.yaml index a83364c7e157f0..c72dfc3d4779cd 100644 --- a/src/content/apps/index.yaml +++ b/src/content/apps/index.yaml @@ -340,3 +340,11 @@ languages: [TypeScript] cloudflare: false updated: 2024-10-07 +- link: https://github.com/harshil1712/e-com-d1 + id: E-commerce Store + description: An application to showcase D1 read replication in the context of an online store. 
+ tags: [] + products: [Workers, D1] + languages: [TypeScript] + cloudflare: true + updated: 2025-02-27 \ No newline at end of file diff --git a/src/content/docs/d1/configuration/data-location.mdx b/src/content/docs/d1/configuration/data-location.mdx index 445506e0cd6a02..a6e907e6eda9df 100644 --- a/src/content/docs/d1/configuration/data-location.mdx +++ b/src/content/docs/d1/configuration/data-location.mdx @@ -9,13 +9,13 @@ Learn how the location of data stored in D1 is determined, including where the l ## Automatic (recommended) -By default, D1 will automatically create your database in a location close to where you issued the request to create a database. In most cases this allows D1 to choose the optimal location for your database on your behalf. +By default, D1 will automatically create your primary database instance in a location close to where you issued the request to create a database. In most cases this allows D1 to choose the optimal location for your database on your behalf. ## Provide a location hint -Location hint is an optional parameter you can provide to indicate your desired geographical location for your database. +Location hint is an optional parameter you can provide to indicate your desired geographical location for your primary database instance. -You may want to explicitly provide a location hint in cases where the majority of your writes to a specific database come from a different location than where you are creating the database from. location hints can be useful when: +You may want to explicitly provide a location hint in cases where the majority of your writes to a specific database come from a different location than where you are creating the database from. Location hints can be useful when: - Working in a distributed team. - Creating databases specific to users in specific locations. 
@@ -33,9 +33,7 @@ Providing a location hint does not guarantee that D1 runs in your preferred loca ### Use Wrangler :::note - To install Wrangler, the command-line interface for D1 and Workers, refer to [Install and Update Wrangler](/workers/wrangler/install-and-update/). - ::: To provide a location hint when creating a new database, pass the `--location` flag with a valid location hint: @@ -70,3 +68,11 @@ D1 supports the following location hints: :::caution D1 location hints are not currently supported for South America (`sam`), Africa (`afr`), and the Middle East (`me`). D1 databases do not run in these locations. ::: + +## Read replica locations + +D1 read replication allows you to create and distribute read-only copies of the primary database instance around the world. This reduces the query latency for users located far away from the primary database instance. + +When using D1 read replication through [Sessions API](/d1/features/read-replication/#how-d1-read-replication-works), D1 automatically creates a read replica in [every available region](/d1/configuration/data-location#available-location-hints), including the region where the primary database instance is located. + +Refer to [D1 read replication](/d1/features/read-replication/) for more information. 
\ No newline at end of file diff --git a/src/content/docs/d1/configuration/index.mdx b/src/content/docs/d1/configuration/index.mdx index 35b901b9e04f0a..bd877291ba15a2 100644 --- a/src/content/docs/d1/configuration/index.mdx +++ b/src/content/docs/d1/configuration/index.mdx @@ -2,7 +2,7 @@ title: Configuration pcx_content_type: navigation sidebar: - order: 8 + order: 9 group: hideIndex: true --- diff --git a/src/content/docs/d1/d1-api.mdx b/src/content/docs/d1/d1-api.mdx index 25c486c6a10520..f44f45af0287ea 100644 --- a/src/content/docs/d1/d1-api.mdx +++ b/src/content/docs/d1/d1-api.mdx @@ -3,5 +3,5 @@ pcx_content_type: navigation title: REST API external_link: /api/resources/d1/subresources/database/methods/create/ sidebar: - order: 6 + order: 7 --- diff --git a/src/content/docs/d1/demos.mdx b/src/content/docs/d1/demos.mdx index eb856d104531d2..523b7536e729f1 100644 --- a/src/content/docs/d1/demos.mdx +++ b/src/content/docs/d1/demos.mdx @@ -2,7 +2,7 @@ pcx_content_type: navigation title: Demos and architectures sidebar: - order: 12 + order: 13 --- diff --git a/src/content/docs/d1/examples/index.mdx b/src/content/docs/d1/examples/index.mdx index 8ad40e6d10db80..991ad6cbf6b533 100644 --- a/src/content/docs/d1/examples/index.mdx +++ b/src/content/docs/d1/examples/index.mdx @@ -4,7 +4,7 @@ hideChildren: false pcx_content_type: navigation title: Examples sidebar: - order: 10 + order: 11 group: hideIndex: true --- diff --git a/src/content/docs/d1/examples/query-d1-from-python-workers.mdx b/src/content/docs/d1/examples/query-d1-from-python-workers.mdx index 712e9929c7d8a9..3c8895cc9d2dbe 100644 --- a/src/content/docs/d1/examples/query-d1-from-python-workers.mdx +++ b/src/content/docs/d1/examples/query-d1-from-python-workers.mdx @@ -126,4 +126,4 @@ If you receive an error deploying: - Refer to [Workers Python documentation](/workers/languages/python/) to learn more about how to use Python in Workers. 
- Review the [D1 Workers Binding API](/d1/worker-api/) and how to query D1 databases. -- Learn [how to import data](/d1/best-practices/import-export-data/) to your D1 database. +- Learn [how to import data](/d1/features/import-export-data/) to your D1 database. diff --git a/src/content/docs/d1/best-practices/import-export-data.mdx b/src/content/docs/d1/features/import-export-data.mdx similarity index 99% rename from src/content/docs/d1/best-practices/import-export-data.mdx rename to src/content/docs/d1/features/import-export-data.mdx index a430fdb98539d8..76d62975cd7211 100644 --- a/src/content/docs/d1/best-practices/import-export-data.mdx +++ b/src/content/docs/d1/features/import-export-data.mdx @@ -7,7 +7,7 @@ sidebar: D1 allows you to import existing SQLite tables and their data directly, enabling you to migrate existing data into D1 quickly and easily. This can be useful when migrating applications to use Workers and D1, or when you want to prototype a schema locally before importing it to your D1 database(s). -D1 also allows you to export a database. This can be useful for [local development](/d1/best-practices/local-development/) or testing. +D1 also allows you to export a database. This can be useful for [local development](/d1/features/local-development/) or testing. 
## Import an existing database diff --git a/src/content/docs/d1/best-practices/index.mdx b/src/content/docs/d1/features/index.mdx similarity index 87% rename from src/content/docs/d1/best-practices/index.mdx rename to src/content/docs/d1/features/index.mdx index 57f21ba7643ffc..6dca17a44da955 100644 --- a/src/content/docs/d1/best-practices/index.mdx +++ b/src/content/docs/d1/features/index.mdx @@ -1,5 +1,5 @@ --- -title: Best practices +title: Features pcx_content_type: navigation sidebar: order: 3 diff --git a/src/content/docs/d1/best-practices/local-development.mdx b/src/content/docs/d1/features/local-development.mdx similarity index 100% rename from src/content/docs/d1/best-practices/local-development.mdx rename to src/content/docs/d1/features/local-development.mdx diff --git a/src/content/docs/d1/best-practices/query-d1.mdx b/src/content/docs/d1/features/query-d1.mdx similarity index 100% rename from src/content/docs/d1/best-practices/query-d1.mdx rename to src/content/docs/d1/features/query-d1.mdx diff --git a/src/content/docs/d1/features/read-replication.mdx b/src/content/docs/d1/features/read-replication.mdx new file mode 100644 index 00000000000000..637329d86792d0 --- /dev/null +++ b/src/content/docs/d1/features/read-replication.mdx @@ -0,0 +1,217 @@ +--- +title: Read replication +pcx_content_type: concept +sidebar: + order: 2 + +--- + +import { GlossaryTooltip, Details } from "~/components" + +D1 read replication is a feature which reduces the request latency for read requests for users who may be located far away from the primary database instance. + +You can use D1 read replication by using D1 Sessions API. A Session encapsulates all the queries from one logical session for your application. For example, a Session may correspond to all queries coming from a particular web browser session. + +By using Sessions API for read replication, all of your queries from a single Session read from a version of the database which is as up-to-date as your query. 
This ensures that the version of the database you are reading is logically consistent with your queries when using read replicas. + +The following Worker code uses D1 Sessions API to use read replicas, and allows you to test the decrease in query latency when using read replication. + +```js collapse={8-15, 24-31} +export default { + async fetch(request, env) { + const { pathname } = new URL(request.url); + const companyName1 = `Bs Beverages`; + const stmt = env.DB.prepare(`SELECT * FROM Customers WHERE CompanyName = ?`); + const session = env.DB.withSession("first-unconstrained"); + + // Without Sessions API / read replication + if (pathname === `/run`) { + const tsStart1 = Date.now(); + const { results, meta } = await stmt.bind(companyName1).run(); + const d1Duration1 = Date.now() - tsStart1; + return Response.json({ results, meta, d1Duration1 }); + } + + // With Sessions API + read replication + else if (pathname === `/withsession`) { + const tsStart2 = Date.now(); + const { results, meta } = await session.prepare(`SELECT * FROM Customers WHERE CompanyName = ?`).bind(companyName1).run(); + const d1Duration2 = Date.now() - tsStart2; + return Response.json({ results, meta, d1Duration2 }); + } + + // Welcome text + return new Response( + `Welcome to the D1 read replication demo! + Add one of the following slugs below to see the effects of using D1 read replication. + /run - Queries the table without using read replication + /withsession - Queries the table using read replication (using "first-unconstrained")` + ); + } +}; +``` + +{/* Deploy to Workers button */} + +## Primary database instance vs read replicas + +When using D1 without read replication, D1 routes all queries (both read and write) to a specific database instance in [one location in the world](/d1/configuration/data-location/), known as the primary database instance. 
The request latency is dependent on the physical closeness of a user to the primary database instance - it takes less time for light (information) to travel between the user and the primary database instance if that distance is shorter. Users located further away from this database instance experience the longest request latency. + +When using read replication, D1 introduces multiple “almost up-to-date” copies of the primary database instance which only serve read requests, called read replicas . D1 creates the read replicas in multiple regions throughout the world [across the Cloudflare network](/d1/features/read-replication/#read-replica-locations). + +A user may be located far away from the primary database instance but close to a read replica. By sending their read requests to the read replica instead of the primary database instance, the user enjoys shorter read request response times. For example, the request latency when using the primary database instance may be 250 ms, versus 20 - 50 ms when using a read replica. + +![D1 read replication concept](/images/d1/d1-read-replication-concept.png) + +:::note +All write queries are still forwarded to the primary database instance. Read replication only improves the query response time for read requests. 
+::: + +| Type of database instance | Description | How it handles write queries | How it handles read queries | +| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------- | --------------------------------------------------------- | +| Primary database instance | The database instance containing the “original” copy of the database | Can serve write queries | Can serve read queries | +| Read replica | A database instance containing an “almost up-to-date” copy of the original database, asynchronously updated against the primary database instance | Forwards any write queries to the primary database instance | Can serve read queries using its own copy of the database | + +D1 asynchronously updates read replicas against the primary database instance. This means that at any given time, a read replica may be arbitrarily out of date. The difference between the primary database instance and the read replica is known as the replica lag . + +## Replica lag and consistency model + +To account for replica lag, it is important to consider the consistency model for D1. A consistency model is a logical framework that governs how a database system serves user queries (how the data is updated and accessed) when there are multiple database instances. Different models can be useful in different use cases. Most database systems provide [read committed](https://jepsen.io/consistency/models/read-committed), [snapshot isolation](https://jepsen.io/consistency/models/snapshot-isolation), or [serializable](https://jepsen.io/consistency/models/serializable) consistency models, depending on their configuration. + +D1 read replication offers [sequential consistency](https://jepsen.io/consistency/models/sequential). 
D1 creates a global order of all operations which have taken place on the database, and can identify the latest version of the database that a query has seen, using [bookmarks](/d1/reference/time-travel/#bookmarks). It then serves the query with a database instance that is at least as up-to-date as the query itself. + +Sequential consistency has properties such as: + +- **Monotonic reads**: If you perform two reads one after the other (read-1, then read-2), read-2 cannot read a version of the database prior to read-1. +- **Monotonic writes**: If you perform write-1 then write-2, all processes observe write-1 before write-2. +- **Writes follow reads**: If you read a value, then perform a write, the subsequent write must be based on the value that was just read. +- **Read my own writes**: If you write to the database, all subsequent reads will see the write. + +## Use Sessions API + +### Use read replication via Sessions API + +By using Sessions API for read replication, all of your queries from a single Session read from a version of the database which is as up-to-date as your query. This ensures that the version of the database you are reading is logically consistent with your queries when using read replicas. + +### Bookmarks + +D1 read replication achieves the total ordering of all operations by attaching a bookmark to each write query within a Session. A bookmark represents the state of a database at a specific point in time. For more information, refer to [Bookmarks](/d1/reference/time-travel/#bookmarks). + +### Start a D1 Session + +A Session API code block may resemble the following: + +```ts +// synchronous +let sess = env.DB_D1.withSession('<constraint> or bookmark') +const stmt = sess.prepare('<query>') +// wait for Session condition, queries within batch have causal consistency +const result = await sess.run() +``` + +- You begin a Session with `.withSession`, specifying a `<constraint>`. +- You continue a Session by providing a `bookmark`.
+- You prepare your query string with `.prepare`. +- You obtain the result with `.run`. + +When starting a Session with `withSession` in your Worker code, you can specify the `` parameter, which corresponds to how the Session begins. + +- `first-primary`: + - Directs the first query in the Session (whether read or write) to the primary database instance. + - Subsequent queries in the Session have sequential consistency. + - Example use-case: Reading the score-table of an ongoing sports tournament. It is important for the first read to read the latest data from the primary database instance, but may not need to display subsequent score updates immediately (and therefore suitable to use read replicas for subsequent queries within the same Session). +- `first-unconstrained`: + - Directs the first query in the Session (whether read or write) to any database instance. This could be either the primary database instance or a read replica (note that write queries will be forwarded to the primary database instance). + - Subsequent queries in the Session have sequential consistency. + - Example use-case: Displaying blog articles on a website. It is not crucial to display the latest blog article, and therefore the user can start the Session by reading from a read replica. Subsequent queries can also go to the read replica. + +When continuing an existing Session started with `withSession`, you can provide a `bookmark` parameter from an existing Session. + +- `bookmark`: + - Continues an existing Session using the provided `bookmark` as the reference point. If you already have a `bookmark` from a different part of the application, you can continue that Session using the same `bookmark`, which ensures you read from a version of the database which is at least as up-to-date as the `bookmark`. + - Subsequent queries in the Session have sequential consistency. + - You can return the last encountered `bookmark` for a given Session using `sess.getBookmark()`. 
+ - Example use-case: (Continuation of the previous example used in `first-primary`) Reading the score-table of an ongoing sports tournament. A user may have started a web browser session and already has a `bookmark`. D1 can serve subsequent read queries within the same Session which is logically consistent by using the `bookmark`. + +By default (when the `` is either `null` or unspecified), D1 Session uses `first-primary` as the starting condition. + +For more information on Sessions API, refer to the [D1 Workers Binding API documentation](/d1/worker-api/d1-database#withsession). + +### Examples + +#### Start a new Session with `first-primary` + +Suppose you want to develop a webpage for an electricity provider which lists the electricity bill statements. An assumption here is that each statement is immutable. Once issued, it never changes. + +In this scenario, you want the first request of the page to show a list of all the statements and their issue dates. Therefore, the first request starts a new D1 Session using the constraint `first-primary` to get the latest information (ensuring that the list includes all issued bill statements) from the primary database instance. + +Then, when opening an individual electricity bill statement, we can continue using the same Session by passing the `bookmark` from the first query to subsequent requests. Since each bill statement is immutable, any bill statement listed from the first query is guaranteed to be available in subsequent requests using the same Session. 
+ +```ts +async function listBillStatements(accountId: string, db: D1Database): ListBillStatementsResult { + const session = db.withSession("first-primary"); + const { results } = session.prepare("SELECT * FROM bills WHERE accountId = ?").bind(accountId).run(); + return { bookmark: session.getBookmark(), bills: results }; +} + +async function getBillStatement(accountId: string, billId: string, bookmark: string): GetBillStatementResult { + // NOTE: We achieve sequential consistency with the given `bookmark`. + const session = db.withSession(bookmark); + const { results } = session.prepare("SELECT * FROM bills WHERE accountId = ? AND billId = ? LIMIT 1").bind(accountId, billId).first(); + return { bookmark: session.getBookmark(), bill: results }; +} +``` + +#### Start a new Session with `first-unconstrained` + +Suppose you want to develop a feature for displaying “likes” on a social network application. + +The number of likes is a good example of a situation which does not require the latest information all the time. When displaying the number of likes of a post, the first request starts a new D1 Session using the constraint `first-unconstrained`, which will be served by the nearest D1 read replica. + +Subsequent interactions on the application should continue using the same Session by passing the `bookmark` from the first query to subsequent requests. This guarantees that all interactions will observe information at least as up-to-date as the initial request, and therefore never show information older than what a user has already observed. The number of likes will be updated with newer counts over time with subsequent requests as D1 asynchronously updates the read replicas with the changes from the primary database. + +```js +async function getLikes(postId: string, db: D1Database, bookmark: string | null): GetLikesResult { + // NOTE: Achieve sequential consistency with given bookmark, + // or start a new session that can be served by any replica. 
+ const session = db.withSession(bookmark ?? "first-unconstrained"); + const { results } = session.prepare("SELECT * FROM likes WHERE postId = ?").bind(postId).run(); + return { bookmark: session.getBookmark(), likes: results }; +} +``` + +## Read replica locations + +Currently, D1 automatically creates a read replica in every region, including the region where the primary database instance is located. These regions are: +- ENAM +- WNAM +- WEUR +- EEUR +- APAC +- OC + +## Benefits of read replication + +A system with multiple read replicas located around the world improves the performance of databases: + +- The read throughput increases by distributing load across multiple replicas. Since multiple database instances are able to serve read-only requests, your application can serve a larger number of queries at any given time. +- The query latency decreases for users located close to the read replicas. By shortening the physical distance between a read replica and the user, read query latency decreases, resulting in a faster application. + +## Monitor latency + +To see the impact of read replication, check the latency for your read-only queries. Refer to the [queryBatchTimeMs](/d1/observability/metrics-analytics/) metric or [view your metrics through the dashboard](/d1/observability/metrics-analytics/#view-metrics-in-the-dashboard). + +## Known limitations + +There are some known limitations for D1 read replication. + +- The lifecycle of a read replica is tied to the lifecycle of the primary database instance. For example, if the primary database instance is inactive, the read replicas are inactive too.
+- TBC + +## Supplementary information + +You may wish to refer to the following resources: + +- Blog: [Building D1: a Global Database](https://blog.cloudflare.com/building-d1-a-global-database/) +- [D1 Sessions API documentation](/d1/worker-api/d1-database#withsession) +- [Read replication demo application](/d1/demos/) \ No newline at end of file diff --git a/src/content/docs/d1/best-practices/remote-development.mdx b/src/content/docs/d1/features/remote-development.mdx similarity index 100% rename from src/content/docs/d1/best-practices/remote-development.mdx rename to src/content/docs/d1/features/remote-development.mdx diff --git a/src/content/docs/d1/best-practices/use-d1-from-pages.mdx b/src/content/docs/d1/features/use-d1-from-pages.mdx similarity index 100% rename from src/content/docs/d1/best-practices/use-d1-from-pages.mdx rename to src/content/docs/d1/features/use-d1-from-pages.mdx diff --git a/src/content/docs/d1/best-practices/use-indexes.mdx b/src/content/docs/d1/features/use-indexes.mdx similarity index 100% rename from src/content/docs/d1/best-practices/use-indexes.mdx rename to src/content/docs/d1/features/use-indexes.mdx diff --git a/src/content/docs/d1/get-started.mdx b/src/content/docs/d1/get-started.mdx index 07b5cd49780242..5a98c3d4690a98 100644 --- a/src/content/docs/d1/get-started.mdx +++ b/src/content/docs/d1/get-started.mdx @@ -6,16 +6,7 @@ sidebar: order: 2 --- -import { - Render, - PackageManagers, - Steps, - FileTree, - Tabs, - TabItem, - TypeScriptExample, - WranglerConfig -} from "~/components"; +import { Render, PackageManagers, Steps, FileTree, Tabs, TabItem, TypeScriptExample, WranglerConfig } from "~/components"; This guide instructs you through: diff --git a/src/content/docs/d1/index.mdx b/src/content/docs/d1/index.mdx index f6992adcf96740..dd42caf3b21271 100644 --- a/src/content/docs/d1/index.mdx +++ b/src/content/docs/d1/index.mdx @@ -27,7 +27,7 @@ D1 is Cloudflare's managed, serverless database with SQLite's SQL semantics, bui 
D1 is designed for horizontal scale out across multiple, smaller (10 GB) databases, such as per-user, per-tenant or per-entity databases. D1 allows you to build applications with thousands of databases at no extra cost for isolating with multiple databases. D1 pricing is based only on query and storage costs. -Create your first D1 database by [following the Get started guide](/d1/get-started/), learn how to [import data into a database](/d1/best-practices/import-export-data/), and how to [interact with your database](/d1/worker-api/) directly from [Workers](/workers/) or [Pages](/pages/functions/bindings/#d1-databases). +Create your first D1 database by [following the Get started guide](/d1/get-started/), learn how to [import data into a database](/d1/features/import-export-data/), and how to [interact with your database](/d1/worker-api/) directly from [Workers](/workers/) or [Pages](/pages/functions/bindings/#d1-databases). *** diff --git a/src/content/docs/d1/observability/index.mdx b/src/content/docs/d1/observability/index.mdx index 35c902c3124a36..e6f2e7cc962fed 100644 --- a/src/content/docs/d1/observability/index.mdx +++ b/src/content/docs/d1/observability/index.mdx @@ -2,7 +2,7 @@ title: Observability pcx_content_type: navigation sidebar: - order: 9 + order: 10 group: hideIndex: true --- diff --git a/src/content/docs/d1/observability/metrics-analytics.mdx b/src/content/docs/d1/observability/metrics-analytics.mdx index 50a0d51971087e..b79cb4130c7f10 100644 --- a/src/content/docs/d1/observability/metrics-analytics.mdx +++ b/src/content/docs/d1/observability/metrics-analytics.mdx @@ -32,7 +32,7 @@ Metrics can be queried (and are retained) for the past 31 days. D1 returns the number of rows read, rows written (or both) in response to each individual query via [the Workers Binding API](/d1/worker-api/return-object/). Row counts are a precise count of how many rows were read (scanned) or written by that query. 
-Inspect row counts to understand the performance and cost of a given query, including whether you can reduce the rows read [using indexes](/d1/best-practices/use-indexes/). Use query counts to understand the total volume of traffic against your databases and to discern which databases are actively in-use. +Inspect row counts to understand the performance and cost of a given query, including whether you can reduce the rows read [using indexes](/d1/features/use-indexes/). Use query counts to understand the total volume of traffic against your databases and to discern which databases are actively in-use. Refer to the [Pricing documentation](/d1/platform/pricing/) for more details on how rows are counted. @@ -322,5 +322,5 @@ npx wrangler d1 insights --sort-type=sum --sort-by=writes --limi :::note The quantity `queryEfficiency` measures how efficient your query was. It is calculated as: the number of rows returned divided by the number of rows read. -Generally, you should try to get `queryEfficiency` as close to `1` as possible. Refer to [Use indexes](/d1/best-practices/use-indexes/) for more information on efficient querying. +Generally, you should try to get `queryEfficiency` as close to `1` as possible. Refer to [Use indexes](/d1/features/use-indexes/) for more information on efficient querying. 
::: \ No newline at end of file diff --git a/src/content/docs/d1/platform/index.mdx b/src/content/docs/d1/platform/index.mdx index 1720605aee0206..adf92ca4d07c8b 100644 --- a/src/content/docs/d1/platform/index.mdx +++ b/src/content/docs/d1/platform/index.mdx @@ -2,7 +2,7 @@ pcx_content_type: navigation title: Platform sidebar: - order: 12 + order: 13 group: hideIndex: true --- diff --git a/src/content/docs/d1/platform/pricing.mdx b/src/content/docs/d1/platform/pricing.mdx index b8a77fc2f021b5..e6ecfc3a2b43e0 100644 --- a/src/content/docs/d1/platform/pricing.mdx +++ b/src/content/docs/d1/platform/pricing.mdx @@ -64,7 +64,7 @@ Yes, any queries you run against your database, including inserting (`INSERT`) e ### Can I use an index to reduce the number of rows read by a query? -Yes, you can use an index to reduce the number of rows read by a query. [Creating indexes](/d1/best-practices/use-indexes/) for your most queried tables and filtered columns reduces how much data is scanned and improves query performance at the same time. If you have a read-heavy workload (most common), this can be particularly advantageous. Writing to columns referenced in an index will add at least one (1) additional row written to account for updating the index, but this is typically offset by the reduction in rows read due to the benefits of an index. +Yes, you can use an index to reduce the number of rows read by a query. [Creating indexes](/d1/features/use-indexes/) for your most queried tables and filtered columns reduces how much data is scanned and improves query performance at the same time. If you have a read-heavy workload (most common), this can be particularly advantageous. Writing to columns referenced in an index will add at least one (1) additional row written to account for updating the index, but this is typically offset by the reduction in rows read due to the benefits of an index. 
### Does a freshly created database, and/or an empty table with no rows, contribute to my storage? diff --git a/src/content/docs/d1/reference/backups.mdx b/src/content/docs/d1/reference/backups.mdx index 0b26855c41a4f5..c64361d7c32edf 100644 --- a/src/content/docs/d1/reference/backups.mdx +++ b/src/content/docs/d1/reference/backups.mdx @@ -86,7 +86,7 @@ wrangler d1 backup download example-db 123a81a2-ab91-4c2e-8ebc-64d69633faf1 🌀 Done! ``` -The database backup will be download to the current working directory in native SQLite3 format. To import a local database, read [the documentation on importing data](/d1/best-practices/import-export-data/) to D1. +The database backup will be downloaded to the current working directory in native SQLite3 format. To import a local database, read [the documentation on importing data](/d1/features/import-export-data/) to D1. ## Restoring a backup diff --git a/src/content/docs/d1/reference/generated-columns.mdx b/src/content/docs/d1/reference/generated-columns.mdx index 6ad0527ab31133..21b0f54781922a 100644 --- a/src/content/docs/d1/reference/generated-columns.mdx +++ b/src/content/docs/d1/reference/generated-columns.mdx @@ -10,7 +10,7 @@ D1 allows you to define generated columns based on the values of one or more oth This allows you to normalize your data as you write to it or read it from a table, making it easier to query and reducing the need for complex application logic. -Generated columns can also have [indexes defined](/d1/best-practices/use-indexes/) against them, which can dramatically increase query performance over frequently queried fields. +Generated columns can also have [indexes defined](/d1/features/use-indexes/) against them, which can dramatically increase query performance over frequently queried fields.
## Types of generated columns diff --git a/src/content/docs/d1/reference/index.mdx b/src/content/docs/d1/reference/index.mdx index adcefc0bc39146..99f8aa882f71c4 100644 --- a/src/content/docs/d1/reference/index.mdx +++ b/src/content/docs/d1/reference/index.mdx @@ -2,7 +2,7 @@ pcx_content_type: navigation title: Reference sidebar: - order: 13 + order: 14 group: hideIndex: true --- diff --git a/src/content/docs/d1/sql-api/foreign-keys.mdx b/src/content/docs/d1/sql-api/foreign-keys.mdx index 0472d41719c26b..0b71b5d6be2302 100644 --- a/src/content/docs/d1/sql-api/foreign-keys.mdx +++ b/src/content/docs/d1/sql-api/foreign-keys.mdx @@ -16,7 +16,7 @@ By default, D1 enforces that foreign key constraints are valid within all querie ## Defer foreign key constraints -When running a [query](/d1/worker-api/), [migration](/d1/reference/migrations/) or [importing data](/d1/best-practices/import-export-data/) against a D1 database, there may be situations in which you need to disable foreign key validation during table creation or changes to your schema. +When running a [query](/d1/worker-api/), [migration](/d1/reference/migrations/) or [importing data](/d1/features/import-export-data/) against a D1 database, there may be situations in which you need to disable foreign key validation during table creation or changes to your schema. D1's foreign key enforcement is equivalent to SQLite's `PRAGMA foreign_keys = on` directive. Because D1 runs every query inside an implicit transaction, user queries cannot change this during a query or migration. 
diff --git a/src/content/docs/d1/sql-api/index.mdx b/src/content/docs/d1/sql-api/index.mdx index 8b74fb521d38d0..3228a42dd6a9bc 100644 --- a/src/content/docs/d1/sql-api/index.mdx +++ b/src/content/docs/d1/sql-api/index.mdx @@ -2,7 +2,7 @@ title: SQL API pcx_content_type: navigation sidebar: - order: 5 + order: 6 group: hideIndex: true --- diff --git a/src/content/docs/d1/sql-api/query-json.mdx b/src/content/docs/d1/sql-api/query-json.mdx index 55f53a6781f4fe..c95fd24a6ce85e 100644 --- a/src/content/docs/d1/sql-api/query-json.mdx +++ b/src/content/docs/d1/sql-api/query-json.mdx @@ -78,7 +78,7 @@ ERROR 9015: SQL engine error: query error: Error code 1: SQL error or missing da D1's support for [generated columns](/d1/reference/generated-columns/) allows you to create dynamic columns that are generated based on the values of other columns, including extracted or calculated values of JSON data. -These columns can be queried like any other column, and can have [indexes](/d1/best-practices/use-indexes/) defined on them. If you have JSON data that you frequently query and filter over, creating a generated column and an index can dramatically improve query performance. +These columns can be queried like any other column, and can have [indexes](/d1/features/use-indexes/) defined on them. If you have JSON data that you frequently query and filter over, creating a generated column and an index can dramatically improve query performance. For example, to define a column based on a value within a larger JSON object, use the `AS` keyword combined with a [JSON function](#supported-functions) to generate a typed column: diff --git a/src/content/docs/d1/sql-api/sql-statements.mdx b/src/content/docs/d1/sql-api/sql-statements.mdx index 4419395b5e7cde..136a901390bfb2 100644 --- a/src/content/docs/d1/sql-api/sql-statements.mdx +++ b/src/content/docs/d1/sql-api/sql-statements.mdx @@ -73,6 +73,6 @@ results: [...] 
## Related resources -- Learn [how to create indexes](/d1/best-practices/use-indexes/#list-indexes) in D1. +- Learn [how to create indexes](/d1/features/use-indexes/#list-indexes) in D1. - Use D1's [JSON functions](/d1/sql-api/query-json/) to query JSON data. - Use [`wrangler dev`](/workers/wrangler/commands/#dev) to run your Worker and D1 locally and debug issues before deploying. diff --git a/src/content/docs/d1/tutorials/d1-and-prisma-orm/index.mdx b/src/content/docs/d1/tutorials/d1-and-prisma-orm/index.mdx index 19c11b7aca284b..2f77181a11431d 100644 --- a/src/content/docs/d1/tutorials/d1-and-prisma-orm/index.mdx +++ b/src/content/docs/d1/tutorials/d1-and-prisma-orm/index.mdx @@ -193,8 +193,8 @@ CREATE UNIQUE INDEX "User_email_key" ON "User"("email"); You now need to use the `wrangler d1 migrations apply` command to send this SQL statement to D1. This command accepts two options: -- `--local`: Executes the statement against a _local_ version of D1. This local version of D1 is a SQLite database file that will be located in the `.wrangler/state` directory of your project. Use this approach when you want to develop and test your Worker on your local machine. Refer to [Local development](/d1/best-practices/local-development/) to learn more. -- `--remote`: Executes the statement against your _remote_ version of D1. This version is used by your _deployed_ Cloudflare Workers. Refer to [Remote development](/d1/best-practices/remote-development/) to learn more. +- `--local`: Executes the statement against a _local_ version of D1. This local version of D1 is a SQLite database file that will be located in the `.wrangler/state` directory of your project. Use this approach when you want to develop and test your Worker on your local machine. Refer to [Local development](/d1/features/local-development/) to learn more. +- `--remote`: Executes the statement against your _remote_ version of D1. This version is used by your _deployed_ Cloudflare Workers. 
Refer to [Remote development](/d1/features/remote-development/) to learn more. In this tutorial, you will do local and remote development. You will test the Worker locally and deploy your Worker afterwards. Open your terminal, and run both commands: diff --git a/src/content/docs/d1/tutorials/import-to-d1-with-rest-api/index.mdx b/src/content/docs/d1/tutorials/import-to-d1-with-rest-api/index.mdx index 60af7188b174e9..67bc77d872a91d 100644 --- a/src/content/docs/d1/tutorials/import-to-d1-with-rest-api/index.mdx +++ b/src/content/docs/d1/tutorials/import-to-d1-with-rest-api/index.mdx @@ -449,7 +449,7 @@ In the previous steps, you have created functions to execute various processes i You will now see your target D1 table populated with the example data. :::note -If you encounter the `statement too long` error, you would need to break your SQL command into smaller chunks and upload them in batches. You can learn more about this error in the [D1 documentation](/d1/best-practices/import-export-data/#resolve-statement-too-long-error). +If you encounter the `statement too long` error, you would need to break your SQL command into smaller chunks and upload them in batches. You can learn more about this error in the [D1 documentation](/d1/features/import-export-data/#resolve-statement-too-long-error). 
::: ## Summary diff --git a/src/content/docs/d1/tutorials/index.mdx b/src/content/docs/d1/tutorials/index.mdx index 11a1a82e0f5331..4ae5ba9499914e 100644 --- a/src/content/docs/d1/tutorials/index.mdx +++ b/src/content/docs/d1/tutorials/index.mdx @@ -4,7 +4,7 @@ pcx_content_type: navigation title: Tutorials hideChildren: true sidebar: - order: 11 + order: 12 --- diff --git a/src/content/docs/d1/tutorials/test-read-replication/index.mdx b/src/content/docs/d1/tutorials/test-read-replication/index.mdx new file mode 100644 index 00000000000000..e6b513b5333838 --- /dev/null +++ b/src/content/docs/d1/tutorials/test-read-replication/index.mdx @@ -0,0 +1,149 @@ +--- +updated: 2025-03-04 +difficulty: Beginner +content_type: Tutorial +pcx_content_type: tutorial +title: Test D1 read replication +products: + - D1 +languages: + - JavaScript + - TypeScript + - SQL +--- + +import { Render, Steps, Tabs, TabItem, FileTree } from "~/components"; + +In this tutorial, you will create a basic Worker script to test the effect of [D1 read replication](/d1/features/read-replication/), and see the difference in the request latency. + +## Prerequisites + +This tutorial assumes you have completed and understood the [D1 get started](/d1/get-started/) tutorial. + +## 1. Create a Worker + +Create a new Worker as the means to query your database. + +```sh +npm create cloudflare@latest -- <NEW_WORKER_NAME> +``` + +## 2. Create a database located far away + +Create a database located far away by specifying a [location hint](/d1/configuration/data-location/) which is far away from the region you are located in. + +```sh +npx wrangler d1 create <DATABASE_NAME> --location=apac +``` +```sh output +✅ Successfully created DB '<DATABASE_NAME>' in region APAC +Created your new D1 database. + +{ + "d1_databases": [ + { + "binding": "DB", + "database_name": "<DATABASE_NAME>", + "database_id": "<UNIQUE_ID_OF_DATABASE>" + } + ] +} +``` + +This creates a new D1 database and outputs the [binding](/workers/runtime-apis/bindings/) configuration needed in the next step. + +## 3. 
Bind your D1 database to your Worker + +Modify your `wrangler.jsonc` file to include the output of the CLI to bind your D1 database to your Worker. + +## 4. Populate the D1 database + +Populate your database with the table from the [D1 get started](/d1/get-started/) tutorial. + + +1. Copy the following code and save it as a `schema.sql` file in the Worker directory you created in step 1: + + ```sql + DROP TABLE IF EXISTS Customers; + CREATE TABLE IF NOT EXISTS Customers (CustomerId INTEGER PRIMARY KEY, CompanyName TEXT, ContactName TEXT); + INSERT INTO Customers (CustomerID, CompanyName, ContactName) VALUES (1, 'Alfreds Futterkiste', 'Maria Anders'), (4, 'Around the Horn', 'Thomas Hardy'), (11, 'Bs Beverages', 'Victoria Ashworth'), (13, 'Bs Beverages', 'Random Name'); + ``` + +2. Initialize your database to run remotely. + + ```sh + npx wrangler d1 execute <DATABASE_NAME> --remote --file=./schema.sql + ``` + + +## 5. Write a Worker file which queries the table + +Write a Worker file which queries the table and outputs both the results and the query latency. + +```js +export default { + async fetch(request, env) { + const { pathname } = new URL(request.url); + const companyName1 = `Bs Beverages`; + const stmt = env.DB.prepare(`SELECT * FROM Customers WHERE CompanyName = ?`); + const session = env.DB.withSession("first-unconstrained"); + + if (pathname === `/run`) { + const tsStart1 = Date.now(); + const { results, meta } = await stmt.bind(companyName1).run(); + const d1Duration1 = Date.now() - tsStart1; + return Response.json({ results, meta, d1Duration1 }); + + } else if (pathname === `/withsession`) { + const tsStart2 = Date.now(); + const { results, meta } = await session.prepare(`SELECT * FROM Customers WHERE CompanyName = ?`).bind(companyName1).run(); + const d1Duration2 = Date.now() - tsStart2; + return Response.json({ results, meta, d1Duration2 }); + } + return new Response( + `Welcome to the D1 read replication demo! 
+ + Add one of the following slugs below to see the effects of using D1 read replication. + + \n/run - Queries the table without using read replication + + \n/withsession - Queries the table using read replication (using "first-unconstrained") + + \nUse the two options to compare the difference in query latency.` + ); + } + }; +``` + +## 6. Deploy Worker + +Deploy your Worker. + +```sh +npx wrangler deploy +``` + +## 7. Compare query latency + +Once deployed, you can compare the query latency when using read replication. + +- Use the `/run` URL to send a read query without read replication. +- Use the `/withsession` URL to send a read query with read replication. + +For both queries, the Worker script returns the `meta` object, which contains `served-by-primary` boolean field. This field indicates whether your request was served by the primary database instance. + +The `d1Duration` variable shows the query latency. + +## Summary + +By completing this tutorial, you have: + +1. Created a D1 database using a location hint. +2. Created a Worker script which uses D1 Sessions to use read replication. +3. Deployed the Worker to test the difference in query latency when using read replication. + +## Related resources + +- [D1 read replication](/d1/features/read-replication) +- [D1 Sessions Workers Binding API](/d1/worker-api/d1-database#withsession) +- [D1 read replication demo application](/d1/demos/) \ No newline at end of file diff --git a/src/content/docs/d1/worker-api/d1-database.mdx b/src/content/docs/d1/worker-api/d1-database.mdx index a870a5c2f94308..7d363929a54421 100644 --- a/src/content/docs/d1/worker-api/d1-database.mdx +++ b/src/content/docs/d1/worker-api/d1-database.mdx @@ -241,4 +241,47 @@ return new Response(dump, { #### Return values -- None. \ No newline at end of file +- None. + +### `withSession` + +Starts a D1 Session which activates read replication. 
+ +```ts +const session = env.DB.withSession("<constraint> or <bookmark>"); +``` + +#### Parameters + +- constraint: + - The starting condition for the D1 Session. `<constraint>` can be one of two: + - `first-primary`: Directs the first query in the Session (whether read or write) to the primary database instance. Use this option if you need to start the Session with the most up-to-date data from the primary database instance. + - `first-unconstrained`: Directs the first query in the Session (whether read or write) to any database instance. Use this option if you do not need to start the Session with the most up-to-date data, and wish to prioritize minimizing query latency from the very start of the Session. + +- bookmark: + - A [`bookmark`](/d1/reference/time-travel/#bookmarks) from an existing D1 Session. This allows you to continue the existing Session using the `bookmark` as a reference point. + +- If no parameters are provided, `withSession` defaults to using the `first-unconstrained` parameter. + +#### Return values + +- D1DatabaseSession: + - An object which contains the methods [`prepare()`](/d1/worker-api/d1-database#prepare) and [`getBookmark`](/d1/worker-api/d1-database#getbookmark). + +### `getBookmark` + +Retrieves the `bookmark` from the D1 Session. + +```ts +const session = db.withSession("first-primary"); +const bookmark = session.getBookmark(); +``` + +#### Parameters + +- None + +#### Return values + +- bookmark: + - A [`bookmark`](/d1/reference/time-travel/#bookmarks) which identifies the latest version of the database seen by the write query. 
\ No newline at end of file diff --git a/src/content/docs/d1/worker-api/index.mdx b/src/content/docs/d1/worker-api/index.mdx index 376ae96925e590..343460d33f248e 100644 --- a/src/content/docs/d1/worker-api/index.mdx +++ b/src/content/docs/d1/worker-api/index.mdx @@ -2,7 +2,7 @@ pcx_content_type: navigation title: Workers Binding API sidebar: - order: 4 + order: 5 --- import { DirectoryListing, Details, Steps } from "~/components"; @@ -81,16 +81,17 @@ Replace the contents of your `index.js` file with the code below to view the eff export default { async fetch(request, env) { const { pathname } = new URL(request.url); - // if (pathname === "/api/beverages") { // // If you did not use `DB` as your binding name, change it here // const { results } = await env.DB.prepare("SELECT * FROM Customers WHERE CompanyName = ?",).bind("Bs Beverages").all(); // return Response.json(results); // } - const companyName1 = `Bs Beverages`; const companyName2 = `Around the Horn`; const stmt = env.DB.prepare(`SELECT * FROM Customers WHERE CompanyName = ?`); + const stmtMulti = env.DB.prepare(`SELECT * FROM Customers; SELECT * FROM Customers WHERE CompanyName = ?`); + const session = env.DB.withSession("first-primary") + const sessionStmt = session.prepare(`SELECT * FROM Customers WHERE CompanyName = ?`); if (pathname === `/RUN`){ const returnValue = await stmt.bind(companyName1).run(); @@ -114,6 +115,11 @@ export default { } else if (pathname === `/EXEC`){ const returnValue = await env.DB.exec(`SELECT * FROM Customers WHERE CompanyName = "Bs Beverages"`); return Response.json(returnValue); + + } else if (pathname === `/WITHSESSION`){ + const returnValue = await sessionStmt.bind(companyName1).run(); + console.log("You're now using D1 Sessions!") + return Response.json(returnValue); } return new Response( @@ -121,8 +127,7 @@ export default { \nChange the URL to test the various methods inside your index.js file.`, ); }, -}; - + }; ``` @@ -130,26 +135,22 @@ export default { 1. 
Navigate to your tutorial directory you created by following step 1. -2. Run `npx wrangler dev`. +2. Run `npx wrangler deploy`. ```sh - npx wrangler dev + npx wrangler deploy ``` ```sh output - ⛅️ wrangler 3.85.0 (update available 3.86.1) - ------------------------------------------------------- + ⛅️ wrangler 3.112.0 + -------------------- + Total Upload: 1.90 KiB / gzip: 0.59 KiB Your worker has access to the following bindings: - D1 Databases: - - DB: (DATABASE_ID) (local) - ⎔ Starting local server... - [wrangler:inf] Ready on http://localhost:8787 - ╭───────────────────────────╮ - │ [b] open a browser │ - │ [d] open devtools │ - │ [l] turn off local mode │ - │ [c] clear console │ - │ [x] to exit │ - ╰───────────────────────────╯ + - DB: DATABASE_NAME (DATABASE_ID) + Uploaded WORKER_NAME (7.01 sec) + Deployed WORKER_NAME triggers (1.25 sec) + https://WORKER_NAME.SUBDOMAIN.workers.dev + Current Version ID: VERSION_ID ``` 3. Open a browser at the specified address. diff --git a/src/content/docs/pages/functions/bindings.mdx b/src/content/docs/pages/functions/bindings.mdx index b613d9c59ced87..1da4a2f9fdb2a1 100644 --- a/src/content/docs/pages/functions/bindings.mdx +++ b/src/content/docs/pages/functions/bindings.mdx @@ -265,7 +265,7 @@ You can interact with your D1 database bindings locally in one of two ways: - Configure your Pages project's Wrangler file and run [`npx wrangler pages dev`](/workers/wrangler/commands/#dev-1). - Pass arguments to `wrangler pages dev` directly. -To interact with a D1 database via the Wrangler CLI while [developing locally](/d1/best-practices/local-development/#develop-locally-with-pages), add `--d1 <BINDING_NAME>=<DATABASE_ID>` to the `wrangler pages dev` command. +To interact with a D1 database via the Wrangler CLI while [developing locally](/d1/features/local-development/#develop-locally-with-pages), add `--d1 <BINDING_NAME>=<DATABASE_ID>` to the `wrangler pages dev` command. 
If your D1 database is bound to your Pages Function via the `NORTHWIND_DB` binding and the `database_id` in your Wrangler file is `xxxx-xxxx-xxxx-xxxx-xxxx`, access this database in local development by running: diff --git a/src/content/docs/workers/local-development.mdx b/src/content/docs/workers/local-development.mdx index 92ba77458c4613..8a81149400ffc5 100644 --- a/src/content/docs/workers/local-development.mdx +++ b/src/content/docs/workers/local-development.mdx @@ -137,5 +137,5 @@ There is a bug associated with how outgoing requests are handled when using `wra ## Related resources -- [D1 local development](/d1/best-practices/local-development/) - The official D1 guide to local development and testing. +- [D1 local development](/d1/features/local-development/) - The official D1 guide to local development and testing. - [DevTools](/workers/observability/dev-tools) - Guides to using DevTools to debug your Worker locally. diff --git a/src/content/glossary/d1.yaml b/src/content/glossary/d1.yaml index cffb5e0863db0f..bf15a570b7d2c4 100644 --- a/src/content/glossary/d1.yaml +++ b/src/content/glossary/d1.yaml @@ -1,6 +1,21 @@ --- productName: D1 entries: + - term: primary database instance + general_definition: |- + the primary database instance is the original instance of a database. This database instance only exists in one location in the world. + - term: request latency + general_definition: |- + the request latency is the time it takes for a request from a user to be returned by D1. + - term: read replica + general_definition: |- + a read replica is an “almost up-to-date” copy of the primary database instance which only serves read requests. There may be multiple read replicas for a single primary database instance. + - term: replica lag + general_definition: |- + the time delay between a write to the primary database instance and that write becoming visible in a read replica. 
- term: "query planner" general_definition: |- A component in a database management system which takes a user query and generates the most efficient plan of executing that query (the query plan). For example, the query planner decides which indices to use, or which table to access first. + - term: "session" + general_definition: |- + A Session encapsulates all the queries from one logical session for your application. For example, a Session may correspond to all queries coming from a particular web browser session. \ No newline at end of file diff --git a/src/content/partials/d1/use-pragma-statements.mdx b/src/content/partials/d1/use-pragma-statements.mdx index 94e86aa3c423a5..efb58c6ceb89f2 100644 --- a/src/content/partials/d1/use-pragma-statements.mdx +++ b/src/content/partials/d1/use-pragma-statements.mdx @@ -418,7 +418,7 @@ Attempts to optimize all schemas in a database by running the `ANALYZE` command When `PRAGMA optimize` runs `ANALYZE`, it sets a limit to ensure the command does not take too long to execute. Alternatively, `PRAGMA optimize` may deem it unnecessary to run `ANALYZE` (for example, if the schema has not changed significantly). In this scenario, no optimizations are made. -We recommend running this command after making any changes to the schema (for example, after [creating an index](/d1/best-practices/use-indexes/)). +We recommend running this command after making any changes to the schema (for example, after [creating an index](/d1/features/use-indexes/)). :::note Currently, D1 does not support `PRAGMA optimize(-1)`. 
diff --git a/src/content/partials/durable-objects/durable-objects-vs-d1.mdx b/src/content/partials/durable-objects/durable-objects-vs-d1.mdx index dd23ebb36a3bfd..64fe9a6612287f 100644 --- a/src/content/partials/durable-objects/durable-objects-vs-d1.mdx +++ b/src/content/partials/durable-objects/durable-objects-vs-d1.mdx @@ -10,7 +10,7 @@ Cloudflare Workers offers a SQLite-backed serverless database product - [D1](/d1 D1 fits into a familiar architecture for developers, where application servers communicate with a database over the network. Application servers are typically Workers; however, D1 also supports external, non-Worker access via an [HTTP API](https://developers.cloudflare.com/api/resources/d1/subresources/database/methods/query/), which helps unlock [third-party tooling](/d1/reference/community-projects/#_top) support for D1. -D1 aims for a "batteries included" feature set, including the above HTTP API, [database schema management](/d1/reference/migrations/#_top), [data import/export](/d1/best-practices/import-export-data/), and [database query insights](/d1/observability/metrics-analytics/#query-insights). +D1 aims for a "batteries included" feature set, including the above HTTP API, [database schema management](/d1/reference/migrations/#_top), [data import/export](/d1/features/import-export-data/), and [database query insights](/d1/observability/metrics-analytics/#query-insights). With D1, your application code and SQL database queries are not colocated which can impact application performance. If performance is a concern with D1, Workers has [Smart Placement](/workers/configuration/smart-placement/#_top) to dynamically run your Worker in the best location to reduce total Worker request latency, considering everything your Worker talks to, including D1. 
diff --git a/src/content/partials/workers/d1-pricing.mdx b/src/content/partials/workers/d1-pricing.mdx index 564b86f14490f2..f8894d47ab8622 100644 --- a/src/content/partials/workers/d1-pricing.mdx +++ b/src/content/partials/workers/d1-pricing.mdx @@ -14,11 +14,11 @@ To accurately track your usage, use the [meta object](/d1/worker-api/return-obje ### Definitions -1. Rows read measure how many rows a query reads (scans), regardless of the size of each row. For example, if you have a table with 5000 rows and run a `SELECT * FROM table` as a full table scan, this would count as 5,000 rows read. A query that filters on an [unindexed column](/d1/best-practices/use-indexes/) may return fewer rows to your Worker, but is still required to read (scan) more rows to determine which subset to return. +1. Rows read measure how many rows a query reads (scans), regardless of the size of each row. For example, if you have a table with 5000 rows and run a `SELECT * FROM table` as a full table scan, this would count as 5,000 rows read. A query that filters on an [unindexed column](/d1/features/use-indexes/) may return fewer rows to your Worker, but is still required to read (scan) more rows to determine which subset to return. 2. Rows written measure how many rows were written to D1 database. Write operations include `INSERT`, `UPDATE`, and `DELETE`. Each of these operations contribute towards rows written. A query that `INSERT` 10 rows into a `users` table would count as 10 rows written. 3. DDL operations (for example, `CREATE`, `ALTER`, and `DROP`) are used to define or modify the structure of a database. They may contribute to a mix of read rows and write rows. Ensure you are accurately tracking your usage through the available tools ([meta object](/d1/worker-api/return-object/), [GraphQL Analytics API](/d1/observability/metrics-analytics/#query-via-the-graphql-api), or the [Cloudflare dashboard](https://dash.cloudflare.com/?to=/:account/workers/d1/)). 4. 
Row size or the number of columns in a row does not impact how rows are counted. A row that is 1 KB and a row that is 100 KB both count as one row. -5. Defining [indexes](/d1/best-practices/use-indexes/) on your table(s) reduces the number of rows read by a query when filtering on that indexed field. For example, if the `users` table has an index on a timestamp column `created_at`, the query `SELECT * FROM users WHERE created_at > ?1` would only need to read a subset of the table. +5. Defining [indexes](/d1/features/use-indexes/) on your table(s) reduces the number of rows read by a query when filtering on that indexed field. For example, if the `users` table has an index on a timestamp column `created_at`, the query `SELECT * FROM users WHERE created_at > ?1` would only need to read a subset of the table. 6. Indexes will add an additional written row when writes include the indexed column, as there are two rows written: one to the table itself, and one to the index. The performance benefit of an index and reduction in rows read will, in nearly all cases, offset this additional write. 7. Storage is based on gigabytes stored per month, and is based on the sum of all databases in your account. Tables and indexes both count towards storage consumed. 8. Free limits reset daily at 00:00 UTC. Monthly included limits reset based on your monthly subscription renewal date, which is determined by the day you first subscribed. diff --git a/src/content/release-notes/d1.yaml b/src/content/release-notes/d1.yaml index 84ab925c8566ef..23c47142f46af2 100644 --- a/src/content/release-notes/d1.yaml +++ b/src/content/release-notes/d1.yaml @@ -94,7 +94,7 @@ entries: * Developers with a Workers Paid plan now have a 10GB GB per-database limit (up from 2GB), which can be combined with existing limit of 50,000 databases per account. * Developers with a Workers Free plan retain the 500 MB per-database limit and can create up to 10 databases per account. 
- * D1 databases can be [exported](/d1/best-practices/import-export-data/#export-an-existing-d1-database) as a SQL file. + * D1 databases can be [exported](/d1/features/import-export-data/#export-an-existing-d1-database) as a SQL file. - publish_date: "2024-03-12" title: Change in `wrangler d1 execute` default @@ -174,7 +174,7 @@ entries: - publish_date: "2023-08-19" title: Row count now returned per query description: |- - D1 now returns a count of `rows_written` and `rows_read` for every query executed, allowing you to assess the cost of query for both [pricing](/d1/platform/pricing/) and [index optimization](/d1/best-practices/use-indexes/) purposes. + D1 now returns a count of `rows_written` and `rows_read` for every query executed, allowing you to assess the cost of query for both [pricing](/d1/platform/pricing/) and [index optimization](/d1/features/use-indexes/) purposes. The `meta` object returned in [D1's Client API](/d1/worker-api/return-object/#d1result) contains a total count of the rows read (`rows_read`) and rows written (`rows_written`) by that query. For example, a query that performs a full table scan (for example, `SELECT * FROM users`) from a table with 5000 rows would return a `rows_read` value of `5000`: ```json