diff --git a/src/content/docs/hyperdrive/configuration/connect-to-postgres.mdx b/src/content/docs/hyperdrive/configuration/connect-to-postgres.mdx
index 70a899aa0d0690..37b419f5e2dff6 100644
--- a/src/content/docs/hyperdrive/configuration/connect-to-postgres.mdx
+++ b/src/content/docs/hyperdrive/configuration/connect-to-postgres.mdx
@@ -5,7 +5,7 @@ sidebar:
   order: 3
 ---
 
-import { TabItem, Tabs } from "~/components";
+import { TabItem, Tabs, Render } from "~/components";
 
 Hyperdrive supports PostgreSQL and PostgreSQL-compatible databases, [popular drivers](#supported-drivers) and Object Relational Mapper (ORM) libraries that use those drivers.
 
@@ -86,50 +86,7 @@ The following examples show you how to:
 
 The following Workers code shows you how to use [Postgres.js](https://github.com/porsager/postgres) with Hyperdrive.
 
-Install the Postgres.js driver:
-
-```sh
-npm install postgres
-```
-
-Create a new `sql` instance and pass the Hyperdrive parameters:
-
-
-
-```ts
-import postgres from "postgres";
-
-export interface Env {
-	// If you set another name in wrangler.toml as the value for 'binding',
-	// replace "HYPERDRIVE" with the variable name you defined.
-	HYPERDRIVE: Hyperdrive;
-}
-
-export default {
-	async fetch(request: Request, env: Env, ctx: ExecutionContext) {
-		// NOTE: if `prepare: false` is passed when connecting, performance will
-		// be slower but still correctly supported.
-		const sql = postgres(env.HYPERDRIVE.connectionString);
-
-		try {
-			// A very simple test query
-			const result = await sql`select * from pg_tables`;
-
-			// Clean up the client, ensuring we don't kill the worker before that is
-			// completed.
-			ctx.waitUntil(sql.end());
-
-			// Return result rows as JSON
-			return Response.json({ result: result });
-		} catch (e) {
-			console.log(e);
-			return Response.json({ error: e.message }, { status: 500 });
-		}
-	},
-} satisfies ExportedHandler;
-```
-
-
+<Render file="use-postgresjs-to-make-query" product="hyperdrive" />
 
 ### node-postgres / pg
 
@@ -149,8 +106,6 @@ compatibility_flags = [ "nodejs_compat_v2"]
 
 Create a new `Client` instance and pass the Hyperdrive parameters:
 
-
-
 ```ts
 import { Client } from "pg";
 
@@ -187,8 +142,6 @@ export default {
 } satisfies ExportedHandler;
 ```
 
-
-
 ## Identify connections from Hyperdrive
 
 To identify active connections to your Postgres database server from Hyperdrive:
diff --git a/src/content/docs/hyperdrive/configuration/connect-to-private-database.mdx b/src/content/docs/hyperdrive/configuration/connect-to-private-database.mdx
new file mode 100644
index 00000000000000..bb6379df5dbf64
--- /dev/null
+++ b/src/content/docs/hyperdrive/configuration/connect-to-private-database.mdx
@@ -0,0 +1,162 @@
+---
+pcx_content_type: concept
+title: Connect to a private database using Tunnel
+sidebar:
+  order: 4
+  badge:
+    text: Beta
+---
+
+import { TabItem, Tabs, Render } from "~/components";
+
+Hyperdrive can securely connect to your private databases using [Cloudflare Tunnel](/cloudflare-one/connections/connect-networks/) and [Cloudflare Access](/cloudflare-one/policies/access/).
+
+## How it works
+
+When your database is isolated within a private network (such as a [virtual private cloud](https://www.cloudflare.com/learning/cloud/what-is-a-virtual-private-cloud) or an on-premise network), you must enable a secure connection from your network to Cloudflare.
+
+- [Cloudflare Tunnel](/cloudflare-one/connections/connect-networks/) is used to establish the secure tunnel connection.
+- [Cloudflare Access](/cloudflare-one/policies/access/) is used to restrict access to your tunnel such that only specific Hyperdrive configurations can access it.
+
+## Prerequisites
+
+- A database in your private network, [configured to use TLS/SSL](/hyperdrive/configuration/connect-to-postgres/#supported-tls-ssl-modes).
+- A hostname on your Cloudflare account, which will be used to route requests to your database.
+
+## 1. Create a tunnel in your private network
+
+### 1.1. Create a tunnel
+
+First, create a [Cloudflare Tunnel](/cloudflare-one/connections/connect-networks/) in your private network to establish a secure connection between your network and Cloudflare. Your network must be configured such that the tunnel has permissions to egress to the Cloudflare network and access the database within your network.
+
+### 1.2. Connect your database using a public hostname
+
+Your tunnel must be configured to use a public hostname so that Hyperdrive can route requests to it. If you don't have a hostname on Cloudflare yet, you will need to [register a new hostname](/registrar/get-started/register-domain/) or [add a zone](/dns/zone-setups/) to Cloudflare to proceed.
+
+1. In the **Public Hostnames** tab, choose a **Domain** and specify any subdomain or path information. This will be used in your Hyperdrive configuration to route to this tunnel.
+
+2. In the **Service** section, specify **Type** `TCP` and the address and configured port of your database, such as `localhost:5432`. This address will be used by the tunnel to route requests to your database.
+
+3. Select **Save tunnel**.
+
+:::note
+If you are setting up the tunnel through the CLI instead ([locally-managed tunnel](/cloudflare-one/connections/connect-networks/configure-tunnels/local-management/)), you will have to complete these steps manually. Follow the Cloudflare Zero Trust documentation to [add a public hostname to your tunnel](/cloudflare-one/connections/connect-networks/routing-to-tunnel/dns/) and [configure the public hostname to route to the address of your database](/cloudflare-one/connections/connect-networks/configure-tunnels/local-management/configuration-file/). An example `config.yml` is included at the end of this guide.
+:::
+
+## 2. Create a service token
+
+The service token will be used to restrict requests to the tunnel, and is needed for the next step.
+
+1. In [Zero Trust](https://one.dash.cloudflare.com), go to **Access** > **Service Auth** > **Service Tokens**.
+
+2. Select **Create Service Token**.
+
+3. Name the service token. The name allows you to easily identify events related to the token in the logs and to revoke the token individually.
+
+4. Set a **Service Token Duration** of `Non-expiring`. This prevents the service token from expiring, ensuring it can be used throughout the life of the Hyperdrive configuration.
+
+5. Select **Generate token**. You will see the generated Client ID and Client Secret for the service token, as well as their respective request headers.
+
+6. Copy the Access Client ID and Access Client Secret. These will be used when creating the Hyperdrive configuration.
+
+   :::caution
+   This is the only time Cloudflare Access will display the Client Secret. If you lose the Client Secret, you must regenerate the service token.
+   :::
+
+## 3. Create an Access application to secure the tunnel
+
+[Cloudflare Access](/cloudflare-one/policies/access/) will be used to verify that requests to the tunnel originate from Hyperdrive using the service token created above.
+
+1. In [Zero Trust](https://one.dash.cloudflare.com), go to **Access** > **Applications**.
+
+2. Select **Add an application**.
+
+3. Select **Self-hosted**.
+
+4. In **Application Configuration** > **Application name**, enter any name for the application.
+
+5. In **Application Configuration** > **Session Duration**, select `No duration, expires immediately`.
+
+6. In **Application Configuration** > **Application domain**, enter the subdomain and domain that was previously set for the tunnel application.
+
+7. In **Application Appearance**, disable the `Enable App in App Launcher` setting.
+
+8. In **Identity providers**, disable the `Accept all available identity providers` setting and select `Deselect all` identity providers.
+
+9. Select **Next**.
+
+10. Enter a name in the **Policy name** field and set the **Action** to `Service Auth`.
+
+11. In **Configure rules**, create an **Include** rule. Specify a **Selector** of `Service Token` and the **Value** of the service token you created in step [2. Create a service token](#2-create-a-service-token).
+
+12. Select **Next**.
+
+13. Select **Add application** to create the Access application.
+
+## 4. Create a Hyperdrive configuration
+
+To create a Hyperdrive configuration for your private database, you'll need to specify the Access application and Cloudflare Tunnel information upon creation.
+
+<Tabs> <TabItem label="wrangler">
+
+```sh
+# wrangler v3.65 and above required
+npx wrangler hyperdrive create <NAME-OF-HYPERDRIVE-CONFIG> --host=<PUBLIC-HOSTNAME-OF-TUNNEL> --user=<DATABASE-USER> --password=<DATABASE-PASSWORD> --database=<DATABASE-NAME> --access-client-id=<ACCESS-CLIENT-ID> --access-client-secret=<ACCESS-CLIENT-SECRET>
+```
+
+</TabItem> <TabItem label="Terraform">
+
+```terraform
+resource "cloudflare_hyperdrive_config" "<NAME-OF-HYPERDRIVE-CONFIG>" {
+  account_id = "<CLOUDFLARE-ACCOUNT-ID>"
+  name       = "<NAME-OF-HYPERDRIVE-CONFIG>"
+  origin = {
+    host                 = "<PUBLIC-HOSTNAME-OF-TUNNEL>"
+    database             = "<DATABASE-NAME>"
+    user                 = "<DATABASE-USER>"
+    password             = "<DATABASE-PASSWORD>"
+    scheme               = "postgres"
+    access_client_id     = "<ACCESS-CLIENT-ID>"
+    access_client_secret = "<ACCESS-CLIENT-SECRET>"
+  }
+  caching = {
+    disabled = false
+  }
+}
+```
+
+</TabItem> </Tabs>
+
+This will create a Hyperdrive configuration using the usual database information (database name, database host, database user, and database password).
+
+It will also set the Access Client ID and the Access Client Secret of the service token. When Hyperdrive makes requests to the tunnel, requests will be intercepted by Access and validated using the credentials of the service token.
+
+:::note
+When creating the Hyperdrive configuration for the private database, you must enter the `access-client-id` and the `access-client-secret`, and omit the `port`. Hyperdrive will route database messages to the public hostname of the tunnel, and the tunnel will rely on its service configuration (as configured in [1.2. Connect your database using a public hostname](#12-connect-your-database-using-a-public-hostname)) to route requests to the database within your private network.
+:::
+
+## 5. Query your Hyperdrive configuration from a Worker (optional)
+
+To test that your Hyperdrive configuration can reach the database through Cloudflare Tunnel and Access, use the Hyperdrive configuration ID in your Worker and deploy it.
+
+### Create a Hyperdrive binding
+
+<Render file="create-hyperdrive-binding" product="hyperdrive" />
+
+### Query your database using Postgres.js
+
+Use Postgres.js to send a test query to validate that the connection has been successful.
+
+<Render file="use-postgresjs-to-make-query" product="hyperdrive" />
+
+Now, deploy your Worker:
+
+```sh
+npx wrangler deploy
+```
+
+If you successfully receive the list of `pg_tables` from your database when you access your deployed Worker, your Hyperdrive configuration is now securely connecting to your private database using [Cloudflare Tunnel](/cloudflare-one/connections/connect-networks/) and [Cloudflare Access](/cloudflare-one/policies/access/).
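+
+### Example: locally-managed tunnel configuration
+
+If you run a locally-managed tunnel, the public hostname routing described in [1.2. Connect your database using a public hostname](#12-connect-your-database-using-a-public-hostname) is defined in the `cloudflared` configuration file rather than in the dashboard. The following is a minimal sketch only — the tunnel UUID, credentials path, hostname, and database port are placeholder values that you must replace with your own:
+
+```yaml
+# config.yml — placeholder values only
+tunnel: <TUNNEL-UUID>
+credentials-file: /path/to/<TUNNEL-UUID>.json
+
+ingress:
+  # Route the public hostname used by your Hyperdrive configuration
+  # to the Postgres database inside your private network.
+  - hostname: db.example.com
+    service: tcp://localhost:5432
+  # cloudflared requires a catch-all rule as the final entry.
+  - service: http_status:404
+```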
diff --git a/src/content/docs/hyperdrive/get-started.mdx b/src/content/docs/hyperdrive/get-started.mdx
index d6059b51cc68a3..31ed182886b7dc 100644
--- a/src/content/docs/hyperdrive/get-started.mdx
+++ b/src/content/docs/hyperdrive/get-started.mdx
@@ -160,21 +160,7 @@ Hyperdrive will attempt to connect to your database with the provided credential
 
 ## 4. Bind your Worker to Hyperdrive
 
-You must create a binding for your Worker to connect to your Hyperdrive configuration. [Bindings](/workers/runtime-apis/bindings/) allow your Workers to access resources, like D1, on the Cloudflare developer platform. You create bindings by updating your `wrangler.toml` file.
-
-To bind your Hyperdrive configuration to your Worker, add the following to the end of your `wrangler.toml` file:
-
-```toml
-[[hyperdrive]]
-binding = "HYPERDRIVE"
-id = "<YOUR-HYPERDRIVE-ID>" # the ID associated with the Hyperdrive you just created
-```
-
-Specifically:
-
-- The value (string) you set for the `name` (binding name) will be used to reference this database in your Worker. In this tutorial, name your binding `HYPERDRIVE`.
-- The binding must be [a valid JavaScript variable name](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Grammar_and_types#variables). For example, `binding = "hyperdrive"` or `binding = "productionDB"` would both be valid names for the binding.
-- Your binding is available in your Worker at `env.<BINDING_NAME>`.
+<Render file="create-hyperdrive-binding" product="hyperdrive" />
 
 ## 5. Run a query against your database
 
diff --git a/src/content/partials/hyperdrive/create-hyperdrive-binding.mdx b/src/content/partials/hyperdrive/create-hyperdrive-binding.mdx
new file mode 100644
index 00000000000000..fe687354266143
--- /dev/null
+++ b/src/content/partials/hyperdrive/create-hyperdrive-binding.mdx
@@ -0,0 +1,20 @@
+---
+{}
+
+---
+
+You must create a binding for your Worker to connect to your Hyperdrive configuration. [Bindings](/workers/runtime-apis/bindings/) allow your Workers to access resources, like D1, on the Cloudflare developer platform. You create bindings by updating your `wrangler.toml` file.
+
+To bind your Hyperdrive configuration to your Worker, add the following to the end of your `wrangler.toml` file:
+
+```toml
+[[hyperdrive]]
+binding = "HYPERDRIVE"
+id = "<YOUR-HYPERDRIVE-ID>" # the ID associated with the Hyperdrive you just created
+```
+
+Specifically:
+
+- The value (string) you set for the `name` (binding name) will be used to reference this database in your Worker. In this tutorial, name your binding `HYPERDRIVE`.
+- The binding must be [a valid JavaScript variable name](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Grammar_and_types#variables). For example, `binding = "hyperdrive"` or `binding = "productionDB"` would both be valid names for the binding.
+- Your binding is available in your Worker at `env.<BINDING_NAME>`.
diff --git a/src/content/partials/hyperdrive/use-postgresjs-to-make-query.mdx b/src/content/partials/hyperdrive/use-postgresjs-to-make-query.mdx
new file mode 100644
index 00000000000000..e1d0edb9bee1d7
--- /dev/null
+++ b/src/content/partials/hyperdrive/use-postgresjs-to-make-query.mdx
@@ -0,0 +1,44 @@
+---
+{}
+---
+
+Install the Postgres.js driver:
+
+```sh
+npm install postgres
+```
+
+Create a new `sql` instance and pass the Hyperdrive parameters:
+
+```ts
+import postgres from "postgres";
+
+export interface Env {
+	// If you set another name in wrangler.toml as the value for 'binding',
+	// replace "HYPERDRIVE" with the variable name you defined.
+	HYPERDRIVE: Hyperdrive;
+}
+
+export default {
+	async fetch(request: Request, env: Env, ctx: ExecutionContext) {
+		// NOTE: if `prepare: false` is passed when connecting, performance will
+		// be slower but still correctly supported.
+		const sql = postgres(env.HYPERDRIVE.connectionString);
+
+		try {
+			// A very simple test query
+			const result = await sql`select * from pg_tables LIMIT 10`;
+
+			// Clean up the client, ensuring we don't kill the worker before that is
+			// completed.
+			ctx.waitUntil(sql.end());
+
+			// Return result rows as JSON
+			return Response.json({ result: result });
+		} catch (e) {
+			console.log(e);
+			return Response.json({ error: e.message }, { status: 500 });
+		}
+	},
+} satisfies ExportedHandler;
+```
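+
+Postgres.js parameterizes any value you interpolate into its tagged template, so query values are sent to the database separately from the SQL text rather than concatenated into it. The fragment below is a usage sketch only — the `users` table and `userId` value are hypothetical and are not created by Hyperdrive:
+
+```ts
+// Hypothetical follow-up query: `userId` is sent as a bound parameter,
+// not interpolated into the SQL string.
+const userId = 1;
+const users = await sql`SELECT id, name FROM users WHERE id = ${userId}`;
+```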