diff --git a/.github/actions/build-docs/action.yml b/.github/actions/build-docs/action.yml deleted file mode 100644 index 5265bc781..000000000 --- a/.github/actions/build-docs/action.yml +++ /dev/null @@ -1,20 +0,0 @@ -name: Build Docs -description: Build docs pages inside /docs directory - -runs: - using: 'composite' - steps: - - name: Install dependencies - run: pnpm install - shell: bash - working-directory: docs - - - name: Build with Next.js - run: pnpm next build - shell: bash - working-directory: docs - - - name: Export static HTML with Next.js - run: pnpm next export - shell: bash - working-directory: docs diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 000000000..2ba1239bd --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,36 @@ +name: Deploy docs to GitHub Pages + +on: + workflow_dispatch: + +defaults: + run: + working-directory: docs + +permissions: + contents: read + pages: write + id-token: write + +concurrency: + group: 'pages' + cancel-in-progress: false + +jobs: + deploy: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Setup Pages + uses: actions/configure-pages@v5 + - name: Upload artifact + uses: actions/upload-pages-artifact@v3 + with: + path: 'docs/' + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 diff --git a/docs/404.html b/docs/404.html new file mode 100644 index 000000000..001bf6142 --- /dev/null +++ b/docs/404.html @@ -0,0 +1,54 @@ + + + + + + + OpenNext docs have moved + + + +
+

+        The docs have moved to their own repository. You can find them here:

+
+ + https://opennext.js.org + + + https://github.com/opennextjs/docs + +
+
+ + \ No newline at end of file diff --git a/docs/components/Footer.tsx b/docs/components/Footer.tsx deleted file mode 100644 index aa97edd6b..000000000 --- a/docs/components/Footer.tsx +++ /dev/null @@ -1,26 +0,0 @@ -import { SITE } from "../config"; -import styles from "../styles/Layout.module.css"; - -export default function Footer() { - return ( - - ); -} diff --git a/docs/components/Logo.svg.tsx b/docs/components/Logo.svg.tsx deleted file mode 100644 index 50777319c..000000000 --- a/docs/components/Logo.svg.tsx +++ /dev/null @@ -1,44 +0,0 @@ -export default function Logo() { - return ( - - - - - - - - - - - ); -} diff --git a/docs/config.ts b/docs/config.ts deleted file mode 100644 index fc97fdbd7..000000000 --- a/docs/config.ts +++ /dev/null @@ -1,17 +0,0 @@ -interface Site { - sst: string; - title: string; - github: string; - discord: string; - twitter: string; - description: string; -} - -export const SITE: Site = { - title: "OpenNext", - sst: "https://sst.dev", - discord: "https://sst.dev/discord", - twitter: "https://twitter.com/SST_dev", - github: "https://github.com/serverless-stack/open-next", - description: "Open source Next.js serverless adapter", -}; diff --git a/docs/index.html b/docs/index.html new file mode 100644 index 000000000..001bf6142 --- /dev/null +++ b/docs/index.html @@ -0,0 +1,54 @@ + + + + + + + OpenNext docs have moved + + + +
+

+        The docs have moved to their own repository. You can find them here:

+
+ + https://opennext.js.org + + + https://github.com/opennextjs/docs + +
+
+ + \ No newline at end of file diff --git a/docs/next.config.js b/docs/next.config.js deleted file mode 100644 index 0279b73f3..000000000 --- a/docs/next.config.js +++ /dev/null @@ -1,43 +0,0 @@ -/** @type {import('next').NextConfig} */ - -// let basePath = undefined; -// let assetPrefix = undefined; - -///** Deploys as a directory through GitHub Actions **/ -//const isGithubActions = process.env.GITHUB_ACTIONS || false -// -//if (isGithubActions) { -// // trim off `/` -// const repo = process.env.GITHUB_REPOSITORY.replace(/.*?\//, '') -// -// assetPrefix = `/${repo}/` -// basePath = `/${repo}` -//} -///** End GitHub Actions case **/ - -// const nextConfig = { -// images: { -// unoptimized: true, -// }, - -// swcMinify: true, -// reactStrictMode: true, - -// basePath: basePath, -// assetPrefix: assetPrefix, -// }; - -const withNextra = require("nextra")({ - theme: "nextra-theme-docs", - themeConfig: "./theme.config.jsx", -}); - -module.exports = withNextra({ - swcMinify: true, - eslint: { - ignoreDuringBuilds: true, - }, - images: { - unoptimized: true, - }, -}); diff --git a/docs/package.json b/docs/package.json deleted file mode 100644 index 08106f67d..000000000 --- a/docs/package.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "name": "open-next-docs", - "version": "0.1.0", - "private": true, - "scripts": { - "dev": "next dev", - "build": "next build", - "start": "next start", - "lint": "next lint" - }, - "dependencies": { - "@types/node": "18.11.9", - "@types/react": "18.0.25", - "@types/react-dom": "18.0.9", - "next": "13.4.12", - "nextra": "^2.13.1", - "nextra-theme-docs": "^2.13.1", - "react": "18.2.0", - "react-dom": "18.2.0", - "typescript": "4.9.3" - } -} diff --git a/docs/pages/404.mdx b/docs/pages/404.mdx deleted file mode 100644 index 333cbbf90..000000000 --- a/docs/pages/404.mdx +++ /dev/null @@ -1,3 +0,0 @@ -## Page not found - -The page you are looking for does not exist. You can go back to the [homepage](/). \ No newline at end of file diff --git a/docs/pages/_app.mdx b/docs/pages/_app.mdx deleted file mode 100644 index 2e1b362c3..000000000 --- a/docs/pages/_app.mdx +++ /dev/null @@ -1,3 +0,0 @@ -export default function App({ Component, pageProps }) { - return -} \ No newline at end of file diff --git a/docs/pages/_meta.json b/docs/pages/_meta.json deleted file mode 100644 index 1565330ea..000000000 --- a/docs/pages/_meta.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "index": "Home", - "get_started": "Getting started", - "config": "Configuration", - "comparison": "Comparison", - "inner_workings": "How does it work?", - "faq": "FAQ", - "common_issues": "Troubleshooting", - "contribute": "Contributing" -} \ No newline at end of file diff --git a/docs/pages/common_issues.mdx b/docs/pages/common_issues.mdx deleted file mode 100644 index 0209efce9..000000000 --- a/docs/pages/common_issues.mdx +++ /dev/null @@ -1,64 +0,0 @@ -#### Debug mode - -OpenNext can be executed in debug mode by setting the environment variable `OPEN_NEXT_DEBUG=true`. - -This will output **A LOT** of additional logs to the console.This also disable minifying in esbuild, and add source maps to the output. This can result in code that might be up to 2-3X larger than the production build. **Do not enable this in production** - -```bash -OPEN_NEXT_DEBUG=true npx open-next@latest build -``` - -#### Cannot find module next - -You might stumble upon this error inside cloudwatch logs: `Cannot find module 'next'`. It is likely that you are in a monorepo and you have several lock files. 
**Just make sure that you have a single lock file in the root of your project.** - -#### Reducing bundle size - -Next might incorrectly include some dependencies in the bundle. To remove them you can use this configuration inside `next.config.js`: - -```javascript -experimental: { - outputFileTracingExcludes: { - "*": ["node_modules/the-unwanted-package"], - }, - }, -``` - -Also you should not add sharp as a dependencies unless absolutely necessary, the image optimization already has it's own version of sharp. - -#### Patch fetch behaviour for ISR. Only for next@13.5.1+ - -If you use ISR and fetch in your app, you may encounter a bug that makes your revalidate values inconsistent. -The issue is that it revalidates using the lowest revalidate of all fetch calls in your page, regardless of their individual values. To fix this bug, you need to modify the fetch function in your root layout component with the following code snippet - -```ts -export default function RootLayout() { - const asyncStorage = require('next/dist/client/components/static-generation-async-storage.external'); - //@ts-ignore - const staticStore = - (fetch as any).__nextGetStaticStore?.() || - asyncStorage.staticGenerationAsyncStorage; - const store = staticStore.getStore(); - store.isOnDemandRevalidate = - store.isOnDemandRevalidate && !(process.env.OPEN_NEXT_ISR === 'true'); - return <>...; -} -``` - -#### Access Denied errors on routes during page refresh and direct URL access - -If you are refreshing a dynamic/static route or going to that route directly from an URL. Like this route f.ex: -`/profile/[userId]/[id]`, and you are getting an `Access Denied` error in XML: - -```xml - - AccessDenied - Access Denied - R4E6T9G2Q1S0Z5X8 - S7h9F3g2T0z5K8d6A2s1W4x3C7v8B9m2L0j3K4i7H8g9F0r3A5q8w9E8r7t6Y5h4U3i2O1p0 - -``` - -This can also happen in app router when a client navigates via NextJS `` component. - -The issue might be that your having a folder or file in your `public` directory with an overlapping between the name and your route. In this case, you should rename that to something else. diff --git a/docs/pages/comparison.mdx b/docs/pages/comparison.mdx deleted file mode 100644 index f03a52c52..000000000 --- a/docs/pages/comparison.mdx +++ /dev/null @@ -1,25 +0,0 @@ -It should be noted that open-next does not actually deploy the app. It only bundles everything for your IAC to deploy it. - -Here is a table comparing the different options to deploy a next.js app: - -| Features | OpenNext | Vercel | AWS Amplify | Docker Standalone | -| --- | --- | --- | --- | --- | -| **Function splitting** | Yes | Yes | No | No | -| **Multiple deployment target** ¹ | Yes | Yes ² | No | No | -| **Serverless** | Yes | Yes | Yes | No ³ | -| **Warmer function** | Yes | No | No | Not necessary | -| **External middleware** | Yes ⁴ | Yes | No | No | -| **Edge runtime support** | Partial Support ⁵ | Yes | Embedded ⁶ | Embedded ⁶ | -| **ISR** | Yes | Yes | Yes | Yes ⁷ | -| **On-Demand Revalidation** | Yes ⁸ | Yes | No | Yes ⁸ | -| **Custom server** | Yes ⁹ | No | No | Yes | - -1. Multiple deployment target means that you can deploy the same app to different target like some part to ECS, some part to Lambda etc... -2. Vercel supports only serverless Node (backed by AWS Lambda) and Edge runtime (backed by cloudflare workers) -2. You can deploy a dockerized next.js app to AWS lambda using AWS Lambda Web adapter, but some part like ISR will not work as expected -3. OpenNext supports external middleware, but it is not enabled by default. -4. 
OpenNext supports edge runtime in node, but every route needs to be deployed separately. OpenNext supports edge runtime in cloudflare workers, but only for app router api routes. -5. Embedded means that the edge runtime is embedded inside the bundle. It emulates a fake edge runtime inside the prod environment. -6. You might experience some inconsistencies with ISR if you have a CDN in front of your app. Next always set the cache-control header to `s-maxage=REVALIDATION_TIME, stale-while-revalidate`, it means that your data (json or rsc) and your html might be out of sync. -7. You need to invalidate the CDN manually. For OpenNext, here is an example for cloudfront -8. OpenNext supports custom server, but it is not enabled by default. You can have a custom server even in a serverless environment. \ No newline at end of file diff --git a/docs/pages/config.mdx b/docs/pages/config.mdx deleted file mode 100644 index 644e9bcb6..000000000 --- a/docs/pages/config.mdx +++ /dev/null @@ -1,26 +0,0 @@ -### Build Arguments - -There is two build arguments that you can pass to the `open-next build` command: - -- `--config-path` - This is the path to the configuration file that you want to use. By default, it will look for `open-next.config.ts` in the current working directory. This needs to be relative to the current working directory. -- `--node-externals` - You can configure externals for the esbuild compilation of the `open-next.config.ts` file (i.e `--node-externals @aws-sdk/*,open-next/dist/queue/*`) - -### Configuration File - -For personalisation you need to create a file `open-next.config.ts` at the same place as your `next.config.js`, and export a default object that satisfies the `OpenNextConfig` interface. It is possible to not have an `open-next.config.ts` file, the default configuration will then be applied automatically. - -This file needs to be placed at the same level as your `next.config.js` file. - -If you have an `open-next.config.ts` file, make sure you have atleast this: - -```ts -export default { - default: {}, -}; -``` - -If you want to take a look at some simple configuration examples, you can check the [simple example](/config/simple_example). - -For more advanced use cases, you can check [how to implement custom overrides](/config/custom_overrides). - -If you want to look at a full example, you can check [the full example](/config/full_example). diff --git a/docs/pages/config/_meta.json b/docs/pages/config/_meta.json deleted file mode 100644 index 4163e4c71..000000000 --- a/docs/pages/config/_meta.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "simple_example": "Simple Example", - "custom_overrides": "Custom Overrides", - "full_example": "Full Example", - "nx": "Nx Monorepo" -} \ No newline at end of file diff --git a/docs/pages/config/custom_overrides.mdx b/docs/pages/config/custom_overrides.mdx deleted file mode 100644 index 7e319a938..000000000 --- a/docs/pages/config/custom_overrides.mdx +++ /dev/null @@ -1,297 +0,0 @@ -import { Callout } from 'nextra/components' - -In some cases the simple example is not enough, and you want to add more customization to your server. This is where the lazy loaded overrides come in. You can override any part of the server by providing a function that returns a promise that resolves to the override object. This is useful when you want to add custom logic to your server, like adding a custom queue, or adding a custom converter. 
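As a minimal sketch of that lazy-loading pattern (the `./customQueue` module name here is purely illustrative), an override in `open-next.config.ts` can look like this:

```ts
// open-next.config.ts — minimal sketch of a lazy-loaded override.
// './customQueue' is a hypothetical local module that default-exports a Queue implementation.
import type { OpenNextConfig } from 'open-next/types/open-next'

const config = {
  default: {
    override: {
      // A function returning a promise that resolves to the override object,
      // so the implementation is only loaded when the server actually needs it.
      queue: () => import('./customQueue').then((mod) => mod.default),
    },
  },
} satisfies OpenNextConfig

export default config
```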
- - -Be careful if you use the edge runtime (either in a function or by using the external middleware), we do 2 compilations of the `open-next.config.ts`, one for node and one for the edge runtime. If you're using some custom overrides, you likely want to add -```ts -edgeExternals: ['./customWrapper', './anyOtherOverrideUsed'] -``` -to your `open-next.config.ts` to avoid the edge runtime to try to compile overrides that are not compatible with the edge runtime. - - - -## Custom converter - -Sometimes you might want to modify the object received by OpenNext. For example `Config.YOUR_SECRET_KEY` from sst cannot be used in the middleware, so you might want to add it to the headers. This is where the custom converter comes in. You can add a custom converter to modify the object before it is passed to OpenNext. - -You'll still have to use a fallback value during dev as this is not used by the dev server. - -```ts -// customConverter.ts -import converter from 'open-next/converters/aws-apigw-v2.js' -import type { Converter } from 'open-next/types/open-next' -import { Config } from 'sst/node/Config' -const mySecretKey = Config.YOUR_SECRET_KEY - -export default { - convertFrom: async (event) => { - const result = await converter.convertFrom(event) - return { - ...result, - headers: { - ...result.headers, - 'inserted-in-converter': '1', - 'my-super-secret-key': mySecretKey - } - } - }, - convertTo : async (intResult) => { - const result = await converter.convertTo(intResult) - return { - ...result, - headers: { - ...result.headers, - 'x-converter-end': '1' - } - } - }, - name: 'custom-apigw-v2' -} as Converter -``` - -```ts -// open-next.config.ts -import type { OpenNextConfig } from 'open-next/types/open-next' - -const config = { - default: { - override: { - converter: () => import('./customConverter').then((mod) => mod.default) - } - } -} as OpenNextConfig -``` - -## Custom wrapper -Here we provide a few examples for some custom wrapper. - -### Define a global to use node in the middleware - -```ts -// customWrapper.ts -import defaultWrapper from 'open-next/wrappers/aws-lambda.js' - -//Here you can define some globals -declare global { - var myApi: () => Promise; -} -globalThis.myApi = async () => { - const crypto = await import('crypto') - return { - nb: crypto.randomInt(0, 100) - } -} - -export default defaultWrapper -``` - -```ts -// open-next.config.ts -import type { OpenNextConfig } from 'open-next/types/open-next' -const config = { - default: { - override: { - wrapper: () => import('./customWrapper').then((mod) => mod.default) - } - } -} as OpenNextConfig -export default config -``` -But since Next dev server runs in a fake edge runtime and that the global is defined only for deployment, you'll have to mock the global in your middleware. - -```ts -// middleware.ts -import { NextResponse } from 'next/server' -import type { NextRequest } from 'next/server' - -// Here you need to mock the global if not present -// One way to avoid issues with different implementation would be to create an api endpoint -// that uses the exact same logic as the global you defined earlier, -// and that is only available during development i.e. 
/api/dev/myApi -if(!globalThis.myApi) { - globalThis.myApi = async () => { - return await fetch('http://localhost:3000/api/dev/myApi').then(res => res.json()) - } -} - -export function middleware(request: NextRequest) { - // You can also send an error in the api endpoint itself - // Or you could add all the dev endpoint in their own lambda - // that you do not deploy in production - if(request.nextUrl.pathname.startsWith('/api/dev') && process.env.NODE_ENV === 'production') { - return NextResponse('This route is only available in development',{ - status: 500, - }) - } - // Now you can use Node.js in your middleware - const {nb} = await myApi() - - // ... your code here -} - -``` - - -### Use middy.js with the wrapper - -```ts -// customWrapper.ts -import streamingWrapper from 'open-next/wrappers/aws-lambda.js' -import {WrapperHandler} from 'open-next/types/open-next' -import middy from '@middy/core' -import httpSecurityHeaders from '@middy/http-security-headers' - -const handler : WrapperHandler = async (handler, converter) => { - const defaultHandler = await streamingWrapper.wrapper(handler, converter) - return middy() - .use(httpSecurityHeaders()) - .handler(defaultHandler) -} - -export default { - wrapper: handler, - name: "custom-aws-lambda", - supportStreaming: false, -}; -``` - -```ts -// open-next.config.ts -import type { OpenNextConfig } from 'open-next/types/open-next' -const config = { - default: { - override: { - wrapper: () => import('./customWrapper').then((mod) => mod.default) - } - } -} as OpenNextConfig -export default config -``` - -### Preload some routes during warmer event - -In this example the custom wrapper is used to preload some important routes before the first request. This is useful if you have some routes that are slow on coldstart (Next lazily load the routes only when they are needed) and you want to preload them before the first request. This is also useful if you want to add some custom logic to the server, like adding a custom header to the response. - -**WARNING**: This one is not properly tested. It's just an example of what you could do. You should test it properly before using it in production. Also preloading too many routes is probably a bad idea. 
- - -```ts -// customWrapper.ts -import type { - APIGatewayProxyEventV2, - APIGatewayProxyResultV2, -} from "aws-lambda"; -import type { StreamCreator } from "open-next/http/openNextResponse"; -import { Writable } from "node:stream"; - -import { WarmerEvent, WarmerResponse } from "open-next/adapters/warmer-function"; -import type { WrapperHandler } from "open-next/types/open-next"; - -type AwsLambdaEvent = - | APIGatewayProxyEventV2 - | WarmerEvent; - -type AwsLambdaReturn = - | APIGatewayProxyResultV2 - | WarmerResponse; - - -const serverId = Math.random().toPrecision(5).toString() -let isPreloaded = false - -function formatWarmerResponse(event: WarmerEvent) { - return new Promise((resolve) => { - setTimeout(() => { - resolve({ serverId, type: "warmer" } satisfies WarmerResponse); - }, event.delay); - }); -} - -const handler: WrapperHandler = - async (handler, converter) => - async (event: AwsLambdaEvent): Promise => { - console.log('custom wrapper') - // Handle warmer event - if ("type" in event) { - if(!isPreloaded) { - // You could preload every route you want here - // Be careful, while the route is preloading the lambda cannot process other requests - await handler({ - type: 'core', - url: '/myRoute', - method: 'GET', - headers: {}, - query: {}, - rawPath: '/myRoute', - cookies: {}, - remoteAddress: '' - }); - isPreloaded = true - } - return formatWarmerResponse(event); - } - - - const internalEvent = await converter.convertFrom(event); - internalEvent.headers['inserted-in-wrapper'] = 'hello from wrapper' - - //This is a workaround, there is an issue in node that causes node to crash silently if the OpenNextNodeResponse stream is not consumed - //This does not happen everytime, it's probably caused by suspended component in ssr (either via or loading.tsx) - //Everyone that wish to create their own wrapper without a StreamCreator should implement this workaround - //This is not necessary if the underlying handler does not use OpenNextNodeResponse (At the moment, OpenNextNodeResponse is used by the node runtime servers and the image server) - const fakeStream: StreamCreator = { - writeHeaders: () => { - return new Writable({ - write: (_chunk, _encoding, callback) => { - callback(); - }, - }); - }, - onFinish: () => { - // Do nothing - }, - }; - - const response = await handler(internalEvent, fakeStream); - response.headers['x-wrapper'] = 'hi' - - return converter.convertTo(response, event); - }; - -export default { - wrapper: handler, - name: "custom-aws-lambda", - supportStreaming: false, -}; -``` - -```ts -// open-next.config.ts -import type { OpenNextConfig } from 'open-next/types/open-next' -const config = { - default: { - override: { - wrapper: () => import('./customWrapper').then((mod) => mod.default) - } - } -} as OpenNextConfig -``` - -## Custom Incremental cache -TODO - -## Custom queue -TODO - -## Custom Tag cache -TODO - -## Custom Origin Resolver -TODO - -## Custom Image Loader -TODO - -## Custom Warmer Invoke -TODO \ No newline at end of file diff --git a/docs/pages/config/full_example.mdx b/docs/pages/config/full_example.mdx deleted file mode 100644 index 88a97d9f8..000000000 --- a/docs/pages/config/full_example.mdx +++ /dev/null @@ -1,105 +0,0 @@ -Here is a detailed example of an `open-next.config.ts` file: -This file need to be at the same place as your `next.config.js` file - -`server` in here could refer to a lambda function, a docker container, a node server or whatever that can support running nodejs code. 
(Even cloudflare workers in the future) - -For more information about the options here, take a look at the [components section](/inner_workings/components/overview). - -```ts -import type { OpenNextConfig } from 'open-next/types/open-next' -const config = { - default: { // This is the default server, similar to the server-function in open-next v2 - // You don't have to provide the below, by default it will generate an output - // for normal lambda as in open-next v2 - override: { - wrapper: "aws-lambda-streaming", // This is necessary to enable lambda streaming - // You can override any part that is a `LazyLoadedOverride` this way - queue: () => Promise.resolve({ - send: async (message) => { - //Your custom code here - } - }) - }, - minify: true, // This will minify the output - }, - // Below we define the functions that we want to deploy in a different server - // This is only used if you want to split the server into multiple servers - functions: { - ssr: { - routes: [ - "app/api/isr/route", "app/api/sse/route", "app/api/revalidateTag/route", // app dir Api routes - "app/route1/page", "app/route2/page", // app dir pages - "pages/route3" // page dir pages - ], // For app dir, you need to include route|page, no need to include layout or loading - patterns: ['api/*', 'route1', 'route2', 'route3'], // patterns needs to be in a cloudfront compatible format, this will be used to generate the output - override: { - wrapper: "aws-lambda-streaming", - }, - // This enables the bundled next server which is faster and reduce the size of the server - // This is also experimental and might not work in all cases - experimentalBundledNextServer: true - }, - pageSsr: { - routes: ["pages/pageSsr"], // For page dir routes should be in the form `pages/${route}` without the extension, it should match the filesystem - // BUILD_ID is a special case, it will be replaced with the actual build id - patterns: [ 'pageSsr', "_next/data/BUILD_ID/pageSsr.json"], - override: { - wrapper: "node", - converter: "node", - // This is necessary to generate the dockerfile and for the implementation to know that it needs to deploy on docker - // You can also provide a string here which will be used to create the dockerfile - generateDockerfile: true, - }, - }, - edge: { - runtime: "edge", - routes: ["app/ssr/page"], - patterns: ["ssr"], - override: {} - } - }, - // By setting this, it will create another bundle for the middleware, - // and the middleware will be deployed in a separate server. - // If not set middleware will be bundled inside the servers - // It could be in lambda@edge, cloudflare workers, or anywhere else - // By default it uses lambda@edge - // This is not implemented in the reference construct implementation. 
- // This is optional, but might be necessary if you split your app into multiple servers - middleware: { - external: true - } - - // Optional - imageOptimization: { - // This is the architecture of the image, it could be x64 or arm64 - // This is necessary to bundle the proper version of sharp - arch: "x64", - } - - // If you want to override the default build command, you can do it here - // By default it uses `npm run build` - buildCommand: "echo 'hello world'", - - dangerous: { - // This will disable the tag cache - // You can use it safely on page router, on app router it will break revalidateTag and revalidatePath - disableTagCache: true, - // This will disable the incremental cache - // This is generally not recommended, as this is necessary for ISR AND SSG routes as well as the fetch cache - disableIncrementalCache: true, - } - - //The path to the target folder of build output from the `buildCommand` option (the path which will contain the `.next` and `.open-next` folders). This path is relative from the current process.cwd() - Optional default to "." - buildOutputPath: "build", - - //The path to the root of the Next.js app's source code. This path is relative from the current process.cwd(). - Optional default to "." - appPath: "app", - - //The path to the package.json file of the Next.js app. This path is relative from the current process.cwd(). - Optional - packageJsonPath: "package.json", - -} satisfies OpenNextConfig - -export default config; -export type Config = typeof config -``` \ No newline at end of file diff --git a/docs/pages/config/nx.mdx b/docs/pages/config/nx.mdx deleted file mode 100644 index a0d034a41..000000000 --- a/docs/pages/config/nx.mdx +++ /dev/null @@ -1,252 +0,0 @@ -# Configuring OpenNext for use in an Nx Monorepo. -Here's a detailed exampled of how to add open-next + SST to an existing Nx workspace, with an existing NextJS application sitting at `apps/next-site` - -1. install `open-next`: `pnpm add —save-dev open-next` - -2. Update your `apps/next-site/next.config.js` add `output: ‘standalone’`, and you want to add `experimental.outputFileTracingRoot`, it should look a little like this: -```javascript -//@ts-check - -// eslint-disable-next-line @typescript-eslint/no-var-requires -const { composePlugins, withNx } = require('@nx/next'); -const { join } = require('node:path'); - -/** - * @type {import('@nx/next/plugins/with-nx').WithNxOptions} - **/ -const nextConfig = { - nx: { - // Set this to true if you would like to use SVGR - // See: https://github.com/gregberge/svgr - svgr: false, - }, -+ output: 'standalone', -+ experimental: { -+ // this should be the path to the root of your repo, in this case it's just two levels down. needed for open-next to detect that it's a monorepo -+ outputFileTracingRoot: join(__dirname, '../../'), -+ }, -}; - -const plugins = [ - // Add more Next.js plugins to this list if needed. - withNx, -]; - -module.exports = composePlugins(...plugins)(nextConfig); -``` - -3. Create `open-next.config.js` inside your apps root directory, it should look a little something like this: -```javascript -import type { OpenNextConfig } from 'open-next/types/open-next'; - -const config = { - default: {}, - buildCommand: 'exit 0', // in my example we set up Nx task distribution to handle the order of building. - buildOutputPath: '.', - appPath: '.', - packageJsonPath: '../../', // again, path to the root of your repo (where the package.json is) -} satisfies OpenNextConfig; - -export default config; -``` - -4. 
Set up nx's targets/tasks - -Now we have open-next configuration set up, you can try to run `open-next build` and depending on whether you have already built your next app or not -it might even work. - -However, we don't want to rely on needing to manually running a build every time we want to deploy a change, so instead we can set up a target. -We do this in your project's `project.json` (in this case, living at `apps/next-site/project`), we want to find the targets object and update it: -```diff -{ - "name": "next-site", - "$schema": "../../node_modules/nx/schemas/project-schema.json", - "sourceRoot": "apps/next-site", - "projectType": "application", - "tags": [], - "targets": { -+ "open-next-build": { // name of the target, this is what you will call -+ "executor": "nx:run-commands", -+ "dependsOn": ["build"], // this ensures that Nx will build our next app before running this command. -+ "cache": true, // cache the output, good for if you want to use DTE/Nx cloud -+ "outputs": ["{projectRoot}/.open-next"], // tell nx where the output lives -+ "options": { -+ "cwd": "apps/next-site", // where we run the command -+ "command": "open-next build" // what command we want to run -+ } -+ } - } -} - -``` -Next we need to add the open-next directory to our eslint's `ignorePatterns` array -```diff -{ - "extends": [ - "plugin:@nx/react-typescript", - "next", - "next/core-web-vitals", - "../../.eslintrc.json" - ], - "ignorePatterns": [ - "!**/*", -+ ".next/**/*", -+ ".open-next/**/*" - ], - "overrides": [ - { - "files": ["*.ts", "*.tsx", "*.js", "*.jsx"], - "rules": {} - }, - { - "files": ["*.ts", "*.tsx"], - "rules": {} - }, - { - "files": ["*.js", "*.jsx"], - "rules": {} - }, - { - "files": ["*.spec.ts", "*.spec.tsx", "*.spec.js", "*.spec.jsx"], - "env": { - "jest": true - } - } - ] -} -``` - -now, when you run `nx open-next-build next-site`, nx will automatically build the next app, and anything that the next app requires, neat! - -5. Deploying with SST - -Now, we have a built app, ready to deploy, so how do we get it onto SST/AWS ? Good question! - -We are using `sst ion` in this example. i will assume you have already have the cli installed, (if not, check here on how!)[https://ion.sst.dev/], -but we will not use the SST cli to init this project, because it wants to add a package.json to your next app, and it will look like it's working, but you will end up with a big far server error (all because the package.json overrides whatever nx _thinks_ there should be, and it will miss a bunch of dependencies). we will instead manually set this up: - -- let's add the sst package with `pnpm add sst@ion`, and the required packages for SST to work with AWS `pnpm add --save-dev aws-cdk-lib constructs @types/aws-lambda` -- then we want to manually create an `sst.config.ts` file in `apps/next-site` that looks like this: - -```typescript -/// - -export default $config({ - app(input) { - return { - name: 'next-site', // use whatever your project is called here - removal: input?.stage === 'production' ? 'retain' : 'remove', - home: 'aws', - }; - }, - async run() { - new sst.aws.Nextjs('Site', { - buildCommand: 'exit 0;' // again, we want to get Nx to handle building - }); - }, -}); - -``` - -- now, you probably see some type errors, as SST is not initialized yet, but we can resolve this by running -```bash -$ cd apps/next-site && sst install -``` - -this will resolve the type issues and initialise SST. 
- -- next we need to add `sst.config.ts` to our `tsconfig.json`'s excludes array - -- then we want to add both `sst.config.ts` and the `.sst` folder to the eslint ignorePatterns - -```diff -{ - "extends": [ - "plugin:@nx/react-typescript", - "next", - "next/core-web-vitals", - "../../.eslintrc.json" - ], - "ignorePatterns": [ - "!**/*", - ".next/**/*", -+ ".open-next/**/*", -+ ".sst/**/*", -+ "sst.config.ts" - ], - "overrides": [ - { - "files": ["*.ts", "*.tsx", "*.js", "*.jsx"], - "rules": {} - }, - { - "files": ["*.ts", "*.tsx"], - "rules": {} - }, - { - "files": ["*.js", "*.jsx"], - "rules": {} - }, - { - "files": ["*.spec.ts", "*.spec.tsx", "*.spec.js", "*.spec.jsx"], - "env": { - "jest": true - } - } - ] -} -``` - -- now, if you want to run `sst dev` you can do so with `sst dev "nx dev next-site"` similarly deploying can be done with `sst deploy`...but you probably want to set up that task chaining, again we can do that by adding a target to your app, and setting it's `dependsOn` to the `open-next-build`, here's what it might look like: - -```diff -{ - "name": "next-site", - "$schema": "../../node_modules/nx/schemas/project-schema.json", - "sourceRoot": "apps/next-site", - "projectType": "application", - "tags": [], - "targets": { - "open-next-build": { - "executor": "nx:run-commands", - "dependsOn": ["build"], - "cache": true, - "outputs": ["{projectRoot}/.open-next"], - "options": { - "cwd": "apps/next-site", - "command": "open-next build" - } -+ }, -+ "deploy": { -+ "executor": "nx:run-commands", -+ "dependsOn": ["open-next-build"], -+ "options": { -+ "cwd": "apps/next-site", -+ "command": "sst deploy --stage {args.stage}", // here we use nx's interpolation to allow --stage to be passed, with some configuration examples below -+ "forwardAllArgs": true -+ }, -+ "defaultConfiguration": "dev", -+ "configurations": { -+ "production": { -+ "args": ["--stage=production"] -+ }, -+ "staging": { -+ "args": ["--stage=staging"] -+ }, -+ "dev": { -+ "args": ["--stage=development"] -+ } -+ } -+ } -+ } -} - - -``` - -now we can run (or if you want a custom stage, you can simply do `nx deploy next-site --stage this-is-my-stage` and it will be passed directly to the sst command). -```bash -$ nx deploy next-site --configuration dev # using dev configuration (which sets the stage to development) -# nx deploy next-site -c dev # OR -# nx deploy next-site --stage my-stage # Custom Stage -``` \ No newline at end of file diff --git a/docs/pages/config/simple_example.mdx b/docs/pages/config/simple_example.mdx deleted file mode 100644 index 367aa556e..000000000 --- a/docs/pages/config/simple_example.mdx +++ /dev/null @@ -1,190 +0,0 @@ -import { Callout } from 'nextra/components'; - -Here you can find the most common `open-next.config.ts` file examples that you could easily take as a starting point for your own configuration. - -## Streaming with lambda - - - AWS has a bunch of different implementations of streaming in production. You - might be lucky and have a fully working one, but you might have one that are - not suitable for production. Be aware that there is nothing to do to prevent - AWS from breaking your deployment (same code and same runtime might break from - one day to another) [Thread - 1](https://discord.com/channels/983865673656705025/1230482660913184800) - [Thread - 2](https://discord.com/channels/983865673656705025/1249368592558985247)
{' '} -
On some AWS accounts the response can hang if the body is empty. There - is a workaround for that on OpenNext 3.0.3+, setting environment variable - `OPEN_NEXT_FORCE_NON_EMPTY_RESPONSE` to `"true"`. This will ensure that the - stream body is not empty.

- If you have an issue with streaming send a message on [discord](https://sst.dev/discord) - and contact AWS support to let them know of the issue. -
- -```ts -import type { OpenNextConfig } from 'open-next/types/open-next'; -const config = { - default: { - override: { - wrapper: 'aws-lambda-streaming', // This is necessary to enable lambda streaming - }, - }, -} satisfies OpenNextConfig; - -export default config; -``` - -## Splitting the server - -```ts -import type { OpenNextConfig } from 'open-next/types/open-next'; -const config = { - // This is the default server, similar to the server-function in open-next v2 - // In this case we are not providing any override, so it will generate a normal lambda (i.e. no streaming) - default: {}, - // You can define multiple functions here, each with its own routes, patterns and overrides - functions: { - myFn: { - // Patterns needs to use the glob pattern - patterns: ['route1', 'route2', 'route3'], - // For app dir, you need to include route|page, no need to include layout or loading - // It needs to be prepended with app/ or pages/ depending on the directory used - routes: ['app/route1/page', 'app/route2/page', 'pages/route3'], - override: { - wrapper: 'aws-lambda-streaming', - }, - }, - }, -} satisfies OpenNextConfig; - -export default config; -``` - -## Use aws4fetch instead of AWS sdk - - - By default we use S3, DynamoDB and SQS for handling ISR/SSG and the fetch - cache. We interact with them using AWS sdk v3. This can contribute a lot to - the cold start. There is a built-in option to use - [aws4fetch](https://github.com/mhart/aws4fetch) instead of the AWS sdk that - can reduce cold start up to 300ms. Requires `OpenNext v3.0.3+`. Here is how - you enable it: - - -```ts -import type { OpenNextConfig } from 'open-next/types/open-next'; -const config = { - default: { - override: { - tagCache: 'dynamodb-lite', - incrementalCache: 's3-lite', - queue: 'sqs-lite', - }, - }, -} satisfies OpenNextConfig; - -export default config; -``` - -## Running in Lambda@Edge - -```ts -import type { OpenNextConfig } from 'open-next/types/open-next'; -const config = { - default: { - placement: 'global', - override: { - converter: 'aws-cloudfront', - }, - }, -} satisfies OpenNextConfig; - -export default config; -``` - -## Bundle for a classic Node server (With function splitting) - - -This is not implemented in sst yet. You'll have to use your own IAC construct to deploy this. - -Be aware that this uses the exact same system for ISR/SSG as the default lambda setup. So it will have to have all the proper permissions and env variable to interact with S3, DynamoDB and SQS (Or whatever you override it with). You can see [here](/inner_workings/components/server/node#special-overrides) for more details - - -```ts -import type { OpenNextConfig } from 'open-next/types/open-next'; -const config = { - // In this case, the default server is meant to run as a classic Node server - // To execute the server you need to run `node index.mjs` inside `.open-next/server-functions/default` - default: { - override: { - wrapper: 'node', - converter: 'node', - // This is necessary to generate a simple dockerfile and for the generated output to know that it needs to deploy on docker - // You can also provide a string here (i.e. 
the content of your Dockerfile) which will be used to create the dockerfile - // You don't have to provide this if you plan on not using docker, or if you plan on using your own custom dockerfile - generateDockerfile: true, - }, - }, - // You can define multiple functions here, each with its own routes, patterns and overrides - functions: { - // In this case both the api route is in lambda and the rest is in node - myFn: { - // Patterns needs to use the glob pattern - patterns: ['api/*'], - routes: ['app/api/test/route', 'app/api/test2/route'], - }, - }, -} satisfies OpenNextConfig; - -export default config; -``` - -## Edge runtime splitted function - -This will generate 2 server functions, the default one and the edge one. The edge one will still be deployed as a lambda function, but it will be deployed in the edge runtime. - -Edge runtime function have less cold start time, but you can only deploy one route per function. They also do not have the middleware bundled in the function, so you need to use external middleware if you need it in front of the edge function. - -```ts -import type { OpenNextConfig } from 'open-next/types/open-next'; -const config = { - default: {}, - functions: { - edge: { - runtime: 'edge', - //placement: "global", If you want your function to be deployed globally (i.e. lambda@edge) uncomment this line. Otherwise it will be deployed in the region specified in the stack - routes: ['app/api/test/route'], - patterns: ['api/test'], - }, - }, -} satisfies OpenNextConfig; - -export default config; -``` - -## External middleware - -In some cases (edge runtime, function splitting with some middleware rewrites, etc) you might want to use external middleware. -With the default middleware configuration, it is bundled for a deployment in lambda@edge. -This is how you can do it: - -```ts -import type { OpenNextConfig } from 'open-next/types/open-next'; -const config = { - default: {}, - functions: { - myFn: { - patterns: ['route1', 'route2', 'route3'], - routes: ['app/route1/page', 'app/route2/page', 'pages/route3'], - override: { - wrapper: 'aws-lambda-streaming', - }, - }, - }, - middleware: { - external: true, - }, -} satisfies OpenNextConfig; - -export default config; -``` diff --git a/docs/pages/contribute.mdx b/docs/pages/contribute.mdx deleted file mode 100644 index 3454c61b4..000000000 --- a/docs/pages/contribute.mdx +++ /dev/null @@ -1,19 +0,0 @@ -To run `OpenNext` locally: - -1. Clone [this repository](https://github.com/sst/open-next). -2. Build `open-next`: - ```bash - cd open-next - pnpm build - ``` -3. Run `open-next` in watch mode: - ```bash - pnpm dev - ``` -4. Now, you can make changes in `open-next` and build your Next.js app to test the changes. - ```bash - cd path/to/my/nextjs/app - path/to/open-next/packages/open-next/dist/index.js build - ``` - -It can be a bit cumbersome to need to deploy every time you want to test changes. If your change is not dependent on the wrapper or the converter, then you can create a custom `open-next.config.ts` file, you can take a look [here](/contribute/local_run) for more information. 
\ No newline at end of file diff --git a/docs/pages/contribute/_meta.json b/docs/pages/contribute/_meta.json deleted file mode 100644 index 64c1128e1..000000000 --- a/docs/pages/contribute/_meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "local_run": "Run locally", - "plugin": "Internal plugin system" -} \ No newline at end of file diff --git a/docs/pages/contribute/local_run.mdx b/docs/pages/contribute/local_run.mdx deleted file mode 100644 index 4a461799e..000000000 --- a/docs/pages/contribute/local_run.mdx +++ /dev/null @@ -1,185 +0,0 @@ -When making some changes to OpenNext, it can be a bit cumbersome to need to deploy every time you want to test changes. If your change is not dependent on the wrapper or the converter, then you can create a custom `open-next.config.ts` file (you can use another name so that it doesn't conflict with your existing `open-next.config.ts`). Here is an example with a bunch of custom overrides: - -To run `OpenNext` locally: -```bash -# This is to build (the config-path is needed if you use a different name than the default one) -node /path/to/open-next/packages/open-next/dist/index.js build --config-path open-next.local.config.ts -# Then to run the server -node .open-next/server-functions/default/index.mjs -``` - -```typescript -// open-next.local.config.ts - -// A good practice would be to use a different name so that it doesn't conflict -// with your existing open-next.config.ts i.e. open-next.local.config.ts -import type {OpenNextConfig} from 'open-next/types/open-next' - -const config = { - default: { - override:{ - // We use a custom wrapper so that we can use static assets and image optimization locally - wrapper: () => import('./dev/wrapper').then(m => m.default), - // ISR and SSG won't work properly locally without this - Remove if you only need SSR - incrementalCache: () => import('./dev/incrementalCache').then(m => m.default), - // ISR requires a queue to work properly - Remove if you only need SSR or SSG - queue: () => import('./dev/queue').then(m => m.default), - converter: 'node', - } - }, - // You don't need this part if you don't use image optimization or don't need it in your test - imageOptimization: { - // Image optimization only work on linux, and you have to use the correct architecture for your system - arch: 'x64', - override: { - wrapper: 'node', - converter: 'node', - } - // If you need to test with local assets, you'll have to override the imageLoader as well - }, - - dangerous: { - // We disable the cache tags as it will usually not be needed locally for testing - // It's only used for next/cache revalidateTag and revalidatePath - // If you need it you'll have to override the tagCache as well - disableTagCache: true, - - - // You can uncomment this line if you only need to test SSR - //disableIncrementalCache: true, - }, - // You can override the build command so that you don't have to rebuild the app every time - // You need to have run the default build command at least once - buildCommand: 'echo "no build command"', - edgeExternals: ['./dev/wrapper', './dev/incrementalCache', './dev/queue'], -} satisfies OpenNextConfig - -export default config -``` - -```typescript -// dev/wrapper.ts -// You'll need to install express -import express from 'express' -// The proxy is used to proxy the image optimization server -// you don't have to use it, but image request will return 500 error -import proxy from 'express-http-proxy' -import { fork } from 'child_process' - -import type { StreamCreator } from "open-next/http/openNextResponse"; -import 
type { WrapperHandler } from "open-next/types/open-next"; - -const wrapper: WrapperHandler = async (handler, converter) => { - const app = express(); - // To serve static assets - app.use(express.static('../../assets')) - - //Launch image server fork - fork('../../image-optimization-function/index.mjs', [], { - env: { - NODE_ENV: 'development', - PORT: '3001', - } - }) - app.use('/_next/image', proxy('localhost:3001')) - - app.all('*', async (req, res) => { - const internalEvent = await converter.convertFrom(req); - const _res : StreamCreator = { - writeHeaders: (prelude) => { - res.writeHead(prelude.statusCode, prelude.headers); - res.uncork(); - return res; - }, - onFinish: () => { - // Is it necessary to do something here? - }, - }; - await handler(internalEvent, _res); - }); - - const server = app.listen(parseInt(process.env.PORT ?? "3000", 10), ()=> { - console.log(`Server running on port ${process.env.PORT ?? 3000}`); - }) - - - app.on('error', (err) => { - console.error('error', err); - }); - - return () => { - server.close(); - }; -}; - -export default { - wrapper, - name: "dev-node", - supportStreaming: true, -}; -``` - -```typescript -// dev/incrementalCache.ts -import type {IncrementalCache} from 'open-next/cache/incremental/types' - -import fs from 'node:fs/promises' -import path from 'node:path' - -const buildId = process.env.NEXT_BUILD_ID -const basePath= path.resolve(process.cwd(), `../../cache/${buildId}`) - -const getCacheKey = (key: string) => { - return path.join(basePath, `${key}.cache`) -} - -const cache: IncrementalCache = { - name: 'dev-fs', - get: async (key: string) => { - const fileData = await fs.readFile(getCacheKey(key), 'utf-8') - const data = JSON.parse(fileData) - const {mtime} = await fs.stat(getCacheKey(key)) - return { - value: data, - lastModified: mtime.getTime(), - } - }, - set: async (key, value, isFetch) => { - const data = JSON.stringify(value) - await fs.writeFile(getCacheKey(key), data) - }, - delete: async (key) => { - await fs.rm(getCacheKey(key)) - } -} - -export default cache -``` - -```typescript -// dev/queue.ts -import type {Queue} from 'open-next/queue/types' - -declare global { - // This is declared in the global scope so that we can use it in the queue - // We need to use this one as next overrides the global fetch - var internalFetch: typeof fetch -} - -const queue: Queue = { - name: 'dev-queue', - send: async (message) => { - const prerenderManifest = (await import('open-next/adapters/config')).PrerenderManifest as any - const revalidateId : string = prerenderManifest.preview.previewModeId - await globalThis.internalFetch(`http://localhost:3000${message.MessageBody.url}`, { - method: "HEAD", - headers: { - "x-prerender-revalidate": revalidateId, - "x-isr": "1", - }, - },) - console.log('sending message', message) - }, -} - -export default queue -``` diff --git a/docs/pages/contribute/plugin.mdx b/docs/pages/contribute/plugin.mdx deleted file mode 100644 index a2041f33c..000000000 --- a/docs/pages/contribute/plugin.mdx +++ /dev/null @@ -1,71 +0,0 @@ -OpenNext use 3 different esbuild plugins internally to recompile or modify the source code depending on some condition like the next version or the runtime used - -#### OpenNext Replacement Plugin - -This plugin is used to replace some code in the source code with some other code. 
- -Here is a very simple example of how to use it: - -```typescript -openNextPlugin({ - // The target file to replace code in - target: /plugins\/default\.js/g, - // This is where the plugin will look for the code to replace - replacements: [require.resolve("./plugins/default.js")], - // This is to delete some code from the target file - deletes: ["id1"], - }) - - //To inject arbritary code by using (import at top of file): - - //#import - - import data from 'data' - const datum = data.datum - - //#endImport - - To replace code: - - //#override id1 - - export function overrideMe() { - // I will replace the "id1" block in the target file - } - - //#endOverride -``` - -#### OpenNext Resolve Plugin - -This plugin is used to avoid having to bundle the whole library in the final bundle. It will replace the dynamic import of the overrides with the one that we want to use. - -Here is a very simple example of how to use it: - -```typescript -openNextResolvePlugin({ - overrides: { - wrapper: "node", - converter: "node", - } -}) -``` - -#### OpenNext Edge Plugin - -This plugin is used to properly compile routes or middleware built for the `edge` runtime. - -Here is a very simple example of how to use it: - -```typescript -openNextEdgePlugin({ - // The path to the .next directory - nextDir: "next", - // The path to the edgeFunctionHandler.js file that we'll use to bundle the routing - edgeFunctionHandlerPath: "./edgeFunctionHandler.js", - // The middlewareInfo from the middlware manifest file - middlewareInfo: middlewareInfo - // If the app should be bundled for cloudflare workers - isInCloudflare: true -}) -``` \ No newline at end of file diff --git a/docs/pages/faq.mdx b/docs/pages/faq.mdx deleted file mode 100644 index 7e9001a78..000000000 --- a/docs/pages/faq.mdx +++ /dev/null @@ -1,25 +0,0 @@ -#### Will my Next.js app behave the same as it does on Vercel? - -OpenNext aims to deploy your Next.js app to AWS using services like CloudFront, S3, and Lambda. While Vercel uses some AWS services, it also has proprietary infrastructures, resulting in a natural gap of feature parity. And OpenNext is filling that gap. - -One architectural difference is how [middleware](https://nextjs.org/docs/advanced-features/middleware) is run, but this should not affect the behavior of most apps. - -On Vercel, the Next.js app is built in an undocumented way using the "[minimalMode](https://github.com/vercel/next.js/discussions/29801)". The middleware code is separated from the server code and deployed to edge locations, while the server code is deployed to a single region. When a user makes a request, the middleware code runs first. Then the request reaches the CDN. If the request is cached, the cached response is returned; otherwise, the request hits the server function. This means that the middleware is called even for cached requests. - -On the other hand, OpenNext uses the standard `next build` command, which generates a server function that includes the middleware code. This means that for cached requests, the CDN (CloudFront) will send back the cached response, and the middleware code is not run. - -We previously built the app using the "minimalMode" and having the same architecture as Vercel, where the middleware code would run in Lambda@Edge on Viewer Request. See the [`vercel-mode` branch](https://github.com/serverless-stack/open-next/tree/vercel-mode). However, we decided that this architecture was not a good fit on AWS for a few reasons: - -1. 
Cold start - Running middleware and server in two separate Lambda functions results in double the latency. -1. Maintenance - Because the "minimalMode" is not documented, there will likely be unhandled edge cases, and triaging would require constant reverse engineering of Vercel's code base. -1. Feature parity - Lambda@Edge functions triggered on Viewer Request do not have access to geolocation headers, which affects i18n support. - -#### How does OpenNext compared to AWS Amplify? - -OpenNext is an open source initiative, and there are a couple of advantages when compared to Amplify: - -1. The community contributions to OpenNext allows it to have better feature support. For example, Amplify does not currently support [on-demand revalidation](https://github.com/aws-amplify/amplify-hosting/issues/3116). - -1. Amplify's Next.js hosting is a black box. Resources are not deployed to your AWS account. All Amplify users share the same CloudFront CDN owned by the Amplify team. This prevents you from customizing the setup, and customization is important if you are looking for Vercel-like features. - -1. Amplify's implementation is closed-source. Bug fixes often take much longer to get fixed as you have to go through AWS support. And you are likely to encounter more quirks when hosting Next.js anywhere but Vercel. \ No newline at end of file diff --git a/docs/pages/get_started.mdx b/docs/pages/get_started.mdx deleted file mode 100644 index 3afa79d98..000000000 --- a/docs/pages/get_started.mdx +++ /dev/null @@ -1,36 +0,0 @@ -OpenNext allows you to deploy your Next.js apps using a growing list of frameworks. - -### SST - -The easiest way to deploy OpenNext to AWS is with [SST](https://docs.sst.dev/start/nextjs). This is maintained by the OpenNext team and only requires three simple steps: - -1. Run `npx create-sst@latest` in your Next.js app -2. Run `npm install` -3. Deploy to AWS `npx sst deploy` - -For more information, check out the SST docs: https://docs.sst.dev/start/nextjs - -### Ion - -You can also deploy OpenNext to AWS using [Ion](https://ion.sst.dev/). - -Check out the [Ion Docs](https://ion.sst.dev/docs/component/aws/nextjs/) - -### Other Frameworks - -The OpenNext community has contributed deployment options for a few other frameworks. - -- CDK (TS): https://github.com/jetbridge/cdk-nextjs -- CDK (TS, Java, Go, Py, .NET): https://github.com/datasprayio/open-next-cdk -- CloudFormation: https://github.com/serverless-stack/open-next/issues/32 -- Serverless Framework: https://github.com/serverless-stack/open-next/issues/32 -- Terraform: https://github.com/RJPearson94/terraform-aws-open-next -- Terraform: https://github.com/nhs-england-tools/terraform-aws-opennext#diagrams - -To use these, you'll need to run the following inside your Next.js app. - -```bash -$ npx open-next@latest build -``` - -If you are using OpenNext to deploy using a framework that is not listed here, please let us know so we can add it to the list. \ No newline at end of file diff --git a/docs/pages/index.mdx b/docs/pages/index.mdx deleted file mode 100644 index 5d1967a6a..000000000 --- a/docs/pages/index.mdx +++ /dev/null @@ -1,63 +0,0 @@ -import { SITE } from '../config'; -import { Callout } from 'nextra/components'; - - -This docs is for the V3 of OpenNext. If you are looking for the V2 docs, you can find them [here](/v2). - -If you're migrating from V2 to V3, you can find the migration guide [here](/migration#from-opennext-v2). 
- - - -### Open source Next.js adapter - ---- - -OpenNext takes the Next.js build output and converts it into packages that can be deployed across a variety of environments. -Natively OpenNext has support for AWS Lambda and classic Node Server. It also offer partial support for the `edge` runtime in Cloudflare Workers. - -One notable feature of OpenNext is its ability to split the Next.js output, enabling selective deployment to different targets such as AWS Lambda, Cloudflare Workers, or Amazon ECS. This facilitates a tailored deployment strategy that aligns with the specific needs of your application. - -Thanks to this, you could deploy part of your API to ECS, another part to Cloudflare Workers, your SSR routes to another ECS cluster, and your ISR/SSG routes to Lambda. - ---- - -While Vercel is great, it's not a good option if all your infrastructure is on AWS. Hosting it in your AWS account makes it easy to integrate with your backend. And it's a lot cheaper than Vercel. - -Vercel is also limited to serverless and to their way of doing things. OpenNext is open source and can be deployed to any platform that supports Node.js. - -Next.js, unlike Remix or Astro, doesn't have a way to self-host using **serverless**. You can run it as a Node application. This however doesn't work the same way as it does on Vercel. - ---- - -There have been several attempts to fix this. Broadly falling into two categories; open source framework specific implementations, or closed source SaaS products. - -Most of the open source options, like [serverless-nextjs](https://github.com/serverless-nextjs/serverless-next.js) are dead. Their implementation can also be specific to the framework they are targeting. - -Closed source SaaS products like [Amplify](https://aws.amazon.com/amplify/) have incomplete or incorrect implementations. And because they are closed source, you'll need to file a support ticket to get them updated. - ---- - -We need your help keeping it up to date and feature complete. Make sure to [**join us on Discord**](https://sst.dev/discord) and [**star us on GitHub**](https://github.com/sst/open-next). - -## Features - -OpenNext aims to support all Next.js 14 features. Some features are work in progress. Please open a [new issue](https://github.com/opennextjs/opennextjs-aws/issues/new) to let us know! - -- [x] App & Pages Router -- [x] API routes -- [x] Dynamic routes -- [x] Static site generation (SSG) -- [x] Server-side rendering (SSR) -- [x] [Incremental static regeneration (ISR)](/inner_workings/caching) -- [x] Middleware -- [x] Image optimization -- [x] [NextAuth.js](https://next-auth.js.org) -- [x] [Running in lambda@edge](/config/simple_example#running-in-lambdaedge) -- [x] [No cold start](/inner_workings/components/warmer) -- [x] Experimental streaming support - -## Who is using OpenNext? - -[NHS England](https://github.com/nhs-england-tools/terraform-aws-opennext), -[Udacity](https://engineering.udacity.com/deploying-next-js-on-the-edge-with-sst-is-sst-the-game-changer-its-claimed-to-be-1f05a0abc27c), -[Gymshark UK](https://uk.gymshark.com) diff --git a/docs/pages/inner_workings.mdx b/docs/pages/inner_workings.mdx deleted file mode 100644 index ada07ea02..000000000 --- a/docs/pages/inner_workings.mdx +++ /dev/null @@ -1,23 +0,0 @@ -When calling `open-next build`, OpenNext **runs `next build`** to build the Next.js app, and then **transforms the build output** to a format that can be deployed to AWS. 
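For reference, the whole pipeline is triggered by a single command run from the root of the Next.js app (shown here with `npx`; a pinned version or a `package.json` script works just as well):

```bash
# Runs `next build` and then writes the transformed output to the .open-next/ folder described below
npx open-next@latest build
```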
- -#### Building the Next.js app - -OpenNext runs the `build` script in your `package.json` file. Depending on the lock file found in the app, the corresponding packager manager will be used. Either `npm run build`, `yarn build`, or `pnpm build` will be run. You can overrride the build command by editing the `buildCommand` property in `open-next.config.ts`, see how-to in the [full example](/config/full_example). - -#### Transforming the build output - -The build output is then transformed into a format that can be deployed to AWS. The transformed output is generated inside the `.open-next` folder within your Next.js app. Files in `assets/` are ready to be uploaded to AWS S3. And the function code is wrapped inside Lambda handlers, ready to be deployed to AWS Lambda or Lambda@Edge. - -```bash -my-next-app/ - .open-next/ - cache/ -> ISR cache files to upload - This cannot be directly served - assets/ -> Static files to upload - server-functions/ - default/ -> Handler code for the default server - other-fn/ -> Handler code for another backend - revalidation-function/ -> Handler code for revalidation backend - image-optimization-function/ -> Handler code for image optimization backend - warmer-function/ -> Cron job code to keep server function warm - Not mandatory - dynamo-provider/ -> Code for a custom resource to populate the Tag Cache - Only necessary for app dir -``` \ No newline at end of file diff --git a/docs/pages/inner_workings/_meta.json b/docs/pages/inner_workings/_meta.json deleted file mode 100644 index 765e57198..000000000 --- a/docs/pages/inner_workings/_meta.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "routing": "Routing Layer", - "caching": "Caching (ISR/SSG)", - "cache_interception": "Optional Cache Interception", - "components": "Main Components", - "architecture": "Default Architecture" -} diff --git a/docs/pages/inner_workings/architecture.mdx b/docs/pages/inner_workings/architecture.mdx deleted file mode 100644 index 078533313..000000000 --- a/docs/pages/inner_workings/architecture.mdx +++ /dev/null @@ -1,112 +0,0 @@ -## Architecture overview - -OpenNext does not create the underlying infrastructure. You can create the infrastructure for your app with your preferred tool — SST, AWS CDK, Terraform, Serverless Framework, etc. - -This is the recommended setup. - -
- [Image: Architecture diagram]
- -We are going to take a look at every component created by OpenNext. - -#### Asset files - -OpenNext will create a `.open-next/assets` folder containing the hashed and un-hashed files. Those files can be directly served without going through the server function. Files in `.open-next/assets` should be served from the root of your website. For example, the file `.open-next/assets/favicon.ico` should be served from `/favicon.ico`. - - -There are two types of files in the `.open-next/assets` folder: - -**Hashed files** - -These are files with a hash component in the file name. Hashed files are be found in the `.open-next/assets/_next` folder, such as `.open-next/assets/_next/static/css/0275f6d90e7ad339.css`. The hash values in the filenames are guaranteed to change when the content of the files is modified. Therefore, hashed files should be cached both at the CDN level and at the browser level. The recommended cache control setting for these file is - -``` -public,max-age=31536000,immutable -``` - -**Un-hashed files** - -Other files inside the `.open-next/assets` folder are copied from your app's `public/` folder, such as `.open-next/assets/favicon.ico`. The filename for un-hashed files may remain unchanged when the content is modified. Un-hashed files should be cached at the CDN level, but not at the browser level. When the content of un-hashed files is modified, the CDN cache should be invalidated on deploy. The recommended cache control setting for these file is - -``` -public,max-age=0,s-maxage=31536000,must-revalidate -``` - -#### Cache files - -OpenNext will create a `.open-next/cache` folder containing cache data used by the incremental cache(i.e. ISR **and** SSG routes). These files should not be publicly accessible. - -There are two types of caches in the `.open-next/cache` folder: - -- Route cache: This cache includes `html` and `json` or `rsc` files that are prerendered during the build. They are merged into a single `.cache` file. They are used to seed the revalidation cache. -- Fetch cache: This cache includes fetch call responses, which might contain sensitive information. Make sure these files are not publicly accessible. - -#### Image optimization backend - -This backend handles image optimization requests when the Next.js `` component is used. The [sharp](https://www.npmjs.com/package/sharp) library, which is bundled with the function, is used to convert the image. The library is compiled against the selected architecture (by default `arm64`) and is intended to run on Node. - -Note that the image optimization backend responds with the `Cache-Control` header, so the image will be cached both at the CDN level and at the browser level. - -See [Image Optimization](/inner_workings/components/image_optimization) for more details. - -#### Servers Lambda backend - -These backends handles all other types of requests from the Next.js app, including Server-side Rendering (SSR) requests, Incremental Static Regeneration (ISR) requests, Static Site Generation requests (SSG) and API requests. OpenNext builds the Next.js app in **standalone** mode. The standalone mode generates a `.next` folder containing the **NextServer** class that handles requests and a `node_modules` folder with **all the dependencies** needed to run the `NextServer`. The structure looks like this: - -``` - .next/ -> NextServer - node_modules/ -> dependencies -``` - -The server backend adapter wraps around `NextServer` and exports a handler function that supports the Lambda request and response. 
The `server-function` bundle looks like this: - -```diff - .next/ -> NextServer -+ .open-next/ - node_modules/ -> dependencies -+ index.mjs -> server function adapter -``` - -**Monorepo** - -In the case of a monorepo, the build output looks slightly different. For example, if the app is located in `packages/web`, the build output looks like this: - -``` - packages/ - web/ - .next/ -> NextServer - node_modules/ -> dependencies from root node_modules (optional) - node_modules/ -> dependencies from package node_modules -``` - -In this case, the server function adapter needs to be created inside `packages/web` next to `.next/`. This is to ensure that the adapter can import dependencies from both `node_modules` folders. It is not a good practice to have the Lambda configuration coupled with the project structure, so instead of setting the Lambda handler to `packages/web/index.mjs`, we will add a wrapper `index.mjs` at the `server-function` bundle root that re-exports the adapter. The resulting structure looks like this: - -```diff - packages/ - web/ - .next/ -> NextServer -+ .open-next/ - node_modules/ -> dependencies from root node_modules (optional) -+ index.mjs -> server function adapter - node_modules/ -> dependencies from package node_modules -+ index.mjs -> adapter wrapper -``` - -This ensures that the function handler remains at `index.mjs`. - -#### Revalidation backend - -OpenNext will create a `.open-next/revalidation-function` folder containing the revalidation backend. - -This backend is supposed to handle revalidation requests from the revalidation queue. The revalidation queue is a FIFO queue that contains messages for revalidating routes. The revalidation backend polls the queue for messages and sends a HEAD request to the specified route for revalidation. - -#### Warmer backend - -OpenNext will create a `.open-next/warmer-function` folder containing the warmer backend. - -Read more on [how warming works](/v2/inner_workings/warming). - -#### Tag Provider backend - -This is used to populate the revalidation table with tags. \ No newline at end of file diff --git a/docs/pages/inner_workings/cache_interception.mdx b/docs/pages/inner_workings/cache_interception.mdx deleted file mode 100644 index 8bd13d965..000000000 --- a/docs/pages/inner_workings/cache_interception.mdx +++ /dev/null @@ -1,32 +0,0 @@ -By default OpenNext override the cache system of Next.js to store the cache in S3(Or any other incremental cache provider defined in config) using [`cacheHandler` from `next.config.js`](https://nextjs.org/docs/app/api-reference/next-config-js/incrementalCacheHandlerPath). It also uses a queue system to handle ISR revalidation. - -This is great but it still has go through `NextServer` which by default will load the js associated with the page even if the page is cached. This is not ideal for cold start, and makes it impossible to serve ISR/SSG pages inside an external middleware. - -Since OpenNext 3.1, we have added a new feature that we call Cache Interception that allows you to intercept the cache system directly inside OpenNext routing layer and serve the page directly from the cache without going through `NextServer`. If the cache interception fail, the request will be forwarded to the `NextServer` as usual. - -Enabling this alongside external middleware means that the external middleware will need to have all the proper permissions and env variable to interact with S3, DynamoDB and SQS (Or whatever you override it with). 
You can see [here](/inner_workings/components/server/node#special-overrides) for more details. - -This has the following benefits: -- Faster cold start (No need to load the js associated with the page if the page is cached) -- ISR/SSG route can be served directly from an external middleware -- If the external middleware is in front of the CDN, the middleware can be used for every route including ISR/SSG -- This will allow PPR (Partial Prerendering) to work as intended and demonstrated by Vercel.¹ **This is not implemented yet**. - -To enable cache interception, you need to add `enableCacheInterception` option in the `open-next.config.ts` file. - -```ts -// open-next.config.ts -import type { OpenNextConfig } from 'open-next/types/open-next'; -const config = { - default: { - }, - dangerous: { - enableCacheInterception: true, - }, -} satisfies OpenNextConfig; - -export default config; - -``` - -1. PPR outside of vercel does not work in the way they demonstrated. The reason is that the cache system is handled by `NextServer` and it has to reach the server **every single time**. PPR pages cannot be served from the CDN when using `next start` or the standalone ouput as of now. In some cases PPR without vercel can be slower than SSR (especially if your cache is not using the default filesystem). \ No newline at end of file diff --git a/docs/pages/inner_workings/caching.mdx b/docs/pages/inner_workings/caching.mdx deleted file mode 100644 index a06285000..000000000 --- a/docs/pages/inner_workings/caching.mdx +++ /dev/null @@ -1,112 +0,0 @@ -## Caching in Next and OpenNext - -Caching could be become tricky very fast when using Next outside of Vercel. There is a lot of things that need to be taken into account. - -Usually, you'll deploy your Next app with a CDN in front of it. This CDN will cache the responses from your Next app and serve them to the users. This is great for performance, but it can also be a problem when you need to invalidate the cache. We provide some code examples in this doc to help with [cloudfront cache invalidation](#cloudfront-cache-invalidation). **In OpenNext, you only need to do this if you do On Demand Revalidation**. - -Also the default Next.js standalone output (or `next start`) does not work in a serverless environment because it tries to do the revalidation in the background. - -Default Next.js also uses the file system to cache files. You can override it by providing your own cache implementation. This is done **automatically** when using OpenNext. - -There is also 2 issues with the default `Cache-Control` headers that Next.js sets. -They use this header by default `s-maxage=YOUR_REVALIDATION_TIME, stale-while-revalidate`. There is 2 issue with that: -- `stale-while-revalidate` is not the proper syntax for the `Cache-Control` header. It should be `stale-while-revalidate=TIME_WHERE_YOU_SERVE_STALE`. They added this [undocumented options](https://github.com/vercel/next.js/blob/952da876f7bb8590d0222b2579117f8a2bd301bb/packages/next/src/server/config-shared.ts#L202) to remedy this in recent version of Next -- Setting `s-maxage` to the same value for every request to the same page could be a bad idea. -Next can serve different content based on if you request the full html or are doing client side navigation (RSC or JSON for page router) - - This can cause inconsistencies in the cache with ISR especially when you can have big revalidation time. - - For example, let's assume you use app router, you have a link to your homepage in your main navbar and you set ISR to 1 day. 
Every other page will have a different RSC cache entry (for client side navigation). This will result in as many cache entry as you have pages in your app which all will have that same 1 day `s-maxage` value but could have been requested at very different time. This could lead to some pages being served with stale content for as long as 2 day. - -**All of these issues are automatically fixed for you in OpenNext** - -### Cloudfront cache invalidation - -When you manually revalidate the Next.js cache for a specific page, the ISR cache files stored on S3 will be updated. However, it is still necessary to invalidate the CloudFront cache: - -```ts -// pages/api/revalidate.js -export default async function handler(req, res) { - await res.revalidate("/foo"); - await invalidateCloudFrontPaths(["/foo"]); - // ... -} -``` - -If the pages router is in use, you must also invalidate the `_next/data/BUILD_ID/foo.json` path. The value for `BUILD_ID` can be found in the `.next/BUILD_ID` build output and can be accessed at runtime via the `process.env.NEXT_BUILD_ID` environment variable. - -```ts -await invalidateCloudFrontPaths(["/foo", `/_next/data/${process.env.NEXT_BUILD_ID}/foo.json`]); -``` - -And here is an example of the `invalidateCloudFrontPaths()` function: - -```ts -import { CloudFrontClient, CreateInvalidationCommand } from "@aws-sdk/client-cloudfront"; - -const cloudFront = new CloudFrontClient({}); - -async function invalidateCloudFrontPaths(paths: string[]) { - await cloudFront.send( - new CreateInvalidationCommand({ - // Set CloudFront distribution ID here - DistributionId: distributionId, - InvalidationBatch: { - CallerReference: `${Date.now()}`, - Paths: { - Quantity: paths.length, - Items: paths, - }, - }, - }), - ); -} -``` - -Note that manual CloudFront path invalidation incurs costs. According to the [AWS CloudFront pricing page](https://aws.amazon.com/cloudfront/pricing/): - -> No additional charge for the first 1,000 paths requested for invalidation each month. Thereafter, $0.005 per path requested for invalidation. - -Due to these costs, if multiple paths require invalidation, it is more economical to invalidate a wildcard path `/*`. For example: - -```ts -// This costs $0.005 x 3 = $0.015 after the first 1000 paths -await invalidateCloudFrontPaths(["/page/a", "/page/b", "/page/c"]); - -// This costs $0.005, but also invalidates other routes such as "page/d" -await invalidateCloudFrontPaths(["/page/*"]); -``` - -For on-demand revalidation via the [`next/cache` module](https://nextjs.org/docs/app/building-your-application/data-fetching/revalidating#using-on-demand-revalidation), if you want to retrieve the associated paths for a given tag, you can use this function: - -```ts -import { DynamoDBClient, QueryCommand } from '@aws-sdk/client-dynamodb'; - -const client = new DynamoDBClient({ region: process.env.CACHE_BUCKET_REGION }); - -async function getPaths(tag: string) { - try { - const { Items } = await client.send( - new QueryCommand({ - TableName: process.env.CACHE_DYNAMO_TABLE, - KeyConditionExpression: "#tag = :tag", - ExpressionAttributeNames: { - "#tag": "tag", - }, - ExpressionAttributeValues: { - ":tag": { S: `${process.env.NEXT_BUILD_ID}/${tag}` }, - }, - }), - ); - return ( - Items?.map( - (item) => - item.path?.S?.replace(`${process.env.NEXT_BUILD_ID}/`, "") ?? "", - ) ?? 
[] - ); - } catch (e) { - console.error("Failed to get by tag", e); - return []; - } -} -``` diff --git a/docs/pages/inner_workings/components/_meta.json b/docs/pages/inner_workings/components/_meta.json deleted file mode 100644 index 529196797..000000000 --- a/docs/pages/inner_workings/components/_meta.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "overview": "Overview", - "server": "Server", - "middleware": "Middleware", - "image_optimization": "Image Optimization", - "revalidation": "Revalidation", - "warmer": "Warmer", - "initializer": "Initializer" -} \ No newline at end of file diff --git a/docs/pages/inner_workings/components/image_optimization.mdx b/docs/pages/inner_workings/components/image_optimization.mdx deleted file mode 100644 index 6bef28b99..000000000 --- a/docs/pages/inner_workings/components/image_optimization.mdx +++ /dev/null @@ -1,26 +0,0 @@ -The image optimization backend is used to transform images to the desired format and quality. -It should be accessible at `/_next/image?url=URL_OF_THE_IMAGE&q=QUALITY&w=WIDTH` and should be used as a proxy to the image source. - -Streaming is **useless** for this backend, as we have to wait for the image to be transformed even for the headers. In the future this backend might be enhanced to support streaming. - -### Environment Variables - - BUCKET_NAME: The name of the bucket where the images are stored - - BUCKET_KEY_PREFIX: The prefix of the key where the images are stored - - OPENNEXT_STATIC_ETAG: If set to `true`, the ETag header will be set to the hash of the properties of the image (href, width, quality, buildId). This will allow us to return 304 responses when the image properties has not changed. - -### Special Overrides - -#### Loader -It is used to load **internal** images. By default, it uses S3 API to load the images from the bucket. - -You can customize the loader by providing a custom loader function to the `loader` prop. -It should return -```typescript -type ImageLoader = BaseOverride & { - load: (url: string) => Promise<{ - body?: Readable; - contentType?: string; - cacheControl?: string; - }>; -} -``` \ No newline at end of file diff --git a/docs/pages/inner_workings/components/initializer.mdx b/docs/pages/inner_workings/components/initializer.mdx deleted file mode 100644 index ced2cace8..000000000 --- a/docs/pages/inner_workings/components/initializer.mdx +++ /dev/null @@ -1,16 +0,0 @@ -The Initializer is a special backend that should run only once during deployment. It's default role is to populate the tag cache with the prerendered tags. - -It uses these events and results: -```typescript -interface InitializationFunctionEvent { - type: "initializationFunction"; - requestType: "create" | "update" | "delete"; - resourceId: typeof PHYSICAL_RESOURCE_ID; -} - -type InitializationFunctionResult = InitializationFunctionEvent; -``` - -### Special Overrides - -The initializer uses the tag cache to store the prerendered tags. \ No newline at end of file diff --git a/docs/pages/inner_workings/components/middleware.mdx b/docs/pages/inner_workings/components/middleware.mdx deleted file mode 100644 index 4694e579a..000000000 --- a/docs/pages/inner_workings/components/middleware.mdx +++ /dev/null @@ -1,45 +0,0 @@ -import { Callout } from 'nextra/components' - - - When we talk about middleware in this docs, we are referring both to the next `middleware` as well as the whole `routing` layer of next. - For example, `next.config.js` headers, redirects, rewrites, etc. are all considered `routing` in this context. 
- - -Since V3, you can decide to move the middleware outside of the server into it's own function that you can place in front of your others backend functions. - -By default, the middleware is inside the server, but you can move it outside by setting `middleware.external` to `true` in your `open-next.config.ts` file. -```ts -{ - middleware: { - external: true - } -} -``` -It uses the `aws-lambda` wrapper and the `aws-cloudfront` converter by default so that you can use it with AWS Lambda@edge and Cloudfront. -You can also use it inside Cloudflare Workers by setting the wrapper to `cloudflare` and the converter to `edge`. - -### Special overrides - -#### Origin resolver - -Since your app could be split into different origins, you need to be able to retrieve the right origin for the request. That's the purpose of the `originResolver` function. - -By default, it uses the `pattern-env` resolver which uses an `OPEN_NEXT_ORIGIN` environment variable to resolve the origin as well as the pattern defined in your `open-next.config.ts`. This env var should be a stringified version of an object of this shape: -```ts -{ - // Key should be the same as the one used in `open-next.config.ts` for the functions - [origin: string]: { - host: string; - protocol: "http" | "https"; - port?: number; - customHeaders?: Record; - } -} -``` -You can of course override this behavior by providing your own `originResolver` function in your `open-next.config.ts` file. -This is the type of the `originResolver` function: -```ts -type OriginResolver = BaseOverride & { - resolve: (path: string) => Promise; -}; -``` \ No newline at end of file diff --git a/docs/pages/inner_workings/components/overview.mdx b/docs/pages/inner_workings/components/overview.mdx deleted file mode 100644 index aa040fcf1..000000000 --- a/docs/pages/inner_workings/components/overview.mdx +++ /dev/null @@ -1,104 +0,0 @@ -import { Callout } from 'nextra/components'; - - - This documentation is still a work in progress. Some parts might be missing or incomplete. - -This is also mostly for advanced use cases (Overriding some default, provide support for another Cloud provider, creating your own IAC component ...), most people won't need to use this. - - - -## General - -- For the node runtime, you need at least Node 18. -- For the edge runtime, you can use both Node 18+ or cloudflare workers with `node_compat` flag enabled (Cloudflare workers support is experimental) -- Open-next doesn't work well on Windows. We recommend using WSL2 or a Linux VM. - -## Backends - -Every backend is a separate module. The following backends are available: - -- Servers (Node or Edge runtime) -- Middleware (If manually set to external) -- Image Optimization -- Revalidation -- Warmer -- Initializer - -All these backends can be overrided to fit your needs. They share some common ground for the configuration and the way they are used. - -### Common Overrides - -Every custom overrides (not `string`) share a `BaseOverride` types and needs to be wrapped this way: - -```typescript -type BaseOverride = { - name: string; -}; - -// This is the type of the override -type LazyLoadedOverride = () => Promise; -``` - -#### Wrapper - -The wrapper is the main entrypoint for the backend. 
This is the type of the wrapper: - -```typescript -type WrapperHandler< - E extends BaseEventOrResult = InternalEvent, - R extends BaseEventOrResult = InternalResult, -> = ( - handler: OpenNextHandler, - converter: Converter, -) => Promise<(...args: any[]) => any>; - -export type Wrapper< - E extends BaseEventOrResult = InternalEvent, - R extends BaseEventOrResult = InternalResult, -> = BaseOverride & { - wrapper: WrapperHandler; - supportStreaming: boolean; -}; -``` - -The `handler` is the original handler that is being wrapped. The `converter` is the converter that is being used to convert the event and the result. - -Here is a list of the provided wrappers: - -- `aws-lambda` - The default AWS Lambda wrapper - It is the default wrapper -- `aws-lambda-streaming` - The AWS Lambda wrapper with streaming support -- `node` - The Node wrapper - Create a node server, not suitable for serverless -- `cloudflare` - The Cloudflare wrapper - For Cloudflare Worker - -##### Aws Lambda streaming - -If you want to enable streaming in lambda, you need to use this wrapper. It is not enabled by default. - -Be aware that you might encounter some unexpected behaviors when using streaming. Aws Lambda seems to apply some buffering on the response. In some rare cases it might mean that streaming won't properly start. This is an issue with the lambda runtime itself, but this should only impact TTFB (Time To First Byte) in those cases. See this [github issue](https://github.com/aws/aws-lambda-nodejs-runtime-interface-client/issues/94) - -#### Converter - -The converter is used to convert the event and the result. This is the type of the converter: - -```typescript -export type Converter< - E extends BaseEventOrResult = InternalEvent, - R extends BaseEventOrResult = InternalResult, -> = BaseOverride & { - convertFrom: (event: any) => Promise; - convertTo: (result: R, originalRequest?: any) => any; -}; -``` - -The `convertFrom` method is used to convert the event to the internal event used by the backend internal handler. The `convertTo` method is used to convert the result to the expected result. -`convertTo` can usually be ignored when using streaming as the result is being streamed directly. - -Here is a list of the provided converters: - -- `aws-apigw-v2` - The default AWS API Gateway v2 converter - It is the default converter -- `aws-apigw-v1` - The AWS API Gateway v1 converter -- `aws-cloudfront` - The AWS CloudFront converter - Used for lambda@edge -- `edge` - The Edge converter - Converts from `Request` and to `Response` -- `node` - The Node converter - Converts from `IncomingMessage` and to `ServerResponse` -- `sqs-revalidate` - The SQS Revalidate converter - Used by default for the revalidation backend -- `dummy` - The Dummy converter - Does nothing diff --git a/docs/pages/inner_workings/components/revalidation.mdx b/docs/pages/inner_workings/components/revalidation.mdx deleted file mode 100644 index 7bc125319..000000000 --- a/docs/pages/inner_workings/components/revalidation.mdx +++ /dev/null @@ -1,16 +0,0 @@ -The revalidation backend is used to read the queue and revalidate the routes. It is used for ISR only. -For every received message it will trigger a `HEAD` request with `x-prerender-revalidate` header to the host. 
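For illustration, here is a minimal TypeScript sketch of that behavior; it is not the actual implementation. The record shape mirrors the `RevalidateEvent` type shown below, and `PRERENDER_REVALIDATE_ID` is a hypothetical stand-in for the preview-mode id that Next.js expects in this header.

```ts
// Sketch only: roughly what the revalidation backend does for each queue record.
type RevalidateRecord = { host: string; url: string };

async function revalidate(records: RevalidateRecord[]): Promise<void> {
  await Promise.allSettled(
    records.map(({ host, url }) =>
      fetch(`https://${host}${url}`, {
        method: "HEAD",
        headers: {
          // Hypothetical env var; Next.js checks this value against its
          // preview-mode id before revalidating the route.
          "x-prerender-revalidate": process.env.PRERENDER_REVALIDATE_ID ?? "",
        },
      }),
    ),
  );
}
```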
- -### Special Event and Result types -```typescript -interface RevalidateEvent { - type: "revalidate"; - records: { - host: string; - url: string; - }[]; -} - interface RevalidateResult { - type: "revalidate"; - } -``` \ No newline at end of file diff --git a/docs/pages/inner_workings/components/server/edge.mdx b/docs/pages/inner_workings/components/server/edge.mdx deleted file mode 100644 index 30404ce4c..000000000 --- a/docs/pages/inner_workings/components/server/edge.mdx +++ /dev/null @@ -1 +0,0 @@ -TODO \ No newline at end of file diff --git a/docs/pages/inner_workings/components/server/node.mdx b/docs/pages/inner_workings/components/server/node.mdx deleted file mode 100644 index 506a75358..000000000 --- a/docs/pages/inner_workings/components/server/node.mdx +++ /dev/null @@ -1,132 +0,0 @@ -This is the main entrypoint for SSR, ISR or SSG routes. -This docs only apply for the node runtime. - -### ISR/SSG - -In standalone mode, Next.js prebuilds the ISR/SSG cache during the build process. And at runtime, **NextServer** expects this cache locally on the server. This works effectively when the server is run on a single web server machine, sharing the cache across all requests. In a serverless environment, the cache needs to be housed centrally in a location accessible by all server Lambda function instances.By default S3 serves as this central location. - -To facilitate this: - -- ISR cache files are excluded from the `server-function` bundle and instead are uploaded to the cache bucket. -- The default cache handler is replaced with a custom cache handler by configuring the [`incrementalCacheHandlerPath`](https://nextjs.org/docs/app/api-reference/next-config-js/incrementalCacheHandlerPath) field in `next.config.js`. -- The custom cache handler manages the cache files on S3 by default, handling both reading and writing operations. -- Since we're using FIFO queue, if we want to process more than one revalidation at a time, we need to have separate Message Group IDs. We generate a Message Group ID for each revalidation request based on the route path. This ensures that revalidation requests for the same route are processed only once. You can use `MAX_REVALIDATE_CONCURRENCY` environment variable to control the number of revalidation requests processed at a time. By default, it is set to 10. -- The `revalidation-function` polls the message from the queue and makes a `HEAD` request to the route with the `x-prerender-revalidate` header. -- The `server-function` receives the `HEAD` request and revalidates the cache. -- Tags are handled differently in a dynamodb table. We use a separate table to store the tags for each route. The custom cache handler will update the tags in the table when it updates the cache. - -#### Lifetime of an ISR request for a stale page - -1. Cloudfront receives a request for a page. Let's assume the page is stale in Cloudfront. -2. Cloudfront forwards the request to the `server-function` in the background but still returns the cached version. -3. The `server-function` checks in the incremental cache. If the page is stale, it sends the stale response back to Cloudfront while sending a message to the revalidation queue to trigger background revalidation. It will also change the cache-control header to `s-maxage=2, stale-while-revalidate=2592000` -4. A new request comes in for the same page after 2 seconds. Cloudfront sends the cached version back to the user and forwards the request to the `server-function`. -5. 
If the revalidation is done, the `server-function` will update the cache and send the updated response back to Cloudfront. Subsequent request will then get the updated version. Otherwise, we go back to step 3. - -#### Lifetime of an SSG request for a page revalidated using `revalidateTag` - -1. You revalidate the page using `revalidateTag` or `revalidatePath`. You should also invalidate the cache in Cloudfront -2. Cloudfront receives a request for a page. -3. Cloudfront forwards the request to the `server-function`. -4. The `server-function` checks in the incremental cache, then in the tag cache. If the page is stale in the tag cache, it will trigger an immediate revalidation and send the updated response back to Cloudfront. -5. The user will receive the updated version of the page with the `x-next-cache` header set to `MISS`. - -### Special Overrides - -All these overrides apply on a per function basis. You need to specify them for each function you want to override. - -#### Incremental Cache - -The Incremental Cache is a cache that is used to store the results of the ISR **and** SSG pages as well as the fetch cache. -By default, OpenNext uses S3 as the default incremental cache. - -You can override the default cache by setting the `override.incrementalCache` property in the `open-next.config.ts` file. -You can look at the expected types [here](https://github.com/opennextjs/opennextjs-aws/blob/f1b2b78ce622ceae496ee566abf74f07018619f4/packages/open-next/src/cache/incremental/types.ts#L38) - -##### Default S3 Incremental Cache - -The default S3 Incremental Cache uses the `@aws-sdk/client-s3` to interact with the S3 bucket. It needs to have the proper permissions to read and write to the bucket. - -File in S3 should follow the following structure: -- `Key_Prefix/BUILD_ID/path/to/page.cache` - For the cache of a page -- `Key_Prefix/__fetch/BUILD_ID/fetch-cache-key` - For the fetch cache - -The default S3 Incremental Cache can be configured using the following environment variables: - -###### Environment Variables -- CACHE_BUCKET_REGION: The region of the S3 bucket -- CACHE_BUCKET_NAME: The name of the S3 bucket -- CACHE_BUCKET_KEY_PREFIX: The prefix of the keys in the S3 bucket - Optional -- AWS_SDK_S3_MAX_ATTEMPTS: The maximum number of attempts to make to the S3 bucket - Optional - -#### Tag Cache - -The Tag Cache is a cache that is used to store the tags for the ISR/SSG pages as well as the fetch cache. -By default, OpenNext uses DynamoDB as the default incremental cache. - -You can override the default cache by setting the `override.tagCache` property in the `open-next.config.ts` file. -You can look at the expected types [here](https://github.com/opennextjs/opennextjs-aws/blob/f1b2b78ce622ceae496ee566abf74f07018619f4/packages/open-next/src/cache/tag/types.ts#L1) - -##### Default DynamoDB Tag Cache - -The default DynamoDB Tag Cache uses the `@aws-sdk/client-dynamodb` to interact with the DynamoDB table. It needs to have the proper permissions to read and write to the table. - -Tags in DynamoDB should follow the following structure: -```typescript -type Tag = { - path: string, // The path of the page - tag: string, // The tag of the page - revalidatedAt: number, // The time at which the page has been revalidated -} - -``` -We use an index called `revalidate` with `path` as a partition key and `revalidatedAt` as the sort key. -It needs to be pre-populated with the tags of the pages that are being generated. 
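For example, pre-populating one entry that matches this schema could look roughly like the sketch below. This is an illustration only: the initializer backend normally seeds the table for you, and the `NEXT_BUILD_ID` key prefix simply mirrors the convention used in the `getPaths` example earlier in these docs.

```ts
import { DynamoDBClient, PutItemCommand } from "@aws-sdk/client-dynamodb";

const client = new DynamoDBClient({ region: process.env.CACHE_BUCKET_REGION });

// Sketch: write a single tag -> path mapping so the `revalidate` index
// (partition key `path`, sort key `revalidatedAt`) can later resolve it.
async function seedTag(tag: string, path: string) {
  await client.send(
    new PutItemCommand({
      TableName: process.env.CACHE_DYNAMO_TABLE,
      Item: {
        tag: { S: `${process.env.NEXT_BUILD_ID}/${tag}` },
        path: { S: `${process.env.NEXT_BUILD_ID}/${path}` },
        revalidatedAt: { N: `${Date.now()}` },
      },
    }),
  );
}
```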
- -The default DynamoDB Tag Cache can be configured using the following environment variables: - -###### Environment Variables -- CACHE_BUCKET_REGION: The region of the DynamoDB table -- CACHE_DYNAMO_TABLE: The name of the DynamoDB table -- AWS_SDK_DYNAMODB_MAX_ATTEMPTS: The maximum number of attempts to make to the DynamoDB table - Optional -- DYNAMO_BATCH_WRITE_COMMAND_CONCURRENCY: The number of concurrent batch write commands to make to the DynamoDB table - Optional Default to 4 - -#### Revalidation queue - -The revalidation queue is a queue that is used to store the pages that need to be revalidated. -By default, OpenNext uses SQS as the default revalidation queue. - -You can override the default queue by setting the `override.queue` property in the `open-next.config.ts` file. - -Here is the expected type for the queue override: -```ts -interface QueueMessage { - MessageDeduplicationId: string; - MessageBody: { - host: string; - url: string; - }; - MessageGroupId: string; -} - -export interface Queue { - send(message: QueueMessage): Promise; - name: string; -} -``` -The send function will be called when a page is marked as `STALE`. You don't have to use a queue here, you can use any other mechanism to send the message to the revalidation worker or even do the revalidation in the same process. - -#### Shared Environment Variables - -- MAX_REVALIDATE_CONCURRENCY: The number of revalidation requests processed at a time. - Optional Default to 10 - -##### Default SQS Revalidation Queue - -The default SQS Revalidation Queue uses the `@aws-sdk/client-sqs` to interact with the SQS queue. It needs to have the proper permissions to send messages to the queue. - -The default SQS Revalidation Queue can be configured using the following environment variables: - -###### Environment Variables - -- REVALIDATION_QUEUE_REGION: The region of the SQS queue -- REVALIDATION_QUEUE_URL: The URL of the SQS queue \ No newline at end of file diff --git a/docs/pages/inner_workings/components/warmer.mdx b/docs/pages/inner_workings/components/warmer.mdx deleted file mode 100644 index 65ae199f7..000000000 --- a/docs/pages/inner_workings/components/warmer.mdx +++ /dev/null @@ -1,28 +0,0 @@ -The warmer backend is used to warm up the server lambda in case of serverless. This is useful to avoid cold starts. - -### Special Event and Result -```typescript -interface WarmerEvent { - type: "warmer"; - warmerId: string; - index: number; - concurrency: number; - delay: number; -} - -interface WarmerResponse { - type: "warmer"; - serverId: string; -} -``` - -### Special Overrides - -#### Invoke function -You can customize the `invoke` function to handle the `warmer` event. By default it uses lambda invoke command. The default invoke needs a `WARM_PARAMS` environment variable that is a stringified version of this -```typescript -{ - concurrency: number; - function: string; -}[] -``` \ No newline at end of file diff --git a/docs/pages/inner_workings/routing.mdx b/docs/pages/inner_workings/routing.mdx deleted file mode 100644 index 7be01830a..000000000 --- a/docs/pages/inner_workings/routing.mdx +++ /dev/null @@ -1,22 +0,0 @@ -OpenNext doesn't use the default routing provided by Next.js. Instead, it uses its own routing system. - -Historically, OpenNext used the same routing system as Next.js. But in Next 13.4.13, they moved the routing system outside of `NextServer` and separated every major part of the app (i.e. app router, page router and the routing) inside jest workers. 
This caused terrible cold start (around 8s for a default `create next-app` app) and made it impossible to serve ISR/SSG pages inside lambda. Since then, OpenNext has been using its own routing system for every app that uses Next 13.4.13 or higher. FYI Vercel use an undocumented `minimalMode` inside `NextServer` to bypass the routing and cache system so that it is done in their infra instead of the lambda. - -This move also allow OpenNext to have a more flexible routing system. With OpenNext you can put the routing layer in a function (lambda@edge or cloudflare workers) in front of your server. You can even serve ISR/SSG directly from the routing layer. - -Here is a list of features that OpenNext routing system handle: - -- From `next.config.js` (the rest is handled by `NextServer` itself): - - Headers - - Redirects - - Rewrites - - basePath - - i18n -- [Middleware](#next-middleware) -- [Optional Cache Interception](/inner_workings/cache_interception) (i.e. serve ISR/SSG directly from the routing layer) -- Handle 404 in some cases (i.e. when the page does not correspond to any of the regex routes) - - -#### Next Middleware - -The Next middleware in OpenNext is not running in the same way as in Next.js. In Next.js, the middleware is running inside the `NextServer` inside a fake edge runtime. In OpenNext, we modify the middleware and run it fully inside the routing layer. So if you run the routing layer in Node, you can use Node api inside the middleware (it's a bit tricky because it won't work with `next dev` and involves some workaround because Next will remove Node api during bundling. Some example [here](/config/custom_overrides#define-a-global-to-use-node-in-the-middleware)). \ No newline at end of file diff --git a/docs/pages/migration.mdx b/docs/pages/migration.mdx deleted file mode 100644 index 22b087820..000000000 --- a/docs/pages/migration.mdx +++ /dev/null @@ -1,45 +0,0 @@ -If your Next.js app is already deployed to another platform or using another open-source framework, you'd want to take a look here: - -#### From OpenNext V2 - -If you used all the default configurations, you should be able to deploy your app without any changes. - -OpenNext V3 replaced all the build args with a [custom config file](/config#configuration-file). - -Here are the old build args and their new equivalent: - -- `--build-command` : `buildCommand` inside the config file. -- `--app-path`: `appPath` inside the config file. -- `--build-output-path`: `buildOutputPath` inside the config file. -- `--package-json-path`: `packageJsonPath` inside the config file. -- `--minify`: `minify` options inside each function property. This is enabled on a per function basis now. -- `--streaming`: This is dependent on the wrapper you're using on a per function basis. To enable streaming in aws lambda, you can set `wrapper: 'aws-lambda-streaming'` in the override property of the function. Please note that AWS lambda streaming still suffer from buffering issue from the runtime itself. See [here](/inner_workings/components/overview#aws-lambda-streaming) for more info. -- `--dangerously-disable-dynamodb-cache`: `dangerous.disableTagCache` inside the config file. -- `--dangerously-disable-incremental-cache`: `dangerous.disableIncrementalCache` inside the config file. - -#### Vercel - -Everything should already be set up for you. But there are a few things you should know: - -- Streaming by default is disabled in open-next (for now). It is very **experimental** right now see [here](/v2/inner_workings/streaming) for more info. 
-- Middleware is not run for static file and for image requests with open-next. - -#### AWS Amplify - -TODO - -#### Netlify - -TODO - -#### serverless-nextjs - -You should remove some stuff from your `next.config.js` file, otherwise you'll get a build error.: - -```js -module.exports = { - // ... - target: 'serverless', // remove this - // ... -} -``` diff --git a/docs/pages/reference-implementation.mdx b/docs/pages/reference-implementation.mdx deleted file mode 100644 index d2898cca1..000000000 --- a/docs/pages/reference-implementation.mdx +++ /dev/null @@ -1,434 +0,0 @@ -import { Callout } from 'nextra/components' - -In order to help deploying OpenNext with your own IAC implementation, we created a simple reference implementation using aws-cdk. - -If you wish to use it, just copy the code for the construct below. If you use it inside sst, make sure to use the same version of aws-cdk as sst. - - - - This is a reference implementation, and it is not meant to be used in production as is. - - This is just here to help you understand how to use the new features of OpenNext. - - There is some features that are not implemented like the warmer function, or everything related to lambda@edge(It requires inserting env variables which is out of scope of this implementation). - - -```ts -import { Construct } from "constructs"; -import { readFileSync } from "fs"; -import path from "path"; -import { BlockPublicAccess, Bucket } from "aws-cdk-lib/aws-s3"; -import { BucketDeployment, Source } from "aws-cdk-lib/aws-s3-deployment"; -import { - CustomResource, - Duration, - Fn, - RemovalPolicy, - Stack, -} from "aws-cdk-lib/core"; -import { - AllowedMethods, - BehaviorOptions, - CacheCookieBehavior, - CacheHeaderBehavior, - CachePolicy, - CacheQueryStringBehavior, - CachedMethods, - Distribution, - ICachePolicy, - ViewerProtocolPolicy, - FunctionEventType, - OriginRequestPolicy, - Function as CloudfrontFunction, - FunctionCode, -} from "aws-cdk-lib/aws-cloudfront"; -import { HttpOrigin, S3Origin } from "aws-cdk-lib/aws-cloudfront-origins"; -import { - Code, - Function as CdkFunction, - FunctionUrlAuthType, - InvokeMode, - Runtime, -} from "aws-cdk-lib/aws-lambda"; -import { - TableV2 as Table, - AttributeType, - Billing, -} from "aws-cdk-lib/aws-dynamodb"; -import { - Service, - Source as AppRunnerSource, - Memory, - HealthCheck, - Cpu, -} from "@aws-cdk/aws-apprunner-alpha"; -import { DockerImageAsset } from "aws-cdk-lib/aws-ecr-assets"; -import { Queue } from "aws-cdk-lib/aws-sqs"; -import { SqsEventSource } from "aws-cdk-lib/aws-lambda-event-sources"; -import { IGrantable } from "aws-cdk-lib/aws-iam"; -import { Provider } from "aws-cdk-lib/custom-resources"; -import { RetentionDays } from "aws-cdk-lib/aws-logs"; - -type BaseFunction = { - handler: string; - bundle: string; -}; - -type OpenNextFunctionOrigin = { - type: "function"; - streaming?: boolean; -} & BaseFunction; - -type OpenNextECSOrigin = { - type: "ecs"; - bundle: string; - dockerfile: string; -}; - -type OpenNextS3Origin = { - type: "s3"; - originPath: string; - copy: { - from: string; - to: string; - cached: boolean; - versionedSubDir?: string; - }[]; -}; - -type OpenNextOrigins = - | OpenNextFunctionOrigin - | OpenNextECSOrigin - | OpenNextS3Origin; - -interface OpenNextOutput { - edgeFunctions: { - [key: string]: BaseFunction; - }; - origins: { - s3: OpenNextS3Origin; - default: OpenNextFunctionOrigin | OpenNextECSOrigin; - imageOptimizer: OpenNextFunctionOrigin | OpenNextECSOrigin; - [key: string]: OpenNextOrigins; - }; - behaviors: { - 
pattern: string; - origin?: string; - edgeFunction?: string; - }[]; - additionalProps?: { - disableIncrementalCache?: boolean; - disableTagCache?: boolean; - initializationFunction?: BaseFunction; - warmer?: BaseFunction; - revalidationFunction?: BaseFunction; - }; -} - -interface OpenNextCdkReferenceImplementationProps { - openNextPath: string; -} - -export class OpenNextCdkReferenceImplementation extends Construct { - private openNextOutput: OpenNextOutput; - private bucket: Bucket; - private table: Table; - private queue: Queue; - - private staticCachePolicy: ICachePolicy; - private serverCachePolicy: CachePolicy; - - public distribution: Distribution; - - constructor( - scope: Construct, - id: string, - props: OpenNextCdkReferenceImplementationProps, - ) { - super(scope, id); - this.openNextOutput = JSON.parse( - readFileSync( - path.join(props.openNextPath, "open-next.output.json"), - "utf-8", - ), - ) as OpenNextOutput; - - this.bucket = new Bucket(this, "OpenNextBucket", { - publicReadAccess: false, - blockPublicAccess: BlockPublicAccess.BLOCK_ALL, - autoDeleteObjects: true, - removalPolicy: RemovalPolicy.DESTROY, - enforceSSL: true, - }); - this.table = this.createRevalidationTable(); - this.queue = this.createRevalidationQueue(); - - const origins = this.createOrigins(); - this.serverCachePolicy = this.createServerCachePolicy(); - this.staticCachePolicy = this.createStaticCachePolicy(); - this.distribution = this.createDistribution(origins); - } - - private createRevalidationTable() { - const table = new Table(this, "RevalidationTable", { - partitionKey: { name: "tag", type: AttributeType.STRING }, - sortKey: { name: "path", type: AttributeType.STRING }, - pointInTimeRecovery: true, - billing: Billing.onDemand(), - globalSecondaryIndexes: [ - { - indexName: "revalidate", - partitionKey: { name: "path", type: AttributeType.STRING }, - sortKey: { name: "revalidatedAt", type: AttributeType.NUMBER }, - }, - ], - removalPolicy: RemovalPolicy.DESTROY, - }); - - const initFn = this.openNextOutput.additionalProps?.initializationFunction; - - const insertFn = new CdkFunction(this, "RevalidationInsertFunction", { - description: "Next.js revalidation data insert", - handler: initFn?.handler ?? "index.handler", - // code: Code.fromAsset(initFn?.bundle ?? ""), - code: Code.fromAsset(".open-next/dynamodb-provider"), - runtime: Runtime.NODEJS_18_X, - timeout: Duration.minutes(15), - memorySize: 128, - environment: { - CACHE_DYNAMO_TABLE: table.tableName, - }, - }); - - const provider = new Provider(this, "RevalidationProvider", { - onEventHandler: insertFn, - logRetention: RetentionDays.ONE_DAY, - }); - - new CustomResource(this, "RevalidationResource", { - serviceToken: provider.serviceToken, - properties: { - version: Date.now().toString(), - }, - }); - - return table; - } - - private createOrigins() { - const { - s3: s3Origin, - default: defaultOrigin, - imageOptimizer: imageOrigin, - ...restOrigins - } = this.openNextOutput.origins; - const s3 = new S3Origin(this.bucket, { - originPath: s3Origin.originPath, - }); - for (const copy of s3Origin.copy) { - new BucketDeployment(this, `OpenNextBucketDeployment${copy.from}`, { - sources: [Source.asset(copy.from)], - destinationBucket: this.bucket, - destinationKeyPrefix: copy.to, - prune: false, - }); - } - const origins = { - s3: new S3Origin(this.bucket, { - originPath: s3Origin.originPath, - originAccessIdentity: undefined, - }), - default: - defaultOrigin.type === "function" - ? 
this.createFunctionOrigin("default", defaultOrigin) - : this.createAppRunnerOrigin("default", defaultOrigin), - imageOptimizer: - imageOrigin.type === "function" - ? this.createFunctionOrigin("imageOptimizer", imageOrigin) - : this.createAppRunnerOrigin("imageOptimizer", imageOrigin), - ...Object.entries(restOrigins).reduce( - (acc, [key, value]) => { - if (value.type === "function") { - acc[key] = this.createFunctionOrigin(key, value); - } else if (value.type === "ecs") { - acc[key] = this.createAppRunnerOrigin(key, value); - } - return acc; - }, - {} as Record, - ), - }; - return origins; - } - - private createRevalidationQueue() { - const queue = new Queue(this, "RevalidationQueue", { - fifo: true, - receiveMessageWaitTime: Duration.seconds(20), - }); - const consumer = new CdkFunction(this, "RevalidationFunction", { - description: "Next.js revalidator", - handler: "index.handler", - code: Code.fromAsset( - this.openNextOutput.additionalProps?.revalidationFunction?.bundle ?? "", - ), - runtime: Runtime.NODEJS_18_X, - timeout: Duration.seconds(30), - }); - consumer.addEventSource(new SqsEventSource(queue, { batchSize: 5 })); - return queue; - } - - private getEnvironment() { - return { - CACHE_BUCKET_NAME: this.bucket.bucketName, - CACHE_BUCKET_KEY_PREFIX: "_cache", - CACHE_BUCKET_REGION: Stack.of(this).region, - REVALIDATION_QUEUE_URL: this.queue.queueUrl, - REVALIDATION_QUEUE_REGION: Stack.of(this).region, - CACHE_DYNAMO_TABLE: this.table.tableName, - // Those 2 are used only for image optimizer - BUCKET_NAME: this.bucket.bucketName, - BUCKET_KEY_PREFIX: "_assets", - }; - } - - private grantPermissions(grantable: IGrantable) { - this.bucket.grantReadWrite(grantable); - this.table.grantReadWriteData(grantable); - this.queue.grantSendMessages(grantable); - } - - private createFunctionOrigin(key: string, origin: OpenNextFunctionOrigin) { - const environment = this.getEnvironment(); - const fn = new CdkFunction(this, `${key}Function`, { - runtime: Runtime.NODEJS_18_X, - handler: origin.handler, - code: Code.fromAsset(origin.bundle), - environment, - memorySize: 1024, - }); - const fnUrl = fn.addFunctionUrl({ - authType: FunctionUrlAuthType.NONE, - invokeMode: origin.streaming - ? InvokeMode.RESPONSE_STREAM - : InvokeMode.BUFFERED, - }); - this.grantPermissions(fn); - return new HttpOrigin(Fn.parseDomainName(fnUrl.url)); - } - - // We are using AppRunner because it is the easiest way to demonstrate the new feature. - // You can use any other container service like ECS, EKS, Fargate, etc. 
- private createAppRunnerOrigin( - key: string, - origin: OpenNextECSOrigin, - ): HttpOrigin { - const imageAsset = new DockerImageAsset(this, `${key}ImageAsset`, { - directory: origin.bundle, - // file: origin.dockerfile, - }); - const service = new Service(this, `${key}Service`, { - source: AppRunnerSource.fromAsset({ - asset: imageAsset, - - imageConfiguration: { - port: 3000, - environmentVariables: this.getEnvironment(), - }, - }), - serviceName: key, - autoDeploymentsEnabled: false, - cpu: Cpu.HALF_VCPU, - memory: Memory.ONE_GB, - healthCheck: HealthCheck.http({ - path: "/__health", - }), - }); - this.grantPermissions(service); - return new HttpOrigin(service.serviceUrl); - } - - private createDistribution(origins: Record) { - const cloudfrontFunction = new CloudfrontFunction(this, 'OpenNextCfFunction', { - code: FunctionCode.fromInline(` - function handler(event) { - var request = event.request; - request.headers["x-forwarded-host"] = request.headers.host; - return request; - } - `) - }) - const fnAssociations = [ - { - function: cloudfrontFunction , - eventType: FunctionEventType.VIEWER_REQUEST, - }, - ] - - const distribution = new Distribution(this, "OpenNextDistribution", { - defaultBehavior: { - origin: origins.default, - viewerProtocolPolicy: ViewerProtocolPolicy.REDIRECT_TO_HTTPS, - allowedMethods: AllowedMethods.ALLOW_GET_HEAD_OPTIONS, - cachedMethods: CachedMethods.CACHE_GET_HEAD_OPTIONS, - cachePolicy: this.serverCachePolicy, - originRequestPolicy: OriginRequestPolicy.ALL_VIEWER_EXCEPT_HOST_HEADER, - functionAssociations: fnAssociations - }, - additionalBehaviors: this.openNextOutput.behaviors - .filter((b) => b.pattern !== "*") - .reduce( - (acc, behavior) => { - return { - ...acc, - [behavior.pattern]: { - origin: behavior.origin - ? origins[behavior.origin] - : origins.default, - viewerProtocolPolicy: ViewerProtocolPolicy.REDIRECT_TO_HTTPS, - allowedMethods: AllowedMethods.ALLOW_GET_HEAD_OPTIONS, - cachedMethods: CachedMethods.CACHE_GET_HEAD_OPTIONS, - cachePolicy: - behavior.origin === "s3" - ? this.staticCachePolicy - : this.serverCachePolicy, - originRequestPolicy: - behavior.origin === "s3" - ? undefined - : OriginRequestPolicy.ALL_VIEWER_EXCEPT_HOST_HEADER, - functionAssociations: fnAssociations - }, - }; - }, - {} as Record, - ), - }); - return distribution; - } - - private createServerCachePolicy() { - return new CachePolicy(this, "OpenNextServerCachePolicy", { - queryStringBehavior: CacheQueryStringBehavior.all(), - headerBehavior: CacheHeaderBehavior.allowList( - "accept", - "accept-encoding", - "rsc", - "next-router-prefetch", - "next-router-state-tree", - "next-url", - "x-prerender-revalidate", - ), - cookieBehavior: CacheCookieBehavior.none(), - defaultTtl: Duration.days(0), - maxTtl: Duration.days(365), - minTtl: Duration.days(0), - }); - } - - private createStaticCachePolicy() { - return CachePolicy.CACHING_OPTIMIZED; - } -} - -``` diff --git a/docs/pages/v2.mdx b/docs/pages/v2.mdx deleted file mode 100644 index 6435eaa8b..000000000 --- a/docs/pages/v2.mdx +++ /dev/null @@ -1,11 +0,0 @@ -import { Callout } from 'nextra/components' - - - OpenNext V2 will be deprecated soon and will not receive new features or updates. - - We might release security updates if needed. They will be listed on this page. 
- - -### Latest Releases - -- OpenNext [2.3.9](https://github.com/opennextjs/opennextjs-aws/releases/tag/v2.3.9) \ No newline at end of file diff --git a/docs/pages/v2/_meta.json b/docs/pages/v2/_meta.json deleted file mode 100644 index 6d0e93260..000000000 --- a/docs/pages/v2/_meta.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "common_issues": "Troubleshooting", - "inner_workings": "Inner Workings", - "advanced": "Advanced" -} \ No newline at end of file diff --git a/docs/pages/v2/advanced/_meta.json b/docs/pages/v2/advanced/_meta.json deleted file mode 100644 index 8eb79fce3..000000000 --- a/docs/pages/v2/advanced/_meta.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "architecture": "Architecture", - "options": "Options", - "workaround": "Workaround", - "debugging": "Debugging", - "contribute": "Contribute" -} \ No newline at end of file diff --git a/docs/pages/v2/advanced/architecture.mdx b/docs/pages/v2/advanced/architecture.mdx deleted file mode 100644 index da2bb33c7..000000000 --- a/docs/pages/v2/advanced/architecture.mdx +++ /dev/null @@ -1,175 +0,0 @@ -## Recommended infrastructure on AWS - -OpenNext does not create the underlying infrastructure. You can create the infrastructure for your app with your preferred tool — SST, AWS CDK, Terraform, Serverless Framework, etc. - -This is the recommended setup. - -
- [Image: Architecture diagram]
- -Here are the recommended configurations for each AWS resource. - -#### Asset files - -Create an S3 bucket and upload the content in the `.open-next/assets` folder to the root of the bucket. For example, the file `.open-next/assets/favicon.ico` should be uploaded to `/favicon.ico` at the root of the bucket. If you need to upload the files to a subfolder within the bucket, [refer to this section](options#reusing-same-bucket-for-asset-and-cache). - -There are two types of files in the `.open-next/assets` folder: - -**Hashed files** - -These are files with a hash component in the file name. Hashed files are be found in the `.open-next/assets/_next` folder, such as `.open-next/assets/_next/static/css/0275f6d90e7ad339.css`. The hash values in the filenames are guaranteed to change when the content of the files is modified. Therefore, hashed files should be cached both at the CDN level and at the browser level. When uploading the hashed files to S3, the recommended cache control setting is - -``` -public,max-age=31536000,immutable -``` - -**Un-hashed files** - -Other files inside the `.open-next/assets` folder are copied from your app's `public/` folder, such as `.open-next/assets/favicon.ico`. The filename for un-hashed files may remain unchanged when the content is modified. Un-hashed files should be cached at the CDN level, but not at the browser level. When the content of un-hashed files is modified, the CDN cache should be invalidated on deploy. When uploading the un-hashed files to S3, the recommended cache control setting is - -``` -public,max-age=0,s-maxage=31536000,must-revalidate -``` - -#### Cache files - -Create an S3 bucket and upload the content in the `.open-next/cache` folder to the root of the bucket. If you need to upload the files to a subfolder within the bucket, [refer to this section](options#reusing-same-bucket-for-asset-and-cache). - -There are two types of caches in the `.open-next/cache` folder: - -- Route cache: This cache includes `html` and `json` files that are prerendered during the build. They are used to seed the revalidation cache. -- Fetch cache: This cache includes fetch call responses, which might contain sensitive information. Make sure these files are not publicly accessible. - -#### Revalidation Table - -Create a DynamoDB table with the following configuration: -- Partition key: `tag` (String) -- Sort key: `path` (String) -- An index named `revalidate` with the following configuration: - - Partition key: `path` (String) - - Sort key: `revalidatedAt` (Number) - -#### Image optimization function - -Create a Lambda function using the code in the `.open-next/image-optimization-function` folder, with the handler `index.mjs`. Also, ensure that the function is configured as follows: - -- Set the architecture to `arm64`. -- Set the `BUCKET_NAME` environment variable with the value being the name of the S3 bucket where the original images are stored. -- Set the `BUCKET_KEY_PREFIX` environment variable if the asset files are uploaded to a subfolder in the S3 bucket. The value is the path to the folder. This is Optional. -- Grant `s3:GetObject` permission. - -This function handles image optimization requests when the Next.js `` component is used. The [sharp](https://www.npmjs.com/package/sharp) library, which is bundled with the function, is used to convert the image. The library is compiled against the `arm64` architecture and is intended to run on AWS Lambda Arm/Graviton2 architecture. 
[Learn about the better cost-performance offered by AWS Graviton2 processors.](https://aws.amazon.com/blogs/aws/aws-lambda-functions-powered-by-aws-graviton2-processor-run-your-functions-on-arm-and-get-up-to-34-better-price-performance/) - -Note that the image optimization function responds with the `Cache-Control` header, so the image will be cached both at the CDN level and at the browser level. - -#### Server Lambda function - -Create a Lambda function using the code in the `.open-next/server-function` folder, with the handler `index.mjs`. Also, ensure that the function is configured as follows: - -- Set the `CACHE_BUCKET_NAME` environment variable with the value being the name of the S3 bucket where the cache files are stored. -- Set the `CACHE_BUCKET_KEY_PREFIX` environment variable if the cache files are uploaded to a subfolder in the S3 bucket. The value is the path to the folder. This is optional. -- Set the `CACHE_BUCKET_REGION` environment variable with the value being the region of the S3 bucket. -- Set the `REVALIDATION_QUEUE_URL` environment variable with the value being the URL of the revalidation queue. -- Set the `REVALIDATION_QUEUE_REGION` environment variable with the value being the region of the revalidation queue. -- Set the `CACHE_DYNAMO_TABLE` environment variable with the value being the name of the revalidation table. -- Grant `s3:GetObject`, `s3:PutObject`, and `s3:ListObjects` permission. -- Grant `sqs:SendMessage` permission. - -This function handles all other types of requests from the Next.js app, including Server-side Rendering (SSR) requests and API requests. OpenNext builds the Next.js app in **standalone** mode. The standalone mode generates a `.next` folder containing the **NextServer** class that handles requests and a `node_modules` folder with **all the dependencies** needed to run the `NextServer`. The structure looks like this: - -``` - .next/ -> NextServer - node_modules/ -> dependencies -``` - -The server function adapter wraps around `NextServer` and exports a handler function that supports the Lambda request and response. The `server-function` bundle looks like this: - -```diff - .next/ -> NextServer -+ .open-next/ - node_modules/ -> dependencies -+ index.mjs -> server function adapter -``` - -**Monorepo** - -In the case of a monorepo, the build output looks slightly different. For example, if the app is located in `packages/web`, the build output looks like this: - -``` - packages/ - web/ - .next/ -> NextServer - node_modules/ -> dependencies from root node_modules (optional) - node_modules/ -> dependencies from package node_modules -``` - -In this case, the server function adapter needs to be created inside `packages/web` next to `.next/`. This is to ensure that the adapter can import dependencies from both `node_modules` folders. It is not a good practice to have the Lambda configuration coupled with the project structure, so instead of setting the Lambda handler to `packages/web/index.mjs`, we will add a wrapper `index.mjs` at the `server-function` bundle root that re-exports the adapter. The resulting structure looks like this: - -```diff - packages/ - web/ - .next/ -> NextServer -+ .open-next/ - node_modules/ -> dependencies from root node_modules (optional) -+ index.mjs -> server function adapter - node_modules/ -> dependencies from package node_modules -+ index.mjs -> adapter wrapper -``` - -This ensures that the Lambda handler remains at `index.mjs`. 
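In practice, the wrapper can be a one-line re-export. A minimal sketch, assuming the adapter exports a `handler` function and the app lives in `packages/web` (both names are illustrative):

```ts
// index.mjs at the server-function bundle root (monorepo case, illustrative)
// Re-export the adapter generated next to .next/ inside the package, so the
// Lambda handler can stay configured as "index.mjs" regardless of repo layout.
export { handler } from "./packages/web/index.mjs";
```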
- -#### CloudFront distribution - -Create a CloudFront distribution, and dispatch requests to their corresponding handlers (behaviors). The following behaviors are configured: - -| Behavior | Requests | CloudFront Function | Origin | -| ----------------------------------------------------------------------------------------------------------------------------- | ------------------- | ------------------------------------------------------------------------------------------- | --------------------------- | -| `/_next/static/*` | Hashed static files | - | S3 bucket | -| `/favicon.ico`
`/my-images/*`
[see why](workaround#workaround-public-static-files-served-out-by-server-function-aws-specific) | public assets | - | S3 bucket | -| `/_next/image` | Image optimization | - | image optimization function | -| `/_next/data/*` | data requests | set `x-forwarded-host`
[see why](workaround#workaround-set-x-forwarded-host-header-aws-specific) | server function | -| `/api/*` | API | set `x-forwarded-host`
[see why](workaround#workaround-set-x-forwarded-host-header-aws-specific) | server function | -| `/*` | catch all | set `x-forwarded-host`
[see why](workaround#workaround-set-x-forwarded-host-header-aws-specific) | server function | - -#### Running at edge - -The server function can also run at edge locations by configuring it as Lambda@Edge on Origin Request. The server function can accept both regional request events (API payload version 2.0) and edge request events (CloudFront Origin Request payload). Depending on the shape of the Lambda event object, the function will process the request accordingly. - -To configure the CloudFront distribution: - -| Behavior | Requests | CloudFront Function | Lambda@Edge | Origin | -| ----------------------------------------------------------------------------------------------------------------------------- | ------------------- | ------------------------------------------------------------------------------------------- | --------------- | --------------------------- | -| `/_next/static/*` | Hashed static files | - | - | S3 bucket | -| `/favicon.ico`
`/my-images/*`
[see why](workaround#workaround-public-static-files-served-out-by-server-function-aws-specific) | public assets | - | - | S3 bucket | -| `/_next/image` | Image optimization | - | - | image optimization function | -| `/_next/data/*` | data requests | set `x-forwarded-host`
[see why](workaround#workaround-set-x-forwarded-host-header-aws-specific) | server function | - | -| `/api/*` | API | set `x-forwarded-host`
[see why](workaround#workaround-set-x-forwarded-host-header-aws-specific) | server function | - | -| `/*` | catch all | set `x-forwarded-host`
[see why](workaround#workaround-set-x-forwarded-host-header-aws-specific) | server function | - | - -#### Revalidation function - -Create a Lambda function using the code in the `.open-next/revalidation-function` folder, with the handler `index.mjs`. - -Also, create an SQS FIFO queue, and set it as the event source for this function. - -This function polls the queue for revalidation messages. Upon receiving a message, the function sends a HEAD request to the specified route for its revalidation. - -#### Warmer function - -Create a Lambda function using the code in the `.open-next/warmer-function` folder, with the handler `index.mjs`. Ensure the function is configured as follows: - -- Set the `FUNCTION_NAME` environment variable with the value being the name of the server Lambda function. -- Set the `CONCURRENCY` environment variable with the value being the number of server functions to warm. -- Grant `lambda:InvokeFunction` permission to allow the warmer to invoke the server function. - -Also, create an EventBridge scheduled rule to invoke the warmer function every 5 minutes. - -Read more on [how warming works](/v2/inner_workings/warming). - -#### Dynamo Provider Function - -This function is used to populate the revalidation table. It is a custom resource handler from the cdk see [here](https://docs.aws.amazon.com/cdk/api/v2/docs/aws-cdk-lib.custom_resources.AwsCustomResource.html). Ensure the function is configured as follows: - -- Set the `CACHE_DYNAMO_TABLE` environment variable with the value being the name of the DynamoDB table where the revalidation table is stored. -- Grant `dynamodb:PutItem` permission to allow the function to write to the DynamoDB table. \ No newline at end of file diff --git a/docs/pages/v2/advanced/debugging.mdx b/docs/pages/v2/advanced/debugging.mdx deleted file mode 100644 index 41cb8f43a..000000000 --- a/docs/pages/v2/advanced/debugging.mdx +++ /dev/null @@ -1,21 +0,0 @@ -#### Function logs - -To find the **server, image optimization, and warmer log**, go to the AWS CloudWatch console in the **region you deployed to**. - -If the server function is **deployed to Lambda@Edge**, the logs will appear in the **region you are physically close to**. For example, if you deployed your app to `us-east-1` and you are visiting the app from in London, the logs are likely to be in `eu-west-2`. - -#### Warmer function logs - -The logs from the warmer function provide insights into the results of the warming process. - -``` -{ event: 'warmer result', sent: 2, success: 2, uniqueServersWarmed: 2 } -``` - -- `sent` — The number of times the warmer invoked the server function using the Lambda SDK. This value should correspond to the `CONCURRENCY` set in the warmer function. -- `success` — The number of SDK calls that returned a 200 status code, indicating successful invocations. -- `uniqueServersWarmed` — This helps track any instances that responded unusually quickly and served multiple warming requests. As all SDK calls are made concurrently using `await Promise.all()`, this metric is useful for monitoring the number of unique warmed instances. - -#### Opening an issue - -To help diagnose issues, it's always helpful to provide a reproducible setup when opening an issue. One easy way to do this is to create a pull request (PR) and add a new page to the [benchmark app](#example) located in the `example` folder, which reproduces the issue. The PR will automatically deploy the app to AWS. 
\ No newline at end of file diff --git a/docs/pages/v2/advanced/options.mdx b/docs/pages/v2/advanced/options.mdx deleted file mode 100644 index a24673622..000000000 --- a/docs/pages/v2/advanced/options.mdx +++ /dev/null @@ -1,128 +0,0 @@ -#### Custom build command - -OpenNext runs the `build` script in your `package.json` by default. However, you can specify a custom build command if required. - -```bash -# CLI -open-next build --build-command "pnpm custom:build" -``` - -```ts -// JS -import { build } from "open-next/build.js"; - -await build({ - buildCommand: "pnpm custom:build", -}); -``` - -#### Custom app and build output paths - -OpenNext runs the `build` script from your current command folder by default. When running OpenNext from a monorepo with decentralised application and build output paths, you can specify a custom `appPath` and/or `buildOutputPath`. This will allow you to execute your command from the root of the monorepo. - -```bash -# CLI -open-next build --build-command "pnpm custom:build" --app-path "./apps/example-app" --build-output-path "./dist/apps/example-app" -``` - -```ts -// JS -import { build } from "open-next/build.js"; - -await build({ - buildCommand: "pnpm custom:build", - appPath: "./apps/example-app", - buildOutputPath: "./dist/apps/example-app" -}); -``` - -#### Minify server function - -Enabling this option will minimize all `.js` and `.json` files in the server function bundle using the [node-minify](https://github.com/srod/node-minify) library. This can reduce the size of the server function bundle by about 40%, depending on the size of your app. - -```bash -# CLI -open-next build --minify -``` - -```ts -// JS -import { build } from "open-next/build.js"; - -await build({ - minify: true, -}); -``` - -This feature is currently **experimental** and needs to be opted into. It can significantly decrease the server function's cold start time. Once it is thoroughly tested and its stability is confirmed, it will be enabled by default. - -#### **Experimental** Streaming support - -Enabling this option will enable streaming support for the server function. This is experimental and needs to be opted into. It can significantly decrease the server function's time to first byte. - -**Do not use this in production**. See [this](/v2/inner_workings/streaming) for more information. - -```bash -open-next build --streaming -``` - -#### **Experimental** disable dynamodb cache - -Enabling this option will disable the dynamodb cache. This is experimental and needs to be opted into. This means that `next/cache` revalidation will not work. - -```bash -open-next build --dangerously-disable-dynamodb-cache -``` - -#### **Experimental** disable incremental cache - -Disabling incremental cache will cause the entire page to be revalidated on each request. This will cause ISR and SSG pages to be in an inconsistent state. Specify this option if you are using SSR pages only. This will also disable the dynamodb cache. - -```bash -open-next build --dangerously-disable-incremental-cache -``` - -#### Reusing same bucket for asset and cache - -Typically, asset files are uploaded to the root of the bucket. However, you might want to store them in a subfolder of the bucket, for instance, when: - -- using a pre-existing bucket; or -- storing both assets and cache files in the same bucket. - -If you choose to upload asset files to a subfolder (ie. "assets"), be sure to: - -- Set the `BUCKET_KEY_PREFIX` environment variable for the image optimization function to `assets`. 
-- Set the "origin path" for the CloudFront S3 origin to `assets`. - -Similarly, if you decide to upload cache files to a subfolder (ie. "cache"), be sure to: - -- Set the `CACHE_BUCKET_KEY_PREFIX` environment variable for the server function to `cache`. - -#### Debug mode - -OpenNext can be executed in debug mode for bug tracking purposes. - -```bash -# CLI -OPEN_NEXT_DEBUG=true npx open-next@latest build -``` - -```ts -// JS -import { build } from "open-next/build.js"; - -await build({ - debug: true, -}); -``` - -This does a few things: - -1. Lambda handler functions in the build output will not be minified. -1. Lambda handler functions in the build output has sourcemap enabled inline. -1. Lambda handler functions will automatically `console.log` the request event object along with other debugging information. - -It is recommended to **turn off debug mode when building for production** because: - -1. Un-minified function code is 2-3X larger than minified code. This will result in longer Lambda cold start times. -1. Logging the event object on each request can result in a lot of logs being written to AWS CloudWatch. This will result in increased AWS costs. \ No newline at end of file diff --git a/docs/pages/v2/advanced/workaround.mdx b/docs/pages/v2/advanced/workaround.mdx deleted file mode 100644 index 9461e3d16..000000000 --- a/docs/pages/v2/advanced/workaround.mdx +++ /dev/null @@ -1,204 +0,0 @@ -#### WORKAROUND: Create one cache behavior per top-level file and folder in `public/` (AWS specific) - -As mentioned in the [Asset files](./architecture#asset-files) section, files in your app's `public/` folder are static and are uploaded to the S3 bucket. And requests for these files are handled by the S3 bucket, like so: - -``` -https://my-nextjs-app.com/favicon.ico -https://my-nextjs-app.com/my-images/avatar.png -``` - -Ideally, we would create a single cache behavior that routes all requests for `public/` files to the S3 bucket. Unfortunately, CloudFront does not support regex or advanced string patternss for cache behaviors (ie. `/favicon.ico|my-images\/*/` ). - -To work around this limitation, we create a separate cache behavior for each top-level file and folder in `public/`. For example, if your folder structure is: - -``` -public/ - favicon.ico - my-images/ - avatar.png - avatar-dark.png - foo/ - bar.png -``` - -You would create three cache behaviors: `/favicon.ico`, `/my-images/*`, and `/foo/*`. Each of these behaviors points to the S3 bucket. - -One thing to be aware of is that CloudFront has a [default limit of 25 behaviors per distribution](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html#limits-web-distributions). If you have a lot of top-level files and folders, you may reach this limit. To avoid this, consider moving some or all files and folders into a subdirectory: - -``` -public/ - files/ - favicon.ico - my-images/ - avatar.png - avatar-dark.png - foo/ - bar.png -``` - -In this case, you only need to create one cache behavior: `/files/*`. - -Make sure to update your code accordingly to reflect the new file paths. - -Alternatively, you can [request an increase to the limit through AWS Support](https://console.aws.amazon.com/support/home#/case/create?issueType=service-limit-increase&limitType=service-code-cloudfront-distributions). 
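To sanity-check which behaviors your current `public/` folder implies, a small Node script along these lines can enumerate them (the folder name and sample output are illustrative):

```ts
// Derive the CloudFront cache behaviors implied by the top-level entries of public/:
// each top-level file needs an exact-match behavior, each top-level folder a wildcard one.
import { readdirSync, statSync } from "node:fs";
import { join } from "node:path";

const publicDir = "public";
const behaviors = readdirSync(publicDir).map((entry) =>
  statSync(join(publicDir, entry)).isDirectory() ? `/${entry}/*` : `/${entry}`,
);

console.log(behaviors); // e.g. [ "/favicon.ico", "/my-images/*", "/foo/*" ]
```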
- -#### WORKAROUND: Set `x-forwarded-host` header (AWS specific) - -When the server function receives a request, the `host` value in the Lambda request header is set to the hostname of the AWS Lambda service instead of the actual frontend hostname. This creates an issue for the server function (middleware, SSR routes, or API routes) when it needs to know the frontend host. - -To work around the issue, a CloudFront function is run on Viewer Request, which sets the frontend hostname as the `x-forwarded-host` header. The function code looks like this: - -```ts -function handler(event) { - var request = event.request; - request.headers["x-forwarded-host"] = request.headers.host; - return request; -} -``` - -The server function would then sets the `host` header of the request to the value of the `x-forwarded-host` header when sending the request to the `NextServer`. - -#### WORKAROUND: Set `NextRequest` geolocation data - -When your application is hosted on Vercel, you can access a user's geolocation inside your middleware through the `NextRequest` object. - -```ts -export function middleware(request: NextRequest) { - request.geo.country; - request.geo.city; -} -``` - -When your application is hosted on AWS, you can [obtain the geolocation data from CloudFront request headers](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/adding-cloudfront-headers.html#cloudfront-headers-viewer-location). However, there is no way to set this data on the `NextRequest` object passed to the middleware function. - -To work around the issue, the `NextRequest` constructor is modified to initialize geolocation data from CloudFront headers, instead of using the default empty object. - -```diff -- geo: init.geo || {} -+ geo: init.geo || { -+ country: this.headers("cloudfront-viewer-country"), -+ countryName: this.headers("cloudfront-viewer-country-name"), -+ region: this.headers("cloudfront-viewer-country-region"), -+ regionName: this.headers("cloudfront-viewer-country-region-name"), -+ city: this.headers("cloudfront-viewer-city"), -+ postalCode: this.headers("cloudfront-viewer-postal-code"), -+ timeZone: this.headers("cloudfront-viewer-time-zone"), -+ latitude: this.headers("cloudfront-viewer-latitude"), -+ longitude: this.headers("cloudfront-viewer-longitude"), -+ metroCode: this.headers("cloudfront-viewer-metro-code"), -+ } -``` - -CloudFront provides more detailed geolocation information, such as postal code and timezone. Here is a complete list of `geo` properties available in your middleware: - -```ts -export function middleware(request: NextRequest) { - // Supported by Next.js - request.geo.country; - request.geo.region; - request.geo.city; - request.geo.latitude; - request.geo.longitude; - - // Also supported by OpenNext - request.geo.countryName; - request.geo.regionName; - request.geo.postalCode; - request.geo.timeZone; - request.geo.metroCode; -} -``` - -#### WORKAROUND: `NextServer` does not set cache headers for HTML pages - -As mentioned in the [Server function](./architecture#server-lambda-function) section, the server function uses the `NextServer` class from Next.js' build output to handle requests. However, `NextServer` does not seem to set the correct `Cache Control` headers. 
- -To work around the issue, the server function checks if the request is for an HTML page, and sets the `Cache Control` header to: - -``` -public, max-age=0, s-maxage=31536000, must-revalidate -``` - -#### WORKAROUND: `NextServer` does not set correct SWR cache headers - -`NextServer` does not seem to set an appropriate value for the `stale-while-revalidate` cache header. For example, the header might look like this: - -``` -s-maxage=600 stale-while-revalidate -``` - -This prevents CloudFront from caching the stale data. - -To work around the issue, the server function checks if the response includes the `stale-while-revalidate` header. If found, it sets the value to 30 days: - -``` -s-maxage=600 stale-while-revalidate=2592000 -``` - -#### WORKAROUND: Set `NextServer` working directory (AWS specific) - -Next.js recommends using `process.cwd()` instead of `__dirname` to get the app directory. For example, consider a `posts` folder in your app with markdown files: - -``` -pages/ -posts/ - my-post.md -public/ -next.config.js -package.json -``` - -You can build the file path like this: - -```ts -path.join(process.cwd(), "posts", "my-post.md"); -``` - -As mentioned in the [Server function](./architecture#server-lambda-function) section, in a non-monorepo setup, the `server-function` bundle looks like: - -``` -.next/ -node_modules/ -posts/ - my-post.md <- path is "posts/my-post.md" -index.mjs -``` - -In this case, `path.join(process.cwd(), "posts", "my-post.md")` resolves to the correct path. - -However, when the user's app is inside a monorepo (ie. at `/packages/web`), the `server-function` bundle looks like: - -``` -packages/ - web/ - .next/ - node_modules/ - posts/ - my-post.md <- path is "packages/web/posts/my-post.md" - index.mjs -node_modules/ -index.mjs -``` - -In this case, `path.join(process.cwd(), "posts", "my-post.md")` cannot be resolved. - -To work around the issue, we change the working directory for the server function to where `.next/` is located, ie. `packages/web`. - -#### WORKAROUND: Set `__NEXT_PRIVATE_PREBUNDLED_REACT` to use prebundled React - -For Next.js 13.2 and later versions, you need to explicitly set the `__NEXT_PRIVATE_PREBUNDLED_REACT` environment variable. Although this environment variable isn't documented at the time of writing, you can refer to the Next.js source code to understand its usage: - -> In standalone mode, we don't have separated render workers so if both app and pages are used, we need to resolve to the prebundled React to ensure the correctness of the version for app. - -> Require these modules with static paths to make sure they are tracked by NFT when building the app in standalone mode, as we are now conditionally aliasing them it's tricky to track them in build time. - -On every request, we try to detect whether the route is using the Pages Router or the App Router. If the Pages Router is being used, we set `__NEXT_PRIVATE_PREBUNDLED_REACT` to `undefined`, which means the React version from the `node_modules` is used. However, if the App Router is used, `__NEXT_PRIVATE_PREBUNDLED_REACT` is set, and the prebundled React version is used. - -#### WORKAROUND: 13.4.13+ breaking changes (middleware, redirect, rewrites) - -Nextjs 13.4.13 refactored the middleware logic so that it no longer runs in the server handler. Instead they are executed as workers in child threads, which introduces a non-acceptable latency of ~5 seconds. 
In order to circumvent this issue, open-next needs to implement the middleware handler before processing the server handler ourselves. - -We've introduced a custom esbuild plugin to conditionally inject and override code to properly handle the breaking changes. - -The default request handler is in `adapters/plugins/default.ts` -When open-next needs to override that implementation due to NextJs breaking compatibility, the `createServerBundle` in `build.ts` determines the proper overrides to replace the code of the `default.ts` file. diff --git a/docs/pages/v2/common_issues.mdx b/docs/pages/v2/common_issues.mdx deleted file mode 100644 index 349d9a663..000000000 --- a/docs/pages/v2/common_issues.mdx +++ /dev/null @@ -1,23 +0,0 @@ -#### Cannot find module next - -You might stumble upon this error inside cloudwatch logs: `Cannot find module 'next'`. -It is likely that you are in a monorepo and you have several lock files. -Just make sure that you have a single lock file in the root of your project. - -#### headers, redirect, rewrites in `next-config` and middleware are not working in next 13.4.12+ - -If you use a version of nextjs >= 13.4.12, you'll need to use an open-next version >= 2.1 - -#### My api route are returning empty response and i'm using sentry - -If you are using sentry, API routes returns empty body. You could try configuring sentry to ignore API routes. You can read more about it [here](https://docs.sentry.io/platforms/javascript/guides/nextjs/manual-setup/?opt-out-of-auto-instrumentation-on-specific-routes) - -#### My ISR page has this cache-control header `s-maxage=2, stale-while-revalidate=2592000` - -Given how ISR works, while waiting for the revalidation to happen, the page will be served using this cache control header. This prevent your server from being overloaded by a lot of requests while the revalidation is done. You can read more about it [here](/v2/inner_workings/isr). - -#### Unzipped size must be smaller than 262144000 bytes - -AWS Lambda has an unzipped size limit of 250MB. If your app is over this limit, then it is most likely using a node_module library that is too large for serverless or there is a large dev dependency getting bundled. -For example, `pdfjs` has `canvas` optional dependency which takes up 180MB. For more details, [read me](/v2/common_issues/bundle_size). -Note: a large bundle size will increase cold start significantly. \ No newline at end of file diff --git a/docs/pages/v2/common_issues/_meta.json b/docs/pages/v2/common_issues/_meta.json deleted file mode 100644 index 6232261fa..000000000 --- a/docs/pages/v2/common_issues/_meta.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "isr": "ISR" -} diff --git a/docs/pages/v2/common_issues/bundle_size.mdx b/docs/pages/v2/common_issues/bundle_size.mdx deleted file mode 100644 index ec96671df..000000000 --- a/docs/pages/v2/common_issues/bundle_size.mdx +++ /dev/null @@ -1,50 +0,0 @@ -import {Callout} from 'nextra/components' - - -#### Reducing Bundle Size - -Serverless environments are sensitive to bundle size, since it has to download and unzip the contents. To help reduce cold start times as much as possible, you should remove any unnecessary files in the node_modules or report offending dev libraries in this [PR](https://github.com/opennextjs/opennextjs-aws/pull/242) - - - - Next 14+ no longer includes dev dependencies like @swc, esbuild, etc... in the output node_modules, so please upgrade at your earliest convenience. 
- - -#### Unzipped size must be smaller than 262144000 bytes - -To identify the module that's taking up too much space (and isn't serverless friendly): - -```bash -du -hs .open-next/server-function/node_modules/* | sort -rh -``` - -If your app requires the offending library, then consider moving your business logic of the `api` to its own lambda, eg: `/api/v2` => `Api Lambda` - - - There is a [PR](https://github.com/opennextjs/opennextjs-aws/pull/242) to remove some dev dependency from the output node_modules but that requires more testing before it can merge. - - -#### Common issues - -##### Sharp - -`sharp` is not needed outside of the `Image Optimization` function so you should not have it as a dependency. But if you are depending on `sharp`, be sure to install it with the correct flags for your lambda. -eg: `--arch=arm64 --platform=linux --target=18 --libc=glibc` - -##### pdfjs - -- If you need to use pdfjs, you should install it with `npm i pdfjs-dist--no-optional` because the optional dep: `canvas` takes about 180MB. - -- If the above doesn't work (or gives some compilation errors) you can try: - -```js - experimental: { - outputFileTracingExcludes: { - "*": ["node_modules/canvas"], - }, - }, -``` - -##### Others - -Please open an issue or let us know on discord if there are any other modules that are causing issues. diff --git a/docs/pages/v2/common_issues/isr.mdx b/docs/pages/v2/common_issues/isr.mdx deleted file mode 100644 index e6a4c398b..000000000 --- a/docs/pages/v2/common_issues/isr.mdx +++ /dev/null @@ -1,108 +0,0 @@ -import {Callout} from 'nextra/components' - - - `next/cache` revalidation needs `next@13.5.1` or higher to work properly. If you are using an older version, please upgrade. - - -#### On-demand revalidation - -When you manually revalidate the Next.js cache for a specific page, the ISR cache files stored on S3 will be updated. However, it is still necessary to invalidate the CloudFront cache: - -```ts -// pages/api/revalidate.js -export default async function handler(req, res) { - await res.revalidate("/foo"); - await invalidateCloudFrontPaths(["/foo"]); - // ... -} -``` - -If the pages router is in use, you must also invalidate the `_next/data/BUILD_ID/foo.json` path. The value for `BUILD_ID` can be found in the `.next/BUILD_ID` build output and can be accessed at runtime via the `process.env.NEXT_BUILD_ID` environment variable. - -```ts -await invalidateCloudFrontPaths(["/foo", `/_next/data/${process.env.NEXT_BUILD_ID}/foo.json`]); -``` - -And here is an example of the `invalidateCloudFrontPaths()` function: - -```ts -import { CloudFrontClient, CreateInvalidationCommand } from "@aws-sdk/client-cloudfront"; - -const cloudFront = new CloudFrontClient({}); - -async function invalidateCloudFrontPaths(paths: string[]) { - await cloudFront.send( - new CreateInvalidationCommand({ - // Set CloudFront distribution ID here - DistributionId: distributionId, - InvalidationBatch: { - CallerReference: `${Date.now()}`, - Paths: { - Quantity: paths.length, - Items: paths, - }, - }, - }), - ); -} -``` - -Note that manual CloudFront path invalidation incurs costs. According to the [AWS CloudFront pricing page](https://aws.amazon.com/cloudfront/pricing/): - -> No additional charge for the first 1,000 paths requested for invalidation each month. Thereafter, $0.005 per path requested for invalidation. - -Due to these costs, if multiple paths require invalidation, it is more economical to invalidate a wildcard path `/*`. 
For example: - -```ts -// This costs $0.005 x 3 = $0.015 after the first 1000 paths -await invalidateCloudFrontPaths(["/page/a", "/page/b", "/page/c"]); - -// This costs $0.005, but also invalidates other routes such as "page/d" -await invalidateCloudFrontPaths(["/page/*"]); -``` - -For on-demand revalidation via the [`next/cache` module](https://nextjs.org/docs/app/building-your-application/data-fetching/revalidating#using-on-demand-revalidation), if you want to retrieve the associated paths for a given tag, you can use this function: - -```ts -function getByTag(tag: string) { - try { - const { Items } = await this.dynamoClient.send( - new QueryCommand({ - TableName: process.env.CACHE_DYNAMO_TABLE, - KeyConditionExpression: "#tag = :tag", - ExpressionAttributeNames: { - "#tag": "tag", - }, - ExpressionAttributeValues: { - ":tag": { S: `${process.env.NEXT_BUILD_ID}/${tag}` }, - }, - }), - ); - return ( - // We need to remove the buildId from the path - Items?.map( - ({ path: { S: key } }) => key?.replace(`${process.env.NEXT_BUILD_ID}/`, "") ?? "", - ) ?? [] - ); - } catch (e) { - error("Failed to get by tag", e); - return []; - } -} -``` - -#### Patch fetch behaviour for ISR. Only for next@13.5.1+ - -If you use ISR and fetch in your app, you may encounter a bug that makes your revalidate values inconsistent. -The issue is that it revalidates using the lowest revalidate of all fetch calls in your page, regardless of their individual values. To fix this bug, you need to modify the fetch function in your root layout component with the following code snippet - -```ts -export default function RootLayout() { - const asyncStorage = require("next/dist/client/components/static-generation-async-storage.external"); - //@ts-ignore - const staticStore = (fetch as any).__nextGetStaticStore?.() || asyncStorage.staticGenerationAsyncStorage; - const store = staticStore.getStore(); - store.isOnDemandRevalidate = store.isOnDemandRevalidate && !(process.env.OPEN_NEXT_ISR === 'true'); - return <>...; -} -``` \ No newline at end of file diff --git a/docs/pages/v2/inner_workings/_meta.json b/docs/pages/v2/inner_workings/_meta.json deleted file mode 100644 index 6232261fa..000000000 --- a/docs/pages/v2/inner_workings/_meta.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "isr": "ISR" -} diff --git a/docs/pages/v2/inner_workings/isr.mdx b/docs/pages/v2/inner_workings/isr.mdx deleted file mode 100644 index 4a4687b3b..000000000 --- a/docs/pages/v2/inner_workings/isr.mdx +++ /dev/null @@ -1,89 +0,0 @@ -import { Callout } from 'nextra/components' - -In standalone mode, Next.js prebuilds the ISR cache during the build process. And at runtime, **NextServer** expects this cache locally on the server. This works effectively when the server is run on a single web server machine, sharing the cache across all requests. In a Lambda environment, the cache needs to be housed centrally in a location accessible by all server Lambda function instances. S3 serves as this central location. - -To facilitate this: - -- ISR cache files are excluded from the `server-function` bundle and instead are uploaded to the cache bucket. -- The default cache handler is replaced with a custom cache handler by configuring the [`incrementalCacheHandlerPath`](https://nextjs.org/docs/app/api-reference/next-config-js/incrementalCacheHandlerPath) field in `next.config.js`. -- The custom cache handler manages the cache files on S3, handling both reading and writing operations. 
-- Since we're using FIFO queue, if we want to process more than one revalidation at a time, we need to have separate Message Group IDs. We generate a Message Group ID for each revalidation request based on the route path. This ensures that revalidation requests for the same route are processed only once. You can use `MAX_REVALIDATE_CONCURRENCY` environment variable to control the number of revalidation requests processed at a time. By default, it is set to 10. -- The `revalidation-function` polls the message from the queue and makes a `HEAD` request to the route with the `x-prerender-revalidate` header. -- The `server-function` receives the `HEAD` request and revalidates the cache. -- Tags are handled differently in a dynamodb table. We use a separate table to store the tags for each route. The custom cache handler will update the tags in the table when it updates the cache. - -#### Lifetime of an ISR request for a stale page - -1. Cloudfront receives a request for a page. Let's assume the page is stale in Cloudfront. -2. Cloudfront forwards the request to the `server-function` in the background but still returns the cached version. -3. The `server-function` checks in the S3 cache. If the page is stale, it sends the stale response back to Cloudfront while sending a message to the revalidation queue to trigger background revalidation. It will also change the cache-control header to `s-maxage=2, stale-while-revalidate=2592000` -4. A new request comes in for the same page after 2 seconds. Cloudfront sends the cached version back to the user and forwards the request to the `server-function`. -5. If the revalidation is done, the `server-function` will update the cache and send the updated response back to Cloudfront. Subsequent request will then get the updated version. Otherwise, we go back to step 3. - -#### Tags - -Tags are stored in a dynamodb table. -There is 3 fields in the table: `tag`, `path`, `revalidatedAt`. The `tag` field is the partition key and `path` is the sort key. - -We use an index called `revalidate` with `path` as a partition key and `revalidatedAt` as the sort key. - -Each tags has several paths, and every subpath is also considered as a tag. For example, if we have a tag `tag1` with path `/a/b/c`, we also have tags `/a`, `/a/layout`, `/a/page`, `/a/b`, `/a/b/layout`, `/a/b/page`, `/a/b/c/layout`, `/a/b/c/page`. - -When `revalidateTag` is called, we update the `revalidatedAt` value for each path and subpath associated with this tag. - -When we check if a page is stale, we check the `revalidatedAt` value for each record and the `LastModified` of this S3 cache objects . If `revalidatedAt` is greater than `LastModified`, we consider the page is stale. - -#### Cost - - - Be aware that fetch cache is using S3. `fetch` by default in next is cached, and even for SSR requests, it will be written to S3. This can lead to a lot of S3 requests and can be expensive. You can disable fetch cache by setting `cache` to `no-store` in the `fetch` options. Also see [this workaround](/v2/common_issues/isr#patch-fetch-behaviour-for-isr-only-for-next1351) - - -`get` will be called on every request to ISR and SSG that are not cached in Cloudfront, and `set` will be called on every revalidation. -They can also be called on fetch requests if the `cache` option is not set to `no-store`. - -There is also some cost associated to deployment since you need to upload the cache to S3 and upload the tags to DynamoDB. 
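As the callout above notes, one way to keep these costs down is to opt individual `fetch` calls out of the data cache. A minimal sketch (the URL is just a placeholder):

```ts
// Opt this fetch call out of the Next.js data cache so its response is never
// written to the S3-backed fetch cache (and never triggers a cache read).
const res = await fetch("https://api.example.com/posts", { cache: "no-store" });
const posts = await res.json();
```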
- -For the examples here, let's assume an app route with a 5 minute revalidation delay in us-east-1. This is assuming you get constant traffic to the route (If you get no traffic, you will only pay for the storage cost). - -##### S3 -- Each `get` request to the cache will result in at least 1 `GetObject` - -``` - GetObject cost - 8,640 requests * $0.0004 per 1,000 requests = $0.003456 - Total cost - $0.003456 per route per month -``` - -- Each `set` request to the cache will result in 1 `PutObject` in S3 - -``` - PutObject cost - 8,640 requests * $0.005 per 1,000 requests = $0.0432 - Total cost - $0.0432 per route per month -``` - -You can then calculate the cost based on your usage and the [S3 pricing](https://aws.amazon.com/s3/pricing/) - -##### DynamoDB - -For the example, let's consider that that same route has 2 tags and 10 paths and subpath for each tag. This is assuming you get constant traffic to the route. - -- Each `revalidateTag` request will result in 1 `Query` in DynamoDB and a `PutItem` for each path associated with the tag, they are grouped in batches of 25 in a `BatchWriteItem` request. -``` - Assuming you do 1 revalidation per 5 minute - Query cost - 8,640 request * $0.25 per 1,000,000 read = $0.00216 - BatchWriteItem cost - 86,400 requests * $0.25 per 1,000,000 write = $0.0216 - Total cost - $0.04536 per tag revalidation per month -``` -- Each `get` request will result in 1 `Query` in DynamoDB -``` - Query cost - 8,640 request * $0.25 per 1,000,000 read = $0.00216 - Total cost - $0.00216 per route per month -``` -- Each `set` request will result in 1 `Query` in DynamoDB and a `PutItem` for each tag associated with the path that are not present in DynamoDB, they are grouped in batches of 25 in a `BatchWriteItem` request. -``` - Query cost - 8,640 request * $0.25 per 1,000,000 read = $0.00216 - Total cost - $0.00216 per route per month -``` - -You can then calculate the cost based on your usage and the [DynamoDB pricing](https://aws.amazon.com/dynamodb/pricing/) - diff --git a/docs/pages/v2/inner_workings/plugin.mdx b/docs/pages/v2/inner_workings/plugin.mdx deleted file mode 100644 index 4ce994599..000000000 --- a/docs/pages/v2/inner_workings/plugin.mdx +++ /dev/null @@ -1,64 +0,0 @@ -import { Callout } from "nextra/components"; - -Open-next use esbuild to build the project. Given that we aim to support multiple version of Next and multiple runtime, we developped a plugin to override some parts of our code at build time with an esbuild plugin. - - - As of now, the plugin is internal and not exposed to the user. - We might expose it in the future, but for now, it is used only internally. - - -#### How to use - -You need to create a ts file where you will override the code. 
Inside this file, you can use the following syntax: - -```ts -//#import -// Everything inside here will be placed at the top of the file -const test = "test"; -//#endImport - - -//#override import -// Everything inside here will replace the content of the override with the given id: import -// To get the id you'll need to look at the file you want to override -import { requestHandler } from "./util.js"; -//#endOverride - -//#override fnId -// Everything inside here will replace the content of the override with the given id: fnId -export const fnId = (req: IncomingMessage, res: ServerResponse) => { - requestHandler(req, res); -}; -//#endOverride -``` - -and then you use the plugin this way: - -```ts -import openNextPlugin from "./plugin.js"; - -openNextPlugin({ - name: "name of the plugin", - target: /plugins\/serverHandler\.js/g, // regex to match the file to override - replacements: ["./serverHandler.replacement.js"], // path to the files containing the overrides -}), - -``` - -#### Known issues - -Do not include `types` in #override and #imports, as esbuild will remove preceeding comments (ie it removes //#override id)when it builds. - -Instead, put the `import type` outside like: - -``` -import type { PluginHandler } from "../next-types.js"; -import type { IncomingMessage } from "../request.js"; -import type { ServerResponse } from "../response.js"; - -//#override imports -import { requestHandler } from "./util.js"; -//#endOverride -``` - -The types are removed in the final output anyways. diff --git a/docs/pages/v2/inner_workings/streaming.mdx b/docs/pages/v2/inner_workings/streaming.mdx deleted file mode 100644 index e6d259cae..000000000 --- a/docs/pages/v2/inner_workings/streaming.mdx +++ /dev/null @@ -1,16 +0,0 @@ -import { Callout } from 'nextra/components' - - - Streaming support is **EXTREMELY EXPERIMENTAL**... AWS has updated their runtime in the past which broke streaming... It seems stable now but if they do another rugpull, it could cause your app to break. - - **It's not recommended to use streaming in production.** - - -Streaming is using the new AWS lambda support for [streaming responses](https://docs.aws.amazon.com/lambda/latest/dg/configuration-response-streaming.html). This allows you to send data to the client as it's generated instead of waiting for the entire response to be generated. - -If you want to try streaming you need to set the function url invoke mode to `RESPONSE_STREAM`. -You'll also need to add a `--streaming` tags to the build cli. - -```bash -open-next build --streaming -``` \ No newline at end of file diff --git a/docs/pages/v2/inner_workings/warming.mdx b/docs/pages/v2/inner_workings/warming.mdx deleted file mode 100644 index 6944adc89..000000000 --- a/docs/pages/v2/inner_workings/warming.mdx +++ /dev/null @@ -1,47 +0,0 @@ -Server functions may experience performance issues due to Lambda cold starts. To mitigate this, the server function can be invoked periodically. Remember, **Warming is optional** and is only required if you want to keep the server function warm. - -Please note, warming is currently only supported when the server function is deployed to a single region (Lambda). - -#### Prewarm - -Each time you deploy, a new version of the Lambda function will be generated. All warmed server function instances will be turned off. And there won't be any warm instances until the warmer function runs again at the next 5-minute interval. 
- -To ensure the functions are prewarmed on deploy, create a [CloudFormation Custom Resource](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-custom-resources.html) to invoke the warmer function on deployment. The custom resource should be configured as follows: - -- Invoke the warmer function on resource `Create` and `Update`. -- Include a timestamp value in the resource property to ensure the custom resource runs on every deployment. -- Grant `lambda:InvokeFunction` permission to allow the custom resource to invoke the warmer function. - -#### Cost - -There are three components to the cost: - -1. EventBridge scheduler: $0.00864 - - ``` - Requests cost — 8,640 invocations per month x $1/million = $0.00864 - ``` - -1. Warmer function: $0.145728288 - - ``` - Requests cost — 8,640 invocations per month x $0.2/million = $0.001728 - Duration cost — 8,640 invocations per month x 1GB memory x 1s duration x $0.0000166667/GB-second = $0.144000288 - ``` - -1. Server function: $0.0161280288 per warmed instance - - ``` - Requests cost — 8,640 invocations per month x $0.2/million = $0.001728 - Duration cost — 8,640 invocations per month x 1GB memory x 100ms duration x $0.0000166667/GB-second = $0.0144000288 - ``` - -For example, keeping 50 instances of the server function warm will cost approximately **$0.96 per month** - -``` - -$0.00864 + $0.145728288 + $0.0161280288 x 50 = $0.960769728 - -``` - -This cost estimate is based on the `us-east-1` region pricing and does not consider any free tier benefits. \ No newline at end of file diff --git a/docs/public/architecture.png b/docs/public/architecture.png deleted file mode 100644 index 8be4efaa8..000000000 Binary files a/docs/public/architecture.png and /dev/null differ diff --git a/docs/public/favicon-dark.png b/docs/public/favicon-dark.png deleted file mode 100644 index f1598e74e..000000000 Binary files a/docs/public/favicon-dark.png and /dev/null differ diff --git a/docs/public/favicon-light.png b/docs/public/favicon-light.png deleted file mode 100644 index 111cf2060..000000000 Binary files a/docs/public/favicon-light.png and /dev/null differ diff --git a/docs/public/logo-dark.svg b/docs/public/logo-dark.svg deleted file mode 100644 index 859c6e143..000000000 --- a/docs/public/logo-dark.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - diff --git a/docs/public/logo-light.svg b/docs/public/logo-light.svg deleted file mode 100644 index 52b67230a..000000000 --- a/docs/public/logo-light.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - diff --git a/docs/public/share.png b/docs/public/share.png deleted file mode 100644 index 78860e139..000000000 Binary files a/docs/public/share.png and /dev/null differ diff --git a/docs/styles/Layout.module.css b/docs/styles/Layout.module.css deleted file mode 100644 index 941476da8..000000000 --- a/docs/styles/Layout.module.css +++ /dev/null @@ -1,42 +0,0 @@ -.container { - padding: 0 2rem; -} -@media only screen and (max-width: 600px) { - .container { - padding: 0 1rem; - } -} - -.main { - margin: 0 auto; - max-width: var(--max-width); - min-height: calc(100vh - var(--footer-margin) - var(--footer-height)); - padding: 4rem 0 0; - flex: 1; - display: flex; - flex-direction: column; - align-items: center; - justify-content: center; -} - -.footer { - margin-top: var(--footer-margin); - display: flex; - flex: 1; - height: var(--footer-height); - justify-content: space-between; - align-items: center; - color: var(--theme-text-lighter); - font-size: 0.875rem; -} -.footer a { - color: 
var(--theme-text-lighter); -} -.footer a:not(:first-child) { - margin-left: 1.3rem; -} -@media only screen and (max-width: 600px) { - .footer a:not(:first-child) { - margin-left: 0.5rem; - } -} diff --git a/docs/theme.config.jsx b/docs/theme.config.jsx deleted file mode 100644 index b3af0a288..000000000 --- a/docs/theme.config.jsx +++ /dev/null @@ -1,56 +0,0 @@ -import { useConfig } from "nextra-theme-docs"; - -import Footer from "./components/Footer"; -import Logo from "./components/Logo.svg"; - -export default { - logo: , - docsRepositoryBase: "https://github.com/opennextjs/opennextjs-aws/tree/main/docs", - project: { - link: "https://github.com/sst/open-next", - }, - chat: { - link: "https://sst.dev/discord", - }, - footer: { - text: