- 
                Notifications
    You must be signed in to change notification settings 
- Fork 178
Feat: Multi-tiered cache for aws #699
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 3 commits
8a48c59
              f6c2914
              f1c4c83
              6695a2b
              0b0ccc4
              2f06186
              File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | 
|---|---|---|
| @@ -0,0 +1,5 @@ | ||
| --- | ||
| "@opennextjs/aws": minor | ||
| --- | ||
|  | ||
| Add a new multi-tiered incremental cache | 
| Original file line number | Diff line number | Diff line change | 
|---|---|---|
| @@ -0,0 +1,172 @@ | ||
| import type { CacheValue, IncrementalCache } from "types/overrides"; | ||
| import { customFetchClient } from "utils/fetch"; | ||
| import { debug } from "../../adapters/logger"; | ||
| import S3Cache, { getAwsClient } from "./s3-lite"; | ||
|  | ||
| // TTL for the local cache in milliseconds | ||
| const localCacheTTL = process.env.OPEN_NEXT_LOCAL_CACHE_TTL | ||
| ? Number.parseInt(process.env.OPEN_NEXT_LOCAL_CACHE_TTL) | ||
| : 0; | ||
| // Maximum size of the local cache in nb of entries | ||
| const maxCacheSize = process.env.OPEN_NEXT_LOCAL_CACHE_SIZE | ||
| ? Number.parseInt(process.env.OPEN_NEXT_LOCAL_CACHE_SIZE) | ||
| : 1000; | ||
|  | ||
| class LRUCache { | ||
|         
                  conico974 marked this conversation as resolved.
              Outdated
          
            Show resolved
            Hide resolved | ||
| private cache: Map< | ||
| string, | ||
| { | ||
| value: CacheValue<boolean>; | ||
| lastModified: number; | ||
| } | ||
| > = new Map(); | ||
| private maxSize: number; | ||
|  | ||
| constructor(maxSize: number) { | ||
| this.maxSize = maxSize; | ||
| } | ||
|         
                  conico974 marked this conversation as resolved.
              Outdated
          
            Show resolved
            Hide resolved | ||
|  | ||
| // isFetch is not used here, only used for typing | ||
| get<T extends boolean = false>(key: string, isFetch?: T) { | ||
| return this.cache.get(key) as { | ||
|         
                  conico974 marked this conversation as resolved.
              Outdated
          
            Show resolved
            Hide resolved | ||
| value: CacheValue<T>; | ||
| lastModified: number; | ||
| }; | ||
| } | ||
|  | ||
| set(key: string, value: any) { | ||
| if (this.cache.size >= this.maxSize) { | ||
| const firstKey = this.cache.keys().next().value; | ||
| if (firstKey) { | ||
| this.cache.delete(firstKey); | ||
| } | ||
| } | ||
| this.cache.set(key, value); | ||
| } | ||
|  | ||
| delete(key: string) { | ||
| this.cache.delete(key); | ||
| } | ||
| } | ||

// Module-scoped first cache tier. NOTE(review): module scope means this
// persists for the lifetime of the runtime process, so it is shared across
// warm invocations — confirm that is the intended sharing scope.
const localCache = new LRUCache(maxCacheSize);
|  | ||
| const awsFetch = (body: RequestInit["body"], type: "get" | "set" = "get") => { | ||
| const { CACHE_BUCKET_REGION } = process.env; | ||
| const client = getAwsClient(); | ||
| return customFetchClient(client)( | ||
| `https://dynamodb.${CACHE_BUCKET_REGION}.amazonaws.com`, | ||
| { | ||
| method: "POST", | ||
| headers: { | ||
| "Content-Type": "application/x-amz-json-1.0", | ||
| "X-Amz-Target": `DynamoDB_20120810.${ | ||
| type === "get" ? "GetItem" : "PutItem" | ||
| }`, | ||
| }, | ||
| body, | ||
| }, | ||
| ); | ||
| }; | ||
|  | ||
| const buildDynamoKey = (key: string) => { | ||
| const { NEXT_BUILD_ID } = process.env; | ||
| return `__meta_${NEXT_BUILD_ID}_${key}`; | ||
| }; | ||
|  | ||
/**
 * This cache implementation uses a multi-tier cache with a local cache, a DynamoDB metadata cache and an S3 cache.
 * It uses the same DynamoDB table as the default tag cache and the same S3 bucket as the default incremental cache.
 * It will first check the local cache.
 * If the local cache is expired, it will check the DynamoDB metadata cache to see if the local cache is still valid.
 * Lastly it will check the S3 cache.
 */
const multiTierCache: IncrementalCache = {
  name: "multi-tier-ddb-s3",
  async get(key, isFetch) {
    // First we check the local cache
    const localCacheEntry = localCache.get(key, isFetch);
    if (localCacheEntry) {
      // Entry is within the TTL: trust it without any network round trip.
      // With the default localCacheTTL of 0 this branch never fires, so every
      // warm read is re-validated against DynamoDB.
      if (Date.now() - localCacheEntry.lastModified < localCacheTTL) {
        debug("Using local cache without checking ddb");
        return localCacheEntry;
      }
      try {
        // Here we'll check ddb metadata to see if the local cache is still valid
        const { CACHE_DYNAMO_TABLE } = process.env;
        // The same namespaced key is written to both the `path` and `tag`
        // attributes (matching how the metadata rows are written in `set`).
        const result = await awsFetch(
          JSON.stringify({
            TableName: CACHE_DYNAMO_TABLE,
            Key: {
              path: { S: buildDynamoKey(key) },
              tag: { S: buildDynamoKey(key) },
            },
          }),
        );
        if (result.status === 200) {
          const data = await result.json();
          const hasBeenDeleted = data.Item?.deleted?.BOOL;
          if (hasBeenDeleted) {
            // Tombstoned elsewhere: evict locally and report a miss.
            localCache.delete(key);
            return { value: undefined, lastModified: 0 };
          }
          // If the metadata is older than the local cache, we can use the local cache
          // If it's not found we assume that no write has been done yet and we can use the local cache
          const lastModified = data.Item?.revalidatedAt?.N
            ? Number.parseInt(data.Item.revalidatedAt.N)
            : 0;
          if (lastModified <= localCacheEntry.lastModified) {
            debug("Using local cache after checking ddb");
            return localCacheEntry;
          }
          // Remote is newer than the local copy: fall through to S3 below.
        }
      } catch (e) {
        // Best effort: a metadata lookup failure falls back to S3 rather than
        // failing the read.
        debug("Failed to get metadata from ddb", e);
      }
    }
    // Local miss or stale entry: read from S3 and repopulate the local tier.
    const result = await S3Cache.get(key, isFetch);
    if (result.value) {
      localCache.set(key, {
        value: result.value,
        lastModified: result.lastModified ?? Date.now(),
      });
    }
    return result;
  },
  // NOTE: write failures are deliberately NOT caught here (per the review
  // discussion on this PR) — a failed S3 or DynamoDB write should surface to
  // the caller instead of leaving the tiers silently out of sync.
  async set(key, value, isFetch) {
    const revalidatedAt = Date.now();
    await S3Cache.set(key, value, isFetch);
    // Record the write time in DynamoDB so other instances can detect that
    // their local copy is stale.
    await awsFetch(
      JSON.stringify({
        TableName: process.env.CACHE_DYNAMO_TABLE,
        Item: {
          tag: { S: buildDynamoKey(key) },
          path: { S: buildDynamoKey(key) },
          revalidatedAt: { N: String(revalidatedAt) },
        },
      }),
      "set",
    );
    // Only update the local tier once both remote writes have succeeded.
    localCache.set(key, {
      value,
      lastModified: revalidatedAt,
    });
  },
  async delete(key) {
    await S3Cache.delete(key);
    // Tombstone the metadata row so other instances evict their local copy
    // on their next `get` for this key.
    await awsFetch(
      JSON.stringify({
        TableName: process.env.CACHE_DYNAMO_TABLE,
        Item: {
          tag: { S: buildDynamoKey(key) },
          path: { S: buildDynamoKey(key) },
          deleted: { BOOL: true },
        },
      }),
      "set",
    );
    localCache.delete(key);
  },
};

export default multiTierCache;
Uh oh!
There was an error while loading. Please reload this page.