diff --git a/src/content/docs/images/examples/index.mdx b/src/content/docs/images/examples/index.mdx
new file mode 100644
index 000000000000000..59fd8bc95ffd0e9
--- /dev/null
+++ b/src/content/docs/images/examples/index.mdx
@@ -0,0 +1,12 @@
+---
+type: overview
+hideChildren: true
+pcx_content_type: navigation
+title: Examples
+sidebar:
+  order: 11
+---
+
+import { ListExamples } from "~/components";
+
+<ListExamples directory="images/examples/" />
\ No newline at end of file
diff --git a/src/content/docs/images/examples/transcode-from-workers-ai.mdx b/src/content/docs/images/examples/transcode-from-workers-ai.mdx
new file mode 100644
index 000000000000000..674623fc8d82fd7
--- /dev/null
+++ b/src/content/docs/images/examples/transcode-from-workers-ai.mdx
@@ -0,0 +1,29 @@
+---
+type: example
+summary: Transcode an image from Workers AI before uploading to R2
+pcx_content_type: example
+title: Transcode images
+sidebar:
+  order: 5
+description: Transcode an image from Workers AI before uploading to R2
+---
+
+```js
+const stream = await env.AI.run(
+  "@cf/bytedance/stable-diffusion-xl-lightning",
+  {
+    prompt: YOUR_PROMPT_HERE
+  }
+);
+
+// Convert to AVIF
+const image = (
+  await env.IMAGES.input(stream)
+    .output({ format: "image/avif" })
+).response();
+
+const fileName = "image.avif";
+
+// Upload to R2
+await env.R2.put(fileName, image.body);
+```
\ No newline at end of file
diff --git a/src/content/docs/images/examples/watermark-from-kv.mdx b/src/content/docs/images/examples/watermark-from-kv.mdx
new file mode 100644
index 000000000000000..dfe8792adc03050
--- /dev/null
+++ b/src/content/docs/images/examples/watermark-from-kv.mdx
@@ -0,0 +1,48 @@
+---
+type: example
+summary: Draw a watermark from KV on an image from R2
+pcx_content_type: example
+title: Watermarks
+sidebar:
+  order: 5
+description: Draw a watermark from KV on an image from R2
+---
+
+```ts
+interface Env {
+  BUCKET: R2Bucket,
+  NAMESPACE: KVNamespace,
+  IMAGES: ImagesBinding,
+}
+export default {
+  async fetch(request, env, ctx): Promise<Response> {
+    const watermarkKey = "my-watermark";
+    const sourceKey = "my-source-image";
+
+    const cache = await caches.open("transformed-images");
+    const cacheKey = new URL(sourceKey + "/" + watermarkKey, request.url);
+    const cacheResponse = await cache.match(cacheKey);
+
+    if (cacheResponse) {
+      return cacheResponse;
+    }
+
+    let watermark = await env.NAMESPACE.get(watermarkKey, "stream");
+    let source = await env.BUCKET.get(sourceKey);
+
+    if (!watermark || !source) {
+      return new Response("Not found", { status: 404 });
+    }
+
+    const result = await env.IMAGES.input(source.body)
+      .draw(watermark)
+      .output({ format: "image/jpeg" });
+
+    const response = result.response();
+
+    ctx.waitUntil(cache.put(cacheKey, response.clone()));
+
+    return response;
+  },
+} satisfies ExportedHandler<Env>;
+```
\ No newline at end of file
diff --git a/src/content/docs/images/tutorials/index.mdx b/src/content/docs/images/tutorials/index.mdx
new file mode 100644
index 000000000000000..5a3a0dca95f7264
--- /dev/null
+++ b/src/content/docs/images/tutorials/index.mdx
@@ -0,0 +1,12 @@
+---
+title: Tutorials
+pcx_content_type: navigation
+sidebar:
+  order: 10
+  group:
+    hideIndex: true
+---
+
+import { DirectoryListing } from "~/components";
+
+<DirectoryListing />
\ No newline at end of file
diff --git a/src/content/docs/images/tutorials/optimize-user-uploaded-image.mdx b/src/content/docs/images/tutorials/optimize-user-uploaded-image.mdx
new file mode 100644
index 000000000000000..708b108fdc89cf8
--- /dev/null
+++ b/src/content/docs/images/tutorials/optimize-user-uploaded-image.mdx
@@ -0,0 +1,323 @@
+---
+pcx_content_type: tutorial
+title: Transform user-uploaded images before uploading to R2
+description: Set up bindings to connect Images, R2, and Assets to your Worker
+sidebar:
+  order: 1
+---
+
+import { WranglerConfig } from "~/components"
+
+In this guide, you will build an app that accepts image uploads, overlays the image with a visual watermark, then stores the transformed image in your R2 bucket.
+
+---
+
+With Images, you have the flexibility to choose where your original images are stored. You can transform images that are stored outside of the Images product, like in [R2](/r2/).
+
+When you store user-uploaded media in R2, you may want to optimize or manipulate images before they are uploaded to your R2 bucket.
+
+You will learn how to connect Developer Platform services to your Worker through bindings, as well as use various optimization features in the Images API.
+
+## Prerequisites
+
+Before you begin, you will need to do the following:
+
+* Add an [Images Paid](/images/pricing/#images-paid) subscription to your account. This allows you to bind the Images API to your Worker.
+* Create an [R2 bucket](/r2/get-started/#2-create-a-bucket), where the transformed images will be uploaded.
+* Create a new Worker project.
+
+If you are new to Workers, review how to [create your first Worker](/workers/get-started/guide/).
+
+## 1: Set up your Worker project
+
+To start, you will need to set up your project to use the following resources on the Developer Platform:
+
+* [Images](/images/transform-images/bindings/) to transform, resize, and encode images directly from your Worker.
+* [R2](/r2/api/workers/workers-api-usage/) to connect the bucket for storing transformed images.
+* [Assets](/workers/static-assets/binding/) to access a static image that will be used as the visual watermark.
+
+### Add the bindings to your Wrangler configuration
+
+Configure your `wrangler.toml` file to add the Images, R2, and Assets bindings:
+
+<WranglerConfig>
+
+```toml
+[images]
+binding = "IMAGES"
+
+[[r2_buckets]]
+binding = "R2"
+bucket_name = "<BUCKET_NAME>"
+
+[assets]
+directory = "./"
+binding = "ASSETS"
+```
+
+</WranglerConfig>
+
+Replace `<BUCKET_NAME>` with the name of the R2 bucket where you will upload the images after they are transformed. In your Worker code, you will be able to refer to this bucket using `env.R2`.
+
+Replace `./` with the name of the project's directory where the overlay image will be stored. In your Worker code, you will be able to refer to these assets using `env.ASSETS`.
+
+### Set up your assets directory
+
+Because you want to apply a visual watermark to every uploaded image, you need a place to store the overlay image.
+
+The assets directory of your project lets you upload static assets as part of your Worker. When you deploy your project, these uploaded files, along with your Worker code, are deployed to Cloudflare's infrastructure in a single operation.
+
+After you configure your Wrangler file, upload the overlay image to the specified directory. In this example app, the directory `./assets` contains the overlay image.
+
+## 2: Build your frontend
+
+You will need to build the interface for the app that lets users upload images.
+
+In this example, the frontend is rendered directly from the Worker script.
+
+To do this, make a new `html` variable, which contains a `form` element for accepting uploads.
+In `fetch`, construct a new `Response` with a `Content-Type: text/html` header to serve your static HTML site to the client:
+
+```js
+const html = `
+<!DOCTYPE html>
+<html>
+  <head>
+    <title>Upload Image</title>
+  </head>
+  <body>
+    <h1>Upload an image</h1>
+    <form method="POST" enctype="multipart/form-data">
+      <input type="file" name="image" accept="image/*" required />
+      <button type="submit">Upload</button>
+    </form>
+  </body>
+</html>
+`;
+
+export default {
+  async fetch(request, env) {
+    if (request.method === "GET") {
+      return new Response(html, { headers: { "Content-Type": "text/html" } });
+    }
+    if (request.method === "POST") {
+      // This is called when the user submits the form
+    }
+  }
+};
+```
+
+## 3: Read the uploaded image
+
+After you have a `form`, you need to read the image that the user uploads so that it can be transformed.
+
+Because the `form` lets users upload directly from their disk, you cannot use `fetch()` to get an image from a URL. Instead, you will operate on the body of the image as a stream of bytes.
+
+To do this, parse the data from the `form` as an array buffer:
+
+```js
+export default {
+  async fetch(request, env) {
+    if (request.method === "GET") {
+      return new Response(html, { headers: { "Content-Type": "text/html" } });
+    }
+    if (request.method === "POST") {
+      try {
+        // Parse form data
+        const formData = await request.formData();
+        const file = formData.get("image");
+        if (!file || typeof file.arrayBuffer !== "function") {
+          return new Response("No image file provided", { status: 400 });
+        }
+
+        // Read uploaded image as array buffer
+        const fileBuffer = await file.arrayBuffer();
+      } catch (err) {
+        console.log(err.message);
+        return new Response("An error occurred while processing the image", { status: 500 });
+      }
+    }
+  }
+};
+```
+
+## 4: Transform the image
+
+For every uploaded image, you want to perform the following actions:
+
+- Overlay the visual watermark that you added to the assets directory.
+- Transcode the watermarked image to `AVIF`, which compresses the image and reduces its file size.
+- Upload the transformed image to R2.
+
+### Set up the overlay image
+
+To fetch the overlay image from the assets directory, create a function `assetUrl`, then use `env.ASSETS` to retrieve the `watermark.png` image:
+
+```js
+function assetUrl(request, path) {
+  const url = new URL(request.url);
+  url.pathname = path;
+  return url;
+}
+
+export default {
+  async fetch(request, env) {
+    if (request.method === "GET") {
+      return new Response(html, { headers: { "Content-Type": "text/html" } });
+    }
+    if (request.method === "POST") {
+      try {
+        // Parse form data
+        const formData = await request.formData();
+        const file = formData.get("image");
+        if (!file || typeof file.arrayBuffer !== "function") {
+          return new Response("No image file provided", { status: 400 });
+        }
+
+        // Read uploaded image as array buffer
+        const fileBuffer = await file.arrayBuffer();
+
+        // Fetch image as watermark
+        let watermarkStream = (await env.ASSETS.fetch(assetUrl(request, "watermark.png"))).body;
+      } catch (err) {
+        console.log(err.message);
+        return new Response("An error occurred while processing the image", { status: 500 });
+      }
+    }
+  }
+};
+```
+
+### Watermark and transcode the image
+
+You can interact with the Images binding through `env.IMAGES`.
+
+This is where you will put all of the optimization operations you want to perform on the image.
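+Before adding the watermark, it can help to see the shape of an Images pipeline on its own: `input()` accepts the image bytes, chained calls such as `transform()` describe the operations, `output()` encodes the result, and `response()` wraps it in a `Response`. The following is a minimal sketch of that flow, separate from this app's form handling; it assumes an object named `sample.png` already exists in the bucket, and the width and output format are arbitrary example values:
+
+```js
+export default {
+  async fetch(request, env) {
+    // Read an example image from R2 (assumes "sample.png" was uploaded earlier)
+    const object = await env.R2.get("sample.png");
+    if (!object) {
+      return new Response("Image not found", { status: 404 });
+    }
+
+    // Decode the input, resize it, then encode the output
+    const result = await env.IMAGES.input(object.body)
+      .transform({ width: 800 })
+      .output({ format: "image/webp" });
+
+    // response() returns a Response containing the encoded image with the matching Content-Type
+    return result.response();
+  }
+};
+```
+
+In this tutorial, the input comes from the upload form rather than from storage, and the watermark is drawn onto the image before it is encoded.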
+Here, you will use the `.draw()` function to apply a visual watermark over the uploaded image, then use `.output()` to encode the image as AVIF:
+
+```js
+function assetUrl(request, path) {
+  const url = new URL(request.url);
+  url.pathname = path;
+  return url;
+}
+
+export default {
+  async fetch(request, env) {
+    if (request.method === "GET") {
+      return new Response(html, { headers: { "Content-Type": "text/html" } });
+    }
+    if (request.method === "POST") {
+      try {
+        // Parse form data
+        const formData = await request.formData();
+        const file = formData.get("image");
+        if (!file || typeof file.arrayBuffer !== "function") {
+          return new Response("No image file provided", { status: 400 });
+        }
+
+        // Read uploaded image as array buffer
+        const fileBuffer = await file.arrayBuffer();
+
+        // Fetch image as watermark
+        let watermarkStream = (await env.ASSETS.fetch(assetUrl(request, "watermark.png"))).body;
+
+        // Apply watermark and convert to AVIF
+        const imageResponse = (
+          await env.IMAGES.input(fileBuffer)
+            // Draw the watermark on top of the image
+            .draw(
+              env.IMAGES.input(watermarkStream)
+                .transform({ width: 100, height: 100 }),
+              { bottom: 10, right: 10, opacity: 0.75 }
+            )
+            // Output the final image as AVIF
+            .output({ format: "image/avif" })
+        ).response();
+      } catch (err) {
+        console.log(err.message);
+        return new Response("An error occurred while processing the image", { status: 500 });
+      }
+    }
+  }
+};
+```
+
+## 5: Upload to R2
+
+Upload the transformed image to R2.
+
+By creating a `fileName` variable, you can specify the name of the transformed image. In this example, you generate a file name that includes the upload timestamp before uploading to R2.
+
+Here is the full code for the example:
+
+```js
+function assetUrl(request, path) {
+  const url = new URL(request.url);
+  url.pathname = path;
+  return url;
+}
+
+export default {
+  async fetch(request, env) {
+    if (request.method === "GET") {
+      return new Response(html, { headers: { "Content-Type": "text/html" } });
+    }
+    if (request.method === "POST") {
+      try {
+        // Parse form data
+        const formData = await request.formData();
+        const file = formData.get("image");
+        if (!file || typeof file.arrayBuffer !== "function") {
+          return new Response("No image file provided", { status: 400 });
+        }
+
+        // Read uploaded image as array buffer
+        const fileBuffer = await file.arrayBuffer();
+
+        // Fetch image as watermark
+        let watermarkStream = (await env.ASSETS.fetch(assetUrl(request, "watermark.png"))).body;
+
+        // Apply watermark and convert to AVIF
+        const imageResponse = (
+          await env.IMAGES.input(fileBuffer)
+            // Draw the watermark on top of the image
+            .draw(
+              env.IMAGES.input(watermarkStream)
+                .transform({ width: 100, height: 100 }),
+              { bottom: 10, right: 10, opacity: 0.75 }
+            )
+            // Output the final image as AVIF
+            .output({ format: "image/avif" })
+        ).response();
+
+        // Add timestamp to file name
+        const fileName = `image-${Date.now()}.avif`;
+
+        // Upload to R2
+        await env.R2.put(fileName, imageResponse.body);
+
+        return new Response(`Image uploaded successfully as ${fileName}`, { status: 200 });
+      } catch (err) {
+        console.log(err.message);
+        return new Response("An error occurred while processing the image", { status: 500 });
+      }
+    }
+  }
+};
+```
+
+## Next steps
+
+In this tutorial, you learned how to connect your Worker to various resources on the Developer Platform to build an app that accepts image uploads, transforms them, and uploads the output to R2.
+
+Next, you can [set up a transformation URL](/images/transform-images/transform-via-url/) to dynamically optimize images that are stored in R2.
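+
+As a preview of that approach, transformation URLs follow the pattern `https://<ZONE>/cdn-cgi/image/<OPTIONS>/<SOURCE-IMAGE>` on a zone with transformations enabled. For example, assuming the uploaded files are publicly served from a path such as `/uploads/` on your zone, a request like `/cdn-cgi/image/width=400,format=auto/uploads/image-1700000000000.avif` would resize one of the watermarked images on the fly; the hostname, path, and file name here are placeholders.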