diff --git a/.changeset/friendly-walls-search.md b/.changeset/friendly-walls-search.md new file mode 100644 index 0000000000..6607878960 --- /dev/null +++ b/.changeset/friendly-walls-search.md @@ -0,0 +1,5 @@ +--- +"@trigger.dev/build": patch +--- + +Make sure BuildManifest is exported from @trigger.dev/build diff --git a/.changeset/strange-cobras-bake.md b/.changeset/strange-cobras-bake.md new file mode 100644 index 0000000000..db2bc4dbae --- /dev/null +++ b/.changeset/strange-cobras-bake.md @@ -0,0 +1,5 @@ +--- +"trigger.dev": patch +--- + +Always insert the dirs option when initializing a new project in the trigger.config.ts diff --git a/docs/examples/generate-image-with-dall-e3.mdx b/docs/examples/dall-e3-generate-image.mdx similarity index 100% rename from docs/examples/generate-image-with-dall-e3.mdx rename to docs/examples/dall-e3-generate-image.mdx diff --git a/docs/examples/ffmpeg-video-processing.mdx b/docs/examples/ffmpeg-video-processing.mdx new file mode 100644 index 0000000000..ae45cd969e --- /dev/null +++ b/docs/examples/ffmpeg-video-processing.mdx @@ -0,0 +1,330 @@ +--- +title: "Video processing with FFmpeg" +sidebarTitle: "FFmpeg video processing" +description: "These examples show you how to process videos in various ways using FFmpeg with Trigger.dev." +--- + +## Adding the FFmpeg build extension + +To use these example tasks, you'll first need to add our FFmpeg extension to your project configuration like this: + +```ts trigger.config.ts +import { ffmpeg } from "@trigger.dev/build/extensions/core"; +import { defineConfig } from "@trigger.dev/sdk/v3"; + +export default defineConfig({ + project: "", + // Your other config settings... + build: { + extensions: [ffmpeg()], + }, +}); +``` + + + [Build extensions](../guides/build-extensions) allow you to hook into the build system and + customize the build process or the resulting bundle and container image (in the case of + deploying). You can use pre-built extensions or create your own. 
+ + +## Compress a video using FFmpeg + +This task demonstrates how to use FFmpeg to compress a video, reducing its file size while maintaining reasonable quality, and upload the compressed video to R2 storage. + +### Key Features: + +- Fetches a video from a given URL +- Compresses the video using FFmpeg with various compression settings +- Uploads the compressed video to R2 storage +- Handles temporary file management by creating and cleaning up the output file +- Returns the compressed video file path, the compressed file size, and the R2 URL of the uploaded video + +### Task code + +```ts trigger/ffmpeg-compress-video.ts +import { PutObjectCommand, S3Client } from "@aws-sdk/client-s3"; +import { logger, task } from "@trigger.dev/sdk/v3"; +import ffmpeg from "fluent-ffmpeg"; +import fs from "fs/promises"; +import fetch from "node-fetch"; +import { Readable } from "node:stream"; +import os from "os"; +import path from "path"; + +// Initialize S3 client for R2 storage +const s3Client = new S3Client({ + region: "auto", + endpoint: process.env.S3_ENDPOINT, + credentials: { + accessKeyId: process.env.R2_ACCESS_KEY_ID ?? "", + secretAccessKey: process.env.R2_SECRET_ACCESS_KEY ?? 
"", + }, +}); + +export const ffmpegCompressVideo = task({ + id: "ffmpeg-compress-video", + run: async (payload: { videoUrl: string }) => { + const { videoUrl } = payload; + + // Generate output file name with a timestamp + const tempDirectory = os.tmpdir(); + const outputPath = path.join(tempDirectory, `output_${Date.now()}.mp4`); + + // Fetch the video from the provided URL + const response = await fetch(videoUrl); + + // Compress the video using FFmpeg + await new Promise((resolve, reject) => { + if (!response.body) { + return reject(new Error("Failed to fetch video")); + } + + ffmpeg(Readable.from(response.body)) + .outputOptions([ + "-c:v libx264", // Use H.264 codec + "-crf 28", // Higher CRF for more compression (28 is near the upper limit for acceptable quality) + "-preset veryslow", // Slowest preset for best compression + "-vf scale=iw/2:ih/2", // Reduce resolution to 50% of original width and height + "-c:a aac", // Use AAC for audio + "-b:a 64k", // Reduce audio bitrate to 64k + "-ac 1", // Convert to mono audio + ]) + .output(outputPath) + .on("end", resolve) + .on("error", reject) + .run(); + }); + + // Read the compressed video into a buffer + const compressedVideo = await fs.readFile(outputPath); + + // Get the compressed video size + const compressedSize = compressedVideo.length; + + // Log compression results for debugging purposes + logger.log(`Compressed video size: ${compressedSize} bytes`); + logger.log(`Compressed video saved at: ${outputPath}`); + + // Generate the S3 key for the uploaded video file + const s3Key = `processed-videos/${path.basename(outputPath)}`; + + // Set up the parameters for uploading the video to R2 + const uploadParams = { + Bucket: process.env.S3_BUCKET, + Key: s3Key, + Body: compressedVideo, + }; + + // Upload the video to R2 and get the public URL + await s3Client.send(new PutObjectCommand(uploadParams)); + const s3Url = `https://${process.env.S3_BUCKET}.s3.amazonaws.com/${s3Key}`; + logger.log("Compressed video 
uploaded to R2", { url: s3Url }); + + // Delete the temporary compressed video file + await fs.unlink(outputPath); + + // Return the compressed video file path, compressed size, and R2 URL + return { + compressedVideoPath: outputPath, + compressedSize, + s3Url, + }; + }, +}); +``` + +## Extract audio from a video using FFmpeg + +This task demonstrates how to use FFmpeg to extract audio from a video, convert it to WAV format, and upload it to R2 storage. + +### Key Features: + +- Fetches a video from a given URL +- Extracts the audio from the video using FFmpeg +- Converts the extracted audio to WAV format +- Uploads the extracted audio to R2 storage + +### Task code + + + When testing, make sure to provide a video URL that contains audio. If the video does not have + audio, the task will fail. + + +```ts trigger/ffmpeg-extract-audio.ts +import { PutObjectCommand, S3Client } from "@aws-sdk/client-s3"; +import { logger, task } from "@trigger.dev/sdk/v3"; +import ffmpeg from "fluent-ffmpeg"; +import fs from "fs/promises"; +import fetch from "node-fetch"; +import { Readable } from "node:stream"; +import os from "os"; +import path from "path"; + +// Initialize S3 client for R2 storage +const s3Client = new S3Client({ + region: "auto", + endpoint: process.env.S3_ENDPOINT, + credentials: { + accessKeyId: process.env.R2_ACCESS_KEY_ID ?? "", + secretAccessKey: process.env.R2_SECRET_ACCESS_KEY ?? 
"", + }, +}); + +export const ffmpegExtractAudio = task({ + id: "ffmpeg-extract-audio", + run: async (payload: { videoUrl: string }) => { + const { videoUrl } = payload; + + // Generate output file name with a timestamp + const tempDirectory = os.tmpdir(); + const outputPath = path.join(tempDirectory, `output_${Date.now()}.wav`); + + // Fetch the video from the provided URL + const response = await fetch(videoUrl); + + // Convert the video to WAV format using FFmpeg + await new Promise((resolve, reject) => { + if (!response.body) { + return reject(new Error("Failed to fetch video")); + } + ffmpeg(Readable.from(response.body)) + .toFormat("wav") + .save(outputPath) + .on("end", () => { + logger.log(`WAV file saved to ${outputPath}`); + resolve(outputPath); + }) + .on("error", (err) => { + reject(err); + }); + }); + + // Read the WAV file into a buffer + const wavBuffer = await fs.readFile(outputPath); + + // Log the output file path for debugging purposes + logger.log(`Converted video saved at: ${outputPath}`); + + // Generate the S3 key for the uploaded audio file + const s3Key = `processed-audio/${path.basename(outputPath)}`; + + // Set up the parameters for uploading the audio to R2 + const uploadParams = { + Bucket: process.env.S3_BUCKET, + Key: s3Key, + Body: wavBuffer, + }; + + // Upload the audio to R2 and get the public URL + await s3Client.send(new PutObjectCommand(uploadParams)); + const s3Url = `https://${process.env.S3_BUCKET}.s3.amazonaws.com/${s3Key}`; + logger.log("Extracted audio uploaded to R2", { url: s3Url }); + + // Delete the temporary output file + await fs.unlink(outputPath); + + // Return the WAV buffer, file path, and R2 URL + return { + wavBuffer, + wavFilePath: outputPath, + s3Url, + }; + }, +}); +``` + +## Generate a thumbnail from a video using FFmpeg + +This task demonstrates how to use FFmpeg to generate a thumbnail from a video at a specific time and upload the generated thumbnail to R2 storage. 
+ +### Key Features: + +- Fetches a video from a given URL +- Generates a thumbnail from the video at the 5-second mark +- Uploads the generated thumbnail to R2 storage + +### Task code + +```ts trigger/ffmpeg-generate-thumbnail.ts +import { PutObjectCommand, S3Client } from "@aws-sdk/client-s3"; +import { logger, task } from "@trigger.dev/sdk/v3"; +import ffmpeg from "fluent-ffmpeg"; +import fs from "fs/promises"; +import fetch from "node-fetch"; +import { Readable } from "node:stream"; +import os from "os"; +import path from "path"; + +// Initialize S3 client for R2 storage +const s3Client = new S3Client({ + region: "auto", + endpoint: process.env.S3_ENDPOINT, + credentials: { + accessKeyId: process.env.R2_ACCESS_KEY_ID ?? "", + secretAccessKey: process.env.R2_SECRET_ACCESS_KEY ?? "", + }, +}); + +export const ffmpegGenerateThumbnail = task({ + id: "ffmpeg-generate-thumbnail", + run: async (payload: { videoUrl: string }) => { + const { videoUrl } = payload; + + // Generate output file name with a timestamp + const tempDirectory = os.tmpdir(); + const outputPath = path.join(tempDirectory, `thumbnail_${Date.now()}.jpg`); + + // Fetch the video from the provided URL + const response = await fetch(videoUrl); + + // Generate the thumbnail using FFmpeg + await new Promise((resolve, reject) => { + if (!response.body) { + return reject(new Error("Failed to fetch video")); + } + ffmpeg(Readable.from(response.body)) + .screenshots({ + count: 1, + folder: tempDirectory, + filename: path.basename(outputPath), + size: "320x240", + timemarks: ["5"], // 5 seconds + }) + .on("end", resolve) + .on("error", reject); + }); + + // Read the generated thumbnail into a buffer + const thumbnail = await fs.readFile(outputPath); + + // Generate the S3 key for the uploaded thumbnail file + const s3Key = `thumbnails/${path.basename(outputPath)}`; + + // Set up the parameters for uploading the thumbnail to R2 + const uploadParams = { + Bucket: process.env.S3_BUCKET, + Key: s3Key, + Body: thumbnail, 
+ }; + + // Upload the thumbnail to R2 and get the public URL + await s3Client.send(new PutObjectCommand(uploadParams)); + const s3Url = `https://${process.env.S3_BUCKET}.s3.amazonaws.com/${s3Key}`; + logger.log("Thumbnail uploaded to R2", { url: s3Url }); + + // Delete the temporary thumbnail file + await fs.unlink(outputPath); + + // Log thumbnail generation results for debugging purposes + logger.log(`Thumbnail uploaded to S3: ${s3Url}`); + + // Return the thumbnail buffer, file path, and R2 URL + return { + thumbnailBuffer: thumbnail, + thumbnailPath: outputPath, + s3Url, + }; + }, +}); +``` diff --git a/docs/examples/sharp-image-processing.mdx b/docs/examples/sharp-image-processing.mdx new file mode 100644 index 0000000000..30a45f0a31 --- /dev/null +++ b/docs/examples/sharp-image-processing.mdx @@ -0,0 +1,121 @@ +--- +title: "Process images using Sharp" +sidebarTitle: "Sharp image processing" +description: "This example demonstrates how to process images using the Sharp library with Trigger.dev." +--- + +## Overview + +This task optimizes and watermarks an image using the Sharp library, and then uploads the processed image to R2 storage. + +## Adding build configurations + +To use this example, you'll first need to add these build settings to your `trigger.config.ts` file: + +```ts trigger.config.ts +import { defineConfig } from "@trigger.dev/sdk/v3"; + +export default defineConfig({ + project: "", + // Your other config settings... + build: { + // This is required to use the Sharp library + external: ["sharp"], + }, +}); +``` + + + Any packages that install or build a native binary should be added to external, as native binaries + cannot be bundled. 
+ + +## Key features + + - Resizes and rotates an image + - Adds a watermark to the image + - Uploads the processed image to R2 storage + +## Task code + +```ts trigger/sharp-image-processing.ts +import { PutObjectCommand, S3Client } from "@aws-sdk/client-s3"; +import { logger, task } from "@trigger.dev/sdk/v3"; +import fs from "fs/promises"; +import fetch from "node-fetch"; +import os from "os"; +import path from "path"; +import sharp from "sharp"; + +// Initialize S3 client +const s3Client = new S3Client({ + region: "auto", + endpoint: process.env.S3_ENDPOINT, + credentials: { + accessKeyId: process.env.R2_ACCESS_KEY_ID ?? "", + secretAccessKey: process.env.R2_SECRET_ACCESS_KEY ?? "", + }, +}); + +export const sharpProcessImage = task({ + id: "sharp-process-image", + run: async (payload: { imageUrl: string; watermarkUrl: string }) => { + const { imageUrl, watermarkUrl } = payload; + + // Generate temporary and output file names + const tempDirectory = os.tmpdir(); + const outputPath = path.join(tempDirectory, `output_${Date.now()}.jpg`); + + // Fetch the image and watermark + const [imageResponse, watermarkResponse] = await Promise.all([ + fetch(imageUrl), + fetch(watermarkUrl), + ]); + const imageBuffer = await imageResponse.arrayBuffer(); + const watermarkBuffer = await watermarkResponse.arrayBuffer(); + + // Optimize the image using Sharp + await sharp(Buffer.from(imageBuffer)) + .rotate(90) // Rotate the image by 90 degrees + .resize(800, 600) // Resize the image to 800x600 + .composite([ + { + input: Buffer.from(watermarkBuffer), + gravity: "southeast", // Position the watermark in the bottom-right corner + }, + ]) + .toFormat("jpeg") + .toFile(outputPath); + + // Log the output file path + logger.log(`Optimized image saved at: ${outputPath}`); + + // Read the optimized image file + const optimizedImageBuffer = await fs.readFile(outputPath); + + // Upload the optimized image to S3, replacing slashes with underscores + const s3Key = 
`processed-images/${path.basename(outputPath)}`; + + const uploadParams = { + Bucket: process.env.S3_BUCKET, + Key: s3Key, + Body: optimizedImageBuffer, + }; + + // Upload the image to R2 and get the URL + await s3Client.send(new PutObjectCommand(uploadParams)); + const s3Url = `https://${process.env.S3_BUCKET}.s3.amazonaws.com/${s3Key}`; + logger.log("Optimized image uploaded to R2", { url: s3Url }); + + // Delete the temporary file + await fs.unlink(outputPath); + + // Return the optimized image buffer and file path + return { + optimizedImageBuffer, + optimizedImagePath: outputPath, + s3Url, + }; + }, +}); +``` diff --git a/docs/examples/vercel-ai-sdk.mdx b/docs/examples/vercel-ai-sdk.mdx new file mode 100644 index 0000000000..a1d0de1609 --- /dev/null +++ b/docs/examples/vercel-ai-sdk.mdx @@ -0,0 +1,44 @@ +--- +title: "Using the Vercel AI SDK" +sidebarTitle: "Vercel AI SDK" +description: "This example demonstrates how to use the Vercel AI SDK with Trigger.dev." +--- + +## Overview + +The [Vercel AI SDK](https://www.npmjs.com/package/ai) is a simple way to use AI models from many different providers, including OpenAI, Microsoft Azure, Google Generative AI, Anthropic, Amazon Bedrock, Groq, Perplexity and [more](https://sdk.vercel.ai/providers/ai-sdk-providers). + +It provides a consistent interface to interact with the different AI models, so you can easily switch between them without needing to change your code. + +## Generate text using OpenAI + +This task uses the Vercel AI SDK to use OpenAI to generate text from a prompt. 
+ +### Task code + +```ts trigger/vercel-ai-sdk-openai.ts +import { logger, task } from "@trigger.dev/sdk/v3"; +import { generateText } from "ai"; +// Install the package of the AI model you want to use, in this case OpenAI +import { openai } from "@ai-sdk/openai"; // Ensure OPENAI_API_KEY environment variable is set + +// Task to process each file +export const openaiTask = task({ + id: "vercel-sdk-openai-task", + + run: async (payload: { prompt: string }) => { + const chatCompletion = await generateText({ + model: openai("gpt-4-turbo"), + // Add a system message which will be included with the prompt + system: "You are a friendly assistant!", + // The prompt passed in from the payload + prompt: payload.prompt, + }); + + // Log the generated text + logger.log("chatCompletion text:" + chatCompletion.text); + + return chatCompletion; + }, +}); +``` diff --git a/docs/mint.json b/docs/mint.json index 254c934c77..06ff7d7337 100644 --- a/docs/mint.json +++ b/docs/mint.json @@ -1,10 +1,7 @@ { "$schema": "https://mintlify.com/schema.json", "name": "Trigger.dev", - "openapi": [ - "/openapi.yml", - "/v3-openapi.yaml" - ], + "openapi": ["/openapi.yml", "/v3-openapi.yaml"], "api": { "playground": { "mode": "simple" @@ -103,23 +100,14 @@ "navigation": [ { "group": "Getting Started", - "pages": [ - "introduction", - "quick-start", - "how-it-works", - "upgrading-beta", - "limits" - ] + "pages": ["introduction", "quick-start", "how-it-works", "upgrading-beta", "limits"] }, { "group": "Fundamentals", "pages": [ { "group": "Tasks", - "pages": [ - "tasks/overview", - "tasks/scheduled" - ] + "pages": ["tasks/overview", "tasks/scheduled"] }, "triggering", "apikeys", @@ -128,10 +116,7 @@ }, { "group": "Development", - "pages": [ - "cli-dev", - "run-tests" - ] + "pages": ["cli-dev", "run-tests"] }, { "group": "Deployment", @@ -141,9 +126,7 @@ "github-actions", { "group": "Deployment integrations", - "pages": [ - "vercel-integration" - ] + "pages": ["vercel-integration"] } ] }, @@ 
-155,13 +138,7 @@ "errors-retrying", { "group": "Wait", - "pages": [ - "wait", - "wait-for", - "wait-until", - "wait-for-event", - "wait-for-request" - ] + "pages": ["wait", "wait-for", "wait-until", "wait-for-event", "wait-for-request"] }, "queue-concurrency", "versioning", @@ -179,10 +156,7 @@ "management/overview", { "group": "Tasks API", - "pages": [ - "management/tasks/trigger", - "management/tasks/batch-trigger" - ] + "pages": ["management/tasks/trigger", "management/tasks/batch-trigger"] }, { "group": "Runs API", @@ -220,9 +194,7 @@ }, { "group": "Projects API", - "pages": [ - "management/projects/runs" - ] + "pages": ["management/projects/runs"] } ] }, @@ -268,11 +240,7 @@ }, { "group": "Help", - "pages": [ - "community", - "help-slack", - "help-email" - ] + "pages": ["community", "help-slack", "help-email"] }, { "group": "Frameworks", @@ -294,23 +262,22 @@ }, { "group": "Dashboard", - "pages": [ - "guides/dashboard/creating-a-project" - ] + "pages": ["guides/dashboard/creating-a-project"] }, { "group": "Migrations", - "pages": [ - "guides/use-cases/upgrading-from-v2" - ] + "pages": ["guides/use-cases/upgrading-from-v2"] }, { "group": "Examples", "pages": [ - "examples/generate-image-with-dall-e3", + "examples/dall-e3-generate-image", + "examples/ffmpeg-video-processing", "examples/open-ai-with-retrying", + "examples/sharp-image-processing", "examples/react-pdf", - "examples/resend-email-sequence" + "examples/resend-email-sequence", + "examples/vercel-ai-sdk" ] } ], @@ -319,4 +286,4 @@ "github": "https://github.com/triggerdotdev", "linkedin": "https://www.linkedin.com/company/triggerdotdev" } -} \ No newline at end of file +}