
Commit 27e763b

Merge branch 'main' into tiny-agents-web-server
2 parents 1dfaae2 + bc251da · commit 27e763b

File tree

10 files changed: +353, -3 lines changed


packages/hub/src/lib/cache-management.spec.ts

Lines changed: 4 additions & 2 deletions
@@ -8,7 +8,7 @@ import {
 	type CachedFileInfo,
 } from "./cache-management";
 import { stat, readdir, realpath, lstat } from "node:fs/promises";
-import type { Dirent, Stats } from "node:fs";
+import type { Stats } from "node:fs";
 import { join } from "node:path";
 
 // Mocks
@@ -86,7 +86,9 @@ describe("scanSnapshotDir", () => {
 	test("should scan a valid snapshot directory", async () => {
 		const cachedFiles: CachedFileInfo[] = [];
 		const blobStats = new Map<string, Stats>();
-		vi.mocked(readdir).mockResolvedValueOnce([{ name: "file1", isDirectory: () => false } as Dirent]);
+		vi.mocked(readdir).mockResolvedValueOnce([
+			{ name: "file1", isDirectory: () => false } as unknown as Awaited<ReturnType<typeof readdir>>[0],
+		]);
 
 		vi.mocked(realpath).mockResolvedValueOnce("/fake/realpath");
 		vi.mocked(lstat).mockResolvedValueOnce({ size: 1024, atimeMs: Date.now(), mtimeMs: Date.now() } as Stats);
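
The same typing trick can be sketched in isolation. The snippet below is a minimal vitest example (assumed to mock only `readdir`, while the spec above also mocks `stat`, `realpath`, and `lstat`); it derives the directory-entry type from `readdir` itself, which is what makes the removed `Dirent` import unnecessary.

```ts
import { readdir } from "node:fs/promises";
import { expect, test, vi } from "vitest";

// Replace node:fs/promises with a mocked module.
vi.mock("node:fs/promises", () => ({ readdir: vi.fn() }));

// Derive the entry type from readdir's own return type instead of importing Dirent,
// so the cast keeps compiling even if @types/node changes its readdir overloads.
type ReaddirEntry = Awaited<ReturnType<typeof readdir>>[0];

test("mocks a directory listing without importing Dirent", async () => {
	vi.mocked(readdir).mockResolvedValueOnce([
		{ name: "file1", isDirectory: () => false } as unknown as ReaddirEntry,
	]);

	const entries = await readdir("/fake/snapshot");
	expect(entries).toHaveLength(1);
});
```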

packages/tasks/src/local-apps.ts

Lines changed: 5 additions & 0 deletions
@@ -121,6 +121,11 @@ const snippetLlamacpp = (model: ModelData, filepath?: string): LocalAppSnippet[]
 			setup: "brew install llama.cpp",
 			content: command("llama-cli"),
 		},
+		{
+			title: "Install from WinGet (Windows)",
+			setup: "winget install llama.cpp",
+			content: command("llama-cli"),
+		},
 		{
 			title: "Use pre-built binary",
 			setup: [

packages/tasks/src/model-libraries.ts

Lines changed: 7 additions & 0 deletions
@@ -116,6 +116,13 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
 		countDownloads: `path_extension:"pth"`,
 		snippets: snippets.audioseal,
 	},
+	"bagel-mot": {
+		prettyLabel: "Bagel",
+		repoName: "Bagel",
+		repoUrl: "https://github.com/ByteDance-Seed/Bagel/",
+		filter: false,
+		countDownloads: `path:"llm_config.json"`,
+	},
 	ben2: {
 		prettyLabel: "BEN2",
 		repoName: "BEN2",

packages/tasks/src/snippets/inputs.ts

Lines changed: 6 additions & 0 deletions
@@ -91,6 +91,11 @@ const inputsImageToImage = () => `{
 	"prompt": "Turn the cat into a tiger."
 }`;
 
+const inputsImageToVideo = () => `{
+	"image": "cat.png",
+	"prompt": "The cat starts to dance"
+}`;
+
 const inputsImageSegmentation = () => `"cats.jpg"`;
 
 const inputsObjectDetection = () => `"cats.jpg"`;
@@ -126,6 +131,7 @@ const modelInputSnippets: {
 	"image-classification": inputsImageClassification,
 	"image-to-text": inputsImageToText,
 	"image-to-image": inputsImageToImage,
+	"image-to-video": inputsImageToVideo,
 	"image-segmentation": inputsImageSegmentation,
 	"object-detection": inputsObjectDetection,
 	"question-answering": inputsQuestionAnswering,
Lines changed: 51 additions & 0 deletions
@@ -0,0 +1,51 @@

## Use Cases

Image-to-video models transform a static image into a video sequence. This can be used for a variety of creative and practical applications.

### Animated Images

Bring still photos to life by adding subtle motion or creating short animated clips. This is great for social media content or dynamic presentations.

### Storytelling from a Single Frame

Expand on the narrative of an image by generating a short video that imagines what happened before or after the moment captured in the photo.

### Video Generation with Visual Consistency

Use an input image as a strong visual anchor to guide the generation of a video, ensuring that the style, characters, or objects in the video remain consistent with the source image.
### Controllable Motion

Image-to-video models can be used to specify the direction or intensity of motion, or to control camera movement, giving finer-grained control over the generated animation.
## Inference

Here is how to run the Wan 2.1 T2V 1.3B model with Diffusers:

```py
import torch
from diffusers import AutoencoderKLWan, WanPipeline
from diffusers.utils import export_to_video

model_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16)
pipe.to("cuda")

prompt = "A cat walks on the grass, realistic"
negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards"

output = pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    height=480,
    width=832,
    num_frames=81,
    guidance_scale=5.0
).frames[0]
export_to_video(output, "output.mp4", fps=15)
```
## Useful Resources

To train image-to-video LoRAs, check out [finetrainers](https://github.com/a-r-r-o-w/finetrainers) and [musubi-tuner](https://github.com/kohya-ss/musubi-tuner).
Lines changed: 126 additions & 0 deletions
@@ -0,0 +1,126 @@

import type { TaskDataCustom } from "../index.js";

const taskData: TaskDataCustom = {
	datasets: [
		{
			description: "A benchmark dataset for reference image controlled video generation.",
			id: "ali-vilab/VACE-Benchmark",
		},
		{
			description: "A dataset of video generation style preferences.",
			id: "Rapidata/sora-video-generation-style-likert-scoring",
		},
		{
			description: "A dataset with videos and captions throughout the videos.",
			id: "BestWishYsh/ChronoMagic",
		},
	],
	demo: {
		inputs: [
			{
				filename: "image-to-video-input.jpg",
				type: "img",
			},
			{
				label: "Optional Text Prompt",
				content: "This penguin is dancing",
				type: "text",
			},
		],
		outputs: [
			{
				filename: "image-to-video-output.gif",
				type: "img",
			},
		],
	},
	metrics: [
		{
			description:
				"Fréchet Video Distance (FVD) measures the perceptual similarity between the distributions of generated videos and a set of real videos, assessing overall visual quality and temporal coherence of the video generated from an input image.",
			id: "fvd",
		},
		{
			description:
				"CLIP Score measures the semantic similarity between a textual prompt (if provided alongside the input image) and the generated video frames. It evaluates how well the video's generated content and motion align with the textual description, conditioned on the initial image.",
			id: "clip_score",
		},
		{
			description:
				"First Frame Fidelity, often measured using LPIPS (Learned Perceptual Image Patch Similarity), PSNR, or SSIM, quantifies how closely the first frame of the generated video matches the input conditioning image.",
			id: "lpips",
		},
		{
			description:
				"Identity Preservation Score measures the consistency of identity (e.g., a person's face or a specific object's characteristics) between the input image and throughout the generated video frames, often calculated using features from specialized models like face recognition (e.g., ArcFace) or re-identification models.",
			id: "identity_preservation",
		},
		{
			description:
				"Motion Score evaluates the quality, realism, and temporal consistency of motion in the video generated from a static image. This can be based on optical flow analysis (e.g., smoothness, magnitude), consistency of object trajectories, or specific motion plausibility assessments.",
			id: "motion_score",
		},
	],
	models: [
		{
			description: "LTX-Video, a 13B parameter model for high-quality video generation.",
			id: "Lightricks/LTX-Video-0.9.7-dev",
		},
		{
			description: "A 14B parameter model for reference image controlled video generation.",
			id: "Wan-AI/Wan2.1-VACE-14B",
		},
		{
			description: "An image-to-video generation model using the FramePack F1 methodology with the Hunyuan-DiT architecture.",
			id: "lllyasviel/FramePack_F1_I2V_HY_20250503",
		},
		{
			description: "A distilled version of the LTX-Video-0.9.7-dev model for faster inference.",
			id: "Lightricks/LTX-Video-0.9.7-distilled",
		},
		{
			description: "An image-to-video generation model by Skywork AI, 14B parameters, producing 720p videos.",
			id: "Skywork/SkyReels-V2-I2V-14B-720P",
		},
		{
			description: "Image-to-video variant of Tencent's HunyuanVideo.",
			id: "tencent/HunyuanVideo-I2V",
		},
		{
			description: "A 14B parameter model for 720p image-to-video generation by Wan-AI.",
			id: "Wan-AI/Wan2.1-I2V-14B-720P",
		},
		{
			description: "A Diffusers version of the Wan2.1-I2V-14B-720P model for 720p image-to-video generation.",
			id: "Wan-AI/Wan2.1-I2V-14B-720P-Diffusers",
		},
	],
	spaces: [
		{
			description: "An application to generate videos fast.",
			id: "Lightricks/ltx-video-distilled",
		},
		{
			description: "Generate videos with FramePack-F1.",
			id: "linoyts/FramePack-F1",
		},
		{
			description: "Generate videos with FramePack.",
			id: "lisonallen/framepack-i2v",
		},
		{
			description: "Wan2.1 with the CausVid LoRA.",
			id: "multimodalart/wan2-1-fast",
		},
		{
			description: "A demo for Stable Video Diffusion.",
			id: "multimodalart/stable-video-diffusion",
		},
	],
	summary:
		"Image-to-video models take a still image as input and generate a video. These models can be guided by text prompts to influence the content and style of the output video.",
	widgetModels: [],
	youtubeId: undefined,
};

export default taskData;
Lines changed: 74 additions & 0 deletions
@@ -0,0 +1,74 @@

/**
 * Inference code generated from the JSON schema spec in ./spec
 *
 * Using src/scripts/inference-codegen
 */
/**
 * Inputs for Image To Video inference
 */
export interface ImageToVideoInput {
	/**
	 * The input image data as a base64-encoded string. If no `parameters` are provided, you can
	 * also provide the image data as a raw bytes payload.
	 */
	inputs: Blob;
	/**
	 * Additional inference parameters for Image To Video
	 */
	parameters?: ImageToVideoParameters;
	[property: string]: unknown;
}
/**
 * Additional inference parameters for Image To Video
 */
export interface ImageToVideoParameters {
	/**
	 * For diffusion models. A higher guidance scale value encourages the model to generate
	 * videos closely linked to the text prompt at the expense of lower image quality.
	 */
	guidance_scale?: number;
	/**
	 * One prompt to guide what NOT to include in video generation.
	 */
	negative_prompt?: string;
	/**
	 * The num_frames parameter determines how many video frames are generated.
	 */
	num_frames?: number;
	/**
	 * The number of denoising steps. More denoising steps usually lead to a higher quality
	 * video at the expense of slower inference.
	 */
	num_inference_steps?: number;
	/**
	 * The text prompt to guide the video generation.
	 */
	prompt?: string;
	/**
	 * Seed for the random number generator.
	 */
	seed?: number;
	/**
	 * The size in pixel of the output video frames.
	 */
	target_size?: TargetSize;
	[property: string]: unknown;
}
/**
 * The size in pixel of the output video frames.
 */
export interface TargetSize {
	height: number;
	width: number;
	[property: string]: unknown;
}
/**
 * Outputs of inference for the Image To Video task
 */
export interface ImageToVideoOutput {
	/**
	 * The generated video returned as raw bytes in the payload.
	 */
	video: unknown;
	[property: string]: unknown;
}
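
To make the shape of these generated types concrete, here is a minimal sketch of building a typed request object; the import path is illustrative and every parameter value below is a placeholder, not a default.

```ts
import type { ImageToVideoInput } from "./inference.js"; // illustrative import path

// A raw-bytes image payload plus optional generation parameters, typed against the interfaces above.
const request: ImageToVideoInput = {
	inputs: new Blob([/* raw image bytes */]),
	parameters: {
		prompt: "The cat starts to dance",
		negative_prompt: "blurry, low quality",
		num_frames: 81,
		num_inference_steps: 30,
		guidance_scale: 5.0,
		seed: 42,
		target_size: { width: 832, height: 480 },
	},
};
```

The index signatures (`[property: string]: unknown`) leave room for provider-specific fields beyond the ones documented above.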
Lines changed: 64 additions & 0 deletions
@@ -0,0 +1,64 @@

{
	"$id": "/inference/schemas/image-to-video/input.json",
	"$schema": "http://json-schema.org/draft-06/schema#",
	"description": "Inputs for Image To Video inference",
	"title": "ImageToVideoInput",
	"type": "object",
	"properties": {
		"inputs": {
			"type": "string",
			"description": "The input image data as a base64-encoded string. If no `parameters` are provided, you can also provide the image data as a raw bytes payload.",
			"comment": "type=binary"
		},
		"parameters": {
			"description": "Additional inference parameters for Image To Video",
			"$ref": "#/$defs/ImageToVideoParameters"
		}
	},
	"$defs": {
		"ImageToVideoParameters": {
			"title": "ImageToVideoParameters",
			"type": "object",
			"properties": {
				"prompt": {
					"type": "string",
					"description": "The text prompt to guide the video generation."
				},
				"guidance_scale": {
					"type": "number",
					"description": "For diffusion models. A higher guidance scale value encourages the model to generate videos closely linked to the text prompt at the expense of lower image quality."
				},
				"negative_prompt": {
					"type": "string",
					"description": "One prompt to guide what NOT to include in video generation."
				},
				"num_inference_steps": {
					"type": "integer",
					"description": "The number of denoising steps. More denoising steps usually lead to a higher quality video at the expense of slower inference."
				},
				"num_frames": {
					"type": "number",
					"description": "The num_frames parameter determines how many video frames are generated."
				},
				"target_size": {
					"type": "object",
					"description": "The size in pixel of the output video frames.",
					"properties": {
						"width": {
							"type": "integer"
						},
						"height": {
							"type": "integer"
						}
					},
					"required": ["width", "height"]
				},
				"seed": {
					"type": "integer",
					"description": "Seed for the random number generator."
				}
			}
		}
	},
	"required": ["inputs"]
}
Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@

{
	"$id": "/inference/schemas/image-to-video/output.json",
	"$schema": "http://json-schema.org/draft-06/schema#",
	"description": "Outputs of inference for the Image To Video task",
	"title": "ImageToVideoOutput",
	"type": "object",
	"properties": {
		"video": {
			"description": "The generated video returned as raw bytes in the payload."
		}
	},
	"required": ["video"]
}
