
Commit 93093ed

Merge branch 'main' into cerebras-provider
2 parents: f6491bf + 2027973

File tree: 17 files changed, +287 −45 lines

packages/ollama-utils/package.json

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 {
   "name": "@huggingface/ollama-utils",
   "packageManager": "[email protected]",
-  "version": "0.0.2",
+  "version": "0.0.3",
   "description": "Various utilities for maintaining Ollama compatibility with models on Hugging Face hub",
   "repository": "https://github.com/huggingface/huggingface.js.git",
   "publishConfig": {

packages/ollama-utils/src/chat-template-automap.ts

Lines changed: 71 additions & 6 deletions
Large diffs are not rendered by default.

packages/tasks-gen/scripts/generate-snippets-fixtures.ts

Lines changed: 11 additions & 0 deletions
@@ -90,6 +90,17 @@ const TEST_CASES: {
     providers: ["hf-inference", "fal-ai"],
     languages: ["sh", "js", "py"],
   },
+  {
+    testName: "text-to-video",
+    model: {
+      id: "tencent/HunyuanVideo",
+      pipeline_tag: "text-to-video",
+      tags: [],
+      inference: "",
+    },
+    providers: ["replicate", "fal-ai"],
+    languages: ["js", "py"],
+  },
   {
     testName: "text-classification",
     model: {
New snippet fixture for the text-to-video test case (JavaScript, provider "fal-ai"):

Lines changed: 11 additions & 0 deletions
@@ -0,0 +1,11 @@
+import { HfInference } from "@huggingface/inference";
+
+const client = new HfInference("api_token");
+
+const video = await client.textToVideo({
+    model: "tencent/HunyuanVideo",
+    provider: "fal-ai",
+    inputs: "A young man walking on the street",
+    parameters: { num_inference_steps: 5 },
+});
+// Use the generated video (it's a Blob)
New snippet fixture for the text-to-video test case (JavaScript, provider "replicate"):

Lines changed: 11 additions & 0 deletions
@@ -0,0 +1,11 @@
+import { HfInference } from "@huggingface/inference";
+
+const client = new HfInference("api_token");
+
+const video = await client.textToVideo({
+    model: "tencent/HunyuanVideo",
+    provider: "replicate",
+    inputs: "A young man walking on the street",
+    parameters: { num_inference_steps: 5 },
+});
+// Use the generated video (it's a Blob)
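Both JavaScript fixtures end by noting that textToVideo resolves to a Blob. A minimal sketch of consuming that Blob in Node.js follows; the output file name and the use of node:fs/promises are illustrative assumptions, not part of the diff:

import { writeFile } from "node:fs/promises";

// `video` is the Blob produced by client.textToVideo(...) above.
// Convert it to a Buffer and write it to disk (file name is illustrative only).
const buffer = Buffer.from(await video.arrayBuffer());
await writeFile("hunyuan-video.mp4", buffer);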
New snippet fixture for the text-to-video test case (Python, provider "fal-ai"):

Lines changed: 11 additions & 0 deletions
@@ -0,0 +1,11 @@
+from huggingface_hub import InferenceClient
+
+client = InferenceClient(
+    provider="fal-ai",
+    api_key="api_token"
+)
+
+video = client.text_to_video(
+    "A young man walking on the street",
+    model="tencent/HunyuanVideo"
+)
New snippet fixture for the text-to-video test case (Python, provider "replicate"):

Lines changed: 11 additions & 0 deletions
@@ -0,0 +1,11 @@
+from huggingface_hub import InferenceClient
+
+client = InferenceClient(
+    provider="replicate",
+    api_key="api_token"
+)
+
+video = client.text_to_video(
+    "A young man walking on the street",
+    model="tencent/HunyuanVideo"
+)

packages/tasks/package.json

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 {
   "name": "@huggingface/tasks",
   "packageManager": "[email protected]",
-  "version": "0.16.5",
+  "version": "0.16.6",
   "description": "List of ML tasks for huggingface.co/tasks",
   "repository": "https://github.com/huggingface/huggingface.js.git",
   "publishConfig": {

packages/tasks/src/model-libraries.ts

Lines changed: 6 additions & 0 deletions
@@ -394,6 +394,12 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
     repoUrl: "https://github.com/Tencent/HunyuanDiT",
     countDownloads: `path:"pytorch_model_ema.pt" OR path:"pytorch_model_distill.pt"`,
   },
+  "hunyuan3d-2": {
+    prettyLabel: "Hunyuan3D-2",
+    repoName: "Hunyuan3D-2",
+    repoUrl: "https://github.com/Tencent/Hunyuan3D-2",
+    countDownloads: `path:"model_index.json" OR path:"config.yaml"`,
+  },
   imstoucan: {
     prettyLabel: "IMS Toucan",
     repoName: "IMS-Toucan",

packages/tasks/src/snippets/inputs.ts

Lines changed: 3 additions & 0 deletions
@@ -96,6 +96,8 @@ const inputsAudioClassification = () => `"sample1.flac"`;
 
 const inputsTextToImage = () => `"Astronaut riding a horse"`;
 
+const inputsTextToVideo = () => `"A young man walking on the street"`;
+
 const inputsTextToSpeech = () => `"The answer to the universe is 42"`;
 
 const inputsTextToAudio = () => `"liquid drum and bass, atmospheric synths, airy sounds"`;
@@ -130,6 +132,7 @@ const modelInputSnippets: {
   "text-generation": inputsTextGeneration,
   "image-text-to-text": inputsTextGeneration,
   "text-to-image": inputsTextToImage,
+  "text-to-video": inputsTextToVideo,
   "text-to-speech": inputsTextToSpeech,
   "text-to-audio": inputsTextToAudio,
   "text2text-generation": inputsText2TextGeneration,
