Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
import fs from "node:fs";

const image = fs.readFileSync("{{inputs.asObj.inputs}}");

/**
 * POST an image-to-image request to the inference router.
 * @param {{inputs: Buffer, parameters: object}} data - raw image bytes plus generation parameters
 * @returns {Promise<object>} parsed JSON response
 */
async function query(data) {
	const response = await fetch(
		"{{ fullUrl }}",
		{
			headers: {
				Authorization: "{{ authorizationHeader }}",
				// The request body is JSON, not raw JPEG bytes.
				"Content-Type": "application/json",
{% if billTo %}
				"X-HF-Bill-To": "{{ billTo }}",
{% endif %}
			},
			method: "POST",
			// fetch() needs a serialized body; Buffer encodes via toString("base64")
			// (Buffer has no .encode() method).
			body: JSON.stringify({
				inputs: `data:image/png;base64,${data.inputs.toString("base64")}`,
				parameters: data.parameters,
			}),
		}
	);
	const result = await response.json();
	return result;
}

query({
	inputs: image,
	parameters: {
		prompt: "{{ inputs.asObj.parameters.prompt }}",
	}
}).then((response) => {
	console.log(JSON.stringify(response));
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
import { InferenceClient } from "@huggingface/inference";
import fs from "node:fs";

const client = new InferenceClient("{{ accessToken }}");

// Raw bytes of the local input image.
const data = fs.readFileSync("{{inputs.asObj.inputs}}");

const image = await client.imageToImage({
{% if endpointUrl %}
	endpointUrl: "{{ endpointUrl }}",
{% endif %}
	provider: "{{provider}}",
	model: "{{model.id}}",
	inputs: data,
	parameters: { prompt: "{{inputs.asObj.parameters.prompt}}" },
}{% if billTo %}, {
	billTo: "{{ billTo }}",
}{% endif %});
// Use the generated image (it's a Blob)
// For example, you can save it to a file or display it in an image element
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
{%if provider == "fal-ai" %}
import fal_client
import base64

def on_queue_update(update):
    # Stream provider-side logs while the request is queued/running.
    if isinstance(update, fal_client.InProgress):
        for log in update.logs:
            print(log["message"])

# Encode the local input image as base64 so it can be sent inline as a data URI.
with open("{{inputs.asObj.inputs}}", "rb") as image_file:
    image_base_64 = base64.b64encode(image_file.read()).decode('utf-8')

result = fal_client.subscribe(
    "fal-ai/flux-kontext/dev",
    arguments={
        # The text prompt goes in "prompt"; the encoded image goes in
        # "image_url" (previously these two values were swapped).
        "prompt": "{{ inputs.asObj.parameters.prompt }}",
        "image_url": f"data:image/png;base64,{image_base_64}",
    },
    with_logs=True,
    on_queue_update=on_queue_update,
)
print(result)
{%endif%}
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
with open("{{ inputs.asObj.inputs }}", "rb") as image_file:
input_image = image_file.read()

# output is a PIL.Image object
image = client.image_to_image(
"{{ inputs.asObj.inputs }}",
input_image,
prompt="{{ inputs.asObj.parameters.prompt }}",
model="{{ model.id }}",
)
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
with open("{{inputs.asObj.inputs}}", "rb") as image_file:
image_base_64 = base64.b64encode(image_file.read()).decode('utf-8')

def query(payload):
with open(payload["inputs"], "rb") as f:
img = f.read()
Expand Down
4 changes: 2 additions & 2 deletions packages/tasks-gen/scripts/generate-snippets-fixtures.ts
Original file line number Diff line number Diff line change
Expand Up @@ -133,12 +133,12 @@ const TEST_CASES: {
testName: "image-to-image",
task: "image-to-image",
model: {
id: "stabilityai/stable-diffusion-xl-refiner-1.0",
id: "black-forest-labs/FLUX.1-Kontext-dev",
pipeline_tag: "image-to-image",
tags: [],
inference: "",
},
providers: ["hf-inference"],
providers: ["fal-ai", "replicate", "hf-inference"],
},
{
testName: "tabular",
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
import fs from "node:fs";

const image = fs.readFileSync("cat.png");

/**
 * POST an image-to-image request to the fal-ai route of the HF router.
 * @param {{inputs: Buffer, parameters: object}} data - raw image bytes plus generation parameters
 * @returns {Promise<object>} parsed JSON response
 */
async function query(data) {
	const response = await fetch(
		"https://router.huggingface.co/fal-ai/<fal-ai alias for black-forest-labs/FLUX.1-Kontext-dev>?_subdomain=queue",
		{
			headers: {
				Authorization: `Bearer ${process.env.HF_TOKEN}`,
				// The request body is JSON, not raw JPEG bytes.
				"Content-Type": "application/json",
			},
			method: "POST",
			// fetch() needs a serialized body; Buffer encodes via toString("base64")
			// (Buffer has no .encode() method).
			body: JSON.stringify({
				inputs: `data:image/png;base64,${data.inputs.toString("base64")}`,
				parameters: data.parameters,
			}),
		}
	);
	const result = await response.json();
	return result;
}

query({
	inputs: image,
	parameters: {
		prompt: "Turn the cat into a tiger.",
	}
}).then((response) => {
	console.log(JSON.stringify(response));
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
import fs from "node:fs";

const image = fs.readFileSync("cat.png");

/**
 * POST an image-to-image request to the hf-inference route of the HF router.
 * @param {{inputs: Buffer, parameters: object}} data - raw image bytes plus generation parameters
 * @returns {Promise<object>} parsed JSON response
 */
async function query(data) {
	const response = await fetch(
		"https://router.huggingface.co/hf-inference/models/black-forest-labs/FLUX.1-Kontext-dev",
		{
			headers: {
				Authorization: `Bearer ${process.env.HF_TOKEN}`,
				// The request body is JSON, not raw JPEG bytes.
				"Content-Type": "application/json",
			},
			method: "POST",
			// fetch() needs a serialized body; Buffer encodes via toString("base64")
			// (Buffer has no .encode() method).
			body: JSON.stringify({
				inputs: `data:image/png;base64,${data.inputs.toString("base64")}`,
				parameters: data.parameters,
			}),
		}
	);
	const result = await response.json();
	return result;
}

query({
	inputs: image,
	parameters: {
		prompt: "Turn the cat into a tiger.",
	}
}).then((response) => {
	console.log(JSON.stringify(response));
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
import fs from "node:fs";

const image = fs.readFileSync("cat.png");

/**
 * POST an image-to-image request to the replicate route of the HF router.
 * @param {{inputs: Buffer, parameters: object}} data - raw image bytes plus generation parameters
 * @returns {Promise<object>} parsed JSON response
 */
async function query(data) {
	const response = await fetch(
		"https://router.huggingface.co/replicate/v1/models/<replicate alias for black-forest-labs/FLUX.1-Kontext-dev>/predictions",
		{
			headers: {
				Authorization: `Bearer ${process.env.HF_TOKEN}`,
				// The request body is JSON, not raw JPEG bytes.
				"Content-Type": "application/json",
			},
			method: "POST",
			// fetch() needs a serialized body; Buffer encodes via toString("base64")
			// (Buffer has no .encode() method).
			body: JSON.stringify({
				inputs: `data:image/png;base64,${data.inputs.toString("base64")}`,
				parameters: data.parameters,
			}),
		}
	);
	const result = await response.json();
	return result;
}

query({
	inputs: image,
	parameters: {
		prompt: "Turn the cat into a tiger.",
	}
}).then((response) => {
	console.log(JSON.stringify(response));
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
import { InferenceClient } from "@huggingface/inference";
import fs from "node:fs";

const client = new InferenceClient(process.env.HF_TOKEN);

// Raw bytes of the local input image (fs was previously used without being imported).
const data = fs.readFileSync("cat.png");

const image = await client.imageToImage({
	provider: "fal-ai",
	model: "black-forest-labs/FLUX.1-Kontext-dev",
	inputs: data,
	parameters: { prompt: "Turn the cat into a tiger." },
});
// Use the generated image (it's a Blob)
// For example, you can save it to a file or display it in an image element
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
import { InferenceClient } from "@huggingface/inference";
import fs from "node:fs";

const client = new InferenceClient(process.env.HF_TOKEN);

// Raw bytes of the local input image (fs was previously used without being imported).
const data = fs.readFileSync("cat.png");

const image = await client.imageToImage({
	provider: "hf-inference",
	model: "black-forest-labs/FLUX.1-Kontext-dev",
	inputs: data,
	parameters: { prompt: "Turn the cat into a tiger." },
});
// Use the generated image (it's a Blob)
// For example, you can save it to a file or display it in an image element
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
import { InferenceClient } from "@huggingface/inference";
import fs from "node:fs";

const client = new InferenceClient(process.env.HF_TOKEN);

// Raw bytes of the local input image (fs was previously used without being imported).
const data = fs.readFileSync("cat.png");

const image = await client.imageToImage({
	provider: "replicate",
	model: "black-forest-labs/FLUX.1-Kontext-dev",
	inputs: data,
	parameters: { prompt: "Turn the cat into a tiger." },
});
// Use the generated image (it's a Blob)
// For example, you can save it to a file or display it in an image element
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import fal_client
import base64

def on_queue_update(update):
    # Stream provider-side logs while the request is queued/running.
    if isinstance(update, fal_client.InProgress):
        for log in update.logs:
            print(log["message"])

# Encode the local input image as base64 so it can be sent inline as a data URI.
with open("cat.png", "rb") as image_file:
    image_base_64 = base64.b64encode(image_file.read()).decode('utf-8')

result = fal_client.subscribe(
    "fal-ai/flux-kontext/dev",
    arguments={
        # The text prompt goes in "prompt"; the encoded image goes in
        # "image_url" (previously these two values were swapped).
        "prompt": "Turn the cat into a tiger.",
        "image_url": f"data:image/png;base64,{image_base_64}",
    },
    with_logs=True,
    on_queue_update=on_queue_update,
)
print(result)
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
import os
from huggingface_hub import InferenceClient

# Client routed through the fal-ai provider; auth comes from the HF_TOKEN env var.
client = InferenceClient(
    provider="fal-ai",
    api_key=os.environ["HF_TOKEN"],
)

# Load the source image from disk as raw bytes.
with open("cat.png", "rb") as image_file:
    image_bytes = image_file.read()

# output is a PIL.Image object
image = client.image_to_image(
    image_bytes,
    prompt="Turn the cat into a tiger.",
    model="black-forest-labs/FLUX.1-Kontext-dev",
)
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,12 @@
api_key=os.environ["HF_TOKEN"],
)

with open("cat.png", "rb") as image_file:
input_image = image_file.read()

# output is a PIL.Image object
image = client.image_to_image(
"cat.png",
input_image,
prompt="Turn the cat into a tiger.",
model="stabilityai/stable-diffusion-xl-refiner-1.0",
model="black-forest-labs/FLUX.1-Kontext-dev",
)
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
import os
from huggingface_hub import InferenceClient

# Client routed through the replicate provider; auth comes from the HF_TOKEN env var.
client = InferenceClient(
    provider="replicate",
    api_key=os.environ["HF_TOKEN"],
)

# Load the source image from disk as raw bytes.
with open("cat.png", "rb") as image_file:
    image_bytes = image_file.read()

# output is a PIL.Image object
image = client.image_to_image(
    image_bytes,
    prompt="Turn the cat into a tiger.",
    model="black-forest-labs/FLUX.1-Kontext-dev",
)
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
import os
import base64
import requests

API_URL = "https://router.huggingface.co/fal-ai/<fal-ai alias for black-forest-labs/FLUX.1-Kontext-dev>?_subdomain=queue"
headers = {
    "Authorization": f"Bearer {os.environ['HF_TOKEN']}",
}

def query(payload):
    """POST the payload as JSON; payload["inputs"] is a local image path that
    is read and replaced in place by its base64 encoding before sending."""
    with open(payload["inputs"], "rb") as f:
        img = f.read()
    payload["inputs"] = base64.b64encode(img).decode("utf-8")
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.content

# NOTE: the previous version also read and base64-encoded "cat.png" at module
# level into an unused variable; query() already does that encoding itself.
image_bytes = query({
    "inputs": "cat.png",
    "parameters": {
        "prompt": "Turn the cat into a tiger."
    }
})

# You can access the image with PIL.Image for example
import io
from PIL import Image
image = Image.open(io.BytesIO(image_bytes))
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,14 @@
import base64
import requests

API_URL = "https://router.huggingface.co/hf-inference/models/stabilityai/stable-diffusion-xl-refiner-1.0"
API_URL = "https://router.huggingface.co/hf-inference/models/black-forest-labs/FLUX.1-Kontext-dev"
headers = {
"Authorization": f"Bearer {os.environ['HF_TOKEN']}",
}

with open("cat.png", "rb") as image_file:
image_base_64 = base64.b64encode(image_file.read()).decode('utf-8')

def query(payload):
with open(payload["inputs"], "rb") as f:
img = f.read()
Expand Down
Loading