diff --git a/packages/inference/src/snippets/templates/js/fetch/imageToImage.jinja b/packages/inference/src/snippets/templates/js/fetch/imageToImage.jinja new file mode 100644 index 0000000000..a55ee7e159 --- /dev/null +++ b/packages/inference/src/snippets/templates/js/fetch/imageToImage.jinja @@ -0,0 +1,31 @@ +const image = fs.readFileSync("{{inputs.asObj.inputs}}"); + +async function query(data) { + const response = await fetch( + "{{ fullUrl }}", + { + headers: { + Authorization: "{{ authorizationHeader }}", + "Content-Type": "image/jpeg", +{% if billTo %} + "X-HF-Bill-To": "{{ billTo }}", +{% endif %} }, + method: "POST", + body: { + "inputs": `data:image/png;base64,${data.inputs.encode("base64")}`, + "parameters": data.parameters, + } + } + ); + const result = await response.json(); + return result; +} + +query({ + inputs: image, + parameters: { + prompt: "{{ inputs.asObj.parameters.prompt }}", + } +}).then((response) => { + console.log(JSON.stringify(response)); +}); \ No newline at end of file diff --git a/packages/inference/src/snippets/templates/js/huggingface.js/imageToImage.jinja b/packages/inference/src/snippets/templates/js/huggingface.js/imageToImage.jinja new file mode 100644 index 0000000000..c6be18f2c1 --- /dev/null +++ b/packages/inference/src/snippets/templates/js/huggingface.js/imageToImage.jinja @@ -0,0 +1,19 @@ +import { InferenceClient } from "@huggingface/inference"; + +const client = new InferenceClient("{{ accessToken }}"); + +const data = fs.readFileSync("{{inputs.asObj.inputs}}"); + +const image = await client.imageToImage({ +{% if endpointUrl %} + endpointUrl: "{{ endpointUrl }}", +{% endif %} + provider: "{{provider}}", + model: "{{model.id}}", + inputs: data, + parameters: { prompt: "{{inputs.asObj.parameters.prompt}}", }, +}{% if billTo %}, { + billTo: "{{ billTo }}", +}{% endif %}); +/// Use the generated image (it's a Blob) +// For example, you can save it to a file or display it in an image element diff --git 
a/packages/inference/src/snippets/templates/python/fal_client/imageToImage.jinja b/packages/inference/src/snippets/templates/python/fal_client/imageToImage.jinja new file mode 100644 index 0000000000..507cfc6095 --- /dev/null +++ b/packages/inference/src/snippets/templates/python/fal_client/imageToImage.jinja @@ -0,0 +1,23 @@ +{%if provider == "fal-ai" %} +import fal_client +import base64 + +def on_queue_update(update): + if isinstance(update, fal_client.InProgress): + for log in update.logs: + print(log["message"]) + +with open("{{inputs.asObj.inputs}}", "rb") as image_file: + image_base_64 = base64.b64encode(image_file.read()).decode('utf-8') + +result = fal_client.subscribe( + "fal-ai/flux-kontext/dev", + arguments={ + "prompt": "{{ inputs.asObj.parameters.prompt }}", + "image_url": f"data:image/png;base64,{image_base_64}", + }, + with_logs=True, + on_queue_update=on_queue_update, +) +print(result) +{%endif%} diff --git a/packages/inference/src/snippets/templates/python/huggingface_hub/imageToImage.jinja b/packages/inference/src/snippets/templates/python/huggingface_hub/imageToImage.jinja index 882c720b54..9ee1554fca 100644 --- a/packages/inference/src/snippets/templates/python/huggingface_hub/imageToImage.jinja +++ b/packages/inference/src/snippets/templates/python/huggingface_hub/imageToImage.jinja @@ -1,6 +1,9 @@ +with open("{{ inputs.asObj.inputs }}", "rb") as image_file: + input_image = image_file.read() + # output is a PIL.Image object image = client.image_to_image( - "{{ inputs.asObj.inputs }}", + input_image, prompt="{{ inputs.asObj.parameters.prompt }}", model="{{ model.id }}", ) \ No newline at end of file diff --git a/packages/inference/src/snippets/templates/python/requests/imageToImage.jinja b/packages/inference/src/snippets/templates/python/requests/imageToImage.jinja index a8782c00bb..35dcbb5536 100644 --- a/packages/inference/src/snippets/templates/python/requests/imageToImage.jinja +++ 
b/packages/inference/src/snippets/templates/python/requests/imageToImage.jinja @@ -1,3 +1,6 @@ +with open("{{inputs.asObj.inputs}}", "rb") as image_file: + image_base_64 = base64.b64encode(image_file.read()).decode('utf-8') + def query(payload): with open(payload["inputs"], "rb") as f: img = f.read() diff --git a/packages/tasks-gen/scripts/generate-snippets-fixtures.ts b/packages/tasks-gen/scripts/generate-snippets-fixtures.ts index 848815b107..c32456f558 100644 --- a/packages/tasks-gen/scripts/generate-snippets-fixtures.ts +++ b/packages/tasks-gen/scripts/generate-snippets-fixtures.ts @@ -133,12 +133,12 @@ const TEST_CASES: { testName: "image-to-image", task: "image-to-image", model: { - id: "stabilityai/stable-diffusion-xl-refiner-1.0", + id: "black-forest-labs/FLUX.1-Kontext-dev", pipeline_tag: "image-to-image", tags: [], inference: "", }, - providers: ["hf-inference"], + providers: ["fal-ai", "replicate", "hf-inference"], }, { testName: "tabular", diff --git a/packages/tasks-gen/snippets-fixtures/image-to-image/js/fetch/0.fal-ai.js b/packages/tasks-gen/snippets-fixtures/image-to-image/js/fetch/0.fal-ai.js new file mode 100644 index 0000000000..aad5423f2c --- /dev/null +++ b/packages/tasks-gen/snippets-fixtures/image-to-image/js/fetch/0.fal-ai.js @@ -0,0 +1,29 @@ +const image = fs.readFileSync("cat.png"); + +async function query(data) { + const response = await fetch( + "https://router.huggingface.co/fal-ai/?_subdomain=queue", + { + headers: { + Authorization: `Bearer ${process.env.HF_TOKEN}`, + "Content-Type": "image/jpeg", + }, + method: "POST", + body: { + "inputs": `data:image/png;base64,${data.inputs.encode("base64")}`, + "parameters": data.parameters, + } + } + ); + const result = await response.json(); + return result; +} + +query({ + inputs: image, + parameters: { + prompt: "Turn the cat into a tiger.", + } +}).then((response) => { + console.log(JSON.stringify(response)); +}); \ No newline at end of file diff --git 
a/packages/tasks-gen/snippets-fixtures/image-to-image/js/fetch/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/image-to-image/js/fetch/0.hf-inference.js new file mode 100644 index 0000000000..7c777e24ef --- /dev/null +++ b/packages/tasks-gen/snippets-fixtures/image-to-image/js/fetch/0.hf-inference.js @@ -0,0 +1,29 @@ +const image = fs.readFileSync("cat.png"); + +async function query(data) { + const response = await fetch( + "https://router.huggingface.co/hf-inference/models/black-forest-labs/FLUX.1-Kontext-dev", + { + headers: { + Authorization: `Bearer ${process.env.HF_TOKEN}`, + "Content-Type": "image/jpeg", + }, + method: "POST", + body: { + "inputs": `data:image/png;base64,${data.inputs.encode("base64")}`, + "parameters": data.parameters, + } + } + ); + const result = await response.json(); + return result; +} + +query({ + inputs: image, + parameters: { + prompt: "Turn the cat into a tiger.", + } +}).then((response) => { + console.log(JSON.stringify(response)); +}); \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/image-to-image/js/fetch/0.replicate.js b/packages/tasks-gen/snippets-fixtures/image-to-image/js/fetch/0.replicate.js new file mode 100644 index 0000000000..d938c3c894 --- /dev/null +++ b/packages/tasks-gen/snippets-fixtures/image-to-image/js/fetch/0.replicate.js @@ -0,0 +1,29 @@ +const image = fs.readFileSync("cat.png"); + +async function query(data) { + const response = await fetch( + "https://router.huggingface.co/replicate/v1/models//predictions", + { + headers: { + Authorization: `Bearer ${process.env.HF_TOKEN}`, + "Content-Type": "image/jpeg", + }, + method: "POST", + body: { + "inputs": `data:image/png;base64,${data.inputs.encode("base64")}`, + "parameters": data.parameters, + } + } + ); + const result = await response.json(); + return result; +} + +query({ + inputs: image, + parameters: { + prompt: "Turn the cat into a tiger.", + } +}).then((response) => { + console.log(JSON.stringify(response)); +}); \ No 
newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/image-to-image/js/huggingface.js/0.fal-ai.js b/packages/tasks-gen/snippets-fixtures/image-to-image/js/huggingface.js/0.fal-ai.js new file mode 100644 index 0000000000..d12ba64f18 --- /dev/null +++ b/packages/tasks-gen/snippets-fixtures/image-to-image/js/huggingface.js/0.fal-ai.js @@ -0,0 +1,14 @@ +import { InferenceClient } from "@huggingface/inference"; + +const client = new InferenceClient(process.env.HF_TOKEN); + +const data = fs.readFileSync("cat.png"); + +const image = await client.imageToImage({ + provider: "fal-ai", + model: "black-forest-labs/FLUX.1-Kontext-dev", + inputs: data, + parameters: { prompt: "Turn the cat into a tiger.", }, +}); +/// Use the generated image (it's a Blob) +// For example, you can save it to a file or display it in an image element \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/image-to-image/js/huggingface.js/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/image-to-image/js/huggingface.js/0.hf-inference.js new file mode 100644 index 0000000000..a5729e2bde --- /dev/null +++ b/packages/tasks-gen/snippets-fixtures/image-to-image/js/huggingface.js/0.hf-inference.js @@ -0,0 +1,14 @@ +import { InferenceClient } from "@huggingface/inference"; + +const client = new InferenceClient(process.env.HF_TOKEN); + +const data = fs.readFileSync("cat.png"); + +const image = await client.imageToImage({ + provider: "hf-inference", + model: "black-forest-labs/FLUX.1-Kontext-dev", + inputs: data, + parameters: { prompt: "Turn the cat into a tiger.", }, +}); +/// Use the generated image (it's a Blob) +// For example, you can save it to a file or display it in an image element \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/image-to-image/js/huggingface.js/0.replicate.js b/packages/tasks-gen/snippets-fixtures/image-to-image/js/huggingface.js/0.replicate.js new file mode 100644 index 0000000000..415b4f43b1 --- 
/dev/null +++ b/packages/tasks-gen/snippets-fixtures/image-to-image/js/huggingface.js/0.replicate.js @@ -0,0 +1,14 @@ +import { InferenceClient } from "@huggingface/inference"; + +const client = new InferenceClient(process.env.HF_TOKEN); + +const data = fs.readFileSync("cat.png"); + +const image = await client.imageToImage({ + provider: "replicate", + model: "black-forest-labs/FLUX.1-Kontext-dev", + inputs: data, + parameters: { prompt: "Turn the cat into a tiger.", }, +}); +/// Use the generated image (it's a Blob) +// For example, you can save it to a file or display it in an image element \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/image-to-image/python/fal_client/0.fal-ai.py b/packages/tasks-gen/snippets-fixtures/image-to-image/python/fal_client/0.fal-ai.py new file mode 100644 index 0000000000..742d445641 --- /dev/null +++ b/packages/tasks-gen/snippets-fixtures/image-to-image/python/fal_client/0.fal-ai.py @@ -0,0 +1,21 @@ +import fal_client
+import base64 + +def on_queue_update(update): + if isinstance(update, fal_client.InProgress): + for log in update.logs: + print(log["message"]) + +with open("cat.png", "rb") as image_file: + image_base_64 = base64.b64encode(image_file.read()).decode('utf-8') + +result = fal_client.subscribe( + "fal-ai/flux-kontext/dev", + arguments={ + "prompt": "Turn the cat into a tiger.", + "image_url": f"data:image/png;base64,{image_base_64}", + }, + with_logs=True, + on_queue_update=on_queue_update, +) +print(result) \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/image-to-image/python/huggingface_hub/0.fal-ai.py b/packages/tasks-gen/snippets-fixtures/image-to-image/python/huggingface_hub/0.fal-ai.py new file mode 100644 index 0000000000..cc8c180b92 --- /dev/null +++ b/packages/tasks-gen/snippets-fixtures/image-to-image/python/huggingface_hub/0.fal-ai.py @@ -0,0 +1,17 @@ +import os +from huggingface_hub import InferenceClient + +client = InferenceClient( + provider="fal-ai", + 
api_key=os.environ["HF_TOKEN"], +) + +with open("cat.png", "rb") as image_file: + input_image = image_file.read() + +# output is a PIL.Image object +image = client.image_to_image( + input_image, + prompt="Turn the cat into a tiger.", + model="black-forest-labs/FLUX.1-Kontext-dev", +) \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/image-to-image/python/huggingface_hub/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/image-to-image/python/huggingface_hub/0.hf-inference.py index 1f94f838a8..fdd0e76332 100644 --- a/packages/tasks-gen/snippets-fixtures/image-to-image/python/huggingface_hub/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/image-to-image/python/huggingface_hub/0.hf-inference.py @@ -6,9 +6,12 @@ api_key=os.environ["HF_TOKEN"], ) +with open("cat.png", "rb") as image_file: + input_image = image_file.read() + # output is a PIL.Image object image = client.image_to_image( - "cat.png", + input_image, prompt="Turn the cat into a tiger.", - model="stabilityai/stable-diffusion-xl-refiner-1.0", + model="black-forest-labs/FLUX.1-Kontext-dev", ) \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/image-to-image/python/huggingface_hub/0.replicate.py b/packages/tasks-gen/snippets-fixtures/image-to-image/python/huggingface_hub/0.replicate.py new file mode 100644 index 0000000000..0ff57f98c3 --- /dev/null +++ b/packages/tasks-gen/snippets-fixtures/image-to-image/python/huggingface_hub/0.replicate.py @@ -0,0 +1,17 @@ +import os +from huggingface_hub import InferenceClient + +client = InferenceClient( + provider="replicate", + api_key=os.environ["HF_TOKEN"], +) + +with open("cat.png", "rb") as image_file: + input_image = image_file.read() + +# output is a PIL.Image object +image = client.image_to_image( + input_image, + prompt="Turn the cat into a tiger.", + model="black-forest-labs/FLUX.1-Kontext-dev", +) \ No newline at end of file diff --git 
a/packages/tasks-gen/snippets-fixtures/image-to-image/python/requests/0.fal-ai.py b/packages/tasks-gen/snippets-fixtures/image-to-image/python/requests/0.fal-ai.py new file mode 100644 index 0000000000..ca6662a49f --- /dev/null +++ b/packages/tasks-gen/snippets-fixtures/image-to-image/python/requests/0.fal-ai.py @@ -0,0 +1,30 @@ +import os +import base64 +import requests + +API_URL = "https://router.huggingface.co/fal-ai/?_subdomain=queue" +headers = { + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", +} + +with open("cat.png", "rb") as image_file: + image_base_64 = base64.b64encode(image_file.read()).decode('utf-8') + +def query(payload): + with open(payload["inputs"], "rb") as f: + img = f.read() + payload["inputs"] = base64.b64encode(img).decode("utf-8") + response = requests.post(API_URL, headers=headers, json=payload) + return response.content + +image_bytes = query({ + "inputs": "cat.png", + "parameters": { + "prompt": "Turn the cat into a tiger." + } +}) + +# You can access the image with PIL.Image for example +import io +from PIL import Image +image = Image.open(io.BytesIO(image_bytes)) \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/image-to-image/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/image-to-image/python/requests/0.hf-inference.py index eb9e981c6e..fe56c8ef16 100644 --- a/packages/tasks-gen/snippets-fixtures/image-to-image/python/requests/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/image-to-image/python/requests/0.hf-inference.py @@ -2,11 +2,14 @@ import base64 import requests -API_URL = "https://router.huggingface.co/hf-inference/models/stabilityai/stable-diffusion-xl-refiner-1.0" +API_URL = "https://router.huggingface.co/hf-inference/models/black-forest-labs/FLUX.1-Kontext-dev" headers = { "Authorization": f"Bearer {os.environ['HF_TOKEN']}", } +with open("cat.png", "rb") as image_file: + image_base_64 = base64.b64encode(image_file.read()).decode('utf-8') + def 
query(payload): with open(payload["inputs"], "rb") as f: img = f.read() diff --git a/packages/tasks-gen/snippets-fixtures/image-to-image/python/requests/0.replicate.py b/packages/tasks-gen/snippets-fixtures/image-to-image/python/requests/0.replicate.py new file mode 100644 index 0000000000..ea6d519f32 --- /dev/null +++ b/packages/tasks-gen/snippets-fixtures/image-to-image/python/requests/0.replicate.py @@ -0,0 +1,30 @@ +import os +import base64 +import requests + +API_URL = "https://router.huggingface.co/replicate/v1/models//predictions" +headers = { + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", +} + +with open("cat.png", "rb") as image_file: + image_base_64 = base64.b64encode(image_file.read()).decode('utf-8') + +def query(payload): + with open(payload["inputs"], "rb") as f: + img = f.read() + payload["inputs"] = base64.b64encode(img).decode("utf-8") + response = requests.post(API_URL, headers=headers, json=payload) + return response.content + +image_bytes = query({ + "input": { + "prompt": "Turn the cat into a tiger.", + "input_image": "cat.png" + } +}) + +# You can access the image with PIL.Image for example +import io +from PIL import Image +image = Image.open(io.BytesIO(image_bytes)) \ No newline at end of file