@@ -326,6 +326,10 @@ dam = DescribeAnythingModel(
 
 const diffusersDefaultPrompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k";
 
+const diffusersImg2ImgDefaultPrompt = "Turn this cat into a dog";
+
+const diffusersVideoDefaultPrompt = "A man with short gray hair plays a red electric guitar.";
+
 const diffusers_default = (model: ModelData) => [
 	`from diffusers import DiffusionPipeline
 
@@ -335,6 +339,35 @@ prompt = "${get_prompt_from_diffusers_model(model) ?? diffusersDefaultPrompt}"
 image = pipe(prompt).images[0]`,
 ];
 
+const diffusers_image_to_image = (model: ModelData) => [
+	`from diffusers import DiffusionPipeline
+from diffusers.utils import load_image
+
+pipe = DiffusionPipeline.from_pretrained("${model.id}")
+
+prompt = "${get_prompt_from_diffusers_model(model) ?? diffusersImg2ImgDefaultPrompt}"
+input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
+
+image = pipe(image=input_image, prompt=prompt).images[0]`,
+];
+
+const diffusers_image_to_video = (model: ModelData) => [
+	`import torch
+from diffusers import DiffusionPipeline
+from diffusers.utils import load_image, export_to_video
+
+pipe = DiffusionPipeline.from_pretrained("${model.id}", torch_dtype=torch.float16)
+pipe.to("cuda")
+
+prompt = "${get_prompt_from_diffusers_model(model) ?? diffusersVideoDefaultPrompt}"
+image = load_image(
+	"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png"
+)
+
+output = pipe(image=image, prompt=prompt).frames[0]
+export_to_video(output, "output.mp4")`,
+];
+
 const diffusers_controlnet = (model: ModelData) => [
 	`from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
 
@@ -354,6 +387,46 @@ prompt = "${get_prompt_from_diffusers_model(model) ?? diffusersDefaultPrompt}"
 image = pipe(prompt).images[0]`,
 ];
 
+const diffusers_lora_image_to_image = (model: ModelData) => [
+	`from diffusers import DiffusionPipeline
+from diffusers.utils import load_image
+
+pipe = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}")
+pipe.load_lora_weights("${model.id}")
+
+prompt = "${get_prompt_from_diffusers_model(model) ?? diffusersImg2ImgDefaultPrompt}"
+input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
+
+image = pipe(image=input_image, prompt=prompt).images[0]`,
+];
+
+const diffusers_lora_text_to_video = (model: ModelData) => [
+	`from diffusers import DiffusionPipeline
+from diffusers.utils import export_to_video
+
+pipe = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}")
+pipe.load_lora_weights("${model.id}")
+
+prompt = "${get_prompt_from_diffusers_model(model) ?? diffusersVideoDefaultPrompt}"
+
+output = pipe(prompt=prompt).frames[0]
+export_to_video(output, "output.mp4")`,
+];
+
+const diffusers_lora_image_to_video = (model: ModelData) => [
+	`from diffusers import DiffusionPipeline
+from diffusers.utils import load_image, export_to_video
+
+pipe = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}")
+pipe.load_lora_weights("${model.id}")
+
+prompt = "${get_prompt_from_diffusers_model(model) ?? diffusersVideoDefaultPrompt}"
+input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png")
+
+output = pipe(image=input_image, prompt=prompt).frames[0]
+export_to_video(output, "output.mp4")`,
+];
+
 const diffusers_textual_inversion = (model: ModelData) => [
 	`from diffusers import DiffusionPipeline
 
@@ -365,9 +438,21 @@ export const diffusers = (model: ModelData): string[] => {
 	if (model.tags.includes("controlnet")) {
 		return diffusers_controlnet(model);
 	} else if (model.tags.includes("lora")) {
-		return diffusers_lora(model);
+		if (model.pipeline_tag === "image-to-image") {
+			return diffusers_lora_image_to_image(model);
+		} else if (model.pipeline_tag === "image-to-video") {
+			return diffusers_lora_image_to_video(model);
+		} else if (model.pipeline_tag === "text-to-video") {
+			return diffusers_lora_text_to_video(model);
+		} else {
+			return diffusers_lora(model);
+		}
 	} else if (model.tags.includes("textual_inversion")) {
 		return diffusers_textual_inversion(model);
+	} else if (model.pipeline_tag === "image-to-video") {
+		return diffusers_image_to_video(model);
+	} else if (model.pipeline_tag === "image-to-image") {
+		return diffusers_image_to_image(model);
 	} else {
 		return diffusers_default(model);
 	}
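
A minimal sketch of how the updated `diffusers()` dispatcher is exercised. The import paths and the partial `ModelData` literal below are illustrative assumptions, not part of this diff; only `id`, `tags`, and `pipeline_tag` matter for the branching added here.

```ts
import type { ModelData } from "./model-data";
import { diffusers } from "./model-libraries-snippets";

// Hypothetical LoRA repo tagged for image-to-video generation.
const model = {
	id: "someone/sample-image-to-video-lora", // hypothetical repo id
	tags: ["diffusers", "lora"],              // "lora" enters the LoRA branch first
	pipeline_tag: "image-to-video",           // pipeline_tag then selects the i2v variant
} as unknown as ModelData;

// With this change, the call resolves to diffusers_lora_image_to_video:
// a Python snippet that loads the base pipeline, applies the LoRA weights,
// and writes the generated frames out via export_to_video.
const [snippet] = diffusers(model);
console.log(snippet);
```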