|
175 | 175 | title: gguf |
176 | 176 | - local: quantization/torchao |
177 | 177 | title: torchao |
178 | | - - local: quantization/quanto |
| 178 | + - local: quantization/quanto |
179 | 179 | title: quanto |
180 | 180 | title: Quantization Methods |
181 | 181 | - sections: |
|
268 | 268 | - sections: |
269 | 269 | - local: api/models/controlnet |
270 | 270 | title: ControlNetModel |
271 | | - - local: api/models/controlnet_union |
272 | | - title: ControlNetUnionModel |
273 | 271 | - local: api/models/controlnet_flux |
274 | 272 | title: FluxControlNetModel |
275 | 273 | - local: api/models/controlnet_hunyuandit |
276 | 274 | title: HunyuanDiT2DControlNetModel |
277 | | - - local: api/models/controlnet_sana |
278 | | - title: SanaControlNetModel |
279 | 275 | - local: api/models/controlnet_sd3 |
280 | 276 | title: SD3ControlNetModel |
281 | 277 | - local: api/models/controlnet_sparsectrl |
282 | 278 | title: SparseControlNetModel |
| 279 | + - local: api/models/controlnet_union |
| 280 | + title: ControlNetUnionModel |
| 281 | + - local: api/models/controlnet_sana |
| 282 | + title: SanaControlNetModel |
283 | 283 | title: ControlNets |
284 | 284 | - sections: |
285 | 285 | - local: api/models/allegro_transformer3d |
|
288 | 288 | title: AuraFlowTransformer2DModel |
289 | 289 | - local: api/models/cogvideox_transformer3d |
290 | 290 | title: CogVideoXTransformer3DModel |
| 291 | + - local: api/models/consisid_transformer3d |
| 292 | + title: ConsisIDTransformer3DModel |
291 | 293 | - local: api/models/cogview3plus_transformer2d |
292 | 294 | title: CogView3PlusTransformer2DModel |
293 | 295 | - local: api/models/cogview4_transformer2d |
294 | 296 | title: CogView4Transformer2DModel |
295 | | - - local: api/models/consisid_transformer3d |
296 | | - title: ConsisIDTransformer3DModel |
297 | 297 | - local: api/models/dit_transformer2d |
298 | 298 | title: DiTTransformer2DModel |
299 | 299 | - local: api/models/easyanimate_transformer3d |
|
306 | 306 | title: HunyuanVideoTransformer3DModel |
307 | 307 | - local: api/models/latte_transformer3d |
308 | 308 | title: LatteTransformer3DModel |
309 | | - - local: api/models/ltx_video_transformer3d |
310 | | - title: LTXVideoTransformer3DModel |
311 | | - - local: api/models/lumina2_transformer2d |
312 | | - title: Lumina2Transformer2DModel |
313 | 309 | - local: api/models/lumina_nextdit2d |
314 | 310 | title: LuminaNextDiT2DModel |
| 311 | + - local: api/models/lumina2_transformer2d |
| 312 | + title: Lumina2Transformer2DModel |
| 313 | + - local: api/models/ltx_video_transformer3d |
| 314 | + title: LTXVideoTransformer3DModel |
315 | 315 | - local: api/models/mochi_transformer3d |
316 | 316 | title: MochiTransformer3DModel |
317 | 317 | - local: api/models/omnigen_transformer |
|
320 | 320 | title: PixArtTransformer2DModel |
321 | 321 | - local: api/models/prior_transformer |
322 | 322 | title: PriorTransformer |
323 | | - - local: api/models/sana_transformer2d |
324 | | - title: SanaTransformer2DModel |
325 | 323 | - local: api/models/sd3_transformer2d |
326 | 324 | title: SD3Transformer2DModel |
| 325 | + - local: api/models/sana_transformer2d |
| 326 | + title: SanaTransformer2DModel |
327 | 327 | - local: api/models/stable_audio_transformer |
328 | 328 | title: StableAudioDiTModel |
329 | 329 | - local: api/models/transformer2d |
|
338 | 338 | title: StableCascadeUNet |
339 | 339 | - local: api/models/unet |
340 | 340 | title: UNet1DModel |
341 | | - - local: api/models/unet2d-cond |
342 | | - title: UNet2DConditionModel |
343 | 341 | - local: api/models/unet2d |
344 | 342 | title: UNet2DModel |
| 343 | + - local: api/models/unet2d-cond |
| 344 | + title: UNet2DConditionModel |
345 | 345 | - local: api/models/unet3d-cond |
346 | 346 | title: UNet3DConditionModel |
347 | 347 | - local: api/models/unet-motion |
|
350 | 350 | title: UViT2DModel |
351 | 351 | title: UNets |
352 | 352 | - sections: |
353 | | - - local: api/models/asymmetricautoencoderkl |
354 | | - title: AsymmetricAutoencoderKL |
355 | | - - local: api/models/autoencoder_dc |
356 | | - title: AutoencoderDC |
357 | 353 | - local: api/models/autoencoderkl |
358 | 354 | title: AutoencoderKL |
359 | 355 | - local: api/models/autoencoderkl_allegro |
|
370 | 366 | title: AutoencoderKLMochi |
371 | 367 | - local: api/models/autoencoder_kl_wan |
372 | 368 | title: AutoencoderKLWan |
| 369 | + - local: api/models/asymmetricautoencoderkl |
| 370 | + title: AsymmetricAutoencoderKL |
| 371 | + - local: api/models/autoencoder_dc |
| 372 | + title: AutoencoderDC |
373 | 373 | - local: api/models/consistency_decoder_vae |
374 | 374 | title: ConsistencyDecoderVAE |
375 | 375 | - local: api/models/autoencoder_oobleck |
|
515 | 515 | - sections: |
516 | 516 | - local: api/pipelines/stable_diffusion/overview |
517 | 517 | title: Overview |
518 | | - - local: api/pipelines/stable_diffusion/depth2img |
519 | | - title: Depth-to-image |
520 | | - - local: api/pipelines/stable_diffusion/gligen |
521 | | - title: GLIGEN (Grounded Language-to-Image Generation) |
522 | | - - local: api/pipelines/stable_diffusion/image_variation |
523 | | - title: Image variation |
| 518 | + - local: api/pipelines/stable_diffusion/text2img |
| 519 | + title: Text-to-image |
524 | 520 | - local: api/pipelines/stable_diffusion/img2img |
525 | 521 | title: Image-to-image |
526 | 522 | - local: api/pipelines/stable_diffusion/svd |
527 | 523 | title: Image-to-video |
528 | 524 | - local: api/pipelines/stable_diffusion/inpaint |
529 | 525 | title: Inpainting |
530 | | - - local: api/pipelines/stable_diffusion/k_diffusion |
531 | | - title: K-Diffusion |
532 | | - - local: api/pipelines/stable_diffusion/latent_upscale |
533 | | - title: Latent upscaler |
534 | | - - local: api/pipelines/stable_diffusion/ldm3d_diffusion |
535 | | - title: LDM3D Text-to-(RGB, Depth), Text-to-(RGB-pano, Depth-pano), LDM3D Upscaler |
| 526 | + - local: api/pipelines/stable_diffusion/depth2img |
| 527 | + title: Depth-to-image |
| 528 | + - local: api/pipelines/stable_diffusion/image_variation |
| 529 | + title: Image variation |
536 | 530 | - local: api/pipelines/stable_diffusion/stable_diffusion_safe |
537 | 531 | title: Safe Stable Diffusion |
538 | | - - local: api/pipelines/stable_diffusion/sdxl_turbo |
539 | | - title: SDXL Turbo |
540 | 532 | - local: api/pipelines/stable_diffusion/stable_diffusion_2 |
541 | 533 | title: Stable Diffusion 2 |
542 | 534 | - local: api/pipelines/stable_diffusion/stable_diffusion_3 |
543 | 535 | title: Stable Diffusion 3 |
544 | 536 | - local: api/pipelines/stable_diffusion/stable_diffusion_xl |
545 | 537 | title: Stable Diffusion XL |
| 538 | + - local: api/pipelines/stable_diffusion/sdxl_turbo |
| 539 | + title: SDXL Turbo |
| 540 | + - local: api/pipelines/stable_diffusion/latent_upscale |
| 541 | + title: Latent upscaler |
546 | 542 | - local: api/pipelines/stable_diffusion/upscale |
547 | 543 | title: Super-resolution |
| 544 | + - local: api/pipelines/stable_diffusion/k_diffusion |
| 545 | + title: K-Diffusion |
| 546 | + - local: api/pipelines/stable_diffusion/ldm3d_diffusion |
| 547 | + title: LDM3D Text-to-(RGB, Depth), Text-to-(RGB-pano, Depth-pano), LDM3D Upscaler |
548 | 548 | - local: api/pipelines/stable_diffusion/adapter |
549 | 549 | title: T2I-Adapter |
550 | | - - local: api/pipelines/stable_diffusion/text2img |
551 | | - title: Text-to-image |
| 550 | + - local: api/pipelines/stable_diffusion/gligen |
| 551 | + title: GLIGEN (Grounded Language-to-Image Generation) |
552 | 552 | title: Stable Diffusion |
553 | 553 | - local: api/pipelines/stable_unclip |
554 | 554 | title: Stable unCLIP |