Commit 6714388

benchmark_autoencoderkl_encode
1 parent 8eeee7e

File tree

4 files changed: +142 -21 lines

benchmarks/base_classes.py

Lines changed: 95 additions & 15 deletions
@@ -4,6 +4,7 @@
 import torch
 
 from diffusers import (
+    AutoencoderKL,
     AutoPipelineForImage2Image,
     AutoPipelineForInpainting,
     AutoPipelineForText2Image,
@@ -15,7 +16,6 @@
     StableDiffusionXLControlNetPipeline,
     T2IAdapter,
     WuerstchenCombinedPipeline,
-    AutoencoderKL,
 )
 from diffusers.utils import load_image
 
@@ -31,8 +31,8 @@
     flush,
     generate_csv_dict,
     generate_csv_dict_model,
-    write_to_csv,
     write_list_to_csv,
+    write_to_csv,
 )
 
 
@@ -359,11 +359,7 @@ def __init__(self):
 
     def get_result_filepath(self, suffix):
         name = (
-            self.model_class_name
-            + "_"
-            + self.pretrained_model_name_or_path.replace("/", "_")
-            + "_"
-            + f"{suffix}.csv"
+            self.model_class_name + "_" + self.pretrained_model_name_or_path.replace("/", "_") + "_" + f"{suffix}.csv"
         )
         filepath = os.path.join(BASE_PATH, name)
         return filepath
@@ -375,7 +371,9 @@ class AutoencoderKLBenchmark(BaseBenchmarkTestCase):
     def __init__(self, pretrained_model_name_or_path, dtype, tiling, **kwargs):
         super().__init__()
         self.dtype = getattr(torch, dtype)
-        model = self.model_class.from_pretrained(pretrained_model_name_or_path, torch_dtype=self.dtype, **kwargs).eval()
+        model = self.model_class.from_pretrained(
+            pretrained_model_name_or_path, torch_dtype=self.dtype, **kwargs
+        ).eval()
         model = model.to("cuda")
         self.tiling = False
         if tiling:
@@ -389,13 +387,15 @@ def __init__(self, pretrained_model_name_or_path, dtype, tiling, **kwargs):
     def run_decode(self, model, tensor):
         _ = model.decode(tensor)
 
-    @torch.no_grad
+    @torch.no_grad()
     def _test_decode(self, **kwargs):
         batch = kwargs.get("batch")
         height = kwargs.get("height")
         width = kwargs.get("width")
 
-        tensor = torch.randn((batch, self.model.config.latent_channels, height, width), dtype=self.dtype, device="cuda")
+        tensor = torch.randn(
+            (batch, self.model.config.latent_channels, height, width), dtype=self.dtype, device="cuda"
+        )
 
         try:
             time = benchmark_fn(self.run_decode, self.model, tensor)
@@ -406,7 +406,10 @@ def _test_decode(self, **kwargs):
 
         benchmark_info = BenchmarkInfo(time=time, memory=memory)
         csv_dict = generate_csv_dict_model(
-            model_cls=self.model_class_name, ckpt=self.pretrained_model_name_or_path, benchmark_info=benchmark_info, **kwargs,
+            model_cls=self.model_class_name,
+            ckpt=self.pretrained_model_name_or_path,
+            benchmark_info=benchmark_info,
+            **kwargs,
         )
         print(f"{self.model_class_name} decode - shape: {list(tensor.shape)}, time: {time}, memory: {memory}")
         return csv_dict
@@ -416,15 +419,92 @@ def test_decode(self):
 
         batches = (1,)
         # heights = (32, 64, 128, 256,)
-        widths = (32, 64, 128, 256,)
+        widths = (
+            32,
+            64,
+            128,
+            256,
+        )
         for batch in batches:
             # for height in heights:
-                for width in widths:
-                    benchmark_info = self._test_decode(batch=batch, height=width, width=width)
-                    benchmark_infos.append(benchmark_info)
+            for width in widths:
+                benchmark_info = self._test_decode(batch=batch, height=width, width=width)
+                benchmark_infos.append(benchmark_info)
 
         suffix = "decode"
         if self.tiling:
             suffix = "tiled_decode"
         filepath = self.get_result_filepath(suffix)
         write_list_to_csv(filepath, benchmark_infos)
+
+
+class AutoencoderKLEncodeBenchmark(BaseBenchmarkTestCase):
+    model_class = AutoencoderKL
+
+    def __init__(self, pretrained_model_name_or_path, dtype, tiling, **kwargs):
+        super().__init__()
+        self.dtype = getattr(torch, dtype)
+        model = self.model_class.from_pretrained(
+            pretrained_model_name_or_path, torch_dtype=self.dtype, **kwargs
+        ).eval()
+        model = model.to("cuda")
+        self.tiling = False
+        if tiling:
+            model.enable_tiling()
+            self.tiling = True
+        self.model = model
+        self.model_class_name = str(self.model.__class__.__name__)
+        self.pretrained_model_name_or_path = pretrained_model_name_or_path
+
+    @torch.no_grad()
+    def run_encode(self, model, tensor):
+        _ = model.encode(tensor)
+
+    @torch.no_grad()
+    def _test_encode(self, **kwargs):
+        batch = kwargs.get("batch")
+        height = kwargs.get("height")
+        width = kwargs.get("width")
+
+        tensor = torch.randn(
+            (batch, self.model.config.latent_channels, height, width), dtype=self.dtype, device="cuda"
+        )
+
+        try:
+            time = benchmark_fn(self.run_encode, self.model, tensor)
+            memory = bytes_to_giga_bytes(torch.cuda.max_memory_reserved())
+        except torch.OutOfMemoryError:
+            time = "OOM"
+            memory = "OOM"
+
+        benchmark_info = BenchmarkInfo(time=time, memory=memory)
+        csv_dict = generate_csv_dict_model(
+            model_cls=self.model_class_name,
+            ckpt=self.pretrained_model_name_or_path,
+            benchmark_info=benchmark_info,
+            **kwargs,
+        )
+        print(f"{self.model_class_name} encode - shape: {list(tensor.shape)}, time: {time}, memory: {memory}")
+        return csv_dict
+
+    def test_encode(self):
+        benchmark_infos = []
+
+        batches = (1,)
+        widths = (
+            256,
+            512,
+            1024,
+            2048,
+        )
+        for batch in batches:
+            # for height in heights:
+            for width in widths:
+                benchmark_info = self._test_encode(batch=batch, height=width, width=width)
+                benchmark_infos.append(benchmark_info)
+
+        suffix = "encode"
+        if self.tiling:
+            suffix = "tiled_encode"
+        filepath = self.get_result_filepath(suffix)
+        write_list_to_csv(filepath, benchmark_infos)
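
Both `_test_decode` and `_test_encode` lean on two helpers that this diff does not touch: `benchmark_fn` and `bytes_to_giga_bytes`. As a minimal sketch of what they plausibly look like, assuming `benchmark_fn` wraps `torch.utils.benchmark.Timer` and that memory figures are reported in gigabytes (these definitions are assumptions, not part of the commit):

```python
# Illustration only: plausible shapes for the helpers called by _test_decode/_test_encode.
import torch
import torch.utils.benchmark as benchmark


def benchmark_fn(f, *args, **kwargs):
    # Time f(*args, **kwargs); blocked_autorange() repeats the call until the
    # measurement stabilizes and returns per-call statistics in seconds.
    t0 = benchmark.Timer(
        stmt="f(*args, **kwargs)",
        globals={"args": args, "kwargs": kwargs, "f": f},
        num_threads=torch.get_num_threads(),
    )
    return f"{t0.blocked_autorange().mean:.3f}"


def bytes_to_giga_bytes(n_bytes):
    # torch.cuda.max_memory_reserved() reports bytes; the CSV stores gigabytes.
    return n_bytes / 1024 / 1024 / 1024
```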

benchmarks/benchmark_autoencoderkl.py

Lines changed: 7 additions & 5 deletions
@@ -23,11 +23,13 @@
         type=str,
         default="float16",
     )
-    parser.add_argument(
-        "--tiling",
-        action="store_true"
-    )
+    parser.add_argument("--tiling", action="store_true")
    args = parser.parse_args()
 
-    benchmark = AutoencoderKLBenchmark(pretrained_model_name_or_path=args.pretrained_model_name_or_path, dtype=args.dtype, tiling=args.tiling, subfolder=args.subfolder)
+    benchmark = AutoencoderKLBenchmark(
+        pretrained_model_name_or_path=args.pretrained_model_name_or_path,
+        dtype=args.dtype,
+        tiling=args.tiling,
+        subfolder=args.subfolder,
+    )
     benchmark.test_decode()
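
This hunk only reflows the decode script; its behavior is unchanged, and it would presumably still be invoked the same way, e.g. `python benchmarks/benchmark_autoencoderkl.py --subfolder vae --tiling` (argument values illustrative, not defaults asserted by this diff).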

benchmarks/benchmark_autoencoderkl_encode.py

Lines changed: 35 additions & 0 deletions (new file)
@@ -0,0 +1,35 @@
+import argparse
+import sys
+
+
+sys.path.append(".")
+from base_classes import AutoencoderKLEncodeBenchmark  # noqa: E402
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--pretrained_model_name_or_path",
+        type=str,
+        default="stable-diffusion-v1-5/stable-diffusion-v1-5",
+    )
+    parser.add_argument(
+        "--subfolder",
+        type=str,
+        default=None,
+    )
+    parser.add_argument(
+        "--dtype",
+        type=str,
+        default="float16",
+    )
+    parser.add_argument("--tiling", action="store_true")
+    args = parser.parse_args()
+
+    benchmark = AutoencoderKLEncodeBenchmark(
+        pretrained_model_name_or_path=args.pretrained_model_name_or_path,
+        dtype=args.dtype,
+        tiling=args.tiling,
+        subfolder=args.subfolder,
+    )
+    benchmark.test_encode()
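
Putting the new class and script together: a minimal sketch of the tiled encode path driven directly from Python. The checkpoint and `subfolder` values are illustrative assumptions (a CUDA GPU is assumed); the point is that passing `tiling=True` calls `model.enable_tiling()` and switches the result-file suffix from `encode` to `tiled_encode` via `get_result_filepath`.

```python
# Sketch only: exercising the tiled encode path directly from Python.
# Checkpoint and subfolder are illustrative values, not defaults enforced by the class.
from base_classes import AutoencoderKLEncodeBenchmark

benchmark = AutoencoderKLEncodeBenchmark(
    pretrained_model_name_or_path="stable-diffusion-v1-5/stable-diffusion-v1-5",
    dtype="float16",  # resolved via getattr(torch, dtype) in __init__
    tiling=True,      # calls model.enable_tiling(); CSV suffix becomes "tiled_encode"
    subfolder="vae",  # forwarded to AutoencoderKL.from_pretrained via **kwargs
)
benchmark.test_encode()
# Expected output file under BASE_PATH:
# AutoencoderKL_stable-diffusion-v1-5_stable-diffusion-v1-5_tiled_encode.csv
```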

benchmarks/utils.py

Lines changed: 5 additions & 1 deletion
@@ -79,7 +79,10 @@ def generate_csv_dict(
 
 
 def generate_csv_dict_model(
-    model_cls: str, ckpt: str, benchmark_info: BenchmarkInfo, **kwargs,
+    model_cls: str,
+    ckpt: str,
+    benchmark_info: BenchmarkInfo,
+    **kwargs,
 ) -> Dict[str, Union[str, bool, float]]:
     """Packs benchmarking data into a dictionary for latter serialization."""
     data_dict = {
@@ -94,6 +97,7 @@ def generate_csv_dict_model(
     }
     return data_dict
 
+
 def write_to_csv(file_name: str, data_dict: Dict[str, Union[str, bool, float]]):
     """Serializes a dictionary into a CSV file."""
     with open(file_name, mode="w", newline="") as csvfile:
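
The decode and encode tests serialize their per-shape results through `write_list_to_csv`, which is imported in base_classes.py but not shown in this diff. A hedged sketch of what such a helper plausibly looks like, assuming it mirrors `write_to_csv` above using `csv.DictWriter`:

```python
# Assumption, for illustration: a list-of-dicts counterpart to write_to_csv.
import csv
from typing import Dict, List, Union


def write_list_to_csv(file_name: str, data_list: List[Dict[str, Union[str, bool, float]]]):
    """Serializes a list of dictionaries into a single CSV file."""
    with open(file_name, mode="w", newline="") as csvfile:
        # Take the column names from the first row; every benchmark dict shares the same keys.
        writer = csv.DictWriter(csvfile, fieldnames=list(data_list[0].keys()))
        writer.writeheader()
        for data_dict in data_list:
            writer.writerow(data_dict)
```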
