Benchmark Autoencoder #10780
Changes to `benchmarks/base_classes.py`:

```diff
@@ -15,6 +15,7 @@
     StableDiffusionXLControlNetPipeline,
     T2IAdapter,
     WuerstchenCombinedPipeline,
+    AutoencoderKL,
 )
 from diffusers.utils import load_image
```
```diff
@@ -29,7 +30,9 @@
     bytes_to_giga_bytes,
     flush,
     generate_csv_dict,
+    generate_csv_dict_model,
     write_to_csv,
+    write_list_to_csv,
 )
```
```diff
@@ -169,7 +172,7 @@ def benchmark(self, args):
         print(f"[INFO] {self.pipe.__class__.__name__}: Running benchmark with: {vars(args)}\n")

         time = benchmark_fn(self.run_inference, self.pipe, args)  # in seconds.
-        memory = bytes_to_giga_bytes(torch.cuda.max_memory_allocated())  # in GBs.
+        memory = bytes_to_giga_bytes(torch.cuda.reset_peak_memory_stats())  # in GBs.
         benchmark_info = BenchmarkInfo(time=time, memory=memory)

         pipeline_class_name = str(self.pipe.__class__.__name__)
```
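A note on this change: `torch.cuda.reset_peak_memory_stats()` returns `None`, so `bytes_to_giga_bytes(torch.cuda.reset_peak_memory_stats())` would fail at runtime. If the intent is to reset the peak-memory counter between benchmark runs, the usual pattern (a sketch of that pattern, not the PR's code) is to reset before the timed call and read `torch.cuda.max_memory_allocated()` afterwards:

```python
import torch

def peak_memory_gb(fn, *args, **kwargs):
    """Run fn and report its peak CUDA allocation in GB (illustrative helper)."""
    torch.cuda.reset_peak_memory_stats()  # clear stats left over from earlier runs
    result = fn(*args, **kwargs)
    torch.cuda.synchronize()              # wait for queued kernels before reading stats
    return result, torch.cuda.max_memory_allocated() / 1024**3
```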
```diff
@@ -344,3 +347,72 @@ class T2IAdapterSDXLBenchmark(T2IAdapterBenchmark):

     def __init__(self, args):
         super().__init__(args)
+
+
+class BaseBenchmarkTestCase:
+    model_class = None
+    pretrained_model_name_or_path = None
+    model_class_name = None
+
+    def __init__(self):
+        super().__init__()
+
+    def get_result_filepath(self, suffix):
+        name = (
+            self.model_class_name
+            + "_"
+            + self.pretrained_model_name_or_path.replace("/", "_")
+            + "_"
+            + f"{suffix}.csv"
+        )
+        filepath = os.path.join(BASE_PATH, name)
+        return filepath
+
+
+class AutoencoderKLBenchmark(BaseBenchmarkTestCase):
+    model_class = AutoencoderKL
+
+    def __init__(self, pretrained_model_name_or_path, dtype, **kwargs):
+        super().__init__()
+        self.dtype = getattr(torch, dtype)
+        model = self.model_class.from_pretrained(pretrained_model_name_or_path, torch_dtype=self.dtype, **kwargs).eval()
+        model = model.to("cuda")
+        self.model = model
+        self.model_class_name = str(self.model.__class__.__name__)
+        self.pretrained_model_name_or_path = pretrained_model_name_or_path
+
+    @torch.no_grad
```
Review comment on the `@torch.no_grad` line, with a suggested change:

```diff
-    @torch.no_grad
+    @torch.no_grad()
```

As per the docs: https://pytorch.org/docs/stable/generated/torch.no_grad.html
Not needed for the first iteration, but I would consider also including `model.compile()`.
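The body of `test_decode()` is collapsed in this view, but given the helpers imported above and the driver script below that calls `benchmark.test_decode()`, it presumably times a decode pass over dummy latents. A rough sketch of that shape, with the reviewer's compile suggestion folded in as an option (the shapes, names, and the `compile_model` flag are assumptions, not the PR's actual code):

```python
import torch
from diffusers import AutoencoderKL

@torch.no_grad()
def decode_benchmark(model: AutoencoderKL, dtype: torch.dtype, compile_model: bool = False):
    """Time one decode pass and report peak memory; assumes model is already on CUDA."""
    if compile_model:
        # Compile just the decode path: module-level torch.compile only covers forward().
        model.decode = torch.compile(model.decode)
    # 4 latent channels is AutoencoderKL's default; 64x64 latents decode to 512x512 images.
    latents = torch.randn(1, 4, 64, 64, dtype=dtype, device="cuda")
    torch.cuda.reset_peak_memory_stats()
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    model.decode(latents)
    end.record()
    torch.cuda.synchronize()
    seconds = start.elapsed_time(end) / 1_000  # elapsed_time() returns milliseconds
    peak_gb = torch.cuda.max_memory_allocated() / 1024**3
    return seconds, peak_gb
```

The PR itself most likely routes these numbers through `benchmark_fn`, `generate_csv_dict_model`, and `write_list_to_csv` rather than measuring inline as above.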
New file added by the PR (the driver script; its filename is not shown in this view):

```diff
@@ -0,0 +1,29 @@
+import argparse
+import sys
+
+
+sys.path.append(".")
+from base_classes import AutoencoderKLBenchmark  # noqa: E402
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--pretrained_model_name_or_path",
+        type=str,
+        default="stable-diffusion-v1-5/stable-diffusion-v1-5",
+    )
+    parser.add_argument(
+        "--subfolder",
+        type=str,
+        default=None,
+    )
+    parser.add_argument(
+        "--dtype",
+        type=str,
+        default="float16",
+    )
+    args = parser.parse_args()
+
+    benchmark = AutoencoderKLBenchmark(pretrained_model_name_or_path=args.pretrained_model_name_or_path, dtype=args.dtype, subfolder=args.subfolder)
+    benchmark.test_decode()
```
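The same run can be driven directly from Python. One caveat worth noting: in the default SD 1.5 checkpoint the `AutoencoderKL` weights live under the `vae` subfolder, so the script's `--subfolder` default of `None` would point `from_pretrained` at the repo root; passing `vae` is presumably the intended usage (a hedged sketch, assuming `base_classes.py` from this PR is importable):

```python
from base_classes import AutoencoderKLBenchmark  # the PR's benchmark class above

# Equivalent of: python <script>.py --subfolder vae --dtype float16
benchmark = AutoencoderKLBenchmark(
    pretrained_model_name_or_path="stable-diffusion-v1-5/stable-diffusion-v1-5",
    dtype="float16",
    subfolder="vae",  # where the SD 1.5 repo stores its VAE weights
)
benchmark.test_decode()
```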
Should we let the users define `dummy_inputs()` per model class here? And then we could let them implement their own function that needs to be benchmarked. So, `BaseBenchmarkTestCase` could then have a method `benchmark()`:
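The reviewer's snippet appears to be cut off after the colon. The proposed structure is presumably along these lines (a sketch of the suggestion built from the helper names visible in this diff, not code from the PR):

```python
import torch

class BaseBenchmarkTestCase:
    """Sketch: each model subclass declares its inputs and the call to be timed."""

    def dummy_inputs(self):
        # e.g. for a VAE: {"latents": torch.randn(1, 4, 64, 64, device="cuda")}
        raise NotImplementedError

    def run(self, inputs):
        # e.g. for a VAE: return self.model.decode(**inputs)
        raise NotImplementedError

    @torch.no_grad()
    def benchmark(self):
        inputs = self.dummy_inputs()
        torch.cuda.reset_peak_memory_stats()
        # benchmark_fn, bytes_to_giga_bytes, and BenchmarkInfo are the PR's existing
        # utilities, assumed to forward positional args as in the pipeline benchmarks.
        time = benchmark_fn(self.run, inputs)  # in seconds
        memory = bytes_to_giga_bytes(torch.cuda.max_memory_allocated())  # in GB
        return BenchmarkInfo(time=time, memory=memory)
```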