[tests] cache non lora pipeline outputs. #12298
Merged

Commits (26)
- 02fd92e cache non lora pipeline outputs. (sayakpaul)
- c8afd1c up (sayakpaul)
- 6c0c72d up (sayakpaul)
- 4256de9 up (sayakpaul)
- 772c32e up (sayakpaul)
- 2c47a2f Revert "up" (sayakpaul)
- 9c24d1f Merge branch 'main' into cache-non-lora-outputs (sayakpaul)
- cca03df up (sayakpaul)
- 53ca186 Revert "up" (sayakpaul)
- 336efbd up (sayakpaul)
- 69f2d5c Merge branch 'main' into cache-non-lora-outputs (sayakpaul)
- 9d3f707 up (sayakpaul)
- 4923986 Merge branch 'main' into cache-non-lora-outputs (sayakpaul)
- 34d0aa2 resolve big conflicts. (sayakpaul)
- 1569fca add . (sayakpaul)
- 8f405ed Merge branch 'main' into cache-non-lora-outputs (sayakpaul)
- ead2e04 up (sayakpaul)
- fbcdf8b Merge branch 'main' into cache-non-lora-outputs (sayakpaul)
- 8efb5c4 up (sayakpaul)
- ca424e5 up (sayakpaul)
- 3e0a7f9 Merge branch 'main' into cache-non-lora-outputs (sayakpaul)
- e442df0 up (sayakpaul)
- dcfd979 Merge branch 'main' into cache-non-lora-outputs (sayakpaul)
- 734b500 up (sayakpaul)
- b01bf8e up (sayakpaul)
- c16d892 Merge branch 'main' into cache-non-lora-outputs (sayakpaul)
Diff view
```diff
@@ -126,13 +126,36 @@ class PeftLoraLoaderMixinTests:
     text_encoder_target_modules = ["q_proj", "k_proj", "v_proj", "out_proj"]
     denoiser_target_modules = ["to_q", "to_k", "to_v", "to_out.0"]
 
-    def get_dummy_components(self, use_dora=False, lora_alpha=None):
+    cached_non_lora_outputs = {}
+
+    @pytest.fixture(scope="class", autouse=True)
+    def get_base_pipeline_outputs(self):
+        """
+        This fixture will be executed once per test class and will populate
+        the cached_non_lora_outputs dictionary.
+        """
+        components, _, _ = self.get_dummy_components(self.scheduler_cls)
+        pipe = self.pipeline_class(**components)
+        pipe = pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        # Always ensure the inputs are without the `generator`. Make sure to pass the `generator`
+        # explicitly.
+        _, _, inputs = self.get_dummy_inputs(with_generator=False)
+        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
+        self.cached_non_lora_outputs[self.scheduler_cls.__name__] = output_no_lora
+
+        # Ensures that there's no inconsistency when reusing the cache.
+        yield
+        self.cached_non_lora_outputs.clear()
+
+    def get_dummy_components(self, scheduler_cls=None, use_dora=False, lora_alpha=None):
         if self.unet_kwargs and self.transformer_kwargs:
             raise ValueError("Both `unet_kwargs` and `transformer_kwargs` cannot be specified.")
         if self.has_two_text_encoders and self.has_three_text_encoders:
             raise ValueError("Both `has_two_text_encoders` and `has_three_text_encoders` cannot be True.")
 
-        scheduler_cls = self.scheduler_cls
+        scheduler_cls = scheduler_cls if scheduler_cls is not None else self.scheduler_cls
         rank = 4
         lora_alpha = rank if lora_alpha is None else lora_alpha
```
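The hunk above is the core of the change: a class-scoped, autouse pytest fixture computes the non-LoRA ("baseline") pipeline output once per test class and stores it in a shared dictionary, instead of every test recomputing it. Below is a minimal, self-contained sketch of the same pattern; `ToyScheduler` and `ToyPipeline` are hypothetical stand-ins for the pipeline/scheduler wiring that the real mixin builds via `get_dummy_components` and `get_dummy_inputs`.

```python
# Minimal sketch of the class-scoped caching pattern (hypothetical names).
import pytest
import torch


class ToyScheduler:
    """Placeholder so the cache key mirrors `scheduler_cls.__name__`."""


class ToyPipeline:
    """Stands in for an expensive pipeline call."""

    def __call__(self, generator=None):
        return torch.randn(1, 8, 8, 3, generator=generator)


class TestCachedBaseline:
    scheduler_cls = ToyScheduler
    cached_non_lora_outputs = {}

    @pytest.fixture(scope="class", autouse=True)
    def get_base_pipeline_outputs(self):
        # Runs once per test class: compute the baseline (non-LoRA) output with
        # a fixed seed and store it under the scheduler class name.
        pipe = ToyPipeline()
        output_no_lora = pipe(generator=torch.manual_seed(0))
        self.cached_non_lora_outputs[self.scheduler_cls.__name__] = output_no_lora
        yield
        # Clear the cache after the class finishes so other test classes never
        # see stale entries.
        self.cached_non_lora_outputs.clear()

    def test_baseline_shape(self):
        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
        assert output_no_lora.shape == (1, 8, 8, 3)

    def test_baseline_is_deterministic(self):
        # A fresh run with the same seed matches the cached baseline, so tests
        # can safely compare LoRA outputs against the cached tensor.
        fresh = ToyPipeline()(generator=torch.manual_seed(0))
        assert torch.allclose(fresh, self.cached_non_lora_outputs[self.scheduler_cls.__name__])
```

Keying the cache on `scheduler_cls.__name__` matches how the mixin is used: each concrete test class pins a single `scheduler_cls`, so one baseline per class suffices, and the `yield`/`clear()` pair keeps one class's baseline from leaking into the next.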
```diff
@@ -316,13 +339,7 @@ def test_simple_inference(self):
         """
         Tests a simple inference and makes sure it works as expected
         """
-        components, text_lora_config, _ = self.get_dummy_components()
-        pipe = self.pipeline_class(**components)
-        pipe = pipe.to(torch_device)
-        pipe.set_progress_bar_config(disable=None)
-
-        _, _, inputs = self.get_dummy_inputs()
-        output_no_lora = pipe(**inputs)[0]
+        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
         self.assertTrue(output_no_lora.shape == self.output_shape)
 
     def test_simple_inference_with_text_lora(self):
@@ -336,9 +353,7 @@ def test_simple_inference_with_text_lora(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        self.assertTrue(output_no_lora.shape == self.output_shape)
-
+        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
         pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None)
 
         output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
@@ -414,9 +429,6 @@ def test_low_cpu_mem_usage_with_loading(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        self.assertTrue(output_no_lora.shape == self.output_shape)
-
         pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)
 
         images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
```
```diff
@@ -466,8 +478,7 @@ def test_simple_inference_with_text_lora_and_scale(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        self.assertTrue(output_no_lora.shape == self.output_shape)
+        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
 
         pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None)
@@ -503,8 +514,7 @@ def test_simple_inference_with_text_lora_fused(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        self.assertTrue(output_no_lora.shape == self.output_shape)
+        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
 
         pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None)
@@ -534,8 +544,7 @@ def test_simple_inference_with_text_lora_unloaded(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        self.assertTrue(output_no_lora.shape == self.output_shape)
+        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
 
         pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None)
```
```diff
@@ -566,9 +575,6 @@ def test_simple_inference_with_text_lora_save_load(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        self.assertTrue(output_no_lora.shape == self.output_shape)
-
         pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None)
 
         images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
@@ -616,8 +622,7 @@ def test_simple_inference_with_partial_text_lora(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        self.assertTrue(output_no_lora.shape == self.output_shape)
+        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
 
         pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None)
@@ -666,9 +671,6 @@ def test_simple_inference_save_pretrained_with_text_lora(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        self.assertTrue(output_no_lora.shape == self.output_shape)
-
         pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None)
         images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
@@ -708,9 +710,6 @@ def test_simple_inference_with_text_denoiser_lora_save_load(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        self.assertTrue(output_no_lora.shape == self.output_shape)
-
         pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)
 
         images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
```
```diff
@@ -747,9 +746,7 @@ def test_simple_inference_with_text_denoiser_lora_and_scale(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        self.assertTrue(output_no_lora.shape == self.output_shape)
-
+        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
         pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)
 
         output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
@@ -790,8 +787,7 @@ def test_simple_inference_with_text_lora_denoiser_fused(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        self.assertTrue(output_no_lora.shape == self.output_shape)
+        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
 
         pipe, denoiser = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)
@@ -825,8 +821,7 @@ def test_simple_inference_with_text_denoiser_lora_unloaded(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        self.assertTrue(output_no_lora.shape == self.output_shape)
+        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
 
         pipe, denoiser = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)
```
```diff
@@ -900,7 +895,7 @@ def test_simple_inference_with_text_denoiser_multi_adapter(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
+        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
 
         if "text_encoder" in self.pipeline_class._lora_loadable_modules:
             pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
@@ -1024,7 +1019,7 @@ def test_simple_inference_with_text_denoiser_block_scale(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
+        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
 
         pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
         self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
@@ -1080,7 +1075,7 @@ def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
+        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
 
         if "text_encoder" in self.pipeline_class._lora_loadable_modules:
             pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
@@ -1240,7 +1235,7 @@ def test_simple_inference_with_text_denoiser_multi_adapter_delete_adapter(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
+        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
 
         if "text_encoder" in self.pipeline_class._lora_loadable_modules:
             pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
@@ -1331,7 +1326,7 @@ def test_simple_inference_with_text_denoiser_multi_adapter_weighted(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
+        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
 
         if "text_encoder" in self.pipeline_class._lora_loadable_modules:
             pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
```
```diff
@@ -1551,7 +1546,6 @@ def test_get_list_adapters(self):
         self.assertDictEqual(pipe.get_list_adapters(), dicts_to_be_checked)
 
-
     @require_peft_version_greater(peft_version="0.6.2")
     def test_simple_inference_with_text_lora_denoiser_fused_multi(
         self, expected_atol: float = 1e-3, expected_rtol: float = 1e-3
     ):
```
```diff
@@ -1565,9 +1559,6 @@ def test_simple_inference_with_text_lora_denoiser_fused_multi(
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        self.assertTrue(output_no_lora.shape == self.output_shape)
-
         if "text_encoder" in self.pipeline_class._lora_loadable_modules:
             pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
             self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
@@ -1641,8 +1632,7 @@ def test_lora_scale_kwargs_match_fusion(self, expected_atol: float = 1e-3, expected_rtol: float = 1e-3):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        self.assertTrue(output_no_lora.shape == self.output_shape)
+        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
 
         if "text_encoder" in self.pipeline_class._lora_loadable_modules:
             pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
```
```diff
@@ -1685,7 +1675,6 @@ def test_lora_scale_kwargs_match_fusion(self, expected_atol: float = 1e-3, expected_rtol: float = 1e-3):
                 "LoRA should change the output",
             )
 
-
     @require_peft_version_greater(peft_version="0.9.0")
     def test_simple_inference_with_dora(self):
         components, text_lora_config, denoiser_lora_config = self.get_dummy_components(use_dora=True)
         pipe = self.pipeline_class(**components)
```
```diff
@@ -1695,7 +1684,6 @@ def test_simple_inference_with_dora(self):
         output_no_dora_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        self.assertTrue(output_no_dora_lora.shape == self.output_shape)
 
         pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)
 
         output_dora_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
```
```diff
@@ -1783,7 +1771,6 @@ def test_simple_inference_with_text_denoiser_lora_unfused_torch_compile(self):
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
-
         pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)
 
         pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
```
```diff
@@ -1820,7 +1807,7 @@ def test_logs_info_when_no_lora_keys_found(self):
         pipe.set_progress_bar_config(disable=None)
 
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
-        original_out = pipe(**inputs, generator=torch.manual_seed(0))[0]
+        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
 
         no_op_state_dict = {"lora_foo": torch.tensor(2.0), "lora_bar": torch.tensor(3.0)}
         logger = logging.get_logger("diffusers.loaders.peft")
@@ -1832,7 +1819,7 @@ def test_logs_info_when_no_lora_keys_found(self):
         denoiser = getattr(pipe, "unet") if self.unet_kwargs is not None else getattr(pipe, "transformer")
         self.assertTrue(cap_logger.out.startswith(f"No LoRA keys associated to {denoiser.__class__.__name__}"))
-        self.assertTrue(np.allclose(original_out, out_after_lora_attempt, atol=1e-5, rtol=1e-5))
+        self.assertTrue(np.allclose(output_no_lora, out_after_lora_attempt, atol=1e-5, rtol=1e-5))
 
         # test only for text encoder
         for lora_module in self.pipeline_class._lora_loadable_modules:
@@ -1864,9 +1851,7 @@ def test_set_adapters_match_attention_kwargs(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        self.assertTrue(output_no_lora.shape == self.output_shape)
-
+        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
         pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)
 
         lora_scale = 0.5
```
```diff
@@ -2212,9 +2197,6 @@ def test_lora_adapter_metadata_save_load_inference(self, lora_alpha):
         pipe = self.pipeline_class(**components).to(torch_device)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-        self.assertTrue(output_no_lora.shape == self.output_shape)
-
         pipe, _ = self.add_adapters_to_pipeline(
             pipe, text_lora_config=text_lora_config, denoiser_lora_config=denoiser_lora_config
         )
@@ -2260,7 +2242,7 @@ def test_inference_load_delete_load_adapters(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
+        output_no_lora = self.cached_non_lora_outputs[self.scheduler_cls.__name__]
 
         if "text_encoder" in self.pipeline_class._lora_loadable_modules:
             pipe.text_encoder.add_adapter(text_lora_config)
```
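Across the test-body hunks the edit is mechanical: the inline `output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]` call (and its now-redundant shape assertion) is replaced by a lookup into `cached_non_lora_outputs`, and the rest of each test compares that baseline against the LoRA-enabled output as before. A hedged, self-contained sketch of that consumption pattern, using a hypothetical `run_pipeline` helper rather than the diffusers API:

```python
# Hypothetical stand-in for the real pipeline + LoRA plumbing; only the
# cache-consumption pattern is the point here.
import numpy as np


def run_pipeline(seed: int, lora_scale: float = 0.0) -> np.ndarray:
    """Deterministic fake 'pipeline' call; LoRA shifts the output when scale > 0."""
    rng = np.random.default_rng(seed)
    return rng.standard_normal((1, 8, 8, 3)) + 0.1 * lora_scale


# Populated once, e.g. by a class-scoped fixture as in the diff above.
cached_non_lora_outputs = {"ToyScheduler": run_pipeline(seed=0)}


def test_lora_changes_output():
    # Read the cached baseline instead of recomputing it in every test.
    output_no_lora = cached_non_lora_outputs["ToyScheduler"]
    assert output_no_lora.shape == (1, 8, 8, 3)

    # Re-run with the same seed but with "LoRA" applied.
    output_lora = run_pipeline(seed=0, lora_scale=1.0)

    # Same assertion shape as the tests above: LoRA should change the output.
    assert not np.allclose(output_lora, output_no_lora, atol=1e-3, rtol=1e-3)
```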