Enhance llmc CI on GPU and XPU #1483
Open · chensuyue wants to merge 8 commits into main from suyue/ci
Commits (8; the diff shown below reflects changes from 2 of them):
c8467ff  enhance llmc CI on GPU and XPU  (chensuyue)
a970141  [pre-commit.ci] auto fixes from pre-commit.com hooks  (pre-commit-ci[bot])
9119bcb  update test requirements for xpu  (chensuyue)
f805e19  add cuda llmc test back  (chensuyue)
5becea1  fix req path  (chensuyue)
234955c  [pre-commit.ci] auto fixes from pre-commit.com hooks  (pre-commit-ci[bot])
893cb65  Merge branch 'main' into suyue/ci  (chensuyue)
89c0d81  for test  (chensuyue)
One file in the diff was deleted; its contents failed to load.
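The other file is new (per the hunk header, 239 lines added): a pytest module that exercises llmcompressor's oneshot flow with AutoRoundModifier on XPU. It covers an int4 group-wise recipe (both as a YAML string and as a modifier object), the NVFP4 and MXFP4 schemes, multi-device calibration via device_ids, and FP8 W8A8 dynamic/static round-to-nearest (iters=0) recipes, all against TinyLlama-1.1B-Chat and skipped unless at least one XPU is present.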
```python
import pytest
import torch
from compressed_tensors.quantization import QuantizationArgs, QuantizationScheme
from llmcompressor import oneshot
from llmcompressor.modifiers.autoround import AutoRoundModifier
from transformers import AutoModelForCausalLM, AutoTokenizer

from auto_round.calib_dataset import get_dataset

recipe_str = """
quant_stage:
    quant_modifiers:
        AutoRoundModifier:
            ignore: ["lm_head"]
            iters: 10
            config_groups:
                group_0:
                    targets:
                        - "Linear"
                    input_activations: null
                    output_activations: null
                    weights:
                        num_bits: 4
                        type: "int"
                        symmetric: true
                        strategy: group
                        group_size: 128
"""

recipe_modifier_full = AutoRoundModifier(
    ignore=["lm_head"],
    iters=10,
    config_groups={
        "group_0": QuantizationScheme(
            targets=["Linear"],
            weights=QuantizationArgs(num_bits=4, strategy="group", group_size=128),
        )
    },
)

recipe_modifier_nvfp4 = AutoRoundModifier(
    ignore=["lm_head"],
    iters=2,
    scheme="NVFP4",
)

recipe_modifier_mxfp4 = AutoRoundModifier(
    ignore=["lm_head"],
    iters=0,
    scheme="MXFP4",
)

w8a8_dynamic_recipe_modifier = AutoRoundModifier(
    ignore=["lm_head"],
    iters=0,
    config_groups={
        "group_0": QuantizationScheme(
            targets=["Linear"],
            weights=QuantizationArgs(num_bits=8, type="float", strategy="channel"),
            input_activations=QuantizationArgs(
                num_bits=8, type="float", strategy="token", dynamic=True
            ),
        )
    },
)

w8a8_static_recipe_modifier = AutoRoundModifier(
    ignore=["lm_head"],
    iters=0,
    config_groups={
        "group_0": QuantizationScheme(
            targets=["Linear"],
            weights=QuantizationArgs(num_bits=8, type="float", strategy="tensor"),
            input_activations=QuantizationArgs(num_bits=8, type="float", strategy="tensor"),
        )
    },
)


@pytest.mark.skipif(torch.xpu.device_count() < 1, reason="test requires at least 1 XPU")
@pytest.mark.parametrize(
    "recipe",
    [
        recipe_str,
        recipe_modifier_full,
        recipe_modifier_nvfp4,
        recipe_modifier_mxfp4,
    ],
)
def test_oneshot_application(recipe, tmp_path):
    output = tmp_path / "oneshot_output"
    model = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
    tokenizer = AutoTokenizer.from_pretrained(model)
    dataset = get_dataset(
        tokenizer=tokenizer,
        seqlen=1024,
        nsamples=32,
    )

    device = "xpu:0" if torch.xpu.is_available() else "cpu"

    oneshot(
        model=model,
        dataset=dataset,
        output_dir=output,
        recipe=recipe,
    )
    model_loaded = AutoModelForCausalLM.from_pretrained(output, device_map=device)

    # Check that the model is quantized
    # decompress() will attach a quantization_config to the model
    # as we decompress right away
    quantization_config = model_loaded.config.quantization_config.quantization_config
    assert quantization_config is not None

    # check config is set properly
    assert "lm_head" in quantization_config.ignore
    assert len(quantization_config.config_groups) == 1
    quant_scheme = quantization_config.config_groups["group_0"]
    assert isinstance(quant_scheme, QuantizationScheme)

    weight_args = quantization_config.config_groups["group_0"].weights
    assert isinstance(weight_args, QuantizationArgs)
    assert weight_args.num_bits == 4

    # Check a specific layer is quantized
    targeted_linear_layer = model_loaded.model.layers[2].self_attn.q_proj
    assert hasattr(targeted_linear_layer, "quantization_scheme")

    # Check lm-head is not quantized
    not_targeted = model_loaded.lm_head
    assert not hasattr(not_targeted, "quantization_scheme")


@pytest.mark.skipif(torch.xpu.device_count() < 2, reason="test requires at least 2 XPUs")
def test_oneshot_with_device_ids(tmp_path):
    output = tmp_path / "oneshot_output"
    model = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
    tokenizer = AutoTokenizer.from_pretrained(model)
    dataset = get_dataset(
        tokenizer=tokenizer,
        seqlen=512,
        nsamples=4,
    )

    device = "xpu:0"

    recipe = AutoRoundModifier(
        ignore=["lm_head"],
        iters=10,
        config_groups={
            "group_0": QuantizationScheme(
                targets=["Linear"],
                weights=QuantizationArgs(num_bits=4, strategy="group", group_size=128),
            )
        },
        device_ids="0,1",
    )

    oneshot(
        model=model,
        dataset=dataset,
        output_dir=output,
        recipe=recipe,
    )
    model_loaded = AutoModelForCausalLM.from_pretrained(output, device_map=device)

    # Check that the model is quantized
    # decompress() will attach a quantization_config to the model
    # as we decompress right away
    quantization_config = model_loaded.config.quantization_config.quantization_config
    assert quantization_config is not None

    # check config is set properly
    assert "lm_head" in quantization_config.ignore
    assert len(quantization_config.config_groups) == 1
    quant_scheme = quantization_config.config_groups["group_0"]
    assert isinstance(quant_scheme, QuantizationScheme)

    weight_args = quantization_config.config_groups["group_0"].weights
    assert isinstance(weight_args, QuantizationArgs)
    assert weight_args.num_bits == 4

    # Check a specific layer is quantized
    targeted_linear_layer = model_loaded.model.layers[2].self_attn.q_proj
    assert hasattr(targeted_linear_layer, "quantization_scheme")

    # Check lm-head is not quantized
    not_targeted = model_loaded.lm_head
    assert not hasattr(not_targeted, "quantization_scheme")


@pytest.mark.skipif(torch.xpu.device_count() < 1, reason="test requires at least 1 XPU")
@pytest.mark.parametrize(
    "recipe",
    [w8a8_dynamic_recipe_modifier, w8a8_static_recipe_modifier],
)
def test_rtn_oneshot(recipe, tmp_path):
    output = tmp_path / "oneshot_output"
    model = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
    tokenizer = AutoTokenizer.from_pretrained(model)
    dataset = get_dataset(
        tokenizer=tokenizer,
        seqlen=1024,
        nsamples=32,
    )

    device = "xpu:0"

    oneshot(
        model=model,
        dataset=dataset,
        output_dir=output,
        recipe=recipe,
    )
    model_loaded = AutoModelForCausalLM.from_pretrained(output, device_map=device)

    quantization_config = model_loaded.config.quantization_config.quantization_config
    assert quantization_config is not None

    # check config is set properly
    assert "lm_head" in quantization_config.ignore
    assert len(quantization_config.config_groups) == 1
    quant_scheme = quantization_config.config_groups["group_0"]
    assert isinstance(quant_scheme, QuantizationScheme)

    weight_args = quantization_config.config_groups["group_0"].weights
    act_args = quantization_config.config_groups["group_0"].input_activations
    assert isinstance(weight_args, QuantizationArgs)
    assert weight_args.num_bits == recipe.config_groups["group_0"].weights.num_bits
    assert weight_args.strategy == recipe.config_groups["group_0"].weights.strategy
    if act_args is not None:
        assert act_args.num_bits == recipe.config_groups["group_0"].input_activations.num_bits
        assert act_args.strategy == recipe.config_groups["group_0"].input_activations.strategy

    # Check a specific layer is quantized
    targeted_linear_layer = model_loaded.model.layers[2].self_attn.q_proj
    assert hasattr(targeted_linear_layer, "quantization_scheme")

    # Check lm-head is not quantized
    not_targeted = model_loaded.lm_head
    assert not hasattr(not_targeted, "quantization_scheme")
```
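Two of the tests hard-code "xpu:0" while the first falls back to CPU, and this PR also restores the CUDA llmc test (commit f805e19). A device-selection helper along the following lines could unify the two back ends; this is a hypothetical sketch, not code from the diff, and the name pick_device is an assumption:

```python
import torch


def pick_device() -> str:
    # Hypothetical helper (not part of this PR): prefer XPU, then CUDA,
    # then fall back to CPU, mirroring the inline checks in the tests above.
    if torch.xpu.is_available():
        return "xpu:0"
    if torch.cuda.is_available():
        return "cuda:0"
    return "cpu"


# Usage mirroring the tests: load the quantized checkpoint onto whatever
# accelerator is present, e.g.
# model_loaded = AutoModelForCausalLM.from_pretrained(output, device_map=pick_device())
```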