
Commit 674a7bd

added test to improve coverage
1 parent 8cef1c7 commit 674a7bd

File tree

2 files changed: +42 -24 lines changed


keras/src/models/model_test.py

Lines changed: 41 additions & 23 deletions
@@ -1293,57 +1293,57 @@ def call(self, inputs):
 
 @pytest.mark.requires_trainable_backend
 class ModelQuantizationTest(testing.TestCase):
-    def _run_gptq_test_on_dataset(self, dataset):
-        """Helper function to run a full GPTQ quantization
-        test on a given dataset."""
-
+    def _run_gptq_test_on_dataset(self, dataset, **config_kwargs):
+        """Helper function to run a full GPTQ quantization test."""
         model = _get_model_with_dense_attention()
         rng = np.random.default_rng(seed=42)
+
         # 1. Common setup
         NUM_SAMPLES = 16
         SEQUENCE_LENGTH = 128
         VOCAB_SIZE = 1000
         W_BITS = 4
-        GROUP_SIZE = 32
+
+        # Default config that can be overridden by config_kwargs
+        base_config = {
+            "dataset": dataset,
+            "wbits": W_BITS,
+            "nsamples": NUM_SAMPLES,
+            "seqlen": SEQUENCE_LENGTH,
+            "group_size": 32,
+            "symmetric": False,
+            "act_order": False,
+        }
 
         mock_tokenizer = lambda text: np.array(
             [ord(c) % VOCAB_SIZE for c in text]
         )
         mock_tokenizer.tokenize = mock_tokenizer
+        base_config["tokenizer"] = mock_tokenizer
 
-        # 2. Find target layer and get original weights
+        # Find target layer and get original weights
         target_layer = model.layers[2].ffn.layers[0]
-
         self.assertIsNotNone(
             target_layer,
-            "Test setup failed: No Dense layer was found inside "
-            "an 'ffn' block.",
+            "Test setup failed: No Dense layer found in 'ffn' block.",
         )
         original_weights = np.copy(target_layer.kernel.numpy())
 
-        # 3. Configure and run quantization
-        gptq_config = GPTQConfig(
-            dataset=dataset,
-            tokenizer=mock_tokenizer,
-            wbits=W_BITS,
-            nsamples=NUM_SAMPLES,
-            seqlen=SEQUENCE_LENGTH,
-            group_size=GROUP_SIZE,
-        )
+        # Configure and run quantization
+        final_config = {**base_config, **config_kwargs}
+        gptq_config = GPTQConfig(**final_config)
+
         model.quantize("gptq", quant_config=gptq_config)
 
-        # 4. Assertions and verification
+        # Assertions and verification
        quantized_weights = target_layer.kernel.numpy()
 
-        # Assert that the weights have been changed
         self.assertNotAllClose(
             original_weights,
             quantized_weights,
-            msg="Weights were not changed by the GPTQ process for "
-            "dataset: {dataset}",
+            msg=f"Weights not changed by GPTQ for config: {config_kwargs}",
         )
 
-        # Verify the quantized model can still make a prediction
         dummy_sample = rng.integers(
             low=0, high=VOCAB_SIZE, size=(1, SEQUENCE_LENGTH)
         )
@@ -1378,3 +1378,21 @@ def test_quantize_gptq_on_different_datasets(self):
             # for each specific dataset without stopping the whole test.
             with self.subTest(dataset_type=dataset_name):
                 self._run_gptq_test_on_dataset(dataset)
+
+    def test_quantize_gptq_with_config_variations(self):
+        """Tests GPTQ with specific config variations."""
+        config_variations = {
+            "per_channel": {"group_size": -1},
+            "act_order": {"act_order": True},
+            "symmetric": {"symmetric": True},
+            "all_options_enabled": {
+                "group_size": -1,
+                "act_order": True,
+                "symmetric": True,
+            },
+        }
+
+        dataset = ["This is the calibration data for the test."]
+        for config_name, config_overrides in config_variations.items():
+            with self.subTest(config_type=config_name):
+                self._run_gptq_test_on_dataset(dataset, **config_overrides)
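
For context, the new helper relies on Python's dict-unpacking merge (`{**base, **overrides}`) so each subtest overrides only the options it cares about. The snippet below is a minimal, hypothetical sketch of that pattern in isolation; `make_config` is a stand-in for GPTQConfig and is not part of the Keras API, only the merge logic mirrors the diff above.

# Minimal sketch of the base-config / override merge used by the test helper.
# `make_config` is a hypothetical stand-in for GPTQConfig.
def make_config(**kwargs):
    return dict(kwargs)

base_config = {
    "wbits": 4,
    "group_size": 32,
    "symmetric": False,
    "act_order": False,
}

variations = {
    "per_channel": {"group_size": -1},
    "act_order": {"act_order": True},
}

for name, overrides in variations.items():
    # Later keys win in a dict literal, so the overrides replace the
    # matching base entries while everything else is kept unchanged.
    final = {**base_config, **overrides}
    print(name, make_config(**final))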

keras/src/quantizers/gptqutils.py

Lines changed: 1 addition & 1 deletion
@@ -195,7 +195,7 @@ def apply_gptq_layerwise(
     progbar = keras_utils.Progbar(target=len(transformer_blocks))
 
     for i, block in enumerate(transformer_blocks):
-        logging.info(f"Quantizing Block {i} ---")
+        logging.info(f"Quantizing Block {i}")
         sub_layers_map = find_layers_in_block(block)
 
         if not sub_layers_map:
