Skip to content

Commit 6beedfd

Browse files
Change variable name
1 parent 36cc916 commit 6beedfd

File tree

1 file changed

+8
-8
lines changed

1 file changed

+8
-8
lines changed

tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mct_wrapper.ipynb

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -591,7 +591,7 @@
591591
"outputs": [],
592592
"source": [
593593
"# 1. Basic Post-Training Quantization for PyTorch\n",
594-
"flag, quantized_model = PTQ_Pytorch(float_model)"
594+
"flag, quantized_model_ptq = PTQ_Pytorch(float_model)"
595595
]
596596
},
597597
{
@@ -602,7 +602,7 @@
602602
"outputs": [],
603603
"source": [
604604
"# 2. PTQ with Mixed Precision (optimized size/accuracy trade-off for PyTorch)\n",
605-
"flag, quantized_model2 = PTQ_Pytorch_mixed_precision(float_model)"
605+
"flag, quantized_model_ptq_mixed_precision = PTQ_Pytorch_mixed_precision(float_model)"
606606
]
607607
},
608608
{
@@ -613,7 +613,7 @@
613613
"outputs": [],
614614
"source": [
615615
"# 3. Gradient-based PTQ (improved accuracy through fine-tuning for PyTorch)\n",
616-
"flag, quantized_model3 = GPTQ_Pytorch(float_model)"
616+
"flag, quantized_model_gptq = GPTQ_Pytorch(float_model)"
617617
]
618618
},
619619
{
@@ -624,7 +624,7 @@
624624
"outputs": [],
625625
"source": [
626626
"# 4. GPTQ with Mixed Precision (best accuracy with optimal compression for PyTorch)\n",
627-
"flag, quantized_model4 = GPTQ_Pytorch_mixed_precision(float_model)"
627+
"flag, quantized_model_gptq_mixed_precision = GPTQ_Pytorch_mixed_precision(float_model)"
628628
]
629629
},
630630
{
@@ -673,7 +673,7 @@
673673
"source": [
674674
"# Evaluate PTQ quantized PyTorch model accuracy\n",
675675
"print(\"\\n=== PyTorch PTQ Model Evaluation ===\")\n",
676-
"evaluate(quantized_model, val_dataloader, 'PTQ_Pytorch')"
676+
"evaluate(quantized_model_ptq, val_dataloader, 'PTQ_Pytorch')"
677677
]
678678
},
679679
{
@@ -685,7 +685,7 @@
685685
"source": [
686686
"# Evaluate PTQ + Mixed Precision PyTorch model accuracy\n",
687687
"print(\"\\n=== PyTorch PTQ + Mixed Precision Model Evaluation ===\")\n",
688-
"evaluate(quantized_model2, val_dataloader, 'PTQ_Pytorch_mixed_precision')"
688+
"evaluate(quantized_model_ptq_mixed_precision, val_dataloader, 'PTQ_Pytorch_mixed_precision')"
689689
]
690690
},
691691
{
@@ -697,7 +697,7 @@
697697
"source": [
698698
"# Evaluate GPTQ quantized PyTorch model accuracy\n",
699699
"print(\"\\n=== PyTorch GPTQ Model Evaluation ===\")\n",
700-
"evaluate(quantized_model3, val_dataloader, 'GPTQ_Pytorch')"
700+
"evaluate(quantized_model_gptq, val_dataloader, 'GPTQ_Pytorch')"
701701
]
702702
},
703703
{
@@ -709,7 +709,7 @@
709709
"source": [
710710
"# Evaluate GPTQ + Mixed Precision PyTorch model accuracy\n",
711711
"print(\"\\n=== PyTorch GPTQ + Mixed Precision Model Evaluation ===\")\n",
712-
"evaluate(quantized_model4, val_dataloader, 'GPTQ_Pytorch_mixed_precision')"
712+
"evaluate(quantized_model_gptq_mixed_precision, val_dataloader, 'GPTQ_Pytorch_mixed_precision')"
713713
]
714714
},
715715
{

0 commit comments

Comments
 (0)