|
168 | 168 | "metadata": {}, |
169 | 169 | "source": [ |
170 | 170 | "## Evaluation Function\n", |
171 | | - "Evaluate the model's performance using the test data loader and calculate and return the overall classification accuracy." |
| 171 | + "Evaluate the model's performance using the validation data loader and calculate and return the overall classification accuracy." |
172 | 172 | ] |
173 | 173 | }, |
174 | 174 | { |
|
177 | 177 | "metadata": {}, |
178 | 178 | "outputs": [], |
179 | 179 | "source": [ |
180 | | - "def evaluate(model, testloader):\n", |
| 180 | + "def evaluate(model, val_dataloader):\n", |
181 | 181 | " \"\"\"\n", |
182 | | - " Evaluate a model using a test loader.\n", |
| 182 | + " Evaluate a model using a validation loader.\n", |
183 | 183 | " \"\"\"\n", |
184 | 184 | " device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", |
185 | 185 | " model.to(device)\n", |
186 | 186 | " model.eval() # Set the model to evaluation mode\n", |
187 | 187 | " correct = 0\n", |
188 | 188 | " total = 0\n", |
189 | 189 | " with torch.no_grad():\n", |
190 | | - " for data in tqdm(testloader):\n", |
| 190 | + " for data in tqdm(val_dataloader):\n", |
191 | 191 | " images, labels = data\n", |
192 | 192 | " images, labels = images.to(device), labels.to(device)\n", |
193 | 193 | " outputs = model(images)\n", |
|
216 | 216 | }, |
217 | 217 | "outputs": [], |
218 | 218 | "source": [ |
219 | | - "# Get a FrameworkQuantizationCapabilities object that models the hardware platform for the quantized model inference. \n", |
| 219 | + "# Get a TargetPlatformCapabilities object that models the hardware platform for the quantized model inference. \n", |
220 | 220 | "target_platform_cap = mct.get_target_platform_capabilities(tpc_version=1.0)\n", |
221 | 221 | "configuration = mct.core.CoreConfig()\n", |
222 | 222 | "dataloader = make_dataloader_and_reset_random_seed()\n", |
|
619 | 619 | "metadata": {}, |
620 | 620 | "source": [ |
621 | 621 | "## Conclusion\n", |
622 | | - "These analyses showed that accuracy improved by 0.31% when the number of images was 80, and by 0.54% when the number of GPTQ epochs was 80, resulting in a reduced quantization accuracy loss.Following these troubleshooting steps can help improve the accuracy of your quantized model.By following these troubleshooting steps, you can improve the accuracy of your quantized model." |
| 622 | + "These analyses showed that accuracy improved by 0.31% when the number of images was 80, and by 0.54% when the number of GPTQ epochs was 80, resulting in a reduced quantization accuracy loss. By following these troubleshooting steps, you can improve the accuracy of your quantized model."
623 | 623 | ] |
624 | 624 | }, |
625 | 625 | { |
|
0 commit comments