@@ -226,10 +226,10 @@ Here is an example of how to use the pre-trained image classification models:
226226
227227.. code:: python
228228
229- from torchvision.io import read_image
229+ from torchvision.io import decode_image
230230 from torchvision.models import resnet50, ResNet50_Weights
231231
232- img = read_image("test/assets/encode_jpeg/grace_hopper_517x606.jpg")
232+ img = decode_image("test/assets/encode_jpeg/grace_hopper_517x606.jpg")
233233
234234 # Step 1: Initialize model with the best available weights
235235 weights = ResNet50_Weights.DEFAULT
@@ -283,10 +283,10 @@ Here is an example of how to use the pre-trained quantized image classification
283283
284284.. code:: python
285285
286- from torchvision.io import read_image
286+ from torchvision.io import decode_image
287287 from torchvision.models.quantization import resnet50, ResNet50_QuantizedWeights
288288
289- img = read_image("test/assets/encode_jpeg/grace_hopper_517x606.jpg")
289+ img = decode_image("test/assets/encode_jpeg/grace_hopper_517x606.jpg")
290290
291291 # Step 1: Initialize model with the best available weights
292292 weights = ResNet50_QuantizedWeights.DEFAULT
@@ -339,11 +339,11 @@ Here is an example of how to use the pre-trained semantic segmentation models:
339339
340340.. code:: python
341341
342- from torchvision.io.image import read_image
342+ from torchvision.io.image import decode_image
343343 from torchvision.models.segmentation import fcn_resnet50, FCN_ResNet50_Weights
344344 from torchvision.transforms.functional import to_pil_image
345345
346- img = read_image("gallery/assets/dog1.jpg")
346+ img = decode_image("gallery/assets/dog1.jpg")
347347
348348 # Step 1: Initialize model with the best available weights
349349 weights = FCN_ResNet50_Weights.DEFAULT
@@ -411,12 +411,12 @@ Here is an example of how to use the pre-trained object detection models:
411411.. code:: python
412412
413413
414- from torchvision.io.image import read_image
414+ from torchvision.io.image import decode_image
415415 from torchvision.models.detection import fasterrcnn_resnet50_fpn_v2, FasterRCNN_ResNet50_FPN_V2_Weights
416416 from torchvision.utils import draw_bounding_boxes
417417 from torchvision.transforms.functional import to_pil_image
418418
419- img = read_image("test/assets/encode_jpeg/grace_hopper_517x606.jpg")
419+ img = decode_image("test/assets/encode_jpeg/grace_hopper_517x606.jpg")
420420
421421 # Step 1: Initialize model with the best available weights
422422 weights = FasterRCNN_ResNet50_FPN_V2_Weights.DEFAULT
0 commit comments