5 changes: 5 additions & 0 deletions benchmarks/decoders/benchmark_decoders.py
@@ -158,6 +158,11 @@ def main() -> None:
if torch.cuda.is_available()
else "not available"
),
"xpu": (
torch.xpu.get_device_properties(0).name
if torch.xpu.is_available()
else "not available"
),
},
}
plot_data(data, args.plot_path)
29 changes: 29 additions & 0 deletions benchmarks/decoders/benchmark_decoders_library.py
@@ -603,6 +603,35 @@ def decode_and_resize(self, video_file, pts_list, height, width, device):
return frames


# check whether specified decoder is supported
def check_decoder_support(decoder_type):
if decoder_type == "cuda":
return torch.cuda.is_available()

if decoder_type == "xpu":
return torch.xpu.is_available()

if decoder_type == "torchaudio":
try:
import torchaudio # noqa: F401
except ImportError:
return False
return True

if decoder_type == "torchvision":
try:

Owner:
This does not quite help:

$ python3 benchmark_decoders.py --decoders decord,decord_batch,torchcodec_core,torchcodec_core_batch,torchcodec_core_compiled,torchcodec_public,torchcodec_public_nonbatch
Traceback (most recent call last):
  File "/home/dvrogozh/git/pytorch/torchcodec/benchmarks/decoders/benchmark_decoders.py", line 172, in <module>
    main()
  File "/home/dvrogozh/git/pytorch/torchcodec/benchmarks/decoders/benchmark_decoders.py", line 129, in main
    decoders_to_run[display] = kind(**options)
  File "/home/dvrogozh/git/pytorch/torchcodec/benchmarks/decoders/benchmark_decoders_library.py", line 382, in __init__
    from torchvision.transforms import v2 as transforms_v2
ModuleNotFoundError: No module named 'torchvision'

Author:
This check was only enabled in generate_readme_data.py, since the other scripts allow specifying which decoders to test. But it is no problem to add the checks to the other benchmarks as well. The latest commit updates benchmark_decoders.py and gpu_benchmark.py to fail gracefully when dependencies are missing and to run the remaining tests where possible.
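For illustration only, a minimal sketch of what such a graceful skip could look like (hypothetical, not the actual commit; build_decoders_to_run and the per-decoder dependency field are made-up names, while kind(**options) mirrors the call shown in the traceback above):

from benchmark_decoders_library import check_decoder_support

def build_decoders_to_run(kinds_and_options):
    # kinds_and_options: {display_name: (decoder_class, kwargs, dependency_or_None)}
    decoders_to_run = {}
    for display, (kind, options, dependency) in kinds_and_options.items():
        # Skip decoders whose device or module is missing instead of raising,
        # so the remaining benchmarks still run.
        if dependency is not None and not check_decoder_support(dependency):
            print(f"Warning: {dependency} is not available; skipping {display}.")
            continue
        decoders_to_run[display] = kind(**options)
    return decoders_to_run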

# also check whether video_decoder backend is installed
import torchvision

torchvision.set_video_backend("video_reader")
except (ImportError, RuntimeError):
return False
return True

print(f"Warning: unknown decoder_type {decoder_type}")
return False


def create_torchcodec_core_decode_first_frame(video_file):
video_decoder = create_from_file(video_file)
_add_video_stream(video_decoder)
75 changes: 69 additions & 6 deletions benchmarks/decoders/generate_readme_data.py
@@ -4,6 +4,7 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import json
import os
import platform
@@ -14,6 +15,7 @@

from benchmark_decoders_library import (
BatchParameters,
check_decoder_support,
DataLoaderInspiredWorkloadParameters,
generate_videos,
retrieve_videos,
@@ -29,6 +31,50 @@
def main() -> None:
"""Benchmarks the performance of a few video decoders on synthetic videos"""

parser = argparse.ArgumentParser()
parser.add_argument(
"--decoders",
help=(
"Comma-separated list of decoders to benchmark. Valid options: cpu, cuda, xpu, torchvision, torchaudio"
),
type=str,
default="cpu,cuda,torchvision,torchaudio",
)
parser.add_argument(
"--resize_device",
help=(
"Device for resize. Default: cuda if available, else cpu. Valid options: cpu, cuda, xpu"
),
type=str,
default="cuda",
)
args = parser.parse_args()

# warn if device or module is not available
decoder_list = set(args.decoders.split(","))
if "xpu" in decoder_list and not check_decoder_support("xpu"):
print("Warning: xpu is not available. Test will be skipped.")
decoder_list.remove("xpu")

if "cuda" in decoder_list and not check_decoder_support("cuda"):
print("Warning: cuda is not available. Test will be skipped.")
decoder_list.remove("cuda")

if "torchaudio" in decoder_list and not check_decoder_support("torchaudio"):
print("Warning: torchaudio is not available. Test will be skipped.")
decoder_list.remove("torchaudio")

if "torchvision" in decoder_list and not check_decoder_support("torchvision"):
print("Warning: torchvision is not available. Test will be skipped.")
decoder_list.remove("torchvision")

resize_device = args.resize_device
if resize_device == "cuda" and not torch.cuda.is_available():
resize_device = "cpu"
if resize_device == "xpu" and not torch.xpu.is_available():
resize_device = "cpu"
print(f"resize_device = {resize_device}")

videos_dir_path = "/tmp/torchcodec_benchmarking_videos"
if not os.path.exists(videos_dir_path):
shutil.rmtree(videos_dir_path, ignore_errors=True)
@@ -60,15 +106,27 @@ def main() -> None:
retrieve_videos(urls_and_dest_paths)

decoder_dict = {}
decoder_dict["torchcodec"] = TorchCodecPublic()
decoder_dict["torchcodec[approx]"] = TorchCodecPublic(seek_mode="approximate")
if torch.cuda.is_available():
if "cpu" in decoder_list:
decoder_dict["torchcodec"] = TorchCodecPublic()
decoder_dict["torchcodec[approx]"] = TorchCodecPublic(seek_mode="approximate")

if "cuda" in decoder_list:
decoder_dict["torchcodec[cuda]"] = TorchCodecPublic(device="cuda")
decoder_dict["torchcodec[cuda,approx]"] = TorchCodecPublic(
device="cuda", seek_mode="approximate"
)
decoder_dict["torchvision[video_reader]"] = TorchVision("video_reader")
decoder_dict["torchaudio"] = TorchAudioDecoder()

if "xpu" in decoder_list:
decoder_dict["torchcodec[xpu]"] = TorchCodecPublic(device="xpu")
decoder_dict["torchcodec[xpu,approx]"] = TorchCodecPublic(
device="xpu", seek_mode="approximate"
)

if "torchvision" in decoder_list:
decoder_dict["torchvision[video_reader]"] = TorchVision("video_reader")

if "torchaudio" in decoder_list:
decoder_dict["torchaudio"] = TorchAudioDecoder()

# These are the number of uniform seeks we do in the seek+decode benchmark.
num_samples = 10
@@ -85,7 +143,7 @@ def main() -> None:
batch_parameters=BatchParameters(batch_size=50, num_threads=10),
resize_height=256,
resize_width=256,
resize_device="cuda" if torch.cuda.is_available() else "cpu",
resize_device=resize_device,
),
)
data_for_writing = {
@@ -100,6 +158,11 @@ def main() -> None:
if torch.cuda.is_available()
else "not available"
),
"xpu": (
torch.xpu.get_device_properties(0).name
if torch.xpu.is_available()
else "not available"
),
},
}

8 changes: 7 additions & 1 deletion benchmarks/decoders/gpu_benchmark.py
@@ -27,7 +27,7 @@ def decode_full_video(video_path, decode_device_string, resize_device_string):
print(f"{decode_device_string=} {resize_device_string=}")
decoder = torchcodec._core.create_from_file(video_path)
num_threads = None
if "cuda" in decode_device_string:
if "cuda" in decode_device_string or "xpu" in decode_device_string:
num_threads = 1
width = None
height = None
@@ -78,6 +78,8 @@ def decode_videos_using_threads(
actual_decode_device = decode_device_string
if "cuda" in decode_device_string and use_multiple_gpus:
actual_decode_device = f"cuda:{i % torch.cuda.device_count()}"
if "xpu" in decode_device_string and use_multiple_gpus:
actual_decode_device = f"xpu:{i % torch.xpu.device_count()}"
executor.submit(
decode_full_video, video_path, actual_decode_device, resize_device_string
)
@@ -154,10 +156,14 @@ def main():
if "cuda" in decode_label:
# Shorten "cuda:0" to "cuda"
decode_label = "cuda"
if "xpu" in decode_label:
decode_label = "xpu"
resize_label = resize_device_string
if "cuda" in resize_device_string:
# Shorten "cuda:0" to "cuda"
resize_label = "cuda"
if "xpu" in resize_device_string:
resize_label = "xpu"
print("decode_device", decode_device_string)
print("resize_device", resize_device_string)
if args.num_threads > 1: