
Commit 9ba0557

Author: Chien Yuan Chang (committed)
Commit message: [SAMPLE-UPDATE] sample_get_result_file
1 parent 4985ecf

File tree: 2 files changed (+40, -44 lines)


sdk/contentunderstanding/azure-ai-contentunderstanding/samples/async_samples/sample_get_result_file_async.py

Lines changed: 20 additions & 22 deletions
@@ -9,15 +9,18 @@
 
 DESCRIPTION:
     This sample demonstrates how to retrieve result files (such as keyframe images) from a
-    video analysis operation using the get_result_file API.
+    video analysis operation using the `get_result_file` API.
 
-    When analyzing video content, the Content Understanding service can generate result files:
+    About result files:
+    When analyzing video content, the Content Understanding service can generate result files such as:
     - Keyframe images: Extracted frames from the video at specific timestamps
     - Other result files: Additional files generated during analysis
 
-    The get_result_file API allows you to retrieve these files using:
+    The `get_result_file` API allows you to retrieve these files using:
     - Operation ID: Extracted from the analysis operation
-    - File path: The path to the specific result file (e.g., "keyframes/{frameTimeMs}")
+    - File path: The path to the specific result file. In the recording, keyframes were accessed
+      with paths like `keyframes/733` and `keyframes/9000`, following the
+      `keyframes/{frameTimeMs}` pattern.
 
 USAGE:
     python sample_get_result_file_async.py
@@ -54,50 +57,50 @@ async def main() -> None:
 
     async with ContentUnderstandingClient(endpoint=endpoint, credential=credential) as client:
         # [START analyze_video_for_result_files]
-        # Use a sample video URL
+        # Use a sample video URL to get keyframes for GetResultFile testing
+        # You can replace this with your own video file URL
         video_url = "https://github.com/Azure-Samples/azure-ai-content-understanding-assets/raw/refs/heads/main/videos/sdk_samples/FlightSimulator.mp4"
 
-        print(f"Analyzing video with prebuilt-videoSearch...")
+        print("Analyzing video with prebuilt-videoSearch...")
         print(f" URL: {video_url}")
 
-        # Start the analysis operation (using begin_analyze which returns a poller)
-        poller = await client.begin_analyze(
+        # Analyze and wait for completion
+        analyze_operation = await client.begin_analyze(
             analyzer_id="prebuilt-videoSearch",
             inputs=[AnalyzeInput(url=video_url)],
         )
 
-        # Get the operation ID from the poller
-        operation_id = poller.operation_id
+        # Get the operation ID - this is needed to retrieve result files later
+        operation_id = analyze_operation.operation_id
         print(f" Operation ID: {operation_id}")
 
-        # Wait for completion
         print(" Waiting for analysis to complete...")
-        result: AnalyzeResult = await poller.result()
+        result: AnalyzeResult = await analyze_operation.result()
         # [END analyze_video_for_result_files]
 
         # [START get_result_file]
         if not result.contents or len(result.contents) == 0:
             print("No content found in the analysis result.")
             return
 
-        content = result.contents[0]
-
-        # For video analysis, keyframes would be found in AudioVisualContent.KeyFrameTimesMs
+        # For video analysis, keyframes would be found in AudioVisualContent.key_frame_times_ms
+        # Cast MediaContent to AudioVisualContent to access video-specific properties
         video_content: AudioVisualContent = result.contents[0]  # type: ignore
 
+        # Print keyframe information
         if video_content.key_frame_times_ms and len(video_content.key_frame_times_ms) > 0:
             total_keyframes = len(video_content.key_frame_times_ms)
             first_frame_time_ms = video_content.key_frame_times_ms[0]
 
-            print(f"\nTotal keyframes: {total_keyframes}")
+            print(f"Total keyframes: {total_keyframes}")
             print(f"First keyframe time: {first_frame_time_ms} ms")
 
             # Get the first keyframe as an example
             frame_path = f"keyframes/{first_frame_time_ms}"
 
             print(f"Getting result file: {frame_path}")
 
-            # Get the result file (keyframe image)
+            # Get the result file (keyframe image) using the operation ID obtained from Operation<T>.id
             file_response = await client.get_result_file(
                 operation_id=operation_id,
                 path=frame_path,
@@ -120,11 +123,6 @@ async def main() -> None:
         print("\nNote: This sample demonstrates GetResultFile API usage.")
         print(" For video analysis with keyframes, use prebuilt-videoSearch analyzer.")
         print(" Keyframes are available in AudioVisualContent.key_frame_times_ms.")
-        print()
-        print(f"Example usage with operation ID '{operation_id}':")
-        print(" file_response = await client.get_result_file(")
-        print(" operation_id=operation_id,")
-        print(' path="keyframes/1000")')
         # [END get_result_file]
 
     if not isinstance(credential, AzureKeyCredential):
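
For reference, the updated async flow condenses to the sketch below. This is a minimal, hedged sketch rather than the shipped sample: the import paths (azure.ai.contentunderstanding.aio and azure.ai.contentunderstanding.models) and the environment variable names are assumptions, while the client calls themselves (begin_analyze, operation_id, result(), get_result_file) are the ones shown in the diff.

    # Minimal async sketch of the flow above. Module paths and environment variable
    # names are assumptions; only the client calls are taken from the diff.
    import asyncio
    import os

    from azure.core.credentials import AzureKeyCredential
    from azure.ai.contentunderstanding.aio import ContentUnderstandingClient  # assumed module path
    from azure.ai.contentunderstanding.models import (  # assumed module path
        AnalyzeInput,
        AnalyzeResult,
        AudioVisualContent,
    )

    VIDEO_URL = "https://github.com/Azure-Samples/azure-ai-content-understanding-assets/raw/refs/heads/main/videos/sdk_samples/FlightSimulator.mp4"


    async def fetch_first_keyframe() -> None:
        endpoint = os.environ["CONTENT_UNDERSTANDING_ENDPOINT"]  # hypothetical variable name
        key = os.environ["CONTENT_UNDERSTANDING_KEY"]  # hypothetical variable name

        async with ContentUnderstandingClient(endpoint=endpoint, credential=AzureKeyCredential(key)) as client:
            # Start the analysis; the operation ID is what get_result_file needs later.
            analyze_operation = await client.begin_analyze(
                analyzer_id="prebuilt-videoSearch",
                inputs=[AnalyzeInput(url=VIDEO_URL)],
            )
            operation_id = analyze_operation.operation_id
            result: AnalyzeResult = await analyze_operation.result()

            # Keyframe times on the first (video) content item drive the keyframes/{frameTimeMs} paths.
            video_content: AudioVisualContent = result.contents[0]  # type: ignore
            if video_content.key_frame_times_ms:
                frame_path = f"keyframes/{video_content.key_frame_times_ms[0]}"
                file_response = await client.get_result_file(operation_id=operation_id, path=frame_path)
                print(f"Retrieved {frame_path} ({type(file_response).__name__})")


    if __name__ == "__main__":
        asyncio.run(fetch_first_keyframe())
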

sdk/contentunderstanding/azure-ai-contentunderstanding/samples/sample_get_result_file.py

Lines changed: 20 additions & 22 deletions
@@ -9,15 +9,18 @@
 
 DESCRIPTION:
     This sample demonstrates how to retrieve result files (such as keyframe images) from a
-    video analysis operation using the get_result_file API.
+    video analysis operation using the `get_result_file` API.
 
-    When analyzing video content, the Content Understanding service can generate result files:
+    About result files:
+    When analyzing video content, the Content Understanding service can generate result files such as:
     - Keyframe images: Extracted frames from the video at specific timestamps
     - Other result files: Additional files generated during analysis
 
-    The get_result_file API allows you to retrieve these files using:
+    The `get_result_file` API allows you to retrieve these files using:
    - Operation ID: Extracted from the analysis operation
-    - File path: The path to the specific result file (e.g., "keyframes/{frameTimeMs}")
+    - File path: The path to the specific result file. In the recording, keyframes were accessed
+      with paths like `keyframes/733` and `keyframes/9000`, following the
+      `keyframes/{frameTimeMs}` pattern.
 
 USAGE:
     python sample_get_result_file.py
@@ -54,50 +57,50 @@ def main() -> None:
     client = ContentUnderstandingClient(endpoint=endpoint, credential=credential)
 
     # [START analyze_video_for_result_files]
-    # Use a sample video URL
+    # Use a sample video URL to get keyframes for GetResultFile testing
+    # You can replace this with your own video file URL
     video_url = "https://github.com/Azure-Samples/azure-ai-content-understanding-assets/raw/refs/heads/main/videos/sdk_samples/FlightSimulator.mp4"
 
-    print(f"Analyzing video with prebuilt-videoSearch...")
+    print("Analyzing video with prebuilt-videoSearch...")
     print(f" URL: {video_url}")
 
-    # Start the analysis operation (using begin_analyze which returns a poller)
-    poller = client.begin_analyze(
+    # Analyze and wait for completion
+    analyze_operation = client.begin_analyze(
         analyzer_id="prebuilt-videoSearch",
         inputs=[AnalyzeInput(url=video_url)],
     )
 
-    # Get the operation ID from the poller
-    operation_id = poller.operation_id
+    # Get the operation ID - this is needed to retrieve result files later
+    operation_id = analyze_operation.operation_id
     print(f" Operation ID: {operation_id}")
 
-    # Wait for completion
     print(" Waiting for analysis to complete...")
-    result: AnalyzeResult = poller.result()
+    result: AnalyzeResult = analyze_operation.result()
     # [END analyze_video_for_result_files]
 
    # [START get_result_file]
     if not result.contents or len(result.contents) == 0:
         print("No content found in the analysis result.")
         return
 
-    content = result.contents[0]
-
-    # For video analysis, keyframes would be found in AudioVisualContent.KeyFrameTimesMs
+    # For video analysis, keyframes would be found in AudioVisualContent.key_frame_times_ms
+    # Cast MediaContent to AudioVisualContent to access video-specific properties
     video_content: AudioVisualContent = result.contents[0]  # type: ignore
 
+    # Print keyframe information
     if video_content.key_frame_times_ms and len(video_content.key_frame_times_ms) > 0:
         total_keyframes = len(video_content.key_frame_times_ms)
         first_frame_time_ms = video_content.key_frame_times_ms[0]
 
-        print(f"\nTotal keyframes: {total_keyframes}")
+        print(f"Total keyframes: {total_keyframes}")
         print(f"First keyframe time: {first_frame_time_ms} ms")
 
         # Get the first keyframe as an example
         frame_path = f"keyframes/{first_frame_time_ms}"
 
         print(f"Getting result file: {frame_path}")
 
-        # Get the result file (keyframe image)
+        # Get the result file (keyframe image) using the operation ID obtained from Operation<T>.id
         file_response = client.get_result_file(
             operation_id=operation_id,
             path=frame_path,
@@ -120,11 +123,6 @@ def main() -> None:
     print("\nNote: This sample demonstrates GetResultFile API usage.")
     print(" For video analysis with keyframes, use prebuilt-videoSearch analyzer.")
     print(" Keyframes are available in AudioVisualContent.key_frame_times_ms.")
-    print()
-    print(f"Example usage with operation ID '{operation_id}':")
-    print(" file_response = client.get_result_file(")
-    print(" operation_id=operation_id,")
-    print(' path="keyframes/1000")')
     # [END get_result_file]
 
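
The synchronous diff stops at the get_result_file call, so what to do with file_response is left to the reader. A plausible follow-up, assuming get_result_file returns the raw keyframe image bytes (the diff does not show the return type), is a small helper that writes the frame to disk:

    # Hypothetical helper, not part of the commit: persists one keyframe fetched via
    # get_result_file. Assumes the sync client from the sample and that the call
    # returns raw image bytes; a streaming response would need to be iterated instead.
    from azure.ai.contentunderstanding import ContentUnderstandingClient  # assumed module path


    def save_keyframe(client: ContentUnderstandingClient, operation_id: str, frame_time_ms: int) -> str:
        frame_path = f"keyframes/{frame_time_ms}"
        file_response = client.get_result_file(operation_id=operation_id, path=frame_path)

        # The .jpg extension is an assumption about the image format.
        output_name = f"keyframe_{frame_time_ms}.jpg"
        with open(output_name, "wb") as f:
            f.write(file_response)  # assumes bytes
        return output_name

Called as save_keyframe(client, operation_id, first_frame_time_ms) right after the block above, it would drop the first keyframe next to the script.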
