|
1 | 1 | import cv2
|
2 |
| -import os |
| 2 | +import os |
| 3 | +from pathlib import Path |
3 | 4 |
|
4 | 5 | import pandas as pd
|
| 6 | +import numpy as np |
5 | 7 | import ffmpeg
|
6 | 8 |
|
7 | 9 | from typing import Tuple
|
@@ -33,22 +35,102 @@ def video_info(video_path: str) -> Tuple[int, int, int]:
|
33 | 35 | return video_w, video_h, n_frames
|
34 | 36 |
|
35 | 37 |
|
36 |
def extract_frames(video_file: str, timestamps_df: pd.DataFrame, output_dir: str):
    """Extract frames from a video, one per row of the timestamps table.

    Frames are decoded sequentially (no seeking): the i-th decoded frame is
    paired with the i-th timestamp and saved as ``<timestamp>.jpg`` inside
    ``output_dir``.

    :param video_file: Path of the video to decode.
    :param timestamps_df: DataFrame whose FIRST column holds the timestamps,
        one row per expected frame.
    :param output_dir: Directory where the JPEG frames are written.
    :raises Exception: If the video file cannot be opened.
    """

    # Open the video file
    cap = cv2.VideoCapture(video_file)
    if not cap.isOpened():
        raise Exception(f"Couldn't open video file '{video_file}'")

    try:
        # Take the timestamps from the first column, whatever its name is.
        first_col_name = timestamps_df.columns[0]
        timestamps: pd.Series = timestamps_df[first_col_name]

        # Loop over each timestamp in the CSV file.
        # NOTE(review): seeking with cv2.CAP_PROP_POS_MSEC was deliberately
        # disabled in favor of a plain sequential read; frame order is assumed
        # to match the timestamp order.
        for fnum, timestamp in enumerate(timestamps):
            ret, frame = cap.read()
            if not ret:
                # The video ran out before all expected timestamps were
                # consumed; stop instead of polling an exhausted stream once
                # per remaining timestamp.
                print(f"At frame {fnum}, no more frames to extract from video '{video_file}'. Expected {len(timestamps)} frames.")
                break

            frame_path = os.path.join(output_dir, str(timestamp) + ".jpg")
            cv2.imwrite(frame_path, frame)
    finally:
        # Release the video file even if decoding or writing raised.
        cap.release()
|
| 65 | + |
| 66 | + |
def rebuild_video(dir: Path, frames: pd.DataFrame, outfile: Path) -> None:
    """Re-encode a video from per-frame JPEGs, injecting black frames.

    Iterates the ``frames`` table in order: rows marked ``Original`` are read
    from ``dir`` as ``<timestamp>.jpg`` (missing files are skipped), rows
    marked ``Generated`` become black frames of the same geometry. Every
    encoded frame gets its running frame number drawn near the bottom.

    :param dir: Directory containing the extracted JPEG frames.
    :param frames: DataFrame with columns ``timestamp`` and ``generated``
        (values ``Original`` or ``Generated``).
    :param outfile: Path of the output video (overwritten if present).
    :raises Exception: If the first usable row is ``Generated``, if a frame's
        size differs from the first one, or on an unexpected ``generated``
        value.
    """

    # We don't know the target video size, yet.
    frame_width = None
    frame_height = None

    # Instantiated lazily, once the first image tells us the geometry.
    ffmpeg_video_out_process = None

    try:
        for idx, row in frames.iterrows():
            ts = row["timestamp"]
            gen = row["generated"]

            if gen == "Original":
                frame_path = dir / (str(ts) + ".jpg")
                if not frame_path.exists():
                    print(f"Skipping frame {str(frame_path)}")
                    continue  # BEWARE! Continues the cycle

                # OpenCV loads BGR; the rawvideo pipe below expects RGB.
                img_bgr = cv2.imread(str(frame_path))
                img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)

                if ffmpeg_video_out_process is None:
                    # First usable image: fix the output geometry and start
                    # the ffmpeg encoder reading raw RGB frames from stdin.
                    frame_height, frame_width, _ = img.shape
                    # Counter text size scales with the frame height.
                    font_size = int(frame_height * 0.04)
                    ffmpeg_video_out_process = (
                        ffmpeg
                        .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(frame_width, frame_height))
                        # Overlay the running frame number (%{n}), centered
                        # horizontally near the bottom, on a translucent box.
                        .drawtext(text="%{n}", escape_text=False,
                                  x="(w-tw)/2", y="h-(2*lh)",
                                  fontfile="Arial.ttf", fontsize=font_size, fontcolor="white",
                                  box=1, boxborderw=2, boxcolor="[email protected]")
                        .output(str(outfile), pix_fmt='yuv420p')
                        .overwrite_output()
                        .run_async(pipe_stdin=True)
                    )
                elif img.shape[:2] != (frame_height, frame_width):
                    # A mismatched frame would write the wrong number of
                    # bytes into the rawvideo pipe and silently corrupt
                    # every frame after it.
                    raise Exception(f"Frame '{str(frame_path)}' has size {img.shape[1]}x{img.shape[0]}, expected {frame_width}x{frame_height}")

                # Send the frame to the ffmpeg process
                ffmpeg_video_out_process.stdin.write(img.tobytes())

            elif gen == "Generated":
                # The first frame can NOT be a generated one: we need an
                # original frame to know the geometry. Explicit raise, not
                # assert, so the check survives `python -O`.
                if ffmpeg_video_out_process is None:
                    raise Exception(f"Row {idx} is 'Generated' but no original frame was seen yet: unknown frame size")

                # Create an artificial black frame
                print(f"Injecting Black frame at idx {idx}")
                black_frame = np.zeros((frame_height, frame_width, 3), dtype=np.uint8)
                ffmpeg_video_out_process.stdin.write(black_frame.tobytes())

            else:
                raise Exception(f"Unexpected value '{gen}' in column 'generated' at index {idx}")
    finally:
        # Close the pipe and reap the encoder even on error; otherwise the
        # ffmpeg child process is leaked and the output file stays truncated
        # without being finalized.
        if ffmpeg_video_out_process is not None:
            ffmpeg_video_out_process.stdin.close()
            ffmpeg_video_out_process.wait()
0 commit comments