"""
Video Compositor — assemble shots, transitions, audio into final video.

Handles:
- Shot concatenation with transition effects (cut, fade, dissolve, flash_white)
- Audio track merging (narration + BGM + SFX)
- Final video encoding via ffmpeg or moviepy
"""
@@ -41,7 +41,7 @@ def compose( |
41 | 41 | Args: |
42 | 42 | shot_frame_lists: List of frame lists (one per shot). |
43 | 43 | output_path: Final output video path. |
44 | | - transitions: Transition type between shots ("cut", "fade", "dissolve"). |
| 44 | + transitions: Transition type between shots ("cut", "fade", "dissolve", "flash_white"). |
45 | 45 | audio_paths: Per-shot audio file paths (narration/dialogue). |
46 | 46 | bgm_path: Background music file path. |
47 | 47 | bgm_volume: BGM volume relative to narration (0.0-1.0). |
@@ -97,6 +97,9 @@ def _apply_transitions( |
97 | 97 | all_frames[-overlap:], frames[:overlap] |
98 | 98 | )) |
99 | 99 | all_frames.extend(frames[overlap:]) |
| 100 | + elif transition == "flash_white": |
| 101 | + all_frames.extend(self._flash_white_transition(all_frames[-1], frames[0], duration_frames=10)) |
| 102 | + all_frames.extend(frames[1:]) |
100 | 103 | else: # cut |
101 | 104 | all_frames.extend(frames) |
102 | 105 |
|
@@ -142,6 +145,38 @@ def _dissolve_transition( |
142 | 145 |
|
143 | 146 | return result |
144 | 147 |
|
| 148 | + def _flash_white_transition( |
| 149 | + self, last_frame: Image.Image, first_frame: Image.Image, duration_frames: int = 10 |
| 150 | + ) -> List[Image.Image]: |
| 151 | + """Create a flash-to-white transition for dramatic moments.""" |
| 152 | + result = [] |
| 153 | + arr_last = np.array(last_frame, dtype=np.float32) |
| 154 | + arr_first = np.array(first_frame, dtype=np.float32) |
| 155 | + white = np.full_like(arr_last, 255.0) |
| 156 | + |
| 157 | + fade_out = duration_frames // 3 # frames to fade to white |
| 158 | + hold = max(1, duration_frames // 5) # frames to hold white |
| 159 | + fade_in = duration_frames - fade_out - hold # frames to fade from white |
| 160 | + |
| 161 | + # Fade to white |
| 162 | + for i in range(fade_out): |
| 163 | + alpha = (i + 1) / fade_out |
| 164 | + blended = (arr_last * (1 - alpha) + white * alpha).astype(np.uint8) |
| 165 | + result.append(Image.fromarray(blended)) |
| 166 | + |
| 167 | + # Hold white |
| 168 | + white_frame = Image.fromarray(white.astype(np.uint8)) |
| 169 | + for _ in range(hold): |
| 170 | + result.append(white_frame.copy()) |
| 171 | + |
| 172 | + # Fade from white |
| 173 | + for i in range(fade_in): |
| 174 | + alpha = (i + 1) / fade_in |
| 175 | + blended = (white * (1 - alpha) + arr_first * alpha).astype(np.uint8) |
| 176 | + result.append(Image.fromarray(blended)) |
| 177 | + |
| 178 | + return result |
| 179 | + |
145 | 180 | def _save_frames_as_video(self, frames: List[Image.Image], output_path: str) -> str: |
146 | 181 | """Save frames as video using diffusers utility or ffmpeg.""" |
147 | 182 | try: |
@@ -432,3 +467,57 @@ def _save_with_ffmpeg(self, frames: List[Image.Image], output_path: str) -> str: |
432 | 467 | logger.info(f"Saved video via ffmpeg: {output_path}") |
433 | 468 |
|
434 | 469 | return output_path |
| 470 | + |
| 471 | + def apply_color_lut( |
| 472 | + self, |
| 473 | + frames: List[Image.Image], |
| 474 | + lut_name: str = "xianxia_blue_gold", |
| 475 | + ) -> List[Image.Image]: |
| 476 | + """Apply color grading LUT to frames. |
| 477 | +
|
| 478 | + Built-in LUTs: |
| 479 | + - xianxia_blue_gold: Cool shadows + warm highlights for xianxia atmosphere |
| 480 | + """ |
| 481 | + if lut_name == "xianxia_blue_gold": |
| 482 | + return [self._apply_xianxia_grade(f) for f in frames] |
| 483 | + else: |
| 484 | + logger.warning(f"Unknown LUT: {lut_name}, skipping color grading") |
| 485 | + return frames |
| 486 | + |
| 487 | + def _apply_xianxia_grade(self, frame: Image.Image) -> Image.Image: |
| 488 | + """Apply xianxia blue-gold color grading to a single frame. |
| 489 | +
|
| 490 | + Technique: Split-toning — cool shadows (blue) + warm highlights (gold). |
| 491 | + """ |
| 492 | + arr = np.array(frame, dtype=np.float32) / 255.0 |
| 493 | + |
| 494 | + # Compute luminance for split-toning |
| 495 | + lum = 0.299 * arr[:, :, 0] + 0.587 * arr[:, :, 1] + 0.114 * arr[:, :, 2] |
| 496 | + |
| 497 | + # Shadow mask (dark areas) and highlight mask (bright areas) |
| 498 | + shadow_mask = np.clip(1.0 - lum * 2, 0, 1)[:, :, np.newaxis] |
| 499 | + highlight_mask = np.clip(lum * 2 - 1, 0, 1)[:, :, np.newaxis] |
| 500 | + |
| 501 | + # Blue tint for shadows (subtle) |
| 502 | + shadow_tint = np.array([0.85, 0.9, 1.1]) # less red, less green, more blue |
| 503 | + |
| 504 | + # Gold tint for highlights (subtle) |
| 505 | + highlight_tint = np.array([1.1, 1.05, 0.85]) # more red, slightly more green, less blue |
| 506 | + |
| 507 | + # Apply split-toning |
| 508 | + result = arr.copy() |
| 509 | + result = result * (1.0 - shadow_mask * 0.15) + (result * shadow_tint) * (shadow_mask * 0.15) |
| 510 | + result = result * (1.0 - highlight_mask * 0.15) + (result * highlight_tint) * (highlight_mask * 0.15) |
| 511 | + |
| 512 | + # Slight saturation boost |
| 513 | + gray = lum[:, :, np.newaxis] |
| 514 | + result = gray + (result - gray) * 1.12 |
| 515 | + |
| 516 | + # Subtle contrast (S-curve approximation) |
| 517 | + result = np.clip(result, 0, 1) |
| 518 | + result = result * result * (3 - 2 * result) # smoothstep for gentle contrast |
| 519 | + # Blend 30% of the contrast curve with original to keep it subtle |
| 520 | + result = arr * 0.7 + result * 0.3 |
| 521 | + |
| 522 | + result = np.clip(result * 255, 0, 255).astype(np.uint8) |
| 523 | + return Image.fromarray(result) |
0 commit comments