Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion apps/web/app/s/[videoId]/_components/AuthOverlay.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,8 @@ import { signIn } from "next-auth/react";
import { useId, useState } from "react";
import { toast } from "sonner";
import { trackEvent } from "@/app/utils/analytics";
import OtpForm from "./OtpForm";
import { usePublicEnv } from "@/utils/public-env";
import OtpForm from "./OtpForm";

interface AuthOverlayProps {
isOpen: boolean;
Expand Down
2 changes: 1 addition & 1 deletion apps/web/app/s/[videoId]/_components/Sidebar.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ export const Sidebar = forwardRef<{ scrollToBottom: () => void }, SidebarProps>(
: !(
videoSettings?.disableTranscript ??
data.orgSettings?.disableTranscript
)
)
? "transcript"
: "activity";

Expand Down
105 changes: 95 additions & 10 deletions crates/media-info/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ pub enum AudioInfoError {
}

impl AudioInfo {
pub const MAX_AUDIO_CHANNELS: u16 = 8;
pub const MAX_AUDIO_CHANNELS: u16 = 16;

pub const fn new(
sample_format: Sample,
Expand Down Expand Up @@ -133,29 +133,48 @@ impl AudioInfo {
frame
}

pub fn wrap_frame(&self, data: &[u8]) -> frame::Audio {
let sample_size = self.sample_size();
let interleaved_chunk_size = sample_size * self.channels;
let samples = data.len() / interleaved_chunk_size;
/// Always expects packed input data
pub fn wrap_frame_with_max_channels(
&self,
packed_data: &[u8],
max_channels: usize,
) -> frame::Audio {
let out_channels = self.channels.min(max_channels);

let mut frame = frame::Audio::new(self.sample_format, samples, self.channel_layout());
let sample_size = self.sample_size();
let packed_sample_size = sample_size * self.channels;
let samples = packed_data.len() / packed_sample_size;

let mut frame = frame::Audio::new(
self.sample_format,
samples,
ChannelLayout::default(out_channels as i32),
);
frame.set_rate(self.sample_rate);

if self.channels == 0 {
unreachable!()
} else if self.channels == 1 || frame.is_packed() {
frame.data_mut(0)[0..data.len()].copy_from_slice(data)
} else if self.channels == 1 || (frame.is_packed() && self.channels <= max_channels) {
frame.data_mut(0)[0..packed_data.len()].copy_from_slice(packed_data)
} else if frame.is_packed() && self.channels > out_channels {
for (chunk_index, packed_chunk) in packed_data.chunks(packed_sample_size).enumerate() {
let start = chunk_index * sample_size * out_channels;
let end = start + sample_size * out_channels;

frame.data_mut(0)[start..end].copy_from_slice(&packed_chunk[0..(end - start)]);
}
} else {
// cpal *always* returns interleaved data (i.e. the first sample from every channel, followed
// by the second sample from every channel, et cetera). Many audio codecs work better/primarily
// with planar data, so we de-interleave it here if there is more than one channel.

for (chunk_index, interleaved_chunk) in data.chunks(interleaved_chunk_size).enumerate()
for (chunk_index, interleaved_chunk) in
packed_data.chunks(packed_sample_size).enumerate()
{
let start = chunk_index * sample_size;
let end = start + sample_size;

for channel in 0..self.channels {
for channel in 0..self.channels.min(max_channels) {
let channel_start = channel * sample_size;
let channel_end = channel_start + sample_size;
frame.data_mut(channel)[start..end]
Expand All @@ -166,6 +185,17 @@ impl AudioInfo {

frame
}

/// Wraps packed (interleaved) sample bytes in an FFmpeg audio frame,
/// keeping every channel described by this `AudioInfo`.
///
/// Always expects packed input data
pub fn wrap_frame(&self, data: &[u8]) -> frame::Audio {
// Delegate with `max_channels == self.channels`, i.e. no channel
// truncation — this is the identity case of the generalized helper.
self.wrap_frame_with_max_channels(data, self.channels)
}

pub fn with_max_channels(&self, channels: u16) -> Self {
let mut this = *self;
this.channels = this.channels.min(channels as usize);
this
}
}

pub enum RawVideoFormat {
Expand Down Expand Up @@ -292,3 +322,58 @@ pub fn ffmpeg_sample_format_for(sample_format: SampleFormat) -> Option<Sample> {
_ => None,
}
}

#[cfg(test)]
mod tests {
use super::*;

// Unit tests for `AudioInfo::wrap_frame` / `wrap_frame_with_max_channels`.
// All tests use u8 samples so each byte is one sample, making the
// expected plane contents easy to spell out literally.
mod audio_info {
use super::*;

#[test]
fn wrap_packed_frame() {
// Packed (interleaved) output format: the input bytes are copied
// verbatim into plane 0 with no de-interleaving.
// NOTE(review): assuming `new_raw(format, rate, channels)` — the 4-plane
// planar test below implies the last argument is the channel count; confirm.
let info = AudioInfo::new_raw(Sample::U8(Type::Packed), 2, 4);

// 2 frames of 4 interleaved channels: [c0 c1 c2 c3 | c0 c1 c2 c3].
let input = &[1, 2, 3, 4, 1, 2, 3, 4];
let frame = info.wrap_frame(input);

assert_eq!(&frame.data(0)[0..input.len()], input);
}

#[test]
fn wrap_planar_frame() {
// Planar output format: interleaved input must be split so that each
// channel's samples land in their own plane.
let info = AudioInfo::new_raw(Sample::U8(Type::Planar), 2, 4);

let input = &[1, 2, 3, 4, 1, 2, 3, 4];
let frame = info.wrap_frame(input);

// One plane per channel; plane N holds both samples of channel N.
assert_eq!(frame.planes(), 4);
assert_eq!(&frame.data(0)[0..2], &[1, 1]);
assert_eq!(&frame.data(1)[0..2], &[2, 2]);
assert_eq!(&frame.data(2)[0..2], &[3, 3]);
assert_eq!(&frame.data(3)[0..2], &[4, 4]);
}

#[test]
fn wrap_packed_frame_max_channels() {
// Packed output with truncation: 4 input channels capped at 2, so only
// the first 2 bytes of every interleaved chunk survive, still packed.
let info = AudioInfo::new_raw(Sample::U8(Type::Packed), 2, 4);

let input = &[1, 2, 3, 4, 1, 2, 3, 4];
let frame = info.wrap_frame_with_max_channels(input, 2);

assert_eq!(&frame.data(0)[0..4], &[1, 2, 1, 2]);
}

#[test]
fn wrap_planar_frame_max_channels() {
// Planar output with truncation: only the first 2 of 4 channels are
// de-interleaved; channels beyond the cap are dropped entirely.
let info = AudioInfo::new_raw(Sample::U8(Type::Planar), 2, 4);

let input = &[1, 2, 3, 4, 1, 2, 3, 4];
let frame = info.wrap_frame_with_max_channels(input, 2);

assert_eq!(frame.planes(), 2);
assert_eq!(&frame.data(0)[0..2], &[1, 1]);
assert_eq!(&frame.data(1)[0..2], &[2, 2]);
}
}
}
18 changes: 7 additions & 11 deletions crates/recording/examples/recording-cli.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
use cap_recording::{screen_capture::ScreenCaptureTarget, *};
use cap_recording::{feeds::*, screen_capture::ScreenCaptureTarget, *};
use kameo::Actor as _;
use scap_targets::Display;
use std::time::Duration;
use std::{sync::Arc, time::Duration};
use tracing::*;

#[tokio::main]
Expand Down Expand Up @@ -42,31 +43,26 @@ pub async fn main() {

// mic_feed
// .ask(microphone::SetInput {
// label:
// // MicrophoneFeed::list()
// // .into_iter()
// // .find(|(k, _)| k.contains("Focusrite"))
// MicrophoneFeed::default()
// .map(|v| v.0)
// .unwrap(),
// label: MicrophoneFeed::default_device().map(|v| v.0).unwrap(),
// })
// .await
// .unwrap()
// .await
// .unwrap();

tokio::time::sleep(Duration::from_millis(10)).await;
// tokio::time::sleep(Duration::from_millis(10)).await;

let handle = instant_recording::Actor::builder(
dir.path().into(),
ScreenCaptureTarget::Display {
id: Display::primary().id(),
},
)
// .with_system_audio(true)
.with_system_audio(true)
// .with_camera_feed(std::sync::Arc::new(
// camera_feed.ask(feeds::camera::Lock).await.unwrap(),
// ))
// .with_mic_feed(Arc::new(mic_feed.ask(microphone::Lock).await.unwrap()))
.build(
#[cfg(target_os = "macos")]
cidre::sc::ShareableContent::current().await.unwrap(),
Expand Down
Loading