Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
20 commits
Select commit Hold shift + click to select a range
ae14589
restyled most components for better visuals
JiBing17 Mar 1, 2025
d8d0cbb
added light and dark mode functionality for some components
JiBing17 Mar 2, 2025
71c5d7c
added mobile responsive layout for smaller screens and removed redund…
JiBing17 Mar 2, 2025
1804766
added ScrollView in replacement of regular div to remove unnecessary h…
JiBing17 Mar 2, 2025
185c639
added animations for smoother light and dark mode transitions
JiBing17 Mar 3, 2025
e63b901
removed border from buttons and added shadow instead
JiBing17 Mar 5, 2025
de27a3c
made score display on mobile responsive with horizontal scrolling and …
JiBing17 Mar 5, 2025
204f5a0
adjusted header and footer styles to stick on top / bottom when scrol…
JiBing17 Mar 5, 2025
68252b5
cleaned up code structure and removed themestyles
JiBing17 Mar 9, 2025
a830d50
adjusted small screen breakpoint to include landscape mode devices
JiBing17 Mar 10, 2025
3448fbd
added function to generate and store session token on load within the…
JiBing17 Mar 10, 2025
e418775
replaced logic used to display score sheet from app.py to score.ts
JiBing17 Mar 10, 2025
40e7349
added basic cursor movement logic based on step and speed input
JiBing17 Mar 24, 2025
b68c21b
removed node modules from being tracked
JiBing17 Mar 24, 2025
5d85bf2
updated comments and formatted code
JiBing17 Mar 27, 2025
6a9c0e3
fixed merge conflicts
JiBing17 Mar 27, 2025
db2d7ce
implemented file upload logic for standalone app
JiBing17 Apr 2, 2025
ac332f8
converted features.py and added real-time chroma feature extraction f…
JiBing17 Apr 18, 2025
72fab5b
fixed merge conflict
JiBing17 Apr 18, 2025
21ea64e
Fix: resolved linter errors for AudioWorklet globals
JiBing17 Apr 18, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion backend/src/features.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,6 @@ def insert(self, y):
chroma[:] = 1
length = 12**(0.5)
chroma = chroma / length

return chroma


Expand Down
279 changes: 158 additions & 121 deletions frontend/companion-app/App.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@ import reducer_function from "./Dispatch";
import ScoreDisplay from "./components/ScoreDisplay";
import { SynthesizeButton } from "./components/SynthesizeButton";
import Icon from 'react-native-vector-icons/Feather';
import { ChromaMaker } from "./utils/features";
import FontAwesome from 'react-native-vector-icons/FontAwesome';

// Define the main application component
export default function App() {
Expand Down Expand Up @@ -61,6 +63,56 @@ export default function App() {
setSessionToken(newToken)
}, []);

// Chroma vector state: 12 pitch-class energies, refreshed on every chunk of mic audio.
const [chroma, setChroma] = useState<number[]>(new Array(12).fill(0));
const [started, setStarted] = useState(false); // true while the user has the live-microphone option enabled

// Capture microphone audio through an AudioWorklet and stream chroma features
// into state while `started` is true; tear everything down when it goes false.
useEffect(() => {
  let audioCtx: AudioContext | undefined; // owns the audio graph; set once init succeeds
  let micStream: MediaStream | undefined; // raw microphone stream from getUserMedia
  let cancelled = false; // set by cleanup; guards against the async init finishing late

  const initAudio = async () => {
    try {
      micStream = await navigator.mediaDevices.getUserMedia({ audio: true }); // request mic access
      if (cancelled) {
        // Effect was torn down while we awaited permission — release the mic immediately
        // instead of leaving a hot microphone behind.
        micStream.getTracks().forEach((track) => track.stop());
        return;
      }
      audioCtx = new AudioContext();
      // NOTE(review): addModule resolves its URL relative to the page, not this source
      // file — confirm '../utils/mic-processor.js' is actually reachable at runtime.
      await audioCtx.audioWorklet.addModule('../utils/mic-processor.js');
      if (cancelled) return; // cleanup below stops the mic and closes the context

      const source = audioCtx.createMediaStreamSource(micStream); // mic -> source node
      const workletNode = new AudioWorkletNode(audioCtx, 'mic-processor'); // custom chunking processor
      source.connect(workletNode);
      workletNode.connect(audioCtx.destination); // keeps the worklet pulled by the graph

      // Chroma extractor sized to this context's actual sample rate.
      const n_fft = 4096;
      const chromaMaker = new ChromaMaker(audioCtx.sampleRate, n_fft);

      // Each message from the worklet carries one Float32Array chunk of mic samples.
      workletNode.port.onmessage = (event) => {
        const audioChunk = event.data as Float32Array;
        try {
          // Extract chroma features for this chunk and publish them to state.
          setChroma(chromaMaker.insert(audioChunk));
        } catch (e) {
          console.error('Chroma extraction error:', e);
        }
      };
    } catch (err) {
      console.error('Failed to initialize audio:', err);
    }
  };

  // Only spin up the audio pipeline once the user opts in.
  if (started) {
    initAudio();
  }

  // Cleanup: stop the microphone and close the audio context. Setting `cancelled`
  // also makes a still-pending initAudio() release whatever it acquires after this
  // point, so toggling `started` quickly cannot leak the mic or the context.
  return () => {
    cancelled = true;
    if (micStream) micStream.getTracks().forEach((track) => track.stop());
    if (audioCtx) audioCtx.close();
  };
}, [started]);

////////////////////////////////////////////////////////////////////////////////
// The lines below were modified, copied and pasted out of the audio recorder object
// (which never really needed a UI).
Expand All @@ -70,112 +122,112 @@ export default function App() {

// Audio-related states and refs
// State for whether we have microphone permissions - is set to true on first trip to playmode
const [permission, setPermission] = useState(false);
// Assorted audio-related objects in need of reference
// Tend to be re-created upon starting a recording
const mediaRecorder = useRef<MediaRecorder>(
new MediaRecorder(new MediaStream()),
);
const [stream, setStream] = useState<MediaStream>(new MediaStream());
const [audioChunks, setAudioChunks] = useState<Blob[]>([]);
// const [permission, setPermission] = useState(false);
// // Assorted audio-related objects in need of reference
// // Tend to be re-created upon starting a recording
// const mediaRecorder = useRef<MediaRecorder>(
// new MediaRecorder(new MediaStream()),
// );
// const [stream, setStream] = useState<MediaStream>(new MediaStream());
// const [audioChunks, setAudioChunks] = useState<Blob[]>([]);

const audioContextRef = useRef<any>(null);
const analyserRef = useRef<any>(null);
const dataArrayRef = useRef<any>(null);
const startTimeRef = useRef<any>(null);
// const audioContextRef = useRef<any>(null);
// const analyserRef = useRef<any>(null);
// const dataArrayRef = useRef<any>(null);
// const startTimeRef = useRef<any>(null);

// Audio-related functions
/////////////////////////////////////////////////////////
// This function sends a synchronization request and updates the state with the result
const UPDATE_INTERVAL = 100;
// // Audio-related functions
// /////////////////////////////////////////////////////////
// // This function sends a synchronization request and updates the state with the result
// const UPDATE_INTERVAL = 100;

const getAPIData = async () => {
analyserRef.current?.getByteTimeDomainData(dataArrayRef.current);
const {
playback_rate: newPlayRate,
estimated_position: estimated_position,
} = await synchronize(state.sessionToken, Array.from(dataArrayRef.current), state.timestamp);
// const getAPIData = async () => {
// analyserRef.current?.getByteTimeDomainData(dataArrayRef.current);
// const {
// playback_rate: newPlayRate,
// estimated_position: estimated_position,
// } = await synchronize(state.sessionToken, Array.from(dataArrayRef.current), state.timestamp);

dispatch({
type: "increment",
time: estimated_position,
rate: newPlayRate,
});
}
// dispatch({
// type: "increment",
// time: estimated_position,
// rate: newPlayRate,
// });
// }

// This function established new recording instances when re-entering play mode
const startRecording = async () => {
// It's possible some of these can be removed; not sure which relate to the
// making of the recorded object we don't need and which relate to the
// buffer we send to the backend.
startTimeRef.current = Date.now();
//create new Media recorder instance using the stream
const media = new MediaRecorder(stream, { mimeType: "audio/webm" });
//set the MediaRecorder instance to the mediaRecorder ref
mediaRecorder.current = media;
//invokes the start method to start the recording process
mediaRecorder.current.start();
let localAudioChunks: Blob[] = [];
mediaRecorder.current.ondataavailable = (event) => {
if (typeof event.data === "undefined") return;
if (event.data.size === 0) return;
localAudioChunks.push(event.data);
};
setAudioChunks(localAudioChunks);
// // This function established new recording instances when re-entering play mode
// const startRecording = async () => {
// // It's possible some of these can be removed; not sure which relate to the
// // making of the recorded object we don't need and which relate to the
// // buffer we send to the backend.
// startTimeRef.current = Date.now();
// //create new Media recorder instance using the stream
// const media = new MediaRecorder(stream, { mimeType: "audio/webm" });
// //set the MediaRecorder instance to the mediaRecorder ref
// mediaRecorder.current = media;
// //invokes the start method to start the recording process
// mediaRecorder.current.start();
// let localAudioChunks: Blob[] = [];
// mediaRecorder.current.ondataavailable = (event) => {
// if (typeof event.data === "undefined") return;
// if (event.data.size === 0) return;
// localAudioChunks.push(event.data);
// };
// setAudioChunks(localAudioChunks);

audioContextRef.current = new window.AudioContext();
const source = audioContextRef.current.createMediaStreamSource(stream);
analyserRef.current = audioContextRef.current.createAnalyser();
analyserRef.current.fftSize = 2048;
source.connect(analyserRef.current);
// audioContextRef.current = new window.AudioContext();
// const source = audioContextRef.current.createMediaStreamSource(stream);
// analyserRef.current = audioContextRef.current.createAnalyser();
// analyserRef.current.fftSize = 2048;
// source.connect(analyserRef.current);

const bufferLength = analyserRef.current.frequencyBinCount;
dataArrayRef.current = new Uint8Array(bufferLength);
// const bufferLength = analyserRef.current.frequencyBinCount;
// dataArrayRef.current = new Uint8Array(bufferLength);

getAPIData(); // run the first call
};
// getAPIData(); // run the first call
// };

//stops the recording instance
const stopRecording = () => {
mediaRecorder.current.stop();
audioContextRef.current?.close();
};
// //stops the recording instance
// const stopRecording = () => {
// mediaRecorder.current.stop();
// audioContextRef.current?.close();
// };

// Function to get permission to use browser microphone
const getMicrophonePermission = async () => {
if ("MediaRecorder" in window) {
try {
const streamData = await navigator.mediaDevices.getUserMedia({
audio: true,
video: false,
});
setPermission(true);
setStream(streamData);
} catch (err) {
alert((err as Error).message);
}
} else {
alert("The MediaRecorder API is not supported in your browser.");
}
};
// // Function to get permission to use browser microphone
// const getMicrophonePermission = async () => {
// if ("MediaRecorder" in window) {
// try {
// const streamData = await navigator.mediaDevices.getUserMedia({
// audio: true,
// video: false,
// });
// setPermission(true);
// setStream(streamData);
// } catch (err) {
// alert((err as Error).message);
// }
// } else {
// alert("The MediaRecorder API is not supported in your browser.");
// }
// };

/////////////////////////////////////////////
// Audio-related effects
// Get microphone permission on first time entering play state
useEffect(() => {
if (!permission) getMicrophonePermission();
}, [state.inPlayMode]);
// /////////////////////////////////////////////
// // Audio-related effects
// // Get microphone permission on first time entering play state
// useEffect(() => {
// if (!permission) getMicrophonePermission();
// }, [state.inPlayMode]);

// Start and stop recording when player is or isn't playing
useEffect(() => {
if (state.playing) startRecording();
else stopRecording();
}, [state.playing]);
// // Start and stop recording when player is or isn't playing
// useEffect(() => {
// if (state.playing) startRecording();
// else stopRecording();
// }, [state.playing]);

// Keep synchronizing while playing
useEffect(() => {
if (state.playing) setTimeout(getAPIData, UPDATE_INTERVAL);
}, [state.timestamp])
// // Keep synchronizing while playing
// useEffect(() => {
// if (state.playing) setTimeout(getAPIData, UPDATE_INTERVAL);
// }, [state.timestamp])

// State to conditionally render the style type of the components (can only be "light" or "dark")
const [theme, setTheme] = useState<"light" | "dark">("light");
Expand Down Expand Up @@ -255,7 +307,6 @@ export default function App() {
// Boolean used for dynamic display (row or column)
const isSmallScreen = width < 960;


////////////////////////////////////////////////////////////////////////////////
// Render the component's UI
////////////////////////////////////////////////////////////////////////////////
Expand All @@ -264,10 +315,20 @@ export default function App() {
{/* Header with image */}
<Animated.View style={[styles.menu_bar, {backgroundColor: menubarBackgroundColor, height: isSmallScreen? 40: 80}]}>
<Image source={require('./assets/companion.png')} style={[styles.logo, {height: isSmallScreen? 30: 100, width: isSmallScreen? 100: 200}]}/>
<TouchableOpacity onPress={toggleTheme}>
<Icon name={theme === 'light' ? 'sun' : 'moon'} size={isSmallScreen? 15: 30} color="white" />
</TouchableOpacity>
<View style={{ flexDirection: 'row', alignItems: 'center', gap: 10 }}>
<TouchableOpacity onPress={() => setStarted(!started)}>
<FontAwesome
name={started ? 'microphone' : 'microphone-slash'}
size={isSmallScreen ? 15 : 30}
color="white"
/>
</TouchableOpacity>
<TouchableOpacity onPress={toggleTheme}>
<Icon name={theme === 'light' ? 'sun' : 'moon'} size={isSmallScreen? 15: 30} color="white" />
</TouchableOpacity>

</View>

</Animated.View>

{/* Provides safe area insets for mobile devices */}
Expand Down Expand Up @@ -326,41 +387,17 @@ export default function App() {
<ScoreDisplay state={state} dispatch={dispatch} />
</Animated.View>
</ScrollView>


</View>

{/* Footer display for status */}
<StatusBar style="auto" />
{/* Automatically adjusts the status bar style */}
</ScrollView>
</Animated.View>
<AudioPlayer state={state} menuStyle={{ backgroundColor: menubarBackgroundColor }}/>

</SafeAreaView>
);
}

// Theme-based styles (not needed since we have animated API to do light and dark transitions smoother)
// const themeStyles = {
// light: {
// container: { backgroundColor: '#F5F5F5' },
// menu_bar: { backgroundColor: '#2C3E50' },
// sidebar: { backgroundColor: '#ECF0F1' },
// mainContent: { backgroundColor: '#FFFFFF' },
// text: { color: "#2C3E50", fontWeight: "bold"} as TextStyle, // use for typescript syntax
// button: { backgroundColor: "#2C3E50"}
// },
// dark: {
// container: { backgroundColor: '#0F0F0F' },
// menu_bar: { backgroundColor: '#1A252F' },
// sidebar: { backgroundColor: '#4A627A' },
// mainContent: { backgroundColor: '#6B87A3' },
// text: { color: '#ffffff', fontWeight: "bold"} as TextStyle, // use for typescript syntax
// button: { backgroundColor: "#ffffff"}
// },
// };

// Define styles for the components using StyleSheet
const styles = StyleSheet.create({

Expand Down
20 changes: 11 additions & 9 deletions frontend/companion-app/Dispatch.ts
Original file line number Diff line number Diff line change
Expand Up @@ -100,15 +100,17 @@ const reducer_function = (state: any, action: any) => {
},
};

// Adds uploaded score's name to list
case "new_score_from_upload":
return {
...state,
...{
scores: [...state.scores, action.score],
score: action.score.filename,
},
};
case "new_score_from_upload":
return {
...state, // Keep the existing state
scores: [...state.scores, action.score.filename], // Add the new score filename to the scores array
score: action.score.filename, // Set the current score to the newly uploaded filename
scoreContents: {
...state.scoreContents, // Keep existing score content
[action.score.filename]: action.score.content, // Add the new score content to the scoreContents object using the filename as the key
},
};

default: // If no valid type, return state, otherwise the function returns null and the state is gone.
return state;
}
Expand Down
4 changes: 2 additions & 2 deletions frontend/companion-app/components/ScoreDisplay.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -87,8 +87,8 @@ export default function ScoreDisplay({
);

osdRef.current = osm;
// Retrieve the local XML content based on the selected score.
const xmlContent = scoresData[selectedScore];
// If score name is a key within ScoreContents use the xml content value within that key, otherwise access xml content through the static key value mapping defined within scores.ts
const xmlContent = (state.scoreContents && state.scoreContents[selectedScore]) || scoresData[selectedScore];
// Error handling if no xml content for selected score is found
if (!xmlContent) {
console.error("Score content not found for:", selectedScore);
Expand Down
Loading