Zeenmo is a ChatGPT-based application that helps you solve relationship problems in your own way, through conversation.
- Integration with ChatGPT (gpt-4o)
- Implementation of audio streaming
- Implementation of speech-to-text using the OpenAI Whisper model
# Build the conversation context and request a chat completion from gpt-4o.
# NOTE(review): openai.ChatCompletion is the legacy (<1.0) Python SDK
# interface — newer SDKs use client.chat.completions.create(); confirm the
# pinned openai version before reusing this snippet.
import openai
messages = [
{"role": "system", "content": "You are Zeenmo. ..."},
{"role": "user", "content": "How are you today?"},
{"role": "assistant", "content": "I am fine. Thank you."},
]
response = openai.ChatCompletion.create(
model="gpt-4o",
# Low temperature (0.2) keeps replies consistent rather than creative.
temperature=0.2,
messages=messages,
)
# NOTE(review): the JS `import { ReactMic } ...` fused onto the next line
# belongs to the following React snippet — document-extraction artifact.
print(response["choices"][0]["message"]["content"])import { ReactMic } from 'react-mic';
// React state flag that drives the <ReactMic> recorder on/off.
const [record, setRecord] = useState(false);
setRecord(true); // Start Audio recording
setRecord(false); // Finish Audio recording
// Callback invoked by <ReactMic> with the recorded audio blob.
const onStop = (recordedBlob) => { // Call when the audio recording is finished
console.log(recordedBlob);
};
// Hidden waveform recorder: `record` toggles capture, `onStop` receives the
// finished blob. (JSX fragment — lives inside a component not shown here.)
<ReactMic
record={record}
className="sound-wave hidden"
onStop={onStop}
strokeColor="#000000"
backgroundColor="#FF4081"
visualSetting="sinewave"
visualSettingFillColor="#ffffff"
/>navigator.mediaDevices.getUserMedia({ audio: true }) // Check the audio device
// Request microphone access; on success, start recording and feed the live
// stream into the Web Audio analyser below.
.then((stream) => {
setRecord(true);
handleStream(stream); // Process with stream data
})
.catch((error) => {
console.error('Error accessing microphone:', error);
});const handleStream = (stream) => {
// Analyse the live mic stream: sample time-domain data on every animation
// frame and stop recording once a period of silence is detected.
let audioCtx = new AudioContext();
const source = audioCtx.createMediaStreamSource(stream);
const analyser = audioCtx.createAnalyser();
analyser.fftSize = 2048; // 2048-sample window -> frequencyBinCount of 1024
const bufferLength = analyser.frequencyBinCount;
const dataArray = new Uint8Array(bufferLength);
source.connect(analyser);
function draw() {
analyser.getByteTimeDomainData(dataArray);
for (var x = 0; x < bufferLength; x++) {
...
}
// NOTE(review): `count` is never declared or updated in this fragment —
// presumably incremented per near-silent sample inside the elided loop
// above; confirm against the full implementation.
if (count < 100 * bufferLength) { // Finish the audio recording when no noise for 3s
requestAnimationFrame(draw);
} else {
setRecord(false);
}
}
// NOTE(review): `audio = request.files['file']` fused after the closing
// brace is Python/Flask code from the next snippet (extraction artifact).
}audio = request.files['file']
# Whisper speech-to-text (Flask endpoint fragment). `audio` is the uploaded
# file taken from request.files['file'] on the previous line.
params = {
"language": "en"
}
# Persist the upload next to this module before handing it to the API.
filename = 'recording.wav'
filepath = os.path.join(os.path.dirname(__file__), filename)
audio.save(filepath)
# NOTE(review): file handle is never closed — prefer a `with open(...)` block.
# openai.Audio.transcribe is the legacy (<1.0) SDK interface; confirm version.
audio_file= open(filepath, "rb")
transcript = openai.Audio.transcribe("whisper-1", audio_file, **params)
# NOTE(review): the JS Lottie import fused onto the next line belongs to the
# following React snippet — document-extraction artifact.
print(transcript["text"])import Lottie from 'react-lottie';
// Lottie waveform animation asset used as the recording visualiser.
import animationData from "@/widgets/lottie/waveform";
const defaultOptions = {
loop: true,
autoplay: true,
animationData: animationData,
rendererSettings: {
preserveAspectRatio: "xMidYMid slice"
}
};
// NOTE(review): `isStopped` is not defined in this fragment — presumably a
// state flag toggled with the recording lifecycle; confirm in the component.
<Lottie
options={defaultOptions}
height={100}
isStopped={isStopped}
/>import { motion } from "framer-motion"
// Intro banner: fades in, then drifts upward while fading out, over a 3s
// animation that starts after a 1s delay (JSX fragment).
<motion.div
initial={{ opacity: 0 }}
animate={{
transition: {
duration: 3,
delay: 1,
},
// Keyframes: invisible -> fully visible (held) -> fade while rising.
opacity: [0, 1, 1, 0.5, 0],
y: [0, -50, -50, -150, -400],
}}
exit="exit"
>
I'm Zeenmo!
</motion.div>import mixpanel from 'mixpanel-browser';
// Mixpanel analytics bootstrap; real project token redacted as "XXXXXX".
const mixpanelToken = "XXXXXX";
mixpanel.init(mixpanelToken);
// NOTE(review): `id` and `action` are not defined in this fragment —
// presumably the event name and its properties payload; confirm call site.
mixpanel.track(id, action);
// Example events sent through the app's wrapper service:
// mixpanelService.track("audio_recording");
// mixpanelService.track("refreshed", { msg_length: length });