11import { useState , useEffect , useRef , memo } from "react" ;
22import { createPortal } from "react-dom" ;
3- import { ArrowLeft , Play , Pause , List , AlignLeft , MessageCircle , Download , FileText , FileJson , FileImage , Check , StickyNote , Plus , X , Sparkles , Pencil , ChevronUp , ChevronDown , Info , Clock , Settings , Users } from "lucide-react" ;
3+ import { ArrowLeft , Play , Pause , List , AlignLeft , MessageCircle , Download , FileText , FileJson , FileImage , Check , StickyNote , Plus , X , Sparkles , Pencil , ChevronUp , ChevronDown , Info , Clock , Settings , Users , Loader2 } from "lucide-react" ;
44import WaveSurfer from "wavesurfer.js" ;
55import { Button } from "./ui/button" ;
66import {
@@ -164,6 +164,12 @@ const formatDuration = (seconds: number): string => {
164164 return `${ hours } h ${ remainingMinutes } m ${ remainingSeconds . toFixed ( 0 ) } s` ;
165165} ;
166166
167+ const formatElapsedTime = ( seconds : number ) : string => {
168+ const minutes = Math . floor ( seconds / 60 ) ;
169+ const remainingSeconds = seconds % 60 ;
170+ return `${ minutes } :${ remainingSeconds . toString ( ) . padStart ( 2 , '0' ) } ` ;
171+ } ;
172+
167173export const AudioDetailView = memo ( function AudioDetailView ( { audioId } : AudioDetailViewProps ) {
168174 const { navigate } = useRouter ( ) ;
169175 const { theme } = useTheme ( ) ;
@@ -186,6 +192,12 @@ export const AudioDetailView = memo(function AudioDetailView({ audioId }: AudioD
186192 // Speaker renaming state
187193 const [ speakerRenameDialogOpen , setSpeakerRenameDialogOpen ] = useState ( false ) ;
188194 const [ speakerMappings , setSpeakerMappings ] = useState < Record < string , string > > ( { } ) ;
195+
196+ // Polling state
197+ const [ pollingInterval , setPollingInterval ] = useState < NodeJS . Timeout | null > ( null ) ;
198+ const [ processingStartTime , setProcessingStartTime ] = useState < Date | null > ( null ) ;
199+ const [ elapsedTime , setElapsedTime ] = useState < number > ( 0 ) ;
200+ const [ currentStatus , setCurrentStatus ] = useState < string | null > ( null ) ;
189201 const waveformRef = useRef < HTMLDivElement > ( null ) ;
190202 const wavesurferRef = useRef < WaveSurfer | null > ( null ) ;
191203 const transcriptRef = useRef < HTMLDivElement > ( null ) ;
@@ -268,6 +280,71 @@ useEffect(() => {
268280 } ) ( ) ;
269281} , [ audioId ] ) ;
270282
// Polling mechanism for status updates.
// While the job is "processing" or "pending", polls fetchStatusOnly() every
// 3 seconds; otherwise tears the timer down and resets the elapsed-time UI.
// NOTE(review): `pollingInterval` is read here but is not in the dependency
// array, so the value seen on re-runs can be stale; the `interval`-scoped
// cleanup below is what actually guarantees the old timer is cleared.
// NOTE(review): storing the timer id in React state (setPollingInterval)
// triggers an extra render per (re)start — a ref would avoid that; confirm
// before changing, since other effects key off `pollingInterval`.
useEffect(() => {
  // Start polling if job is processing or pending.
  // Prefer the live polled status; fall back to the status loaded with the file.
  const status = currentStatus || audioFile?.status;
  if (audioFile && status && (status === "processing" || status === "pending")) {
    // Set processing start time if not already set (only once "processing" begins,
    // so queue time is excluded from the elapsed counter).
    if (!processingStartTime && status === "processing") {
      setProcessingStartTime(new Date());
    }

    // Clear any existing interval before starting a new one.
    if (pollingInterval) {
      clearInterval(pollingInterval);
    }

    // Start polling every 3 seconds.
    const interval = setInterval(async () => {
      await fetchStatusOnly();
    }, 3000);

    setPollingInterval(interval);

    // Cleanup interval on unmount or when status changes; captures the
    // locally created `interval`, so it always clears the right timer.
    return () => {
      if (interval) {
        clearInterval(interval);
      }
    };
  } else {
    // Stop polling if status is completed, failed, or uploaded.
    if (pollingInterval) {
      clearInterval(pollingInterval);
      setPollingInterval(null);
    }
    // Clear processing start time if completed or failed, resetting the
    // elapsed-time display for any future run.
    if (audioFile && (audioFile.status === "completed" || audioFile.status === "failed")) {
      setProcessingStartTime(null);
      setElapsedTime(0);
    }
  }
}, [currentStatus, audioFile?.status, audioId]); // Re-run when status or audioId changes
324+
325+ // Update elapsed time counter
326+ useEffect ( ( ) => {
327+ const status = currentStatus || audioFile ?. status ;
328+ if ( processingStartTime && status === "processing" ) {
329+ const timer = setInterval ( ( ) => {
330+ const now = new Date ( ) ;
331+ const elapsed = Math . floor ( ( now . getTime ( ) - processingStartTime . getTime ( ) ) / 1000 ) ;
332+ setElapsedTime ( elapsed ) ;
333+ } , 1000 ) ;
334+
335+ return ( ) => clearInterval ( timer ) ;
336+ }
337+ } , [ processingStartTime , currentStatus , audioFile ?. status ] ) ;
338+
339+ // Cleanup polling on unmount
340+ useEffect ( ( ) => {
341+ return ( ) => {
342+ if ( pollingInterval ) {
343+ clearInterval ( pollingInterval ) ;
344+ }
345+ } ;
346+ } , [ pollingInterval ] ) ;
347+
271348// Fetch speaker mappings when audio file is loaded and has diarization enabled
272349useEffect ( ( ) => {
273350 if ( audioFile ) {
@@ -373,6 +450,31 @@ useEffect(() => {
373450 }
374451 } , [ currentWordIndex ] ) ;
375452
/**
 * Lightweight poll: fetches only the transcription job status for the current
 * audio file and mirrors it into `currentStatus`, avoiding a full re-download
 * of file details on every tick. When the status transitions
 * "processing" -> "completed", re-fetches full details (transcript included)
 * via fetchAudioDetails(). Network errors are logged and swallowed so one
 * failed poll does not break the polling loop.
 */
const fetchStatusOnly = async () => {
  try {
    const response = await fetch(`/api/v1/transcription/${audioId}`, {
      headers: {
        ...getAuthHeaders(),
      },
    });

    if (response.ok) {
      const data = await response.json();
      // NOTE(review): `currentStatus`/`audioFile` are captured from the render
      // in which the polling effect created this closure, so `previousStatus`
      // may be stale when invoked from setInterval — confirm the
      // completed-transition branch still fires reliably.
      const previousStatus = currentStatus || audioFile?.status;

      // Only update the status state, not the entire audioFile
      setCurrentStatus(data.status);

      // If status changed to completed, fetch full details
      if (data.status === "completed" && previousStatus === "processing") {
        await fetchAudioDetails();
      }
    }
  } catch (error) {
    console.error('Error fetching status:', error);
  }
};
477+
376478 const fetchAudioDetails = async ( ) => {
377479 try {
378480 // Fetch audio file details
@@ -386,6 +488,7 @@ useEffect(() => {
386488 if ( audioResponse . ok ) {
387489 const audioData = await audioResponse . json ( ) ;
388490 setAudioFile ( audioData ) ;
491+ setCurrentStatus ( audioData . status ) ;
389492
390493 // Fetch transcript if completed
391494 if ( audioData . status === "completed" ) {
@@ -1584,26 +1687,54 @@ useEffect(() => {
15841687
15851688
15861689 { /* Status Messages */ }
1587- { audioFile . status !== "completed" && (
1690+ { ( currentStatus || audioFile . status ) !== "completed" && (
15881691 < div className = "bg-white dark:bg-gray-700 rounded-xl p-6" >
15891692 < div className = "text-center" >
1590- < h2 className = "text-xl font-semibold text-gray-900 dark:text-gray-50 mb-2" >
1591- { audioFile . status === "processing" &&
1592- "Transcription in Progress" }
1593- { audioFile . status === "pending" && "Transcription Queued" }
1594- { audioFile . status === "uploaded" && "Ready for Transcription" }
1595- { audioFile . status === "failed" && "Transcription Failed" }
1596- </ h2 >
1597- < p className = "text-gray-600 dark:text-gray-400" >
1598- { audioFile . status === "processing" &&
1599- "Please wait while we process your audio file..." }
1600- { audioFile . status === "pending" &&
1601- "Your audio file is in the transcription queue." }
1602- { audioFile . status === "uploaded" &&
1603- "Start transcription from the audio files list." }
1604- { audioFile . status === "failed" &&
1605- "There was an error processing your audio file." }
1606- </ p >
1693+ { /* Processing Status with Animation */ }
1694+ { ( currentStatus || audioFile . status ) === "processing" && (
1695+ < div className = "flex flex-col items-center space-y-4" >
1696+ < div className = "flex items-center space-x-3" >
1697+ < Loader2 className = "h-8 w-8 text-blue-500 animate-spin" />
1698+ < div >
1699+ < h2 className = "text-xl font-semibold text-gray-900 dark:text-gray-50" >
1700+ Transcription in Progress
1701+ </ h2 >
1702+ { elapsedTime > 0 && (
1703+ < p className = "text-sm text-gray-500 dark:text-gray-400 mt-1" >
1704+ Processing for { formatElapsedTime ( elapsedTime ) }
1705+ </ p >
1706+ ) }
1707+ </ div >
1708+ </ div >
1709+ < div className = "w-full max-w-md" >
1710+ < div className = "bg-gray-200 dark:bg-gray-600 rounded-full h-2" >
1711+ < div className = "bg-blue-500 h-2 rounded-full transition-all duration-1000 ease-out animate-pulse" style = { { width : '60%' } } > </ div >
1712+ </ div >
1713+ </ div >
1714+ < p className = "text-gray-600 dark:text-gray-400 text-sm" >
1715+ Converting your audio to text... This may take a few minutes.
1716+ </ p >
1717+ </ div >
1718+ ) }
1719+
1720+ { /* Other Status Messages */ }
1721+ { ( currentStatus || audioFile . status ) !== "processing" && (
1722+ < >
1723+ < h2 className = "text-xl font-semibold text-gray-900 dark:text-gray-50 mb-2" >
1724+ { ( currentStatus || audioFile . status ) === "pending" && "Transcription Queued" }
1725+ { ( currentStatus || audioFile . status ) === "uploaded" && "Ready for Transcription" }
1726+ { ( currentStatus || audioFile . status ) === "failed" && "Transcription Failed" }
1727+ </ h2 >
1728+ < p className = "text-gray-600 dark:text-gray-400" >
1729+ { ( currentStatus || audioFile . status ) === "pending" &&
1730+ "Your audio file is in the transcription queue." }
1731+ { ( currentStatus || audioFile . status ) === "uploaded" &&
1732+ "Start transcription from the audio files list." }
1733+ { ( currentStatus || audioFile . status ) === "failed" &&
1734+ "There was an error processing your audio file." }
1735+ </ p >
1736+ </ >
1737+ ) }
16071738 </ div >
16081739 </ div >
16091740 ) }
0 commit comments