Skip to content

Commit 7d86d35

Browse files
committed
fix: disable transcript polling since we use real-time IPC events
Removed refetchInterval from the useTranscript query since we get real-time updates via IPC events from Recall.ai. The query still fetches once on mount for historical segments but doesn't continuously poll the backend.
1 parent dad6e40 commit 7d86d35

File tree

1 file changed

+85
-83
lines changed

1 file changed

+85
-83
lines changed
Lines changed: 85 additions & 83 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query";
2-
import { useCallback, useEffect, useRef, useState } from "react";
2+
import { useCallback, useEffect, useRef } from "react";
33
import { useAuthStore } from "../../../stores/authStore";
44

55
export interface TranscriptSegment {
@@ -27,8 +27,8 @@ export function useTranscript(recordingId: string | null) {
2727
return transcript as TranscriptData;
2828
},
2929
enabled: !!client && !!recordingId,
30-
refetchInterval: 5000, // Poll every 5s to get updates
31-
staleTime: 2000,
30+
refetchInterval: false, // Disabled: we get real-time updates via IPC events
31+
staleTime: Infinity, // Never consider stale since we update via IPC
3232
});
3333
}
3434

@@ -67,113 +67,115 @@ export function useUploadTranscript() {
6767
}
6868

6969
/**
70-
* Hook for managing real-time transcript with local buffering and batched uploads
70+
* Hook for managing real-time transcript with TanStack Query cache and batched uploads
7171
*/
7272
export function useLiveTranscript(posthogRecordingId: string | null) {
73-
const [localSegments, setLocalSegments] = useState<TranscriptSegment[]>([]);
74-
const [pendingUpload, setPendingUpload] = useState<TranscriptSegment[]>([]);
73+
const queryClient = useQueryClient();
7574
const uploadMutation = useUploadTranscript();
7675
const { data: serverTranscript } = useTranscript(posthogRecordingId);
7776

78-
// Keep track of uploaded segment timestamps to avoid duplicates
79-
const uploadedTimestamps = useRef(new Set<number>());
77+
// Track pending segments for upload
78+
const pendingSegmentsRef = useRef<TranscriptSegment[]>([]);
79+
const uploadTimerRef = useRef<number>();
80+
const posthogRecordingIdRef = useRef(posthogRecordingId);
81+
posthogRecordingIdRef.current = posthogRecordingId;
82+
83+
// Upload pending segments helper
84+
const doUpload = useCallback(() => {
85+
if (
86+
!posthogRecordingIdRef.current ||
87+
pendingSegmentsRef.current.length === 0
88+
)
89+
return;
8090

81-
// Merge server transcript with local segments
82-
const allSegments = [
83-
...(serverTranscript?.segments || []),
84-
...localSegments,
85-
].sort((a, b) => a.timestamp - b.timestamp);
91+
const toUpload = [...pendingSegmentsRef.current];
92+
pendingSegmentsRef.current = [];
8693

87-
// Add new segment from IPC
88-
const addSegment = useCallback((segment: TranscriptSegment) => {
89-
// Check if already uploaded or in local buffer
90-
if (uploadedTimestamps.current.has(segment.timestamp)) {
91-
return;
94+
if (uploadTimerRef.current) {
95+
clearTimeout(uploadTimerRef.current);
96+
uploadTimerRef.current = undefined;
9297
}
9398

94-
setLocalSegments((prev) => {
95-
// Avoid duplicates in local buffer
96-
const exists = prev.some((s) => s.timestamp === segment.timestamp);
97-
if (exists) return prev;
98-
return [...prev, segment];
99+
uploadMutation.mutate({
100+
recordingId: posthogRecordingIdRef.current,
101+
segments: toUpload,
99102
});
103+
}, [uploadMutation]);
104+
105+
// Add new segment from IPC - optimistically update cache
106+
const addSegment = useCallback(
107+
(segment: TranscriptSegment) => {
108+
if (!posthogRecordingId) return;
109+
110+
// Optimistically update TanStack Query cache
111+
queryClient.setQueryData<TranscriptData>(
112+
["notetaker-transcript", posthogRecordingId],
113+
(old) => {
114+
const existingSegments = old?.segments || [];
115+
116+
// Check if segment already exists
117+
const exists = existingSegments.some(
118+
(s) => s.timestamp === segment.timestamp,
119+
);
120+
if (exists) return old;
100121

101-
setPendingUpload((prev) => [...prev, segment]);
102-
}, []);
103-
104-
const uploadSegments = useCallback(() => {
105-
if (!posthogRecordingId || pendingUpload.length === 0) return;
106-
107-
const toUpload = [...pendingUpload];
108-
109-
uploadMutation.mutate(
110-
{
111-
recordingId: posthogRecordingId,
112-
segments: toUpload,
113-
},
114-
{
115-
onSuccess: () => {
116-
// Mark as uploaded
117-
for (const seg of toUpload) {
118-
uploadedTimestamps.current.add(seg.timestamp);
119-
}
120-
121-
// Remove from local buffer after successful upload
122-
setLocalSegments((prev) =>
123-
prev.filter(
124-
(s) => !toUpload.some((u) => u.timestamp === s.timestamp),
125-
),
122+
// Add to cache
123+
const newSegments = [...existingSegments, segment].sort(
124+
(a, b) => a.timestamp - b.timestamp,
126125
);
127126

128-
// Clear pending
129-
setPendingUpload([]);
130-
},
131-
onError: (error) => {
132-
console.error("[LiveTranscript] Failed to upload segments:", error);
133-
// Keep in pending for retry
127+
return {
128+
full_text: newSegments.map((s) => s.text).join(" "),
129+
segments: newSegments,
130+
};
134131
},
135-
},
136-
);
137-
}, [posthogRecordingId, pendingUpload, uploadMutation]);
138-
139-
// Batch upload every 10 segments or 10 seconds
140-
useEffect(() => {
141-
if (!posthogRecordingId || pendingUpload.length === 0) return;
142-
143-
const shouldUpload = pendingUpload.length >= 10;
144-
145-
const timer = setTimeout(() => {
146-
if (pendingUpload.length > 0) {
147-
uploadSegments();
132+
);
133+
134+
// Track for batched upload
135+
const alreadyPending = pendingSegmentsRef.current.some(
136+
(s) => s.timestamp === segment.timestamp,
137+
);
138+
if (!alreadyPending) {
139+
pendingSegmentsRef.current.push(segment);
148140
}
149-
}, 10000); // 10s
150141

151-
if (shouldUpload) {
152-
uploadSegments();
153-
}
142+
// Clear existing timer
143+
if (uploadTimerRef.current) {
144+
clearTimeout(uploadTimerRef.current);
145+
}
154146

155-
return () => clearTimeout(timer);
156-
}, [pendingUpload.length, posthogRecordingId, uploadSegments]);
147+
// Upload immediately if we have 10 segments
148+
if (pendingSegmentsRef.current.length >= 10) {
149+
doUpload();
150+
} else {
151+
// Otherwise schedule upload in 10 seconds
152+
uploadTimerRef.current = setTimeout(() => {
153+
doUpload();
154+
}, 10000);
155+
}
156+
},
157+
[posthogRecordingId, queryClient, doUpload],
158+
);
157159

158160
// Force upload (e.g., when meeting ends)
159161
const forceUpload = useCallback(() => {
160-
if (pendingUpload.length > 0) {
161-
uploadSegments();
162-
}
163-
}, [pendingUpload, uploadSegments]);
162+
doUpload();
163+
}, [doUpload]);
164164

165-
// Reset when recording changes
165+
// Cleanup timer on unmount
166166
useEffect(() => {
167-
setLocalSegments([]);
168-
setPendingUpload([]);
169-
uploadedTimestamps.current.clear();
167+
return () => {
168+
if (uploadTimerRef.current) {
169+
clearTimeout(uploadTimerRef.current);
170+
}
171+
};
170172
}, []);
171173

172174
return {
173-
segments: allSegments,
175+
segments: serverTranscript?.segments || [],
174176
addSegment,
175177
forceUpload,
176178
isUploading: uploadMutation.isPending,
177-
pendingCount: pendingUpload.length,
179+
pendingCount: pendingSegmentsRef.current.length,
178180
};
179181
}

0 commit comments

Comments (0)