Skip to content

Commit 101c178

Browse files
authored
Make shared summaries look good + Chat capability (#3414)
- Shared summaries on web: the UI is much better. Introduced a chat capability, like Granola, where anyone can chat with the shared conversation.
2 parents 4f5e8ec + 32ec289 commit 101c178

File tree

21 files changed

+782
-102
lines changed

21 files changed

+782
-102
lines changed

.github/workflows/gcp_frontend.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,8 @@ jobs:
4444
--build-arg NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID=${{ secrets.NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID }} \
4545
--build-arg NEXT_PUBLIC_FIREBASE_APP_ID=${{ secrets.NEXT_PUBLIC_FIREBASE_APP_ID }} \
4646
--build-arg NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID=${{ secrets.NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID }} \
47+
--build-arg OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }} \
48+
--build-arg API_URL=${{ secrets.API_URL }} \
4749
-t gcr.io/${{ vars.GCP_PROJECT_ID }}/${{ env.SERVICE }} -f web/frontend/Dockerfile .
4850
4951
docker push gcr.io/${{ vars.GCP_PROJECT_ID }}/${{ env.SERVICE }}

backend/routers/conversations.py

Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5,9 +5,11 @@
55
import database.conversations as conversations_db
66
import database.action_items as action_items_db
77
import database.redis_db as redis_db
8+
import database.users as users_db
89
from database.vector_db import delete_vector
910
from models.conversation import *
1011
from models.conversation import SearchRequest
12+
from models.other import Person
1113

1214
from utils.conversations.process_conversation import process_conversation, retrieve_in_progress_conversation
1315
from utils.conversations.search import search_conversations
@@ -501,21 +503,30 @@ def set_conversation_visibility(
501503
return {"status": "Ok"}
502504

503505

504-
@router.get("/v1/conversations/{conversation_id}/shared", tags=['conversations'])
def get_shared_conversation_by_id(conversation_id: str):
    """Return a publicly shared conversation plus the people referenced in it.

    The owner uid is looked up from the share mapping in Redis; responds 404
    when the mapping is missing or the conversation is (back to) private.
    Returns a plain dict rather than the Conversation model so the extra
    'people' key survives serialization.
    """
    uid = redis_db.get_conversation_uid(conversation_id)
    if not uid:
        raise HTTPException(status_code=404, detail="Conversation is private")

    conversation = _get_valid_conversation_by_id(uid, conversation_id)
    visibility = conversation.get('visibility', ConversationVisibility.private)
    if not visibility or visibility == ConversationVisibility.private:
        raise HTTPException(status_code=404, detail="Conversation is private")
    conversation = Conversation(**conversation)
    # Strip location before exposing the conversation publicly.
    conversation.geolocation = None

    # Fetch people data so the shared web UI can render real speaker names.
    person_ids = conversation.get_person_ids()
    people = []
    if person_ids:
        people_data = users_db.get_people_by_ids(uid, person_ids)
        # Guard against ids that did not resolve to a stored person record.
        people = [Person(**p) for p in people_data if p]

    response_dict = conversation.as_dict_cleaned_dates()
    response_dict['people'] = [p.dict() for p in people]
    return response_dict
519530

520531

521532
@router.get("/v1/public-conversations", response_model=List[Conversation], tags=['conversations'])

web/frontend/Dockerfile

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,8 @@ ARG NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET
3131
ARG NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID
3232
ARG NEXT_PUBLIC_FIREBASE_APP_ID
3333
ARG NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID
34+
ARG OPENAI_API_KEY
35+
ARG API_URL
3436

3537
ENV NEXT_PUBLIC_FIREBASE_API_KEY=$NEXT_PUBLIC_FIREBASE_API_KEY
3638
ENV NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN=$NEXT_PUBLIC_FIREBASE_AUTH_DOMAIN
@@ -39,7 +41,8 @@ ENV NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET=$NEXT_PUBLIC_FIREBASE_STORAGE_BUCKET
3941
ENV NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID=$NEXT_PUBLIC_FIREBASE_MESSAGING_SENDER_ID
4042
ENV NEXT_PUBLIC_FIREBASE_APP_ID=$NEXT_PUBLIC_FIREBASE_APP_ID
4143
ENV NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID=$NEXT_PUBLIC_FIREBASE_MEASUREMENT_ID
42-
ENV API_URL=https://backend-hhibjajaja-uc.a.run.app
44+
ENV API_URL=$API_URL
45+
ENV OPENAI_API_KEY=$OPENAI_API_KEY
4346
RUN npm run build
4447

4548
RUN echo "Files in builder:"
Lines changed: 164 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,164 @@
1+
// Next.js server-action module: everything here runs on the server only.
'use server';

import envConfig from '@/src/constants/envConfig';

// One turn of the chat history exchanged with the model.
interface ChatMessage {
  role: 'user' | 'assistant';
  content: string;
}

// Input to chatWithMemory: prior chat turns plus the shared-conversation transcript.
interface ChatWithMemoryRequest {
  messages: ChatMessage[];
  transcript: string;
}

// Successful result: the assistant's reply text.
export interface ChatWithMemoryResponse {
  message: string;
}

const OPENAI_API_KEY = envConfig.OPENAI_API_KEY;

// NOTE(review): this throws at module *load* time, so any page importing this
// module fails when the env var is unset — even pages that never invoke chat.
// Confirm that is intended rather than failing per-request inside chatWithMemory.
if (!OPENAI_API_KEY) {
  throw new Error('OPENAI_API_KEY is not configured. Please set it in your environment variables.');
}
24+
25+
// Rough token estimation: ~4 characters per token
26+
function estimateTokens(text: string): number {
27+
return Math.ceil(text.length / 4);
28+
}
29+
30+
// Truncate transcript to fit within token budget
31+
function truncateTranscript(transcript: string, maxTokens: number): string {
32+
const estimatedTokens = estimateTokens(transcript);
33+
if (estimatedTokens <= maxTokens) {
34+
return transcript;
35+
}
36+
37+
// If transcript is too long, take the beginning and end
38+
const targetLength = maxTokens * 4; // Convert tokens back to characters
39+
const startLength = Math.floor(targetLength * 0.6); // 60% from start
40+
const endLength = Math.floor(targetLength * 0.4); // 40% from end
41+
42+
const start = transcript.substring(0, startLength);
43+
const end = transcript.substring(transcript.length - endLength);
44+
45+
return `${start}\n\n[... transcript truncated ...]\n\n${end}`;
46+
}
47+
48+
export default async function chatWithMemory(
49+
data: ChatWithMemoryRequest,
50+
): Promise<ChatWithMemoryResponse | null> {
51+
try {
52+
// Use gpt-4.1 which has 128k context window, or fallback to gpt-3.5-turbo-16k
53+
const model = 'gpt-4.1';
54+
55+
// Estimate tokens for conversation messages (reserve ~2000 tokens for system message and response)
56+
const conversationTokens = data.messages.reduce(
57+
(sum, msg) => sum + estimateTokens(msg.content),
58+
0
59+
);
60+
61+
// Reserve tokens: 2000 for system message overhead, 2000 for response, 2000 for conversation
62+
const maxTranscriptTokens = 120000 - conversationTokens - 2000 - 2000;
63+
64+
// Truncate transcript if needed
65+
const processedTranscript = truncateTranscript(data.transcript, maxTranscriptTokens);
66+
67+
// Create system message with transcript context
68+
const systemMessage = {
69+
role: 'system' as const,
70+
content: `You are a helpful chatbot assistant. You have access to the following conversation transcript. Use this context to answer questions accurately and helpfully.
71+
72+
Important: As a chatbot, provide short and concise answers. Be direct and to the point while still being helpful.
73+
74+
Critical: Always try to reference things from the conversation transcript, even when the user asks questions that seem unrelated to the conversation. Find connections, examples, or relevant details from the transcript that relate to their question, and incorporate those references into your response.
75+
76+
Transcript:
77+
${processedTranscript}
78+
79+
Please answer questions based on the transcript above. Even if a question seems unrelated, always try to find and reference relevant information from the conversation.`,
80+
};
81+
82+
// Keep only recent conversation messages to avoid token limit issues
83+
// Keep last 10 messages (5 exchanges) to maintain context
84+
const recentMessages = data.messages.slice(-10);
85+
const messages = [systemMessage, ...recentMessages];
86+
87+
const response = await fetch('https://api.openai.com/v1/chat/completions', {
88+
method: 'POST',
89+
headers: {
90+
'Content-Type': 'application/json',
91+
Authorization: `Bearer ${OPENAI_API_KEY}`,
92+
},
93+
body: JSON.stringify({
94+
model: model,
95+
messages: messages,
96+
temperature: 0.7,
97+
}),
98+
});
99+
100+
if (!response.ok) {
101+
const errorData = await response.json().catch(() => ({}));
102+
console.error('OpenAI API error:', response.status, errorData);
103+
104+
// If context length error, try with gpt-3.5-turbo-16k as fallback
105+
if (errorData.error?.code === 'context_length_exceeded') {
106+
const fallbackModel = 'gpt-3.5-turbo-16k';
107+
const fallbackMaxTokens = 14000 - conversationTokens - 2000 - 2000;
108+
const fallbackTranscript = truncateTranscript(data.transcript, fallbackMaxTokens);
109+
110+
const fallbackSystemMessage = {
111+
role: 'system' as const,
112+
content: `You are a helpful chatbot assistant. You have access to the following conversation transcript. Use this context to answer questions accurately and helpfully.
113+
114+
Important: As a chatbot, provide short and concise answers. Be direct and to the point while still being helpful.
115+
116+
Critical: Always try to reference things from the conversation transcript, even when the user asks questions that seem unrelated to the conversation. Find connections, examples, or relevant details from the transcript that relate to their question, and incorporate those references into your response.
117+
118+
Try to say things like "like mentioned by x"
119+
120+
Transcript:
121+
${fallbackTranscript}
122+
123+
Please answer questions based on the transcript above. Even if a question seems unrelated, always try to find and reference relevant information from the conversation.`,
124+
};
125+
126+
const fallbackResponse = await fetch('https://api.openai.com/v1/chat/completions', {
127+
method: 'POST',
128+
headers: {
129+
'Content-Type': 'application/json',
130+
Authorization: `Bearer ${OPENAI_API_KEY}`,
131+
},
132+
body: JSON.stringify({
133+
model: fallbackModel,
134+
messages: [fallbackSystemMessage, ...recentMessages],
135+
temperature: 0.7,
136+
}),
137+
});
138+
139+
if (!fallbackResponse.ok) {
140+
const fallbackErrorData = await fallbackResponse.json().catch(() => ({}));
141+
console.error('OpenAI API fallback error:', fallbackResponse.status, fallbackErrorData);
142+
return null;
143+
}
144+
145+
const fallbackResult = await fallbackResponse.json();
146+
const assistantMessage = fallbackResult.choices[0]?.message?.content || 'Sorry, I could not generate a response.';
147+
return { message: assistantMessage };
148+
}
149+
150+
return null;
151+
}
152+
153+
const result = await response.json();
154+
const assistantMessage = result.choices[0]?.message?.content || 'Sorry, I could not generate a response.';
155+
156+
return {
157+
message: assistantMessage,
158+
};
159+
} catch (error) {
160+
console.error('Error chatting with memory:', error);
161+
return null;
162+
}
163+
}
164+

web/frontend/src/app/globals.css

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,3 +33,7 @@
3333
.search-hidden {
3434
display: none !important;
3535
}
36+
37+
.font-system-ui {
38+
font-family: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif;
39+
}

web/frontend/src/app/layout.tsx

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ import type { Metadata } from 'next';
22
import { Mulish } from 'next/font/google';
33
import './globals.css';
44
import AppHeader from '../components/shared/app-header';
5-
import Footer from '../components/shared/footer';
5+
import ConditionalFooter from '../components/shared/conditional-footer';
66
import envConfig from '../constants/envConfig';
77
import { GleapInit } from '@/src/components/shared/gleap';
88
import { GoogleAnalytics } from '@/src/components/shared/google-analytics';
@@ -39,7 +39,7 @@ export default function RootLayout({
3939
<main className="flex min-h-screen flex-col">
4040
<div className="w-full flex-grow">{children}</div>
4141
</main>
42-
<Footer />
42+
<ConditionalFooter />
4343
</body>
4444
<GleapInit />
4545
<GoogleAnalytics />

web/frontend/src/app/memories/[id]/page.tsx

Lines changed: 22 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ import envConfig from '@/src/constants/envConfig';
55
import { DEFAULT_TITLE_MEMORY } from '@/src/constants/memory';
66
import { ParamsTypes, SearchParamsTypes } from '@/src/types/params.types';
77
import { Metadata, ResolvingMetadata } from 'next';
8+
import { notFound } from 'next/navigation';
89

910
interface MemoryPageProps {
1011
params: ParamsTypes;
@@ -16,13 +17,24 @@ export async function generateMetadata(
1617
parent: ResolvingMetadata,
1718
): Promise<Metadata> {
1819
const prevData = (await parent) as Metadata;
19-
const memory = (await (
20-
await fetch(`${envConfig.API_URL}/v1/conversations/${params.id}/shared`, {
20+
let memory: { structured?: { title?: string; overview?: string } } | null = null;
21+
22+
try {
23+
const response = await fetch(`${envConfig.API_URL}/v1/conversations/${params.id}/shared`, {
2124
next: {
2225
revalidate: 60,
2326
},
24-
})
25-
).json()) as { structured?: { title?: string; overview?: string } };
27+
});
28+
29+
if (response.ok) {
30+
const contentType = response.headers.get('content-type');
31+
if (contentType && contentType.includes('application/json')) {
32+
memory = await response.json();
33+
}
34+
}
35+
} catch (error) {
36+
// Silently handle errors in metadata generation
37+
}
2638

2739
const title = !memory
2840
? 'Memory Not Found'
@@ -49,12 +61,14 @@ export async function generateMetadata(
4961
export default async function MemoryPage({ params, searchParams }: MemoryPageProps) {
5062
const memoryId = params.id;
5163
const memory = await getSharedMemory(memoryId);
52-
if (!memory) throw new Error();
64+
if (!memory) {
65+
notFound();
66+
}
5367

5468
return (
55-
<div className="min-h-screen bg-gradient-to-b from-zinc-900 via-zinc-900 to-black">
56-
<div className="absolute inset-0 bg-[radial-gradient(circle_500px_at_50%_200px,#3e3e3e40,transparent)]" />
57-
<section className="relative mx-auto max-w-screen-md px-4 py-16 md:px-6 md:py-24">
69+
<div className="min-h-screen bg-gradient-to-b from-[#1a0a1f] via-[#0a0a2f] to-black font-system-ui">
70+
<div className="absolute inset-0 bg-[radial-gradient(circle_500px_at_50%_200px,rgba(88,28,135,0.2),transparent)]" />
71+
<section className="relative mx-auto max-w-screen-md px-6 py-16 md:px-12 md:py-24">
5872
<MemoryHeader />
5973
<Memory memory={memory} searchParams={searchParams} />
6074
</section>

web/frontend/src/app/my-apps/page.tsx

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ import { useEffect, useState, useCallback } from 'react';
55
import { useRouter } from 'next/navigation';
66
import Image from 'next/image';
77
import Link from 'next/link';
8+
import envConfig from '@/src/constants/envConfig';
89

910
interface App {
1011
id: string;
@@ -40,12 +41,8 @@ export default function MyAppsPage() {
4041
throw new Error('Authentication token not available.');
4142
}
4243
const response = await fetch(
43-
`${process.env.NEXT_PUBLIC_API_BASE_URL || 'http://localhost:8000'}/v1/apps`,
44-
{
45-
headers: {
46-
Authorization: `Bearer ${token}`,
47-
},
48-
},
44+
`${envConfig.API_URL || 'http://localhost:8000'}/v1/apps`,
45+
{ headers: { Authorization: `Bearer ${token}` } },
4946
);
5047

5148
console.log('📡 [fetchUserApps] Backend response status:', response.status);

0 commit comments

Comments
 (0)