From 06ef8859fbb0452f0bb2898959db414a6f1ad1ed Mon Sep 17 00:00:00 2001
From: Kenneth Angelikas
Date: Fri, 31 Oct 2025 09:29:52 -0400
Subject: [PATCH 1/5] updated readme with relevant info

---
 README.md | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 739cc900..46d2e54c 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,23 @@
-Development of this repository is currently in a halt, due to lack of time. Updates are comming end of June.
-
-working again ; )
-I am very busy at the moment so I would be very thankful for contributions and PR's
+# Overlap
+- Overlap is a team-first chat assistant that nudges teammates toward each other.
+- When someone asks a question, the system checks a shared, opt‑in skills index and, if relevant, suggests which teammate(s) might help — replacing solitary AI answers with socially aware guidance.
+
+## MVP (minimum viable product)
+- Simple web chat UI (single‑page) that sends {user_id, team_id, prompt} to the server.
+- Team and user records with a short skills survey (stored in SQLite for the MVP).
+- Backend augmentation: before calling the model, do a fast skill match (keyword or simple normalization) and inject one short hint into the prompt if a teammate matches (a sketch of this step appears below, after the personas).
+- Stream model responses back to the client unchanged except for the injected hint.
+- Basic seed data, Docker support, and an environment variable for the model API key.
+- No production auth in the MVP (trusted user_id); plan to add auth before public use.
+
+## User journey (MVP)
+1. Join or create a team and complete a quick skills survey.
+2. Open chat and ask a question.
+3. The server checks team skills and finds possible matches.
+4. If a match exists, the reply includes a short suggestion like “Alice knows React — want to connect?”
+5. The conversation is logged; skill usage counters may be updated for future recommendations.
+
+## User personas

 ## To do

 - [x] Double confirm when deleting conversation

From 983b9dddf7295cb5023ff9a48e36e75e1823caa6 Mon Sep 17 00:00:00 2001
From: Manvender Singh <151474058+manvendersingh21@users.noreply.github.com>
Date: Fri, 31 Oct 2025 11:04:00 -0400
Subject: [PATCH 2/5] first

---
 README.md | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/README.md b/README.md
index 46d2e54c..dbca5df0 100644
--- a/README.md
+++ b/README.md
@@ -18,6 +18,13 @@
 5. The conversation is logged; skill usage counters may be updated for future recommendations.

 ## User personas
+Four user personas; the first:
+
+Sam — Product Manager
+Goal: Get a quick, team-aware answer or a referral to the right teammate.
+Scenario: Asks “How do we track onboarding metrics?” and is pointed to Maya, who owns analytics.
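+
+A minimal sketch of the skill-match step from the MVP list (illustrative only: the helper name, survey shape, and matching rule are assumptions, not shipped code):
+
+```python
+def suggest_teammate(prompt: str, team_skills: dict[str, list[str]]) -> str | None:
+    """Return a one-line hint when a surveyed skill appears in the prompt."""
+    words = {w.strip("?.,!").lower() for w in prompt.split()}
+    for member, skills in team_skills.items():
+        matched = [s for s in skills if s.lower() in words]
+        if matched:
+            return f"{member} knows {matched[0]} — want to connect?"
+    return None
+
+# suggest_teammate("How do we track onboarding metrics?",
+#                  {"Maya": ["metrics", "analytics"], "Sam": ["roadmaps"]})
+# -> "Maya knows metrics — want to connect?"
+```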
+ + ## To do - [x] Double confirm when deleting conversation From 5e2b0c91459fe7ae412d7acde6beda0ea69e3496 Mon Sep 17 00:00:00 2001 From: KennyAngelikas Date: Tue, 4 Nov 2025 13:53:54 -0500 Subject: [PATCH 3/5] refactor frontend + update UI --- client/css/style.css | 104 +++++++- client/html/index.html | 27 +- client/js/api.js | 56 ++++ client/js/chat.js | 567 ----------------------------------------- client/js/main.js | 201 +++++++++++++++ client/js/store.js | 141 ++++++++++ client/js/ui.js | 200 +++++++++++++++ client/js/utils.js | 65 +++++ server/app.py | 2 + 9 files changed, 768 insertions(+), 595 deletions(-) create mode 100644 client/js/api.js delete mode 100644 client/js/chat.js create mode 100644 client/js/main.js create mode 100644 client/js/store.js create mode 100644 client/js/ui.js create mode 100644 client/js/utils.js diff --git a/client/css/style.css b/client/css/style.css index a1f69087..58ef682f 100644 --- a/client/css/style.css +++ b/client/css/style.css @@ -47,7 +47,11 @@ html, body { scroll-behavior: smooth; + /* Keep the overall page from scrolling — the app will make the chat + messages area the only scrollable region. This keeps the input and + header always visible. */ overflow: hidden; + height: 100%; } body { @@ -60,7 +64,8 @@ body { .row { display: flex; gap: var(--section-gap); - height: 100%; + /* occupy the viewport (accounting for body padding) */ + height: calc(100vh - 2 * var(--section-gap)); } .box { @@ -76,8 +81,11 @@ body { .conversations { max-width: 260px; padding: var(--section-gap); - overflow: auto; - flex-shrink: 0; + /* keep the left sidebar static and visible; do not let the page scroll */ + overflow: visible; + /* make the sidebar a fixed column so it doesn't collapse to zero width */ + flex: 0 0 260px; + width: 260px; display: flex; flex-direction: column; justify-content: space-between; @@ -86,9 +94,12 @@ body { .conversation { width: 100%; min-height: 50%; - height: 100vh; - overflow-y: scroll; - overflow-x: hidden; + /* Fill remaining vertical space inside .row. The messages area will + scroll independently; the input stays fixed at the bottom of this box. 
 */
+    /* allow the conversation column to grow and take remaining horizontal space */
+    flex: 1 1 auto;
+    height: 100%;
+    overflow: hidden;
     display: flex;
     flex-direction: column;
     gap: 15px;
 }

 .conversation #messages {
     display: flex;
     flex-direction: column;
     overflow-wrap: break-word;
-    overflow-y: inherit;
+    /* messages area takes remaining space and scrolls */
+    flex: 1 1 auto;
+    overflow-y: auto;
     overflow-x: hidden;
-    padding-bottom: 50px;
+    padding-bottom: 12px;
 }

 .conversation .user-input {
-    max-height: 10vh;
+    /* keep input area fixed height so layout is predictable */
+    flex: 0 0 auto;
 }

 .conversation .user-input input {
@@ -488,7 +502,25 @@ select {
     display: flex;
     flex-direction: column;
     gap: 16px;
-    overflow: auto;
+    /* allow the left-top area to scroll vertically if conversation list grows */
+    overflow-y: auto;
+}
+
+/* Styles for the conversation list container we render into */
+.conversation-list {
+    display: flex;
+    flex-direction: column;
+    gap: 8px;
+    max-height: calc(100vh - 260px);
+    overflow-y: auto;
+}
+
+.conversation-list-items .no-convos {
+    color: var(--colour-3);
+    opacity: 0.8;
+    padding: 8px 12px;
+    border-radius: 6px;
+    border: 1px dashed var(--conversations);
+}

 #cursor {
@@ -632,7 +664,8 @@ a:-webkit-any-link {
 .conversation .user-input textarea {
     font-size: 15px;
     width: 100%;
-    height: 100%;
+    /* give a static-ish size so layout doesn't jump */
+    height: 110px;
     padding: 12px 15px;
     background: none;
     border: none;

     color: var(--colour-3);

     resize: vertical;
-    max-height: 150px;
+    max-height: 180px;
     min-height: 80px;
 }
@@ -812,4 +845,49 @@ a:-webkit-any-link {
     --clr-card-bg: hsl(209 50% 5%);
     --colour-3: hsl(209 50% 90%);
     --conversations: hsl(209 50% 80%);
-}
\ No newline at end of file
+}
+
+/* Small layout fixes for input and new-convo visibility */
+.conversations .top {
+    /* ensure the top area can always show the new conversation button */
+    min-height: 84px;
+    padding-bottom: 8px;
+}
+
+.new_convo {
+    align-items: center;
+    display: flex;
+    gap: 8px;
+}
+
+.box.input-box {
+    display: flex;
+    align-items: center;
+    gap: 12px;
+    padding: 10px;
+}
+
+.input-box textarea#message-input {
+    width: 100%;
+    min-height: 80px;
+    max-height: 180px;
+    resize: vertical;
+    padding: 12px;
+    box-sizing: border-box;
+    background: none;
+    border: none;
+    outline: none;
+    color: var(--colour-3);
+}
+
+#send-button {
+    width: 52px;
+    height: 52px;
+    display: flex;
+    align-items: center;
+    justify-content: center;
+    cursor: pointer;
+}
+
+/* ensure the input box stays above decorative gradients */
+.box.input-box { z-index: 2; }
\ No newline at end of file
diff --git a/client/html/index.html b/client/html/index.html
index 201ac155..415523be 100644
--- a/client/html/index.html
+++ b/client/html/index.html
@@ -14,8 +14,11 @@
 [head markup lost in extraction: the old chat.js script include is replaced by module script includes for the new client/js files; the "ChatGPT" title text is unchanged]
 [a later hunk removes the sidebar footer block, whose text read:]
-                    By: @xtekky
-                    Version: 0.0.1-beta
-                    Release: 2023-04-18
diff --git a/client/js/api.js b/client/js/api.js new file mode 100644 index 00000000..5e6fe622 --- /dev/null +++ b/client/js/api.js @@ -0,0 +1,56 @@ +// Minimal API module: streaming POST to backend conversation endpoint. +// Exports streamConversation(payload, onChunk, signal) -> returns final accumulated text. + +export async function streamConversation(payload, onChunk, signal) { + const url = '/backend-api/v2/conversation'; + + const res = await fetch(url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'Accept': 'text/event-stream' + }, + body: JSON.stringify(payload), + signal + }); + + if (!res.ok) { + // attempt to read response body for better error messages + const body = await res.text().catch(() => ''); + throw new Error(`Request failed: ${res.status} ${res.statusText}${body ? ' - ' + body : ''}`); + } + + if (!res.body) { + throw new Error('Response has no body stream'); + } + + const reader = res.body.getReader(); + const decoder = new TextDecoder(); + let finalText = ''; + + try { + while (true) { + const { value, done } = await reader.read(); + if (done) break; + + const chunk = decoder.decode(value, { stream: true }); + + // Basic protection: detect common HTML/CF challenge responses and convert to readable text + const safeChunk = chunk.includes('
<title>Attention Required')
+        ? 'Error: Cloudflare/edge returned an HTML challenge. Refresh the page or check the server.'
+        : chunk;
+
+      finalText += safeChunk;
+
+      // fire UI callback, ignore errors from the callback
+      try { if (typeof onChunk === 'function') onChunk(safeChunk); } catch (e) { /* ignore */ }
+    }
+  } catch (err) {
+    // Propagate AbortError to allow callers to detect cancellation
+    throw err;
+  } finally {
+    try { reader.releaseLock(); } catch (e) { /* ignore */ }
+  }
+
+  return finalText;
+}
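+
+// Usage sketch (illustrative; the payload fields mirror what main.js sends,
+// and `outputEl` is an assumed DOM node, not part of this module):
+//
+//   const ctrl = new AbortController();
+//   const full = await streamConversation(
+//     { conversation_id: id, action: '_ask', meta: { /* ... */ } },
+//     (chunk) => { outputEl.textContent += chunk; },
+//     ctrl.signal
+//   );
+//   // ctrl.abort() cancels the fetch; the read loop then rejects with AbortError.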
"); -}; - -message_input.addEventListener("blur", () => { - window.scrollTo(0, 0); -}); - -message_input.addEventListener("focus", () => { - document.documentElement.scrollTop = document.documentElement.scrollHeight; -}); - -const delete_conversations = async () => { - localStorage.clear(); - await new_conversation(); -}; - -const handle_ask = async () => { - message_input.style.height = `80px`; - message_input.focus(); - - window.scrollTo(0, 0); - let message = message_input.value; - - if (message.length > 0) { - message_input.value = ``; - await ask_gpt(message); - } -}; - -const remove_cancel_button = async () => { - stop_generating.classList.add(`stop_generating-hiding`); - - setTimeout(() => { - stop_generating.classList.remove(`stop_generating-hiding`); - stop_generating.classList.add(`stop_generating-hidden`); - }, 300); -}; - -const ask_gpt = async (message) => { - try { - message_input.value = ``; - message_input.innerHTML = ``; - message_input.innerText = ``; - - add_conversation(window.conversation_id, message.substr(0, 20)); - window.scrollTo(0, 0); - window.controller = new AbortController(); - - jailbreak = document.getElementById("jailbreak"); - model = document.getElementById("model"); - prompt_lock = true; - window.text = ``; - window.token = message_id(); - - stop_generating.classList.remove(`stop_generating-hidden`); - - message_box.innerHTML += ` -
-
- ${user_image} - -
-
- ${format(message)} -
-
- `; - - /* .replace(/(?:\r\n|\r|\n)/g, '
') */ - - message_box.scrollTop = message_box.scrollHeight; - window.scrollTo(0, 0); - await new Promise((r) => setTimeout(r, 500)); - window.scrollTo(0, 0); - - message_box.innerHTML += ` -
-
- ${gpt_image} -
-
-
-
-
- `; - - message_box.scrollTop = message_box.scrollHeight; - window.scrollTo(0, 0); - await new Promise((r) => setTimeout(r, 1000)); - window.scrollTo(0, 0); - - const response = await fetch(`/backend-api/v2/conversation`, { - method: `POST`, - signal: window.controller.signal, - headers: { - "content-type": `application/json`, - accept: `text/event-stream`, - }, - body: JSON.stringify({ - conversation_id: window.conversation_id, - action: `_ask`, - model: model.options[model.selectedIndex].value, - jailbreak: jailbreak.options[jailbreak.selectedIndex].value, - meta: { - id: window.token, - content: { - conversation: await get_conversation(window.conversation_id), - internet_access: document.getElementById("switch").checked, - content_type: "text", - parts: [ - { - content: message, - role: "user", - }, - ], - }, - }, - }), - }); - - const reader = response.body.getReader(); - - while (true) { - const { value, done } = await reader.read(); - if (done) break; - - chunk = new TextDecoder().decode(value); - - if ( - chunk.includes( - ` { - const elements = box_conversations.childNodes; - let index = elements.length; - - if (index > 0) { - while (index--) { - const element = elements[index]; - if ( - element.nodeType === Node.ELEMENT_NODE && - element.tagName.toLowerCase() !== `button` - ) { - box_conversations.removeChild(element); - } - } - } -}; - -const clear_conversation = async () => { - let messages = message_box.getElementsByTagName(`div`); - - while (messages.length > 0) { - message_box.removeChild(messages[0]); - } -}; - -const show_option = async (conversation_id) => { - const conv = document.getElementById(`conv-${conversation_id}`); - const yes = document.getElementById(`yes-${conversation_id}`); - const not = document.getElementById(`not-${conversation_id}`); - - conv.style.display = "none"; - yes.style.display = "block"; - not.style.display = "block"; -} - -const hide_option = async (conversation_id) => { - const conv = document.getElementById(`conv-${conversation_id}`); - const yes = document.getElementById(`yes-${conversation_id}`); - const not = document.getElementById(`not-${conversation_id}`); - - conv.style.display = "block"; - yes.style.display = "none"; - not.style.display = "none"; -} - -const delete_conversation = async (conversation_id) => { - localStorage.removeItem(`conversation:${conversation_id}`); - - const conversation = document.getElementById(`convo-${conversation_id}`); - conversation.remove(); - - if (window.conversation_id == conversation_id) { - await new_conversation(); - } - - await load_conversations(20, 0, true); -}; - -const set_conversation = async (conversation_id) => { - history.pushState({}, null, `/chat/${conversation_id}`); - window.conversation_id = conversation_id; - - await clear_conversation(); - await load_conversation(conversation_id); - await load_conversations(20, 0, true); -}; - -const new_conversation = async () => { - history.pushState({}, null, `/chat/`); - window.conversation_id = uuid(); - - await clear_conversation(); - await load_conversations(20, 0, true); -}; - -const load_conversation = async (conversation_id) => { - let conversation = await JSON.parse( - localStorage.getItem(`conversation:${conversation_id}`) - ); - console.log(conversation, conversation_id); - - for (item of conversation.items) { - message_box.innerHTML += ` -
-            <div class="message">
-                <div class="user">
-                    ${item.role == "assistant" ? gpt_image : user_image}
-                    ${
-                        item.role == "assistant"
-                            ? `<i class="fa-regular fa-phone-arrow-down-left"></i>`
-                            : `<i class="fa-regular fa-phone-arrow-up-right"></i>`
-                    }
-                </div>
-                <div class="content">
-                    ${
-                        item.role == "assistant"
-                            ? markdown.render(item.content)
-                            : item.content
-                    }
-                </div>
-            </div>
-        `;
-    }
-
-    document.querySelectorAll(`code`).forEach((el) => {
-        hljs.highlightElement(el);
-    });
-
-    message_box.scrollTo({ top: message_box.scrollHeight, behavior: "smooth" });
-
-    setTimeout(() => {
-        message_box.scrollTop = message_box.scrollHeight;
-    }, 500);
-};
-
-const get_conversation = async (conversation_id) => {
-    let conversation = await JSON.parse(
-        localStorage.getItem(`conversation:${conversation_id}`)
-    );
-    return conversation.items;
-};
-
-const add_conversation = async (conversation_id, title) => {
-    if (localStorage.getItem(`conversation:${conversation_id}`) == null) {
-        localStorage.setItem(
-            `conversation:${conversation_id}`,
-            JSON.stringify({
-                id: conversation_id,
-                title: title,
-                items: [],
-            })
-        );
-    }
-};
-
-const add_message = async (conversation_id, role, content) => {
-    before_adding = JSON.parse(
-        localStorage.getItem(`conversation:${conversation_id}`)
-    );
-
-    before_adding.items.push({
-        role: role,
-        content: content,
-    });
-
-    localStorage.setItem(
-        `conversation:${conversation_id}`,
-        JSON.stringify(before_adding)
-    ); // update conversation
-};
-
-const load_conversations = async (limit, offset, loader) => {
-    //console.log(loader);
-    //if (loader === undefined) box_conversations.appendChild(spinner);
-
-    let conversations = [];
-    for (let i = 0; i < localStorage.length; i++) {
-        if (localStorage.key(i).startsWith("conversation:")) {
-            let conversation = localStorage.getItem(localStorage.key(i));
-            conversations.push(JSON.parse(conversation));
-        }
-    }
-
-    //if (loader === undefined) spinner.parentNode.removeChild(spinner)
-    await clear_conversations();
-
-    for (conversation of conversations) {
-        box_conversations.innerHTML += `
-            <div class="convo" id="convo-${conversation.id}">
-                <div class="left" onclick="set_conversation('${conversation.id}')">
-                    <i class="fa-regular fa-comments"></i>
-                    <span class="convo-title">${conversation.title}</span>
-                </div>
-                <i class="fa-regular fa-trash" id="conv-${conversation.id}" onclick="show_option('${conversation.id}')"></i>
-                <i class="fa-regular fa-check" id="yes-${conversation.id}" onclick="delete_conversation('${conversation.id}')" style="display:none;"></i>
-                <i class="fa-regular fa-x" id="not-${conversation.id}" onclick="hide_option('${conversation.id}')" style="display:none;"></i>
-            </div>
- `; - } - - document.querySelectorAll(`code`).forEach((el) => { - hljs.highlightElement(el); - }); -}; - -document.getElementById(`cancelButton`).addEventListener(`click`, async () => { - window.controller.abort(); - console.log(`aborted ${window.conversation_id}`); -}); - -function h2a(str1) { - var hex = str1.toString(); - var str = ""; - - for (var n = 0; n < hex.length; n += 2) { - str += String.fromCharCode(parseInt(hex.substr(n, 2), 16)); - } - - return str; -} - -const uuid = () => { - return `xxxxxxxx-xxxx-4xxx-yxxx-${Date.now().toString(16)}`.replace( - /[xy]/g, - function (c) { - var r = (Math.random() * 16) | 0, - v = c == "x" ? r : (r & 0x3) | 0x8; - return v.toString(16); - } - ); -}; - -const message_id = () => { - random_bytes = (Math.floor(Math.random() * 1338377565) + 2956589730).toString( - 2 - ); - unix = Math.floor(Date.now() / 1000).toString(2); - - return BigInt(`0b${unix}${random_bytes}`).toString(); -}; - -window.onload = async () => { - load_settings_localstorage(); - - conversations = 0; - for (let i = 0; i < localStorage.length; i++) { - if (localStorage.key(i).startsWith("conversation:")) { - conversations += 1; - } - } - - if (conversations == 0) localStorage.clear(); - - await setTimeout(() => { - load_conversations(20, 0); - }, 1); - - if (!window.location.href.endsWith(`#`)) { - if (/\/chat\/.+/.test(window.location.href)) { - await load_conversation(window.conversation_id); - } - } - -message_input.addEventListener(`keydown`, async (evt) => { - if (prompt_lock) return; - if (evt.keyCode === 13 && !evt.shiftKey) { - evt.preventDefault(); - console.log('pressed enter'); - await handle_ask(); - } else { - message_input.style.removeProperty("height"); - message_input.style.height = message_input.scrollHeight + 4 + "px"; - } - }); - - send_button.addEventListener(`click`, async () => { - console.log("clicked send"); - if (prompt_lock) return; - await handle_ask(); - }); - - register_settings_localstorage(); -}; - -document.querySelector(".mobile-sidebar").addEventListener("click", (event) => { - const sidebar = document.querySelector(".conversations"); - - if (sidebar.classList.contains("shown")) { - sidebar.classList.remove("shown"); - event.target.classList.remove("rotated"); - } else { - sidebar.classList.add("shown"); - event.target.classList.add("rotated"); - } - - window.scrollTo(0, 0); -}); - -const register_settings_localstorage = async () => { - settings_ids = ["switch", "model", "jailbreak"]; - settings_elements = settings_ids.map((id) => document.getElementById(id)); - settings_elements.map((element) => - element.addEventListener(`change`, async (event) => { - switch (event.target.type) { - case "checkbox": - localStorage.setItem(event.target.id, event.target.checked); - break; - case "select-one": - localStorage.setItem(event.target.id, event.target.selectedIndex); - break; - default: - console.warn("Unresolved element type"); - } - }) - ); -}; - -const load_settings_localstorage = async () => { - settings_ids = ["switch", "model", "jailbreak"]; - settings_elements = settings_ids.map((id) => document.getElementById(id)); - settings_elements.map((element) => { - if (localStorage.getItem(element.id)) { - switch (element.type) { - case "checkbox": - element.checked = localStorage.getItem(element.id) === "true"; - break; - case "select-one": - element.selectedIndex = parseInt(localStorage.getItem(element.id)); - break; - default: - console.warn("Unresolved element type"); - } - } - }); -}; - -// Theme storage for recurring viewers -const storeTheme = 
function (theme) { - localStorage.setItem("theme", theme); -}; - -// set theme when visitor returns -const setTheme = function () { - const activeTheme = localStorage.getItem("theme"); - colorThemes.forEach((themeOption) => { - if (themeOption.id === activeTheme) { - themeOption.checked = true; - } - }); - // fallback for no :has() support - document.documentElement.className = activeTheme; -}; - -colorThemes.forEach((themeOption) => { - themeOption.addEventListener("click", () => { - storeTheme(themeOption.id); - // fallback for no :has() support - document.documentElement.className = themeOption.id; - }); -}); - -document.onload = setTheme(); diff --git a/client/js/main.js b/client/js/main.js new file mode 100644 index 00000000..bd4c0dbe --- /dev/null +++ b/client/js/main.js @@ -0,0 +1,201 @@ +// Minimal entrypoint (ES module) — wires UI to api/store/ui/utils modules. + +import { streamConversation } from './api.js'; +import * as store from './store.js'; +import { + renderUserMessage, + createAssistantPlaceholder, + renderAssistantChunk, + clearMessages, + showError, + scrollToBottom, + renderConversationList, +} from './ui.js'; +import { message_id, uuid, resizeTextarea } from './utils.js'; + +let currentAbort = null; + +// Enable a client-side mock mode for testing the UI without a backend/API key. +// Activate by visiting the app URL with `#local` (e.g. http://localhost:1338/chat/#local) +const MOCK_MODE = (typeof location !== 'undefined' && location.hash && location.hash.includes('local')); + +async function handleSend() { + const inputEl = document.getElementById('message-input'); + if (!inputEl) return; + const text = inputEl.value.trim(); + if (!text) return; + + inputEl.value = ''; + resizeTextarea(inputEl); + + const convId = window.conversation_id || uuid(); + store.addConversation(convId, convId); + store.addMessage(convId, 'user', text); + + const token = message_id(); + renderUserMessage(token, text); + + createAssistantPlaceholder(token); + + if (currentAbort) currentAbort.abort(); + currentAbort = new AbortController(); + + const payload = { + conversation_id: convId, + action: '_ask', + model: document.getElementById('model')?.value || 'default', + jailbreak: document.getElementById('jailbreak')?.value || 'false', + meta: { + id: message_id(), + content: { + conversation: (await store.getConversation(convId)).messages, + internet_access: document.getElementById('switch')?.checked || false, + content_type: 'text', + parts: [ { content: text, role: 'user' } ], + }, + }, + }; + // If MOCK_MODE is active, simulate a streaming assistant response locally + let acc = ''; + if (MOCK_MODE) { + const simulated = `Echo: ${text}\n\n(This is a local UI-only simulated response.)`; + // simulate streaming in small chunks + const chunks = []; + for (let i = 0; i < simulated.length; i += 20) chunks.push(simulated.slice(i, i + 20)); + + try { + for (const c of chunks) { + if (currentAbort && currentAbort.signal.aborted) throw new DOMException('Aborted', 'AbortError'); + await new Promise(r => setTimeout(r, 120)); + acc += c; + renderAssistantChunk(token, acc); + } + store.addMessage(convId, 'assistant', acc); + } catch (err) { + if (err.name === 'AbortError') { + renderAssistantChunk(token, acc + ' [aborted]'); + } else { + showError('Local mock failed'); + console.error(err); + renderAssistantChunk(token, acc + ' [error]'); + } + } finally { + currentAbort = null; + // force scroll at end so user sees final content + scrollToBottom(true); + } + return; + } + + try { + await 
streamConversation(payload, (chunk) => { + acc += chunk; + renderAssistantChunk(token, acc); + }, currentAbort.signal); + + store.addMessage(convId, 'assistant', acc); + } catch (err) { + if (err.name === 'AbortError') { + renderAssistantChunk(token, acc + ' [aborted]'); + } else { + showError('Failed to get response from server'); + console.error(err); + renderAssistantChunk(token, acc + ' [error]'); + } + } finally { + currentAbort = null; + scrollToBottom(); + } +} + +function handleCancel() { + if (currentAbort) currentAbort.abort(); +} + +async function setConversation(id, conv) { + window.conversation_id = id; + clearMessages(); + if (!conv) conv = await store.getConversation(id); + for (const m of conv.messages) { + if (m.role === 'user') { + const t = message_id(); + renderUserMessage(t, m.content); + } else { + const t = message_id(); + createAssistantPlaceholder(t); + renderAssistantChunk(t, m.content); + } + } +} + +export async function init() { + const sendBtn = document.getElementById('send-button'); + const cancelBtn = document.getElementById('cancelButton'); + const inputEl = document.getElementById('message-input'); + + if (sendBtn) sendBtn.addEventListener('click', () => handleSend()); + if (cancelBtn) cancelBtn.addEventListener('click', () => handleCancel()); + if (inputEl) { + inputEl.addEventListener('keydown', (e) => { + if (e.key === 'Enter' && !e.shiftKey) { + e.preventDefault(); + handleSend(); + } + }); + } + + // render into the dedicated list container; this keeps the New Conversation + // button and spinner intact (they live in #conversations) + const listEl = document.getElementById('conversation-list') || document.getElementById('conversations'); + const handlers = { + onSelect: async (id) => { + const c = await store.getConversation(id); + if (c) setConversation(id, c); + }, + onDelete: async (id) => { + await store.deleteConversation(id); + const l2 = await store.listConversations(); + if (listEl) renderConversationList(listEl, l2, handlers); + }, + onShowOption: (id) => { + console.log('show options for', id); + } + }; + + if (listEl) { + const list = await store.listConversations(); + renderConversationList(listEl, list, handlers); + } + + // focus the input so mobile/desktop shows the input area immediately + if (inputEl) { + try { inputEl.focus(); } catch(e) { /* ignore */ } + } + + // wire header buttons that previously used inline onclick attributes + const newBtn = document.getElementById('new-convo-button'); + if (newBtn) { + newBtn.addEventListener('click', async () => { + const id = uuid(); + window.conversation_id = id; + store.addConversation(id, id); + clearMessages(); + const list = await store.listConversations(); + if (listEl) renderConversationList(listEl, list, handlers); + // focus input after creating a new conversation + if (inputEl) { try { inputEl.focus(); } catch(e) {} } + }); + } + + const clearBtn = document.getElementById('clear-conversations-button'); + if (clearBtn) { + clearBtn.addEventListener('click', async () => { + store.clearConversations(); + clearMessages(); + if (listEl) renderConversationList(listEl, [], handlers); + }); + } +} + +// auto-init on load +window.addEventListener('load', () => { init().catch(console.error); }); \ No newline at end of file diff --git a/client/js/store.js b/client/js/store.js new file mode 100644 index 00000000..0f37a91e --- /dev/null +++ b/client/js/store.js @@ -0,0 +1,141 @@ +// Client-side conversation storage (localStorage fallback to in-memory). 
+// Exports: getConversation, saveConversation, addConversation, addMessage, +// listConversations, deleteConversation, clearConversations + +const PREFIX = 'conv:'; +const inMemory = new Map(); + +function storageAvailable() { + try { + const testKey = '__storage_test__'; + window.localStorage.setItem(testKey, testKey); + window.localStorage.removeItem(testKey); + return true; + } catch (e) { + return false; + } +} + +function key(id) { + return `${PREFIX}${id}`; +} + + +function safeParse(raw) { + try { return JSON.parse(raw); } catch (e) { return null; } +} + +function readRaw(k) { + if (storageAvailable()) { + return localStorage.getItem(k); + } + return inMemory.get(k) ?? null; +} + +function writeRaw(k, v) { + if (storageAvailable()) { + localStorage.setItem(k, v); + return; + } + inMemory.set(k, v); +} + +/** + * Get conversation object by id. + * Returns { id, title, messages: [] } or a fresh skeleton if missing. + */ +export function getConversation(id) { + if (!id) return { id: null, title: null, messages: [] }; + const raw = readRaw(key(id)); + let conv = safeParse(raw); + if (!conv) { + // return skeleton when there is no stored conversation in the new format + return { id, title: id, messages: [] }; + } + // expected new-format shape: messages is an array + conv.messages = Array.isArray(conv.messages) ? conv.messages : []; + return { id: conv.id || id, title: conv.title || id, messages: conv.messages }; +} + +/** Persist a full conversation object */ +export function saveConversation(conv) { + if (!conv || !conv.id) throw new Error('Conversation must have an id'); + const out = { + id: conv.id, + title: conv.title || conv.id, + messages: Array.isArray(conv.messages) ? conv.messages : [], + created_at: conv.created_at || Date.now(), + updated_at: Date.now(), + }; + writeRaw(key(conv.id), JSON.stringify(out)); +} + +/** Create a conversation if missing */ +export function addConversation(id, title = null) { + if (!id) throw new Error('id required'); + const existing = getConversation(id); + if (existing && existing.messages && existing.messages.length) return existing; + const conv = { id, title: title || id, messages: [], created_at: Date.now(), updated_at: Date.now() }; + saveConversation(conv); + return conv; +} + +/** Append a message to a conversation and persist it. + * message: role: 'user'|'assistant'|'system', content: string + */ +export function addMessage(id, role, content) { + if (!id) throw new Error('Conversation id required'); + const conv = getConversation(id); + const msg = { role: role || 'user', content: (content == null ? 
'' : content), ts: Date.now() };
+  conv.messages.push(msg);
+  saveConversation(conv);
+  return msg;
+}
+
+/** List all stored conversations (returns array of conversation objects) */
+export function listConversations() {
+  const out = [];
+  if (storageAvailable()) {
+    for (let i = 0; i < localStorage.length; i++) {
+      const k = localStorage.key(i);
+      if (!k || !k.startsWith(PREFIX)) continue;
+      const conv = safeParse(localStorage.getItem(k));
+      if (conv && conv.id) out.push(conv);
+    }
+  } else {
+    for (const [k, v] of inMemory.entries()) {
+      if (!k.startsWith(PREFIX)) continue;
+      const conv = safeParse(v);
+      if (conv && conv.id) out.push(conv);
+    }
+  }
+  out.sort((a, b) => (b.updated_at || 0) - (a.updated_at || 0));
+  return out;
+}
+
+/** Delete single conversation */
+export function deleteConversation(id) {
+  if (!id) return false;
+  if (storageAvailable()) localStorage.removeItem(key(id));
+  else inMemory.delete(key(id));
+  return true;
+}
+
+/** Remove all conversations stored under the prefix */
+export function clearConversations() {
+  if (storageAvailable()) {
+    const toRemove = [];
+    for (let i = 0; i < localStorage.length; i++) {
+      const k = localStorage.key(i);
+      if (k && k.startsWith(PREFIX)) toRemove.push(k);
+    }
+    toRemove.forEach(k => localStorage.removeItem(k));
+  } else {
+    for (const k of Array.from(inMemory.keys())) {
+      if (k.startsWith(PREFIX)) inMemory.delete(k);
+    }
+  }
+  return true;
+}
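+
+// Usage sketch (illustrative ids and titles):
+//   addConversation('abc', 'First chat');
+//   addMessage('abc', 'user', 'hello');
+//   getConversation('abc').messages.length; // 1
+//   listConversations();                    // sorted newest-first by updated_at
+//   deleteConversation('abc');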
+
+// Legacy migration removed: this store only supports the new `conv:` format.
diff --git a/client/js/ui.js b/client/js/ui.js
new file mode 100644
index 00000000..6334e1c6
--- /dev/null
+++ b/client/js/ui.js
@@ -0,0 +1,200 @@
+// UI helpers used by main.js: renderUserMessage, createAssistantPlaceholder,
+// renderAssistantChunk, clearMessages, showError, scrollToBottom
+
+function getMessagesBox() {
+  return document.getElementById('messages');
+}
+
+function safeMarkdownRender(text) {
+  if (window.markdownit) {
+    try { return window.markdownit().render(text); } catch (e) { /* fallthrough */ }
+  }
+  // very small fallback: escape HTML and replace newlines
+  const esc = String(text)
+    .replace(/&/g, '&amp;')
+    .replace(/</g, '&lt;')
+    .replace(/>/g, '&gt;');
+  return esc.replace(/\n/g, '<br>');
+}
+
+// scrollToBottom(force=false)
+// If force is true, always jump to bottom. If force is false (default), only
+// auto-scroll when the user is already near the bottom — this lets users
+// scroll up to read previous messages without the UI fighting their scroll.
+export function scrollToBottom(force = false) {
+  const box = getMessagesBox();
+  if (!box) return;
+  try {
+    // Only auto-scroll when the messages container actually overflows (i.e.
+    // there is content to scroll). This prevents forcing scroll when the
+    // content fits the container (common on initial render).
+    if (box.scrollHeight <= box.clientHeight && !force) return;
+
+    const distanceFromBottom = box.scrollHeight - (box.scrollTop + box.clientHeight);
+    // if user is within 120px of the bottom, consider them "at bottom" and
+    // auto-scroll. Otherwise, don't change their scroll position unless forced.
+    if (force || distanceFromBottom < 120) {
+      box.scrollTop = box.scrollHeight;
+    }
+  } catch (e) {
+    // fallback to always scrolling if something unexpected happens
+    box.scrollTop = box.scrollHeight;
+  }
+}
+
+export function clearMessages() {
+  const box = getMessagesBox();
+  if (!box) return;
+  box.innerHTML = '';
+}
+
+export function renderUserMessage(token, text, user_image_html = '') {
+  const box = getMessagesBox();
+  if (!box) return;
+  const wrapper = document.createElement('div');
+  wrapper.className = 'message user';
+  wrapper.id = `user_${token}`;
+  wrapper.innerHTML = `
+    <div class="user">${user_image_html}</div>
+    <div class="content">${safeMarkdownRender(text)}</div>
+  `;
+  box.appendChild(wrapper);
+  scrollToBottom();
+  return wrapper;
+}
+
+export function createAssistantPlaceholder(token, gpt_image_html = '') {
+  const box = getMessagesBox();
+  if (!box) return;
+  const wrapper = document.createElement('div');
+  wrapper.className = 'message assistant';
+  wrapper.id = `gpt_${token}`;
+  // store accumulated text in data attribute
+  wrapper.dataset.text = '';
+  wrapper.innerHTML = `
+    <div class="user">${gpt_image_html}</div>
+    <div class="content"></div>
+ `; + box.appendChild(wrapper); + scrollToBottom(); + return wrapper; +} + +export function renderAssistantChunk(token, chunk) { + const el = document.getElementById(`gpt_${token}`); + if (!el) return; + // accumulate plain text + const prev = el.dataset.text || ''; + const combined = prev + (chunk || ''); + el.dataset.text = combined; + // render markdown/html + el.querySelector('.content').innerHTML = safeMarkdownRender(combined); + // syntax highlight if hljs is present + try { + if (window.hljs) { + el.querySelectorAll('pre code').forEach(block => { + try { window.hljs.highlightElement(block); } catch (e) { /* ignore */ } + }); + } + } catch (e) { /* ignore */ } + scrollToBottom(); +} + +export function showError(message) { + const box = getMessagesBox(); + if (!box) return; + const wrapper = document.createElement('div'); + wrapper.className = 'message error'; + wrapper.innerText = message; + box.appendChild(wrapper); + scrollToBottom(); +} + +// Render a conversation list into a container element using programmatic +// event listeners (replaces inline onclick HTML generation). +// conversations: array of { id, title, messages } +// handlers: { onSelect(id), onDelete(id), onShowOption(id), onHideOption(id) } +export function renderConversationList(container, conversations, handlers = {}) { + if (!container) return; + container.innerHTML = ''; + // ensure the container has a predictable layout for the list + container.classList.add('conversation-list-items'); + // if there are no conversations, show a small placeholder so users + // can tell the list is intentionally empty (and the New Conversation + // button above remains visible). + if (!Array.isArray(conversations) || conversations.length === 0) { + const empty = document.createElement('div'); + empty.className = 'no-convos'; + empty.textContent = 'No conversations yet — click "New Conversation" to start.'; + container.appendChild(empty); + return; + } + conversations.forEach((conv) => { + const id = conv.id; + const item = document.createElement('div'); + item.className = 'convo'; + item.id = `convo-${id}`; + + // left column (click selects conversation) + const left = document.createElement('div'); + left.className = 'left'; + const icon = document.createElement('i'); + icon.className = 'fa-regular fa-comments'; + left.appendChild(icon); + const span = document.createElement('span'); + span.className = 'convo-title'; + span.textContent = conv.title || id; + left.appendChild(span); + item.appendChild(left); + + // action icons (trash, confirm, cancel) + const trash = document.createElement('i'); + trash.className = 'fa-regular fa-trash'; + trash.id = `conv-${id}`; + item.appendChild(trash); + + const yes = document.createElement('i'); + yes.className = 'fa-regular fa-check'; + yes.id = `yes-${id}`; + yes.style.display = 'none'; + item.appendChild(yes); + + const no = document.createElement('i'); + no.className = 'fa-regular fa-x'; + no.id = `not-${id}`; + no.style.display = 'none'; + item.appendChild(no); + + // wire events + left.addEventListener('click', (e) => { + if (handlers.onSelect) handlers.onSelect(id); + }); + + trash.addEventListener('click', (e) => { + e.stopPropagation(); + // show confirm icons + trash.style.display = 'none'; + yes.style.display = 'inline-block'; + no.style.display = 'inline-block'; + if (handlers.onShowOption) handlers.onShowOption(id); + }); + + yes.addEventListener('click', (e) => { + e.stopPropagation(); + if (handlers.onDelete) handlers.onDelete(id); + }); + + no.addEventListener('click', (e) 
=> {
+      e.stopPropagation();
+      // hide confirm icons
+      trash.style.display = 'inline-block';
+      yes.style.display = 'none';
+      no.style.display = 'none';
+      if (handlers.onHideOption) handlers.onHideOption(id);
+    });
+
+    container.appendChild(item);
+  });
+}
+
+// Additional functions or exports can go here
\ No newline at end of file
diff --git a/client/js/utils.js b/client/js/utils.js
new file mode 100644
index 00000000..f22183a0
--- /dev/null
+++ b/client/js/utils.js
@@ -0,0 +1,65 @@
+// Small utility helpers used by main.js / ui.js / api.js
+
+// Generate a UUID v4 (browser-friendly)
+export function uuid() {
+  if (typeof crypto !== 'undefined' && crypto.getRandomValues) {
+    // RFC4122 version 4 compliant
+    const bytes = new Uint8Array(16);
+    crypto.getRandomValues(bytes);
+    bytes[6] = (bytes[6] & 0x0f) | 0x40;
+    bytes[8] = (bytes[8] & 0x3f) | 0x80;
+    const hex = Array.from(bytes).map(b => b.toString(16).padStart(2, '0')).join('');
+    return `${hex.slice(0,8)}-${hex.slice(8,12)}-${hex.slice(12,16)}-${hex.slice(16,20)}-${hex.slice(20)}`;
+  }
+  // fallback
+  return 'xxxxxxxx-xxxx-4xxx-yxxx-'.replace(/[xy]/g, c => {
+    const r = (Math.random() * 16) | 0;
+    const v = c === 'x' ? r : (r & 0x3) | 0x8;
+    return v.toString(16);
+  }) + Date.now().toString(16).slice(-6);
+}
+
+// Short message id used for DOM ids
+export function message_id() {
+  return 'm_' + Math.random().toString(36).slice(2, 9) + '_' + Date.now().toString(36);
+}
+
+// Simple text -> safe html / newline formatting fallback (used by ui.safeMarkdownRender)
+export function format(text) {
+  if (text == null) return '';
+  const s = String(text);
+  return s
+    .replace(/&/g, '&amp;')
+    .replace(/</g, '&lt;')
+    .replace(/>/g, '&gt;')
+    .replace(/\r\n|\r|\n/g, '<br>');
+}
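+
+// Examples (illustrative):
+//   format('a < b\nok') // -> 'a &lt; b<br>ok'
+//   h2a('0x6869')       // -> 'hi'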
+
+// Resize a textarea element to fit content (pass element)
+export function resizeTextarea(el) {
+  if (!el) return;
+  el.style.height = 'auto';
+  el.style.height = Math.min(el.scrollHeight, 800) + 'px';
+}
+
+// hex (0x..) or plain hex string to ascii
+export function h2a(hex) {
+  if (!hex) return '';
+  // strip 0x prefix if present
+  const h = hex.startsWith('0x') ? hex.slice(2) : hex;
+  let out = '';
+  for (let i = 0; i < h.length; i += 2) {
+    const byte = parseInt(h.substr(i, 2), 16);
+    if (isNaN(byte)) continue;
+    out += String.fromCharCode(byte);
+  }
+  return out;
+}
+
+// expose small shims for legacy non-module code that expects globals
+if (typeof window !== 'undefined') {
+  window.uuid = window.uuid || uuid;
+  window.message_id = window.message_id || message_id;
+  window.formatText = window.formatText || format;
+  window.resizeTextarea = window.resizeTextarea || resizeTextarea;
+}
\ No newline at end of file
diff --git a/server/app.py b/server/app.py
index 4490d8d8..0af74a6d 100644
--- a/server/app.py
+++ b/server/app.py
@@ -1,3 +1,5 @@
+# This file initializes the Flask application for the server.
+
 from flask import Flask

 app = Flask(__name__, template_folder='./../client/html')

From b768bf6805e8c644c48fca2fdbd89dd1215ba519 Mon Sep 17 00:00:00 2001
From: KennyAngelikas
Date: Thu, 6 Nov 2025 09:45:39 -0500
Subject: [PATCH 4/5] the gemini api is working here

---
 client/js/api.js  |  59 +++++++++--
 config.json       |   2 +-
 server/backend.py | 242 ++++++++++++++++++++++++++++++++++++----------
 3 files changed, 244 insertions(+), 59 deletions(-)

diff --git a/client/js/api.js b/client/js/api.js
index 5e6fe622..75f36e67 100644
--- a/client/js/api.js
+++ b/client/js/api.js
@@ -28,6 +28,11 @@ export async function streamConversation(payload, onChunk, signal) {
   const decoder = new TextDecoder();
   let finalText = '';

+  // We'll parse Server-Sent Events (SSE) framed as one or more 'data: ...' lines
+  // separated by a blank line (\n\n). The server emits JSON payloads in
+  // each data: event in the form {"text": "..."}.
+  let buffer = '';
+
   try {
     while (true) {
       const { value, done } = await reader.read();
@@ -36,14 +41,45 @@ export async function streamConversation(payload, onChunk, signal) {
       const chunk = decoder.decode(value, { stream: true });

       // Basic protection: detect common HTML/CF challenge responses and convert to readable text
-      const safeChunk = chunk.includes('<title>Attention Required')
-        ? 'Error: Cloudflare/edge returned an HTML challenge. Refresh the page or check the server.'
-        : chunk;
+      if (chunk.includes('<title>Attention Required')) {
+        const msg = 'Error: Cloudflare/edge returned an HTML challenge.
Refresh the page or check the server.';
+        finalText += msg;
+        try { if (typeof onChunk === 'function') onChunk(msg); } catch (e) { /* ignore */ }
+        continue;
+      }
+
+      buffer += chunk;
+
+      // Process complete SSE events (separated by \n\n)
+      while (true) {
+        const idx = buffer.indexOf('\n\n');
+        if (idx === -1) break;
+        const rawEvent = buffer.slice(0, idx);
+        buffer = buffer.slice(idx + 2);
+
+        // Extract data: lines (may be multiple) and concatenate their payloads
+        const lines = rawEvent.split(/\r?\n/);
+        let dataPayload = '';
+        for (const line of lines) {
+          if (line.startsWith('data:')) {
+            dataPayload += line.slice(5).trim();
+          }
+        }

-      finalText += safeChunk;
+        if (!dataPayload) continue;

-      // fire UI callback, ignore errors from the callback
-      try { if (typeof onChunk === 'function') onChunk(safeChunk); } catch (e) { /* ignore */ }
+        // Try parsing JSON payloads emitted by the server: {"text":"..."}
+        let text = dataPayload;
+        try {
+          const parsed = JSON.parse(dataPayload);
+          if (parsed && typeof parsed.text === 'string') text = parsed.text;
+        } catch (e) {
+          // not JSON — keep raw payload
+        }
+
+        finalText += text;
+        try { if (typeof onChunk === 'function') onChunk(text); } catch (e) { /* ignore */ }
+      }
     }
   } catch (err) {
     // Propagate AbortError to allow callers to detect cancellation
     throw err;
   } finally {
     try { reader.releaseLock(); } catch (e) { /* ignore */ }
   }

+  // if any leftover buffer contains text (no trailing \n\n), try to process it
+  if (buffer) {
+    let text = buffer;
+    try {
+      const parsed = JSON.parse(buffer);
+      if (parsed && typeof parsed.text === 'string') text = parsed.text;
+    } catch (e) { /* ignore */ }
+    finalText += text;
+    try { if (typeof onChunk === 'function') onChunk(text); } catch (e) { /* ignore */ }
+  }
+
   return finalText;
 }
\ No newline at end of file
diff --git a/config.json b/config.json
index 87580ada..b8a45741 100644
--- a/config.json
+++ b/config.json
@@ -8,7 +8,7 @@
     "openai_api_base": "https://api.openai.com",

     "proxy": {
-        "enable": true,
+        "enable": false,
         "http": "127.0.0.1:7890",
         "https": "127.0.0.1:7890"
     }
diff --git a/server/backend.py b/server/backend.py
index 18c7f231..d1db2f1b 100644
--- a/server/backend.py
+++ b/server/backend.py
@@ -3,8 +3,8 @@
 from flask import request
 from hashlib import sha256
 from datetime import datetime
-from requests import get
-from requests import post
+import requests
 from json import loads
+from json import dumps
 import os
@@ -16,6 +15,8 @@ def __init__(self, app, config: dict) -> None:
         self.app = app
         self.openai_key = os.getenv("OPENAI_API_KEY") or config['openai_key']
         self.openai_api_base = os.getenv("OPENAI_API_BASE") or config['openai_api_base']
+        # optional Gemini key — when present we'll call Gemini instead of OpenAI
+        self.gemini_key = os.getenv("GEMINI_API_KEY") or config.get('gemini_key')
         self.proxy = config['proxy']
         self.routes = {
             '/backend-api/v2/conversation': {
@@ -33,12 +34,31 @@ def _conversation(self):
         current_date = datetime.now().strftime("%Y-%m-%d")
         system_message = f'You are ChatGPT also known as ChatGPT, a large language model trained by OpenAI. Strictly follow the users instructions. Knowledge cutoff: 2021-09-01 Current date: {current_date}'

+        # Build proxies dict if proxy enabled in config. We'll prefer
+        # an explicit proxy from config.json but we create a session
+        # with trust_env=False to avoid using system environment proxy
+        # variables like HTTP_PROXY/HTTPS_PROXY.
+ proxies = None + if self.proxy.get('enable'): + proxies = { + 'http': self.proxy.get('http'), + 'https': self.proxy.get('https'), + } + + session = requests.Session() + session.trust_env = False + extra = [] if internet_access: - search = get('https://ddg-api.herokuapp.com/search', params={ - 'query': prompt["content"], - 'limit': 3, - }) + search = session.get( + 'https://ddg-api.herokuapp.com/search', + params={ + 'query': prompt["content"], + 'limit': 3, + }, + proxies=proxies, + timeout=10, + ) blob = '' @@ -55,58 +75,176 @@ def _conversation(self): extra + special_instructions[jailbreak] + \ _conversation + [prompt] - url = f"{self.openai_api_base}/v1/chat/completions" + # If a Gemini key is configured, call Gemini streaming endpoint. + if self.gemini_key: + # Map internal messages to Gemini 'contents' array. We skip the + # system message here because it will be passed as systemInstruction. + contents = [] + for msg in conversation: + role = msg.get('role', 'user') + if role == 'system': + continue + mapped_role = 'user' if role == 'user' else 'model' + contents.append({ + 'role': mapped_role, + 'parts': [{'text': msg.get('content', '')}] + }) - proxies = None - if self.proxy['enable']: - proxies = { - 'http': self.proxy['http'], - 'https': self.proxy['https'], - } + model = request.json.get('model', 'gemini-2.5-flash') + url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:streamGenerateContent?alt=sse" - gpt_resp = post( - url = url, - proxies = proxies, headers = { - 'Authorization': 'Bearer %s' % self.openai_key - }, - json = { - 'model' : request.json['model'], - 'messages' : conversation, - 'stream' : True - }, - stream = True - ) - - if gpt_resp.status_code >= 400: - error_data =gpt_resp.json().get('error', {}) - error_code = error_data.get('code', None) - error_message = error_data.get('message', "An error occurred") - return { - 'successs': False, - 'error_code': error_code, - 'message': error_message, - 'status_code': gpt_resp.status_code - }, gpt_resp.status_code - - def stream(): - for chunk in gpt_resp.iter_lines(): - try: - decoded_line = loads(chunk.decode("utf-8").split("data: ")[1]) - token = decoded_line["choices"][0]['delta'].get('content') + 'Content-Type': 'application/json', + 'x-goog-api-key': self.gemini_key + } - if token != None: - yield token - - except GeneratorExit: - break + body = { + 'contents': contents, + 'systemInstruction': {'parts': [{'text': system_message}]}, + 'generationConfig': request.json.get('generationConfig', {}) + } + # Allow a configurable fallback model (env or default) when the + # requested model is not available. This helps UX when the UI + # sends an alias or unsupported model name. + fallback_model = os.getenv('GEMINI_FALLBACK_MODEL') or 'gemini-2.5-flash' + + gpt_resp = session.post( + url, + headers = headers, + json = body, + proxies = proxies, + stream = True, + timeout = 60, + ) + + # If we got a 404 (model not found) and the model isn't already + # the fallback, retry once with the fallback model. + if gpt_resp.status_code == 404 and model != fallback_model: + try: + err = gpt_resp.json() + except Exception: + err = gpt_resp.text + print(f"Gemini model {model} not found (404). 
Retrying with fallback {fallback_model}: {err}")
+                # rebuild URL for fallback
+                fallback_url = f"https://generativelanguage.googleapis.com/v1beta/models/{fallback_model}:streamGenerateContent?alt=sse"
+                gpt_resp = session.post(
+                    fallback_url,
+                    headers = headers,
+                    json = body,
+                    proxies = proxies,
+                    stream = True,
+                    timeout = 60,
+                )
+
+            if gpt_resp.status_code >= 400:
+                try:
+                    err = gpt_resp.json()
+                except Exception:
+                    err = gpt_resp.text
+                return {
+                    'success': False,
+                    'message': f'Gemini request failed: {gpt_resp.status_code} {err}'
+                }, gpt_resp.status_code
+
+            def stream():
+                try:
+                    for raw_line in gpt_resp.iter_lines(decode_unicode=True):
+                        if not raw_line:
+                            continue
+                        line = raw_line.strip()
+                        # SSE format: lines start with 'data:'
+                        if line.startswith('data:'):
+                            payload_str = line.split('data:', 1)[1].strip()
+                            # some SSE implementations send '[DONE]' or empty data
+                            if payload_str in ('[DONE]', ''):
+                                continue
+                            try:
+                                payload = loads(payload_str)
+                            except Exception:
+                                # non-JSON data; skip
+                                continue
+
+                            candidates = payload.get('candidates', [])
+                            for cand in candidates:
+                                content = cand.get('content', {})
+                                parts = content.get('parts', [])
+                                for p in parts:
+                                    text = p.get('text')
+                                    if text:
+                                        # Emit a proper SSE 'data:' framed event so
+                                        # clients reading the response as an Event
+                                        # Stream (or raw fetch stream) receive
+                                        # complete events. We JSON-encode the
+                                        # payload to safely transport newlines.
+                                        try:
+                                            s = dumps({'text': text})
+                                        except Exception:
+                                            s = dumps({'text': str(text)})
+                                        yield f"data: {s}\n\n"
+                except GeneratorExit:
+                    return
+                except Exception as e:
+                    print('Gemini stream error:', e)
+                    return
+
+            return self.app.response_class(stream(), mimetype='text/event-stream')
+
+        # ----------------------------
+        # OpenAI path (commented out while using Gemini)
+        # ----------------------------
+        # url = f"{self.openai_api_base}/v1/chat/completions"
+        #
+        # gpt_resp = post(
+        #     url = url,
+        #     proxies = proxies,
+        #     headers = {
+        #         'Authorization': 'Bearer %s' % self.openai_key
+        #     },
+        #     json = {
+        #         'model' : request.json['model'],
+        #         'messages' : conversation,
+        #         'stream' : True
+        #     },
+        #     stream = True
+        # )
+        #
+        # if gpt_resp.status_code >= 400:
+        #     error_data = gpt_resp.json().get('error', {})
+        #     error_code = error_data.get('code', None)
+        #     error_message = error_data.get('message', "An error occurred")
+        #     return {
+        #         'success': False,
+        #         'error_code': error_code,
+        #         'message': error_message,
+        #         'status_code': gpt_resp.status_code
+        #     }, gpt_resp.status_code
+        #
+        # def stream_openai():
+        #     for chunk in gpt_resp.iter_lines():
+        #         try:
+        #             decoded_line = loads(chunk.decode("utf-8").split("data: ")[1])
+        #             token = decoded_line["choices"][0]['delta'].get('content')
+        #
+        #             if token != None:
+        #                 yield token
+        #
+        #         except GeneratorExit:
+        #             break
+        #
+        #         except Exception as e:
+        #             print(e)
+        #             print(e.__traceback__.tb_next)
+        #             continue
+        #
+        # return self.app.response_class(stream_openai(), mimetype='text/event-stream')
+
+        # If no provider available
+        return {
+            '_action': '_ask',
+            'success': False,
+            "error": "No Gemini key configured and OpenAI path is disabled."
+ }, 400 except Exception as e: print(e) From 052d4487791b424520f2d586a628144a6ffe0016 Mon Sep 17 00:00:00 2001 From: Nicholas Simon Date: Thu, 6 Nov 2025 17:27:48 -0500 Subject: [PATCH 5/5] Created Join Team Button --- .DS_Store | Bin 0 -> 6148 bytes client/css/style.css | 1069 ++++++++++++++++++++-------------------- client/html/index.html | 328 ++++++------ client/js/main.js | 155 ++++-- 4 files changed, 828 insertions(+), 724 deletions(-) create mode 100644 .DS_Store diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..73e005c2183eb298266b2f4730cc9c8eeda3e22f GIT binary patch literal 6148 zcmeHK&2G~`5S~p#;xs};2&7(+EOAXuS_)F(lBUU_5^$&y8~}wl4y~zUhd4h}QKX#V z9e4$DtC-`P}k!%NU2%%_4n*DZXXLe@4)Vp0G60NDfM^qys3(8pBM)MotdDb;4 zXv%2WCHkANsvET{vl+vHVc>5tz~_SvWo&9(DU?SC8uhnp5H$Ka hmL0x|cc4-*XLASG)VNXz3&i{*AZaj-Vc?%K@DtS;