From 89d056c183ca538aedc541a87f89ab6fa48b1bf3 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Fri, 20 Feb 2026 22:26:32 -0800 Subject: [PATCH 01/90] chore: updates reqs.txt to take into account pip deps --- src/server/requirements.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/server/requirements.txt b/src/server/requirements.txt index e69de29..85e139a 100644 --- a/src/server/requirements.txt +++ b/src/server/requirements.txt @@ -0,0 +1,4 @@ +flask +flask-cors +numpy +./docker/actiancortex-0.1.0b1-py3-none-any.whl From 68a53acafb3c0c2778898fb0d3642e894d25efd0 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Fri, 20 Feb 2026 22:32:04 -0800 Subject: [PATCH 02/90] writes the flask bridge server --- src/server/bridge.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/src/server/bridge.py b/src/server/bridge.py index e69de29..aba864f 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -0,0 +1,34 @@ +from flask import Flask, request, jsonify +from flask_cors import CORS +from cortex import CortexClient, DistanceMetric, Filter, Field + +app = Flask(__name__) +CORS(app) + +client = CortexClient("localhost:50051") +client.connect() +COLLECTION = "user_journals" + +@app.route('/init', methods=['GET']) +def init(): + if not client.has_collection(COLLECTION): + client.create_collection(name=COLLECTION, dimension=384) + return jsonify({"status": "ready"}) + +@app.route('/upsert', methods=['POST']) +def upsert(): + data = request.json + client.upsert(COLLECTION, id=data['id'], vector=data['vector'], + payload={"text": data['text'], "user_id": data['user_id']}) + client.flush(COLLECTION) + return jsonify({"success": True}) + +@app.route('/search', methods=['POST']) +def search(): + data = request.json + user_filter = Filter().must(Field("user_id").eq(data['user_id'])) + results = client.search(COLLECTION, query=data['vector'], filter=user_filter, top_k=5, with_payload=True) + return jsonify([{"text": 
r.payload['text'], "score": r.score} for r in results]) + +if __name__ == '__main__': + app.run(port=5001) From 7a97d8b05c10ed99fefbe3d531d9f019f13cbd59 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Fri, 20 Feb 2026 22:36:45 -0800 Subject: [PATCH 03/90] tells nextjs to not bundle large ai deps --- src/client/next.config.mjs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/client/next.config.mjs b/src/client/next.config.mjs index b108e1a..bb15219 100644 --- a/src/client/next.config.mjs +++ b/src/client/next.config.mjs @@ -1,6 +1,5 @@ /** @type {import('next').NextConfig} */ const nextConfig = { - /* config options here */ + experimental: { serverComponentsExternalPackages: ['@xenova/transformers'] }, }; - export default nextConfig; From 1d3cef4a22a4227f88abf2f57586a48791e8f374 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Fri, 20 Feb 2026 22:41:13 -0800 Subject: [PATCH 04/90] chore: updates package.json and lock to add embedding deps --- src/client/package-lock.json | 869 ++++++++++++++++++++++++++++++++++- src/client/package.json | 1 + 2 files changed, 865 insertions(+), 5 deletions(-) diff --git a/src/client/package-lock.json b/src/client/package-lock.json index 06f3ad2..150c301 100644 --- a/src/client/package-lock.json +++ b/src/client/package-lock.json @@ -8,6 +8,7 @@ "name": "alleaf", "version": "0.1.0", "dependencies": { + "@xenova/transformers": "^2.17.2", "next": "16.1.6", "react": "19.2.3", "react-dom": "19.2.3" }, @@ -405,6 +406,15 @@ "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, + "node_modules/@huggingface/jinja": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/@huggingface/jinja/-/jinja-0.2.2.tgz", + "integrity": "sha512-/KPde26khDUIPkTGU82jdtTW9UAuvUTumCAbFs/7giR0SxsvZC4hru51PBvpijH6BVkHcROcvZM/lpy5h1jRRA==", + "license": "MIT", + "engines": { + "node": ">=18" + } + }, "node_modules/@humanfs/core": { "version": "0.19.1", "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", @@ 
-1129,6 +1139,70 @@ "node": ">=12.4.0" } }, + "node_modules/@protobufjs/aspromise": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", + "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/base64": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", + "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/codegen": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", + "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/eventemitter": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", + "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/fetch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", + "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", + "license": "BSD-3-Clause", + "dependencies": { + "@protobufjs/aspromise": "^1.1.1", + "@protobufjs/inquire": "^1.1.0" + } + }, + "node_modules/@protobufjs/float": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", + "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/inquire": { + "version": "1.1.0", + "resolved": 
"https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", + "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/path": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", + "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/pool": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", + "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/utf8": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", + "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==", + "license": "BSD-3-Clause" + }, "node_modules/@rtsao/scc": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@rtsao/scc/-/scc-1.1.0.tgz", @@ -1171,6 +1245,21 @@ "integrity": "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==", "dev": true }, + "node_modules/@types/long": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.2.tgz", + "integrity": "sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA==", + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "25.3.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-25.3.0.tgz", + "integrity": "sha512-4K3bqJpXpqfg2XKGK9bpDTc6xO/xoUP/RBWS7AtRMug6zZFaRekiLzjVtAoZMquxoAbzBvy5nxQ7veS5eYzf8A==", + "license": "MIT", + "dependencies": { + "undici-types": "~7.18.0" + } + }, "node_modules/@typescript-eslint/eslint-plugin": { "version": "8.56.0", 
"resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.56.0.tgz", @@ -1688,6 +1777,55 @@ "win32" ] }, + "node_modules/@xenova/transformers": { + "version": "2.17.2", + "resolved": "https://registry.npmjs.org/@xenova/transformers/-/transformers-2.17.2.tgz", + "integrity": "sha512-lZmHqzrVIkSvZdKZEx7IYY51TK0WDrC8eR0c5IMnBsO8di8are1zzw8BlLhyO2TklZKLN5UffNGs1IJwT6oOqQ==", + "license": "Apache-2.0", + "dependencies": { + "@huggingface/jinja": "^0.2.2", + "onnxruntime-web": "1.14.0", + "sharp": "^0.32.0" + }, + "optionalDependencies": { + "onnxruntime-node": "1.14.0" + } + }, + "node_modules/@xenova/transformers/node_modules/semver": { + "version": "7.7.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@xenova/transformers/node_modules/sharp": { + "version": "0.32.6", + "resolved": "https://registry.npmjs.org/sharp/-/sharp-0.32.6.tgz", + "integrity": "sha512-KyLTWwgcR9Oe4d9HwCwNM2l7+J0dUQwn/yf7S0EnTtb0eVS4RxO0eUSvxPtzT4F3SY+C4K6fqdv/DO27sJ/v/w==", + "hasInstallScript": true, + "license": "Apache-2.0", + "dependencies": { + "color": "^4.2.3", + "detect-libc": "^2.0.2", + "node-addon-api": "^6.1.0", + "prebuild-install": "^7.1.1", + "semver": "^7.5.4", + "simple-get": "^4.0.1", + "tar-fs": "^3.0.4", + "tunnel-agent": "^0.6.0" + }, + "engines": { + "node": ">=14.15.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, "node_modules/acorn": { "version": "8.16.0", "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.16.0.tgz", @@ -1955,12 +2093,138 @@ "node": ">= 0.4" } }, + "node_modules/b4a": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/b4a/-/b4a-1.8.0.tgz", + "integrity": 
"sha512-qRuSmNSkGQaHwNbM7J78Wwy+ghLEYF1zNrSeMxj4Kgw6y33O3mXcQ6Ie9fRvfU/YnxWkOchPXbaLb73TkIsfdg==", + "license": "Apache-2.0", + "peerDependencies": { + "react-native-b4a": "*" + }, + "peerDependenciesMeta": { + "react-native-b4a": { + "optional": true + } + } + }, "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", "dev": true }, + "node_modules/bare-events": { + "version": "2.8.2", + "resolved": "https://registry.npmjs.org/bare-events/-/bare-events-2.8.2.tgz", + "integrity": "sha512-riJjyv1/mHLIPX4RwiK+oW9/4c3TEUeORHKefKAKnZ5kyslbN+HXowtbaVEqt4IMUB7OXlfixcs6gsFeo/jhiQ==", + "license": "Apache-2.0", + "peerDependencies": { + "bare-abort-controller": "*" + }, + "peerDependenciesMeta": { + "bare-abort-controller": { + "optional": true + } + } + }, + "node_modules/bare-fs": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/bare-fs/-/bare-fs-4.5.4.tgz", + "integrity": "sha512-POK4oplfA7P7gqvetNmCs4CNtm9fNsx+IAh7jH7GgU0OJdge2rso0R20TNWVq6VoWcCvsTdlNDaleLHGaKx8CA==", + "license": "Apache-2.0", + "optional": true, + "dependencies": { + "bare-events": "^2.5.4", + "bare-path": "^3.0.0", + "bare-stream": "^2.6.4", + "bare-url": "^2.2.2", + "fast-fifo": "^1.3.2" + }, + "engines": { + "bare": ">=1.16.0" + }, + "peerDependencies": { + "bare-buffer": "*" + }, + "peerDependenciesMeta": { + "bare-buffer": { + "optional": true + } + } + }, + "node_modules/bare-os": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/bare-os/-/bare-os-3.6.2.tgz", + "integrity": "sha512-T+V1+1srU2qYNBmJCXZkUY5vQ0B4FSlL3QDROnKQYOqeiQR8UbjNHlPa+TIbM4cuidiN9GaTaOZgSEgsvPbh5A==", + "license": "Apache-2.0", + "optional": true, + "engines": { + "bare": ">=1.14.0" + } + }, + "node_modules/bare-path": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/bare-path/-/bare-path-3.0.0.tgz", + "integrity": "sha512-tyfW2cQcB5NN8Saijrhqn0Zh7AnFNsnczRcuWODH0eYAXBsJ5gVxAUuNr7tsHSC6IZ77cA0SitzT+s47kot8Mw==", + "license": "Apache-2.0", + "optional": true, + "dependencies": { + "bare-os": "^3.0.1" + } + }, + "node_modules/bare-stream": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/bare-stream/-/bare-stream-2.8.0.tgz", + "integrity": "sha512-reUN0M2sHRqCdG4lUK3Fw8w98eeUIZHL5c3H7Mbhk2yVBL+oofgaIp0ieLfD5QXwPCypBpmEEKU2WZKzbAk8GA==", + "license": "Apache-2.0", + "optional": true, + "dependencies": { + "streamx": "^2.21.0", + "teex": "^1.0.1" + }, + "peerDependencies": { + "bare-buffer": "*", + "bare-events": "*" + }, + "peerDependenciesMeta": { + "bare-buffer": { + "optional": true + }, + "bare-events": { + "optional": true + } + } + }, + "node_modules/bare-url": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/bare-url/-/bare-url-2.3.2.tgz", + "integrity": "sha512-ZMq4gd9ngV5aTMa5p9+UfY0b3skwhHELaDkhEHetMdX0LRkW9kzaym4oo/Eh+Ghm0CCDuMTsRIGM/ytUc1ZYmw==", + "license": "Apache-2.0", + "optional": true, + "dependencies": { + "bare-path": "^3.0.0" + } + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, "node_modules/baseline-browser-mapping": { "version": "2.10.0", "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.0.tgz", @@ -1972,6 +2236,17 @@ "node": ">=6.0.0" } }, + "node_modules/bl": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", + 
"integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "license": "MIT", + "dependencies": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, "node_modules/brace-expansion": { "version": "1.1.12", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", @@ -2027,6 +2302,30 @@ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" } }, + "node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, "node_modules/call-bind": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", @@ -2118,16 +2417,34 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, + "node_modules/chownr": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", + "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==", + "license": "ISC" + }, "node_modules/client-only": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==" }, + "node_modules/color": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/color/-/color-4.2.3.tgz", + "integrity": "sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A==", + "license": "MIT", + 
"dependencies": { + "color-convert": "^2.0.1", + "color-string": "^1.9.0" + }, + "engines": { + "node": ">=12.5.0" + } + }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, "dependencies": { "color-name": "~1.1.4" }, @@ -2138,8 +2455,17 @@ "node_modules/color-name": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/color-string": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz", + "integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==", + "license": "MIT", + "dependencies": { + "color-name": "^1.0.0", + "simple-swizzle": "^0.2.2" + } }, "node_modules/concat-map": { "version": "0.0.1", @@ -2241,6 +2567,30 @@ } } }, + "node_modules/decompress-response": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "license": "MIT", + "dependencies": { + "mimic-response": "^3.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "license": "MIT", + "engines": { + 
"node": ">=4.0.0" + } + }, "node_modules/deep-is": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", @@ -2285,7 +2635,6 @@ "version": "2.1.2", "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz", "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==", - "optional": true, "engines": { "node": ">=8" } @@ -2328,6 +2677,15 @@ "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", "dev": true }, + "node_modules/end-of-stream": { + "version": "1.4.5", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.5.tgz", + "integrity": "sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==", + "license": "MIT", + "dependencies": { + "once": "^1.4.0" + } + }, "node_modules/es-abstract": { "version": "1.24.1", "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.24.1.tgz", @@ -2927,12 +3285,36 @@ "node": ">=0.10.0" } }, + "node_modules/events-universal": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/events-universal/-/events-universal-1.0.1.tgz", + "integrity": "sha512-LUd5euvbMLpwOF8m6ivPCbhQeSiYVNb8Vs0fQ8QjXo0JTkEHpz8pxdQf0gStltaPpw0Cca8b39KxvK9cfKRiAw==", + "license": "Apache-2.0", + "dependencies": { + "bare-events": "^2.7.0" + } + }, + "node_modules/expand-template": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/expand-template/-/expand-template-2.0.3.tgz", + "integrity": "sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==", + "license": "(MIT OR WTFPL)", + "engines": { + "node": ">=6" + } + }, "node_modules/fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", "integrity": 
"sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", "dev": true }, + "node_modules/fast-fifo": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/fast-fifo/-/fast-fifo-1.3.2.tgz", + "integrity": "sha512-/d9sfos4yxzpwkDkuN7k2SqFKtYNmCTzgfEpz82x34IM9/zc8KGxQoXg1liNC/izpRM/MBdt44Nmx41ZWqk+FQ==", + "license": "MIT" + }, "node_modules/fast-glob": { "version": "3.3.1", "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.1.tgz", @@ -3035,6 +3417,12 @@ "node": ">=16" } }, + "node_modules/flatbuffers": { + "version": "1.12.0", + "resolved": "https://registry.npmjs.org/flatbuffers/-/flatbuffers-1.12.0.tgz", + "integrity": "sha512-c7CZADjRcl6j0PlvFy0ZqXQ67qSEZfrVPynmnL+2zPc+NtMvrF8Y0QceMo7QqnSPc7+uWjUIAbvCQ5WIKlMVdQ==", + "license": "SEE LICENSE IN LICENSE.txt" + }, "node_modules/flatted": { "version": "3.3.3", "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", @@ -3056,6 +3444,12 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/fs-constants": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", + "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==", + "license": "MIT" + }, "node_modules/function-bind": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", @@ -3178,6 +3572,12 @@ "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" } }, + "node_modules/github-from-package": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/github-from-package/-/github-from-package-0.0.0.tgz", + "integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==", + "license": "MIT" + }, "node_modules/glob-parent": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", @@ -3230,6 +3630,12 @@ "url": 
"https://github.com/sponsors/ljharb" } }, + "node_modules/guid-typescript": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/guid-typescript/-/guid-typescript-1.0.9.tgz", + "integrity": "sha512-Y8T4vYhEfwJOTbouREvG+3XDsjr8E3kIr7uf+JZ0BYloFsttiHU0WfvANVsR7TxNUJa/WpCnw/Ino/p+DeBhBQ==", + "license": "ISC" + }, "node_modules/has-bigints": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.1.0.tgz", @@ -3332,6 +3738,26 @@ "hermes-estree": "0.25.1" } }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, "node_modules/ignore": { "version": "5.3.2", "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", @@ -3366,6 +3792,18 @@ "node": ">=0.8.19" } }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "license": "ISC" + }, "node_modules/internal-slot": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.1.0.tgz", @@ -3397,6 +3835,12 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-arrayish": { + "version": "0.3.4", + "resolved": 
"https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.4.tgz", + "integrity": "sha512-m6UrgzFVUYawGBh1dUsWR5M2Clqic9RVXC/9f8ceNlv2IcO9j9J/z8UoCLPqtsPBFNzEpfR3xftohbfqDx8EQA==", + "license": "MIT" + }, "node_modules/is-async-function": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.1.1.tgz", @@ -3933,6 +4377,12 @@ "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", "dev": true }, + "node_modules/long": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", + "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==", + "license": "Apache-2.0" + }, "node_modules/loose-envify": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", @@ -3985,6 +4435,18 @@ "node": ">=8.6" } }, + "node_modules/mimic-response": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/minimatch": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", @@ -4001,11 +4463,16 @@ "version": "1.2.8", "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", - "dev": true, "funding": { "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/mkdirp-classic": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", + "integrity": 
"sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==", + "license": "MIT" + }, "node_modules/ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", @@ -4029,6 +4496,12 @@ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" } }, + "node_modules/napi-build-utils": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/napi-build-utils/-/napi-build-utils-2.0.0.tgz", + "integrity": "sha512-GEbrYkbfF7MoNaoh2iGG84Mnf/WZfB0GdGEsM8wz7Expx/LlWf5U8t9nvJKXSp3qr5IsEbK04cBGhol/KwOsWA==", + "license": "MIT" + }, "node_modules/napi-postinstall": { "version": "0.3.4", "resolved": "https://registry.npmjs.org/napi-postinstall/-/napi-postinstall-0.3.4.tgz", @@ -4102,6 +4575,36 @@ } } }, + "node_modules/node-abi": { + "version": "3.87.0", + "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.87.0.tgz", + "integrity": "sha512-+CGM1L1CgmtheLcBuleyYOn7NWPVu0s0EJH2C4puxgEZb9h8QpR9G2dBfZJOAUhi7VQxuBPMd0hiISWcTyiYyQ==", + "license": "MIT", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/node-abi/node_modules/semver": { + "version": "7.7.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/node-addon-api": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-6.1.0.tgz", + "integrity": "sha512-+eawOlIgy680F0kBzPUNFhMZGtJ1YmqM6l4+Crf4IkImjYrO/mqPwRMh352g23uIaQKFItcQ64I7KMaJxHgAVA==", + "license": "MIT" + }, "node_modules/node-exports-info": { "version": "1.6.0", "resolved": "https://registry.npmjs.org/node-exports-info/-/node-exports-info-1.6.0.tgz", @@ -4241,6 +4744,59 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/once": { + 
"version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onnx-proto": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/onnx-proto/-/onnx-proto-4.0.4.tgz", + "integrity": "sha512-aldMOB3HRoo6q/phyB6QRQxSt895HNNw82BNyZ2CMh4bjeKv7g/c+VpAFtJuEMVfYLMbRx61hbuqnKceLeDcDA==", + "license": "MIT", + "dependencies": { + "protobufjs": "^6.8.8" + } + }, + "node_modules/onnxruntime-common": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/onnxruntime-common/-/onnxruntime-common-1.14.0.tgz", + "integrity": "sha512-3LJpegM2iMNRX2wUmtYfeX/ytfOzNwAWKSq1HbRrKc9+uqG/FsEA0bbKZl1btQeZaXhC26l44NWpNUeXPII7Ew==", + "license": "MIT" + }, + "node_modules/onnxruntime-node": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/onnxruntime-node/-/onnxruntime-node-1.14.0.tgz", + "integrity": "sha512-5ba7TWomIV/9b6NH/1x/8QEeowsb+jBEvFzU6z0T4mNsFwdPqXeFUM7uxC6QeSRkEbWu3qEB0VMjrvzN/0S9+w==", + "license": "MIT", + "optional": true, + "os": [ + "win32", + "darwin", + "linux" + ], + "dependencies": { + "onnxruntime-common": "~1.14.0" + } + }, + "node_modules/onnxruntime-web": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/onnxruntime-web/-/onnxruntime-web-1.14.0.tgz", + "integrity": "sha512-Kcqf43UMfW8mCydVGcX9OMXI2VN17c0p6XvR7IPSZzBf/6lteBzXHvcEVWDPmCKuGombl997HgLqj91F11DzXw==", + "license": "MIT", + "dependencies": { + "flatbuffers": "^1.12.0", + "guid-typescript": "^1.0.9", + "long": "^4.0.0", + "onnx-proto": "^4.0.4", + "onnxruntime-common": "~1.14.0", + "platform": "^1.3.6" + } + }, "node_modules/optionator": { "version": "0.9.4", "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", @@ -4358,6 +4914,12 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, + "node_modules/platform": 
{ + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/platform/-/platform-1.3.6.tgz", + "integrity": "sha512-fnWVljUchTro6RiCFvCXBbNhJc2NijN7oIQxbwsyL0buWJPG85v81ehlHI9fXrJsMNgTofEoWIQeClKpgxFLrg==", + "license": "MIT" + }, "node_modules/possible-typed-array-names": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz", @@ -4394,6 +4956,61 @@ "node": "^10 || ^12 || >=14" } }, + "node_modules/prebuild-install": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.3.tgz", + "integrity": "sha512-8Mf2cbV7x1cXPUILADGI3wuhfqWvtiLA1iclTDbFRZkgRQS0NqsPZphna9V+HyTEadheuPmjaJMsbzKQFOzLug==", + "deprecated": "No longer maintained. Please contact the author of the relevant native addon; alternatives are available.", + "license": "MIT", + "dependencies": { + "detect-libc": "^2.0.0", + "expand-template": "^2.0.3", + "github-from-package": "0.0.0", + "minimist": "^1.2.3", + "mkdirp-classic": "^0.5.3", + "napi-build-utils": "^2.0.0", + "node-abi": "^3.3.0", + "pump": "^3.0.0", + "rc": "^1.2.7", + "simple-get": "^4.0.0", + "tar-fs": "^2.0.0", + "tunnel-agent": "^0.6.0" + }, + "bin": { + "prebuild-install": "bin.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/prebuild-install/node_modules/tar-fs": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.4.tgz", + "integrity": "sha512-mDAjwmZdh7LTT6pNleZ05Yt65HC3E+NiQzl672vQG38jIrehtJk/J3mNwIg+vShQPcLF/LV7CMnDW6vjj6sfYQ==", + "license": "MIT", + "dependencies": { + "chownr": "^1.1.1", + "mkdirp-classic": "^0.5.2", + "pump": "^3.0.0", + "tar-stream": "^2.1.4" + } + }, + "node_modules/prebuild-install/node_modules/tar-stream": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", + "integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==", + "license": 
"MIT", + "dependencies": { + "bl": "^4.0.3", + "end-of-stream": "^1.4.1", + "fs-constants": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^3.1.1" + }, + "engines": { + "node": ">=6" + } + }, "node_modules/prelude-ls": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", @@ -4414,6 +5031,42 @@ "react-is": "^16.13.1" } }, + "node_modules/protobufjs": { + "version": "6.11.4", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.4.tgz", + "integrity": "sha512-5kQWPaJHi1WoCpjTGszzQ32PG2F4+wRY6BmAT4Vfw56Q2FZ4YZzK20xUYQH4YkfehY1e6QSICrJquM6xXZNcrw==", + "hasInstallScript": true, + "license": "BSD-3-Clause", + "dependencies": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + "@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + "@types/long": "^4.0.1", + "@types/node": ">=13.7.0", + "long": "^4.0.0" + }, + "bin": { + "pbjs": "bin/pbjs", + "pbts": "bin/pbts" + } + }, + "node_modules/pump": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.3.tgz", + "integrity": "sha512-todwxLMY7/heScKmntwQG8CXVkWUOdYxIvY2s0VWAAMh/nd8SoYiRaKjlr7+iCs984f2P8zvrfWcDDYVb73NfA==", + "license": "MIT", + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, "node_modules/punycode": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", @@ -4443,6 +5096,30 @@ } ] }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "license": "(BSD-2-Clause OR MIT OR Apache-2.0)", + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", 
+ "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + "node_modules/rc/node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/react": { "version": "19.2.3", "resolved": "https://registry.npmjs.org/react/-/react-19.2.3.tgz", @@ -4468,6 +5145,20 @@ "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", "dev": true }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, "node_modules/reflect.getprototypeof": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz", @@ -4600,6 +5291,26 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, "node_modules/safe-push-apply": { "version": "1.0.0", "resolved": 
"https://registry.npmjs.org/safe-push-apply/-/safe-push-apply-1.0.0.tgz", @@ -4842,6 +5553,60 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/simple-concat": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz", + "integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/simple-get": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/simple-get/-/simple-get-4.0.1.tgz", + "integrity": "sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "decompress-response": "^6.0.0", + "once": "^1.3.1", + "simple-concat": "^1.0.0" + } + }, + "node_modules/simple-swizzle": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.4.tgz", + "integrity": "sha512-nAu1WFPQSMNr2Zn9PGSZK9AGn4t/y97lEm+MXTtUDwfP0ksAIX4nO+6ruD9Jwut4C49SB1Ws+fbXsm/yScWOHw==", + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.3.1" + } + }, "node_modules/source-map-js": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", @@ -4869,6 +5634,26 @@ "node": ">= 0.4" } }, + "node_modules/streamx": { + "version": "2.23.0", + "resolved": "https://registry.npmjs.org/streamx/-/streamx-2.23.0.tgz", + "integrity": 
"sha512-kn+e44esVfn2Fa/O0CPFcex27fjIL6MkVae0Mm6q+E6f0hWv578YCERbv+4m02cjxvDsPKLnmxral/rR6lBMAg==", + "license": "MIT", + "dependencies": { + "events-universal": "^1.0.0", + "fast-fifo": "^1.3.2", + "text-decoder": "^1.1.0" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, "node_modules/string.prototype.includes": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/string.prototype.includes/-/string.prototype.includes-2.0.1.tgz", @@ -5043,6 +5828,50 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/tar-fs": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-3.1.1.tgz", + "integrity": "sha512-LZA0oaPOc2fVo82Txf3gw+AkEd38szODlptMYejQUhndHMLQ9M059uXR+AfS7DNo0NpINvSqDsvyaCrBVkptWg==", + "license": "MIT", + "dependencies": { + "pump": "^3.0.0", + "tar-stream": "^3.1.5" + }, + "optionalDependencies": { + "bare-fs": "^4.0.1", + "bare-path": "^3.0.0" + } + }, + "node_modules/tar-stream": { + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-3.1.7.tgz", + "integrity": "sha512-qJj60CXt7IU1Ffyc3NJMjh6EkuCFej46zUqJ4J7pqYlThyd9bO0XBTmcOIhSzZJVWfsLks0+nle/j538YAW9RQ==", + "license": "MIT", + "dependencies": { + "b4a": "^1.6.4", + "fast-fifo": "^1.2.0", + "streamx": "^2.15.0" + } + }, + "node_modules/teex": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/teex/-/teex-1.0.1.tgz", + "integrity": "sha512-eYE6iEI62Ni1H8oIa7KlDU6uQBtqr4Eajni3wX7rpfXD8ysFx8z0+dri+KWEPWpBsxXfxu58x/0jvTVT1ekOSg==", + "license": "MIT", + "optional": true, + "dependencies": { + "streamx": "^2.12.5" + } + }, + "node_modules/text-decoder": { + "version": "1.2.7", + "resolved": 
"https://registry.npmjs.org/text-decoder/-/text-decoder-1.2.7.tgz", + "integrity": "sha512-vlLytXkeP4xvEq2otHeJfSQIRyWxo/oZGEbXrtEEF9Hnmrdly59sUbzZ/QgyWuLYHctCHxFF4tRQZNQ9k60ExQ==", + "license": "Apache-2.0", + "dependencies": { + "b4a": "^1.6.4" + } + }, "node_modules/tinyglobby": { "version": "0.2.15", "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", @@ -5141,6 +5970,18 @@ "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==" }, + "node_modules/tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", + "license": "Apache-2.0", + "dependencies": { + "safe-buffer": "^5.0.1" + }, + "engines": { + "node": "*" + } + }, "node_modules/type-check": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", @@ -5282,6 +6123,12 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/undici-types": { + "version": "7.18.2", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.18.2.tgz", + "integrity": "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==", + "license": "MIT" + }, "node_modules/unrs-resolver": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/unrs-resolver/-/unrs-resolver-1.11.1.tgz", @@ -5355,6 +6202,12 @@ "punycode": "^2.1.0" } }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT" + }, "node_modules/which": { "version": "2.0.2", "resolved": 
"https://registry.npmjs.org/which/-/which-2.0.2.tgz", @@ -5464,6 +6317,12 @@ "node": ">=0.10.0" } }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "license": "ISC" + }, "node_modules/yallist": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", diff --git a/src/client/package.json b/src/client/package.json index dfafe9c..503627b 100644 --- a/src/client/package.json +++ b/src/client/package.json @@ -9,6 +9,7 @@ "lint": "eslint" }, "dependencies": { + "@xenova/transformers": "^2.17.2", "next": "16.1.6", "react": "19.2.3", "react-dom": "19.2.3" From 949515df1c2d2123f2bcb66cd44d402cd93c27ea Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Fri, 20 Feb 2026 22:46:54 -0800 Subject: [PATCH 05/90] writes api routes for saving and summarizing journal entries --- src/client/app/api/journal/save/route.js | 31 +++++++++++++ src/client/app/api/journal/summary/route.js | 50 +++++++++++++++++++++ 2 files changed, 81 insertions(+) create mode 100644 src/client/app/api/journal/save/route.js create mode 100644 src/client/app/api/journal/summary/route.js diff --git a/src/client/app/api/journal/save/route.js b/src/client/app/api/journal/save/route.js new file mode 100644 index 0000000..8868869 --- /dev/null +++ b/src/client/app/api/journal/save/route.js @@ -0,0 +1,31 @@ +import { NextResponse } from 'next/server'; +import { pipeline } from '@xenova/transformers'; + +export async function POST(req) { + try { + const { text, userId } = await req.json(); + + // 1. Vectorize the journal entry + const pipe = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2'); + const output = await pipe(text, { pooling: 'mean', normalize: true }); + const vector = Array.from(output.data); + + // 2. 
Send to Python Bridge to save in Actian + const response = await fetch('http://localhost:5001/upsert', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + id: Date.now(), // Unique ID + text: text, + user_id: userId, + vector: vector + }) + }); + + const result = await response.json(); + return NextResponse.json(result); + + } catch (error) { + return NextResponse.json({ error: error.message }, { status: 500 }); + } +} diff --git a/src/client/app/api/journal/summary/route.js b/src/client/app/api/journal/summary/route.js new file mode 100644 index 0000000..e36e5b3 --- /dev/null +++ b/src/client/app/api/journal/summary/route.js @@ -0,0 +1,50 @@ +import { NextResponse } from 'next/server'; +import { pipeline } from '@xenova/transformers'; + +export async function POST(req) { + try { + const { userId, topic } = await req.json(); + + // 1. Generate the Vector locally on your CPU + const pipe = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2'); + const output = await pipe(topic, { pooling: 'mean', normalize: true }); + const vector = Array.from(output.data); + + // 2. Fetch relevant entries from your Python Bridge + const actianRes = await fetch('http://localhost:5001/search', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + vector: vector, + user_id: userId + }), + }); + + const entries = await actianRes.json(); + + // Combine the retrieved logs into one "context" string + const context = entries.map(e => e.text).join("\n---\n"); + + // 3. 
Send to Ollama for the final summary + const ollamaRes = await fetch('http://localhost:11434/api/generate', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + model: "llama3", + prompt: `Based on these journal entries:\n${context}\n\nSummarize the user's day regarding "${topic}" in one concise sentence.`, + stream: false + }), + }); + + const final = await ollamaRes.json(); + + return NextResponse.json({ + summary: final.response, + rawContext: context + }); + + } catch (error) { + console.error("Pipeline Error:", error); + return NextResponse.json({ error: "Check if Bridge and Ollama are running" }, { status: 500 }); + } +} From 324e7a0143d23ea34d25ead47cab9f3d8639d340 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 09:33:58 -0800 Subject: [PATCH 06/90] chore: updates requirements.txt --- src/server/requirements.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/server/requirements.txt b/src/server/requirements.txt index 85e139a..b6e15e1 100644 --- a/src/server/requirements.txt +++ b/src/server/requirements.txt @@ -1,4 +1,6 @@ flask flask-cors -numpy +langchain-google-genai +langgraph +pydantic ./docker/actiancortex-0.1.0b1-py3-none-any.whl From 82df968ba2dbf17c55b9992b2031c575f78e2edf Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 09:34:54 -0800 Subject: [PATCH 07/90] adds /agent/search http route to bridge.py --- src/server/bridge.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/src/server/bridge.py b/src/server/bridge.py index aba864f..4752694 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -30,5 +30,26 @@ def search(): results = client.search(COLLECTION, query=data['vector'], filter=user_filter, top_k=5, with_payload=True) return jsonify([{"text": r.payload['text'], "score": r.score} for r in results]) +# bridge.py updates +@app.route('/agent/search', methods=['POST']) +def agent_search(): + data = 
request.json + # The Reasoning Agent will send a "query_string" like + # "Find instances where the user mentions physical panic symptoms" + query_text = data.get("query") + user_id = data.get("user_id") + + # For the agent to work, we handle the embedding conversion here + # Use your existing pipeline or a utility function + vector = generate_embedding(query_text) + + results = client.search( + COLLECTION, + query=vector, + filter=Filter().must(Field("user_id").eq(user_id)), + top_k=8 + ) + return jsonify({"logs": [r.payload['text'] for r in results]}) + if __name__ == '__main__': app.run(port=5001) From 40646f91af362184cf0ad220dc8efd93161c64b6 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 09:35:50 -0800 Subject: [PATCH 08/90] writes langchain related code for the reasoning agent, archivist agent, and therapy agent --- src/server/agents.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 src/server/agents.py diff --git a/src/server/agents.py b/src/server/agents.py new file mode 100644 index 0000000..1f83335 --- /dev/null +++ b/src/server/agents.py @@ -0,0 +1,42 @@ +import os +from langchain_google_genai import ChatGoogleGenerativeAI +from langchain_core.messages import SystemMessage, HumanMessage + +# Initialize Gemini +llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro", google_api_key=os.getenv("GEMINI_API_KEY")) + +# 1. The Archivist (Vector Query Agent) +def archivist_node(state: PatientFile): + # This node calls the Bridge to get raw data + res = requests.post("http://localhost:5001/agent/search", json={ + "query": state['current_input'], + "user_id": state['user_id'] + }) + logs = res.json().get("logs", []) + return {"logs_retrieved": logs} + +# 2. The Reasoning Agent (File Builder) +def reasoning_node(state: PatientFile): + context = "\n".join(state['logs_retrieved']) + prompt = f""" + You are a Clinical Reasoning Agent. 
+ Using these journal logs: {context} + Update the 'Patient File' for this user. + Focus on: Triggers, Anxiety Levels, and Coping Success. + If information is missing, note it as 'Unknown'. + """ + response = llm.invoke([SystemMessage(content=prompt)]) + return {"clinical_file": response.content} + +# 3. The Master Therapist Agent +def therapist_node(state: PatientFile): + prompt = f""" + You are a compassionate Therapist Agent. + Use the following Patient File as your Source Truth: {state['clinical_file']} + + The user just said: {state['current_input']} + + Respond with empathy and use details from their 'File' to guide them. + """ + response = llm.invoke([SystemMessage(content=prompt)]) + return {"therapy_response": response.content} From 0452f2990487ebc0e77e73098b48b90ed0d91cde Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 09:37:12 -0800 Subject: [PATCH 09/90] writes langchain workflow --- src/server/workflow.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 src/server/workflow.py diff --git a/src/server/workflow.py b/src/server/workflow.py new file mode 100644 index 0000000..8d52904 --- /dev/null +++ b/src/server/workflow.py @@ -0,0 +1,17 @@ +from langgraph.graph import StateGraph, END + +workflow = StateGraph(PatientFile) + +# Add Nodes +workflow.add_node("archivist", archivist_node) +workflow.add_node("reasoning", reasoning_node) +workflow.add_node("therapist", therapist_node) + +# Define the Path +workflow.set_entry_point("archivist") +workflow.add_edge("archivist", "reasoning") +workflow.add_edge("reasoning", "therapist") +workflow.add_edge("therapist", END) + +# Compile the Graph +app_agent = workflow.compile() From a51b76bce96fcc1e337a33f7f6a2dd723cb4a44b Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 10:34:44 -0800 Subject: [PATCH 10/90] writes route.js for /api/therapy/chat --- src/client/app/api/therapy/chat/route.js | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create 
mode 100644 src/client/app/api/therapy/chat/route.js diff --git a/src/client/app/api/therapy/chat/route.js b/src/client/app/api/therapy/chat/route.js new file mode 100644 index 0000000..f0f09bb --- /dev/null +++ b/src/client/app/api/therapy/chat/route.js @@ -0,0 +1,17 @@ +export async function POST(req) { + const { userId, message } = await req.json(); + + // We call a new Python endpoint that runs the LangGraph workflow + const response = await fetch('http://localhost:5001/agent/run_session', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ user_id: userId, input: message }) + }); + + const data = await response.json(); + + return Response.json({ + reply: data.therapy_response, + patientFile: data.clinical_file // We return this so the UI can show the "Truth" + }); +} From 6e51e1c5c2cf077bba56449394b9fbb7906aad36 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 10:52:11 -0800 Subject: [PATCH 11/90] writes the type for a patient file --- src/server/state.py | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 src/server/state.py diff --git a/src/server/state.py b/src/server/state.py new file mode 100644 index 0000000..812d038 --- /dev/null +++ b/src/server/state.py @@ -0,0 +1,11 @@ +from typing import TypedDict, List, Annotated +from operator import add + +class PatientFile(TypedDict): + # This represents the "State" of the therapy session + user_id: str + current_input: str + clinical_file: str # The "Source Truth" file we build from journals + therapy_response: str + logs_retrieved: List[str] + iteration_count: int From ce8b1c2c0bb288fbca1a1415e8b1b63bf1e714b9 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 11:15:25 -0800 Subject: [PATCH 12/90] adds /journal/save_chat POST to bridge.py --- src/server/bridge.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/server/bridge.py b/src/server/bridge.py index 4752694..d7822ef 100644 --- 
a/src/server/bridge.py +++ b/src/server/bridge.py @@ -51,5 +51,21 @@ def agent_search(): ) return jsonify({"logs": [r.payload['text'] for r in results]}) +@app.route('/journal/save_chat', methods=['POST']) +def save_chat(): + data = request.json + # We save the summary of the chat as a new "memory" + client.upsert( + COLLECTION, + id=int(time.time()), + vector=data['vector'], + payload={ + "text": data['full_transcript'], + "user_id": data['user_id'], + "type": "session_summary" # Distinguished from daily logs + } + ) + return jsonify({"success": True}) + if __name__ == '__main__': app.run(port=5001) From c7ab5bee66ab0c66e2188bb612636d92662fed06 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 11:19:59 -0800 Subject: [PATCH 13/90] evolves state.py to cover full life cycle of therepy session --- src/server/state.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/server/state.py b/src/server/state.py index 812d038..1df3e93 100644 --- a/src/server/state.py +++ b/src/server/state.py @@ -1,11 +1,11 @@ from typing import TypedDict, List, Annotated from operator import add -class PatientFile(TypedDict): - # This represents the "State" of the therapy session +class TherapySessionState(TypedDict): user_id: str - current_input: str - clinical_file: str # The "Source Truth" file we build from journals - therapy_response: str - logs_retrieved: List[str] - iteration_count: int + session_id: str + transcript: List[dict] # The back-and-forth chat + evidence: List[str] # Raw journal logs found by the agent + patient_file: str # The "Case File" synthesized from logs + food_for_thought: str # The opening prompt + exercises: List[dict] # Step 9: The 3 final activities From 6e22785329ed710eb5dcb042fcad83b6ca6ab79a Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 11:22:50 -0800 Subject: [PATCH 14/90] docs: better documents state.py --- src/server/state.py | 19 ++++++++++++------- 1 file changed, 12 
insertions(+), 7 deletions(-) diff --git a/src/server/state.py b/src/server/state.py index 1df3e93..9c3f6b1 100644 --- a/src/server/state.py +++ b/src/server/state.py @@ -1,11 +1,16 @@ -from typing import TypedDict, List, Annotated -from operator import add +from typing import TypedDict, List, Union +from langchain_core.messages import BaseMessage class TherapySessionState(TypedDict): user_id: str session_id: str - transcript: List[dict] # The back-and-forth chat - evidence: List[str] # Raw journal logs found by the agent - patient_file: str # The "Case File" synthesized from logs - food_for_thought: str # The opening prompt - exercises: List[dict] # Step 9: The 3 final activities + # Step 5: The living conversation + transcript: List[BaseMessage] + # Step 3: Raw evidence retrieved from Actian + evidence: List[str] + # Step 4: The opening reflection + food_for_thought: str + # Step 9: The final 3 activities + exercises: List[dict] + # Internal counter for reasoning loops + iteration_count: int From 15b46d9b1f986498cd952e1192fd494f5b6424cf Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 11:23:58 -0800 Subject: [PATCH 15/90] updates agents definition for the newer 'schedule appointment' paradigm --- src/server/agents.py | 96 +++++++++++++++++++++++++++++--------------- 1 file changed, 64 insertions(+), 32 deletions(-) diff --git a/src/server/agents.py b/src/server/agents.py index 1f83335..1b6c9bc 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -1,42 +1,74 @@ import os from langchain_google_genai import ChatGoogleGenerativeAI -from langchain_core.messages import SystemMessage, HumanMessage +from langchain_core.messages import SystemMessage, HumanMessage, AIMessage +from state import TherapySessionState -# Initialize Gemini -llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro", google_api_key=os.getenv("GEMINI_API_KEY")) +# Initialize Gemini 1.5 Pro +llm = ChatGoogleGenerativeAI( + model="gemini-1.5-pro", + 
google_api_key=os.getenv("GEMINI_API_KEY"), + temperature=0.7 +) -# 1. The Archivist (Vector Query Agent) -def archivist_node(state: PatientFile): - # This node calls the Bridge to get raw data - res = requests.post("http://localhost:5001/agent/search", json={ - "query": state['current_input'], - "user_id": state['user_id'] - }) - logs = res.json().get("logs", []) - return {"logs_retrieved": logs} +# --- NODES --- -# 2. The Reasoning Agent (File Builder) -def reasoning_node(state: PatientFile): - context = "\n".join(state['logs_retrieved']) - prompt = f""" - You are a Clinical Reasoning Agent. - Using these journal logs: {context} - Update the 'Patient File' for this user. - Focus on: Triggers, Anxiety Levels, and Coping Success. - If information is missing, note it as 'Unknown'. - """ +def research_node(state: TherapySessionState): + """Steps 2-4: Pre-session research & Food for thought""" + user_id = state['user_id'] + + # Step 3: Queries to build 'Source Truth' + queries = ["current anxiety triggers", "recent sleep patterns", "mood trends"] + all_logs = [] + + # Simulated search logic (connects to your Bridge) + for q in queries: + # Replace with your actual bridge call logic + # logs = requests.post("http://localhost:5001/agent/search", json={"query": q, "user_id": user_id}) + all_logs.append(f"Mock log for {q}: User mentioned feeling tension.") + + # Step 4: Formulate 'Food for Thought' + prompt = f"Based on these logs: {all_logs}, provide a gentle, one-sentence 'food for thought' to start a session." response = llm.invoke([SystemMessage(content=prompt)]) - return {"clinical_file": response.content} + + return { + "evidence": all_logs, + "food_for_thought": response.content, + "transcript": [AIMessage(content=response.content)] + } -# 3. The Master Therapist Agent -def therapist_node(state: PatientFile): - prompt = f""" - You are a compassionate Therapist Agent. 
- Use the following Patient File as your Source Truth: {state['clinical_file']} +def therapist_node(state: TherapySessionState): + """Step 5-7: The main therapy loop""" + system_prompt = f""" + You are an AI Therapist for an anxiety support app. + SOURCE TRUTH (User's History): {state['evidence']} - The user just said: {state['current_input']} + Guidelines: + 1. Be empathetic but professional. + 2. Reference the 'Source Truth' if relevant (e.g. 'I noticed you logged about...'). + 3. If you need more info to help, ask the user or state you're looking into it. + """ - Respond with empathy and use details from their 'File' to guide them. + messages = [SystemMessage(content=system_prompt)] + state['transcript'] + response = llm.invoke(messages) + + return {"transcript": state['transcript'] + [response]} + +def wrap_up_node(state: TherapySessionState): + """Step 8-9: Conversation embedding and Exercise generation""" + # Step 9: Structured JSON exercises + exercise_prompt = """ + Based on the session transcript, generate exactly 3 exercises. 
+ Format MUST be a valid JSON list of objects: + [ + {"title": "...", "type": "breathing", "content": "..."}, + {"title": "...", "type": "todo", "content": "..."}, + {"title": "...", "type": "script", "content": "..."} + ] """ - response = llm.invoke([SystemMessage(content=prompt)]) - return {"therapy_response": response.content} + + # We combine the transcript into a string for the exercise generator + history = "\n".join([m.content for m in state['transcript']]) + response = llm.invoke([SystemMessage(content=exercise_prompt), HumanMessage(content=history)]) + + # In Step 8, you'd trigger the bridge to save 'history' to Actian + return {"exercises": response.content} # Add JSON parsing here From f383bd16d071f855286dfa8da0a2ad6df0a5b8e3 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 11:27:37 -0800 Subject: [PATCH 16/90] writes langchain workflow in new graph.py file --- src/server/graph.py | 23 +++++++++++++++++++++++ src/server/workflow.py | 17 ----------------- 2 files changed, 23 insertions(+), 17 deletions(-) create mode 100644 src/server/graph.py delete mode 100644 src/server/workflow.py diff --git a/src/server/graph.py b/src/server/graph.py new file mode 100644 index 0000000..fa30f57 --- /dev/null +++ b/src/server/graph.py @@ -0,0 +1,23 @@ +from langgraph.graph import StateGraph, END +from state import TherapySessionState +from agents import research_node, therapist_node, wrap_up_node + +workflow = StateGraph(TherapySessionState) + +# Add our 3 main stages +workflow.add_node("research", research_node) +workflow.add_node("therapist", therapist_node) +workflow.add_node("wrap_up", wrap_up_node) + +# Step 1-4 +workflow.set_entry_point("research") +workflow.add_edge("research", "therapist") + +# Steps 5-7 (This is where the user interacts) +# In production, you'd use an 'interrupt' here to wait for user input +workflow.add_edge("therapist", "wrap_up") + +# Step 8-9 +workflow.add_edge("wrap_up", END) + +app_agent = workflow.compile() diff 
--git a/src/server/workflow.py b/src/server/workflow.py deleted file mode 100644 index 8d52904..0000000 --- a/src/server/workflow.py +++ /dev/null @@ -1,17 +0,0 @@ -from langgraph.graph import StateGraph, END - -workflow = StateGraph(PatientFile) - -# Add Nodes -workflow.add_node("archivist", archivist_node) -workflow.add_node("reasoning", reasoning_node) -workflow.add_node("therapist", therapist_node) - -# Define the Path -workflow.set_entry_point("archivist") -workflow.add_edge("archivist", "reasoning") -workflow.add_edge("reasoning", "therapist") -workflow.add_edge("therapist", END) - -# Compile the Graph -app_agent = workflow.compile() From 110130f0f7eff2f4ba30d61ddcdd736eed4df2db Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 11:28:21 -0800 Subject: [PATCH 17/90] writes route.js for /api/therapy/start --- src/client/app/api/therapy/start/route.js | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 src/client/app/api/therapy/start/route.js diff --git a/src/client/app/api/therapy/start/route.js b/src/client/app/api/therapy/start/route.js new file mode 100644 index 0000000..96b0644 --- /dev/null +++ b/src/client/app/api/therapy/start/route.js @@ -0,0 +1,18 @@ +import { NextResponse } from 'next/server'; + +export async function POST(req) { + const { userId } = await req.json(); + + // Call the Python agent's research node + const res = await fetch('http://localhost:5001/agent/start', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ user_id: userId }) + }); + + const data = await res.json(); + return NextResponse.json({ + openingMessage: data.food_for_thought, + evidenceFound: data.evidence + }); +} From 78043a2ef35f27b89bb542ed5a7a9810f29e14a1 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 11:28:58 -0800 Subject: [PATCH 18/90] adds a component for exercise list --- src/client/app/components/ExerciseList.js | 16 ++++++++++++++++ 1 file changed, 16 
insertions(+) create mode 100644 src/client/app/components/ExerciseList.js diff --git a/src/client/app/components/ExerciseList.js b/src/client/app/components/ExerciseList.js new file mode 100644 index 0000000..b814893 --- /dev/null +++ b/src/client/app/components/ExerciseList.js @@ -0,0 +1,16 @@ +export default function ExerciseList({ exercises }) { + return ( +
+ {exercises.map((ex, i) => ( +
+ {ex.type} +

{ex.title}

+

{ex.content}

+ +
+ ))} +
+ ); +} From 67ada665ce8da72fa4b49e7c9edd83268a27381b Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 11:30:09 -0800 Subject: [PATCH 19/90] chore: updates reqs.txt --- src/server/requirements.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/server/requirements.txt b/src/server/requirements.txt index b6e15e1..a238a3e 100644 --- a/src/server/requirements.txt +++ b/src/server/requirements.txt @@ -3,4 +3,6 @@ flask-cors langchain-google-genai langgraph pydantic +requests +sentence-transformers ./docker/actiancortex-0.1.0b1-py3-none-any.whl From a7bfd4981b6a8bf3b005c38f81664a98c193f6e4 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:00:42 -0800 Subject: [PATCH 20/90] updates state.py by removing iteration count --- src/server/state.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/server/state.py b/src/server/state.py index 9c3f6b1..5bdb8b6 100644 --- a/src/server/state.py +++ b/src/server/state.py @@ -4,13 +4,7 @@ class TherapySessionState(TypedDict): user_id: str session_id: str - # Step 5: The living conversation - transcript: List[BaseMessage] - # Step 3: Raw evidence retrieved from Actian - evidence: List[str] - # Step 4: The opening reflection - food_for_thought: str - # Step 9: The final 3 activities + transcript: List[BaseMessage] + evidence: List[str] + food_for_thought: str exercises: List[dict] - # Internal counter for reasoning loops - iteration_count: int From 94e3c43ca64658902e97f73c510af7da3c83612b Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:01:41 -0800 Subject: [PATCH 21/90] misc updates to agents.py --- src/server/agents.py | 70 +++++++++++++++++--------------------------- 1 file changed, 27 insertions(+), 43 deletions(-) diff --git a/src/server/agents.py b/src/server/agents.py index 1b6c9bc..aa1ba31 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -1,4 +1,5 @@ import os +import json from langchain_google_genai import 
ChatGoogleGenerativeAI from langchain_core.messages import SystemMessage, HumanMessage, AIMessage from state import TherapySessionState @@ -6,69 +7,52 @@ # Initialize Gemini 1.5 Pro llm = ChatGoogleGenerativeAI( model="gemini-1.5-pro", - google_api_key=os.getenv("GEMINI_API_KEY"), - temperature=0.7 + google_api_key=os.getenv("GEMINI_API_KEY") ) -# --- NODES --- - def research_node(state: TherapySessionState): - """Steps 2-4: Pre-session research & Food for thought""" - user_id = state['user_id'] - - # Step 3: Queries to build 'Source Truth' - queries = ["current anxiety triggers", "recent sleep patterns", "mood trends"] - all_logs = [] - - # Simulated search logic (connects to your Bridge) - for q in queries: - # Replace with your actual bridge call logic - # logs = requests.post("http://localhost:5001/agent/search", json={"query": q, "user_id": user_id}) - all_logs.append(f"Mock log for {q}: User mentioned feeling tension.") - - # Step 4: Formulate 'Food for Thought' - prompt = f"Based on these logs: {all_logs}, provide a gentle, one-sentence 'food for thought' to start a session." + """Steps 2-4: Pre-session retrieval and Opening Message.""" + # Step 3: Gather evidence (Connecting to your existing search logic) + import requests + search_query = "anxiety triggers and recent mood" + # We call our own bridge to get the logs + res = requests.post("http://localhost:5001/search", json={ + "vector": [0.1] * 384, # Replace with actual embedding of search_query + "user_id": state['user_id'] + }) + logs = [item['text'] for item in res.json()] + + # Step 4: Create Food for Thought + prompt = f"Based on these patient logs: {logs}, write a short, empathetic 'food for thought' to start our therapy session." 
response = llm.invoke([SystemMessage(content=prompt)]) return { - "evidence": all_logs, + "evidence": logs, "food_for_thought": response.content, "transcript": [AIMessage(content=response.content)] } def therapist_node(state: TherapySessionState): - """Step 5-7: The main therapy loop""" + """Steps 5-7: Active Conversation.""" system_prompt = f""" - You are an AI Therapist for an anxiety support app. - SOURCE TRUTH (User's History): {state['evidence']} - - Guidelines: - 1. Be empathetic but professional. - 2. Reference the 'Source Truth' if relevant (e.g. 'I noticed you logged about...'). - 3. If you need more info to help, ask the user or state you're looking into it. + You are a professional therapist. + PATIENT HISTORY: {state['evidence']} + Use this history to personalize your advice. If the user mentions something related to their logs, acknowledge it. """ - messages = [SystemMessage(content=system_prompt)] + state['transcript'] response = llm.invoke(messages) - return {"transcript": state['transcript'] + [response]} def wrap_up_node(state: TherapySessionState): - """Step 8-9: Conversation embedding and Exercise generation""" - # Step 9: Structured JSON exercises + """Steps 8-9: Closing and Exercise Generation.""" + # Step 9: Generate 3 Exercises exercise_prompt = """ - Based on the session transcript, generate exactly 3 exercises. - Format MUST be a valid JSON list of objects: - [ - {"title": "...", "type": "breathing", "content": "..."}, - {"title": "...", "type": "todo", "content": "..."}, - {"title": "...", "type": "script", "content": "..."} - ] + Based on today's chat, generate 3 anxiety exercises (1 Breathing, 1 Todo, 1 Audio Script). 
+ Respond ONLY with a JSON list: [{"title": "...", "type": "...", "content": "..."}] """ - - # We combine the transcript into a string for the exercise generator history = "\n".join([m.content for m in state['transcript']]) response = llm.invoke([SystemMessage(content=exercise_prompt), HumanMessage(content=history)]) - # In Step 8, you'd trigger the bridge to save 'history' to Actian - return {"exercises": response.content} # Add JSON parsing here + # Clean the string for JSON parsing + clean_json = response.content.replace('```json', '').replace('```', '').strip() + return {"exercises": json.loads(clean_json)} From 4adec5bc43a1ba0a02cabc25190ed3d461f82307 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:04:21 -0800 Subject: [PATCH 22/90] misc updates to graph.py --- src/server/graph.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/server/graph.py b/src/server/graph.py index fa30f57..e51f183 100644 --- a/src/server/graph.py +++ b/src/server/graph.py @@ -4,20 +4,14 @@ workflow = StateGraph(TherapySessionState) -# Add our 3 main stages workflow.add_node("research", research_node) workflow.add_node("therapist", therapist_node) workflow.add_node("wrap_up", wrap_up_node) -# Step 1-4 workflow.set_entry_point("research") workflow.add_edge("research", "therapist") - -# Steps 5-7 (This is where the user interacts) -# In production, you'd use an 'interrupt' here to wait for user input +# In a real session, you would pause here for user input workflow.add_edge("therapist", "wrap_up") - -# Step 8-9 workflow.add_edge("wrap_up", END) app_agent = workflow.compile() From a6e436ddbbd1c5f419fb939ebd80bd82c362a30c Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:05:46 -0800 Subject: [PATCH 23/90] updates brigde.py by adding /agent/start and /journal/save-session api routes --- src/server/bridge.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/server/bridge.py b/src/server/bridge.py 
index d7822ef..748d0f7 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -67,5 +67,22 @@ def save_chat(): ) return jsonify({"success": True}) + +@app.route('/agent/start', methods=['POST']) +def start_agent(): + data = request.json + # Run the research node specifically + initial_state = {"user_id": data['user_id'], "transcript": [], "evidence": []} + result = app_agent.invoke(initial_state) + return jsonify(result) + +@app.route('/journal/save_session', methods=['POST']) +def save_session(): + """Step 8: Embed the final conversation and save to Actian.""" + data = request.json + # Here you would use sentence-transformers to embed data['text'] + # Then client.upsert(...) + return jsonify({"status": "saved"}) + if __name__ == '__main__': app.run(port=5001) From 8fab48ca60c529df99b5c33dc46112aa7f56a990 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:06:37 -0800 Subject: [PATCH 24/90] writes page.js for testing the therapy agent stuff --- src/client/app/therapy/page.js | 36 ++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 src/client/app/therapy/page.js diff --git a/src/client/app/therapy/page.js b/src/client/app/therapy/page.js new file mode 100644 index 0000000..5ce8018 --- /dev/null +++ b/src/client/app/therapy/page.js @@ -0,0 +1,36 @@ +"use client"; +import { useState } from 'react'; + +export default function TherapyPage() { + const [session, setSession] = useState(null); + const [loading, setLoading] = useState(false); + + const startSession = async () => { + setLoading(true); + const res = await fetch('http://localhost:5001/agent/start', { + method: 'POST', + headers: {'Content-Type': 'application/json'}, + body: JSON.stringify({ userId: 'horyzon' }) + }); + const data = await res.json(); + setSession(data); + setLoading(false); + }; + + return ( +
+ {!session ? ( + + ) : ( +
+
+ "Food for thought: {session.food_for_thought}" +
+ {/* Chat Interface Goes Here */} +
+ )} +
+ ); +} From 9a49ec8685125ba00a2a8736da1b731f6e95a418 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:12:11 -0800 Subject: [PATCH 25/90] updates route.js --- src/client/app/api/journal/save/route.js | 30 ++++++++++-------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/src/client/app/api/journal/save/route.js b/src/client/app/api/journal/save/route.js index 8868869..ac8e437 100644 --- a/src/client/app/api/journal/save/route.js +++ b/src/client/app/api/journal/save/route.js @@ -1,31 +1,27 @@ import { NextResponse } from 'next/server'; -import { pipeline } from '@xenova/transformers'; export async function POST(req) { try { - const { text, userId } = await req.json(); + const data = await req.json(); + console.log("Data received in Next.js:", data); - // 1. Vectorize the journal entry - const pipe = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2'); - const output = await pipe(text, { pooling: 'mean', normalize: true }); - const vector = Array.from(output.data); - - // 2. 
Send to Python Bridge to save in Actian - const response = await fetch('http://localhost:5001/upsert', { + // This is where you talk to your Python bridge + const res = await fetch('http://localhost:5001/upsert', { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ - id: Date.now(), // Unique ID - text: text, - user_id: userId, - vector: vector + id: data.id || Date.now(), + text: data.text, + user_id: data.userId, + vector: Array(384).fill(0.1) // Temporary: Use real embeddings later }) }); - const result = await response.json(); - return NextResponse.json(result); + if (!res.ok) throw new Error("Bridge connection failed"); - } catch (error) { - return NextResponse.json({ error: error.message }, { status: 500 }); + return NextResponse.json({ success: true }); + } catch (err) { + console.error(err); + return NextResponse.json({ error: err.message }, { status: 500 }); } } From e1f4ea7730c4bbdff2f09a4807b50672df83c0bb Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:14:44 -0800 Subject: [PATCH 26/90] chore: updates next.config.mjs --- src/client/next.config.mjs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/client/next.config.mjs b/src/client/next.config.mjs index bb15219..8ba2d8b 100644 --- a/src/client/next.config.mjs +++ b/src/client/next.config.mjs @@ -1,5 +1,7 @@ + /** @type {import('next').NextConfig} */ const nextConfig = { - experimental: { serverComponentsExternalPackages: ['@xenova/transformers'] }, + serverExternalPackages: ['@xenova/transformers'], }; + export default nextConfig; From cf5ea97460034eaf48d11897d1e205be71e93748 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:17:49 -0800 Subject: [PATCH 27/90] debug: simplifies journal/save/route.js for debug purposes --- src/client/app/api/journal/save/route.js | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/src/client/app/api/journal/save/route.js 
b/src/client/app/api/journal/save/route.js index ac8e437..64b26da 100644 --- a/src/client/app/api/journal/save/route.js +++ b/src/client/app/api/journal/save/route.js @@ -3,25 +3,13 @@ import { NextResponse } from 'next/server'; export async function POST(req) { try { const data = await req.json(); - console.log("Data received in Next.js:", data); - - // This is where you talk to your Python bridge - const res = await fetch('http://localhost:5001/upsert', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - id: data.id || Date.now(), - text: data.text, - user_id: data.userId, - vector: Array(384).fill(0.1) // Temporary: Use real embeddings later - }) + + // We'll just return success for now to test the route + return NextResponse.json({ + message: "Route is working!", + received: data.text }); - - if (!res.ok) throw new Error("Bridge connection failed"); - - return NextResponse.json({ success: true }); } catch (err) { - console.error(err); - return NextResponse.json({ error: err.message }, { status: 500 }); + return NextResponse.json({ error: "JSON Parsing Failed: " + err.message }, { status: 500 }); } } From 7bad6d40fff9bfa457fab494cb692bc41d32495c Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:18:16 -0800 Subject: [PATCH 28/90] writes simple embedder script for backend --- src/server/embedder.py | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 src/server/embedder.py diff --git a/src/server/embedder.py b/src/server/embedder.py new file mode 100644 index 0000000..da98c91 --- /dev/null +++ b/src/server/embedder.py @@ -0,0 +1,7 @@ +from sentence_transformers import SentenceTransformer + +# This loads the model locally on your CPU/GPU +model = SentenceTransformer('all-MiniLM-L6-v2') + +def get_embedding(text): + return model.encode(text).tolist() From 81d4f54c2c457a2720fb6c4cdd3f48376b4671c0 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:19:36 -0800 Subject: 
[PATCH 29/90] updates bridge.py for backend embedding --- src/server/bridge.py | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/src/server/bridge.py b/src/server/bridge.py index 748d0f7..e464183 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -1,6 +1,7 @@ from flask import Flask, request, jsonify from flask_cors import CORS from cortex import CortexClient, DistanceMetric, Filter, Field +from embedder import get_embedding app = Flask(__name__) CORS(app) @@ -9,26 +10,36 @@ client.connect() COLLECTION = "user_journals" -@app.route('/init', methods=['GET']) -def init(): - if not client.has_collection(COLLECTION): - client.create_collection(name=COLLECTION, dimension=384) - return jsonify({"status": "ready"}) - @app.route('/upsert', methods=['POST']) def upsert(): data = request.json - client.upsert(COLLECTION, id=data['id'], vector=data['vector'], - payload={"text": data['text'], "user_id": data['user_id']}) + # Logic moved here: convert text to vector + vector = get_embedding(data['text']) + + client.upsert( + COLLECTION, + id=int(data.get('id', time.time())), + vector=vector, + payload={"text": data['text'], "user_id": data['user_id']} + ) client.flush(COLLECTION) return jsonify({"success": True}) @app.route('/search', methods=['POST']) def search(): data = request.json + # Logic moved here: convert search query to vector + vector = get_embedding(data['query']) + user_filter = Filter().must(Field("user_id").eq(data['user_id'])) - results = client.search(COLLECTION, query=data['vector'], filter=user_filter, top_k=5, with_payload=True) - return jsonify([{"text": r.payload['text'], "score": r.score} for r in results]) + results = client.search(COLLECTION, query=vector, filter=user_filter, top_k=5) + return jsonify([{"text": r.payload['text']} for r in results]) + +@app.route('/init', methods=['GET']) +def init(): + if not client.has_collection(COLLECTION): + client.create_collection(name=COLLECTION, 
dimension=384) + return jsonify({"status": "ready"}) # bridge.py updates @app.route('/agent/search', methods=['POST']) From b139b7779c2fe29c36bd30ffea57a5cf88614699 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:22:33 -0800 Subject: [PATCH 30/90] updates agents.py to take into account backend embedding paradigm --- src/server/agents.py | 85 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 67 insertions(+), 18 deletions(-) diff --git a/src/server/agents.py b/src/server/agents.py index aa1ba31..66421ad 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -3,6 +3,8 @@ from langchain_google_genai import ChatGoogleGenerativeAI from langchain_core.messages import SystemMessage, HumanMessage, AIMessage from state import TherapySessionState +import requests +from embedder import get_embedding # Our new backend embedder # Initialize Gemini 1.5 Pro llm = ChatGoogleGenerativeAI( @@ -11,25 +13,72 @@ ) def research_node(state: TherapySessionState): - """Steps 2-4: Pre-session retrieval and Opening Message.""" - # Step 3: Gather evidence (Connecting to your existing search logic) - import requests - search_query = "anxiety triggers and recent mood" - # We call our own bridge to get the logs - res = requests.post("http://localhost:5001/search", json={ - "vector": [0.1] * 384, # Replace with actual embedding of search_query - "user_id": state['user_id'] - }) - logs = [item['text'] for item in res.json()] - - # Step 4: Create Food for Thought - prompt = f"Based on these patient logs: {logs}, write a short, empathetic 'food for thought' to start our therapy session." - response = llm.invoke([SystemMessage(content=prompt)]) - + """ + Step 2: Start session. + Step 3: Query Actian for 'Source Truth' based on clinical themes. + Step 4: Generate the 'Food for Thought' opening message. + """ + user_id = state['user_id'] + + # 1. 
Step 3: Define clinical query topics (These are your 'source truth' seeds) + # The agent uses these to search the database for relevant user history + query_topics = [ + "recent panic attacks or physical anxiety symptoms", + "social anxiety and workplace stress", + "sleep quality and nighttime anxiety" + ] + + all_evidence = [] + + # 2. Iterate through topics to gather evidence + for topic in query_topics: + # Convert topic to vector locally on the backend + vector = get_embedding(topic) + + # Call your existing Bridge search logic (or call Actian client directly) + # We assume your bridge.py is running on 5001 + try: + res = requests.post("http://localhost:5001/search", json={ + "query": topic, # Bridge now handles vectorization via get_embedding + "user_id": user_id + }) + + if res.status_code == 200: + logs = res.json() # List of {"text": "..."} + all_evidence.extend([log['text'] for log in logs]) + except Exception as e: + print(f"Research Node Error: Could not reach Actian bridge: {e}") + + # 3. Deduplicate and clean evidence + unique_evidence = list(set(all_evidence)) + + # 4. Step 4: Reason with the Evidence to create 'Food for Thought' + # We use Gemini to synthesize the raw logs into a gentle opening + from langchain_google_genai import ChatGoogleGenerativeAI + llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro") + + research_summary_prompt = f""" + You are preparing for a therapy session. + Based on these recent journal logs from the patient: + {unique_evidence if unique_evidence else "No recent logs found."} + + Your task: + 1. Identify one recurring theme or a significant recent event. + 2. Write a gentle, reflective 'Food for Thought' sentence to start the session. + 3. Keep it brief and empathetic. Do not diagnose; just reflect. + + Example: 'I noticed you've been feeling some tension in social settings lately; perhaps we could explore what that feels like for you today?' 
+ """ + + response = llm.invoke([SystemMessage(content=research_summary_prompt)]) + food_for_thought = response.content + + # 5. Return the updated state return { - "evidence": logs, - "food_for_thought": response.content, - "transcript": [AIMessage(content=response.content)] + "evidence": unique_evidence, + "food_for_thought": food_for_thought, + # We initialize the transcript with the AI's opening thought + "transcript": [AIMessage(content=food_for_thought)] } def therapist_node(state: TherapySessionState): From aa8b32a6f705ae0f80ac0d94b3c6a449b5617087 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:23:19 -0800 Subject: [PATCH 31/90] simplies journal save route.js for backend embedding --- src/client/app/api/journal/save/route.js | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/src/client/app/api/journal/save/route.js b/src/client/app/api/journal/save/route.js index 64b26da..8e5e911 100644 --- a/src/client/app/api/journal/save/route.js +++ b/src/client/app/api/journal/save/route.js @@ -2,14 +2,21 @@ import { NextResponse } from 'next/server'; export async function POST(req) { try { - const data = await req.json(); - - // We'll just return success for now to test the route - return NextResponse.json({ - message: "Route is working!", - received: data.text + const { userId, text, id } = await req.json(); + + // No vectorization here! Just send the raw text to Python. 
+ const res = await fetch('http://localhost:5001/upsert', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + user_id: userId, + text: text, + id: id + }) }); + + return NextResponse.json(await res.json()); } catch (err) { - return NextResponse.json({ error: "JSON Parsing Failed: " + err.message }, { status: 500 }); + return NextResponse.json({ error: err.message }, { status: 500 }); } } From 8bc7cb1082a6e3294fdf497defb3dbd61094d9bc Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:29:18 -0800 Subject: [PATCH 32/90] debuggin updates brosky common --- src/server/bridge.py | 56 ++++++++++++++++++++++++++++---------------- 1 file changed, 36 insertions(+), 20 deletions(-) diff --git a/src/server/bridge.py b/src/server/bridge.py index e464183..63d3bc2 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -1,7 +1,8 @@ +import time from flask import Flask, request, jsonify from flask_cors import CORS from cortex import CortexClient, DistanceMetric, Filter, Field -from embedder import get_embedding +from embedder import get_embedding # Make sure embedder.py exists in the same folder app = Flask(__name__) CORS(app) @@ -12,28 +13,43 @@ @app.route('/upsert', methods=['POST']) def upsert(): - data = request.json - # Logic moved here: convert text to vector - vector = get_embedding(data['text']) - - client.upsert( - COLLECTION, - id=int(data.get('id', time.time())), - vector=vector, - payload={"text": data['text'], "user_id": data['user_id']} - ) - client.flush(COLLECTION) - return jsonify({"success": True}) + try: + data = request.json + # 1. Always generate the vector on the backend + vector = get_embedding(data['text']) + + # 2. 
Force ID to integer (critical for Actian) + entry_id = int(data.get('id', time.time())) + + client.upsert( + COLLECTION, + id=entry_id, + vector=vector, + payload={ + "text": data['text'], + "user_id": data['user_id'], + "type": "journal_entry" + } + ) + client.flush(COLLECTION) + return jsonify({"success": True, "id": entry_id}) + except Exception as e: + print(f"UPSERT ERROR: {e}") # This will show up in your bridge terminal + return jsonify({"success": False, "error": str(e)}), 500 @app.route('/search', methods=['POST']) def search(): - data = request.json - # Logic moved here: convert search query to vector - vector = get_embedding(data['query']) - - user_filter = Filter().must(Field("user_id").eq(data['user_id'])) - results = client.search(COLLECTION, query=vector, filter=user_filter, top_k=5) - return jsonify([{"text": r.payload['text']} for r in results]) + try: + data = request.json + # Generate vector for the query string + vector = get_embedding(data['query']) + + user_filter = Filter().must(Field("user_id").eq(data['user_id'])) + results = client.search(COLLECTION, query=vector, filter=user_filter, top_k=5, with_payload=True) + + return jsonify([{"text": r.payload['text']} for r in results]) + except Exception as e: + return jsonify({"error": str(e)}), 500 @app.route('/init', methods=['GET']) def init(): From 8a37c8cb4dfe5eddede72213fbf4e70b764e832d Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:31:47 -0800 Subject: [PATCH 33/90] updates /agent/start endpoint in flask bridge to acc execute langchain flow --- src/server/bridge.py | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/src/server/bridge.py b/src/server/bridge.py index 63d3bc2..9766643 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -94,14 +94,34 @@ def save_chat(): ) return jsonify({"success": True}) +from graph import app_agent # Import the compiled LangGraph @app.route('/agent/start', methods=['POST']) def 
start_agent(): - data = request.json - # Run the research node specifically - initial_state = {"user_id": data['user_id'], "transcript": [], "evidence": []} - result = app_agent.invoke(initial_state) - return jsonify(result) + try: + data = request.json + user_id = data.get('user_id') + + # Initialize the state for the research node + initial_state = { + "user_id": user_id, + "session_id": str(int(time.time())), + "transcript": [], + "evidence": [], + "food_for_thought": "", + "exercises": [] + } + + # Run ONLY the research part for now + # Using 'interrupt' logic is complex, so we'll just get the start + result = app_agent.invoke(initial_state) + + return jsonify({ + "food_for_thought": result['food_for_thought'], + "evidence_count": len(result['evidence']) + }) + except Exception as e: + return jsonify({"error": str(e)}), 500 @app.route('/journal/save_session', methods=['POST']) def save_session(): From 9b6c0937569348b0fd9da4908bf960eb83f033b7 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:39:18 -0800 Subject: [PATCH 34/90] updates bridge.py to take into account the api route names that the front expects --- src/server/bridge.py | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/src/server/bridge.py b/src/server/bridge.py index 9766643..6ac228d 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -96,31 +96,37 @@ def save_chat(): from graph import app_agent # Import the compiled LangGraph -@app.route('/agent/start', methods=['POST']) -def start_agent(): +# Change the endpoint name to match what Next.js is calling +@app.route('/agent/run_session', methods=['POST']) +def run_session(): try: data = request.json user_id = data.get('user_id') + user_message = data.get('message') # The message from the curl - # Initialize the state for the research node - initial_state = { + # 1. 
Reconstruct the state + # In a real app, you'd pull the 'evidence' and 'transcript' from a cache/DB + # For now, we'll initialize it to keep the agent running + state = { "user_id": user_id, - "session_id": str(int(time.time())), - "transcript": [], - "evidence": [], + "transcript": [HumanMessage(content=user_message)], + "evidence": [], # This would be filled by the research_node normally + "patient_file": "", "food_for_thought": "", "exercises": [] } - # Run ONLY the research part for now - # Using 'interrupt' logic is complex, so we'll just get the start - result = app_agent.invoke(initial_state) + # 2. Run the Graph + # Note: app_agent must be imported from your graph.py + result = app_agent.invoke(state) + # 3. Return the response back to Next.js return jsonify({ - "food_for_thought": result['food_for_thought'], - "evidence_count": len(result['evidence']) + "therapy_response": result['transcript'][-1].content, + "status": "success" }) except Exception as e: + print(f"Error in run_session: {e}") return jsonify({"error": str(e)}), 500 @app.route('/journal/save_session', methods=['POST']) From f36637c7fbec4b3a8e02632ce4cec48d0480febf Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:39:42 -0800 Subject: [PATCH 35/90] @app.route('/agent/run_session', methods=['POST']) def run_session(): try: data = request.json user_id = data.get('user_id') user_message = data.get('message') # The message from the curl # 1. Reconstruct the state # In a real app, you'd pull the 'evidence' and 'transcript' from a cache/DB # For now, we'll initialize it to keep the agent running state = { "user_id": user_id, "transcript": [HumanMessage(content=user_message)], "evidence": [], # This would be filled by the research_node normally "patient_file": "", "food_for_thought": "", "exercises": [] } # 2. 
Run the Graph --- src/server/.bridge.py.swp | Bin 0 -> 20480 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 src/server/.bridge.py.swp diff --git a/src/server/.bridge.py.swp b/src/server/.bridge.py.swp new file mode 100644 index 0000000000000000000000000000000000000000..b209029918ddb58a31d1e849fe20f40c065b405c GIT binary patch literal 20480 zcmeHPU5Fh?6>c}W{;V6__z*TPmVLq7S8wiQl8qz-7mSmcB$({PnaTPv3{6jW-MN+N z+uiA|o|$VHWkJ+M)JJg<$)X6mg5bjn`rwl9flVH&5UE)Y8@2%kzg8{~{Tjx)jGe7)%v;IE9~^xU!auXr1@6@u4;C4aUFs zSOzQu?*IcquJ{1`}qQ^1qJG2oxa9p@jwZ-HL`F97#| zZQwQ#0CT``;CGKZ&I8~@padka3d{jr;0fSgA9S4iz>C1MKmiEgDsUWl^D)QyKClJ^ zz(wGbfD1edy!HXq1?~aQ0DpPE2;CbLV;0|yR_&9I^_&YwN zybAmh_%@IN9pIzDlfaw!kn#rbQ{ZJ_49o-PfKLF&f!Fb|<^|v$umf~}*D-!HhWCLB z4l_p~1IbyR-JM)%vH3J-NxIc#Tf7~|Oe8{zAQoTXOh_hEw!wMEnj{!07i!qRekwd1td>@Y~s7iuC;qAV3)I|)X(lCn`C z!y(%m29iC^x2Xk86d*2+X)GMoxAi>8hD?mIG?z3Kz8_3&j)}q8yXzKZ7@|M!3|r1izN4wA3E7!Y!*IBAoN*L&4F!Htjz&Q~b}SuZ6KdJoL&8jwn6V z!WR2H{506qgJ?)#=|GKXNP`y?@b`poF?YmADHu}q4stK@Dl+9$y?s7)JH>DX@zkWm zfj*^-?;Bbo9`vo`Jwwn+n;31cvH?;-jtl@tFUnXJtNX@XrkTIduM^5Z#FFRzd3EwS zy-`_6Z(i^?^4w}TJ3W5KGe4SyB9`)TUys7!G@GU%`V8R(5Aty6X}{IAaTB?T=SXfo zAFc5yg2#EvzE54#e%oTV3Qp-MXM9_GCCfy{5kj1y=K(9Fh=m-pL79XSp=<_~G9(3$ zjweLpR+?{6zGR5lC^jKmJ(nSELB&R?fDL)hbwf@6n|Y=tNIj=+?yf@WvRn1Qofk-= z0(mh&N-dBP@C4O3dH~W&LhUYCHXIiM>49ZIB0^S-N104Vg?2Zc7Z-Uz(VnccODeH% z31sX7nFmC;x-rPUxbL!9Z19IVs$vESP>syN`A^?G&`n!xqWEKFhG5{uVOa_DS4qz6 zV!nySuJKQX!+1xC8}NcKg{n}Ep+_mS-w0k7A+i%1p8mNDPibPs08^l>Yz%{5rR6uB=V>Ef>5U71sPUGYW@+!u#D&(j=z0m~lm>~y;9{FyVVINuI=CfS@iksl## z@R`Fbt=x4@M%)+CJNrYr&fyJvD%$sjSl#h1Qw~uDD>GrQoLc|g#Vkds;XL9($e~~5 zUYb4XM>Z@bF|nMlPH*}NI8#!TNS0TfX7dY zAC#EM(q!I0#N{92>=WDj{Gpz5=YzQ5UFII{A=HQrO_kojy>)YGZgH8-EiNuB&ak_D z$JI+8ED&@|57RBg73Fo6{B*D}gPd@fVv_C8daajz# z+M((ZssExr?Os>VmC4$|OnJU~h)!g5zd9z_^oZ31m9OGOJBgnB>KTYiRPI|Z9q6id znJNk#x>OxlI#j*iZK>{aeSOc;!KsW?77!aWwA4Jkrp00u0oGWS)U37G*&eOoL(Z;U zrllfk@^#Dw9v0bB%>m@%myisDq@(jHJ@27$qbX@V1Pk&@@m4S{CNr&y8e;`1iehLj 
z2sbc+O!{+V_?X4h-Wfw*-jz+OWwKNA36}Jr8_&S(R!stFk|6tg9diLCXR#>e+p}uT z=DBf-xp9~lat0~){4}Ol*j-%muxjo&_=l2H926U#@6+_*`zWB6u6lI=cFi&yZya^f z!D{zLuz?j|i8;9{2^qUyZeYc(St@8vKS<*UYIi9i= z{~Y)k@I8R;e+96IWxz6E8L$jk1}p=X0n318z%pPNunbrRECX+u0s2Ct_7Ka&qy4-- zzEezh9DJv)zDTfspS68|6o|z4+cQUgFsGfOsQO^8b^wy`a$A13Hz)QflfDm{J zI0yV2&-gR&L*Sc$dgfcE%vQ7vSOzQumI2FvWxz6E8L$jk2Hr6S=nbaDu97?nssl)- z$~a=se~`pTk{ekwF5mM zMMI^)3P=m3RHs^#mzrJh9;zxKk5bPaCfnR4u``7Z-jb$P(y>p`@!#n;=12!bo5Kx> zsL^paWcCPcMnh~trCrj=YK>GSC~@d|b-385hwgLiLs*wVDn`0Zt+9g)u-4|TuAH7; z?Lo?hv68iNVQ&|m%500BBgw8(k&+akvaU1%G`dE%S_=^LL$T97k*fAYvYBphPbqzy z;)ADC$!F?vj8ukf4FYAg-CfB_mAFNTVClb(s~ZM=#A2zp-W!Tny1!d(;__{thXO+l zN>)}U7O124Xudw%*geX;>t*%I}NF; Os;_P;<&OEQ>--0C&J|w( literal 0 HcmV?d00001 From e89125c3f5b8a5ef89f023c4c30a8796d1312042 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:41:16 -0800 Subject: [PATCH 36/90] updates route.js for /api/therapy/chat --- src/client/app/api/therapy/chat/route.js | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/src/client/app/api/therapy/chat/route.js b/src/client/app/api/therapy/chat/route.js index f0f09bb..c0e45e9 100644 --- a/src/client/app/api/therapy/chat/route.js +++ b/src/client/app/api/therapy/chat/route.js @@ -1,17 +1,22 @@ +// src/client/app/api/therapy/chat/route.js export async function POST(req) { const { userId, message } = await req.json(); - // We call a new Python endpoint that runs the LangGraph workflow - const response = await fetch('http://localhost:5001/agent/run_session', { + const res = await fetch('http://localhost:5001/agent/run_session', { method: 'POST', headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ user_id: userId, input: message }) + body: JSON.stringify({ + user_id: userId, // Match Python's 'user_id' + message: message // Match Python's 'message' + }) }); - const data = await response.json(); - - return Response.json({ - reply: data.therapy_response, - patientFile: 
data.clinical_file // We return this so the UI can show the "Truth" - }); + if (!res.ok) { + const errorText = await res.text(); + console.error("Python Bridge Error:", errorText); + throw new Error("Bridge failed"); + } + + const data = await res.json(); + return Response.json({ reply: data.therapy_response }); } From 8ed41da9a420b094d136d35673a1f83b6a2f75cd Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:43:14 -0800 Subject: [PATCH 37/90] adds missing langchain message import to top of bridge.py --- src/server/bridge.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/server/bridge.py b/src/server/bridge.py index 6ac228d..f78807b 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -3,6 +3,7 @@ from flask_cors import CORS from cortex import CortexClient, DistanceMetric, Filter, Field from embedder import get_embedding # Make sure embedder.py exists in the same folder +from langchain_core.messages import HumanMessage, AIMessage, SystemMessage app = Flask(__name__) CORS(app) From bdfcab7d1bf68ab9f204f1aabb9727a3e4500fd8 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:44:28 -0800 Subject: [PATCH 38/90] updates run/session in flask bridge to remember previous session (and not just restart) --- src/server/bridge.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/src/server/bridge.py b/src/server/bridge.py index f78807b..0cc4458 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -97,31 +97,27 @@ def save_chat(): from graph import app_agent # Import the compiled LangGraph -# Change the endpoint name to match what Next.js is calling @app.route('/agent/run_session', methods=['POST']) def run_session(): try: data = request.json user_id = data.get('user_id') - user_message = data.get('message') # The message from the curl + user_message = data.get('message') - # 1. 
Reconstruct the state - # In a real app, you'd pull the 'evidence' and 'transcript' from a cache/DB - # For now, we'll initialize it to keep the agent running + # 1. Reconstruct the state with the NEW message + # In the future, we will pull previous transcript messages from a database here state = { "user_id": user_id, "transcript": [HumanMessage(content=user_message)], - "evidence": [], # This would be filled by the research_node normally - "patient_file": "", + "evidence": [], "food_for_thought": "", "exercises": [] } # 2. Run the Graph - # Note: app_agent must be imported from your graph.py result = app_agent.invoke(state) - # 3. Return the response back to Next.js + # 3. Return the response return jsonify({ "therapy_response": result['transcript'][-1].content, "status": "success" From c1ab91c7b0b0e965e33684689c940c700f3363e9 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:49:05 -0800 Subject: [PATCH 39/90] tries make run_session in flask bridge to remember stuff again --- src/server/bridge.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/server/bridge.py b/src/server/bridge.py index 0cc4458..12502c5 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -104,26 +104,26 @@ def run_session(): user_id = data.get('user_id') user_message = data.get('message') - # 1. Reconstruct the state with the NEW message - # In the future, we will pull previous transcript messages from a database here + # To avoid the 'contents' error, check if this is a NEW session or a CHAT + # For a CHAT, we skip research and go straight to therapist_node + state = { "user_id": user_id, "transcript": [HumanMessage(content=user_message)], - "evidence": [], - "food_for_thought": "", + "evidence": data.get('evidence', []), # Pass evidence back from frontend if possible "exercises": [] } - # 2. 
Run the Graph - result = app_agent.invoke(state) + # Instead of running the whole graph, just call the therapist node logic + from agents import therapist_node + result = therapist_node(state) - # 3. Return the response return jsonify({ "therapy_response": result['transcript'][-1].content, "status": "success" }) except Exception as e: - print(f"Error in run_session: {e}") + print(f"Error: {e}") return jsonify({"error": str(e)}), 500 @app.route('/journal/save_session', methods=['POST']) From 34d7feb6f95aa709280cfd8de37132e727411c1c Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:49:13 -0800 Subject: [PATCH 40/90] updates agent.py --- src/server/agents.py | 124 +++++++++++++++++++++++++++---------------- 1 file changed, 78 insertions(+), 46 deletions(-) diff --git a/src/server/agents.py b/src/server/agents.py index 66421ad..4ada3e1 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -1,15 +1,16 @@ import os import json +import requests from langchain_google_genai import ChatGoogleGenerativeAI from langchain_core.messages import SystemMessage, HumanMessage, AIMessage from state import TherapySessionState -import requests -from embedder import get_embedding # Our new backend embedder +from embedder import get_embedding # Our backend embedder utility # Initialize Gemini 1.5 Pro llm = ChatGoogleGenerativeAI( - model="gemini-1.5-pro", - google_api_key=os.getenv("GEMINI_API_KEY") + model="gemini-1.5-pro", + google_api_key=os.getenv("GEMINI_API_KEY"), + temperature=0.7 # Slight randomness for better therapist tone ) def research_node(state: TherapySessionState): @@ -18,10 +19,11 @@ def research_node(state: TherapySessionState): Step 3: Query Actian for 'Source Truth' based on clinical themes. Step 4: Generate the 'Food for Thought' opening message. """ - user_id = state['user_id'] + user_id = state.get('user_id') + if not user_id: + return {"transcript": [AIMessage(content="Hello! I'm here to help, but I couldn't find your user profile. 
How are you feeling today?")]} - # 1. Step 3: Define clinical query topics (These are your 'source truth' seeds) - # The agent uses these to search the database for relevant user history + # 1. Step 3: Define clinical query topics query_topics = [ "recent panic attacks or physical anxiety symptoms", "social anxiety and workplace stress", @@ -30,78 +32,108 @@ def research_node(state: TherapySessionState): all_evidence = [] - # 2. Iterate through topics to gather evidence + # 2. Iterate through topics to gather evidence from Actian for topic in query_topics: - # Convert topic to vector locally on the backend - vector = get_embedding(topic) - - # Call your existing Bridge search logic (or call Actian client directly) - # We assume your bridge.py is running on 5001 try: + # We call the local bridge search endpoint res = requests.post("http://localhost:5001/search", json={ - "query": topic, # Bridge now handles vectorization via get_embedding + "query": topic, "user_id": user_id - }) + }, timeout=5) if res.status_code == 200: - logs = res.json() # List of {"text": "..."} + logs = res.json() # Expected format: [{"text": "..."}] all_evidence.extend([log['text'] for log in logs]) except Exception as e: - print(f"Research Node Error: Could not reach Actian bridge: {e}") + print(f"Research Node Error (Search): {e}") # 3. Deduplicate and clean evidence unique_evidence = list(set(all_evidence)) + + # Format evidence for the prompt; provide a fallback if empty + evidence_context = "\n".join([f"- {text}" for text in unique_evidence]) if unique_evidence else "The user has no recent journal logs." - # 4. Step 4: Reason with the Evidence to create 'Food for Thought' - # We use Gemini to synthesize the raw logs into a gentle opening - from langchain_google_genai import ChatGoogleGenerativeAI - llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro") - + # 4. Step 4: Craft the opening message research_summary_prompt = f""" - You are preparing for a therapy session. 
- Based on these recent journal logs from the patient: - {unique_evidence if unique_evidence else "No recent logs found."} - - Your task: - 1. Identify one recurring theme or a significant recent event. - 2. Write a gentle, reflective 'Food for Thought' sentence to start the session. - 3. Keep it brief and empathetic. Do not diagnose; just reflect. + You are a compassionate AI therapist preparing for a session. + + PATIENT RECENT LOGS: + {evidence_context} - Example: 'I noticed you've been feeling some tension in social settings lately; perhaps we could explore what that feels like for you today?' + YOUR TASK: + 1. If there are logs: Identify a recurring theme and write a gentle, reflective 'Food for Thought' sentence to start the session. + 2. If there are NO logs: Write a warm, open-ended invitation for the user to share what's on their mind today. + 3. Keep it brief (max 2 sentences) and empathetic. """ - response = llm.invoke([SystemMessage(content=research_summary_prompt)]) - food_for_thought = response.content + try: + # We ensure the message list is never empty to avoid 'contents are required' + response = llm.invoke([SystemMessage(content=research_summary_prompt)]) + food_for_thought = response.content if response.content.strip() else "I'm glad you're here today. How have things been feeling for you lately?" + except Exception as e: + print(f"Research Node Error (LLM): {e}") + food_for_thought = "I'm glad we can connect today. What's been on your mind?" # 5. Return the updated state return { "evidence": unique_evidence, "food_for_thought": food_for_thought, - # We initialize the transcript with the AI's opening thought "transcript": [AIMessage(content=food_for_thought)] } def therapist_node(state: TherapySessionState): """Steps 5-7: Active Conversation.""" + evidence_str = "\n".join(state.get('evidence', [])) + system_prompt = f""" - You are a professional therapist. - PATIENT HISTORY: {state['evidence']} - Use this history to personalize your advice. 
If the user mentions something related to their logs, acknowledge it. + You are a professional AI therapist specialized in anxiety support. + + PATIENT HISTORY (Source Truth): + {evidence_str if evidence_str else "No historical logs available."} + + INSTRUCTIONS: + - Reference their history naturally (e.g., 'Earlier you mentioned...' or 'I recall your log about...'). + - If you need more detail to help, ask clarifying questions (Step 7). + - Stay empathetic, grounded, and focused on anxiety management. """ + + # Maintain the transcript history for the LLM messages = [SystemMessage(content=system_prompt)] + state['transcript'] - response = llm.invoke(messages) - return {"transcript": state['transcript'] + [response]} + + try: + response = llm.invoke(messages) + return {"transcript": state['transcript'] + [response]} + except Exception as e: + print(f"Therapist Node Error: {e}") + return {"transcript": state['transcript'] + [AIMessage(content="I'm sorry, I'm having a little trouble processing that. Could you say it another way?")]} def wrap_up_node(state: TherapySessionState): """Steps 8-9: Closing and Exercise Generation.""" # Step 9: Generate 3 Exercises exercise_prompt = """ - Based on today's chat, generate 3 anxiety exercises (1 Breathing, 1 Todo, 1 Audio Script). - Respond ONLY with a JSON list: [{"title": "...", "type": "...", "content": "..."}] + Based on the therapy session transcript provided, generate exactly 3 interactive exercises for the user. + 1. A breathing exercise. + 2. A simple TODO or action item. + 3. A short mindfulness or grounding script. 
+ + Respond ONLY with a valid JSON list of objects: + [ + {"title": "...", "type": "breathing", "content": "..."}, + {"title": "...", "type": "todo", "content": "..."}, + {"title": "...", "type": "script", "content": "..."} + ] """ - history = "\n".join([m.content for m in state['transcript']]) - response = llm.invoke([SystemMessage(content=exercise_prompt), HumanMessage(content=history)]) - # Clean the string for JSON parsing - clean_json = response.content.replace('```json', '').replace('```', '').strip() - return {"exercises": json.loads(clean_json)} + history = "\n".join([f"{'User' if isinstance(m, HumanMessage) else 'Therapist'}: {m.content}" for m in state['transcript']]) + + try: + response = llm.invoke([SystemMessage(content=exercise_prompt), HumanMessage(content=history)]) + + # Clean the string for JSON parsing (Gemini sometimes adds markdown blocks) + clean_json = response.content.replace('```json', '').replace('```', '').strip() + exercises = json.loads(clean_json) + except Exception as e: + print(f"Wrap Up Error: {e}") + exercises = [] # Fallback to empty list + + return {"exercises": exercises} From 0d2bfb51ead2cf5ba36d6d12f6c18851301f9d2a Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:56:54 -0800 Subject: [PATCH 41/90] adds /agent/start to flask bridge endpoints --- src/server/bridge.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/src/server/bridge.py b/src/server/bridge.py index 12502c5..196e1a7 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -134,5 +134,32 @@ def save_session(): # Then client.upsert(...) 
return jsonify({"status": "saved"}) +@app.route('/agent/start', methods=['POST']) +def start_agent(): + try: + data = request.json + user_id = data.get('userId') # Next.js sends 'userId' + + # Initialize fresh state for Steps 2-4 + state = { + "user_id": user_id, + "session_id": str(int(time.time())), + "transcript": [], + "evidence": [], + "food_for_thought": "", + "exercises": [] + } + + # This triggers the research_node in your graph + from agents import research_node + result = research_node(state) + + return jsonify({ + "food_for_thought": result['food_for_thought'], + "evidence": result['evidence'] # Step 3 evidence + }) + except Exception as e: + return jsonify({"error": str(e)}), 500 + if __name__ == '__main__': app.run(port=5001) From 7eb02248a2e30cba32c30309531a0008c9f7a58a Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:57:48 -0800 Subject: [PATCH 42/90] uses correct function to get embedding in agent_search flask endpoint --- src/server/bridge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/bridge.py b/src/server/bridge.py index 196e1a7..654aa39 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -69,7 +69,7 @@ def agent_search(): # For the agent to work, we handle the embedding conversion here # Use your existing pipeline or a utility function - vector = generate_embedding(query_text) + vector = get_embedding(query_text) # results = client.search( COLLECTION, From 8578ab29eca9ab7597bb0b0e5a71e25d735a504a Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:58:44 -0800 Subject: [PATCH 43/90] attempts to fix run_session flask endpoint to not use empty evidences at start --- src/server/bridge.py | 52 ++++++++++++++++++++------------------------ 1 file changed, 23 insertions(+), 29 deletions(-) diff --git a/src/server/bridge.py b/src/server/bridge.py index 654aa39..51f4248 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -97,35 +97,6 @@ def save_chat(): 
from graph import app_agent # Import the compiled LangGraph -@app.route('/agent/run_session', methods=['POST']) -def run_session(): - try: - data = request.json - user_id = data.get('user_id') - user_message = data.get('message') - - # To avoid the 'contents' error, check if this is a NEW session or a CHAT - # For a CHAT, we skip research and go straight to therapist_node - - state = { - "user_id": user_id, - "transcript": [HumanMessage(content=user_message)], - "evidence": data.get('evidence', []), # Pass evidence back from frontend if possible - "exercises": [] - } - - # Instead of running the whole graph, just call the therapist node logic - from agents import therapist_node - result = therapist_node(state) - - return jsonify({ - "therapy_response": result['transcript'][-1].content, - "status": "success" - }) - except Exception as e: - print(f"Error: {e}") - return jsonify({"error": str(e)}), 500 - @app.route('/journal/save_session', methods=['POST']) def save_session(): """Step 8: Embed the final conversation and save to Actian.""" @@ -161,5 +132,28 @@ def start_agent(): except Exception as e: return jsonify({"error": str(e)}), 500 +@app.route('/agent/run_session', methods=['POST']) +def run_session(): + try: + data = request.json + # The frontend needs to send the full transcript and evidence back + state = { + "user_id": data.get('user_id'), + "transcript": [HumanMessage(content=m['content']) if m['role'] == 'user' else AIMessage(content=m['content']) for m in data.get('transcript', [])], + "evidence": data.get('evidence', []), + "exercises": [] + } + + # Add the newest message + state['transcript'].append(HumanMessage(content=data.get('message'))) + + from agents import therapist_node + result = therapist_node(state) + + return jsonify({ + "therapy_response": result['transcript'][-1].content, + "full_transcript": [{"role": "user" if isinstance(m, HumanMessage) else "assistant", "content": m.content} for m in result['transcript']] + }) + if __name__ == 
'__main__': app.run(port=5001) From 26eb8ffe6bb8aeb24590e9a6ef9326ef535bc564 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 12:59:15 -0800 Subject: [PATCH 44/90] adds journal/save_session endpoint to flask bridge --- src/server/bridge.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/server/bridge.py b/src/server/bridge.py index 51f4248..cb57933 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -155,5 +155,29 @@ def run_session(): "full_transcript": [{"role": "user" if isinstance(m, HumanMessage) else "assistant", "content": m.content} for m in result['transcript']] }) +@app.route('/journal/save_session', methods=['POST']) +def save_session(): + try: + data = request.json + full_text = data.get('text') + user_id = data.get('user_id') + + vector = get_embedding(full_text) + + client.upsert( + COLLECTION, + id=int(time.time()), + vector=vector, + payload={ + "text": f"Session Summary: {full_text}", + "user_id": user_id, + "type": "session_memory" + } + ) + client.flush(COLLECTION) + return jsonify({"status": "saved"}) + except Exception as e: + return jsonify({"error": str(e)}), 500 + if __name__ == '__main__': app.run(port=5001) From d04e70571caf4f7afb4a342817fa9bf926630b96 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 13:04:19 -0800 Subject: [PATCH 45/90] chore removes a swp file --- src/server/.bridge.py.swp | Bin 20480 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 src/server/.bridge.py.swp diff --git a/src/server/.bridge.py.swp b/src/server/.bridge.py.swp deleted file mode 100644 index b209029918ddb58a31d1e849fe20f40c065b405c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 20480 zcmeHPU5Fh?6>c}W{;V6__z*TPmVLq7S8wiQl8qz-7mSmcB$({PnaTPv3{6jW-MN+N z+uiA|o|$VHWkJ+M)JJg<$)X6mg5bjn`rwl9flVH&5UE)Y8@2%kzg8{~{Tjx)jGe7)%v;IE9~^xU!auXr1@6@u4;C4aUFs zSOzQu?*IcquJ{1`}qQ^1qJG2oxa9p@jwZ-HL`F97#| 
zZQwQ#0CT``;CGKZ&I8~@padka3d{jr;0fSgA9S4iz>C1MKmiEgDsUWl^D)QyKClJ^ zz(wGbfD1edy!HXq1?~aQ0DpPE2;CbLV;0|yR_&9I^_&YwN zybAmh_%@IN9pIzDlfaw!kn#rbQ{ZJ_49o-PfKLF&f!Fb|<^|v$umf~}*D-!HhWCLB z4l_p~1IbyR-JM)%vH3J-NxIc#Tf7~|Oe8{zAQoTXOh_hEw!wMEnj{!07i!qRekwd1td>@Y~s7iuC;qAV3)I|)X(lCn`C z!y(%m29iC^x2Xk86d*2+X)GMoxAi>8hD?mIG?z3Kz8_3&j)}q8yXzKZ7@|M!3|r1izN4wA3E7!Y!*IBAoN*L&4F!Htjz&Q~b}SuZ6KdJoL&8jwn6V z!WR2H{506qgJ?)#=|GKXNP`y?@b`poF?YmADHu}q4stK@Dl+9$y?s7)JH>DX@zkWm zfj*^-?;Bbo9`vo`Jwwn+n;31cvH?;-jtl@tFUnXJtNX@XrkTIduM^5Z#FFRzd3EwS zy-`_6Z(i^?^4w}TJ3W5KGe4SyB9`)TUys7!G@GU%`V8R(5Aty6X}{IAaTB?T=SXfo zAFc5yg2#EvzE54#e%oTV3Qp-MXM9_GCCfy{5kj1y=K(9Fh=m-pL79XSp=<_~G9(3$ zjweLpR+?{6zGR5lC^jKmJ(nSELB&R?fDL)hbwf@6n|Y=tNIj=+?yf@WvRn1Qofk-= z0(mh&N-dBP@C4O3dH~W&LhUYCHXIiM>49ZIB0^S-N104Vg?2Zc7Z-Uz(VnccODeH% z31sX7nFmC;x-rPUxbL!9Z19IVs$vESP>syN`A^?G&`n!xqWEKFhG5{uVOa_DS4qz6 zV!nySuJKQX!+1xC8}NcKg{n}Ep+_mS-w0k7A+i%1p8mNDPibPs08^l>Yz%{5rR6uB=V>Ef>5U71sPUGYW@+!u#D&(j=z0m~lm>~y;9{FyVVINuI=CfS@iksl## z@R`Fbt=x4@M%)+CJNrYr&fyJvD%$sjSl#h1Qw~uDD>GrQoLc|g#Vkds;XL9($e~~5 zUYb4XM>Z@bF|nMlPH*}NI8#!TNS0TfX7dY zAC#EM(q!I0#N{92>=WDj{Gpz5=YzQ5UFII{A=HQrO_kojy>)YGZgH8-EiNuB&ak_D z$JI+8ED&@|57RBg73Fo6{B*D}gPd@fVv_C8daajz# z+M((ZssExr?Os>VmC4$|OnJU~h)!g5zd9z_^oZ31m9OGOJBgnB>KTYiRPI|Z9q6id znJNk#x>OxlI#j*iZK>{aeSOc;!KsW?77!aWwA4Jkrp00u0oGWS)U37G*&eOoL(Z;U zrllfk@^#Dw9v0bB%>m@%myisDq@(jHJ@27$qbX@V1Pk&@@m4S{CNr&y8e;`1iehLj z2sbc+O!{+V_?X4h-Wfw*-jz+OWwKNA36}Jr8_&S(R!stFk|6tg9diLCXR#>e+p}uT z=DBf-xp9~lat0~){4}Ol*j-%muxjo&_=l2H926U#@6+_*`zWB6u6lI=cFi&yZya^f z!D{zLuz?j|i8;9{2^qUyZeYc(St@8vKS<*UYIi9i= z{~Y)k@I8R;e+96IWxz6E8L$jk1}p=X0n318z%pPNunbrRECX+u0s2Ct_7Ka&qy4-- zzEezh9DJv)zDTfspS68|6o|z4+cQUgFsGfOsQO^8b^wy`a$A13Hz)QflfDm{J zI0yV2&-gR&L*Sc$dgfcE%vQ7vSOzQumI2FvWxz6E8L$jk2Hr6S=nbaDu97?nssl)- z$~a=se~`pTk{ekwF5mM zMMI^)3P=m3RHs^#mzrJh9;zxKk5bPaCfnR4u``7Z-jb$P(y>p`@!#n;=12!bo5Kx> zsL^paWcCPcMnh~trCrj=YK>GSC~@d|b-385hwgLiLs*wVDn`0Zt+9g)u-4|TuAH7; z?Lo?hv68iNVQ&|m%500BBgw8(k&+akvaU1%G`dE%S_=^LL$T97k*fAYvYBphPbqzy 
z;)ADC$!F?vj8ukf4FYAg-CfB_mAFNTVClb(s~ZM=#A2zp-W!Tny1!d(;__{thXO+l zN>)}U7O124Xudw%*geX;>t*%I}NF; Os;_P;<&OEQ>--0C&J|w( From 181115ffef8221a57ace384355774f6ce8d86166 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 13:04:22 -0800 Subject: [PATCH 46/90] updates page.js --- src/client/app/therapy/page.js | 49 +++++++++++++++++++++++++++------- 1 file changed, 40 insertions(+), 9 deletions(-) diff --git a/src/client/app/therapy/page.js b/src/client/app/therapy/page.js index 5ce8018..482eb31 100644 --- a/src/client/app/therapy/page.js +++ b/src/client/app/therapy/page.js @@ -1,35 +1,66 @@ "use client"; import { useState } from 'react'; +import ExerciseList from '../components/ExerciseList'; export default function TherapyPage() { const [session, setSession] = useState(null); + const [chat, setChat] = useState([]); + const [input, setInput] = useState(""); const [loading, setLoading] = useState(false); + const [exercises, setExercises] = useState(null); const startSession = async () => { setLoading(true); - const res = await fetch('http://localhost:5001/agent/start', { + const res = await fetch('/api/therapy/start', { method: 'POST', - headers: {'Content-Type': 'application/json'}, body: JSON.stringify({ userId: 'horyzon' }) }); const data = await res.json(); setSession(data); + setChat([{ role: 'assistant', content: data.openingMessage }]); setLoading(false); }; + const sendMessage = async () => { + const newChat = [...chat, { role: 'user', content: input }]; + setChat(newChat); + setInput(""); + + const res = await fetch('/api/therapy/chat', { + method: 'POST', + body: JSON.stringify({ + userId: 'horyzon', + message: input, + transcript: chat, + evidence: session.evidenceFound + }) + }); + const data = await res.json(); + setChat([...newChat, { role: 'assistant', content: data.reply }]); + }; + return ( -
+
{!session ? ( ) : ( -
-
- "Food for thought: {session.food_for_thought}" + <> +
+ {chat.map((m, i) => ( +
+ + {m.content} + +
+ ))} +
+
+ setInput(e.target.value)} className="flex-1 border p-2 rounded" /> +
- {/* Chat Interface Goes Here */} -
+ )}
); From e408667ce615b4d2fe740255ffdc4224c40b26e9 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 13:07:11 -0800 Subject: [PATCH 47/90] updates flask bridge to have end_session endpoint --- src/server/bridge.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/src/server/bridge.py b/src/server/bridge.py index cb57933..21a1130 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -179,5 +179,27 @@ def save_session(): except Exception as e: return jsonify({"error": str(e)}), 500 +@app.route('/agent/end_session', methods=['POST']) +def end_session(): + try: + data = request.json + state = { + "user_id": data.get('user_id'), + "transcript": [HumanMessage(content=m['content']) if m['role'] == 'user' else AIMessage(content=m['content']) for m in data.get('transcript', [])], + "evidence": data.get('evidence', []), + "exercises": [] + } + + # We manually trigger the wrap_up_node logic + from agents import wrap_up_node + result = wrap_up_node(state) + + return jsonify({ + "exercises": result['exercises'], + "status": "completed" + }) + except Exception as e: + return jsonify({"error": str(e)}), 500 + if __name__ == '__main__': app.run(port=5001) From 08ecded622b45db91c60d39b074194bb9dc3dc06 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 13:09:56 -0800 Subject: [PATCH 48/90] updates page.js to use the therapy components and new api routes properly --- src/client/app/therapy/page.js | 102 +++++++++++++++++++++++++++------ 1 file changed, 84 insertions(+), 18 deletions(-) diff --git a/src/client/app/therapy/page.js b/src/client/app/therapy/page.js index 482eb31..5f08727 100644 --- a/src/client/app/therapy/page.js +++ b/src/client/app/therapy/page.js @@ -9,57 +9,123 @@ export default function TherapyPage() { const [loading, setLoading] = useState(false); const [exercises, setExercises] = useState(null); + // STEP 2 & 3: Start session and get "Food for Thought" const startSession = async () => { 
setLoading(true); - const res = await fetch('/api/therapy/start', { + const res = await fetch('http://localhost:5001/agent/start', { method: 'POST', + headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ userId: 'horyzon' }) }); const data = await res.json(); - setSession(data); - setChat([{ role: 'assistant', content: data.openingMessage }]); + + setSession({ + evidence: data.evidence, + user_id: 'horyzon' + }); + setChat([{ role: 'assistant', content: data.food_for_thought }]); setLoading(false); }; + // STEP 5: Main Chat Loop const sendMessage = async () => { - const newChat = [...chat, { role: 'user', content: input }]; + if (!input.trim()) return; + + const userMsg = { role: 'user', content: input }; + const newChat = [...chat, userMsg]; setChat(newChat); setInput(""); - - const res = await fetch('/api/therapy/chat', { + setLoading(true); + + const res = await fetch('http://localhost:5001/agent/run_session', { method: 'POST', - body: JSON.stringify({ - userId: 'horyzon', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + user_id: 'horyzon', message: input, transcript: chat, - evidence: session.evidenceFound + evidence: session.evidence }) }); const data = await res.json(); - setChat([...newChat, { role: 'assistant', content: data.reply }]); + setChat([...newChat, { role: 'assistant', content: data.therapy_response }]); + setLoading(false); }; + // STEP 7: Wrap up and get Exercises + const finishSession = async () => { + setLoading(true); + const res = await fetch('http://localhost:5001/agent/end_session', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + user_id: 'horyzon', + transcript: chat, + evidence: session.evidence + }) + }); + const data = await res.json(); + setExercises(data.exercises); + setLoading(false); + }; + + if (exercises) { + return ( +
+

Session Complete

+

Based on our talk, here are some exercises to help you move forward:

+ + +
+ ); + } + return (
{!session ? ( - +
+

Ready for your session?

+

I'll take a moment to review your recent journals first.

+ +
) : ( <> -
+
{chat.map((m, i) => (
- +
{m.content} - +
))} + {loading &&
Assistant is thinking...
}
+
- setInput(e.target.value)} className="flex-1 border p-2 rounded" /> - + setInput(e.target.value)} + onKeyDown={(e) => e.key === 'Enter' && sendMessage()} + className="flex-1 border p-3 rounded-lg outline-none focus:ring-2 focus:ring-blue-500" + placeholder="Type your message..." + /> +
+ + )}
From e476e721bcc3b3780d3fa205c90f45f012d18d27 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 13:13:06 -0800 Subject: [PATCH 49/90] updates run_session and start agent endpoint in flask bridge --- src/server/bridge.py | 107 +++++++++++++++++++++++-------------------- 1 file changed, 57 insertions(+), 50 deletions(-) diff --git a/src/server/bridge.py b/src/server/bridge.py index 21a1130..efaa413 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -105,56 +105,6 @@ def save_session(): # Then client.upsert(...) return jsonify({"status": "saved"}) -@app.route('/agent/start', methods=['POST']) -def start_agent(): - try: - data = request.json - user_id = data.get('userId') # Next.js sends 'userId' - - # Initialize fresh state for Steps 2-4 - state = { - "user_id": user_id, - "session_id": str(int(time.time())), - "transcript": [], - "evidence": [], - "food_for_thought": "", - "exercises": [] - } - - # This triggers the research_node in your graph - from agents import research_node - result = research_node(state) - - return jsonify({ - "food_for_thought": result['food_for_thought'], - "evidence": result['evidence'] # Step 3 evidence - }) - except Exception as e: - return jsonify({"error": str(e)}), 500 - -@app.route('/agent/run_session', methods=['POST']) -def run_session(): - try: - data = request.json - # The frontend needs to send the full transcript and evidence back - state = { - "user_id": data.get('user_id'), - "transcript": [HumanMessage(content=m['content']) if m['role'] == 'user' else AIMessage(content=m['content']) for m in data.get('transcript', [])], - "evidence": data.get('evidence', []), - "exercises": [] - } - - # Add the newest message - state['transcript'].append(HumanMessage(content=data.get('message'))) - - from agents import therapist_node - result = therapist_node(state) - - return jsonify({ - "therapy_response": result['transcript'][-1].content, - "full_transcript": [{"role": "user" if isinstance(m, HumanMessage) 
else "assistant", "content": m.content} for m in result['transcript']] - }) - @app.route('/journal/save_session', methods=['POST']) def save_session(): try: @@ -201,5 +151,62 @@ def end_session(): except Exception as e: return jsonify({"error": str(e)}), 500 +@app.route('/agent/run_session', methods=['POST']) +def run_session(): + try: + data = request.json + # Convert the JSON transcript back into LangChain Message objects + raw_transcript = data.get('transcript', []) + formatted_transcript = [] + for m in raw_transcript: + if m['role'] == 'user': + formatted_transcript.append(HumanMessage(content=m['content'])) + else: + formatted_transcript.append(AIMessage(content=m['content'])) + + state = { + "user_id": data.get('user_id'), + "transcript": formatted_transcript, + "evidence": data.get('evidence', []), # Crucial for context + "exercises": [] + } + + # Add the newest user message to the state + state['transcript'].append(HumanMessage(content=data.get('message'))) + + from agents import therapist_node + result = therapist_node(state) + + # Return the content of the LAST message (the AI response) + return jsonify({ + "therapy_response": result['transcript'][-1].content, + "full_transcript": [{"role": "user" if isinstance(m, HumanMessage) else "assistant", "content": m.content} for m in result['transcript']] + }) + except Exception as e: + return jsonify({"error": str(e)}), 500 + +@app.route('/agent/start', methods=['POST']) +def start_agent(): + try: + data = request.json + user_id = data.get('user_id') # Ensure this matches Next.js key + + state = { + "user_id": user_id, + "transcript": [], + "evidence": [], + "food_for_thought": "" + } + + from agents import research_node + result = research_node(state) + + return jsonify({ + "food_for_thought": result['food_for_thought'], + "evidence": result['evidence'] + }) + except Exception as e: + return jsonify({"error": str(e)}), 500 + if __name__ == '__main__': app.run(port=5001) From 
8fa28b6756f1f2b23eee52ddff65889820c15678 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 13:13:30 -0800 Subject: [PATCH 50/90] removes the entire page.js --- src/client/app/therapy/page.js | 133 --------------------------------- 1 file changed, 133 deletions(-) diff --git a/src/client/app/therapy/page.js b/src/client/app/therapy/page.js index 5f08727..e69de29 100644 --- a/src/client/app/therapy/page.js +++ b/src/client/app/therapy/page.js @@ -1,133 +0,0 @@ -"use client"; -import { useState } from 'react'; -import ExerciseList from '../components/ExerciseList'; - -export default function TherapyPage() { - const [session, setSession] = useState(null); - const [chat, setChat] = useState([]); - const [input, setInput] = useState(""); - const [loading, setLoading] = useState(false); - const [exercises, setExercises] = useState(null); - - // STEP 2 & 3: Start session and get "Food for Thought" - const startSession = async () => { - setLoading(true); - const res = await fetch('http://localhost:5001/agent/start', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ userId: 'horyzon' }) - }); - const data = await res.json(); - - setSession({ - evidence: data.evidence, - user_id: 'horyzon' - }); - setChat([{ role: 'assistant', content: data.food_for_thought }]); - setLoading(false); - }; - - // STEP 5: Main Chat Loop - const sendMessage = async () => { - if (!input.trim()) return; - - const userMsg = { role: 'user', content: input }; - const newChat = [...chat, userMsg]; - setChat(newChat); - setInput(""); - setLoading(true); - - const res = await fetch('http://localhost:5001/agent/run_session', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - user_id: 'horyzon', - message: input, - transcript: chat, - evidence: session.evidence - }) - }); - const data = await res.json(); - setChat([...newChat, { role: 'assistant', content: data.therapy_response }]); - 
setLoading(false); - }; - - // STEP 7: Wrap up and get Exercises - const finishSession = async () => { - setLoading(true); - const res = await fetch('http://localhost:5001/agent/end_session', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - user_id: 'horyzon', - transcript: chat, - evidence: session.evidence - }) - }); - const data = await res.json(); - setExercises(data.exercises); - setLoading(false); - }; - - if (exercises) { - return ( -
-

Session Complete

-

Based on our talk, here are some exercises to help you move forward:

- - -
- ); - } - - return ( -
- {!session ? ( -
-

Ready for your session?

-

I'll take a moment to review your recent journals first.

- -
- ) : ( - <> -
- {chat.map((m, i) => ( -
-
- {m.content} -
-
- ))} - {loading &&
Assistant is thinking...
} -
- -
- setInput(e.target.value)} - onKeyDown={(e) => e.key === 'Enter' && sendMessage()} - className="flex-1 border p-3 rounded-lg outline-none focus:ring-2 focus:ring-blue-500" - placeholder="Type your message..." - /> - -
- - - - )} -
- ); -} From 29fb701c2c1baa1db9d3d62fdbc73a747ee44239 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 13:14:51 -0800 Subject: [PATCH 51/90] updates therapy/chat route.js --- src/client/app/api/therapy/chat/route.js | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/client/app/api/therapy/chat/route.js b/src/client/app/api/therapy/chat/route.js index c0e45e9..9d77da9 100644 --- a/src/client/app/api/therapy/chat/route.js +++ b/src/client/app/api/therapy/chat/route.js @@ -1,22 +1,22 @@ -// src/client/app/api/therapy/chat/route.js export async function POST(req) { - const { userId, message } = await req.json(); + const { userId, message, transcript, evidence } = await req.json(); const res = await fetch('http://localhost:5001/agent/run_session', { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ - user_id: userId, // Match Python's 'user_id' - message: message // Match Python's 'message' + user_id: userId, + message: message, + transcript: transcript, // Pass history back to Python + evidence: evidence // Pass evidence back to Python }) }); - if (!res.ok) { - const errorText = await res.text(); - console.error("Python Bridge Error:", errorText); - throw new Error("Bridge failed"); - } + if (!res.ok) throw new Error("Bridge failed"); const data = await res.json(); - return Response.json({ reply: data.therapy_response }); + return Response.json({ + reply: data.therapy_response, + fullTranscript: data.full_transcript + }); } From f3c34b51a9a9a94961a3838269e0d3193a1739bc Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 13:16:09 -0800 Subject: [PATCH 52/90] updates page.js --- src/client/app/therapy/page.js | 201 +++++++++++++++++++++++++++++++++ 1 file changed, 201 insertions(+) diff --git a/src/client/app/therapy/page.js b/src/client/app/therapy/page.js index e69de29..aeefae5 100644 --- a/src/client/app/therapy/page.js +++ 
b/src/client/app/therapy/page.js @@ -0,0 +1,201 @@ +"use client"; +import { useState, useEffect, useRef } from 'react'; +import ExerciseList from '../components/ExerciseList'; + +export default function TherapyPage() { + const [session, setSession] = useState(null); + const [chat, setChat] = useState([]); + const [input, setInput] = useState(""); + const [loading, setLoading] = useState(false); + const [exercises, setExercises] = useState(null); + + // Ref for auto-scrolling the chat window + const chatEndRef = useRef(null); + + const scrollToBottom = () => { + chatEndRef.current?.scrollIntoView({ behavior: "smooth" }); + }; + + useEffect(() => { + scrollToBottom(); + }, [chat, loading]); + + // STEP 2 & 3: Start session and get "Food for Thought" from Gemini via Research Node + const startSession = async () => { + setLoading(true); + try { + const res = await fetch('/api/therapy/start', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ userId: 'horyzon' }) + }); + const data = await res.json(); + + setSession({ + evidence: data.evidenceFound, // The clinical themes found in Actian + user_id: 'horyzon' + }); + + // The opening message generated in agents.py + setChat([{ role: 'assistant', content: data.openingMessage }]); + } catch (err) { + console.error("Failed to start session:", err); + } finally { + setLoading(false); + } + }; + + // STEP 5: Main Chat Loop with History Management + const sendMessage = async () => { + if (!input.trim() || loading) return; + + const userMsg = { role: 'user', content: input }; + const currentChat = [...chat, userMsg]; + + // UI Update: Show user message immediately + setChat(currentChat); + setInput(""); + setLoading(true); + + try { + const res = await fetch('/api/therapy/chat', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + userId: 'horyzon', + message: input, + transcript: chat, // Send history so Gemini has memory + evidence: 
session.evidence // Send Actian evidence for grounding + }) + }); + + const data = await res.json(); + + if (data.reply) { + setChat([...currentChat, { role: 'assistant', content: data.reply }]); + } + } catch (err) { + console.error("Chat Error:", err); + setChat([...currentChat, { role: 'assistant', content: "I'm having trouble connecting. Is the Python bridge running?" }]); + } finally { + setLoading(false); + } + }; + + // STEP 7: Wrap up and trigger JSON Exercise generation in wrap_up_node + const finishSession = async () => { + setLoading(true); + try { + const res = await fetch('http://localhost:5001/agent/end_session', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + user_id: 'horyzon', + transcript: chat, + evidence: session.evidence + }) + }); + const data = await res.json(); + setExercises(data.exercises); + } catch (err) { + console.error("Wrap up error:", err); + } finally { + setLoading(false); + } + }; + + // Render Exercise Results View + if (exercises) { + return ( +
+
+

Session Complete

+

Based on our conversation, here are three tailored tools for you:

+
+ +
+ +
+
+ ); + } + + return ( +
+ {!session ? ( +
+
+ 🌿 +
+

Start your session

+

+ I'll review your recent journal logs to help guide our conversation today. +

+ +
+ ) : ( +
+ {/* Chat Window */} +
+ {chat.map((m, i) => ( +
+
+

{m.content}

+
+
+ ))} + {loading && ( +
+
+ Thinking... +
+
+ )} +
+
+ + {/* Input Area */} +
+ setInput(e.target.value)} + onKeyDown={(e) => e.key === 'Enter' && sendMessage()} + className="flex-1 p-3 outline-none text-gray-700" + placeholder="What's on your mind?" + disabled={loading} + /> + +
+ + +
+ )} +
+ ); +} From 67eb50fbb950e10b3bc8bdd938a5a9863fa99dc9 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 13:18:12 -0800 Subject: [PATCH 53/90] removes duplicate route definition in bridge.py --- src/server/bridge.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/server/bridge.py b/src/server/bridge.py index efaa413..9f490b9 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -97,14 +97,6 @@ def save_chat(): from graph import app_agent # Import the compiled LangGraph -@app.route('/journal/save_session', methods=['POST']) -def save_session(): - """Step 8: Embed the final conversation and save to Actian.""" - data = request.json - # Here you would use sentence-transformers to embed data['text'] - # Then client.upsert(...) - return jsonify({"status": "saved"}) - @app.route('/journal/save_session', methods=['POST']) def save_session(): try: From d08302a8353a75ee527c61e7ec896adf3c22ad96 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 13:20:59 -0800 Subject: [PATCH 54/90] writes db.py --- src/server/db.py | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 src/server/db.py diff --git a/src/server/db.py b/src/server/db.py new file mode 100644 index 0000000..d825473 --- /dev/null +++ b/src/server/db.py @@ -0,0 +1,7 @@ +import os +from cortex import CortexClient + +# Initialize Actian Cortex Client +client = CortexClient("localhost:50051") +client.connect() +COLLECTION = "user_journals" From 13414106633709f64208583d886243757bd7fe68 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 13:23:24 -0800 Subject: [PATCH 55/90] updates agents.py to take into account db.py --- src/server/agents.py | 68 +++++++++++--------------------------------- 1 file changed, 17 insertions(+), 51 deletions(-) diff --git a/src/server/agents.py b/src/server/agents.py index 4ada3e1..1f3c88d 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -1,29 +1,20 @@ import os import json -import requests 
from langchain_google_genai import ChatGoogleGenerativeAI from langchain_core.messages import SystemMessage, HumanMessage, AIMessage from state import TherapySessionState -from embedder import get_embedding # Our backend embedder utility +from db import client, COLLECTION # Import from our new db.py +from cortex import Filter, Field -# Initialize Gemini 1.5 Pro +# Initialize Gemini llm = ChatGoogleGenerativeAI( model="gemini-1.5-pro", google_api_key=os.getenv("GEMINI_API_KEY"), - temperature=0.7 # Slight randomness for better therapist tone + temperature=0.7 ) def research_node(state: TherapySessionState): - """ - Step 2: Start session. - Step 3: Query Actian for 'Source Truth' based on clinical themes. - Step 4: Generate the 'Food for Thought' opening message. - """ user_id = state.get('user_id') - if not user_id: - return {"transcript": [AIMessage(content="Hello! I'm here to help, but I couldn't find your user profile. How are you feeling today?")]} - - # 1. Step 3: Define clinical query topics query_topics = [ "recent panic attacks or physical anxiety symptoms", "social anxiety and workplace stress", @@ -32,53 +23,28 @@ def research_node(state: TherapySessionState): all_evidence = [] - # 2. Iterate through topics to gather evidence from Actian + # 1. Search Actian Directly (No requests.post!) 
+ from embedder import get_embedding for topic in query_topics: try: - # We call the local bridge search endpoint - res = requests.post("http://localhost:5001/search", json={ - "query": topic, - "user_id": user_id - }, timeout=5) - - if res.status_code == 200: - logs = res.json() # Expected format: [{"text": "..."}] - all_evidence.extend([log['text'] for log in logs]) + vector = get_embedding(topic) + user_filter = Filter().must(Field("user_id").eq(user_id)) + results = client.search(COLLECTION, query=vector, filter=user_filter, top_k=3, with_payload=True) + all_evidence.extend([r.payload['text'] for r in results]) except Exception as e: - print(f"Research Node Error (Search): {e}") + print(f"DB Search Error: {e}") - # 3. Deduplicate and clean evidence unique_evidence = list(set(all_evidence)) - - # Format evidence for the prompt; provide a fallback if empty - evidence_context = "\n".join([f"- {text}" for text in unique_evidence]) if unique_evidence else "The user has no recent journal logs." + evidence_context = "\n".join([f"- {text}" for text in unique_evidence]) if unique_evidence else "No logs found." - # 4. Step 4: Craft the opening message - research_summary_prompt = f""" - You are a compassionate AI therapist preparing for a session. - - PATIENT RECENT LOGS: - {evidence_context} - - YOUR TASK: - 1. If there are logs: Identify a recurring theme and write a gentle, reflective 'Food for Thought' sentence to start the session. - 2. If there are NO logs: Write a warm, open-ended invitation for the user to share what's on their mind today. - 3. Keep it brief (max 2 sentences) and empathetic. - """ - - try: - # We ensure the message list is never empty to avoid 'contents are required' - response = llm.invoke([SystemMessage(content=research_summary_prompt)]) - food_for_thought = response.content if response.content.strip() else "I'm glad you're here today. How have things been feeling for you lately?" 
- except Exception as e: - print(f"Research Node Error (LLM): {e}") - food_for_thought = "I'm glad we can connect today. What's been on your mind?" + # 2. Prompt Gemini for the opening hook + prompt = f"PATIENT LOGS:\n{evidence_context}\n\nTask: Write a 1-2 sentence empathetic opening based on these logs." + response = llm.invoke([SystemMessage(content=prompt)]) - # 5. Return the updated state return { "evidence": unique_evidence, - "food_for_thought": food_for_thought, - "transcript": [AIMessage(content=food_for_thought)] + "food_for_thought": response.content, + "transcript": [AIMessage(content=response.content)] } def therapist_node(state: TherapySessionState): From 29ead7f6af8b848a40fb0b9e4465c6b65db716f9 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 13:28:42 -0800 Subject: [PATCH 56/90] updates bridge.py --- src/server/bridge.py | 108 ++++++++++++------------------------------- 1 file changed, 30 insertions(+), 78 deletions(-) diff --git a/src/server/bridge.py b/src/server/bridge.py index 9f490b9..a89702c 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -1,9 +1,10 @@ -import time from flask import Flask, request, jsonify from flask_cors import CORS -from cortex import CortexClient, DistanceMetric, Filter, Field -from embedder import get_embedding # Make sure embedder.py exists in the same folder -from langchain_core.messages import HumanMessage, AIMessage, SystemMessage +import time +from db import client, COLLECTION # Use the shared client +from embedder import get_embedding +from langchain_core.messages import HumanMessage, AIMessage +from agents import research_node, therapist_node, wrap_up_node app = Flask(__name__) CORS(app) @@ -121,84 +122,35 @@ def save_session(): except Exception as e: return jsonify({"error": str(e)}), 500 -@app.route('/agent/end_session', methods=['POST']) -def end_session(): - try: - data = request.json - state = { - "user_id": data.get('user_id'), - "transcript": 
[HumanMessage(content=m['content']) if m['role'] == 'user' else AIMessage(content=m['content']) for m in data.get('transcript', [])], - "evidence": data.get('evidence', []), - "exercises": [] - } - - # We manually trigger the wrap_up_node logic - from agents import wrap_up_node - result = wrap_up_node(state) - - return jsonify({ - "exercises": result['exercises'], - "status": "completed" - }) - except Exception as e: - return jsonify({"error": str(e)}), 500 - -@app.route('/agent/run_session', methods=['POST']) -def run_session(): - try: - data = request.json - # Convert the JSON transcript back into LangChain Message objects - raw_transcript = data.get('transcript', []) - formatted_transcript = [] - for m in raw_transcript: - if m['role'] == 'user': - formatted_transcript.append(HumanMessage(content=m['content'])) - else: - formatted_transcript.append(AIMessage(content=m['content'])) - - state = { - "user_id": data.get('user_id'), - "transcript": formatted_transcript, - "evidence": data.get('evidence', []), # Crucial for context - "exercises": [] - } - - # Add the newest user message to the state - state['transcript'].append(HumanMessage(content=data.get('message'))) - - from agents import therapist_node - result = therapist_node(state) - - # Return the content of the LAST message (the AI response) - return jsonify({ - "therapy_response": result['transcript'][-1].content, - "full_transcript": [{"role": "user" if isinstance(m, HumanMessage) else "assistant", "content": m.content} for m in result['transcript']] - }) - except Exception as e: - return jsonify({"error": str(e)}), 500 - @app.route('/agent/start', methods=['POST']) def start_agent(): - try: - data = request.json - user_id = data.get('user_id') # Ensure this matches Next.js key - - state = { - "user_id": user_id, - "transcript": [], - "evidence": [], - "food_for_thought": "" - } + data = request.json + state = {"user_id": data.get('user_id'), "transcript": [], "evidence": []} + result = 
research_node(state) + return jsonify({"food_for_thought": result['food_for_thought'], "evidence": result['evidence']}) - from agents import research_node - result = research_node(state) +@app.route('/agent/run_session', methods=['POST']) +def run_session(): + data = request.json + # Reconstruct history + history = [HumanMessage(content=m['content']) if m['role'] == 'user' else AIMessage(content=m['content']) for m in data.get('transcript', [])] + history.append(HumanMessage(content=data.get('message'))) + + state = {"user_id": data.get('user_id'), "transcript": history, "evidence": data.get('evidence', [])} + result = therapist_node(state) + + return jsonify({ + "therapy_response": result['transcript'][-1].content, + "full_transcript": [{"role": "user" if isinstance(m, HumanMessage) else "assistant", "content": m.content} for m in result['transcript']] + }) - return jsonify({ - "food_for_thought": result['food_for_thought'], - "evidence": result['evidence'] - }) - except Exception as e: - return jsonify({"error": str(e)}), 500 +@app.route('/agent/end_session', methods=['POST']) +def end_session(): + data = request.json + history = [HumanMessage(content=m['content']) if m['role'] == 'user' else AIMessage(content=m['content']) for m in data.get('transcript', [])] + state = {"user_id": data.get('user_id'), "transcript": history, "evidence": data.get('evidence', [])} + result = wrap_up_node(state) + return jsonify({"exercises": result['exercises']}) if __name__ == '__main__': app.run(port=5001) From 37d4e6acdbbf39f5ce605d326f4114b796ae9937 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 13:56:34 -0800 Subject: [PATCH 57/90] so many updates --- list-files.sh | 44 +++++++++ src/client/app/api/therapy/end/route.js | 14 +++ src/client/app/therapy/page.js | 8 +- src/server/agents.py | 93 ++++++------------ src/server/bridge.py | 124 ++++++++---------------- src/server/db.py | 8 ++ 6 files changed, 139 insertions(+), 152 deletions(-) create mode 100755 
list-files.sh create mode 100644 src/client/app/api/therapy/end/route.js diff --git a/list-files.sh b/list-files.sh new file mode 100755 index 0000000..ffc92c8 --- /dev/null +++ b/list-files.sh @@ -0,0 +1,44 @@ +echo src/server/bridge.py +cat src/server/bridge.py + +echo src/server/agents.py +cat src/server/agents.py + +echo src/server/db.py +cat src/server/db.py + +echo src/server/graph.py +cat src/server/graph.py + +echo src/server/state.py +cat src/server/state.py + +echo src/server/requirements.txt +cat src/server/requirements.txt + +echo src/server/embedder.py +cat src/server/embedder.py + +echo src/server/docker/docker-compose.yml +cat src/server/docker/docker-compose.yml + +echo src/client/next.config.mjs +cat src/client/next.config.mjs + +echo src/client/app/api/journal/save/route.js +cat src/client/app/api/journal/save/route.js + +echo src/client/app/api/journal/summary/route.js +cat src/client/app/api/journal/summary/route.js + +echo src/client/app/api/therapy/chat/route.js +cat src/client/app/api/therapy/chat/route.js + +echo src/client/app/api/therapy/start/route.js +cat src/client/app/api/therapy/start/route.js + +echo src/client/app/components/ExerciseList.js +cat src/client/app/components/ExerciseList.js + +echo src/client/app/api/therapy/end/route.js +cat src/client/app/api/therapy/end/route.js diff --git a/src/client/app/api/therapy/end/route.js b/src/client/app/api/therapy/end/route.js new file mode 100644 index 0000000..006960c --- /dev/null +++ b/src/client/app/api/therapy/end/route.js @@ -0,0 +1,14 @@ +export async function POST(req) { + const body = await req.json(); + const res = await fetch('http://localhost:5001/agent/end_session', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + user_id: body.userId, + transcript: body.transcript, + evidence: body.evidence + }) + }); + const data = await res.json(); + return Response.json(data); +} diff --git a/src/client/app/therapy/page.js 
b/src/client/app/therapy/page.js index aeefae5..1feb980 100644 --- a/src/client/app/therapy/page.js +++ b/src/client/app/therapy/page.js @@ -82,15 +82,15 @@ export default function TherapyPage() { } }; - // STEP 7: Wrap up and trigger JSON Exercise generation in wrap_up_node - const finishSession = async () => { + const finishSession = async () => { setLoading(true); try { - const res = await fetch('http://localhost:5001/agent/end_session', { + // Create a proxy for this in /api/therapy/end/route.js or hit bridge via proxy + const res = await fetch('/api/therapy/end', { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ - user_id: 'horyzon', + userId: 'horyzon', transcript: chat, evidence: session.evidence }) diff --git a/src/server/agents.py b/src/server/agents.py index 1f3c88d..c732cca 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -3,10 +3,10 @@ from langchain_google_genai import ChatGoogleGenerativeAI from langchain_core.messages import SystemMessage, HumanMessage, AIMessage from state import TherapySessionState -from db import client, COLLECTION # Import from our new db.py +from embedder import get_embedding +import db from cortex import Filter, Field -# Initialize Gemini llm = ChatGoogleGenerativeAI( model="gemini-1.5-pro", google_api_key=os.getenv("GEMINI_API_KEY"), @@ -15,91 +15,52 @@ def research_node(state: TherapySessionState): user_id = state.get('user_id') - query_topics = [ - "recent panic attacks or physical anxiety symptoms", - "social anxiety and workplace stress", - "sleep quality and nighttime anxiety" - ] - + query_topics = ["panic attacks and physical anxiety", "work stress and social anxiety", "sleep quality"] all_evidence = [] - - # 1. Search Actian Directly (No requests.post!) 
- from embedder import get_embedding + for topic in query_topics: try: vector = get_embedding(topic) user_filter = Filter().must(Field("user_id").eq(user_id)) - results = client.search(COLLECTION, query=vector, filter=user_filter, top_k=3, with_payload=True) - all_evidence.extend([r.payload['text'] for r in results]) + results = db.client.search(db.COLLECTION, query=vector, filter=user_filter, top_k=3, with_payload=True) + for r in results: + # Ensure the key exists in payload + if 'text' in r.payload: + all_evidence.append(r.payload['text']) except Exception as e: - print(f"DB Search Error: {e}") + print(f"Research Node Error: {e}") unique_evidence = list(set(all_evidence)) - evidence_context = "\n".join([f"- {text}" for text in unique_evidence]) if unique_evidence else "No logs found." + evidence_context = "\n".join([f"- {text}" for text in unique_evidence]) if unique_evidence else "No recent logs." - # 2. Prompt Gemini for the opening hook - prompt = f"PATIENT LOGS:\n{evidence_context}\n\nTask: Write a 1-2 sentence empathetic opening based on these logs." - response = llm.invoke([SystemMessage(content=prompt)]) + prompt = f"PATIENT LOGS:\n{evidence_context}\n\nTask: Write a brief, empathetic therapist opening message." + + try: + response = llm.invoke([SystemMessage(content=prompt)]) + fft = response.content + except: + fft = "I'm glad you're here today. How are things feeling?" return { "evidence": unique_evidence, - "food_for_thought": response.content, - "transcript": [AIMessage(content=response.content)] + "food_for_thought": fft, + "transcript": [AIMessage(content=fft)] } def therapist_node(state: TherapySessionState): - """Steps 5-7: Active Conversation.""" evidence_str = "\n".join(state.get('evidence', [])) - - system_prompt = f""" - You are a professional AI therapist specialized in anxiety support. 
- - PATIENT HISTORY (Source Truth): - {evidence_str if evidence_str else "No historical logs available."} - - INSTRUCTIONS: - - Reference their history naturally (e.g., 'Earlier you mentioned...' or 'I recall your log about...'). - - If you need more detail to help, ask clarifying questions (Step 7). - - Stay empathetic, grounded, and focused on anxiety management. - """ - - # Maintain the transcript history for the LLM + system_prompt = f"You are an AI therapist. Context: {evidence_str}" messages = [SystemMessage(content=system_prompt)] + state['transcript'] - - try: - response = llm.invoke(messages) - return {"transcript": state['transcript'] + [response]} - except Exception as e: - print(f"Therapist Node Error: {e}") - return {"transcript": state['transcript'] + [AIMessage(content="I'm sorry, I'm having a little trouble processing that. Could you say it another way?")]} + response = llm.invoke(messages) + return {"transcript": state['transcript'] + [response]} def wrap_up_node(state: TherapySessionState): - """Steps 8-9: Closing and Exercise Generation.""" - # Step 9: Generate 3 Exercises - exercise_prompt = """ - Based on the therapy session transcript provided, generate exactly 3 interactive exercises for the user. - 1. A breathing exercise. - 2. A simple TODO or action item. - 3. A short mindfulness or grounding script. - - Respond ONLY with a valid JSON list of objects: - [ - {"title": "...", "type": "breathing", "content": "..."}, - {"title": "...", "type": "todo", "content": "..."}, - {"title": "...", "type": "script", "content": "..."} - ] - """ - - history = "\n".join([f"{'User' if isinstance(m, HumanMessage) else 'Therapist'}: {m.content}" for m in state['transcript']]) - + exercise_prompt = "Generate 3 exercises (breathing, todo, script) in a JSON list." 
+ history = "\n".join([m.content for m in state['transcript']]) try: response = llm.invoke([SystemMessage(content=exercise_prompt), HumanMessage(content=history)]) - - # Clean the string for JSON parsing (Gemini sometimes adds markdown blocks) clean_json = response.content.replace('```json', '').replace('```', '').strip() exercises = json.loads(clean_json) - except Exception as e: - print(f"Wrap Up Error: {e}") - exercises = [] # Fallback to empty list - + except: + exercises = [] return {"exercises": exercises} diff --git a/src/server/bridge.py b/src/server/bridge.py index a89702c..4d1b290 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -1,30 +1,31 @@ +import time from flask import Flask, request, jsonify from flask_cors import CORS -import time -from db import client, COLLECTION # Use the shared client + +# 1. Critical Imports +from cortex import Filter, Field from embedder import get_embedding from langchain_core.messages import HumanMessage, AIMessage -from agents import research_node, therapist_node, wrap_up_node +import db # Shared source of truth app = Flask(__name__) CORS(app) -client = CortexClient("localhost:50051") -client.connect() -COLLECTION = "user_journals" +# 2. Helper to prevent circular import crashes +def get_agent_nodes(): + from agents import research_node, therapist_node, wrap_up_node + return research_node, therapist_node, wrap_up_node + +# --- JOURNAL ROUTES --- @app.route('/upsert', methods=['POST']) def upsert(): try: data = request.json - # 1. Always generate the vector on the backend vector = get_embedding(data['text']) - - # 2. 
Force ID to integer (critical for Actian) entry_id = int(data.get('id', time.time())) - - client.upsert( - COLLECTION, + db.client.upsert( + db.COLLECTION, id=entry_id, vector=vector, payload={ @@ -33,97 +34,46 @@ def upsert(): "type": "journal_entry" } ) - client.flush(COLLECTION) + db.client.flush(db.COLLECTION) return jsonify({"success": True, "id": entry_id}) except Exception as e: - print(f"UPSERT ERROR: {e}") # This will show up in your bridge terminal return jsonify({"success": False, "error": str(e)}), 500 @app.route('/search', methods=['POST']) def search(): try: data = request.json - # Generate vector for the query string vector = get_embedding(data['query']) - + # Correctly using imported Filter and Field user_filter = Filter().must(Field("user_id").eq(data['user_id'])) - results = client.search(COLLECTION, query=vector, filter=user_filter, top_k=5, with_payload=True) - + results = db.client.search(db.COLLECTION, query=vector, filter=user_filter, top_k=5, with_payload=True) return jsonify([{"text": r.payload['text']} for r in results]) except Exception as e: return jsonify({"error": str(e)}), 500 -@app.route('/init', methods=['GET']) -def init(): - if not client.has_collection(COLLECTION): - client.create_collection(name=COLLECTION, dimension=384) - return jsonify({"status": "ready"}) - -# bridge.py updates -@app.route('/agent/search', methods=['POST']) -def agent_search(): - data = request.json - # The Reasoning Agent will send a "query_string" like - # "Find instances where the user mentions physical panic symptoms" - query_text = data.get("query") - user_id = data.get("user_id") - - # For the agent to work, we handle the embedding conversion here - # Use your existing pipeline or a utility function - vector = get_embedding(query_text) # - - results = client.search( - COLLECTION, - query=vector, - filter=Filter().must(Field("user_id").eq(user_id)), - top_k=8 - ) - return jsonify({"logs": [r.payload['text'] for r in results]}) - 
@app.route('/journal/save_chat', methods=['POST']) def save_chat(): - data = request.json - # We save the summary of the chat as a new "memory" - client.upsert( - COLLECTION, - id=int(time.time()), - vector=data['vector'], - payload={ - "text": data['full_transcript'], - "user_id": data['user_id'], - "type": "session_summary" # Distinguished from daily logs - } - ) - return jsonify({"success": True}) - -from graph import app_agent # Import the compiled LangGraph - -@app.route('/journal/save_session', methods=['POST']) -def save_session(): try: data = request.json - full_text = data.get('text') - user_id = data.get('user_id') - - vector = get_embedding(full_text) - - client.upsert( - COLLECTION, + db.client.upsert( + db.COLLECTION, id=int(time.time()), - vector=vector, + vector=data['vector'], payload={ - "text": f"Session Summary: {full_text}", - "user_id": user_id, - "type": "session_memory" + "text": data['full_transcript'], + "user_id": data['user_id'], + "type": "session_summary" } ) - client.flush(COLLECTION) - return jsonify({"status": "saved"}) + return jsonify({"success": True}) except Exception as e: return jsonify({"error": str(e)}), 500 +# --- AGENT ROUTES --- + @app.route('/agent/start', methods=['POST']) def start_agent(): + research_node, _, _ = get_agent_nodes() data = request.json state = {"user_id": data.get('user_id'), "transcript": [], "evidence": []} result = research_node(state) @@ -131,14 +81,15 @@ def start_agent(): @app.route('/agent/run_session', methods=['POST']) def run_session(): + _, therapist_node, _ = get_agent_nodes() data = request.json - # Reconstruct history - history = [HumanMessage(content=m['content']) if m['role'] == 'user' else AIMessage(content=m['content']) for m in data.get('transcript', [])] + raw_transcript = data.get('transcript', []) + history = [HumanMessage(content=m['content']) if m['role'] == 'user' else AIMessage(content=m['content']) for m in raw_transcript] 
history.append(HumanMessage(content=data.get('message'))) - + state = {"user_id": data.get('user_id'), "transcript": history, "evidence": data.get('evidence', [])} result = therapist_node(state) - + return jsonify({ "therapy_response": result['transcript'][-1].content, "full_transcript": [{"role": "user" if isinstance(m, HumanMessage) else "assistant", "content": m.content} for m in result['transcript']] @@ -146,11 +97,20 @@ def run_session(): @app.route('/agent/end_session', methods=['POST']) def end_session(): + _, _, wrap_up_node = get_agent_nodes() data = request.json - history = [HumanMessage(content=m['content']) if m['role'] == 'user' else AIMessage(content=m['content']) for m in data.get('transcript', [])] + raw_transcript = data.get('transcript', []) + history = [HumanMessage(content=m['content']) if m['role'] == 'user' else AIMessage(content=m['content']) for m in raw_transcript] + state = {"user_id": data.get('user_id'), "transcript": history, "evidence": data.get('evidence', [])} result = wrap_up_node(state) return jsonify({"exercises": result['exercises']}) +@app.route('/init', methods=['GET']) +def init(): + db.init_db() + return jsonify({"status": "ready"}) + if __name__ == '__main__': - app.run(port=5001) + db.init_db() # Ensure collection is created on startup + app.run(port=5001, debug=True) diff --git a/src/server/db.py b/src/server/db.py index d825473..8f2780a 100644 --- a/src/server/db.py +++ b/src/server/db.py @@ -1,7 +1,15 @@ import os +import time from cortex import CortexClient # Initialize Actian Cortex Client +# This is our single source of truth for the DB connection client = CortexClient("localhost:50051") client.connect() COLLECTION = "user_journals" + +def init_db(): + """Ensure the collection exists on startup.""" + if not client.has_collection(COLLECTION): + client.create_collection(name=COLLECTION, dimension=384) + return True From 268c2ceefd5cb12b3e4112f6727bf57d9620d9fb Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 
Feb 2026 14:13:29 -0800 Subject: [PATCH 58/90] removes the list-files.sh script --- list-files.sh | 44 -------------------------------------------- 1 file changed, 44 deletions(-) delete mode 100755 list-files.sh diff --git a/list-files.sh b/list-files.sh deleted file mode 100755 index ffc92c8..0000000 --- a/list-files.sh +++ /dev/null @@ -1,44 +0,0 @@ -echo src/server/bridge.py -cat src/server/bridge.py - -echo src/server/agents.py -cat src/server/agents.py - -echo src/server/db.py -cat src/server/db.py - -echo src/server/graph.py -cat src/server/graph.py - -echo src/server/state.py -cat src/server/state.py - -echo src/server/requirements.txt -cat src/server/requirements.txt - -echo src/server/embedder.py -cat src/server/embedder.py - -echo src/server/docker/docker-compose.yml -cat src/server/docker/docker-compose.yml - -echo src/client/next.config.mjs -cat src/client/next.config.mjs - -echo src/client/app/api/journal/save/route.js -cat src/client/app/api/journal/save/route.js - -echo src/client/app/api/journal/summary/route.js -cat src/client/app/api/journal/summary/route.js - -echo src/client/app/api/therapy/chat/route.js -cat src/client/app/api/therapy/chat/route.js - -echo src/client/app/api/therapy/start/route.js -cat src/client/app/api/therapy/start/route.js - -echo src/client/app/components/ExerciseList.js -cat src/client/app/components/ExerciseList.js - -echo src/client/app/api/therapy/end/route.js -cat src/client/app/api/therapy/end/route.js From 3de1e00601ca18f99de6f550fce3acd8aaf9e905 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 14:14:25 -0800 Subject: [PATCH 59/90] changes to agents.py to use newer gemini models --- src/server/agents.py | 41 +++++++++++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 12 deletions(-) diff --git a/src/server/agents.py b/src/server/agents.py index c732cca..77fb6a2 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -8,7 +8,7 @@ from cortex import Filter, Field llm = 
ChatGoogleGenerativeAI( - model="gemini-1.5-pro", + model="gemini-2.0-flash", google_api_key=os.getenv("GEMINI_API_KEY"), temperature=0.7 ) @@ -33,12 +33,17 @@ def research_node(state: TherapySessionState): unique_evidence = list(set(all_evidence)) evidence_context = "\n".join([f"- {text}" for text in unique_evidence]) if unique_evidence else "No recent logs." - prompt = f"PATIENT LOGS:\n{evidence_context}\n\nTask: Write a brief, empathetic therapist opening message." - + # FIX: Explicit list structure for LangChain Google adapter + messages = [ + SystemMessage(content="You are a compassionate AI therapist."), + HumanMessage(content=f"Review these logs and write a 1-2 sentence empathetic opening referencing a recurring theme: {evidence_context}") + ] + try: - response = llm.invoke([SystemMessage(content=prompt)]) + response = llm.invoke(messages) fft = response.content - except: + except Exception as e: + print(f"LLM ERROR in research_node: {e}") fft = "I'm glad you're here today. How are things feeling?" return { @@ -49,18 +54,30 @@ def research_node(state: TherapySessionState): def therapist_node(state: TherapySessionState): evidence_str = "\n".join(state.get('evidence', [])) - system_prompt = f"You are an AI therapist. Context: {evidence_str}" + system_prompt = f"You are a professional AI therapist. Ground your responses in this patient history: {evidence_str}" + + # Reconstruct the full message list for Gemini messages = [SystemMessage(content=system_prompt)] + state['transcript'] - response = llm.invoke(messages) - return {"transcript": state['transcript'] + [response]} + + try: + response = llm.invoke(messages) + return {"transcript": state['transcript'] + [response]} + except Exception as e: + print(f"Therapist Node LLM Error: {e}") + return {"transcript": state['transcript'] + [AIMessage(content="I'm here for you. 
Tell me more about that.")]} def wrap_up_node(state: TherapySessionState): - exercise_prompt = "Generate 3 exercises (breathing, todo, script) in a JSON list." - history = "\n".join([m.content for m in state['transcript']]) + exercise_prompt = "Generate 3 exercises (breathing, todo, script) in a JSON list format." + history = "\n".join([f"{'User' if isinstance(m, HumanMessage) else 'Therapist'}: {m.content}" for m in state['transcript']]) + try: - response = llm.invoke([SystemMessage(content=exercise_prompt), HumanMessage(content=history)]) + response = llm.invoke([ + SystemMessage(content=exercise_prompt), + HumanMessage(content=f"Session history:\n{history}") + ]) clean_json = response.content.replace('```json', '').replace('```', '').strip() exercises = json.loads(clean_json) - except: + except Exception as e: + print(f"Wrap Up Error: {e}") exercises = [] return {"exercises": exercises} From ba86a9d10694994791cea28331fb69362bcd6ff3 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 14:34:45 -0800 Subject: [PATCH 60/90] updates model to 2.5-flash --- src/server/agents.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/agents.py b/src/server/agents.py index 77fb6a2..5e3ffa9 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -8,7 +8,7 @@ from cortex import Filter, Field llm = ChatGoogleGenerativeAI( - model="gemini-2.0-flash", + model="gemini-2.5-flash", google_api_key=os.getenv("GEMINI_API_KEY"), temperature=0.7 ) From 200e91e6de158f2241323decebaaee619e2ef76b Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 14:38:17 -0800 Subject: [PATCH 61/90] updates therapy session end route.js --- src/client/app/api/therapy/end/route.js | 32 +++++++++++++++---------- 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/src/client/app/api/therapy/end/route.js b/src/client/app/api/therapy/end/route.js index 006960c..7b69999 100644 --- a/src/client/app/api/therapy/end/route.js +++ 
b/src/client/app/api/therapy/end/route.js @@ -1,14 +1,22 @@ export async function POST(req) { - const body = await req.json(); - const res = await fetch('http://localhost:5001/agent/end_session', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - user_id: body.userId, - transcript: body.transcript, - evidence: body.evidence - }) - }); - const data = await res.json(); - return Response.json(data); + try { + const body = await req.json(); + + const res = await fetch('http://localhost:5001/agent/end_session', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + user_id: body.userId, + transcript: body.transcript, + evidence: body.evidence + }) + }); + + if (!res.ok) throw new Error("Bridge failed"); + + const data = await res.json(); + return Response.json(data); + } catch (err) { + return Response.json({ error: err.message }, { status: 500 }); + } } From 084f9562679340e39a3addf8bb7b2988e89374d8 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 15:22:46 -0800 Subject: [PATCH 62/90] writes a langchain tool for searching the vector db --- src/server/tools.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 src/server/tools.py diff --git a/src/server/tools.py b/src/server/tools.py new file mode 100644 index 0000000..d15c122 --- /dev/null +++ b/src/server/tools.py @@ -0,0 +1,23 @@ +from langchain_core.tools import tool +from db import client, COLLECTION +from embedder import get_embedding +from cortex import Filter, Field + +@tool +def search_user_history(query: str, user_id: str): + """ + Searches the user's past journal entries and session summaries for specific information. + Use this when the user mentions a specific event, person, or date (like a birthday) + that isn't in the current context. 
+ """ + try: + vector = get_embedding(query) + user_filter = Filter().must(Field("user_id").eq(user_id)) + results = client.search(COLLECTION, query=vector, filter=user_filter, top_k=5, with_payload=True) + + if not results: + return "No relevant history found for this query." + + return "\n".join([f"- {r.payload['text']}" for r in results if 'text' in r.payload]) + except Exception as e: + return f"Error searching history: {str(e)}" From a5169f7a63aa96711fb36a2684ffc0ea5aedc64a Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 15:24:28 -0800 Subject: [PATCH 63/90] docs: adds custom-guidelines workflow --- custom-guidelines-pseudocode.txt | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 custom-guidelines-pseudocode.txt diff --git a/custom-guidelines-pseudocode.txt b/custom-guidelines-pseudocode.txt new file mode 100644 index 0000000..bbd1d45 --- /dev/null +++ b/custom-guidelines-pseudocode.txt @@ -0,0 +1,11 @@ +if session_notes: + custom_guidelines = get_cg("aklsdjfkljasdf", "fasdfasdjf") +else: + + custom_guidelines = get_cg("aklsdjfkljasdf") + +get_cg(general_guidelines, session_notes = "") -> str: + return custom_guidelines + + +systemp = f"{custom-guidelines}" From d8866d1f78509f5ef4fbe5a1831e815c59ecb3b2 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 15:28:53 -0800 Subject: [PATCH 64/90] binds langchain llm to new tool --- src/server/agents.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/server/agents.py b/src/server/agents.py index 5e3ffa9..3e8def9 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -1,17 +1,20 @@ import os import json from langchain_google_genai import ChatGoogleGenerativeAI -from langchain_core.messages import SystemMessage, HumanMessage, AIMessage +from langchain_core.messages import SystemMessage, HumanMessage, AIMessage, ToolMessage from state import TherapySessionState from embedder import get_embedding import db from cortex import 
Filter, Field +from tools import search_user_history llm = ChatGoogleGenerativeAI( model="gemini-2.5-flash", google_api_key=os.getenv("GEMINI_API_KEY"), temperature=0.7 ) +tools = [search_user_history] +llm_with_tools = llm.bind_tools(tools) def research_node(state: TherapySessionState): user_id = state.get('user_id') From 0fdf810d6bee46f79c5f4d59243ebe1bdce6efb5 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 15:29:21 -0800 Subject: [PATCH 65/90] chore: lowers temp of model --- src/server/agents.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/agents.py b/src/server/agents.py index 3e8def9..a66d198 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -11,7 +11,7 @@ llm = ChatGoogleGenerativeAI( model="gemini-2.5-flash", google_api_key=os.getenv("GEMINI_API_KEY"), - temperature=0.7 + temperature=0.4 ) tools = [search_user_history] llm_with_tools = llm.bind_tools(tools) From 507766c4f688b791758040ebdc5bea6ec67f317a Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 17:13:24 -0800 Subject: [PATCH 66/90] updates therapist_node agent to use tools --- src/server/agents.py | 42 ++++++++++++++++++++++++++++++++---------- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/src/server/agents.py b/src/server/agents.py index a66d198..72853ac 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -56,18 +56,40 @@ def research_node(state: TherapySessionState): } def therapist_node(state: TherapySessionState): + """Agentic Loop: Can search history if needed.""" + user_id = state.get('user_id') evidence_str = "\n".join(state.get('evidence', [])) - system_prompt = f"You are a professional AI therapist. Ground your responses in this patient history: {evidence_str}" - - # Reconstruct the full message list for Gemini + + system_prompt = f""" + You are a professional AI therapist. 
+ CURRENT CONTEXT: {evidence_str} + + If the user mentions something you don't recognize in the context (like a birthday, + a specific name, or a past event), use the 'search_user_history' tool to look it up. + """ + messages = [SystemMessage(content=system_prompt)] + state['transcript'] - - try: - response = llm.invoke(messages) - return {"transcript": state['transcript'] + [response]} - except Exception as e: - print(f"Therapist Node LLM Error: {e}") - return {"transcript": state['transcript'] + [AIMessage(content="I'm here for you. Tell me more about that.")]} + + # First call: LLM decides to reply OR call a tool + response = llm_with_tools.invoke(messages) + + # If the LLM wants to call a tool: + if response.tool_calls: + for tool_call in response.tool_calls: + # 1. Execute the search + query = tool_call['args']['query'] + print(f"--- AGENT TOOL CALL: Searching for '{query}' ---") + search_result = search_user_history.invoke({"query": query, "user_id": user_id}) + + # 2. Add tool result to conversation + messages.append(response) # Add the 'assistant' tool request + messages.append(ToolMessage(content=search_result, tool_call_id=tool_call['id'])) + + # 3. Final call: LLM sees the new data and gives the final therapeutic reply + final_response = llm_with_tools.invoke(messages) + return {"transcript": state['transcript'] + [final_response]} + + return {"transcript": state['transcript'] + [response]} def wrap_up_node(state: TherapySessionState): exercise_prompt = "Generate 3 exercises (breathing, todo, script) in a JSON list format." 
From d9fb31b204c823f562e3874f2c32bc16b77c89b4 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 17:22:09 -0800 Subject: [PATCH 67/90] writes a simple debug script for inspecting the vector db --- src/server/inspect-db.py | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 src/server/inspect-db.py diff --git a/src/server/inspect-db.py b/src/server/inspect-db.py new file mode 100644 index 0000000..a19d98b --- /dev/null +++ b/src/server/inspect-db.py @@ -0,0 +1,38 @@ +import db + +def inspect_collection(): + print(f"--- Inspecting Collection: {db.COLLECTION} ---") + try: + # We perform a 'dummy' search with a very broad filter to get results + # Actian doesn't have a 'get_all', so we search for 'anxiety' or similar + # and set top_k high to see the recent entries. + + # Alternatively, we can just fetch everything if the SDK allows, + # but a top_k search on an empty string usually works for inspection. + results = db.client.search( + db.COLLECTION, + query=[0.0] * 384, # Dummy vector + top_k=50, + with_payload=True + ) + + if not results: + print("The database is currently empty.") + return + + print(f"Found {len(results)} entries:\n") + for i, r in enumerate(results): + p = r.payload + p_type = p.get('type', 'unknown') + text = p.get('text', 'No text found') + user = p.get('user_id', 'unknown') + + print(f"[{i+1}] TYPE: {p_type} | USER: {user}") + print(f" TEXT: {text[:150]}...") # Print first 150 chars + print("-" * 30) + + except Exception as e: + print(f"Error inspecting DB: {e}") + +if __name__ == "__main__": + inspect_collection() From 529e50e35ddda6be4346603ae2229cc5403f3d36 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 17:22:32 -0800 Subject: [PATCH 68/90] writes a fucntion to reset the db --- src/server/db.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/server/db.py b/src/server/db.py index 8f2780a..6516a5c 100644 --- a/src/server/db.py +++ 
b/src/server/db.py @@ -13,3 +13,14 @@ def init_db(): if not client.has_collection(COLLECTION): client.create_collection(name=COLLECTION, dimension=384) return True + +def reset_db(): + """Drops the collection and recreates it from scratch.""" + if client.has_collection(COLLECTION): + client.drop_collection(COLLECTION) + print(f"--- Collection {COLLECTION} dropped! ---") + + # Recreate with the correct dimensions for your embedder + client.create_collection(name=COLLECTION, dimension=384) + print(f"--- Collection {COLLECTION} recreated! ---") + return True From a9adb657be7209568351f470fe1249d0bf4b9424 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 17:26:51 -0800 Subject: [PATCH 69/90] adds logic to flask /init endpoint to also reset vector db --- src/server/bridge.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/server/bridge.py b/src/server/bridge.py index 4d1b290..a915737 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -108,6 +108,12 @@ def end_session(): @app.route('/init', methods=['GET']) def init(): + should_reset = request.args.get('reset', 'false').lower() == 'true' + + if should_reset: + db.reset_db() + return jsonify({"status": "database_wiped_and_restarted"}) + db.init_db() return jsonify({"status": "ready"}) From 221b3e28cffed455f72eb5517ba1d964e8222185 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 17:34:50 -0800 Subject: [PATCH 70/90] updates reset_db in db.py --- src/server/db.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/src/server/db.py b/src/server/db.py index 6516a5c..d96c5f0 100644 --- a/src/server/db.py +++ b/src/server/db.py @@ -15,12 +15,17 @@ def init_db(): return True def reset_db(): - """Drops the collection and recreates it from scratch.""" - if client.has_collection(COLLECTION): - client.drop_collection(COLLECTION) - print(f"--- Collection {COLLECTION} dropped! 
---") + """Wipes the collection and recreates it from scratch.""" + try: + if client.has_collection(COLLECTION): + # FIX: Changed 'drop_collection' to 'delete_collection' + client.delete_collection(name=COLLECTION) + print(f"--- Collection {COLLECTION} deleted! ---") - # Recreate with the correct dimensions for your embedder - client.create_collection(name=COLLECTION, dimension=384) - print(f"--- Collection {COLLECTION} recreated! ---") - return True + # Recreate the collection + client.create_collection(name=COLLECTION, dimension=384) + print(f"--- Collection {COLLECTION} recreated! ---") + return True + except Exception as e: + print(f"Error resetting DB: {e}") + return False From 0fc449571c4f4bca94a5da9dd66cecd286c4dd87 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 17:38:03 -0800 Subject: [PATCH 71/90] updates wrap_up_node fn try except --- src/server/agents.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/src/server/agents.py b/src/server/agents.py index 72853ac..d43269c 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -94,15 +94,25 @@ def therapist_node(state: TherapySessionState): def wrap_up_node(state: TherapySessionState): exercise_prompt = "Generate 3 exercises (breathing, todo, script) in a JSON list format." 
history = "\n".join([f"{'User' if isinstance(m, HumanMessage) else 'Therapist'}: {m.content}" for m in state['transcript']]) - + try: response = llm.invoke([ - SystemMessage(content=exercise_prompt), + SystemMessage(content=exercise_prompt), HumanMessage(content=f"Session history:\n{history}") ]) - clean_json = response.content.replace('```json', '').replace('```', '').strip() - exercises = json.loads(clean_json) + content = response.content + # Better cleaning for markdown-wrapped JSON + if "```json" in content: + content = content.split("```json")[1].split("```")[0].strip() + elif "```" in content: + content = content.split("```")[1].split("```")[0].strip() + + exercises = json.loads(content) + # FORCE it to be a list + if isinstance(exercises, dict): + exercises = [exercises] + except Exception as e: print(f"Wrap Up Error: {e}") - exercises = [] + exercises = [] # Return empty array instead of None or Error Object return {"exercises": exercises} From cc6f4687aa35205416b2698fdd0285576d1e5e1c Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 17:47:13 -0800 Subject: [PATCH 72/90] adds check for object return in therapist node, and handles it properly --- src/server/agents.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/server/agents.py b/src/server/agents.py index d43269c..f55c3e5 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -88,6 +88,9 @@ def therapist_node(state: TherapySessionState): # 3. 
Final call: LLM sees the new data and gives the final therapeutic reply final_response = llm_with_tools.invoke(messages) return {"transcript": state['transcript'] + [final_response]} + + if isinstance(response.content, list): + response.content = "".join([part.get("text", "") if isinstance(part, dict) else str(part) for part in response.content]) return {"transcript": state['transcript'] + [response]} From ec282bde07ddef9ce2a166364bd3ed5a7f08d15d Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 17:56:28 -0800 Subject: [PATCH 73/90] better response handling in therapy_node (after tool call) --- src/server/agents.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/server/agents.py b/src/server/agents.py index f55c3e5..d1515d5 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -85,12 +85,12 @@ def therapist_node(state: TherapySessionState): messages.append(response) # Add the 'assistant' tool request messages.append(ToolMessage(content=search_result, tool_call_id=tool_call['id'])) - # 3. 
Final call: LLM sees the new data and gives the final therapeutic reply - final_response = llm_with_tools.invoke(messages) - return {"transcript": state['transcript'] + [final_response]} - + response = llm_with_tools.invoke(messages) + if isinstance(response.content, list): response.content = "".join([part.get("text", "") if isinstance(part, dict) else str(part) for part in response.content]) + else: + response.content = str(response.content) return {"transcript": state['transcript'] + [response]} From da25560612649c8e0f79d8db351fe8ed0f7b613d Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 17:57:50 -0800 Subject: [PATCH 74/90] ensures every message content is forced to a string in /run_session --- src/server/bridge.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/server/bridge.py b/src/server/bridge.py index a915737..68b0d22 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -88,11 +88,16 @@ def run_session(): history.append(HumanMessage(content=data.get('message'))) state = {"user_id": data.get('user_id'), "transcript": history, "evidence": data.get('evidence', [])} + result = therapist_node(state) return jsonify({ - "therapy_response": result['transcript'][-1].content, - "full_transcript": [{"role": "user" if isinstance(m, HumanMessage) else "assistant", "content": m.content} for m in result['transcript']] + "therapy_response": str(result['transcript'][-1].content), + # FIX: Ensure every message content is forced to a string + "full_transcript": [ + {"role": "user" if isinstance(m, HumanMessage) else "assistant", "content": str(m.content)} + for m in result['transcript'] + ] }) @app.route('/agent/end_session', methods=['POST']) From 5238826c7ec6c9e15e6b2fa0eb6134cf9de2638f Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 17:59:40 -0800 Subject: [PATCH 75/90] write ensure_text fn in agents.py --- src/server/agents.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git 
a/src/server/agents.py b/src/server/agents.py index d1515d5..d13f017 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -8,6 +8,13 @@ from cortex import Filter, Field from tools import search_user_history +def ensure_text(content): + if isinstance(content, str): + return content + if isinstance(content, list): + return "".join([part.get("text", "") if isinstance(part, dict) else str(part) for part in content]) + return str(content) + llm = ChatGoogleGenerativeAI( model="gemini-2.5-flash", google_api_key=os.getenv("GEMINI_API_KEY"), From 0d67ce04797cb8a302e3afdfa7cdfc679614fe8c Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 18:03:36 -0800 Subject: [PATCH 76/90] calls ensure_text() where appropriate --- src/server/agents.py | 43 +++++++++++++------------------------------ 1 file changed, 13 insertions(+), 30 deletions(-) diff --git a/src/server/agents.py b/src/server/agents.py index d13f017..95b85e6 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -46,12 +46,13 @@ def research_node(state: TherapySessionState): # FIX: Explicit list structure for LangChain Google adapter messages = [ SystemMessage(content="You are a compassionate AI therapist."), - HumanMessage(content=f"Review these logs and write a 1-2 sentence empathetic opening referencing a recurring theme: {evidence_context}") + HumanMessage(content=f"Review these logs and write a 1-2 sentence empathetic opening: {evidence_context}") ] try: response = llm.invoke(messages) - fft = response.content + # FIX: Ensure result is a string + fft = ensure_text(response.content) except Exception as e: print(f"LLM ERROR in research_node: {e}") fft = "I'm glad you're here today. How are things feeling?" 
@@ -63,42 +64,25 @@ def research_node(state: TherapySessionState): } def therapist_node(state: TherapySessionState): - """Agentic Loop: Can search history if needed.""" user_id = state.get('user_id') evidence_str = "\n".join(state.get('evidence', [])) - - system_prompt = f""" - You are a professional AI therapist. - CURRENT CONTEXT: {evidence_str} - - If the user mentions something you don't recognize in the context (like a birthday, - a specific name, or a past event), use the 'search_user_history' tool to look it up. - """ - + system_prompt = f"You are a professional AI therapist. Context: {evidence_str}" messages = [SystemMessage(content=system_prompt)] + state['transcript'] - # First call: LLM decides to reply OR call a tool response = llm_with_tools.invoke(messages) - # If the LLM wants to call a tool: if response.tool_calls: for tool_call in response.tool_calls: - # 1. Execute the search query = tool_call['args']['query'] print(f"--- AGENT TOOL CALL: Searching for '{query}' ---") search_result = search_user_history.invoke({"query": query, "user_id": user_id}) - - # 2. 
Add tool result to conversation - messages.append(response) # Add the 'assistant' tool request + messages.append(response) messages.append(ToolMessage(content=search_result, tool_call_id=tool_call['id'])) - + # Get final response after tool call response = llm_with_tools.invoke(messages) - if isinstance(response.content, list): - response.content = "".join([part.get("text", "") if isinstance(part, dict) else str(part) for part in response.content]) - else: - response.content = str(response.content) - + # FIX: Always clean content regardless of which branch was taken + response.content = ensure_text(response.content) return {"transcript": state['transcript'] + [response]} def wrap_up_node(state: TherapySessionState): @@ -110,19 +94,18 @@ def wrap_up_node(state: TherapySessionState): SystemMessage(content=exercise_prompt), HumanMessage(content=f"Session history:\n{history}") ]) - content = response.content - # Better cleaning for markdown-wrapped JSON + # FIX: Clean content before JSON parsing + content = ensure_text(response.content) + if "```json" in content: content = content.split("```json")[1].split("```")[0].strip() elif "```" in content: content = content.split("```")[1].split("```")[0].strip() - + exercises = json.loads(content) - # FORCE it to be a list if isinstance(exercises, dict): exercises = [exercises] - except Exception as e: print(f"Wrap Up Error: {e}") - exercises = [] # Return empty array instead of None or Error Object + exercises = [] return {"exercises": exercises} From 9ed8e130c225e2a0786c626c9a7dc2286ed32210 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 18:46:22 -0800 Subject: [PATCH 77/90] updates agent.py and bridge.py to perform actual chat save under new tooling calling logic --- src/server/agents.py | 23 ++++++++++++++++++++--- src/server/bridge.py | 25 +++++++++++++++++++++---- 2 files changed, 41 insertions(+), 7 deletions(-) diff --git a/src/server/agents.py b/src/server/agents.py index 95b85e6..64aec81 100644 
--- a/src/server/agents.py +++ b/src/server/agents.py @@ -87,16 +87,30 @@ def therapist_node(state: TherapySessionState): def wrap_up_node(state: TherapySessionState): exercise_prompt = "Generate 3 exercises (breathing, todo, script) in a JSON list format." + # NEW: We also need a prompt for the summary + summary_prompt = "Summarize the key personal details and emotional state from this chat in 2 concise sentences for long-term memory." + history = "\n".join([f"{'User' if isinstance(m, HumanMessage) else 'Therapist'}: {m.content}" for m in state['transcript']]) + # --- PART 1: Generate the Summary (The "Memory" part) --- + try: + summary_res = llm.invoke([ + SystemMessage(content=summary_prompt), + HumanMessage(content=history) + ]) + summary_text = ensure_text(summary_res.content) + except Exception as e: + print(f"Summary Error: {e}") + summary_text = "Session summary unavailable." + + # --- PART 2: Generate the Exercises (The "UI" part) --- try: response = llm.invoke([ SystemMessage(content=exercise_prompt), HumanMessage(content=f"Session history:\n{history}") ]) - # FIX: Clean content before JSON parsing content = ensure_text(response.content) - + if "```json" in content: content = content.split("```json")[1].split("```")[0].strip() elif "```" in content: @@ -108,4 +122,7 @@ def wrap_up_node(state: TherapySessionState): except Exception as e: print(f"Wrap Up Error: {e}") exercises = [] - return {"exercises": exercises} + + # --- PART 3: Return BOTH --- + # We return the summary so bridge.py can catch it and save it to Actian + return {"exercises": exercises, "summary": summary_text} diff --git a/src/server/bridge.py b/src/server/bridge.py index 68b0d22..12dcb5f 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -84,18 +84,18 @@ def run_session(): _, therapist_node, _ = get_agent_nodes() data = request.json raw_transcript = data.get('transcript', []) + history = [HumanMessage(content=m['content']) if m['role'] == 'user' else 
AIMessage(content=m['content']) for m in raw_transcript] history.append(HumanMessage(content=data.get('message'))) state = {"user_id": data.get('user_id'), "transcript": history, "evidence": data.get('evidence', [])} - result = therapist_node(state) + # Cleanest possible response - agents.py did the hard work return jsonify({ - "therapy_response": str(result['transcript'][-1].content), - # FIX: Ensure every message content is forced to a string + "therapy_response": result['transcript'][-1].content, "full_transcript": [ - {"role": "user" if isinstance(m, HumanMessage) else "assistant", "content": str(m.content)} + {"role": "user" if isinstance(m, HumanMessage) else "assistant", "content": m.content} for m in result['transcript'] ] }) @@ -109,6 +109,23 @@ def end_session(): state = {"user_id": data.get('user_id'), "transcript": history, "evidence": data.get('evidence', [])} result = wrap_up_node(state) + + # --- THE FIX: Actually save to Actian --- + summary_text = result.get('summary', "Session ended.") + vector = get_embedding(summary_text) + + db.client.upsert( + db.COLLECTION, + id=int(time.time()), + vector=vector, + payload={ + "text": summary_text, + "user_id": data.get('user_id'), + "type": "session_summary" + } + ) + db.client.flush(db.COLLECTION) + return jsonify({"exercises": result['exercises']}) @app.route('/init', methods=['GET']) From a08b2aa3b8f513421d4492e13be36ac38e4c2b05 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 19:02:28 -0800 Subject: [PATCH 78/90] makes it more explicit to agents what the user id is this is to ensure that the agent does not ask the user for their user_id, which is not the kind of information that the user should have --- src/server/agents.py | 20 +++++++++++++++++--- src/server/tools.py | 2 ++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/server/agents.py b/src/server/agents.py index 64aec81..40164b2 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -66,18 +66,32 @@ 
def research_node(state: TherapySessionState): def therapist_node(state: TherapySessionState): user_id = state.get('user_id') evidence_str = "\n".join(state.get('evidence', [])) - system_prompt = f"You are a professional AI therapist. Context: {evidence_str}" + + + system_prompt = f""" + You are a professional AI therapist. + User ID: {user_id} + CURRENT CONTEXT: {evidence_str} + + If you need to search past entries, use 'search_user_history'. + The user_id is already provided in the system context; do not ask the user for it. + """ + + messages = [SystemMessage(content=system_prompt)] + state['transcript'] response = llm_with_tools.invoke(messages) if response.tool_calls: for tool_call in response.tool_calls: - query = tool_call['args']['query'] - print(f"--- AGENT TOOL CALL: Searching for '{query}' ---") + + query = tool_call['args'].get('query', '') + print(f"--- AGENT TOOL CALL: Searching for '{query}' for user '{user_id}' ---") + search_result = search_user_history.invoke({"query": query, "user_id": user_id}) messages.append(response) messages.append(ToolMessage(content=search_result, tool_call_id=tool_call['id'])) + # Get final response after tool call response = llm_with_tools.invoke(messages) diff --git a/src/server/tools.py b/src/server/tools.py index d15c122..dc8b9ad 100644 --- a/src/server/tools.py +++ b/src/server/tools.py @@ -9,6 +9,8 @@ def search_user_history(query: str, user_id: str): Searches the user's past journal entries and session summaries for specific information. Use this when the user mentions a specific event, person, or date (like a birthday) that isn't in the current context. + NOTE: The user_id is automatically provided by the system. + DO NOT ask the user for their ID. 
""" try: vector = get_embedding(query) From a8ed8324d2cb6331903f3f06289d9ac3bf6ab53e Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 19:23:59 -0800 Subject: [PATCH 79/90] updates exercise_prompt to interactive/asynchronous dichotomy --- src/server/agents.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/server/agents.py b/src/server/agents.py index 40164b2..ad5869e 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -100,7 +100,18 @@ def therapist_node(state: TherapySessionState): return {"transcript": state['transcript'] + [response]} def wrap_up_node(state: TherapySessionState): - exercise_prompt = "Generate 3 exercises (breathing, todo, script) in a JSON list format." + exercise_prompt = """ + Generate exactly 3 tailored mental health exercises. + Each exercise must be one of two types: "asynchronous" or "interactive". + + 1. Asynchronous: Static instructions for the user to read. + 2. Interactive: A script for a guided session. + - Use the [BREAK] token between sentences where the AI should stop reading and wait for the user. + - Example: "Close your eyes. [BREAK] Now, take a deep breath. [BREAK] Hold it for 4 seconds." + + Return ONLY a JSON list of objects: + {"type": "interactive"|"asynchronous", "title": "...", "content": "..."} + """ # NEW: We also need a prompt for the summary summary_prompt = "Summarize the key personal details and emotional state from this chat in 2 concise sentences for long-term memory." 
From 99b4712d4453a734c4f2795ce403e101d3a74859 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 19:24:39 -0800 Subject: [PATCH 80/90] updates exercicse list to tkae into account the two different kinds of exercies --- src/client/app/components/ExerciseList.js | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/src/client/app/components/ExerciseList.js b/src/client/app/components/ExerciseList.js index b814893..bcf8e95 100644 --- a/src/client/app/components/ExerciseList.js +++ b/src/client/app/components/ExerciseList.js @@ -1,13 +1,20 @@ export default function ExerciseList({ exercises }) { + const [activeScript, setActiveScript] = useState(null); + + if (activeScript) { + return setActiveScript(null)} />; + } + return (
{exercises.map((ex, i) => ( -
- {ex.type} -

{ex.title}

-

{ex.content}

-
))} From 3d07d743cbf918fa52dcd02e81601412dd20c07d Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 20:09:16 -0800 Subject: [PATCH 81/90] updates frontend to showcase newest func --- src/client/app/components/ExerciseList.js | 35 ++++++++--- src/client/app/components/GuidedExercise.js | 68 +++++++++++++++++++++ 2 files changed, 95 insertions(+), 8 deletions(-) create mode 100644 src/client/app/components/GuidedExercise.js diff --git a/src/client/app/components/ExerciseList.js b/src/client/app/components/ExerciseList.js index bcf8e95..05926dd 100644 --- a/src/client/app/components/ExerciseList.js +++ b/src/client/app/components/ExerciseList.js @@ -1,20 +1,39 @@ +"use client"; +import { useState } from 'react'; +import GuidedExercise from './GuidedExercise'; + export default function ExerciseList({ exercises }) { - const [activeScript, setActiveScript] = useState(null); + const [activeExercise, setActiveExercise] = useState(null); - if (activeScript) { - return setActiveScript(null)} />; + if (activeExercise) { + return ( + setActiveExercise(null)} + /> + ); } return (
{exercises.map((ex, i) => ( -
- {/* ... existing content ... */} +
+
+ + {ex.type} + +

{ex.title}

+

{ex.content}

+
+
))} diff --git a/src/client/app/components/GuidedExercise.js b/src/client/app/components/GuidedExercise.js new file mode 100644 index 0000000..88be238 --- /dev/null +++ b/src/client/app/components/GuidedExercise.js @@ -0,0 +1,68 @@ +"use client"; +import { useState, useEffect } from 'react'; + +export default function GuidedExercise({ title, script, onExit }) { + // Split the script by our special token + const sentences = script.split("[BREAK]").map(s => s.trim()).filter(s => s); + const [index, setIndex] = useState(0); + const [isPlaying, setIsPlaying] = useState(false); + + const speak = (text) => { + // Stop any existing speech + window.speechSynthesis.cancel(); + + const utterance = new SpeechSynthesisUtterance(text); + utterance.rate = 0.9; // Slightly slower for therapy + + utterance.onend = () => { + setIsPlaying(false); + // Auto-advance the index so the UI highlights the next sentence + if (index < sentences.length - 1) { + setIndex(prev => prev + 1); + } + }; + + window.speechSynthesis.speak(utterance); + setIsPlaying(true); + }; + + return ( +
+ + +
+ Guided Session +

{title}

+ + {/* The Karaoke Display */} +
+ {sentences.map((s, i) => ( +

+ {s} +

+ ))} +
+ + {/* Control Button */} + + +

+ {index === sentences.length - 1 && !isPlaying ? "Exercise Complete" : "Press play to continue"} +

+
+
+ ); +} From d6e259b88771b2f477d427edab4dd80b62e92f19 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 20:29:49 -0800 Subject: [PATCH 82/90] achieves message streaming --- .../app/api/therapy/chat_stream/route.js | 19 +++++ src/client/app/therapy/page.js | 73 ++++++++++--------- src/server/agents.py | 12 +++ src/server/bridge.py | 42 ++++++++++- 4 files changed, 109 insertions(+), 37 deletions(-) create mode 100644 src/client/app/api/therapy/chat_stream/route.js diff --git a/src/client/app/api/therapy/chat_stream/route.js b/src/client/app/api/therapy/chat_stream/route.js new file mode 100644 index 0000000..a3314e5 --- /dev/null +++ b/src/client/app/api/therapy/chat_stream/route.js @@ -0,0 +1,19 @@ +export async function POST(req) { + const body = await req.json(); + + const res = await fetch('http://localhost:5001/agent/chat_stream', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(body), + }); + + if (!res.ok) return new Response("Bridge Connection Failed", { status: 500 }); + + return new Response(res.body, { + headers: { + 'Content-Type': 'text/event-stream', + 'Cache-Control': 'no-cache', + 'Connection': 'keep-alive', + }, + }); +} diff --git a/src/client/app/therapy/page.js b/src/client/app/therapy/page.js index 1feb980..ba9677c 100644 --- a/src/client/app/therapy/page.js +++ b/src/client/app/therapy/page.js @@ -45,42 +45,43 @@ export default function TherapyPage() { } }; - // STEP 5: Main Chat Loop with History Management - const sendMessage = async () => { - if (!input.trim() || loading) return; - - const userMsg = { role: 'user', content: input }; - const currentChat = [...chat, userMsg]; - - // UI Update: Show user message immediately - setChat(currentChat); - setInput(""); - setLoading(true); - - try { - const res = await fetch('/api/therapy/chat', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - userId: 'horyzon', - message: input, - 
transcript: chat, // Send history so Gemini has memory - evidence: session.evidence // Send Actian evidence for grounding - }) - }); - - const data = await res.json(); - - if (data.reply) { - setChat([...currentChat, { role: 'assistant', content: data.reply }]); - } - } catch (err) { - console.error("Chat Error:", err); - setChat([...currentChat, { role: 'assistant', content: "I'm having trouble connecting. Is the Python bridge running?" }]); - } finally { - setLoading(false); - } - }; + const sendMessage = async () => { + if (!input.trim() || loading) return; + + const userMsg = { role: 'user', content: input }; + const currentChat = [...chat, userMsg]; + setChat(currentChat); + setInput(""); + setLoading(true); + + // Add an empty assistant message we will fill up + setChat(prev => [...prev, { role: 'assistant', content: "" }]); + + const response = await fetch('/api/therapy/chat_stream', { // Hit a new streaming proxy + method: 'POST', + body: JSON.stringify({ userId: 'horyzon', message: input, transcript: chat, evidence: session.evidence }) + }); + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let fullReply = ""; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + const chunk = decoder.decode(value); + fullReply += chunk; + + // Update the LAST message in the chat array with the new chunk + setChat(prev => { + const newChat = [...prev]; + newChat[newChat.length - 1].content = fullReply; + return newChat; + }); + } + setLoading(false); + }; const finishSession = async () => { setLoading(true); diff --git a/src/server/agents.py b/src/server/agents.py index ad5869e..78c3659 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -151,3 +151,15 @@ def wrap_up_node(state: TherapySessionState): # --- PART 3: Return BOTH --- # We return the summary so bridge.py can catch it and save it to Actian return {"exercises": exercises, "summary": summary_text} + +def therapist_stream_node(state: 
TherapySessionState): + user_id = state.get('user_id') + evidence_str = "\n".join(state.get('evidence', [])) + system_prompt = f"You are a professional AI therapist. User ID: {user_id}. Context: {evidence_str}" + messages = [SystemMessage(content=system_prompt)] + state['transcript'] + + # We use .stream instead of .invoke + # Note: Tool calling with streaming is complex, + # so we will stream the FINAL response. + for chunk in llm.stream(messages): + yield ensure_text(chunk.content) diff --git a/src/server/bridge.py b/src/server/bridge.py index 12dcb5f..a2eca7b 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -5,7 +5,7 @@ # 1. Critical Imports from cortex import Filter, Field from embedder import get_embedding -from langchain_core.messages import HumanMessage, AIMessage +from langchain_core.messages import HumanMessage, AIMessage, SystemMessage import db # Shared source of truth app = Flask(__name__) @@ -139,6 +139,46 @@ def init(): db.init_db() return jsonify({"status": "ready"}) +from flask import Response +# At the top of bridge.py, update this line: + +# ... (keep existing routes) ... + +@app.route('/agent/chat_stream', methods=['POST']) +def chat_stream(): + # Critical: Import these inside the function to avoid circular imports + from agents import llm, ensure_text + + try: + data = request.json + raw_transcript = data.get('transcript', []) + history = [ + HumanMessage(content=m['content']) if m['role'] == 'user' + else AIMessage(content=m['content']) + for m in raw_transcript + ] + history.append(HumanMessage(content=data.get('message'))) + + # Get evidence context from the frontend session + evidence_list = data.get('evidence', []) + evidence_str = "\n".join(evidence_list) if evidence_list else "No previous context." + user_id = data.get('user_id', 'unknown') + + def generate(): + system_prompt = f"You are a professional AI therapist. User ID: {user_id}. 
Context: {evidence_str}" + messages = [SystemMessage(content=system_prompt)] + history + + # Use the .stream method for real-time tokens + for chunk in llm.stream(messages): + content = ensure_text(chunk.content) + if content: + yield content + + return Response(generate(), mimetype='text/event-stream') + except Exception as e: + print(f"Streaming Error: {e}") + return jsonify({"error": str(e)}), 500 + if __name__ == '__main__': db.init_db() # Ensure collection is created on startup app.run(port=5001, debug=True) From cb1e71b5f2174d6034a46967fada7e80861d2b8f Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 20:29:58 -0800 Subject: [PATCH 83/90] updates db.py to add keep connection alive logic --- src/server/db.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/src/server/db.py b/src/server/db.py index d96c5f0..8c79bae 100644 --- a/src/server/db.py +++ b/src/server/db.py @@ -2,14 +2,23 @@ import time from cortex import CortexClient -# Initialize Actian Cortex Client -# This is our single source of truth for the DB connection client = CortexClient("localhost:50051") -client.connect() COLLECTION = "user_journals" +def ensure_connected(): + """Checks if the client is connected, and if not, reconnects.""" + try: + # Try a lightweight call to see if the connection is alive + client.has_collection(COLLECTION) + except Exception: + print("--- DB Connection lost. Reconnecting... 
---") + try: + client.connect() + except: + pass # Already connected or handled by SDK internal logic + def init_db(): - """Ensure the collection exists on startup.""" + ensure_connected() # Use our new helper if not client.has_collection(COLLECTION): client.create_collection(name=COLLECTION, dimension=384) return True From 656b951b67895e05e54c29409d073c84258c18ac Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 20:32:10 -0800 Subject: [PATCH 84/90] wraps end_session logic in keep connection alive wrapper --- src/server/bridge.py | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/src/server/bridge.py b/src/server/bridge.py index a2eca7b..f5d6d56 100644 --- a/src/server/bridge.py +++ b/src/server/bridge.py @@ -114,17 +114,24 @@ def end_session(): summary_text = result.get('summary', "Session ended.") vector = get_embedding(summary_text) - db.client.upsert( - db.COLLECTION, - id=int(time.time()), - vector=vector, - payload={ - "text": summary_text, - "user_id": data.get('user_id'), - "type": "session_summary" - } - ) - db.client.flush(db.COLLECTION) + # --- THE FIX: Wrap the upsert in a retry/reconnect block --- + try: + db.ensure_connected() # Make sure we weren't kicked out during the stream + db.client.upsert( + db.COLLECTION, + id=int(time.time()), + vector=vector, + payload={ + "text": summary_text, + "user_id": data.get('user_id'), + "type": "session_summary" + } + ) + db.client.flush(db.COLLECTION) + except Exception as e: + print(f"Upsert failed after stream: {e}. 
Retrying once...") + db.client.connect() # Force a hard reconnect + db.client.upsert(db.COLLECTION, id=int(time.time()), vector=vector, payload={"text": summary_text, "user_id": data.get('user_id'), "type": "session_summary"}) return jsonify({"exercises": result['exercises']}) From 7021b8d9be5541249d631165e4d30d74557babcc Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 21:18:37 -0800 Subject: [PATCH 85/90] feat!: generates agenda in research_node --- src/server/agents.py | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/src/server/agents.py b/src/server/agents.py index 78c3659..26ab8d7 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -25,6 +25,7 @@ def ensure_text(content): def research_node(state: TherapySessionState): user_id = state.get('user_id') + user_notes = state.get('user_notes', "") query_topics = ["panic attacks and physical anxiety", "work stress and social anxiety", "sleep quality"] all_evidence = [] @@ -43,26 +44,35 @@ def research_node(state: TherapySessionState): unique_evidence = list(set(all_evidence)) evidence_context = "\n".join([f"- {text}" for text in unique_evidence]) if unique_evidence else "No recent logs." - # FIX: Explicit list structure for LangChain Google adapter - messages = [ - SystemMessage(content="You are a compassionate AI therapist."), - HumanMessage(content=f"Review these logs and write a 1-2 sentence empathetic opening: {evidence_context}") - ] + # 2. NEW: Generate the Agenda + agenda_prompt = f""" + You are a clinical supervisor. Based on the User's Pre-session Notes and Past History, + create a 3-point "Session Agenda" for the AI Therapist. - try: - response = llm.invoke(messages) - # FIX: Ensure result is a string - fft = ensure_text(response.content) - except Exception as e: - print(f"LLM ERROR in research_node: {e}") - fft = "I'm glad you're here today. How are things feeling?" 
+ USER NOTES: {user_notes} + PAST HISTORY: {evidence_context} + + Format the agenda as a weighted list of objectives. + """ + + agenda_response = llm.invoke([SystemMessage(content=agenda_prompt)]) + agenda = ensure_text(agenda_response.content) + + # 3. Generate the empathetic opening using the agenda + opening_res = llm.invoke([ + SystemMessage(content="You are a compassionate therapist."), + HumanMessage(content=f"Based on this agenda, give a warm 1-sentence opening: {agenda}") + ]) + fft = ensure_text(opening_res.content) return { "evidence": unique_evidence, + "agenda": agenda, # Save this to state! "food_for_thought": fft, "transcript": [AIMessage(content=fft)] } + def therapist_node(state: TherapySessionState): user_id = state.get('user_id') evidence_str = "\n".join(state.get('evidence', [])) From cd3543a5c5bec4b8aa5c2696607c215a72d3e4a6 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 21:39:09 -0800 Subject: [PATCH 86/90] updates state.py to take into account agenda --- src/server/state.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/server/state.py b/src/server/state.py index 5bdb8b6..35176f2 100644 --- a/src/server/state.py +++ b/src/server/state.py @@ -1,10 +1,15 @@ -from typing import TypedDict, List, Union +from typing import TypedDict, List, Optional from langchain_core.messages import BaseMessage class TherapySessionState(TypedDict): user_id: str session_id: str - transcript: List[BaseMessage] - evidence: List[str] - food_for_thought: str + transcript: List[BaseMessage] + evidence: List[str] + food_for_thought: str + # New fields for Collaborative Agenda Setting + user_notes: str + agenda: str + # Field for the final session summary + summary: Optional[str] exercises: List[dict] From 97a85bcf229746181df588b2009bd3333f4a7deb Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 21:39:14 -0800 Subject: [PATCH 87/90] updates agent.py to use the new agenda --- src/server/agents.py | 
92 ++++++++++++++++++++++++++------------------ 1 file changed, 55 insertions(+), 37 deletions(-) diff --git a/src/server/agents.py b/src/server/agents.py index 26ab8d7..3d6cf6c 100644 --- a/src/server/agents.py +++ b/src/server/agents.py @@ -4,7 +4,7 @@ from langchain_core.messages import SystemMessage, HumanMessage, AIMessage, ToolMessage from state import TherapySessionState from embedder import get_embedding -import db +import db from cortex import Filter, Field from tools import search_user_history @@ -25,17 +25,17 @@ def ensure_text(content): def research_node(state: TherapySessionState): user_id = state.get('user_id') - user_notes = state.get('user_notes', "") + user_notes = state.get('user_notes', "No specific notes provided.") query_topics = ["panic attacks and physical anxiety", "work stress and social anxiety", "sleep quality"] all_evidence = [] - + + # 1. Gather Historical Context for topic in query_topics: try: vector = get_embedding(topic) user_filter = Filter().must(Field("user_id").eq(user_id)) results = db.client.search(db.COLLECTION, query=vector, filter=user_filter, top_k=3, with_payload=True) for r in results: - # Ensure the key exists in payload if 'text' in r.payload: all_evidence.append(r.payload['text']) except Exception as e: @@ -44,71 +44,82 @@ def research_node(state: TherapySessionState): unique_evidence = list(set(all_evidence)) evidence_context = "\n".join([f"- {text}" for text in unique_evidence]) if unique_evidence else "No recent logs." - # 2. NEW: Generate the Agenda + # 2. GENERATE AGENDA: Synthesize notes + history agenda_prompt = f""" - You are a clinical supervisor. Based on the User's Pre-session Notes and Past History, - create a 3-point "Session Agenda" for the AI Therapist. - - USER NOTES: {user_notes} - PAST HISTORY: {evidence_context} + You are a clinical supervisor. Create a 3-point "Session Agenda" for the AI Therapist. 
+ + USER'S CURRENT CONCERNS (Pre-session notes): {user_notes} + USER'S HISTORY: {evidence_context} - Format the agenda as a weighted list of objectives. + Format this as a clear 'Weighted List of Objectives' for the therapist to follow. """ - agenda_response = llm.invoke([SystemMessage(content=agenda_prompt)]) - agenda = ensure_text(agenda_response.content) + try: + agenda_response = llm.invoke([SystemMessage(content=agenda_prompt)]) + agenda = ensure_text(agenda_response.content) + except Exception as e: + print(f"Agenda Generation Error: {e}") + agenda = "Provide general empathetic support and address user concerns." - # 3. Generate the empathetic opening using the agenda - opening_res = llm.invoke([ - SystemMessage(content="You are a compassionate therapist."), - HumanMessage(content=f"Based on this agenda, give a warm 1-sentence opening: {agenda}") - ]) - fft = ensure_text(opening_res.content) + # 3. Generate Opening + try: + opening_res = llm.invoke([ + SystemMessage(content="You are a compassionate therapist starting a session."), + HumanMessage(content=f"Based on this agenda, give a warm 1-2 sentence opening that acknowledges their notes: {agenda}") + ]) + fft = ensure_text(opening_res.content) + except Exception as e: + print(f"Opening Error: {e}") + fft = "I'm here and ready to listen. How are you feeling today?" return { "evidence": unique_evidence, - "agenda": agenda, # Save this to state! + "agenda": agenda, "food_for_thought": fft, "transcript": [AIMessage(content=fft)] } - def therapist_node(state: TherapySessionState): user_id = state.get('user_id') evidence_str = "\n".join(state.get('evidence', [])) + agenda = state.get('agenda', "Provide general emotional support.") - + # We keep user_id and context, but ADD the agenda as the primary directive system_prompt = f""" - You are a professional AI therapist. + You are a professional AI therapist. 
User ID: {user_id} - CURRENT CONTEXT: {evidence_str} - If you need to search past entries, use 'search_user_history'. - The user_id is already provided in the system context; do not ask the user for it. - """ + HISTORICAL CONTEXT: + {evidence_str} + SESSION AGENDA (Your primary objectives for this session): + {agenda} - messages = [SystemMessage(content=system_prompt)] + state['transcript'] + INSTRUCTIONS: + 1. Focus on the Agenda points, but stay flexible if the user needs to vent. + 2. Use 'search_user_history' if you need more details on a specific past event. + 3. The user_id is already provided; do not ask for it. + """ + messages = [SystemMessage(content=system_prompt)] + state['transcript'] response = llm_with_tools.invoke(messages) if response.tool_calls: for tool_call in response.tool_calls: - query = tool_call['args'].get('query', '') print(f"--- AGENT TOOL CALL: Searching for '{query}' for user '{user_id}' ---") - search_result = search_user_history.invoke({"query": query, "user_id": user_id}) messages.append(response) messages.append(ToolMessage(content=search_result, tool_call_id=tool_call['id'])) - - # Get final response after tool call + response = llm_with_tools.invoke(messages) - # FIX: Always clean content regardless of which branch was taken response.content = ensure_text(response.content) return {"transcript": state['transcript'] + [response]} +# wrap_up_node and therapist_stream_node remain largely the same, +# but could optionally use state.get('agenda') for better summaries. + def wrap_up_node(state: TherapySessionState): exercise_prompt = """ Generate exactly 3 tailored mental health exercises. @@ -165,11 +176,18 @@ def wrap_up_node(state: TherapySessionState): def therapist_stream_node(state: TherapySessionState): user_id = state.get('user_id') evidence_str = "\n".join(state.get('evidence', [])) - system_prompt = f"You are a professional AI therapist. User ID: {user_id}. 
Context: {evidence_str}" + agenda = state.get('agenda', "Provide general emotional support.") # Add this + + # Match the prompt from therapist_node for consistency + system_prompt = f""" + You are a professional AI therapist. User ID: {user_id}. + + SESSION AGENDA: + {agenda} + + CONTEXT: {evidence_str} + """ messages = [SystemMessage(content=system_prompt)] + state['transcript'] - # We use .stream instead of .invoke - # Note: Tool calling with streaming is complex, - # so we will stream the FINAL response. for chunk in llm.stream(messages): yield ensure_text(chunk.content) From c127144fb18b6aabb97e65973e80908cffffe209 Mon Sep 17 00:00:00 2001 From: gustavogalvao Date: Sun, 22 Feb 2026 00:53:01 -0500 Subject: [PATCH 88/90] Created prompt file --- src/server/prompts.py | 75 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 src/server/prompts.py diff --git a/src/server/prompts.py b/src/server/prompts.py new file mode 100644 index 0000000..888c6dc --- /dev/null +++ b/src/server/prompts.py @@ -0,0 +1,75 @@ +therapist_system_prompt = """ +**Role**: Professional Lead Therapist AI +**Primary Directive**: You are a compassionate, professional therapist. Your goal is to guide the user through their mental health journey by following the clinical roadmap provided in the "CURRENT CONTEXT." + +===USER AND SESSION INFORMATION=== +**User ID**: {user_id} +**Agenda**: {agenda} +=== + +**Operational Guidelines**: +1. **Agenda Integration**: Use the agenda as your session script. Do not reveal the technical structure of the agenda to the user; instead, weave the topics and approaches naturally into the dialogue. +2. **Grounding Priority**: If the agenda or the user's tone suggests high distress, prioritize "Grounding Exercises." Guide the user through these exercises step-by-step, waiting for their response between steps. +3. **Tone & Style**: Maintain a warm, non-judgmental, and validating tone. 
Use open-ended questions to encourage reflection. +4. **Historical Awareness**: If the user references a specific person, date, or event not found in the current agenda, use the 'search_user_history' tool to provide personalized continuity. +5. **Boundaries**: You are an AI, not a human doctor. If the user expresses a crisis or self-harm, provide immediate professional hotline resources alongside grounding techniques. + +**Constraint**: Never ask the user for their User ID. It is handled internally. +""" + +researcher_system_prompt = """ +Temp +""" + +summary_system_prompt = """ +**Role**: Clinical Memory Architect +**Objective**: Distill the current session into a high-density, 2-sentence summary for the user's permanent longitudinal record. + +**Guidelines**: +1. **Sentence 1 (Emotional Baseline & Triggers)**: Identify the primary emotional state, core stressors, and any specific life events or individuals mentioned. +2. **Sentence 2 (Intervention & Progress)**: Detail which therapeutic approach or exercise was used and the user's observable response or "shift" by the end of the chat. + +**Tone**: Clinical, objective, and dense. Avoid filler words like "The user said..." or "In this session...". +**Constraint**: Keep the total output to exactly 2 sentences. Focus on data-rich nouns and adjectives. +""" + +exercises_system_prompt = """ +**Role**: Behavioral Health Exercise Specialist +**Objective**: Generate exactly 3 tailored mental health exercises based on the provided session context and user needs. + +**Exercise Categories**: +1. **Asynchronous**: Static, instructional content for the user to complete at their own pace. Focus on journaling, habit tracking, or educational reading. +2. **Interactive**: A real-time guided script. + - **Crucial**: You must insert the `[BREAK]` token where the AI should pause and wait for a user response. + - **Example**: "Place your hand on your heart. [BREAK] Can you feel your heartbeat? [BREAK] Now, take a slow breath in." 
+ +**Instructions**: +- Align exercises with the therapeutic modality suggested in the current context (e.g., CBT, DBT, Mindfulness). +- Ensure the difficulty level matches the user's current emotional capacity. +- **Output Format**: Return ONLY a valid JSON list of 3 objects. Do not include any conversational filler. + +**JSON Schema**: +[ + { + "type": "interactive" | "asynchronous", + "title": "Clear, engaging title", + "content": "Full text of the exercise including [BREAK] tokens for interactive types" + } +] +""" + + +def get_therapist_system_prompt(user_id, agenda): + return therapist_system_prompt.format(user_id=user_id, agenda=agenda) + + +def get_researcher_system_prompt(): + return researcher_system_prompt + + +def get_summary_system_prompt(): + return summary_system_prompt + + +def get_exercises_system_prompt(): + return exercises_system_prompt From 98982da32f3a78c1f77d392f2990efafeb2725b9 Mon Sep 17 00:00:00 2001 From: kalyanoliveira Date: Sat, 21 Feb 2026 22:17:03 -0800 Subject: [PATCH 89/90] achieves agenda functionality --- src/client/app/api/therapy/chat/route.js | 5 +- src/client/app/api/therapy/end/route.js | 3 +- src/client/app/api/therapy/start/route.js | 11 +++-- src/client/app/therapy/page.js | 17 +++++-- src/server/.prompts.py.swp | Bin 0 -> 12288 bytes src/server/agents.py | 54 ++++++++++++++-------- src/server/bridge.py | 31 +++++++++++-- src/server/inspect-db.py | 30 +++++++----- 8 files changed, 106 insertions(+), 45 deletions(-) create mode 100644 src/server/.prompts.py.swp diff --git a/src/client/app/api/therapy/chat/route.js b/src/client/app/api/therapy/chat/route.js index 9d77da9..11eb7b6 100644 --- a/src/client/app/api/therapy/chat/route.js +++ b/src/client/app/api/therapy/chat/route.js @@ -1,5 +1,5 @@ export async function POST(req) { - const { userId, message, transcript, evidence } = await req.json(); + const { userId, message, transcript, evidence, agenda } = await req.json(); const res = await 
fetch('http://localhost:5001/agent/run_session', { method: 'POST', @@ -8,7 +8,8 @@ export async function POST(req) { user_id: userId, message: message, transcript: transcript, // Pass history back to Python - evidence: evidence // Pass evidence back to Python + evidence: evidence, // Pass evidence back to Python + agenda: agenda }) }); diff --git a/src/client/app/api/therapy/end/route.js b/src/client/app/api/therapy/end/route.js index 7b69999..975334b 100644 --- a/src/client/app/api/therapy/end/route.js +++ b/src/client/app/api/therapy/end/route.js @@ -8,7 +8,8 @@ export async function POST(req) { body: JSON.stringify({ user_id: body.userId, transcript: body.transcript, - evidence: body.evidence + evidence: body.evidence, + agenda: body.agenda }) }); diff --git a/src/client/app/api/therapy/start/route.js b/src/client/app/api/therapy/start/route.js index 96b0644..d1d1c53 100644 --- a/src/client/app/api/therapy/start/route.js +++ b/src/client/app/api/therapy/start/route.js @@ -1,18 +1,21 @@ import { NextResponse } from 'next/server'; export async function POST(req) { - const { userId } = await req.json(); + const { userId, userNotes } = await req.json(); // Capture userNotes - // Call the Python agent's research node const res = await fetch('http://localhost:5001/agent/start', { method: 'POST', headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ user_id: userId }) + body: JSON.stringify({ + user_id: userId, + user_notes: userNotes // Pass it to Flask + }) }); const data = await res.json(); return NextResponse.json({ openingMessage: data.food_for_thought, - evidenceFound: data.evidence + evidenceFound: data.evidence, + agenda: data.agenda // Pass the generated agenda back to the frontend }); } diff --git a/src/client/app/therapy/page.js b/src/client/app/therapy/page.js index ba9677c..6a91962 100644 --- a/src/client/app/therapy/page.js +++ b/src/client/app/therapy/page.js @@ -8,6 +8,7 @@ export default function TherapyPage() { const [input, 
setInput] = useState(""); const [loading, setLoading] = useState(false); const [exercises, setExercises] = useState(null); + const [userNotes, setUserNotes] = useState(""); // NEW // Ref for auto-scrolling the chat window const chatEndRef = useRef(null); @@ -27,13 +28,14 @@ export default function TherapyPage() { const res = await fetch('/api/therapy/start', { method: 'POST', headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ userId: 'horyzon' }) + body: JSON.stringify({ userId: 'horyzon', userNotes: userNotes }) }); const data = await res.json(); setSession({ evidence: data.evidenceFound, // The clinical themes found in Actian - user_id: 'horyzon' + user_id: 'horyzon', + agenda: data.agenda }); // The opening message generated in agents.py @@ -59,7 +61,7 @@ export default function TherapyPage() { const response = await fetch('/api/therapy/chat_stream', { // Hit a new streaming proxy method: 'POST', - body: JSON.stringify({ userId: 'horyzon', message: input, transcript: chat, evidence: session.evidence }) + body: JSON.stringify({ userId: 'horyzon', message: input, transcript: chat, evidence: session.evidence, agenda: session.agenda }) }); const reader = response.body.getReader(); @@ -93,7 +95,8 @@ export default function TherapyPage() { body: JSON.stringify({ userId: 'horyzon', transcript: chat, - evidence: session.evidence + evidence: session.evidence, + agenda: session.agenda }) }); const data = await res.json(); @@ -137,6 +140,12 @@ export default function TherapyPage() {

I'll review your recent journal logs to help guide our conversation today.

+