Skip to content
Open
Show file tree
Hide file tree
Changes from 4 commits
Commits
Show all changes
41 commits
Select commit Hold shift + click to select a range
274c984
feat: Add Anthropic Claude and LM Studio provider support
csfet9 Dec 16, 2025
f87fd02
feat: Add dynamic timeout for local LLM providers
csfet9 Dec 17, 2025
7e6a757
fix: Address PR review feedback
csfet9 Dec 17, 2025
47c6008
chore: Remove deleted AI assistant files from .gitignore
csfet9 Dec 17, 2025
8c7e28f
docs: Add CLAUDE.md for Claude Code integration
csfet9 Dec 17, 2025
c8b33a0
chore: Include local dev files and sync changes
csfet9 Dec 17, 2025
655b054
fix: Address PR review feedback for LLM provider support
csfet9 Dec 19, 2025
f69a048
chore: Remove local dev docker-compose.yml
csfet9 Dec 19, 2025
4ce365f
chore: Add local dev docker-compose.yml
csfet9 Dec 19, 2025
10eb651
fix: Update LM Studio port to 2222 in docker-compose
csfet9 Dec 19, 2025
92d05d5
chore: Remove obsolete version attribute from docker-compose
csfet9 Dec 19, 2025
79bc663
fix: Remove test file and docker-compose per PR review
csfet9 Dec 24, 2025
ef578ea
feat: Add feedback signal API for tracking fact usefulness
csfet9 Dec 30, 2025
1cea994
feat: Add retry wrapper script for Docker container startup
csfet9 Dec 31, 2025
9239955
feat: Add reasoning model thinking tag support in LLM wrapper
csfet9 Jan 1, 2026
0cd9d25
feat: Integrate retry-start.sh for dependency waiting on container st…
csfet9 Jan 1, 2026
20624a5
fix: Skip database check for embedded pg0 in retry-start.sh
csfet9 Jan 1, 2026
383476a
docs: Add comprehensive local LLM testing report
csfet9 Jan 1, 2026
a0cec18
docs: Move local LLM testing docs to hindsight-docs
csfet9 Jan 1, 2026
bf9c0c0
Merge remote-tracking branch 'upstream/main'
csfet9 Jan 2, 2026
8ac53d7
feat: add LLM comparison benchmark and reduce max_completion_tokens
csfet9 Jan 4, 2026
b4bf2ee
feat: add Gemini 3 Flash Preview optimizations
csfet9 Jan 4, 2026
7c7589b
fix: handle empty LLM responses and support cloud providers in Docker
csfet9 Jan 4, 2026
6e57586
Merge upstream/main into main
csfet9 Jan 5, 2026
6789313
feat: Add feedback signal API for tracking fact usefulness
csfet9 Dec 30, 2025
5ff6e19
feat: add Gemini 3 Flash Preview optimizations
csfet9 Jan 4, 2026
46ece7b
fix: handle empty LLM responses and support cloud providers in Docker
csfet9 Jan 4, 2026
a85ac28
chore: remove redundant retry-start.sh
csfet9 Jan 5, 2026
e4eae57
chore: remove benchmark result files from PR
csfet9 Jan 5, 2026
3e6ec36
feat: implement query-context aware feedback scoring
csfet9 Jan 5, 2026
ffa5fe3
refactor: use reasoning_effort for Gemini 3 thinking level
csfet9 Jan 5, 2026
8e5264d
Merge upstream/main into feature/llm-improvements-and-feedback-api
csfet9 Jan 5, 2026
d2278a7
feat: make thinking level configurable via environment variable
csfet9 Jan 6, 2026
b82cbbd
Merge branch 'feature/llm-improvements-and-feedback-api' into main
csfet9 Jan 7, 2026
1b6ffb7
Merge remote-tracking branch 'upstream/main'
csfet9 Jan 7, 2026
0c31269
fix: restore missing retry-start.sh for Docker build
csfet9 Jan 7, 2026
e7ccbc5
fix: sync Dockerfile with upstream, remove obsolete retry-start.sh
csfet9 Jan 7, 2026
a45fe17
fix: validate UUID format in signal endpoint to return 422 instead of…
csfet9 Jan 7, 2026
d085415
Merge branch 'feature/llm-improvements-and-feedback-api'
csfet9 Jan 7, 2026
362f7a0
Merge upstream/main: resolve conflicts in LLM wrapper and fact extrac…
csfet9 Jan 8, 2026
74e2190
fix: escape < in MDX to fix docs build
csfet9 Jan 8, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,149 @@
"""add_feedback_signal_tables

Revision ID: f1a2b3c4d5e6
Revises: e0a1b2c3d4e5
Create Date: 2025-12-30

Adds tables for feedback signal tracking:
- fact_usefulness: Aggregate usefulness scores per fact
- usefulness_signals: Individual signal records for audit/analytics
- query_pattern_stats: Pattern tracking for query optimization
"""

from collections.abc import Sequence

import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql

# Alembic revision identifiers. `down_revision` chains this migration
# directly after e0a1b2c3d4e5; no branch labels or cross-branch
# dependencies are declared.
revision: str = "f1a2b3c4d5e6"
down_revision: str | Sequence[str] | None = "e0a1b2c3d4e5"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None


def upgrade() -> None:
    """Create the three feedback-signal tables and their indexes.

    Tables (in creation order):
    - fact_usefulness: one row per fact holding the aggregated score
    - usefulness_signals: append-only log of individual signal events
    - query_pattern_stats: per-(bank_id, query_hash) signal counters
    """

    def _ts_now(name: str) -> sa.Column:
        # Factory for a non-null, timezone-aware timestamp defaulting to now().
        return sa.Column(
            name,
            postgresql.TIMESTAMP(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        )

    def _uuid_pk_col() -> sa.Column:
        # Factory for a server-generated UUID surrogate-key column.
        # NOTE(review): gen_random_uuid() is built in on PostgreSQL 13+;
        # older servers need the pgcrypto extension — confirm target version.
        return sa.Column(
            "id",
            postgresql.UUID(as_uuid=True),
            server_default=sa.text("gen_random_uuid()"),
            nullable=False,
        )

    # Aggregate usefulness score per fact, keyed by fact_id.
    op.create_table(
        "fact_usefulness",
        sa.Column("fact_id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("bank_id", sa.Text(), nullable=False),
        sa.Column("usefulness_score", sa.Float(), server_default="0.5", nullable=False),
        sa.Column("signal_count", sa.Integer(), server_default="0", nullable=False),
        sa.Column("last_signal_at", postgresql.TIMESTAMP(timezone=True), nullable=True),
        _ts_now("last_decay_at"),
        _ts_now("created_at"),
        _ts_now("updated_at"),
        sa.PrimaryKeyConstraint("fact_id", name=op.f("pk_fact_usefulness")),
    )
    op.create_index("idx_fact_usefulness_bank_id", "fact_usefulness", ["bank_id"])
    # Supports "top facts per bank" queries ordered by score descending.
    op.create_index(
        "idx_fact_usefulness_score",
        "fact_usefulness",
        ["bank_id", sa.text("usefulness_score DESC")],
    )

    # Individual signal records, kept for audit and analytics.
    op.create_table(
        "usefulness_signals",
        _uuid_pk_col(),
        sa.Column("fact_id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("bank_id", sa.Text(), nullable=False),
        sa.Column("signal_type", sa.Text(), nullable=False),
        sa.Column("confidence", sa.Float(), server_default="1.0", nullable=False),
        sa.Column("query_hash", sa.Text(), nullable=True),
        sa.Column("context", sa.Text(), nullable=True),
        _ts_now("created_at"),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_usefulness_signals")),
        # Restrict signal_type to the four known signal categories.
        sa.CheckConstraint(
            "signal_type IN ('used', 'ignored', 'helpful', 'not_helpful')",
            name="usefulness_signals_type_check",
        ),
        # Confidence is a probability-like weight in [0, 1].
        sa.CheckConstraint(
            "confidence >= 0.0 AND confidence <= 1.0",
            name="usefulness_signals_confidence_check",
        ),
    )
    op.create_index("idx_usefulness_signals_fact_id", "usefulness_signals", ["fact_id"])
    op.create_index("idx_usefulness_signals_bank_id", "usefulness_signals", ["bank_id"])
    # Newest-first index for time-window analytics over signals.
    op.create_index(
        "idx_usefulness_signals_created_at",
        "usefulness_signals",
        [sa.text("created_at DESC")],
    )

    # Per-query-pattern counters; one row per (bank_id, query_hash).
    op.create_table(
        "query_pattern_stats",
        _uuid_pk_col(),
        sa.Column("bank_id", sa.Text(), nullable=False),
        sa.Column("query_hash", sa.Text(), nullable=False),
        sa.Column("query_example", sa.Text(), nullable=True),
        sa.Column("total_signals", sa.Integer(), server_default="0", nullable=False),
        sa.Column("helpful_count", sa.Integer(), server_default="0", nullable=False),
        sa.Column("not_helpful_count", sa.Integer(), server_default="0", nullable=False),
        sa.Column("used_count", sa.Integer(), server_default="0", nullable=False),
        sa.Column("ignored_count", sa.Integer(), server_default="0", nullable=False),
        _ts_now("created_at"),
        _ts_now("updated_at"),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_query_pattern_stats")),
        # One stats row per query pattern within a bank.
        sa.UniqueConstraint("bank_id", "query_hash", name="uq_query_pattern_stats_bank_hash"),
    )
    op.create_index("idx_query_pattern_stats_bank_id", "query_pattern_stats", ["bank_id"])


def downgrade() -> None:
    """Drop the feedback-signal tables, reversing upgrade() creation order."""
    # Maps each table to its indexes; dict insertion order fixes the
    # drop sequence (reverse of creation in upgrade()).
    teardown: dict[str, list[str]] = {
        "query_pattern_stats": [
            "idx_query_pattern_stats_bank_id",
        ],
        "usefulness_signals": [
            "idx_usefulness_signals_created_at",
            "idx_usefulness_signals_bank_id",
            "idx_usefulness_signals_fact_id",
        ],
        "fact_usefulness": [
            "idx_fact_usefulness_score",
            "idx_fact_usefulness_bank_id",
        ],
    }
    for table, index_names in teardown.items():
        for index_name in index_names:
            op.drop_index(index_name, table_name=table)
        op.drop_table(table)
Loading
Loading