diff --git a/app/lib/common/llms-txt/docs.json b/app/lib/common/llms-txt/docs.json
new file mode 100644
index 0000000000..95120c9395
--- /dev/null
+++ b/app/lib/common/llms-txt/docs.json
@@ -0,0 +1,18 @@
+{
+  "libraries": [
+    {
+      "name": "Fireproof",
+      "keywords": ["fireproof", "use-fireproof", "useFireproof", "useLiveQuery", "useDocument"],
+      "docSource": "https://use-fireproof.com/llms.txt",
+      "docFile": "docs/fireproof.txt",
+      "lastUpdated": "2025-03-25"
+    },
+    {
+      "name": "CallAI",
+      "keywords": ["callai", "call-ai"],
+      "docSource": "https://use-fireproof.com/callai-llms.txt",
+      "docFile": "docs/callai.txt",
+      "lastUpdated": "2025-03-25"
+    }
+  ]
+}
diff --git a/app/lib/common/llms-txt/docs/callai.txt b/app/lib/common/llms-txt/docs/callai.txt
new file mode 100644
index 0000000000..7bd2fb41cc
--- /dev/null
+++ b/app/lib/common/llms-txt/docs/callai.txt
@@ -0,0 +1,424 @@
+# CallAI Helper Function
+
+The `callAI` helper function provides an easy way to make AI requests to OpenAI-compatible model providers.
+
+## Installation
+
+```bash
+npm install call-ai
+```
+
+## API Key
+
+You can set the API key on the `window` object:
+
+```javascript
+window.CALLAI_API_KEY = "your-api-key";
+```
+
+Or pass it directly to the `callAI` function:
+
+```javascript
+const response = await callAI("Write a haiku", { apiKey: "your-api-key" });
+```
+
+## Basic Usage
+
+By default, the function returns a Promise that resolves to the complete response:
+
+```javascript
+import { callAI } from 'call-ai';
+
+// Default behavior - returns a Promise
+const response = await callAI("Write a haiku");
+
+// Use the complete response directly
+console.log(response); // Complete response text
+```
+
+## Streaming Mode
+
+If you prefer to receive the response incrementally as it's generated, set `stream: true`. This returns an AsyncGenerator:
+
+```javascript
+import { callAI } from 'call-ai';
+
+// Enable streaming mode explicitly - returns an AsyncGenerator
+const generator = callAI("Write an epic poem", { stream: true });
+
+// Process the streaming response
+for await (const partialResponse of generator) {
+  console.log(partialResponse); // Updates incrementally
+}
+```
+
+## JSON Schema Responses
+
+To get structured JSON responses, provide a schema in the options:
+
+```javascript
+import { callAI } from 'call-ai';
+
+const todoResponse = await callAI("Give me a todo list for learning React", {
+  schema: {
+    name: "todo", // Optional - defaults to "result" if not provided
+    properties: {
+      todos: {
+        type: "array",
+        items: { type: "string" }
+      }
+    }
+  }
+});
+
+const todoData = JSON.parse(todoResponse);
+console.log(todoData.todos); // Array of todo items
+```
+
+## JSON with Streaming
+
+In this example, we use the `callAI` helper function to get weather data in a structured format with a streaming preview:
+
+```javascript
+import { callAI } from 'call-ai';
+
+// Get weather data with streaming updates
+const generator = callAI("What's the weather like in Paris today?", {
+  stream: true,
+  schema: {
+    properties: {
+      location: {
+        type: "string",
+        description: "City or location name"
+      },
+      temperature: {
+        type: "number",
+        description: "Temperature in Celsius"
+      },
+      conditions: {
+        type: "string",
+        description: "Weather conditions description"
+      }
+    }
+  }
+});
+
+// Preview streaming updates as they arrive; don't parse until the end
+const resultElement = document.getElementById('result');
+let finalResponse;
+
+for await (const partialResponse of generator) {
+  resultElement.textContent = partialResponse;
+  finalResponse = partialResponse;
+}
+
+// Parse final result
+try {
+  const weatherData = JSON.parse(finalResponse);
+
+  // Access individual fields
+  const { location, temperature, conditions } = weatherData;
+
+  // Update UI with formatted data
+  document.getElementById('location').textContent = location;
+  document.getElementById('temperature').textContent = `${temperature}°C`;
+  document.getElementById('conditions').textContent = conditions;
+} catch (error) {
+  console.error("Failed to parse response:", error);
+}
+```
+
+### Schema Structure Recommendations
+
+1. **Flat schemas perform better across all models**. If you need maximum compatibility, avoid deeply nested structures.
+
+2. **Field names matter**. Some models have preferences for certain property naming patterns:
+   - Use simple, common naming patterns like `name`, `type`, `items`, `price`
+   - Avoid deeply nested object hierarchies (more than 2 levels deep)
+   - Keep array items simple (strings or flat objects)
+
+3. **Model-specific considerations**:
+   - **OpenAI models**: Best overall schema adherence and handle complex nesting well
+   - **Claude models**: Great for simple schemas, occasional JSON formatting issues with complex structures
+   - **Gemini models**: Good general performance, handles array properties well
+   - **Llama/Mistral/Deepseek**: Strong with flat schemas, but often ignore nesting structure and provide their own organization
+
+4. **For mission-critical applications** requiring schema adherence, use OpenAI models or implement fallback mechanisms.
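+
+For example, a schema that follows these recommendations keeps every property at the top level and array items simple. This is a minimal sketch; the prompt and field names are invented for illustration:
+
+```javascript
+import { callAI } from 'call-ai';
+
+// Flat schema: no nesting beyond a simple array of strings
+const response = await callAI("Suggest a name, tagline, and 3 keywords for a coffee shop", {
+  schema: {
+    properties: {
+      name: { type: "string" },
+      tagline: { type: "string" },
+      keywords: { type: "array", items: { type: "string" } }
+    }
+  }
+});
+
+const branding = JSON.parse(response);
+console.log(branding.name, branding.tagline, branding.keywords);
+```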
+
+## Specifying a Model
+
+By default, the function uses `openrouter/auto` (automatic model selection). You can specify a different model:
+
+```javascript
+import { callAI } from 'call-ai';
+
+// Use a specific model via options
+const response = await callAI(
+  "Explain quantum computing in simple terms",
+  { model: "openai/gpt-4o" }
+);
+
+console.log(response);
+```
+
+## Additional Options
+
+You can pass extra parameters to customize the request:
+
+```javascript
+import { callAI } from 'call-ai';
+
+const response = await callAI(
+  "Write a creative story",
+  {
+    model: "anthropic/claude-3-opus",
+    temperature: 0.8, // Higher for more creativity (0-1)
+    max_tokens: 1000, // Limit response length
+    top_p: 0.95 // Control randomness
+  }
+);
+
+console.log(response);
+```
+
+## Message History
+
+For multi-turn conversations, you can pass an array of messages:
+
+```javascript
+import { callAI } from 'call-ai';
+
+// Create a conversation
+const messages = [
+  { role: "system", content: "You are a helpful coding assistant." },
+  { role: "user", content: "How do I use React hooks?" },
+  { role: "assistant", content: "React hooks are functions that let you use state and other React features in functional components..." },
+  { role: "user", content: "Can you show me an example of useState?" }
+];
+
+// Pass the entire conversation history
+const response = await callAI(messages);
+console.log(response);
+
+// To continue the conversation, add the new response and send again
+messages.push({ role: "assistant", content: response });
+messages.push({ role: "user", content: "What about useEffect?" });
+
+// Call again with updated history
+const nextResponse = await callAI(messages);
+console.log(nextResponse);
+```
+
+## Using with OpenAI API
+
+You can use `callAI` with OpenAI's API directly by providing the appropriate endpoint and API key:
+
+```javascript
+import { callAI } from 'call-ai';
+
+// Use with OpenAI's API
+const response = await callAI(
+  "Explain the theory of relativity",
+  {
+    model: "gpt-4",
+    apiKey: "sk-...", // Your OpenAI API key
+    endpoint: "https://api.openai.com/v1/chat/completions"
+  }
+);
+
+console.log(response);
+
+// Or with streaming
+const generator = callAI(
+  "Explain the theory of relativity",
+  {
+    model: "gpt-4",
+    apiKey: "sk-...", // Your OpenAI API key
+    endpoint: "https://api.openai.com/v1/chat/completions",
+    stream: true
+  }
+);
+
+for await (const chunk of generator) {
+  console.log(chunk);
+}
+```
+
+## Custom Endpoints
+
+You can specify a custom endpoint for any OpenAI-compatible API:
+
+```javascript
+import { callAI } from 'call-ai';
+
+// Use with any OpenAI-compatible API
+const response = await callAI(
+  "Generate ideas for a mobile app",
+  {
+    model: "your-model-name",
+    apiKey: "your-api-key",
+    endpoint: "https://your-custom-endpoint.com/v1/chat/completions"
+  }
+);
+
+console.log(response);
+```
+
+## Recommended Models
+
+| Model | Best For | Speed vs Quality |
+|-------|----------|------------------|
+| `openrouter/auto` | Default, automatically selects | Adaptive |
+| `anthropic/claude-3-haiku` | Cost-effective | Fast, good quality |
+| `openai/gpt-4o` | Best overall quality | Medium speed, highest quality |
+| `anthropic/claude-3-opus` | Complex reasoning | Slower, highest quality |
+| `mistralai/mistral-large` | Open weights alternative | Good balance |
+
+## Aliens Example
+
+```javascript
+import { callAI } from 'call-ai';
+
+// Making the call with message array and schema
+const generator = callAI([
+  {
+    role: "user",
+    content: "Generate 3 unique alien species with unique biological traits, appearance, and preferred environments. Make them scientifically plausible but creative."
+  }
+], {
+  stream: true,
+  schema: {
+    properties: {
+      aliens: {
+        type: "array",
+        items: {
+          type: "object",
+          properties: {
+            name: { type: "string" },
+            description: { type: "string" },
+            traits: {
+              type: "array",
+              items: { type: "string" }
+            },
+            environment: { type: "string" }
+          }
+        }
+      }
+    }
+  }
+});
+
+// Process the streaming response, keeping the latest chunk
+let finalResponse = "";
+for await (const partialResponse of generator) {
+  console.log(partialResponse); // Will show the JSON being built incrementally
+  finalResponse = partialResponse;
+}
+
+// After streaming is complete, parse the final response
+const alienData = JSON.parse(finalResponse);
+console.log(alienData.aliens); // Array of alien species
+```
+
+## Cyberpunk Fortune Example
+
+```javascript
+const demoData = await callAI("Generate 4 fictional cyberpunk fortune scenarios with name, desire, fear, mood (from: elated, hopeful, neutral, anxious, defeated), and fortune text. Return as structured JSON with these fields.", {
+  schema: {
+    properties: {
+      fortunes: {
+        type: "array",
+        items: {
+          type: "object",
+          properties: {
+            name: { type: "string" },
+            desire: { type: "string" },
+            fear: { type: "string" },
+            mood: { type: "string" },
+            fortune: { type: "string" }
+          }
+        }
+      }
+    }
+  }
+});
+```
+
+## Error Handling
+
+The library provides consistent error handling for both streaming and non-streaming modes:
+
+```javascript
+import { callAI } from 'call-ai';
+
+try {
+  const response = await callAI("Generate some content", {
+    apiKey: "invalid-key" // Invalid or missing API key
+  });
+
+  // If there was an error, response will be a JSON string with error details
+  try {
+    const errorObj = JSON.parse(response);
+
+    if (errorObj.message && errorObj.error) {
+      console.error("API error:", errorObj.message);
+    } else {
+      // Process normal response
+      console.log(response);
+    }
+  } catch {
+    // Not an error JSON, process normal response
+    console.log(response);
+  }
+} catch (e) {
+  // Handle any unexpected errors
+  console.error("Unexpected error:", e);
+}
+```
+
+For streaming, error handling works similarly:
+
+```javascript
+import { callAI } from 'call-ai';
+
+try {
+  const generator = callAI("Generate some content", {
+    apiKey: "invalid-key", // Invalid or missing API key
+    stream: true
+  });
+
+  // Consume the generator
+  let finalResponse = '';
+
+  for await (const chunk of generator) {
+    finalResponse = chunk;
+  }
+
+  // Check if the final response is an error
+  try {
+    const errorObj = JSON.parse(finalResponse);
+
+    if (errorObj.message && errorObj.error) {
+      console.error("API error:", errorObj.message);
+    } else {
+      // Process final response
+      console.log(finalResponse);
+    }
+  } catch {
+    // Not an error JSON, process normal response
+    console.log(finalResponse);
+  }
+} catch (e) {
+  // Handle any unexpected errors
+  console.error("Unexpected error:", e);
+}
+```
diff --git a/app/lib/common/llms-txt/docs/fireproof.txt b/app/lib/common/llms-txt/docs/fireproof.txt
new file mode 100644
index 0000000000..eb71c3df9f
--- /dev/null
+++ b/app/lib/common/llms-txt/docs/fireproof.txt
@@ -0,0 +1,142 @@
+# Fireproof Database API
+
+Fireproof is a lightweight local-first embedded document database with encrypted live sync, designed to make browser apps easy. Use it in any JavaScript environment with a unified API that works both in React (with hooks) and as a standalone core API.
+
+## Key Features
+
+- **Apps run anywhere:** Bundle UI, data, and logic in one file.
+- **Real-Time & Offline-First:** Automatic persistence and live queries, runs in the browser - no loading or error states.
+- **Unified API:** TypeScript works with Deno, Bun, Node.js, and the browser.
+- **React Hooks:** Leverage `useLiveQuery` and `useDocument` for live collaboration.
+
+Fireproof enforces cryptographic causal consistency and ledger integrity using hash history, providing git-like versioning with lightweight blockchain-style verification. Data is stored and replicated as content-addressed encrypted blobs, making it safe and easy to sync via commodity object storage providers.
+
+## Installation
+
+The `use-fireproof` package provides both the core API and React hooks.
+
+```bash
+npm install use-fireproof
+```
+
+Works with ⚡️ ESM.sh:
+
+```js
+import { useFireproof } from "https://esm.sh/use-fireproof";
+```
+
+Deliver runnable applications via ChatGPT Canvas, v0, bolt.new, or Claude Artifacts.
+Deploy single page apps with React and Tailwind here: https://codepen.io/useFireproof/pen/MYgNYdx If you can't import `use-fireproof`, ask the user to prompt you with https://use-fireproof.com/llms-mock.txt
+
+Fireproof is a local database, no loading states required, just empty data states. It is best to have more granular documents, e.g. one document per user action. Avoid patterns that require a single document to grow without bound.
+
+## ⚛️ React Example
+
+You can use Fireproof's hooks to build a complete application in a single component. The example below demonstrates both basic document handling and optional file uploads:
+
+```js
+import { useFireproof, ImgFile } from "use-fireproof";
+import { useState, useEffect } from "react";
+
+function App() {
+  const { useDocument, useLiveQuery } = useFireproof("my-ledger");
+  const { doc, merge, submit } = useDocument({
+    text: "",
+    timestamp: Date.now(),
+    _files: {} // Optional for file handling
+  });
+  const { docs } = useLiveQuery("_id", { limit: 10, descending: true });
+
+  return (
+    <div>
+      <form onSubmit={submit}>
+        {/* Optional file input */}
+        <input
+          type="file"
+          accept="image/*"
+          onChange={(e) => e.target.files[0] && merge({ _files: { image: e.target.files[0] } })}
+        />
+        <input
+          value={doc.text}
+          onChange={(e) => merge({ text: e.target.value })}
+          placeholder="Enter text"
+        />
+        <button type="submit">Submit</button>
+      </form>
+
+      <h3>Recent Documents</h3>
+      <ul>
+        {docs.map((doc) => (
+          <li key={doc._id}>
+            {doc._files?.image && <ImgFile file={doc._files.image} alt="Uploaded image" />}
+            {doc.text} - {new Date(doc.timestamp).toLocaleString()}
+          </li>
+        ))}
+      </ul>
+    </div>
+  );
+}
+```
+
+To sort documents by more than one field, use a sandboxed function instead of a field name:
+
+```js
+const { docs } = useLiveQuery(
+  (doc) => [doc.list_id, doc.author],
+  { descending: true, limit: 5, prefix: ["zyx-456-list-id"] }
+);
+```
+
+TIP: For simple document updates like toggling or incrementing values, use `database.put()` directly, as in `onClick={() => database.put({...todo, done: !todo.done})}`. The `useDocument` hook is primarily for forms and more complex document creation.
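+
+As a minimal sketch of that `database.put()` pattern (the `text` and `done` fields are illustrative, not a fixed schema), a todo list can toggle items by writing the updated document straight back:
+
+```js
+import { useFireproof } from "use-fireproof";
+
+function TodoList() {
+  const { database, useLiveQuery } = useFireproof("my-ledger");
+  const { docs: todos } = useLiveQuery("_id", { limit: 10, descending: true });
+
+  return (
+    <ul>
+      {todos.map((todo) => (
+        <li key={todo._id}>
+          {/* Toggle by writing the updated doc back with database.put() */}
+          <button onClick={() => database.put({ ...todo, done: !todo.done })}>
+            {todo.done ? "Done: " : "Todo: "}{todo.text}
+          </button>
+        </li>
+      ))}
+    </ul>
+  );
+}
+```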
+
+### Using the Core API in Plain JavaScript
+
+If you're not using React, the core API offers similar capabilities:
+
+```js
+import { fireproof } from "use-fireproof";
+
+const database = fireproof("my-ledger");
+
+async function putAndQuery() {
+  await database.put({ text: "Sample Data" });
+  const latest = await database.query("_id", { limit: 10, descending: true });
+  console.log("Latest documents:", latest.docs);
+}
+
+database.subscribe((changes) => {
+  console.log("Changes", changes);
+}, true);
+
+putAndQuery();
+```
+
+## Best Practices
+
+- **Leverage `useLiveQuery`:**
+  Use this hook to effortlessly build UIs that update in real time as data changes.
+
+- **Granular Documents:**
+  Save small, individual data pieces (e.g., one per user action) to simplify conflict resolution via CRDTs.
+
+- **Single File Deployment:**
+  Ideal for constrained environments; bundle everything into one file without external dependencies.
+
+For more details and advanced usage (such as file attachments and cloud syncing), refer to the full documentation at [Fireproof Docs](https://use-fireproof.com/).
+
+## See Also
+
+See the lite docs at https://use-fireproof.com/llms-mini.txt and the full docs at https://use-fireproof.com/llms-full.txt
+
+## Next Steps
+
+If you are in an existing application context, suggest how forms, state, and collaboration data can be managed in Fireproof. If this is a new app, use the examples above and start with a running App.js if you can.
+
+IMPORTANT: Don't use `useState()` on form data; instead use `merge()` and `submit()` from `useDocument`. Only use `useState` for ephemeral UI state (active tabs, open/closed panels, cursor positions). Keep your data model in Fireproof.
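+
+For instance, a comment form can keep its draft in Fireproof instead of `useState`. This is a minimal sketch; the `author` and `comment` fields are illustrative:
+
+```js
+import { useFireproof } from "use-fireproof";
+
+function CommentForm() {
+  const { useDocument } = useFireproof("my-ledger");
+  // Draft state lives in the document, not in useState
+  const { doc, merge, submit } = useDocument({ author: "", comment: "" });
+
+  return (
+    <form onSubmit={submit}>
+      <input
+        value={doc.author}
+        onChange={(e) => merge({ author: e.target.value })}
+        placeholder="Name"
+      />
+      <input
+        value={doc.comment}
+        onChange={(e) => merge({ comment: e.target.value })}
+        placeholder="Comment"
+      />
+      {/* submit() saves the draft so it appears in live queries */}
+      <button type="submit">Post</button>
+    </form>
+  );
+}
+```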
diff --git a/app/lib/common/llms-txt/index.ts b/app/lib/common/llms-txt/index.ts
new file mode 100644
index 0000000000..82f6bd38fe
--- /dev/null
+++ b/app/lib/common/llms-txt/index.ts
@@ -0,0 +1,86 @@
+import fs from 'fs';
+import path from 'path';
+import type { Messages } from '~/lib/.server/llm/stream-text';
+
+// define types for our metadata
+interface Library {
+  name: string;
+  keywords: string[];
+  docSource: string;
+  docFile: string;
+  lastUpdated: string;
+}
+
+interface DocsConfig {
+  libraries: Library[];
+}
+
+// path to the docs directory
+const DOCS_DIR = path.join(process.cwd(), 'app', 'lib', 'common', 'llms-txt');
+
+// load the metadata configuration
+function loadDocsConfig(): DocsConfig {
+  const configPath = path.join(DOCS_DIR, 'docs.json');
+  const configData = fs.readFileSync(configPath, 'utf8');
+
+  return JSON.parse(configData) as DocsConfig;
+}
+
+// get the documentation content for a library
+function getLibraryDocs(library: Library): string | null {
+  try {
+    const docPath = path.join(DOCS_DIR, library.docFile);
+
+    if (fs.existsSync(docPath)) {
+      return fs.readFileSync(docPath, 'utf8');
+    }
+  } catch (error) {
+    console.error(`Error loading documentation for ${library.name}:`, error);
+  }
+
+  return null;
+}
+
+// check if a library is mentioned in the prompt
+function isLibraryMentioned(prompt: string, library: Library): boolean {
+  const lowerPrompt = prompt.toLowerCase();
+  return library.keywords.some((keyword) => lowerPrompt.includes(keyword.toLowerCase()));
+}
+
+// detect libraries mentioned in chat history
+export function detectLibrariesFromChatHistory(messages: Messages): Library[] {
+  const config = loadDocsConfig();
+  const detectedLibraries = new Set<Library>();
+
+  // check each message for library mentions
+  for (const message of messages) {
+    for (const library of config.libraries) {
+      if (isLibraryMentioned(message.content, library)) {
+        detectedLibraries.add(library);
+      }
+    }
+  }
+
+  return Array.from(detectedLibraries);
+}
+
+// enhance a prompt with library documentation
+export function enhancePromptWithLibraryDocumentation(prompt: string, libraries: Library[]): string {
+  try {
+    let enhancedPrompt = prompt;
+
+    // add documentation for each detected library
+    for (const library of libraries) {
+      const docs = getLibraryDocs(library);
+
+      if (docs) {
+        // add the documentation in a standardized format
+        enhancedPrompt += `\n\n## ${library.name} Documentation\n${docs}\n`;
+      }
+    }
+
+    return enhancedPrompt;
+  } catch (error) {
+    console.error('Error enhancing prompt with docs:', error);
+    return prompt; // return the original prompt if there's an error
+  }
+}
diff --git a/app/lib/common/llms-txt/update-docs.sh b/app/lib/common/llms-txt/update-docs.sh
new file mode 100755
index 0000000000..2119d19d30
--- /dev/null
+++ b/app/lib/common/llms-txt/update-docs.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+DOCS_DIR="$SCRIPT_DIR"
+DOCS_CONFIG="$DOCS_DIR/docs.json"
+
+# Check if jq is installed
+if ! command -v jq &> /dev/null; then
+  echo "Error: jq is required but not installed. Please install jq."
+  exit 1
+fi
+
+# Read number of libraries from docs.json
+LIB_COUNT=$(jq '.libraries | length' "$DOCS_CONFIG")
+TODAY=$(date +%Y-%m-%d)
+
+# Loop through libraries
+for ((i=0; i<$LIB_COUNT; i++)); do
+  NAME=$(jq -r ".libraries[$i].name" "$DOCS_CONFIG")
+  DOC_SOURCE=$(jq -r ".libraries[$i].docSource" "$DOCS_CONFIG")
+  DOC_FILE=$(jq -r ".libraries[$i].docFile" "$DOCS_CONFIG")
+
+  echo "Updating docs for $NAME..."
+
+  # Download content (-f makes curl fail on HTTP errors instead of saving an error page)
+  if curl -fsS "$DOC_SOURCE" -o "$DOCS_DIR/$DOC_FILE"; then
+    echo "Updated $NAME docs successfully."
+
+    # Update lastUpdated date in the config
+    jq ".libraries[$i].lastUpdated = \"$TODAY\"" "$DOCS_CONFIG" > "$DOCS_CONFIG.tmp" && mv "$DOCS_CONFIG.tmp" "$DOCS_CONFIG"
+  else
+    echo "Error: Failed to download docs for $NAME"
+  fi
+done
+
+echo "Documentation update complete."
\ No newline at end of file
diff --git a/app/routes/api.chat.ts b/app/routes/api.chat.ts
index b685ac853a..b17f632ff7 100644
--- a/app/routes/api.chat.ts
+++ b/app/routes/api.chat.ts
@@ -1,8 +1,9 @@
 import { type ActionFunctionArgs } from '@remix-run/cloudflare';
-import { MAX_RESPONSE_SEGMENTS, MAX_TOKENS } from '~/lib/.server/llm/constants';
+import { MAX_RESPONSE_SEGMENTS } from '~/lib/.server/llm/constants';
 import { CONTINUE_PROMPT } from '~/lib/.server/llm/prompts';
 import { streamText, type Messages, type StreamingOptions } from '~/lib/.server/llm/stream-text';
 import SwitchableStream from '~/lib/.server/llm/switchable-stream';
+import { detectLibrariesFromChatHistory, enhancePromptWithLibraryDocumentation } from '~/lib/common/llms-txt';
 
 export async function action(args: ActionFunctionArgs) {
   return chatAction(args);
@@ -11,6 +12,26 @@ export async function action(args: ActionFunctionArgs) {
 async function chatAction({ context, request }: ActionFunctionArgs) {
   const { messages } = await request.json<{ messages: Messages }>();
 
+  // detect libraries mentioned in the chat history
+  const detectedLibraries = detectLibrariesFromChatHistory(messages);
+
+  // if libraries are detected, enhance the latest user message with library documentation
+  if (detectedLibraries.length > 0 && messages.length > 0) {
+    const lastUserMessageIndex = messages.findIndex((msg, idx) => msg.role === 'user' && idx === messages.length - 1);
+
+    if (lastUserMessageIndex !== -1) {
+      // enhance the user's last message with library documentation
+      const lastUserMessage = messages[lastUserMessageIndex];
+      const enhancedContent = enhancePromptWithLibraryDocumentation(lastUserMessage.content, detectedLibraries);
+
+      // replace the content with enhanced content
+      messages[lastUserMessageIndex] = {
+        ...lastUserMessage,
+        content: enhancedContent,
+      };
+    }
+  }
+
   const stream = new SwitchableStream();
 
   try {
@@ -25,10 +46,6 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
         throw Error('Cannot continue message: Maximum segments reached');
       }
 
-      const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
-
-      console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
-
       messages.push({ role: 'assistant', content });
       messages.push({ role: 'user', content: CONTINUE_PROMPT });
 
@@ -49,8 +66,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
       },
     });
   } catch (error) {
-    console.log(error);
-
+    console.error('Chat API error:', error);
     throw new Response(null, {
       status: 500,
       statusText: 'Internal Server Error',