import type { LanguageModelV1StreamPart } from 'ai'
import { streamText, extractReasoningMiddleware, wrapLanguageModel } from 'ai'
import { createWorkersAI } from 'workers-ai-provider'
import { Hono } from 'hono'

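// Worker bindings: ASSETS serves the static frontend, AI is the Workers AI binding used for inference.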
interface Env {
  ASSETS: Fetcher
  AI: Ai
}

type Message = {
  role: 'system' | 'user' | 'assistant' | 'data'
  content: string
}

const app = new Hono<{ Bindings: Env }>()

// Handle the /api/chat endpoint
app.post('/api/chat', async (c) => {
  try {
    const { messages, reasoning }: { messages: Message[]; reasoning: boolean } = await c.req.json()

    const workersai = createWorkersAI({ binding: c.env.AI })

    // Choose model based on reasoning preference: the DeepSeek R1 distill when reasoning is requested, Llama 3.3 otherwise
    const model = reasoning
      ? wrapLanguageModel({
          model: workersai('@cf/deepseek-ai/deepseek-r1-distill-qwen-32b'),
          middleware: [
            extractReasoningMiddleware({ tagName: 'think' }),
            // Custom middleware to inject a <think> tag at the start of the reasoning if it is missing
            {
              wrapGenerate: async ({ doGenerate }) => {
                const result = await doGenerate()

                if (result.text && !result.text.includes('<think>')) {
                  result.text = `<think>${result.text}`
                }

                return result
              },
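              // Apply the same fix to streamed output: watch the text deltas and prepend <think> if the model never emits one.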
              wrapStream: async ({ doStream }) => {
                const { stream, ...rest } = await doStream()

                let generatedText = ''
                const transformStream = new TransformStream<LanguageModelV1StreamPart, LanguageModelV1StreamPart>({
                  transform(chunk, controller) {
                    // Manually add the <think> tag, because distills of reasoning models sometimes omit it
                    if (chunk.type === 'text-delta') {
                      if (!generatedText.includes('<think>') && !chunk.textDelta.includes('<think>')) {
                        generatedText += '<think>'
                        controller.enqueue({
                          type: 'text-delta',
                          textDelta: '<think>',
                        })
                      }
                      generatedText += chunk.textDelta
                    }

                    controller.enqueue(chunk)
                  },
                })

                return {
                  stream: stream.pipeThrough(transformStream),
                  ...rest,
                }
              },
            },
          ],
        })
      : workersai('@cf/meta/llama-3.3-70b-instruct-fp8-fast')

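    // System prompt: general response rules, plus a one-time instruction (first message only) asking the model to return a <chat-title> tag.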
    const systemPrompt: Message = {
      role: 'system',
      content: `
      - Do not wrap your responses in HTML tags.
      - Do not apply any formatting to your responses.
      - You are an expert conversational chatbot. Your objective is to be as helpful as possible.
      - You must keep your responses relevant to the user's prompt.
      - You must respond with a maximum of 512 tokens (300 words).
      - You must respond clearly and concisely, and explain your logic if required.
      - You must not provide any personal information.
      - Do not respond with your own personal opinions, and avoid topics unrelated to the user's prompt.
      ${
        messages.length <= 1
          ? `- Important REMINDER: You MUST provide a 5 word title at the END of your response using <chat-title> </chat-title> tags.
      If you do not do this, this session will error.
      For example, <chat-title>Hello and Welcome</chat-title> Hi, how can I help you today?
      `
          : ''
      }
      `,
    }

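    // Stream the completion back using the AI SDK data stream protocol; sendReasoning forwards the extracted reasoning parts to the client.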
    const result = await streamText({
      model,
      messages: [systemPrompt, ...messages],
      maxTokens: 2048,
      maxRetries: 3,
    })

    return result.toDataStreamResponse({
      sendReasoning: true,
    })
  } catch (error) {
    return c.json({ error: `Chat completion failed. ${(error as Error)?.message}` }, 500)
  }
})

// Handle static assets and fallback routes
app.all('*', async (c) => {
  if (c.env.ASSETS) {
    return c.env.ASSETS.fetch(c.req.raw)
  }
  return c.text('Not found', 404)
})

export default app