// ─────────────────────────────────────────────────────────────────────────────
// AuthorityLayer + OpenAI — Integration Example
//
// Shows how to wrap OpenAI chat completions with AuthorityLayer enforcement:
//   • Budget cap: halt when cumulative token spend exceeds a USD limit
//   • Loop guard: halt after too many tool calls in a single run
//   • Tool throttle: halt if tool calls exceed a per-minute rate
//
// This file uses a MOCK OpenAI client so it runs without a real API key.
// To use with a real key, swap the mock for the real OpenAI client
// (see the comment in createOpenAIClient() below).
//
// Install:
//   npm install authority-layer openai
//
// Run (from repo root, after building):
//   npx ts-node examples/openai-agent.ts
// ─────────────────────────────────────────────────────────────────────────────
import { AuthorityLayer, EnforcementHalt } from "authority-layer";
| 22 | +// ── OpenAI pricing constants (GPT-4o, as of 2025) ──────────────────────────── |
| 23 | +// https://openai.com/pricing |
| 24 | +const GPT4O_INPUT_PRICE_PER_TOKEN = 0.000005; // $5.00 / 1M tokens |
| 25 | +const GPT4O_OUTPUT_PRICE_PER_TOKEN = 0.000015; // $15.00 / 1M tokens |
| 26 | + |
| 27 | +function estimateCostUSD(usage: { prompt_tokens: number; completion_tokens: number }): number { |
| 28 | + return ( |
| 29 | + usage.prompt_tokens * GPT4O_INPUT_PRICE_PER_TOKEN + |
| 30 | + usage.completion_tokens * GPT4O_OUTPUT_PRICE_PER_TOKEN |
| 31 | + ); |
| 32 | +} |
| 33 | + |
| 34 | +// ── Mock OpenAI client (swap this for `new OpenAI()` with a real key) ──────── |
| 35 | +// |
| 36 | +// Real usage: |
| 37 | +// import OpenAI from "openai"; |
| 38 | +// const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY }); |
| 39 | +// |
| 40 | +function createOpenAIClient() { |
| 41 | + return { |
| 42 | + chat: { |
| 43 | + completions: { |
| 44 | + async create(params: { model: string; messages: { role: string; content: string }[] }) { |
| 45 | + // Simulate network latency |
| 46 | + await new Promise(r => setTimeout(r, 30)); |
| 47 | + |
| 48 | + const prompt = params.messages.at(-1)?.content ?? ""; |
| 49 | + return { |
| 50 | + choices: [ |
| 51 | + { message: { role: "assistant", content: `[mock] Response to: "${prompt}"` } }, |
| 52 | + ], |
| 53 | + usage: { |
| 54 | + prompt_tokens: 120, // realistic for a short system + user message |
| 55 | + completion_tokens: 80, // realistic short response |
| 56 | + total_tokens: 200, |
| 57 | + }, |
| 58 | + model: params.model, |
| 59 | + }; |
| 60 | + }, |
| 61 | + }, |
| 62 | + }, |
| 63 | + }; |
| 64 | +} |
| 65 | + |
| 66 | +const openai = createOpenAIClient(); |
// ── AuthorityLayer configuration ──────────────────────────────────────────────

// Every limit below is enforced at runtime; crossing one raises an
// EnforcementHalt, which is caught and reported in main() below.
const authority = new AuthorityLayer({
  // Halt the agent if cumulative token spend exceeds $0.05 (demo threshold).
  // Spend is whatever the caller reports via authority.recordSpend().
  // Set this to your actual daily/per-run budget in production.
  budget: { dailyUSD: 0.05 },

  // Halt if a single agent run makes more than 10 tool calls.
  // Prevents infinite reasoning loops that call the model repeatedly.
  loopGuard: { maxToolCallsPerRun: 10 },

  // Halt if more than 60 tool calls happen within any 60-second window.
  // Prevents retry storms from hammering the OpenAI API.
  toolThrottle: { maxCallsPerMinute: 60 },
});
// ── Simulated agent task ──────────────────────────────────────────────────────

// Shared system prompt prepended to every completion call in main().
const SYSTEM_PROMPT = "You are a helpful assistant. Answer concisely.";

// One chat completion is issued per entry, in order, inside main().
const USER_TASKS = [
  "What is the capital of France?",
  "Summarize the water cycle in one sentence.",
  "What year was the Eiffel Tower built?",
  "Name three programming languages released before 1990.",
  "What is the boiling point of water in Fahrenheit?",
  "Translate 'hello' into Spanish.",
  // The 7th call is intended to push cumulative spend past the $0.05 budget
  // cap and trigger an EnforcementHalt with reason: "budget_exceeded"
  // (this holds only if the per-call cost derived from the client's usage
  // numbers is above $0.05 / 7 ≈ $0.0072 — verify against the mock client).
  "This call will exceed the budget cap and be halted by AuthorityLayer.",
];
// ── Main agent loop ───────────────────────────────────────────────────────────

/**
 * Runs the demo: issues one chat completion per USER_TASKS entry through
 * AuthorityLayer, reports spend after each call, prints an enforcement-halt
 * report if a limit trips, then dumps and verifies the event chain.
 */
async function main() {
  console.log("\nAuthorityLayer + OpenAI Example");
  console.log("─".repeat(50));
  console.log(`Config: $0.05 budget cap · 10 calls/run · 60 calls/min\n`);

  // Local mirrors of what AuthorityLayer tracks, used only for console output.
  let totalSpend = 0;
  let callCount = 0;

  try {
    // wrap() scopes a single "run" — the loop guard's per-run counter applies
    // to everything inside this callback.
    await authority.wrap(async () => {
      for (const task of USER_TASKS) {
        // ── Key integration point ────────────────────────────────
        // Route every OpenAI call through authority.tool().
        // This applies loop guard + throttle BEFORE the call executes.
        const response = await authority.tool("openai.chat.completions", () =>
          openai.chat.completions.create({
            model: "gpt-4o",
            messages: [
              { role: "system", content: SYSTEM_PROMPT },
              { role: "user", content: task },
            ],
          })
        );

        callCount++;

        // ── Spend reporting ──────────────────────────────────────
        // AuthorityLayer doesn't know your pricing model.
        // You calculate the USD cost from token counts and report it.
        // NOTE: recordSpend runs AFTER the call, so the call that crosses
        // the budget completes before the halt is raised.
        const cost = estimateCostUSD(response.usage);
        totalSpend += cost;
        authority.recordSpend(cost); // ← triggers budget check

        const reply = response.choices[0]?.message?.content ?? "(no response)";
        console.log(` [call ${callCount}] ${reply.slice(0, 60)}`);
        console.log(` tokens: ${response.usage.total_tokens} · cost: $${cost.toFixed(4)} · cumulative: $${totalSpend.toFixed(4)}\n`);
      }
    });

  } catch (err) {
    if (err instanceof EnforcementHalt) {
      // ── Enforcement halt ─────────────────────────────────────────
      // Always access halt details via err.enforcement — never parse
      // err.message (the structured payload is the stable contract).
      const { reason, limit, spent, event_id } = err.enforcement;

      console.error(`\n⛔ Execution halted by AuthorityLayer\n`);
      console.error(JSON.stringify({ status: "halted", reason, limit, spent, event_id }, null, 2));
    } else {
      // Re-throw unexpected errors — don't swallow real bugs
      throw err;
    }
  }

  // ── Chain audit ─────────────────────────────────────────────────────────
  // Inspect the full tamper-evident event log after the run. This runs even
  // after a halt, since the try/catch above does not return early.
  const chain = authority.getChain();
  const intact = authority.verifyChain();

  console.log("\n── Enforcement event chain " + "─".repeat(24));
  for (const event of chain) {
    console.log(` ${event.type.padEnd(22)} ${event.event_id}`);
  }
  console.log(`\nChain integrity : ${intact ? "✅ verified" : "❌ TAMPERED"}`);
  console.log(`Total events : ${chain.length}\n`);
}
| 168 | +main().catch(err => { |
| 169 | + console.error("Unexpected error:", err); |
| 170 | + process.exit(1); |
| 171 | +}); |
0 commit comments