@@ -1514,4 +1514,280 @@ describe("GPT-5 streaming event coverage (additional)", () => {
 		// @ts-ignore
 		delete global.fetch
 	})
+
+	describe("Codex Mini Model", () => {
+		let handler: OpenAiNativeHandler
+		const mockOptions: ApiHandlerOptions = {
+			openAiNativeApiKey: "test-api-key",
+			apiModelId: "codex-mini-latest",
+		}
+
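+		// These tests assume the handler POSTs to the Responses API at
+		// https://api.openai.com/v1/responses and parses SSE `data: {json}` events
+		// until a terminating `data: [DONE]` line.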
+		it("should handle codex-mini-latest streaming response", async () => {
+			// Mock fetch for Codex Mini responses API
+			const mockFetch = vitest.fn().mockResolvedValue({
+				ok: true,
+				body: new ReadableStream({
+					start(controller) {
+						// Codex Mini uses the same responses API format
+						controller.enqueue(
+							new TextEncoder().encode('data: {"type":"response.output_text.delta","delta":"Hello"}\n\n'),
+						)
+						controller.enqueue(
+							new TextEncoder().encode('data: {"type":"response.output_text.delta","delta":" from"}\n\n'),
+						)
+						controller.enqueue(
+							new TextEncoder().encode(
+								'data: {"type":"response.output_text.delta","delta":" Codex"}\n\n',
+							),
+						)
+						controller.enqueue(
+							new TextEncoder().encode(
+								'data: {"type":"response.output_text.delta","delta":" Mini!"}\n\n',
+							),
+						)
+						controller.enqueue(new TextEncoder().encode('data: {"type":"response.completed"}\n\n'))
+						controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"))
+						controller.close()
+					},
+				}),
+			})
+			global.fetch = mockFetch as any
+
+			handler = new OpenAiNativeHandler({
+				...mockOptions,
+				apiModelId: "codex-mini-latest",
+			})
+
+			const systemPrompt = "You are a helpful coding assistant."
+			const messages: Anthropic.Messages.MessageParam[] = [
+				{ role: "user", content: "Write a hello world function" },
+			]
+
+			const stream = handler.createMessage(systemPrompt, messages)
+			const chunks: any[] = []
+			for await (const chunk of stream) {
+				chunks.push(chunk)
+			}
+
+			// Verify text chunks
+			const textChunks = chunks.filter((c) => c.type === "text")
+			expect(textChunks).toHaveLength(4)
+			expect(textChunks.map((c) => c.text).join("")).toBe("Hello from Codex Mini!")
+
+			// Verify usage estimation (based on character count)
+			const usageChunks = chunks.filter((c) => c.type === "usage")
+			expect(usageChunks).toHaveLength(1)
+			expect(usageChunks[0]).toMatchObject({
+				type: "usage",
+				inputTokens: expect.any(Number),
+				outputTokens: expect.any(Number),
+				totalCost: expect.any(Number), // Codex Mini has pricing: $1.5/M input, $6/M output
+			})
+
+			// Verify cost is calculated correctly
+			expect(usageChunks[0].totalCost).toBeGreaterThan(0)
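+			// Sanity check on magnitude: with the assumed $1.5/M input and $6/M output
+			// pricing, e.g. 20 input tokens and 5 output tokens would cost about
+			// 20 * 1.5e-6 + 5 * 6e-6 = $0.00006, tiny but strictly positive.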
+
+			// Verify the request was made with correct parameters
+			expect(mockFetch).toHaveBeenCalledWith(
+				"https://api.openai.com/v1/responses",
+				expect.objectContaining({
+					method: "POST",
+					headers: expect.objectContaining({
+						"Content-Type": "application/json",
+						Authorization: "Bearer test-api-key",
+						Accept: "text/event-stream",
+					}),
+					body: expect.any(String),
+				}),
+			)
+
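+			// The request body is assumed to use the Responses API's flat
+			// `instructions`/`input` fields rather than a `messages` array.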
+			const requestBody = JSON.parse(mockFetch.mock.calls[0][1].body)
+			expect(requestBody).toMatchObject({
+				model: "codex-mini-latest",
+				instructions: systemPrompt,
+				input: "Write a hello world function",
+				stream: true,
+			})
+
+			// Clean up
+			delete (global as any).fetch
+		})
+
+		it("should handle codex-mini-latest non-streaming completion", async () => {
+			// Mock fetch for non-streaming response
+			const mockFetch = vitest.fn().mockResolvedValue({
+				ok: true,
+				json: async () => ({
+					output_text: "def hello_world():\n print('Hello, World!')",
+				}),
+			})
+			global.fetch = mockFetch as any
+
+			handler = new OpenAiNativeHandler({
+				...mockOptions,
+				apiModelId: "codex-mini-latest",
+			})
+
+			const result = await handler.completePrompt("Write a hello world function in Python")
+
+			expect(result).toBe("def hello_world():\n print('Hello, World!')")
+
+			// Verify the request
+			expect(mockFetch).toHaveBeenCalledWith(
+				"https://api.openai.com/v1/responses",
+				expect.objectContaining({
+					method: "POST",
+					headers: expect.objectContaining({
+						"Content-Type": "application/json",
+						Authorization: "Bearer test-api-key",
+					}),
+					body: expect.any(String),
+				}),
+			)
+
+			const requestBody = JSON.parse(mockFetch.mock.calls[0][1].body)
+			expect(requestBody).toMatchObject({
+				model: "codex-mini-latest",
+				instructions: "Complete the following prompt:",
+				input: "Write a hello world function in Python",
+				stream: false,
+			})
+
+			// Clean up
+			delete (global as any).fetch
+		})
+
+		it("should handle codex-mini-latest API errors", async () => {
+			// Mock fetch with error response
+			const mockFetch = vitest.fn().mockResolvedValue({
+				ok: false,
+				status: 429,
+				statusText: "Too Many Requests",
+				text: async () => "Rate limit exceeded",
+			})
+			global.fetch = mockFetch as any
+
+			handler = new OpenAiNativeHandler({
+				...mockOptions,
+				apiModelId: "codex-mini-latest",
+			})
+
+			const systemPrompt = "You are a helpful assistant."
+			const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello" }]
+
+			const stream = handler.createMessage(systemPrompt, messages)
+
+			// Should throw an error
+			await expect(async () => {
+				for await (const chunk of stream) {
+					// consume stream
+				}
+			}).rejects.toThrow("Codex Mini API request failed (429): Rate limit exceeded")
+
+			// Clean up
+			delete (global as any).fetch
+		})
+
+		it("should handle codex-mini-latest with multiple user messages", async () => {
+			// Mock fetch for streaming response
+			const mockFetch = vitest.fn().mockResolvedValue({
+				ok: true,
+				body: new ReadableStream({
+					start(controller) {
+						controller.enqueue(
+							new TextEncoder().encode(
+								'data: {"type":"response.output_text.delta","delta":"Combined response"}\n\n',
+							),
+						)
+						controller.enqueue(new TextEncoder().encode('data: {"type":"response.completed"}\n\n'))
+						controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"))
+						controller.close()
+					},
+				}),
+			})
+			global.fetch = mockFetch as any
+
+			handler = new OpenAiNativeHandler({
+				...mockOptions,
+				apiModelId: "codex-mini-latest",
+			})
+
+			const systemPrompt = "You are a helpful assistant."
+			const messages: Anthropic.Messages.MessageParam[] = [
+				{ role: "user", content: "First question" },
+				{ role: "assistant", content: "First answer" },
+				{ role: "user", content: "Second question" },
+			]
+
+			const stream = handler.createMessage(systemPrompt, messages)
+			const chunks: any[] = []
+			for await (const chunk of stream) {
+				chunks.push(chunk)
+			}
+
+			// Verify the request body only includes user messages
+			const requestBody = JSON.parse(mockFetch.mock.calls[0][1].body)
+			expect(requestBody.input).toBe("First question\n\nSecond question")
+			expect(requestBody.input).not.toContain("First answer")
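+			// (Assumed flattening behavior: user turns are concatenated with blank
+			// lines between them, and assistant turns are dropped entirely.)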
+
+			// Clean up
+			delete (global as any).fetch
+		})
+
+		it("should handle codex-mini-latest stream error events", async () => {
+			// Mock fetch with error event in stream
+			const mockFetch = vitest.fn().mockResolvedValue({
+				ok: true,
+				body: new ReadableStream({
+					start(controller) {
+						controller.enqueue(
+							new TextEncoder().encode(
+								'data: {"type":"response.output_text.delta","delta":"Partial"}\n\n',
+							),
+						)
+						controller.enqueue(
+							new TextEncoder().encode(
+								'data: {"type":"response.error","error":{"message":"Model overloaded"}}\n\n',
+							),
+						)
+						controller.close()
+					},
+				}),
+			})
+			global.fetch = mockFetch as any
+
+			handler = new OpenAiNativeHandler({
+				...mockOptions,
+				apiModelId: "codex-mini-latest",
+			})
+
+			const systemPrompt = "You are a helpful assistant."
+			const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello" }]
+
+			const stream = handler.createMessage(systemPrompt, messages)
+
+			// Should throw an error when encountering an error event
+			await expect(async () => {
+				const chunks: any[] = []
+				for await (const chunk of stream) {
+					chunks.push(chunk)
+				}
+			}).rejects.toThrow("Codex Mini stream error: Model overloaded")
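+			// (The "Partial" delta may be yielded before the error event arrives;
+			// this test only asserts that the stream ultimately rejects.)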
+
+			// Clean up
+			delete (global as any).fetch
+		})
+	})
 })