/*
 * Muna
 * Copyright © 2025 NatML Inc. All Rights Reserved.
 */
5+
6+ export interface ChatCompletion {
7+ /**
8+ * The object type, which is always `chat.completion`.
9+ */
10+ object : "chat.completion" ;
11+ /**
12+ * A unique identifier for the chat completion.
13+ */
14+ id : string ;
15+ /**
16+ * The model used for the chat completion.
17+ */
18+ model : string ;
19+ /**
20+ * A list of chat completion choices. Can be more than one if `n` is greater
21+ * than 1.
22+ */
23+ choices : Array < ChatCompletion . Choice > ;
24+ /**
25+ * The Unix timestamp (in seconds) of when the chat completion was created.
26+ */
27+ created : number ;
28+ /**
29+ * Usage statistics for the completion request.
30+ */
31+ usage ?: CompletionUsage ;
32+ }
33+
34+ export interface ChatCompletionChunk {
35+ /**
36+ * The object type, which is always `chat.completion.chunk`.
37+ */
38+ object : "chat.completion.chunk" ;
39+ /**
40+ * A unique identifier for the chat completion. Each chunk has the same ID.
41+ */
42+ id : string ;
43+ /**
44+ * The model to generate the completion.
45+ */
46+ model : string ;
47+ /**
48+ * A list of chat completion choices. Can contain more than one elements if `n` is
49+ * greater than 1. Can also be empty for the last chunk if you set
50+ * `stream_options: {"include_usage": true}`.
51+ */
52+ choices : Array < ChatCompletionChunk . Choice > ;
53+ /**
54+ * The Unix timestamp (in seconds) of when the chat completion was created. Each
55+ * chunk has the same timestamp.
56+ */
57+ created : number ;
58+ /**
59+ * Usage statistics for the completion request.
60+ */
61+ usage ?: CompletionUsage ;
62+ }
63+
64+ export namespace ChatCompletion {
65+
66+ export interface Choice {
67+ /**
68+ * The index of the choice in the list of choices.
69+ */
70+ index : number ;
71+ /**
72+ * A chat completion message generated by the model.
73+ */
74+ message : ChatCompletionMessage ;
75+ /**
76+ * The reason the model stopped generating tokens. This will be `stop` if the model
77+ * hit a natural stop point or a provided stop sequence, `length` if the maximum
78+ * number of tokens specified in the request was reached, `content_filter` if
79+ * content was omitted due to a flag from our content filters, `tool_calls` if the
80+ * model called a tool, or `function_call` (deprecated) if the model called a
81+ * function.
82+ */
83+ finish_reason : "stop" | "length" | "tool_calls" | "content_filter" | "function_call" ;
84+ /**
85+ * Log probability information for the choice.
86+ */
87+ logprobs ?: null ;
88+ }
89+ }
90+
91+ export namespace ChatCompletionChunk {
92+
93+ export interface Choice {
94+ /**
95+ * The index of the choice in the list of choices.
96+ */
97+ index : number ;
98+ /**
99+ * A chat completion delta generated by streamed model responses.
100+ */
101+ delta : Choice . Delta ;
102+ /**
103+ * The reason the model stopped generating tokens. This will be `stop` if the model
104+ * hit a natural stop point or a provided stop sequence, `length` if the maximum
105+ * number of tokens specified in the request was reached, `content_filter` if
106+ * content was omitted due to a flag from our content filters, `tool_calls` if the
107+ * model called a tool, or `function_call` (deprecated) if the model called a
108+ * function.
109+ */
110+ finish_reason : "stop" | "length" | "tool_calls" | "content_filter" | "function_call" | null ;
111+ /**
112+ * Log probability information for the choice.
113+ */
114+ logprobs ?: null ;
115+ }
116+
117+ export namespace Choice {
118+
119+ export interface Delta {
120+ /**
121+ * The role of the author of this message.
122+ */
123+ role ?: "developer" | "system" | "user" | "assistant" | "tool" ;
124+ /**
125+ * The contents of the chunk message.
126+ */
127+ content ?: string | null ;
128+ }
129+ }
130+ }
131+
132+ export interface ChatCompletionMessage {
133+ /**
134+ * The role of the author of this message.
135+ */
136+ role : "assistant" | "user" | "system" ;
137+ /**
138+ * The contents of the message.
139+ */
140+ content : string | null ;
141+ }
142+
143+ export interface CompletionUsage {
144+ /**
145+ * Number of tokens in the generated completion.
146+ */
147+ completion_tokens : number ;
148+ /**
149+ * Number of tokens in the prompt.
150+ */
151+ prompt_tokens : number ;
152+ /**
153+ * Total number of tokens used in the request (prompt + completion).
154+ */
155+ total_tokens : number ;
156+ }