@@ -33,7 +33,7 @@ openai = OpenAI::Client.new(
3333)
3434
3535chat_completion = openai.chat.completions.create(
36- messages: [{role: : user , content: " Say this is a test" }],
36+ messages: [{role: "user", content: "Say this is a test"}],
3737 model: :"gpt-4.1"
3838)
3939
@@ -48,7 +48,7 @@ When using sorbet, it is recommended to use model classes as below. This provide
4848
4949``` ruby
5050openai.chat.completions.create(
51- messages: [OpenAI ::Models ::Chat ::ChatCompletionUserMessageParam .new (role: : user , content: " Say this is a test" )],
51+ messages: [OpenAI::Models::Chat::ChatCompletionUserMessageParam.new(role: "user", content: "Say this is a test")],
5252 model: :"gpt-4.1"
5353)
5454```
@@ -80,7 +80,7 @@ We provide support for streaming responses using Server-Sent Events (SSE).
8080
8181``` ruby
8282stream = openai.chat.completions.stream_raw(
83- messages: [{role: : user , content: " Say this is a test" }],
83+ messages: [{role: "user", content: "Say this is a test"}],
8484 model: :"gpt-4.1"
8585)
8686
@@ -97,11 +97,11 @@ Request parameters that correspond to file uploads can be passed as `StringIO`,
9797require "pathname"
9898
9999# using `Pathname`, the file will be lazily read, without reading everything in to memory
100- file_object = openai.files.create(file: Pathname (" input.jsonl" ), purpose: : "fine-tune" )
100+ file_object = openai.files.create(file: Pathname("input.jsonl"), purpose: "fine-tune")
101101
102102file = File.read("input.jsonl")
103103# using `StringIO`, useful if you already have the data in memory
104- file_object = openai.files.create(file: StringIO .new (file), purpose: : "fine-tune" )
104+ file_object = openai.files.create(file: StringIO.new(file), purpose: "fine-tune")
105105
106106puts(file_object.id)
107107```
@@ -150,7 +150,7 @@ openai = OpenAI::Client.new(
150150
151151# Or, configure per-request:
152152openai.chat.completions.create(
153- messages: [{role: : user , content: " How can I get the name of the current day in JavaScript?" }],
153+ messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}],
154154 model: :"gpt-4.1" ,
155155 request_options: {max_retries: 5 }
156156)
@@ -172,7 +172,7 @@ openai = OpenAI::Client.new(
172172
173173# Or, configure per-request:
174174openai.chat.completions.create(
175- messages: [{role: : user , content: " How can I list all files in a directory using Python?" }],
175+ messages: [{role: "user", content: "How can I list all files in a directory using Python?"}],
176176 model: :"gpt-4.1" ,
177177 request_options: {timeout: 5 }
178178)
@@ -189,13 +189,13 @@ In all places where a `BaseModel` type is specified, vanilla Ruby `Hash` can als
189189``` ruby
190190# This has tooling readability, for auto-completion, static analysis, and goto definition with supported language services
191191params = OpenAI ::Models ::Chat ::CompletionCreateParams .new (
192- messages: [OpenAI ::Models ::Chat ::ChatCompletionUserMessageParam .new (role: : user , content: " Say this is a test" )],
192+ messages: [OpenAI::Models::Chat::ChatCompletionUserMessageParam.new(role: "user", content: "Say this is a test")],
193193 model: :"gpt-4.1"
194194)
195195
196196# This also works
197197params = {
198- messages: [{role: : user , content: " Say this is a test" }],
198+ messages: [{role: "user", content: "Say this is a test"}],
199199 model: :"gpt-4.1"
200200}
201201```
@@ -257,7 +257,7 @@ It is possible to pass a compatible model / parameter class to a method that exp
257257
258258``` ruby
259259params = OpenAI ::Models ::Chat ::CompletionCreateParams .new (
260- messages: [OpenAI ::Models ::Chat ::ChatCompletionUserMessageParam .new (role: : user , content: " Say this is a test" )],
260+ messages: [OpenAI::Models::Chat::ChatCompletionUserMessageParam.new(role: "user", content: "Say this is a test")],
261261 model: :"gpt-4.1"
262262)
263263openai.chat.completions.create(** params)
0 commit comments