
Commit 24dfc31

Adds support for image generation and responses endpoints (#69)
* update gitignore
* adding example for create_edit to completion.jl
* added request to the images/generations endpoint
* added support for responses w/ examples
* fix http kwargs and tests
* adding functions example
* examples folder
* example up
* fixes streamcallback

---------

Co-authored-by: = <=>
1 parent 5935334 commit 24dfc31

File tree

3 files changed (+171 lines, -7 lines):

* examples/functions_example.jl
* src/OpenAI.jl
* test/responses.jl

examples/functions_example.jl

Lines changed: 24 additions & 0 deletions
@@ -0,0 +1,24 @@
## Example of using Functions for Julia


## Functions
tools = [
    Dict(
        "type" => "function",
        "name" => "get_avg_temperature",
        "description" => "Get average temperature in a given location",
        "parameters" => Dict(
            "type" => "object",
            "properties" => Dict(
                "location" => Dict(
                    "type" => "string",
                    "description" => "The city with no spaces, e.g. SanFrancisco",
                )
            ),
            "required" => ["location"],
        )
    )
]
resp = create_responses(ENV["OPENAI_API_KEY"], "What is the avg temp in New York?"; tools=tools, tool_choice="auto")

resp.response.output

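The example above assumes the package is already loaded (`using OpenAI`). A minimal, hedged sketch of how one might inspect the resulting tool call follows; the `type`, `name`, and `arguments` field names are assumptions about the Responses API output items, not something this commit spells out:

```julia
# Continues from the example above (`resp` already defined).
# Field names below are assumptions about the Responses API payload.
using JSON3

for item in resp.response.output
    if get(item, :type, "") == "function_call"
        args = JSON3.read(item.arguments)   # arguments arrive as a JSON string
        println("Model requested ", item.name, " for location = ", args.location)
    end
end
```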
src/OpenAI.jl

Lines changed: 88 additions & 7 deletions
@@ -100,7 +100,10 @@ function request_body_live(url; method, input, headers, streamcallback, kwargs..
         r = HTTP.startread(stream) # start reading the response
         isdone = false

-        while !eof(stream) || !isdone
+        while !isdone
+            if eof(stream)
+                break
+            end
             # Extract all available messages
             masterchunk = String(readavailable(stream))

@@ -114,6 +117,7 @@ function request_body_live(url; method, input, headers, streamcallback, kwargs..
             for chunk in chunks
                 if occursin(chunk, "data: [DONE]") # TODO - maybe don't strip, but instead us a regex in the endswith call
                     isdone = true
+                    break
                 end

                 # call the callback (if present) on the latest chunk
@@ -180,14 +184,14 @@ function _request(api::AbstractString,
        return if isnothing(streamcallback)
            OpenAIResponse(resp.status, JSON3.read(body))
        else
-            # assemble the streaming response body into a proper JSON object
-            lines = split(body, "\n") # split body into lines
+            # Assemble the streaming response body into a proper JSON object
+            lines = split(body, "\n") # Split body into lines

-            # throw out empty lines, skip "data: [DONE] bits
-            lines = filter(x -> !isempty(x) && !occursin("[DONE]", x), lines)
+            # Filter out empty lines and lines that are not JSON (e.g., "event: ...")
+            lines = filter(x -> !isempty(x) && startswith(x, "data: "), lines)

-            # read each line, which looks like "data: {<json elements>}"
-            parsed = map(line -> JSON3.read(line[6:end]), lines)
+            # Parse each line, removing the "data: " prefix
+            parsed = map(line -> JSON3.read(line[7:end]), lines)

            OpenAIResponse(resp.status, parsed)
        end
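To see what the new filtering and prefix handling do, here is a small standalone sketch run over a made-up server-sent-events body (the event names and payloads are invented for illustration; the real stream contents depend on the endpoint):

```julia
using JSON3

# Invented SSE-style body: "event: ..." lines carry no JSON, "data: ..." lines do.
body = """
event: response.output_text.delta
data: {"type":"response.output_text.delta","delta":"Hel"}

event: response.output_text.delta
data: {"type":"response.output_text.delta","delta":"lo!"}
"""

lines = split(body, "\n")
lines = filter(x -> !isempty(x) && startswith(x, "data: "), lines)
parsed = map(line -> JSON3.read(line[7:end]), lines)   # drop the 6-character "data: " prefix

print(join(chunk.delta for chunk in parsed))           # prints "Hello!"
```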
@@ -466,6 +470,82 @@ end
 
 include("assistants.jl")
 
+
+"""
+Create responses
+
+https://platform.openai.com/docs/api-reference/responses/create
+
+# Arguments:
+- `api_key::String`: OpenAI API key
+- `input`: The input to generate a response for, as a `String` or an array of
+    message `Dict`s (e.g. for image input; see the examples below).
+- `model::String`: Model id. Defaults to "gpt-4o-mini".
+- `kwargs...`: Additional arguments to pass to the API.
+- `tools`: Optional array of tool definitions (e.g. function schemas,
+    `web_search_preview`, `file_search`) the model may call; passed via `kwargs...`.
+
+# Examples:
+```julia
+
+## Image input
+input = [Dict("role" => "user",
+    "content" => [Dict("type" => "input_text", "text" => "What is in this image?"),
+        Dict("type" => "input_image", "image_url" => "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg")])
+]
+create_responses(api_key, input)
+
+## Web search
+create_responses(api_key, "What was a positive news story from today?"; tools=[Dict("type" => "web_search_preview")])
+
+## File search - fails because example vector store does not exist
+tools = [Dict("type" => "file_search",
+    "vector_store_ids" => ["vs_1234567890"],
+    "max_num_results" => 20)]
+create_responses(api_key, "What are the attributes of an ancient brown dragon?"; tools=tools)
+
+## Streaming
+resp = create_responses(api_key, "Hello!"; instructions="You are a helpful assistant.", stream=true, streamcallback = x->println(x))
+
+## Functions
+tools = [
+    Dict(
+        "type" => "function",
+        "name" => "get_current_weather",
+        "description" => "Get the current weather in a given location",
+        "parameters" => Dict(
+            "type" => "object",
+            "properties" => Dict(
+                "location" => Dict(
+                    "type" => "string",
+                    "description" => "The city and state, e.g. San Francisco, CA",
+                ),
+                "unit" => Dict("type" => "string", "enum" => ["celsius", "fahrenheit"]),
+            ),
+            "required" => ["location", "unit"],
+        )
+    )
+]
+resp = create_responses(api_key, "What is the weather in Boston?"; tools=tools, tool_choice="auto")
+
+## Reasoning
+
+response = create_responses(api_key, "How much wood would a woodchuck chuck?";
+    model = "o3-mini",
+    reasoning=Dict("effort" => "high"))
+```
+
+"""
+function create_responses(api_key::String, input, model="gpt-4o-mini"; http_kwargs::NamedTuple = NamedTuple(), kwargs...)
+    return openai_request("responses",
+        api_key;
+        method = "POST",
+        input = input,
+        model = model,
+        http_kwargs = http_kwargs,
+        kwargs...)
+end
+
 export OpenAIResponse
 export list_models
 export retrieve_model
@@ -500,5 +580,6 @@ export list_runs
 export retrieve_run
 export delete_run
 export modify_run
+export create_responses
 
 end # module
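Since the commit message mentions fixing http kwargs, a quick usage sketch for the newly exported function follows; treating `http_kwargs` as a pass-through to HTTP.jl request keywords (here `readtimeout`) is an assumption about the surrounding plumbing, not something stated in this diff:

```julia
using OpenAI

# Hedged sketch: raise the HTTP read timeout for a slower reasoning request.
# `readtimeout` is a standard HTTP.jl keyword; assuming it is forwarded unchanged.
resp = create_responses(ENV["OPENAI_API_KEY"],
    "How much wood would a woodchuck chuck?";
    model = "o3-mini",
    reasoning = Dict("effort" => "high"),
    http_kwargs = (readtimeout = 120,))

resp.status            # 200 on success
resp.response.output   # array of output items, as in the docstring examples
```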

test/responses.jl

Lines changed: 59 additions & 0 deletions
@@ -0,0 +1,59 @@

@testset "Responses" begin
    ## Image response tag
    input = [Dict("role" => "user",
        "content" => [Dict("type" => "input_text", "text" => "What is in this image?"),
            Dict("type" => "input_image", "image_url" => "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg")])
    ]
    resp = create_responses(ENV["OPENAI_API_KEY"], input)
    if !=(resp.status, 200)
        @test false
    end

    ## Web search
    resp = create_responses(ENV["OPENAI_API_KEY"], "What was a positive news story from today?"; tools=[Dict("type" => "web_search_preview")])
    if !=(resp.status, 200)
        @test false
    end

    ## Streaming
    resp = create_responses(ENV["OPENAI_API_KEY"], "Hello!"; instructions="You are a helpful assistant.", stream=true, streamcallback = x->println(x))
    if !=(resp.status, 200)
        @test false
    end

    ## Functions
    tools = [
        Dict(
            "type" => "function",
            "name" => "get_current_weather",
            "description" => "Get the current weather in a given location",
            "parameters" => Dict(
                "type" => "object",
                "properties" => Dict(
                    "location" => Dict(
                        "type" => "string",
                        "description" => "The city and state, e.g. San Francisco, CA",
                    ),
                    "unit" => Dict("type" => "string", "enum" => ["celsius", "fahrenheit"]),
                ),
                "required" => ["location", "unit"],
            )
        )
    ]
    resp = create_responses(ENV["OPENAI_API_KEY"], "What is the weather in Boston?"; tools=tools, tool_choice="auto")
    if !=(resp.status, 200)
        @test false
    end

    ## Reasoning

    resp = create_responses(ENV["OPENAI_API_KEY"], "How much wood would a woodchuck chuck?";
        model = "o3-mini",
        reasoning=Dict("effort" => "high"))
    if !=(resp.status, 200)
        @test false
    end

end
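These are live-API tests: every call needs `ENV["OPENAI_API_KEY"]` and spends tokens. A hedged sketch of how a `runtests.jl` might gate the new file (illustration only; the actual test wiring is not shown in this commit):

```julia
# Illustration only: include the live Responses tests only when a key is present.
using Test

if haskey(ENV, "OPENAI_API_KEY")
    include("responses.jl")
else
    @info "OPENAI_API_KEY not set; skipping live Responses tests."
end
```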
