From a5a028468a1a4a68700d52b8297b8b73ef950db7 Mon Sep 17 00:00:00 2001 From: J S <49557684+svilupp@users.noreply.github.com> Date: Sat, 9 Aug 2025 10:53:56 +0100 Subject: [PATCH 1/7] Refine StreamCallbacks integration --- Project.toml | 5 +++- src/OpenAI.jl | 52 +++++++++++++++++++++++++++++++++++++++++ test/runtests.jl | 27 +++++++++++++-------- test/streamcallbacks.jl | 27 +++++++++++++++++++++ 4 files changed, 100 insertions(+), 11 deletions(-) create mode 100644 test/streamcallbacks.jl diff --git a/Project.toml b/Project.toml index 121f4d3..0b95dd3 100644 --- a/Project.toml +++ b/Project.toml @@ -7,17 +7,20 @@ version = "0.11.0" Dates = "ade2ca70-3891-5945-98fb-dc099432e06a" HTTP = "cd3eb016-35fb-5094-929b-558a96fad6f3" JSON3 = "0f8b85d8-7281-11e9-16c2-39a750bddbf1" +StreamCallbacks = "c1b9e933-98a0-46fc-8ea7-3b58b195fb0a" [compat] Dates = "1" HTTP = "1" JSON3 = "1" +StreamCallbacks = "0.6" julia = "1" [extras] JET = "c3a54625-cd67-489e-a8e7-0a5a0ff4e31b" Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +Sockets = "6462fe0b-24de-5631-8697-dd941f90decc" [targets] -test = ["JET", "Pkg", "Test"] +test = ["JET", "Pkg", "Test", "Sockets"] diff --git a/src/OpenAI.jl b/src/OpenAI.jl index 6e501be..8516547 100644 --- a/src/OpenAI.jl +++ b/src/OpenAI.jl @@ -3,6 +3,7 @@ module OpenAI using JSON3 using HTTP using Dates +using StreamCallbacks abstract type AbstractOpenAIProvider end Base.@kwdef struct OpenAIProvider <: AbstractOpenAIProvider @@ -223,6 +224,22 @@ function openai_request(api::AbstractString, _request(api, provider; method, http_kwargs, streamcallback = streamcallback, kwargs...) end +""" + configure_callback!(streamcallback, schema; kwargs...) + +Configure a streaming callback for OpenAI APIs. If `streamcallback` is an IO or +Channel, a new `StreamCallback` is created. The callback flavor defaults to +`OpenAIStream` and streaming-related keyword arguments are appended to +`kwargs`. +Returns the configured callback and updated keyword arguments. +""" +function configure_callback!(streamcallback, schema; kwargs...) + cb = streamcallback isa StreamCallback ? streamcallback : StreamCallback(out = streamcallback) + isnothing(cb.flavor) && (cb.flavor = OpenAIStream()) + new_kwargs = (; kwargs..., stream = true, stream_options = (; include_usage = true)) + return cb, new_kwargs +end + struct OpenAIResponse{R} status::Int16 response::R @@ -393,6 +410,41 @@ function create_chat(provider::AbstractOpenAIProvider, kwargs...) end +""" + create_chat(schema, api_key, model, conversation; http_kwargs=NamedTuple(), + streamcallback=nothing, kwargs...) + +Fallback-compatible method that integrates `StreamCallbacks.jl`. When a +`streamcallback` is provided that is *not* a plain function, the request is +handled via `StreamCallbacks.streamed_request!` with the callback configured by +`configure_callback!`. +""" +function create_chat(schema, + api_key::AbstractString, + model::AbstractString, + conversation; + provider = DEFAULT_PROVIDER, + http_kwargs::NamedTuple = NamedTuple(), + streamcallback::Any = nothing, + kwargs...) + if !isnothing(streamcallback) && !(streamcallback isa Function) + url = build_url(provider, "chat/completions") + headers = auth_header(provider, api_key) + streamcallback, new_kwargs = configure_callback!(streamcallback, schema; kwargs...) + input = build_params((; messages = conversation, model, new_kwargs...)) + resp = streamed_request!(streamcallback, url, headers, input; http_kwargs...) 
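+        # `streamed_request!` fires the callback on each SSE chunk and returns a
+        # response whose body is the re-assembled completion JSON, so it can be
+        # parsed like a regular non-streamed reply.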
+ return OpenAIResponse(resp.status, JSON3.read(resp.body)) + else + return _request("chat/completions", provider, api_key; + method = "POST", + http_kwargs = http_kwargs, + streamcallback = streamcallback, + model = model, + messages = conversation, + kwargs...) + end +end + """ Create embeddings diff --git a/test/runtests.jl b/test/runtests.jl index 76453c0..ba5b88d 100755 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -21,17 +21,24 @@ end @testset "OpenAI.jl" begin printstyled(color = :blue, "\n") - @testset "models" begin - include("models.jl") - end - @testset "chatcompletion" begin - include("chatcompletion.jl") - end - @testset "completion" begin - include("completion.jl") + if haskey(ENV, "OPENAI_API_KEY") + @testset "models" begin + include("models.jl") + end + @testset "chatcompletion" begin + include("chatcompletion.jl") + end + @testset "completion" begin + include("completion.jl") + end + @testset "embeddings" begin + include("embeddings.jl") + end + else + @info "OPENAI_API_KEY not set; skipping live API tests" end - @testset "embeddings" begin - include("embeddings.jl") + @testset "streamcallbacks" begin + include("streamcallbacks.jl") end # @testset "assistants" begin # include("assistants.jl") diff --git a/test/streamcallbacks.jl b/test/streamcallbacks.jl new file mode 100644 index 0000000..3008ea2 --- /dev/null +++ b/test/streamcallbacks.jl @@ -0,0 +1,27 @@ +using Test +using HTTP +using Sockets +using OpenAI +using StreamCallbacks + +@testset "StreamCallbacks integration" begin + port = 9178 + handler(req) = HTTP.Response(200, + ["Content-Type" => "text/event-stream"], + "data: {\"choices\": [{\"delta\": {\"content\": \"hello\"}}]}\n\n" * + "data: [DONE]\n\n") + server = HTTP.serve!(handler, Sockets.localhost, port; verbose = false) + try + io = IOBuffer() + provider = OpenAI.OpenAIProvider(base_url = "http://127.0.0.1:$port") + resp = OpenAI.create_chat(nothing, "key", "model", + [Dict("role" => "user", "content" => "hi")]; + provider = provider, + streamcallback = io) + seekstart(io) + @test occursin("hello", String(take!(io))) + @test resp.status == 200 + finally + HTTP.forceclose(server) + end +end From bac27c05a327081a0b53f2a7fd1aa28400a86c2d Mon Sep 17 00:00:00 2001 From: J S <49557684+svilupp@users.noreply.github.com> Date: Sun, 10 Aug 2025 10:27:36 +0100 Subject: [PATCH 2/7] Document StreamCallbacks examples --- README.md | 49 +++++++++++++++++++++++++++++++++++++ docs/src/index.md | 6 ++++- docs/src/streaming.md | 43 ++++++++++++++++++++++++++++++++ examples/streamcallbacks.jl | 33 +++++++++++++++++++++++++ 4 files changed, 130 insertions(+), 1 deletion(-) create mode 100644 docs/src/streaming.md create mode 100644 examples/streamcallbacks.jl diff --git a/README.md b/README.md index d7cefba..3026fa3 100644 --- a/README.md +++ b/README.md @@ -64,6 +64,55 @@ response = create_chat( For more use cases [see tests](https://github.com/JuliaML/OpenAI.jl/tree/main/test). +## Streaming with StreamCallbacks + +OpenAI.jl integrates [StreamCallbacks.jl](https://github.com/svilupp/StreamCallbacks.jl) for +streaming responses. + +### 1. Stream to any `IO` + +```julia +create_chat(secret_key, model, messages; streamcallback=stdout) +``` + +### 2. Capture stream chunks + +```julia +using StreamCallbacks +cb = StreamCallback() +create_chat(secret_key, model, messages; streamcallback=cb) +cb.chunks +``` + +### 3. Customize printing + +```julia +using StreamCallbacks +import StreamCallbacks: print_content + +function print_content(io::IO, content; kwargs...) 
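+    # `io` is the callback's output sink (`cb.out`); `content` is the text
+    # extracted from the current chunk.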
+ printstyled(io, "🌊 $content"; color=:cyan) +end + +cb = StreamCallback() +create_chat(secret_key, model, messages; streamcallback=cb) +``` + +To fully customize processing, you can overload `StreamCallbacks.callback`: + +```julia +using StreamCallbacks: callback, AbstractStreamCallback, AbstractStreamChunk, extract_content, print_content + +@inline function callback(cb::AbstractStreamCallback, chunk::AbstractStreamChunk; kwargs...) + processed_text = extract_content(cb.flavor, chunk; kwargs...) + isnothing(processed_text) && return nothing + print_content(cb.out, processed_text; kwargs...) + return nothing +end +``` + +See [`examples/streamcallbacks.jl`](examples/streamcallbacks.jl) for a full walkthrough. + ## Feature requests Feel free to open a PR, or file an issue if that's out of reach! diff --git a/docs/src/index.md b/docs/src/index.md index 3a336e3..de436a0 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -22,4 +22,8 @@ create_embeddings(api_key::String, input, model_id::String=DEFAULT_EMBEDDING_MOD ```@docs create_images(api_key::String, prompt, n::Integer=1, size::String="256x256"; http_kwargs::NamedTuple=NamedTuple(), kwargs...) -``` \ No newline at end of file +``` + +## Streaming + +See [Streaming](streaming.md) for examples using StreamCallbacks. diff --git a/docs/src/streaming.md b/docs/src/streaming.md new file mode 100644 index 0000000..2166414 --- /dev/null +++ b/docs/src/streaming.md @@ -0,0 +1,43 @@ +# Streaming + +OpenAI.jl integrates [StreamCallbacks.jl](https://github.com/svilupp/StreamCallbacks.jl) for streaming responses. + +## 1. Stream to any `IO` +```julia +create_chat(secret_key, model, messages; streamcallback=stdout) +``` + +## 2. Capture stream chunks +```julia +using StreamCallbacks +cb = StreamCallback() +create_chat(secret_key, model, messages; streamcallback=cb) +cb.chunks +``` + +## 3. Customize printing +```julia +using StreamCallbacks +import StreamCallbacks: print_content + +function print_content(io::IO, content; kwargs...) + printstyled(io, "🌊 $content"; color=:cyan) +end + +cb = StreamCallback() +create_chat(secret_key, model, messages; streamcallback=cb) +``` + +For complete control you can overload `StreamCallbacks.callback`: +```julia +using StreamCallbacks: callback, AbstractStreamCallback, AbstractStreamChunk, extract_content, print_content + +@inline function callback(cb::AbstractStreamCallback, chunk::AbstractStreamChunk; kwargs...) + processed_text = extract_content(cb.flavor, chunk; kwargs...) + isnothing(processed_text) && return nothing + print_content(cb.out, processed_text; kwargs...) + return nothing +end +``` + +See the `examples/streamcallbacks.jl` script for a full walkthrough. diff --git a/examples/streamcallbacks.jl b/examples/streamcallbacks.jl new file mode 100644 index 0000000..09643ff --- /dev/null +++ b/examples/streamcallbacks.jl @@ -0,0 +1,33 @@ +# Streaming examples using StreamCallbacks.jl +using OpenAI, StreamCallbacks + +api_key = get(ENV, "OPENAI_API_KEY", "") +model = "gpt-4o-mini" +messages = [Dict("role" => "user", "content" => "Write a short haiku about streams.")] + +# 1. Stream to stdout (no differences) +create_chat(api_key, model, messages; streamcallback=stdout) + +# 2. Stream with explicit StreamCallback to capture chunks +cb = StreamCallback() +create_chat(api_key, model, messages; streamcallback=cb) +@info "Received $(length(cb.chunks)) chunks" + +# 3. Customize printing via `print_content` +import StreamCallbacks: print_content +function print_content(io::IO, content; kwargs...) 
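+    # Note: redefining this `IO` method changes printing for every IO-backed
+    # stream callback in the current session.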
+ printstyled(io, "🌊 $content"; color=:cyan) +end +cb2 = StreamCallback() +create_chat(api_key, model, messages; streamcallback=cb2) + +# 4. Overload `callback` to change chunk handling +import StreamCallbacks: callback, AbstractStreamCallback, AbstractStreamChunk, extract_content +@inline function callback(cb::AbstractStreamCallback, chunk::AbstractStreamChunk; kwargs...) + processed_text = extract_content(cb.flavor, chunk; kwargs...) + isnothing(processed_text) && return nothing + print_content(cb.out, reverse(processed_text); kwargs...) + return nothing +end +cb3 = StreamCallback() +create_chat(api_key, model, messages; streamcallback=cb3) From 0f2c0234b8354271648e3a79c2ba88257d0bc070 Mon Sep 17 00:00:00 2001 From: J S <49557684+svilupp@users.noreply.github.com> Date: Sun, 10 Aug 2025 10:27:42 +0100 Subject: [PATCH 3/7] Use StreamCallbacks for streaming --- README.md | 7 +- docs/src/streaming.md | 7 +- examples/streamcallbacks.jl | 2 +- src/OpenAI.jl | 154 +++++++++++------------------------- test/runtests.jl | 6 +- test/streamcallbacks.jl | 16 +++- 6 files changed, 69 insertions(+), 123 deletions(-) diff --git a/README.md b/README.md index 3026fa3..7b045d7 100644 --- a/README.md +++ b/README.md @@ -78,7 +78,7 @@ create_chat(secret_key, model, messages; streamcallback=stdout) ### 2. Capture stream chunks ```julia -using StreamCallbacks +using OpenAI cb = StreamCallback() create_chat(secret_key, model, messages; streamcallback=cb) cb.chunks @@ -87,7 +87,7 @@ cb.chunks ### 3. Customize printing ```julia -using StreamCallbacks +using OpenAI import StreamCallbacks: print_content function print_content(io::IO, content; kwargs...) @@ -101,7 +101,8 @@ create_chat(secret_key, model, messages; streamcallback=cb) To fully customize processing, you can overload `StreamCallbacks.callback`: ```julia -using StreamCallbacks: callback, AbstractStreamCallback, AbstractStreamChunk, extract_content, print_content +using OpenAI +import StreamCallbacks: callback, AbstractStreamCallback, AbstractStreamChunk, extract_content, print_content @inline function callback(cb::AbstractStreamCallback, chunk::AbstractStreamChunk; kwargs...) processed_text = extract_content(cb.flavor, chunk; kwargs...) diff --git a/docs/src/streaming.md b/docs/src/streaming.md index 2166414..a8c450b 100644 --- a/docs/src/streaming.md +++ b/docs/src/streaming.md @@ -9,7 +9,7 @@ create_chat(secret_key, model, messages; streamcallback=stdout) ## 2. Capture stream chunks ```julia -using StreamCallbacks +using OpenAI cb = StreamCallback() create_chat(secret_key, model, messages; streamcallback=cb) cb.chunks @@ -17,7 +17,7 @@ cb.chunks ## 3. Customize printing ```julia -using StreamCallbacks +using OpenAI import StreamCallbacks: print_content function print_content(io::IO, content; kwargs...) @@ -30,7 +30,8 @@ create_chat(secret_key, model, messages; streamcallback=cb) For complete control you can overload `StreamCallbacks.callback`: ```julia -using StreamCallbacks: callback, AbstractStreamCallback, AbstractStreamChunk, extract_content, print_content +using OpenAI +import StreamCallbacks: callback, AbstractStreamCallback, AbstractStreamChunk, extract_content, print_content @inline function callback(cb::AbstractStreamCallback, chunk::AbstractStreamChunk; kwargs...) processed_text = extract_content(cb.flavor, chunk; kwargs...) 
diff --git a/examples/streamcallbacks.jl b/examples/streamcallbacks.jl index 09643ff..4ab9855 100644 --- a/examples/streamcallbacks.jl +++ b/examples/streamcallbacks.jl @@ -1,5 +1,5 @@ # Streaming examples using StreamCallbacks.jl -using OpenAI, StreamCallbacks +using OpenAI api_key = get(ENV, "OPENAI_API_KEY", "") model = "gpt-4o-mini" diff --git a/src/OpenAI.jl b/src/OpenAI.jl index 8516547..c8d4818 100644 --- a/src/OpenAI.jl +++ b/src/OpenAI.jl @@ -3,7 +3,16 @@ module OpenAI using JSON3 using HTTP using Dates -using StreamCallbacks +using StreamCallbacks: StreamCallback, OpenAIStream, streamed_request! +import StreamCallbacks: print_content + +""" + print_content(f::Function, text::AbstractString; kwargs...) + +Allow plain functions to be used as streaming sinks. The provided function will +receive each processed text chunk. +""" +print_content(f::Function, text::AbstractString; kwargs...) = f(text) abstract type AbstractOpenAIProvider end Base.@kwdef struct OpenAIProvider <: AbstractOpenAIProvider @@ -89,52 +98,8 @@ function request_body(url, method; input, headers, query, kwargs...) end function request_body_live(url; method, input, headers, streamcallback, kwargs...) - resp = nothing - - body = sprint() do output - resp = HTTP.open("POST", url, headers) do stream - body = String(take!(input)) - write(stream, body) - - HTTP.closewrite(stream) # indicate we're done writing to the request - - r = HTTP.startread(stream) # start reading the response - isdone = false - - while !isdone - if eof(stream) - break - end - # Extract all available messages - masterchunk = String(readavailable(stream)) - - # Split into subchunks on newlines. - # Occasionally, the streaming will append multiple messages together, - # and iterating through each line in turn will make sure that - # streamingcallback is called on each message in turn. - chunks = String.(filter(!isempty, split(masterchunk, "\n"))) - - # Iterate through each chunk in turn. - for chunk in chunks - if occursin(chunk, "data: [DONE]") # TODO - maybe don't strip, but instead us a regex in the endswith call - isdone = true - break - end - - # call the callback (if present) on the latest chunk - if !isnothing(streamcallback) - streamcallback(chunk) - end - - # append the latest chunk to the body - print(output, chunk) - end - end - HTTP.closeread(stream) - end - end - - return resp, body + resp = streamed_request!(streamcallback, url, headers, input; kwargs...) + return resp, resp.body end function status_error(resp, log = nothing) @@ -151,51 +116,35 @@ function _request(api::AbstractString, streamcallback = nothing, additional_headers::AbstractVector = Pair{String, String}[], kwargs...) - # add stream: True to the API call if a stream callback function is passed + cb = nothing if !isnothing(streamcallback) - kwargs = (kwargs..., stream = true) + cb, kwargs = configure_callback!(streamcallback; kwargs...) end params = build_params(kwargs) url = build_url(provider, api) - resp, body = let - # Add whatever other headers we were given - headers = vcat(auth_header(provider, api_key), additional_headers) - - if isnothing(streamcallback) - request_body(url, - method; - input = params, - headers = headers, - query = query, - http_kwargs...) - else - request_body_live(url; - method, - input = params, - headers = headers, - query = query, - streamcallback = streamcallback, - http_kwargs...) 
- end + headers = vcat(auth_header(provider, api_key), additional_headers) + + if isnothing(cb) + resp, body = request_body(url, + method; + input = params, + headers = headers, + query = query, + http_kwargs...) + else + resp, body = request_body_live(url; + method, + input = params, + headers = headers, + streamcallback = cb, + http_kwargs...) end + if resp.status >= 400 status_error(resp, body) else - return if isnothing(streamcallback) - OpenAIResponse(resp.status, JSON3.read(body)) - else - # Assemble the streaming response body into a proper JSON object - lines = split(body, "\n") # Split body into lines - - # Filter out empty lines and lines that are not JSON (e.g., "event: ...") - lines = filter(x -> !isempty(x) && startswith(x, "data: "), lines) - - # Parse each line, removing the "data: " prefix - parsed = map(line -> JSON3.read(line[7:end]), lines) - - OpenAIResponse(resp.status, parsed) - end + return OpenAIResponse(resp.status, JSON3.read(body)) end end @@ -225,15 +174,14 @@ function openai_request(api::AbstractString, end """ - configure_callback!(streamcallback, schema; kwargs...) + configure_callback!(streamcallback; kwargs...) -Configure a streaming callback for OpenAI APIs. If `streamcallback` is an IO or -Channel, a new `StreamCallback` is created. The callback flavor defaults to -`OpenAIStream` and streaming-related keyword arguments are appended to -`kwargs`. +Prepare a `StreamCallback` for OpenAI APIs. If `streamcallback` is an IO, Channel, +or `Function`, a new `StreamCallback` is created with `OpenAIStream` flavor. +Streaming-related keyword arguments are added to `kwargs`. Returns the configured callback and updated keyword arguments. """ -function configure_callback!(streamcallback, schema; kwargs...) +function configure_callback!(streamcallback; kwargs...) cb = streamcallback isa StreamCallback ? streamcallback : StreamCallback(out = streamcallback) isnothing(cb.flavor) && (cb.flavor = OpenAIStream()) new_kwargs = (; kwargs..., stream = true, stream_options = (; include_usage = true)) @@ -414,35 +362,20 @@ end create_chat(schema, api_key, model, conversation; http_kwargs=NamedTuple(), streamcallback=nothing, kwargs...) -Fallback-compatible method that integrates `StreamCallbacks.jl`. When a -`streamcallback` is provided that is *not* a plain function, the request is -handled via `StreamCallbacks.streamed_request!` with the callback configured by -`configure_callback!`. +Convenience overload for testing/debugging that forwards to `create_chat` with +support for `StreamCallback` objects. """ function create_chat(schema, api_key::AbstractString, model::AbstractString, conversation; - provider = DEFAULT_PROVIDER, http_kwargs::NamedTuple = NamedTuple(), streamcallback::Any = nothing, kwargs...) - if !isnothing(streamcallback) && !(streamcallback isa Function) - url = build_url(provider, "chat/completions") - headers = auth_header(provider, api_key) - streamcallback, new_kwargs = configure_callback!(streamcallback, schema; kwargs...) - input = build_params((; messages = conversation, model, new_kwargs...)) - resp = streamed_request!(streamcallback, url, headers, input; http_kwargs...) - return OpenAIResponse(resp.status, JSON3.read(resp.body)) - else - return _request("chat/completions", provider, api_key; - method = "POST", - http_kwargs = http_kwargs, - streamcallback = streamcallback, - model = model, - messages = conversation, - kwargs...) 
- end + return create_chat(api_key, model, conversation; + http_kwargs = http_kwargs, + streamcallback = streamcallback, + kwargs...) end """ @@ -599,6 +532,7 @@ function create_responses(api_key::String, input, model="gpt-4o-mini"; http_kwar end export OpenAIResponse +export StreamCallback export list_models export retrieve_model export create_chat diff --git a/test/runtests.jl b/test/runtests.jl index ba5b88d..c65cf45 100755 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -21,7 +21,9 @@ end @testset "OpenAI.jl" begin printstyled(color = :blue, "\n") - if haskey(ENV, "OPENAI_API_KEY") + if haskey(ENV, "OPENAI_API_KEY") && get(ENV, "OPENAI_RUN_LIVE_TESTS", "") == "true" + # Disable SSL verification in CI environments that inject self-signed certificates + ENV["JULIA_SSL_NO_VERIFY_HOSTS"] = "*" @testset "models" begin include("models.jl") end @@ -35,7 +37,7 @@ end include("embeddings.jl") end else - @info "OPENAI_API_KEY not set; skipping live API tests" + @info "Skipping live API tests" end @testset "streamcallbacks" begin include("streamcallbacks.jl") diff --git a/test/streamcallbacks.jl b/test/streamcallbacks.jl index 3008ea2..6d9ca2b 100644 --- a/test/streamcallbacks.jl +++ b/test/streamcallbacks.jl @@ -2,7 +2,6 @@ using Test using HTTP using Sockets using OpenAI -using StreamCallbacks @testset "StreamCallbacks integration" begin port = 9178 @@ -12,15 +11,24 @@ using StreamCallbacks "data: [DONE]\n\n") server = HTTP.serve!(handler, Sockets.localhost, port; verbose = false) try + provider = OpenAI.OpenAIProvider(api_key = "key", base_url = "http://127.0.0.1:$port") + io = IOBuffer() - provider = OpenAI.OpenAIProvider(base_url = "http://127.0.0.1:$port") - resp = OpenAI.create_chat(nothing, "key", "model", + resp = OpenAI.create_chat(provider, "model", [Dict("role" => "user", "content" => "hi")]; - provider = provider, streamcallback = io) seekstart(io) @test occursin("hello", String(take!(io))) @test resp.status == 200 + + buf = IOBuffer() + cbfunc = text -> write(buf, text) + resp2 = OpenAI.create_chat(provider, "model", + [Dict("role" => "user", "content" => "hi")]; + streamcallback = cbfunc) + seekstart(buf) + @test occursin("hello", String(take!(buf))) + @test resp2.status == 200 finally HTTP.forceclose(server) end From 78e6c5d582befcb0784517c2beb1427f82064603 Mon Sep 17 00:00:00 2001 From: J S <49557684+svilupp@users.noreply.github.com> Date: Sun, 10 Aug 2025 18:31:52 +0100 Subject: [PATCH 4/7] Format code with JuliaFormatter --- examples/functions_example.jl | 24 ++--- examples/streamcallbacks.jl | 13 +-- src/OpenAI.jl | 143 ++++++++++++++------------- src/assistants.jl | 180 +++++++++++++++++----------------- test/chatcompletion.jl | 4 +- test/responses.jl | 49 ++++----- 6 files changed, 211 insertions(+), 202 deletions(-) diff --git a/examples/functions_example.jl b/examples/functions_example.jl index 4419a56..85d92d8 100644 --- a/examples/functions_example.jl +++ b/examples/functions_example.jl @@ -1,24 +1,24 @@ ## Example of using Functions for Julia - ## Functions tools = [ Dict( - "type" => "function", - "name" => "get_avg_temperature", - "description" => "Get average temperature in a given location", - "parameters" => Dict( + "type" => "function", + "name" => "get_avg_temperature", + "description" => "Get average temperature in a given location", + "parameters" => Dict( "type" => "object", "properties" => Dict( "location" => Dict( - "type" => "string", - "description" => "The city with no spaces, e.g. 
SanFrancisco", - ) - ), - "required" => ["location"], + "type" => "string", + "description" => "The city with no spaces, e.g. SanFrancisco" ) + ), + "required" => ["location"] ) +) ] -resp = create_responses(ENV["OPENAI_API_KEY"], "What is the avg temp in New York?"; tools=tools, tool_choice="auto") +resp = create_responses(ENV["OPENAI_API_KEY"], "What is the avg temp in New York?"; + tools = tools, tool_choice = "auto") -resp.response.output \ No newline at end of file +resp.response.output diff --git a/examples/streamcallbacks.jl b/examples/streamcallbacks.jl index 4ab9855..addd9cf 100644 --- a/examples/streamcallbacks.jl +++ b/examples/streamcallbacks.jl @@ -6,23 +6,24 @@ model = "gpt-4o-mini" messages = [Dict("role" => "user", "content" => "Write a short haiku about streams.")] # 1. Stream to stdout (no differences) -create_chat(api_key, model, messages; streamcallback=stdout) +create_chat(api_key, model, messages; streamcallback = stdout) # 2. Stream with explicit StreamCallback to capture chunks cb = StreamCallback() -create_chat(api_key, model, messages; streamcallback=cb) +create_chat(api_key, model, messages; streamcallback = cb) @info "Received $(length(cb.chunks)) chunks" # 3. Customize printing via `print_content` import StreamCallbacks: print_content function print_content(io::IO, content; kwargs...) - printstyled(io, "🌊 $content"; color=:cyan) + printstyled(io, "🌊 $content"; color = :cyan) end cb2 = StreamCallback() -create_chat(api_key, model, messages; streamcallback=cb2) +create_chat(api_key, model, messages; streamcallback = cb2) # 4. Overload `callback` to change chunk handling -import StreamCallbacks: callback, AbstractStreamCallback, AbstractStreamChunk, extract_content +import StreamCallbacks: callback, AbstractStreamCallback, AbstractStreamChunk, + extract_content @inline function callback(cb::AbstractStreamCallback, chunk::AbstractStreamChunk; kwargs...) processed_text = extract_content(cb.flavor, chunk; kwargs...) isnothing(processed_text) && return nothing @@ -30,4 +31,4 @@ import StreamCallbacks: callback, AbstractStreamCallback, AbstractStreamChunk, e return nothing end cb3 = StreamCallback() -create_chat(api_key, model, messages; streamcallback=cb3) +create_chat(api_key, model, messages; streamcallback = cb3) diff --git a/src/OpenAI.jl b/src/OpenAI.jl index c8d4818..06e1d5c 100644 --- a/src/OpenAI.jl +++ b/src/OpenAI.jl @@ -50,14 +50,14 @@ function auth_header(::OpenAIProvider, api_key::AbstractString) isempty(api_key) && throw(ArgumentError("api_key cannot be empty")) [ "Authorization" => "Bearer $api_key", - "Content-Type" => "application/json", + "Content-Type" => "application/json" ] end function auth_header(::AzureProvider, api_key::AbstractString) isempty(api_key) && throw(ArgumentError("api_key cannot be empty")) [ "api-key" => api_key, - "Content-Type" => "application/json", + "Content-Type" => "application/json" ] end @@ -108,14 +108,14 @@ function status_error(resp, log = nothing) end function _request(api::AbstractString, - provider::AbstractOpenAIProvider, - api_key::AbstractString = provider.api_key; - method, - query = nothing, - http_kwargs, - streamcallback = nothing, - additional_headers::AbstractVector = Pair{String, String}[], - kwargs...) + provider::AbstractOpenAIProvider, + api_key::AbstractString = provider.api_key; + method, + query = nothing, + http_kwargs, + streamcallback = nothing, + additional_headers::AbstractVector = Pair{String, String}[], + kwargs...) 
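+    # A user-supplied sink is wrapped in a `StreamCallback` and the request is
+    # switched into streaming mode by `configure_callback!` below.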
cb = nothing if !isnothing(streamcallback) cb, kwargs = configure_callback!(streamcallback; kwargs...) @@ -126,14 +126,16 @@ function _request(api::AbstractString, headers = vcat(auth_header(provider, api_key), additional_headers) if isnothing(cb) - resp, body = request_body(url, + resp, + body = request_body(url, method; input = params, headers = headers, query = query, http_kwargs...) else - resp, body = request_body_live(url; + resp, + body = request_body_live(url; method, input = params, headers = headers, @@ -149,11 +151,11 @@ function _request(api::AbstractString, end function openai_request(api::AbstractString, - api_key::AbstractString; - method, - http_kwargs, - streamcallback = nothing, - kwargs...) + api_key::AbstractString; + method, + http_kwargs, + streamcallback = nothing, + kwargs...) global DEFAULT_PROVIDER _request(api, DEFAULT_PROVIDER, @@ -165,11 +167,11 @@ function openai_request(api::AbstractString, end function openai_request(api::AbstractString, - provider::AbstractOpenAIProvider; - method, - http_kwargs, - streamcallback = nothing, - kwargs...) + provider::AbstractOpenAIProvider; + method, + http_kwargs, + streamcallback = nothing, + kwargs...) _request(api, provider; method, http_kwargs, streamcallback = streamcallback, kwargs...) end @@ -182,7 +184,8 @@ Streaming-related keyword arguments are added to `kwargs`. Returns the configured callback and updated keyword arguments. """ function configure_callback!(streamcallback; kwargs...) - cb = streamcallback isa StreamCallback ? streamcallback : StreamCallback(out = streamcallback) + cb = streamcallback isa StreamCallback ? streamcallback : + StreamCallback(out = streamcallback) isnothing(cb.flavor) && (cb.flavor = OpenAIStream()) new_kwargs = (; kwargs..., stream = true, stream_options = (; include_usage = true)) return cb, new_kwargs @@ -221,8 +224,8 @@ Retrieve model For additional details, visit """ function retrieve_model(api_key::String, - model_id::String; - http_kwargs::NamedTuple = NamedTuple()) + model_id::String; + http_kwargs::NamedTuple = NamedTuple()) return openai_request("models/$(model_id)", api_key; method = "GET", @@ -246,9 +249,9 @@ For more details about the endpoint and additional arguments, visit map(r->r["choices"][1]["delta"], CC.response) ``` """ function create_chat(api_key::String, - model_id::String, - messages; - http_kwargs::NamedTuple = NamedTuple(), - streamcallback = nothing, - kwargs...) + model_id::String, + messages; + http_kwargs::NamedTuple = NamedTuple(), + streamcallback = nothing, + kwargs...) return openai_request("chat/completions", api_key; method = "POST", @@ -343,11 +346,11 @@ function create_chat(api_key::String, kwargs...) end function create_chat(provider::AbstractOpenAIProvider, - model_id::String, - messages; - http_kwargs::NamedTuple = NamedTuple(), - streamcallback = nothing, - kwargs...) + model_id::String, + messages; + http_kwargs::NamedTuple = NamedTuple(), + streamcallback = nothing, + kwargs...) return openai_request("chat/completions", provider; method = "POST", @@ -366,12 +369,12 @@ Convenience overload for testing/debugging that forwards to `create_chat` with support for `StreamCallback` objects. """ function create_chat(schema, - api_key::AbstractString, - model::AbstractString, - conversation; - http_kwargs::NamedTuple = NamedTuple(), - streamcallback::Any = nothing, - kwargs...) + api_key::AbstractString, + model::AbstractString, + conversation; + http_kwargs::NamedTuple = NamedTuple(), + streamcallback::Any = nothing, + kwargs...) 
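+    # `schema` is accepted for call-site compatibility but is not used here.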
return create_chat(api_key, model, conversation; http_kwargs = http_kwargs, streamcallback = streamcallback, @@ -394,10 +397,10 @@ Create embeddings For additional details about the endpoint, visit """ function create_embeddings(api_key::String, - input, - model_id::String = DEFAULT_EMBEDDING_MODEL_ID; - http_kwargs::NamedTuple = NamedTuple(), - kwargs...) + input, + model_id::String = DEFAULT_EMBEDDING_MODEL_ID; + http_kwargs::NamedTuple = NamedTuple(), + kwargs...) return openai_request("embeddings", api_key; method = "POST", @@ -407,16 +410,16 @@ function create_embeddings(api_key::String, kwargs...) end function create_embeddings(provider::AbstractOpenAIProvider, - input; - model_id::String = DEFAULT_EMBEDDING_MODEL_ID, - http_kwargs::NamedTuple=NamedTuple(), - streamcallback=nothing, - kwargs...) + input; + model_id::String = DEFAULT_EMBEDDING_MODEL_ID, + http_kwargs::NamedTuple = NamedTuple(), + streamcallback = nothing, + kwargs...) return OpenAI.openai_request("embeddings", provider; - method="POST", - http_kwargs=http_kwargs, - model=model_id, + method = "POST", + http_kwargs = http_kwargs, + model = model_id, input, kwargs...) end @@ -440,11 +443,11 @@ download like this: `download(r.response["data"][begin]["url"], "image.png")` """ function create_images(api_key::String, - prompt, - n::Integer = 1, - size::String = "256x256"; - http_kwargs::NamedTuple = NamedTuple(), - kwargs...) + prompt, + n::Integer = 1, + size::String = "256x256"; + http_kwargs::NamedTuple = NamedTuple(), + kwargs...) return openai_request("images/generations", api_key; method = "POST", @@ -455,7 +458,6 @@ end include("assistants.jl") - """ Create responses @@ -521,14 +523,15 @@ response = create_responses(api_key, "How much wood would a woodchuck chuck?"; ``` """ -function create_responses(api_key::String, input, model="gpt-4o-mini"; http_kwargs::NamedTuple = NamedTuple(), kwargs...) - return openai_request("responses", - api_key; - method = "POST", - input = input, - model=model, - http_kwargs = http_kwargs, - kwargs...) +function create_responses(api_key::String, input, model = "gpt-4o-mini"; + http_kwargs::NamedTuple = NamedTuple(), kwargs...) + return openai_request("responses", + api_key; + method = "POST", + input = input, + model = model, + http_kwargs = http_kwargs, + kwargs...) 
end export OpenAIResponse diff --git a/src/assistants.jl b/src/assistants.jl index 31466ff..b8c732a 100644 --- a/src/assistants.jl +++ b/src/assistants.jl @@ -63,14 +63,14 @@ Main.OpenAI.OpenAIResponse{JSON3.Object{Vector{UInt8}, Vector{UInt64}}}(200, { """ function create_assistant(api_key::String, - model_id::String; - name::String = "", - description::String = "", - instructions::String = "", - tools::Vector = [], - file_ids::Vector = [], - metadata::Dict = Dict(), - http_kwargs::NamedTuple = NamedTuple()) + model_id::String; + name::String = "", + description::String = "", + instructions::String = "", + tools::Vector = [], + file_ids::Vector = [], + metadata::Dict = Dict(), + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # POST https://api.openai.com/v1/assistants # Requires the OpenAI-Beta: assistants=v1 header @@ -133,8 +133,8 @@ Main.OpenAI.OpenAIResponse{JSON3.Object{Vector{UInt8}, Vector{UInt64}}}(200, { ``` """ function get_assistant(api_key::String, - assistant_id::String; - http_kwargs::NamedTuple = NamedTuple()) + assistant_id::String; + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # GET https://api.openai.com/v1/assistants/:assistant_id # Requires the OpenAI-Beta: assistants=v1 header @@ -212,18 +212,18 @@ Main.OpenAI.OpenAIResponse{JSON3.Object{Vector{UInt8}, Vector{UInt64}}}(200, { ``` """ function list_assistants(api_key::AbstractString; - limit::Union{Integer, AbstractString} = 20, - order::AbstractString = "desc", - after::AbstractString = "", - before::AbstractString = "", - http_kwargs::NamedTuple = NamedTuple()) + limit::Union{Integer, AbstractString} = 20, + order::AbstractString = "desc", + after::AbstractString = "", + before::AbstractString = "", + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # GET https://api.openai.com/v1/assistants # Requires the OpenAI-Beta: assistants=v1 header # Build query parameters query = Pair{String, String}["limit" => string(limit), - "order" => order] + "order" => order] length(after) > 0 && push!(query, "after" => after) length(before) > 0 && push!(query, "before" => before) @@ -296,15 +296,15 @@ Main.OpenAI.OpenAIResponse{JSON3.Object{Vector{UInt8}, Vector{UInt64}}}(200, { ``` """ function modify_assistant(api_key::AbstractString, - assistant_id::AbstractString; - model = nothing, - name = nothing, - description = nothing, - instructions = nothing, - tools = nothing, - file_ids = nothing, - metadata = nothing, - http_kwargs::NamedTuple = NamedTuple()) + assistant_id::AbstractString; + model = nothing, + name = nothing, + description = nothing, + instructions = nothing, + tools = nothing, + file_ids = nothing, + metadata = nothing, + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # PATCH https://api.openai.com/v1/assistants/:assistant_id # Requires the OpenAI-Beta: assistants=v1 header @@ -379,8 +379,8 @@ Main.OpenAI.OpenAIResponse{JSON3.Object{Vector{UInt8}, Vector{UInt64}}}(200, { ``` """ function delete_assistant(api_key::AbstractString, - assistant_id::AbstractString; - http_kwargs::NamedTuple = NamedTuple()) + assistant_id::AbstractString; + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # DELETE https://api.openai.com/v1/assistants/:assistant_id # Requires the OpenAI-Beta: assistants=v1 header @@ -421,8 +421,8 @@ thread_id = create_thread(api_key, [ ``` """ function create_thread(api_key::AbstractString, - messages = nothing; - http_kwargs::NamedTuple = NamedTuple()) + messages = nothing; + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint 
is # POST https://api.openai.com/v1/threads # Requires the OpenAI-Beta: assistants=v1 header @@ -444,8 +444,8 @@ thread = retrieve_thread(api_key, thread_id) ``` """ function retrieve_thread(api_key::AbstractString, - thread_id::AbstractString; - http_kwargs::NamedTuple = NamedTuple()) + thread_id::AbstractString; + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # GET https://api.openai.com/v1/threads/:thread_id # Requires the OpenAI-Beta: assistants=v1 header @@ -466,8 +466,8 @@ delete_thread(api_key, thread_id) ``` """ function delete_thread(api_key::AbstractString, - thread_id::AbstractString; - http_kwargs::NamedTuple = NamedTuple()) + thread_id::AbstractString; + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # DELETE https://api.openai.com/v1/threads/:thread_id # Requires the OpenAI-Beta: assistants=v1 header @@ -492,9 +492,9 @@ modify_thread(api_key, thread_id, metadata=Dict("key" => "value")) ``` """ function modify_thread(api_key::AbstractString, - thread_id::AbstractString; - metadata = nothing, - http_kwargs::NamedTuple = NamedTuple()) + thread_id::AbstractString; + metadata = nothing, + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # PATCH https://api.openai.com/v1/threads/:thread_id # Requires the OpenAI-Beta: assistants=v1 header @@ -515,12 +515,12 @@ end """ function create_message(api_key::AbstractString, - thread_id::AbstractString, - # role::AbstractString, # Currently role is always "user" - content::AbstractString; - file_ids = nothing, - metadata = nothing, - http_kwargs::NamedTuple = NamedTuple()) + thread_id::AbstractString, + # role::AbstractString, # Currently role is always "user" + content::AbstractString; + file_ids = nothing, + metadata = nothing, + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # POST https://api.openai.com/v1/threads/:thread_id/messages # Requires the OpenAI-Beta: assistants=v1 header @@ -554,9 +554,9 @@ end Retrieves a message by ID. """ function retrieve_message(api_key::AbstractString, - thread_id::AbstractString, - message_id::AbstractString; - http_kwargs::NamedTuple = NamedTuple()) + thread_id::AbstractString, + message_id::AbstractString; + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # GET https://api.openai.com/v1/threads/:thread_id/messages/:message_id # Requires the OpenAI-Beta: assistants=v1 header @@ -572,9 +572,9 @@ end """ function delete_message(api_key::AbstractString, - thread_id::AbstractString, - message_id::AbstractString; - http_kwargs::NamedTuple = NamedTuple()) + thread_id::AbstractString, + message_id::AbstractString; + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # DELETE https://api.openai.com/v1/threads/:thread_id/messages/:message_id # Requires the OpenAI-Beta: assistants=v1 header @@ -590,10 +590,10 @@ end """ function modify_message(api_key::AbstractString, - thread_id::AbstractString, - message_id::AbstractString; - metadata = nothing, - http_kwargs::NamedTuple = NamedTuple()) + thread_id::AbstractString, + message_id::AbstractString; + metadata = nothing, + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # PATCH https://api.openai.com/v1/threads/:thread_id/messages/:message_id # Requires the OpenAI-Beta: assistants=v1 header @@ -612,19 +612,19 @@ Returns an `OpenAIResponse` object containing a list of messages, sorted by the `created_at` timestamp of the objects. 
""" function list_messages(api_key::AbstractString, - thread_id::AbstractString; - limit::Union{Integer, AbstractString} = 20, - order::AbstractString = "desc", - after::AbstractString = "", - before::AbstractString = "", - http_kwargs::NamedTuple = NamedTuple()) + thread_id::AbstractString; + limit::Union{Integer, AbstractString} = 20, + order::AbstractString = "desc", + after::AbstractString = "", + before::AbstractString = "", + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # GET https://api.openai.com/v1/threads/:thread_id/messages # Requires the OpenAI-Beta: assistants=v1 header # Build query parameters query = Pair{String, String}["limit" => string(limit), - "order" => order] + "order" => order] length(after) > 0 && push!(query, "after" => after) length(before) > 0 && push!(query, "before" => before) @@ -647,13 +647,13 @@ end POST https://api.openai.com/v1/threads/{thread_id}/runs """ function create_run(api_key::AbstractString, - thread_id::AbstractString, - assistant_id::AbstractString, - instructions = nothing; - tools = nothing, - metadata = nothing, - model = nothing, - http_kwargs::NamedTuple = NamedTuple()) + thread_id::AbstractString, + assistant_id::AbstractString, + instructions = nothing; + tools = nothing, + metadata = nothing, + model = nothing, + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # POST https://api.openai.com/v1/threads/:thread_id/runs # Requires the OpenAI-Beta: assistants=v1 header @@ -675,9 +675,9 @@ end GET https://api.openai.com/v1/threads/{thread_id}/runs/{run_id} """ function retrieve_run(api_key::AbstractString, - thread_id::AbstractString, - run_id::AbstractString; - http_kwargs::NamedTuple = NamedTuple()) + thread_id::AbstractString, + run_id::AbstractString; + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # GET https://api.openai.com/v1/threads/:thread_id/runs/:run_id # Requires the OpenAI-Beta: assistants=v1 header @@ -694,10 +694,10 @@ end POST https://api.openai.com/v1/threads/{thread_id}/runs/{run_id} """ function modify_run(api_key::AbstractString, - thread_id::AbstractString, - run_id::AbstractString; - metadata = nothing, - http_kwargs::NamedTuple = NamedTuple()) + thread_id::AbstractString, + run_id::AbstractString; + metadata = nothing, + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # POST https://api.openai.com/v1/threads/:thread_id/runs/:run_id # Requires the OpenAI-Beta: assistants=v1 header @@ -715,19 +715,19 @@ end GET https://api.openai.com/v1/threads/{thread_id}/runs """ function list_runs(api_key::AbstractString, - thread_id::AbstractString; - limit::Union{Integer, AbstractString} = 20, - order::AbstractString = "desc", - after::AbstractString = "", - before::AbstractString = "", - http_kwargs::NamedTuple = NamedTuple()) + thread_id::AbstractString; + limit::Union{Integer, AbstractString} = 20, + order::AbstractString = "desc", + after::AbstractString = "", + before::AbstractString = "", + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # GET https://api.openai.com/v1/threads/:thread_id/runs # Requires the OpenAI-Beta: assistants=v1 header # Build query parameters query = Pair{String, String}["limit" => string(limit), - "order" => order] + "order" => order] length(after) > 0 && push!(query, "after" => after) length(before) > 0 && push!(query, "before" => before) @@ -746,9 +746,9 @@ end POST https://api.openai.com/v1/threads/{thread_id}/runs/{run_id}/cancel """ function cancel_run(api_key::AbstractString, - thread_id::AbstractString, - 
run_id::AbstractString; - http_kwargs::NamedTuple = NamedTuple()) + thread_id::AbstractString, + run_id::AbstractString; + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # POST https://api.openai.com/v1/threads/:thread_id/runs/:run_id/cancel # Requires the OpenAI-Beta: assistants=v1 header @@ -781,13 +781,13 @@ POST https://api.openai.com/v1/threads/runs - `metadata` is a `Dict` representing the metadata for the run. """ function create_thread_and_run(api_key::AbstractString, - assistant_id::AbstractString; - thread = nothing, - model = nothing, - instructions = nothing, - tools = nothing, - metadata = nothing, - http_kwargs::NamedTuple = NamedTuple()) + assistant_id::AbstractString; + thread = nothing, + model = nothing, + instructions = nothing, + tools = nothing, + metadata = nothing, + http_kwargs::NamedTuple = NamedTuple()) # The API endpoint is # POST https://api.openai.com/v1/threads/runs # Requires the OpenAI-Beta: assistants=v1 header diff --git a/test/chatcompletion.jl b/test/chatcompletion.jl index 6cd9691..b0f3117 100644 --- a/test/chatcompletion.jl +++ b/test/chatcompletion.jl @@ -12,7 +12,7 @@ "gpt-4o-mini", [ Dict("role" => "user", - "content" => "Summarize HTTP.jl package in a short sentence."), + "content" => "Summarize HTTP.jl package in a short sentence."), ], http_kwargs = (connect_timeout = 10, readtimeout = 0)) println(r.response["choices"][begin]["message"]["content"]) @@ -26,7 +26,7 @@ end "gpt-4o-mini", [ Dict("role" => "user", - "content" => "What continent is New York in? Two word answer."), + "content" => "What continent is New York in? Two word answer."), ], streamcallback = let count = 0 diff --git a/test/responses.jl b/test/responses.jl index 587b0d9..b3d2ca7 100644 --- a/test/responses.jl +++ b/test/responses.jl @@ -1,23 +1,28 @@ -@testset "Responses" begin +@testset "Responses" begin ## Image response tag - input = [Dict("role" => "user", - "content" => [Dict("type" => "input_text", "text" => "What is in this image?"), - Dict("type" => "input_image", "image_url" => "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg")]) - ] + input = [Dict("role" => "user", + "content" => [Dict("type" => "input_text", "text" => "What is in this image?"), + Dict("type" => "input_image", + "image_url" => "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg")]) + ] resp = create_responses(ENV["OPENAI_API_KEY"], input) if !=(resp.status, 200) @test false end ## Web search - resp = create_responses(ENV["OPENAI_API_KEY"], "What was a positive news story from today?"; tools=[Dict("type" => "web_search_preview")]) + resp = create_responses( + ENV["OPENAI_API_KEY"], "What was a positive news story from today?"; + tools = [Dict("type" => "web_search_preview")]) if !=(resp.status, 200) @test false end ## Streaming - resp = create_responses(ENV["OPENAI_API_KEY"], "Hello!"; instructions="You are a helpful assistant.", stream=true, streamcallback = x->println(x)) + resp = create_responses( + ENV["OPENAI_API_KEY"], "Hello!"; instructions = "You are a helpful assistant.", + stream = true, streamcallback = x->println(x)) if !=(resp.status, 200) @test false end @@ -25,35 +30,35 @@ ## Functions tools = [ Dict( - "type" => "function", - "name" => "get_current_weather", - "description" => "Get the current weather in a given location", - "parameters" => Dict( + "type" 
=> "function", + "name" => "get_current_weather", + "description" => "Get the current weather in a given location", + "parameters" => Dict( "type" => "object", "properties" => Dict( "location" => Dict( "type" => "string", - "description" => "The city and state, e.g. San Francisco, CA", + "description" => "The city and state, e.g. San Francisco, CA" ), - "unit"=> Dict("type" => "string", "enum" => ["celsius", "fahrenheit"]), + "unit" => Dict("type" => "string", "enum" => ["celsius", "fahrenheit"]) ), - "required" => ["location", "unit"], - ) + "required" => ["location", "unit"] ) + ) ] - resp = create_responses(ENV["OPENAI_API_KEY"], "What is the weather in Boston?"; tools=tools, tool_choice="auto") + resp = create_responses(ENV["OPENAI_API_KEY"], "What is the weather in Boston?"; + tools = tools, tool_choice = "auto") if !=(resp.status, 200) @test false end ## Reasoning - resp = create_responses(ENV["OPENAI_API_KEY"], "How much wood would a woodchuck chuck?"; - model = "o3-mini", - reasoning=Dict("effort" => "high")) + resp = create_responses( + ENV["OPENAI_API_KEY"], "How much wood would a woodchuck chuck?"; + model = "o3-mini", + reasoning = Dict("effort" => "high")) if !=(resp.status, 200) @test false end - - -end \ No newline at end of file +end From 4152a520966ef3477386597706dd796c6a511891 Mon Sep 17 00:00:00 2001 From: J S <49557684+svilupp@users.noreply.github.com> Date: Sun, 10 Aug 2025 18:31:57 +0100 Subject: [PATCH 5/7] Run live API tests unconditionally --- test/runtests.jl | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/test/runtests.jl b/test/runtests.jl index c65cf45..7e573ac 100755 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -21,23 +21,19 @@ end @testset "OpenAI.jl" begin printstyled(color = :blue, "\n") - if haskey(ENV, "OPENAI_API_KEY") && get(ENV, "OPENAI_RUN_LIVE_TESTS", "") == "true" - # Disable SSL verification in CI environments that inject self-signed certificates - ENV["JULIA_SSL_NO_VERIFY_HOSTS"] = "*" - @testset "models" begin - include("models.jl") - end - @testset "chatcompletion" begin - include("chatcompletion.jl") - end - @testset "completion" begin - include("completion.jl") - end - @testset "embeddings" begin - include("embeddings.jl") - end - else - @info "Skipping live API tests" + # Disable SSL verification in CI environments that inject self-signed certificates + ENV["JULIA_SSL_NO_VERIFY_HOSTS"] = "*" + @testset "models" begin + include("models.jl") + end + @testset "chatcompletion" begin + include("chatcompletion.jl") + end + @testset "completion" begin + include("completion.jl") + end + @testset "embeddings" begin + include("embeddings.jl") end @testset "streamcallbacks" begin include("streamcallbacks.jl") From a226b419f60a98011b3b94657fda170ffbdea2af Mon Sep 17 00:00:00 2001 From: svilupp Date: Sun, 10 Aug 2025 20:24:59 +0100 Subject: [PATCH 6/7] change model to gpt-5-mini --- README.md | 4 ++-- examples/streamcallbacks.jl | 2 +- src/OpenAI.jl | 8 ++++---- test/chatcompletion.jl | 24 +++++++++--------------- 4 files changed, 16 insertions(+), 22 deletions(-) diff --git a/README.md b/README.md index 7b045d7..2e68e76 100644 --- a/README.md +++ b/README.md @@ -28,7 +28,7 @@ __⚠️ We strongly suggest setting up your API key as an ENV variable__. 
```julia secret_key = ENV["OPENAI_API_KEY"] -model = "gpt-4o-mini" +model = "gpt-5-mini" prompt = "Say \"this is a test\"" r = create_chat( @@ -57,7 +57,7 @@ provider = OpenAI.OpenAIProvider( ) response = create_chat( provider, - "gpt-4o-mini", + "gpt-5-mini", [Dict("role" => "user", "content" => "Write some ancient Greek poetry")] ) ``` diff --git a/examples/streamcallbacks.jl b/examples/streamcallbacks.jl index addd9cf..dc5f6e5 100644 --- a/examples/streamcallbacks.jl +++ b/examples/streamcallbacks.jl @@ -2,7 +2,7 @@ using OpenAI api_key = get(ENV, "OPENAI_API_KEY", "") -model = "gpt-4o-mini" +model = "gpt-5-mini" messages = [Dict("role" => "user", "content" => "Write a short haiku about streams.")] # 1. Stream to stdout (no differences) diff --git a/src/OpenAI.jl b/src/OpenAI.jl index 06e1d5c..d7245e6 100644 --- a/src/OpenAI.jl +++ b/src/OpenAI.jl @@ -284,7 +284,7 @@ For more details about the endpoint and additional arguments, visit CC = create_chat("..........", "gpt-4o-mini", +julia> CC = create_chat("..........", "gpt-5-mini", [Dict("role" => "user", "content"=> "What is the OpenAI mission?")] ); @@ -302,7 +302,7 @@ The response body will reflect the chunked nature of the response, so some reass message returned by the API. ```julia -julia> CC = create_chat(key, "gpt-4o-mini", +julia> CC = create_chat(key, "gpt-5-mini", [Dict("role" => "user", "content"=> "What continent is New York in? Two word answer.")], streamcallback = x->println(Dates.now())); 2023-03-27T12:34:50.428 @@ -468,7 +468,7 @@ https://platform.openai.com/docs/api-reference/responses/create - `input`: The input text to generate the response(s) for, as String or Dict. To get responses for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 8192 tokens in length. -- `model::String`: Model id. Defaults to "gpt-4o-mini". +- `model::String`: Model id. Defaults to "gpt-5-mini". - `kwargs...`: Additional arguments to pass to the API. - `tools::Int`: The number of responses to generate for the input. Defaults to 1. @@ -523,7 +523,7 @@ response = create_responses(api_key, "How much wood would a woodchuck chuck?"; ``` """ -function create_responses(api_key::String, input, model = "gpt-4o-mini"; +function create_responses(api_key::String, input, model = "gpt-5-mini"; http_kwargs::NamedTuple = NamedTuple(), kwargs...) return openai_request("responses", api_key; diff --git a/test/chatcompletion.jl b/test/chatcompletion.jl index b0f3117..a44f03c 100644 --- a/test/chatcompletion.jl +++ b/test/chatcompletion.jl @@ -1,6 +1,6 @@ @testset "chatcompletion" begin r = create_chat(ENV["OPENAI_API_KEY"], - "gpt-4o-mini", + "gpt-5-mini", [Dict("role" => "user", "content" => "What is the OpenAI mission?")]) println(r.response["choices"][begin]["message"]["content"]) if !=(r.status, 200) @@ -9,10 +9,10 @@ # with http kwargs (with default values) r = create_chat(ENV["OPENAI_API_KEY"], - "gpt-4o-mini", + "gpt-5-mini", [ Dict("role" => "user", - "content" => "Summarize HTTP.jl package in a short sentence."), + "content" => "Summarize HTTP.jl package in a short sentence.") ], http_kwargs = (connect_timeout = 10, readtimeout = 0)) println(r.response["choices"][begin]["message"]["content"]) @@ -22,22 +22,16 @@ end @testset "chatcompletion - streaming" begin + cb = StreamCallback() r = create_chat(ENV["OPENAI_API_KEY"], - "gpt-4o-mini", + "gpt-5-mini", [ Dict("role" => "user", - "content" => "What continent is New York in? 
Two word answer."), - ], - streamcallback = let - count = 0 - - function f(s::String) - count = count + 1 - println("Chunk $count") - end - end) + "content" => "What continent is New York in? Two word answer.") + ]; + streamcallback = cb) - println(map(r -> r["choices"][1]["delta"], r.response)) + println("Received $(length(cb.chunks)) chunks") if !=(r.status, 200) @test false end From b43dcc6a583519e91cb082d785d2a166efb0e685 Mon Sep 17 00:00:00 2001 From: svilupp Date: Sun, 10 Aug 2025 20:33:56 +0100 Subject: [PATCH 7/7] update model --- test/assistants.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/assistants.jl b/test/assistants.jl index c8d7c10..0161089 100644 --- a/test/assistants.jl +++ b/test/assistants.jl @@ -32,7 +32,7 @@ # Set API/model api_key = ENV["OPENAI_API_KEY"] -test_model = "gpt-4o-mini" +test_model = "gpt-5-mini" # Test functions for the assistant generation/modification/etc. @testset "Assistants" begin @@ -158,7 +158,7 @@ end # Make a thread thread = create_thread(api_key, [ - Dict("role" => "user", "content" => "Hello, how are you?"), + Dict("role" => "user", "content" => "Hello, how are you?") ]) # Make a run