Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
8 changes: 4 additions & 4 deletions .stats.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
configured_endpoints: 95
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-0ee6b36cf3cc278cef4199a6aec5f7d530a6c1f17a74830037e96d50ca1edc50.yml
openapi_spec_hash: e8ec5f46bc0655b34f292422d58a60f6
config_hash: d9b6b6e6bc85744663e300eebc482067
configured_endpoints: 99
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-794a6ed3c3d3d77887564755168056af8a426b17cf1ec721e3a300503dc22a41.yml
openapi_spec_hash: 25a81c220713cd5b0bafc221d1dfa79a
config_hash: 0b768ed1b56c6d82816f0fa40dc4aaf5
301 changes: 271 additions & 30 deletions README.md

Large diffs are not rendered by default.

2 changes: 2 additions & 0 deletions openai-java-core/build.gradle.kts
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@ dependencies {
implementation("com.fasterxml.jackson.module:jackson-module-kotlin:2.18.2")
implementation("org.apache.httpcomponents.core5:httpcore5:5.2.4")
implementation("org.apache.httpcomponents.client5:httpclient5:5.3.1")
implementation("com.github.victools:jsonschema-generator:4.38.0")
implementation("com.github.victools:jsonschema-module-jackson:4.38.0")

testImplementation(kotlin("test"))
testImplementation(project(":openai-java-client-okhttp"))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import com.openai.services.blocking.EmbeddingService
import com.openai.services.blocking.EvalService
import com.openai.services.blocking.FileService
import com.openai.services.blocking.FineTuningService
import com.openai.services.blocking.GraderService
import com.openai.services.blocking.ImageService
import com.openai.services.blocking.ModelService
import com.openai.services.blocking.ModerationService
Expand Down Expand Up @@ -65,6 +66,8 @@ interface OpenAIClient {

fun fineTuning(): FineTuningService

fun graders(): GraderService

fun vectorStores(): VectorStoreService

fun beta(): BetaService
Expand Down Expand Up @@ -111,6 +114,8 @@ interface OpenAIClient {

fun fineTuning(): FineTuningService.WithRawResponse

fun graders(): GraderService.WithRawResponse

fun vectorStores(): VectorStoreService.WithRawResponse

fun beta(): BetaService.WithRawResponse
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import com.openai.services.async.EmbeddingServiceAsync
import com.openai.services.async.EvalServiceAsync
import com.openai.services.async.FileServiceAsync
import com.openai.services.async.FineTuningServiceAsync
import com.openai.services.async.GraderServiceAsync
import com.openai.services.async.ImageServiceAsync
import com.openai.services.async.ModelServiceAsync
import com.openai.services.async.ModerationServiceAsync
Expand Down Expand Up @@ -65,6 +66,8 @@ interface OpenAIClientAsync {

fun fineTuning(): FineTuningServiceAsync

fun graders(): GraderServiceAsync

fun vectorStores(): VectorStoreServiceAsync

fun beta(): BetaServiceAsync
Expand Down Expand Up @@ -111,6 +114,8 @@ interface OpenAIClientAsync {

fun fineTuning(): FineTuningServiceAsync.WithRawResponse

fun graders(): GraderServiceAsync.WithRawResponse

fun vectorStores(): VectorStoreServiceAsync.WithRawResponse

fun beta(): BetaServiceAsync.WithRawResponse
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,8 @@ import com.openai.services.async.FileServiceAsync
import com.openai.services.async.FileServiceAsyncImpl
import com.openai.services.async.FineTuningServiceAsync
import com.openai.services.async.FineTuningServiceAsyncImpl
import com.openai.services.async.GraderServiceAsync
import com.openai.services.async.GraderServiceAsyncImpl
import com.openai.services.async.ImageServiceAsync
import com.openai.services.async.ImageServiceAsyncImpl
import com.openai.services.async.ModelServiceAsync
Expand Down Expand Up @@ -84,6 +86,10 @@ class OpenAIClientAsyncImpl(private val clientOptions: ClientOptions) : OpenAICl
FineTuningServiceAsyncImpl(clientOptionsWithUserAgent)
}

private val graders: GraderServiceAsync by lazy {
GraderServiceAsyncImpl(clientOptionsWithUserAgent)
}

private val vectorStores: VectorStoreServiceAsync by lazy {
VectorStoreServiceAsyncImpl(clientOptionsWithUserAgent)
}
Expand Down Expand Up @@ -126,6 +132,8 @@ class OpenAIClientAsyncImpl(private val clientOptions: ClientOptions) : OpenAICl

override fun fineTuning(): FineTuningServiceAsync = fineTuning

override fun graders(): GraderServiceAsync = graders

override fun vectorStores(): VectorStoreServiceAsync = vectorStores

override fun beta(): BetaServiceAsync = beta
Expand Down Expand Up @@ -179,6 +187,10 @@ class OpenAIClientAsyncImpl(private val clientOptions: ClientOptions) : OpenAICl
FineTuningServiceAsyncImpl.WithRawResponseImpl(clientOptions)
}

private val graders: GraderServiceAsync.WithRawResponse by lazy {
GraderServiceAsyncImpl.WithRawResponseImpl(clientOptions)
}

private val vectorStores: VectorStoreServiceAsync.WithRawResponse by lazy {
VectorStoreServiceAsyncImpl.WithRawResponseImpl(clientOptions)
}
Expand Down Expand Up @@ -221,6 +233,8 @@ class OpenAIClientAsyncImpl(private val clientOptions: ClientOptions) : OpenAICl

override fun fineTuning(): FineTuningServiceAsync.WithRawResponse = fineTuning

override fun graders(): GraderServiceAsync.WithRawResponse = graders

override fun vectorStores(): VectorStoreServiceAsync.WithRawResponse = vectorStores

override fun beta(): BetaServiceAsync.WithRawResponse = beta
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,8 @@ import com.openai.services.blocking.FileService
import com.openai.services.blocking.FileServiceImpl
import com.openai.services.blocking.FineTuningService
import com.openai.services.blocking.FineTuningServiceImpl
import com.openai.services.blocking.GraderService
import com.openai.services.blocking.GraderServiceImpl
import com.openai.services.blocking.ImageService
import com.openai.services.blocking.ImageServiceImpl
import com.openai.services.blocking.ModelService
Expand Down Expand Up @@ -78,6 +80,8 @@ class OpenAIClientImpl(private val clientOptions: ClientOptions) : OpenAIClient
FineTuningServiceImpl(clientOptionsWithUserAgent)
}

private val graders: GraderService by lazy { GraderServiceImpl(clientOptionsWithUserAgent) }

private val vectorStores: VectorStoreService by lazy {
VectorStoreServiceImpl(clientOptionsWithUserAgent)
}
Expand Down Expand Up @@ -116,6 +120,8 @@ class OpenAIClientImpl(private val clientOptions: ClientOptions) : OpenAIClient

override fun fineTuning(): FineTuningService = fineTuning

override fun graders(): GraderService = graders

override fun vectorStores(): VectorStoreService = vectorStores

override fun beta(): BetaService = beta
Expand Down Expand Up @@ -169,6 +175,10 @@ class OpenAIClientImpl(private val clientOptions: ClientOptions) : OpenAIClient
FineTuningServiceImpl.WithRawResponseImpl(clientOptions)
}

private val graders: GraderService.WithRawResponse by lazy {
GraderServiceImpl.WithRawResponseImpl(clientOptions)
}

private val vectorStores: VectorStoreService.WithRawResponse by lazy {
VectorStoreServiceImpl.WithRawResponseImpl(clientOptions)
}
Expand Down Expand Up @@ -211,6 +221,8 @@ class OpenAIClientImpl(private val clientOptions: ClientOptions) : OpenAIClient

override fun fineTuning(): FineTuningService.WithRawResponse = fineTuning

override fun graders(): GraderService.WithRawResponse = graders

override fun vectorStores(): VectorStoreService.WithRawResponse = vectorStores

override fun beta(): BetaService.WithRawResponse = beta
Expand Down
21 changes: 21 additions & 0 deletions openai-java-core/src/main/kotlin/com/openai/core/AutoPager.kt
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
// File generated from our OpenAPI spec by Stainless.

package com.openai.core

import java.util.stream.Stream
import java.util.stream.StreamSupport

/**
 * Lazily iterates over every item across all pages, starting from [firstPage] and fetching each
 * subsequent page on demand via [Page.nextPage].
 */
class AutoPager<T> private constructor(private val firstPage: Page<T>) : Iterable<T> {

    companion object {

        /** Returns an [AutoPager] that begins iteration at [firstPage]. */
        fun <T> from(firstPage: Page<T>): AutoPager<T> = AutoPager(firstPage)
    }

    override fun iterator(): Iterator<T> =
        sequence {
                var page: Page<T>? = firstPage
                while (page != null) {
                    // Emit the current page's items before deciding whether to fetch more.
                    yieldAll(page.items())
                    page = if (page.hasNextPage()) page.nextPage() else null
                }
            }
            .iterator()

    /** Returns a sequential [Stream] over the same items produced by [iterator]. */
    fun stream(): Stream<T> = StreamSupport.stream(spliterator(), false)
}
88 changes: 88 additions & 0 deletions openai-java-core/src/main/kotlin/com/openai/core/AutoPagerAsync.kt
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
// File generated from our OpenAPI spec by Stainless.

package com.openai.core

import com.openai.core.http.AsyncStreamResponse
import java.util.Optional
import java.util.concurrent.CompletableFuture
import java.util.concurrent.CompletionException
import java.util.concurrent.Executor
import java.util.concurrent.atomic.AtomicReference

/**
 * Streams every item across all pages of a paginated async endpoint to a subscribed handler,
 * fetching each subsequent page on demand via [PageAsync.nextPage].
 *
 * At most one [subscribe] call is permitted per instance; the lifecycle only moves forward:
 * NEW -> SUBSCRIBED -> CLOSED.
 */
class AutoPagerAsync<T>
private constructor(private val firstPage: PageAsync<T>, private val defaultExecutor: Executor) :
    AsyncStreamResponse<T> {

    companion object {

        /** Returns an [AutoPagerAsync] that starts streaming from [firstPage]. */
        fun <T> from(firstPage: PageAsync<T>, defaultExecutor: Executor): AutoPagerAsync<T> =
            AutoPagerAsync(firstPage, defaultExecutor)
    }

    // Completed (normally or exceptionally) once the final page has been handled, or completed
    // normally if the stream is closed early.
    private val onCompleteFuture = CompletableFuture<Void?>()
    // Guards the single-subscription and close lifecycle; see [State].
    private val state = AtomicReference(State.NEW)

    override fun subscribe(handler: AsyncStreamResponse.Handler<T>): AsyncStreamResponse<T> =
        subscribe(handler, defaultExecutor)

    override fun subscribe(
        handler: AsyncStreamResponse.Handler<T>,
        executor: Executor,
    ): AsyncStreamResponse<T> = apply {
        // Atomically claim the single allowed subscription. The message lambda re-reads the state
        // to distinguish "already subscribed" from "already closed".
        // TODO(JDK): Use `compareAndExchange` once targeting JDK 9.
        check(state.compareAndSet(State.NEW, State.SUBSCRIBED)) {
            if (state.get() == State.SUBSCRIBED) "Cannot subscribe more than once"
            else "Cannot subscribe after the response is closed"
        }

        // Forwards this page's items to the handler, then chains onto the next page's future,
        // repeating until there are no more pages or the stream is closed.
        fun PageAsync<T>.handle(): CompletableFuture<Void?> {
            // Stop paginating promptly if the consumer closed the stream mid-iteration.
            if (state.get() == State.CLOSED) {
                return CompletableFuture.completedFuture(null)
            }

            items().forEach { handler.onNext(it) }
            return if (hasNextPage()) nextPage().thenCompose { it.handle() }
            else CompletableFuture.completedFuture(null)
        }

        executor.execute {
            firstPage.handle().whenComplete { _, error ->
                // Unwrap the CompletionException added by CompletableFuture so the handler and
                // the completion future observe the original cause.
                val actualError =
                    if (error is CompletionException && error.cause != null) error.cause else error
                try {
                    handler.onComplete(Optional.ofNullable(actualError))
                } finally {
                    // Complete the future and close the stream even if `onComplete` throws.
                    try {
                        if (actualError == null) {
                            onCompleteFuture.complete(null)
                        } else {
                            onCompleteFuture.completeExceptionally(actualError)
                        }
                    } finally {
                        close()
                    }
                }
            }
        }
    }

    /** Returns a future that completes when streaming finishes or the stream is closed. */
    override fun onCompleteFuture(): CompletableFuture<Void?> = onCompleteFuture

    override fun close() {
        val previousState = state.getAndSet(State.CLOSED)
        // Idempotent: only the first close performs any work.
        if (previousState == State.CLOSED) {
            return
        }

        // When the stream is closed, we should always consider it closed. If it closed due
        // to an error, then we will have already completed the future earlier, and this
        // will be a no-op.
        onCompleteFuture.complete(null)
    }
}

/** Lifecycle of an [AutoPagerAsync]: created, subscribed at most once, then closed. */
private enum class State {
    NEW,
    SUBSCRIBED,
    CLOSED,
}
3 changes: 3 additions & 0 deletions openai-java-core/src/main/kotlin/com/openai/core/Check.kt
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@ package com.openai.core
import com.fasterxml.jackson.core.Version
import com.fasterxml.jackson.core.util.VersionUtil

/**
 * Asserts that required field [name] satisfies [condition].
 *
 * @throws IllegalStateException if [condition] is false, with a message naming the missing field.
 */
fun checkRequired(name: String, condition: Boolean) {
    if (!condition) {
        throw IllegalStateException("`$name` is required, but was not set")
    }
}

/**
 * Returns [value] for required field [name] if it is non-null.
 *
 * @throws IllegalStateException if [value] is null, with a message naming the missing field.
 */
fun <T : Any> checkRequired(name: String, value: T?): T {
    if (value == null) {
        throw IllegalStateException("`$name` is required, but was not set")
    }
    return value
}

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
package com.openai.core

/**
 * Options for local validation of JSON schemas derived from arbitrary classes before a request is
 * executed.
 *
 * Regardless of the option chosen, the remote AI model always performs its own validation; this
 * option only controls whether the schema is additionally checked client-side first.
 */
enum class JsonSchemaLocalValidation {
    /**
     * Validate the JSON schema locally before the request is executed. The remote AI model will
     * also validate the JSON schema.
     */
    YES,

    /**
     * Do not validate the JSON schema locally before the request is executed. The remote AI model
     * will always validate the JSON schema.
     */
    NO,
}
Loading