
Commit c07a442

chore(examples-docs): upgrade to OpenAI V1 (#1785)
1 parent cd3aa15 commit c07a442

3 files changed: +47, -28 lines
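All three files make the same change: the pre-1.0 module-level `openai` calls are replaced by an explicit `OpenAI` client pointed at vLLM's server. A minimal sketch of the pattern, assuming a vLLM OpenAI-compatible server is already listening on http://localhost:8000 (the base URL used in the diffs) and reusing the model name from the quickstart; the prompt string here is only illustrative:

# Before (openai < 1.0): module-level globals and classmethod calls.
#   import openai
#   openai.api_key = "EMPTY"
#   openai.api_base = "http://localhost:8000/v1"
#   completion = openai.Completion.create(model="facebook/opt-125m", prompt="Hello")

# After (openai >= 1.0): configure a client instance and call it.
from openai import OpenAI

client = OpenAI(
    api_key="EMPTY",                      # the examples use a dummy key
    base_url="http://localhost:8000/v1",  # point the client at the vLLM server
)
completion = client.completions.create(model="facebook/opt-125m",
                                        prompt="Hello")  # illustrative prompt
print(completion)
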

docs/source/getting_started/quickstart.rst

Lines changed: 19 additions & 8 deletions
@@ -157,11 +157,16 @@ Since this server is compatible with OpenAI API, you can use it as a drop-in rep
 
 .. code-block:: python
 
-    import openai
+    from openai import OpenAI
+
     # Modify OpenAI's API key and API base to use vLLM's API server.
-    openai.api_key = "EMPTY"
-    openai.api_base = "http://localhost:8000/v1"
-    completion = openai.Completion.create(model="facebook/opt-125m",
+    openai_api_key = "EMPTY"
+    openai_api_base = "http://localhost:8000/v1"
+    client = OpenAI(
+        api_key=openai_api_key,
+        base_url=openai_api_base,
+    )
+    completion = client.completions.create(model="facebook/opt-125m",
                                            prompt="San Francisco is a")
     print("Completion result:", completion)
 
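For reference, the quickstart completions snippet after this hunk is applied reads roughly as follows; this is just the added and unchanged lines reassembled, with the indentation inside the `.. code-block:: python` directive assumed:

from openai import OpenAI

# Modify OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"
client = OpenAI(
    api_key=openai_api_key,
    base_url=openai_api_base,
)
completion = client.completions.create(model="facebook/opt-125m",
                                        prompt="San Francisco is a")
print("Completion result:", completion)
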
@@ -194,11 +199,17 @@ Using the `openai` python package, you can also communicate with the model in a
 
 .. code-block:: python
 
-    import openai
+    from openai import OpenAI
     # Set OpenAI's API key and API base to use vLLM's API server.
-    openai.api_key = "EMPTY"
-    openai.api_base = "http://localhost:8000/v1"
-    chat_response = openai.ChatCompletion.create(
+    openai_api_key = "EMPTY"
+    openai_api_base = "http://localhost:8000/v1"
+
+    client = OpenAI(
+        api_key=openai_api_key,
+        base_url=openai_api_base,
+    )
+
+    chat_response = client.chat.completions.create(
         model="facebook/opt-125m",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
examples/openai_chatcompletion_client.py

Lines changed: 15 additions & 11 deletions
@@ -1,18 +1,19 @@
-import openai
+from openai import OpenAI
 
 # Modify OpenAI's API key and API base to use vLLM's API server.
-openai.api_key = "EMPTY"
-openai.api_base = "http://localhost:8000/v1"
+openai_api_key = "EMPTY"
+openai_api_base = "http://localhost:8000/v1"
 
-# List models API
-models = openai.Model.list()
-print("Models:", models)
+client = OpenAI(
+    # defaults to os.environ.get("OPENAI_API_KEY")
+    api_key=openai_api_key,
+    base_url=openai_api_base,
+)
 
-model = models["data"][0]["id"]
+models = client.models.list()
+model = models.data[0].id
 
-# Chat completion API
-chat_completion = openai.ChatCompletion.create(
-    model=model,
+chat_completion = client.chat.completions.create(
     messages=[{
         "role": "system",
         "content": "You are a helpful assistant."
@@ -27,7 +28,10 @@
     }, {
         "role": "user",
         "content": "Where was it played?"
-    }])
+    }],
+    model=model,
+)
+
 
 print("Chat completion results:")
 print(chat_completion)
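Besides the constructor change, the v1 client returns typed response objects instead of dicts, which is why models["data"][0]["id"] becomes models.data[0].id above. As a hypothetical follow-up that is not part of the committed example, the reply text could be pulled out of the chat response the same way:

# Not in the example: read the first reply via attribute access on the v1 response object.
reply_text = chat_completion.choices[0].message.content
print("First reply:", reply_text)
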

examples/openai_completion_client.py

Lines changed: 13 additions & 9 deletions
@@ -1,24 +1,28 @@
-import openai
+from openai import OpenAI
 
 # Modify OpenAI's API key and API base to use vLLM's API server.
-openai.api_key = "EMPTY"
-openai.api_base = "http://localhost:8000/v1"
+openai_api_key = "EMPTY"
+openai_api_base = "http://localhost:8000/v1"
 
-# List models API
-models = openai.Model.list()
-print("Models:", models)
+client = OpenAI(
+    # defaults to os.environ.get("OPENAI_API_KEY")
+    api_key=openai_api_key,
+    base_url=openai_api_base,
+)
 
-model = models["data"][0]["id"]
+models = client.models.list()
+model = models.data[0].id
 
 # Completion API
 stream = False
-completion = openai.Completion.create(
+completion = client.completions.create(
     model=model,
     prompt="A robot may not injure a human being",
     echo=False,
     n=2,
     stream=stream,
-    logprobs=3)
+    logprobs=3
+)
 
 print("Completion results:")
 if stream:
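The rest of the file (the branch under `if stream:`) is outside this hunk. With the v1 client, a streamed completion could be consumed roughly as sketched below; this is an assumption about idiomatic usage, not the committed code:

if stream:
    # With stream=True, completions.create() returns an iterator of chunks.
    for chunk in completion:
        # Only the first choice is printed here for brevity; the example requests n=2.
        print(chunk.choices[0].text, end="", flush=True)
    print()
else:
    print(completion)
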
