
Commit 8e99af9

stainless-bot authored and RobertCraigie committed

feat(api): move n_epochs under hyperparameters

1 parent d8274ad · commit 8e99af9

File tree

7 files changed: +47 -44 lines changed

src/openai/resources/embeddings.py

Lines changed: 2 additions & 2 deletions

```diff
@@ -34,7 +34,7 @@ def create(
         Args:
           input: Input text to embed, encoded as a string or array of tokens. To embed multiple
               inputs in a single request, pass an array of strings or array of token arrays.
-              Each input must not exceed the max input tokens for the model (8191 tokens for
+              The input must not exceed the max input tokens for the model (8192 tokens for
               `text-embedding-ada-002`) and cannot be an empty string.
               [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
               for counting tokens.
@@ -94,7 +94,7 @@ async def create(
         Args:
           input: Input text to embed, encoded as a string or array of tokens. To embed multiple
               inputs in a single request, pass an array of strings or array of token arrays.
-              Each input must not exceed the max input tokens for the model (8191 tokens for
+              The input must not exceed the max input tokens for the model (8192 tokens for
               `text-embedding-ada-002`) and cannot be an empty string.
               [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
               for counting tokens.
```
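The updated docstring points at the tiktoken cookbook for counting tokens. As a rough sketch (not part of this diff), a caller could check an input against the documented limit before requesting an embedding; the sample text is a placeholder and `tiktoken` is assumed to be installed:

```python
# Sketch only: pre-checking input length with tiktoken before embedding.
import tiktoken

enc = tiktoken.encoding_for_model("text-embedding-ada-002")
text = "some text to embed"  # placeholder input
n_tokens = len(enc.encode(text))
# Per the docstring above, inputs must be non-empty and at most 8192 tokens.
assert 0 < n_tokens <= 8192, f"input is {n_tokens} tokens"
```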

src/openai/resources/fine_tunes.py

Lines changed: 8 additions & 10 deletions

```diff
@@ -32,9 +32,9 @@ def create(
         classification_n_classes: Optional[int] | NotGiven = NOT_GIVEN,
         classification_positive_class: Optional[str] | NotGiven = NOT_GIVEN,
         compute_classification_metrics: Optional[bool] | NotGiven = NOT_GIVEN,
+        hyperparameters: fine_tune_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
         learning_rate_multiplier: Optional[float] | NotGiven = NOT_GIVEN,
         model: Union[str, Literal["ada", "babbage", "curie", "davinci"], None] | NotGiven = NOT_GIVEN,
-        n_epochs: Optional[int] | NotGiven = NOT_GIVEN,
         prompt_loss_weight: Optional[float] | NotGiven = NOT_GIVEN,
         suffix: Optional[str] | NotGiven = NOT_GIVEN,
         validation_file: Optional[str] | NotGiven = NOT_GIVEN,
@@ -101,6 +101,8 @@ def create(
              multiclass classification or `classification_positive_class` for binary
              classification.

+          hyperparameters: The hyperparameters used for the fine-tuning job.
+
          learning_rate_multiplier: The learning rate multiplier to use for training. The fine-tuning learning rate
              is the original learning rate used for pretraining multiplied by this value.

@@ -114,9 +116,6 @@ def create(
              2023-08-22. To learn more about these models, see the
              [Models](https://platform.openai.com/docs/models) documentation.

-          n_epochs: The number of epochs to train the model for. An epoch refers to one full cycle
-              through the training dataset.
-
          prompt_loss_weight: The weight to use for loss on the prompt tokens. This controls how much the
              model tries to learn to generate the prompt (as compared to the completion which
              always has a weight of 1.0), and can add a stabilizing effect to training when
@@ -164,9 +163,9 @@ def create(
                 "classification_n_classes": classification_n_classes,
                 "classification_positive_class": classification_positive_class,
                 "compute_classification_metrics": compute_classification_metrics,
+                "hyperparameters": hyperparameters,
                 "learning_rate_multiplier": learning_rate_multiplier,
                 "model": model,
-                "n_epochs": n_epochs,
                 "prompt_loss_weight": prompt_loss_weight,
                 "suffix": suffix,
                 "validation_file": validation_file,
@@ -370,9 +369,9 @@ async def create(
         classification_n_classes: Optional[int] | NotGiven = NOT_GIVEN,
         classification_positive_class: Optional[str] | NotGiven = NOT_GIVEN,
         compute_classification_metrics: Optional[bool] | NotGiven = NOT_GIVEN,
+        hyperparameters: fine_tune_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
         learning_rate_multiplier: Optional[float] | NotGiven = NOT_GIVEN,
         model: Union[str, Literal["ada", "babbage", "curie", "davinci"], None] | NotGiven = NOT_GIVEN,
-        n_epochs: Optional[int] | NotGiven = NOT_GIVEN,
         prompt_loss_weight: Optional[float] | NotGiven = NOT_GIVEN,
         suffix: Optional[str] | NotGiven = NOT_GIVEN,
         validation_file: Optional[str] | NotGiven = NOT_GIVEN,
@@ -439,6 +438,8 @@ async def create(
              multiclass classification or `classification_positive_class` for binary
              classification.

+          hyperparameters: The hyperparameters used for the fine-tuning job.
+
          learning_rate_multiplier: The learning rate multiplier to use for training. The fine-tuning learning rate
              is the original learning rate used for pretraining multiplied by this value.

@@ -452,9 +453,6 @@ async def create(
              2023-08-22. To learn more about these models, see the
              [Models](https://platform.openai.com/docs/models) documentation.

-          n_epochs: The number of epochs to train the model for. An epoch refers to one full cycle
-              through the training dataset.
-
          prompt_loss_weight: The weight to use for loss on the prompt tokens. This controls how much the
              model tries to learn to generate the prompt (as compared to the completion which
              always has a weight of 1.0), and can add a stabilizing effect to training when
@@ -502,9 +500,9 @@ async def create(
                 "classification_n_classes": classification_n_classes,
                 "classification_positive_class": classification_positive_class,
                 "compute_classification_metrics": compute_classification_metrics,
+                "hyperparameters": hyperparameters,
                 "learning_rate_multiplier": learning_rate_multiplier,
                 "model": model,
-                "n_epochs": n_epochs,
                 "prompt_loss_weight": prompt_loss_weight,
                 "suffix": suffix,
                 "validation_file": validation_file,
```
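For callers, the net effect is that the epoch count moves from a top-level `n_epochs` argument into the `hyperparameters` dict. A minimal before/after sketch, assuming a configured client and reusing the `file-abc123` placeholder from the tests below:

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Before this commit:
#   client.fine_tunes.create(training_file="file-abc123", n_epochs=4)

# After this commit, n_epochs lives under hyperparameters:
fine_tune = client.fine_tunes.create(
    training_file="file-abc123",
    hyperparameters={"n_epochs": 4},  # or {"n_epochs": "auto"}
)
```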

src/openai/types/chat/chat_completion_chunk.py

Lines changed: 1 addition & 1 deletion

```diff
@@ -40,7 +40,7 @@ class Choice(BaseModel):
    delta: ChoiceDelta
    """A chat completion delta generated by streamed model responses."""

-    finish_reason: Optional[Literal["stop", "length", "function_call"]]
+    finish_reason: Optional[Literal["stop", "length", "function_call", "content_filter"]]
    """The reason the model stopped generating tokens.

    This will be `stop` if the model hit a natural stop point or a provided stop
```
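Since `finish_reason` can now be `content_filter`, streaming consumers should treat it as a terminal state alongside `stop` and `length`. A rough sketch of handling it (model and prompt are placeholders taken from the tests in this diff):

```python
from openai import OpenAI

client = OpenAI()
stream = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "string"}],
    stream=True,
)
for chunk in stream:
    choice = chunk.choices[0]
    if choice.delta.content:
        print(choice.delta.content, end="")
    if choice.finish_reason == "content_filter":
        # New terminal state: the model stopped because content was flagged.
        print("\n[truncated by content filter]")
```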

src/openai/types/embedding_create_params.py

Lines changed: 2 additions & 2 deletions

```diff
@@ -13,8 +13,8 @@ class EmbeddingCreateParams(TypedDict, total=False):
    """Input text to embed, encoded as a string or array of tokens.

    To embed multiple inputs in a single request, pass an array of strings or array
-    of token arrays. Each input must not exceed the max input tokens for the model
-    (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string.
+    of token arrays. The input must not exceed the max input tokens for the model
+    (8192 tokens for `text-embedding-ada-002`) and cannot be an empty string.
    [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
    for counting tokens.
    """
```

src/openai/types/fine_tune_create_params.py

Lines changed: 12 additions & 7 deletions

```diff
@@ -5,7 +5,7 @@
 from typing import List, Union, Optional
 from typing_extensions import Literal, Required, TypedDict

-__all__ = ["FineTuneCreateParams"]
+__all__ = ["FineTuneCreateParams", "Hyperparameters"]


 class FineTuneCreateParams(TypedDict, total=False):
@@ -72,6 +72,9 @@ class FineTuneCreateParams(TypedDict, total=False):
    classification.
    """

+    hyperparameters: Hyperparameters
+    """The hyperparameters used for the fine-tuning job."""
+
    learning_rate_multiplier: Optional[float]
    """
    The learning rate multiplier to use for training. The fine-tuning learning rate
@@ -91,12 +94,6 @@ class FineTuneCreateParams(TypedDict, total=False):
    models, see the [Models](https://platform.openai.com/docs/models) documentation.
    """

-    n_epochs: Optional[int]
-    """The number of epochs to train the model for.
-
-    An epoch refers to one full cycle through the training dataset.
-    """
-
    prompt_loss_weight: Optional[float]
    """The weight to use for loss on the prompt tokens.

@@ -133,3 +130,11 @@ class FineTuneCreateParams(TypedDict, total=False):
    [fine-tuning guide](https://platform.openai.com/docs/guides/legacy-fine-tuning/creating-training-data)
    for more details.
    """
+
+
+class Hyperparameters(TypedDict, total=False):
+    n_epochs: Union[Literal["auto"], int]
+    """The number of epochs to train the model for.
+
+    An epoch refers to one full cycle through the training dataset.
+    """
```

tests/api_resources/chat/test_completions.py

Lines changed: 20 additions & 20 deletions

```diff
@@ -24,8 +24,8 @@ def test_method_create_overload_1(self, client: OpenAI) -> None:
         completion = client.chat.completions.create(
             messages=[
                 {
-                    "role": "system",
                     "content": "string",
+                    "role": "system",
                 }
             ],
             model="gpt-3.5-turbo",
@@ -37,22 +37,22 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
         completion = client.chat.completions.create(
             messages=[
                 {
-                    "role": "system",
                     "content": "string",
-                    "name": "string",
                     "function_call": {
-                        "name": "string",
                         "arguments": "string",
+                        "name": "string",
                     },
+                    "name": "string",
+                    "role": "system",
                 }
             ],
             model="gpt-3.5-turbo",
             frequency_penalty=-2,
             function_call="none",
             functions=[
                 {
-                    "name": "string",
                     "description": "string",
+                    "name": "string",
                     "parameters": {"foo": "bar"},
                 }
             ],
@@ -73,8 +73,8 @@ def test_method_create_overload_2(self, client: OpenAI) -> None:
         client.chat.completions.create(
             messages=[
                 {
-                    "role": "system",
                     "content": "string",
+                    "role": "system",
                 }
             ],
             model="gpt-3.5-turbo",
@@ -86,13 +86,13 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
         client.chat.completions.create(
             messages=[
                 {
-                    "role": "system",
                     "content": "string",
-                    "name": "string",
                     "function_call": {
-                        "name": "string",
                         "arguments": "string",
+                        "name": "string",
                     },
+                    "name": "string",
+                    "role": "system",
                 }
             ],
             model="gpt-3.5-turbo",
@@ -101,8 +101,8 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
             function_call="none",
             functions=[
                 {
-                    "name": "string",
                     "description": "string",
+                    "name": "string",
                     "parameters": {"foo": "bar"},
                 }
             ],
@@ -127,8 +127,8 @@ async def test_method_create_overload_1(self, client: AsyncOpenAI) -> None:
         completion = await client.chat.completions.create(
             messages=[
                 {
-                    "role": "system",
                     "content": "string",
+                    "role": "system",
                 }
             ],
             model="gpt-3.5-turbo",
@@ -140,22 +140,22 @@ async def test_method_create_with_all_params_overload_1(self, client: AsyncOpenA
         completion = await client.chat.completions.create(
             messages=[
                 {
-                    "role": "system",
                     "content": "string",
-                    "name": "string",
                     "function_call": {
-                        "name": "string",
                         "arguments": "string",
+                        "name": "string",
                     },
+                    "name": "string",
+                    "role": "system",
                 }
             ],
             model="gpt-3.5-turbo",
             frequency_penalty=-2,
             function_call="none",
             functions=[
                 {
-                    "name": "string",
                     "description": "string",
+                    "name": "string",
                     "parameters": {"foo": "bar"},
                 }
             ],
@@ -176,8 +176,8 @@ async def test_method_create_overload_2(self, client: AsyncOpenAI) -> None:
         await client.chat.completions.create(
             messages=[
                 {
-                    "role": "system",
                     "content": "string",
+                    "role": "system",
                 }
             ],
             model="gpt-3.5-turbo",
@@ -189,13 +189,13 @@ async def test_method_create_with_all_params_overload_2(self, client: AsyncOpenA
         await client.chat.completions.create(
             messages=[
                 {
-                    "role": "system",
                     "content": "string",
-                    "name": "string",
                     "function_call": {
-                        "name": "string",
                         "arguments": "string",
+                        "name": "string",
                     },
+                    "name": "string",
+                    "role": "system",
                 }
             ],
             model="gpt-3.5-turbo",
@@ -204,8 +204,8 @@ async def test_method_create_with_all_params_overload_2(self, client: AsyncOpenA
             function_call="none",
             functions=[
                 {
-                    "name": "string",
                     "description": "string",
+                    "name": "string",
                     "parameters": {"foo": "bar"},
                 }
             ],
```

tests/api_resources/test_fine_tunes.py

Lines changed: 2 additions & 2 deletions

```diff
@@ -36,9 +36,9 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
             classification_n_classes=0,
             classification_positive_class="string",
             compute_classification_metrics=True,
+            hyperparameters={"n_epochs": "auto"},
             learning_rate_multiplier=0,
             model="curie",
-            n_epochs=0,
             prompt_loss_weight=0,
             suffix="x",
             validation_file="file-abc123",
@@ -111,9 +111,9 @@ async def test_method_create_with_all_params(self, client: AsyncOpenAI) -> None:
             classification_n_classes=0,
             classification_positive_class="string",
             compute_classification_metrics=True,
+            hyperparameters={"n_epochs": "auto"},
             learning_rate_multiplier=0,
             model="curie",
-            n_epochs=0,
             prompt_loss_weight=0,
             suffix="x",
             validation_file="file-abc123",
```
