Commit a20659d

update changes
1 parent 3ebe2fc commit a20659d

1 file changed

openapi.yaml

Lines changed: 138 additions & 44 deletions
@@ -2888,7 +2888,10 @@ paths:
         "instructions": "You are a helpful assistant designed to make me better at coding!",
         "tools": [],
         "file_ids": [],
-        "metadata": {}
+        "metadata": {},
+        "top_p": 1.0,
+        "temperature": 1.0,
+        "response_format": "auto"
       },
       {
         "id": "asst_abc456",
@@ -2900,7 +2903,10 @@ paths:
         "instructions": "You are a helpful assistant designed to make me better at coding!",
         "tools": [],
         "file_ids": [],
-        "metadata": {}
+        "metadata": {},
+        "top_p": 1.0,
+        "temperature": 1.0,
+        "response_format": "auto"
       },
       {
         "id": "asst_abc789",
@@ -2912,7 +2918,10 @@ paths:
         "instructions": null,
         "tools": [],
         "file_ids": [],
-        "metadata": {}
+        "metadata": {},
+        "top_p": 1.0,
+        "temperature": 1.0,
+        "response_format": "auto"
       }
     ],
     "first_id": "asst_abc123",
@@ -3001,7 +3010,10 @@ paths:
           }
         ],
         "file_ids": [],
-        "metadata": {}
+        "metadata": {},
+        "top_p": 1.0,
+        "temperature": 1.0,
+        "response_format": "auto"
       }
     - title: Files
       request:
@@ -3064,7 +3076,10 @@ paths:
         "file_ids": [
           "file-abc123"
         ],
-        "metadata": {}
+        "metadata": {},
+        "top_p": 1.0,
+        "temperature": 1.0,
+        "response_format": "auto"
       }

   /assistants/{assistant_id}:
@@ -3238,7 +3253,10 @@ paths:
           "file-abc123",
           "file-abc456"
         ],
-        "metadata": {}
+        "metadata": {},
+        "top_p": 1.0,
+        "temperature": 1.0,
+        "response_format": "auto"
       }
   delete:
     operationId: deleteAssistant
@@ -4501,7 +4519,8 @@ paths:
           "completion_tokens": 456,
           "total_tokens": 579
         },
-        "temperature": 1,
+        "temperature": 1.0,
+        "top_p": 1.0,
         "max_prompt_tokens": 1000,
         "max_completion_tokens": 1000,
         "truncation_strategy": {
@@ -4542,7 +4561,8 @@ paths:
           "completion_tokens": 456,
           "total_tokens": 579
         },
-        "temperature": 1,
+        "temperature": 1.0,
+        "top_p": 1.0,
         "max_prompt_tokens": 1000,
         "max_completion_tokens": 1000,
         "truncation_strategy": {
@@ -4651,7 +4671,8 @@ paths:
         ],
         "metadata": {},
         "usage": null,
-        "temperature": 1,
+        "temperature": 1.0,
+        "top_p": 1.0,
         "max_prompt_tokens": 1000,
         "max_completion_tokens": 1000,
         "truncation_strategy": {
@@ -4996,7 +5017,8 @@ paths:
           "completion_tokens": 456,
           "total_tokens": 579
         },
-        "temperature": 1,
+        "temperature": 1.0,
+        "top_p": 1.0,
         "max_prompt_tokens": 1000,
         "max_completion_tokens": 1000,
         "truncation_strategy": {
@@ -5119,7 +5141,8 @@ paths:
           "completion_tokens": 456,
           "total_tokens": 579
         },
-        "temperature": 1,
+        "temperature": 1.0,
+        "top_p": 1.0,
         "max_prompt_tokens": 1000,
         "max_completion_tokens": 1000,
         "truncation_strategy": {
@@ -5266,7 +5289,8 @@ paths:
         "file_ids": [],
         "metadata": {},
         "usage": null,
-        "temperature": 1,
+        "temperature": 1.0,
+        "top_p": 1.0,
         "max_prompt_tokens": 1000,
         "max_completion_tokens": 1000,
         "truncation_strategy": {
@@ -5474,7 +5498,8 @@ paths:
         "file_ids": [],
         "metadata": {},
         "usage": null,
-        "temperature": 1
+        "temperature": 1.0,
+        "top_p": 1.0
       }

   /threads/{thread_id}/runs/{run_id}/steps:
@@ -7775,7 +7800,7 @@ components:
           description: |
             The ID of an uploaded file that contains training data.

-            See [upload file](/docs/api-reference/files/upload) for how to upload a file.
+            See [upload file](/docs/api-reference/files/create) for how to upload a file.

             Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`.

@@ -8770,6 +8795,33 @@ components:
         - total_tokens
       nullable: true

+    AssistantsApiResponseFormatOption:
+      description: |
+        Specifies the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+
+        Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+
+        **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
+      oneOf:
+        - type: string
+          description: >
+            `auto` is the default value
+          enum: [none, auto]
+        - $ref: "#/components/schemas/AssistantsApiResponseFormat"
+      x-oaiExpandable: true
+
+    AssistantsApiResponseFormat:
+      type: object
+      description: |
+        An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed.
+      properties:
+        type:
+          type: string
+          enum: ["text", "json_object"]
+          example: "json_object"
+          default: "text"
+          description: Must be one of `text` or `json_object`.
+
     AssistantObject:
       type: object
       title: Assistant
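
To make the JSON-mode caveat in this schema concrete, here is a minimal sketch using the Python SDK (assuming a client release that exposes the `response_format` field this commit adds to the spec; the model name and prompt are illustrative):

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# JSON mode: per the schema description, the instructions must themselves
# ask for JSON, or the model may emit whitespace until the token limit.
assistant = client.beta.assistants.create(
    model="gpt-4-turbo",
    instructions=(
        "You are a helpful assistant designed to make me better at coding! "
        "Always respond with a single JSON object."
    ),
    response_format={"type": "json_object"},  # AssistantsApiResponseFormat
)

print(assistant.response_format)
```

Passing a bare string (`"auto"` or `"none"`) instead exercises the first `oneOf` branch of `AssistantsApiResponseFormatOption`.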
@@ -8918,6 +8970,29 @@ components:
           type: object
           x-oaiTypeLabel: map
           nullable: true
+        temperature:
+          description: &run_temperature_description |
+            What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+          type: number
+          minimum: 0
+          maximum: 2
+          default: 1
+          example: 1
+          nullable: true
+        top_p:
+          type: number
+          minimum: 0
+          maximum: 1
+          default: 1
+          example: 1
+          nullable: true
+          description: &run_top_p_description |
+            An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+
+            We generally recommend altering this or temperature but not both.
+        response_format:
+          $ref: "#/components/schemas/AssistantsApiResponseFormatOption"
+          nullable: true
       required:
         - model

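The new `CreateAssistantRequest` properties in use, as a sketch under the same assumptions (values are illustrative; per the `run_top_p_description` text above, you would normally tune `temperature` or `top_p`, not both):

```python
from openai import OpenAI

client = OpenAI()

assistant = client.beta.assistants.create(
    model="gpt-4-turbo",
    instructions="You are a helpful assistant designed to make me better at coding!",
    temperature=0.2,  # range 0-2; lower is more focused and deterministic
    top_p=1.0,        # range 0-1; left at the default while tuning temperature
)
```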
@@ -8968,6 +9043,29 @@ components:
           type: object
           x-oaiTypeLabel: map
           nullable: true
+        temperature:
+          description: &run_temperature_description |
+            What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+          type: number
+          minimum: 0
+          maximum: 2
+          default: 1
+          example: 1
+          nullable: true
+        top_p:
+          type: number
+          minimum: 0
+          maximum: 1
+          default: 1
+          example: 1
+          nullable: true
+          description: &run_top_p_description |
+            An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+
+            We generally recommend altering this or temperature but not both.
+        response_format:
+          $ref: "#/components/schemas/AssistantsApiResponseFormatOption"
+          nullable: true

     DeleteAssistantResponse:
       type: object
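
`ModifyAssistantRequest` gains the same three nullable properties, so existing assistants can be updated in place. A sketch under the same assumptions (`asst_abc123` is the placeholder ID used in the examples above):

```python
from openai import OpenAI

client = OpenAI()

# All three fields are nullable in the schema; supplying a value
# overwrites the stored assistant-level setting.
updated = client.beta.assistants.update(
    "asst_abc123",
    temperature=1.0,
    top_p=0.9,
    response_format="auto",
)

print(updated.top_p)  # 0.9
```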
@@ -9101,33 +9199,6 @@ components:
       required:
         - type

-    AssistantsApiResponseFormatOption:
-      description: |
-        Specifies the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
-
-        Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
-
-        **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
-      oneOf:
-        - type: string
-          description: >
-            `auto` is the default value
-          enum: [none, auto]
-        - $ref: "#/components/schemas/AssistantsApiResponseFormat"
-      x-oaiExpandable: true
-
-    AssistantsApiResponseFormat:
-      type: object
-      description: |
-        An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed.
-      properties:
-        type:
-          type: string
-          enum: ["text", "json_object"]
-          example: "json_object"
-          default: "text"
-          description: Must be one of `text` or `json_object`.
-
     RunObject:
       type: object
       title: A run on a thread
@@ -9264,6 +9335,10 @@ components:
           description: The sampling temperature used for this run. If not set, defaults to 1.
           type: number
           nullable: true
+        top_p:
+          description: The nucleus sampling value used for this run. If not set, defaults to 1.
+          type: number
+          nullable: true
         max_prompt_tokens:
           type: integer
           nullable: true
@@ -9339,7 +9414,8 @@ components:
           "completion_tokens": 456,
           "total_tokens": 579
         },
-        "temperature": 1,
+        "temperature": 1.0,
+        "top_p": 1.0,
         "max_prompt_tokens": 1000,
         "max_completion_tokens": 1000,
         "truncation_strategy": {
@@ -9422,8 +9498,18 @@ components:
           default: 1
           example: 1
           nullable: true
-          description: &run_temperature_description |
-            What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+          description: *run_temperature_description
+        top_p:
+          type: number
+          minimum: 0
+          maximum: 1
+          default: 1
+          example: 1
+          nullable: true
+          description: &run_top_p_description |
+            An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+
+            We generally recommend altering this or temperature but not both.
         stream:
           type: boolean
           nullable: true
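
`CreateRunRequest` reuses the same description anchors, so the knobs can also be set per run. A sketch under the same assumptions (thread and assistant IDs are placeholders); the values are echoed back on the `RunObject`, including the `top_p` field added above:

```python
from openai import OpenAI

client = OpenAI()

run = client.beta.threads.runs.create(
    thread_id="thread_abc123",
    assistant_id="asst_abc123",
    temperature=1.0,
    top_p=0.1,  # only tokens in the top 10% probability mass are considered
)

print(run.temperature, run.top_p)  # per-run values, not the assistant defaults
```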
@@ -9606,6 +9692,14 @@ components:
           example: 1
           nullable: true
           description: *run_temperature_description
+        top_p:
+          type: number
+          minimum: 0
+          maximum: 1
+          default: 1
+          example: 1
+          nullable: true
+          description: *run_top_p_description
         stream:
           type: boolean
           nullable: true
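
Finally, `CreateThreadAndRunRequest` picks up the same `top_p`, keeping the one-shot endpoint in sync. A sketch under the same assumptions:

```python
from openai import OpenAI

client = OpenAI()

# createThreadAndRun: create the thread and start the run in one call,
# applying the new per-run top_p.
run = client.beta.threads.create_and_run(
    assistant_id="asst_abc123",
    thread={
        "messages": [
            {"role": "user", "content": "Show me an example list comprehension."}
        ]
    },
    top_p=0.9,
)
```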
