|
25 | 25 | from writerai._exceptions import BadRequestError, WriterError |
26 | 26 | from writerai._response import BinaryAPIResponse |
27 | 27 | from writerai._streaming import Stream |
28 | | -from writerai._types import Body, Headers, NotGiven, Query |
| 28 | +from writerai._types import Body, Headers, NotGiven, Omit, Query |
29 | 29 | from writerai.resources import FilesResource, GraphsResource |
30 | 30 | from writerai.types import ( |
31 | 31 | ApplicationListResponse, |
@@ -95,40 +95,40 @@ class ChatOptions(APIOptions, total=False): |
95 | 95 | Iterable[ |
96 | 96 | Union[SDKGraphTool, SDKFunctionTool, SDKLlmTool, SDKWebSearchTool] |
97 | 97 | ], |
98 | | - NotGiven |
| 98 | + Omit |
99 | 99 | ] |
100 | | - response_format: Union[ResponseFormat, NotGiven] |
101 | | - logprobs: Union[bool, NotGiven] |
102 | | - max_tokens: Union[int, NotGiven] |
103 | | - n: Union[int, NotGiven] |
104 | | - stop: Union[List[str], str, NotGiven] |
105 | | - temperature: Union[float, NotGiven] |
106 | | - top_p: Union[float, NotGiven] |
| 100 | + response_format: Union[ResponseFormat, Omit] |
| 101 | + logprobs: Union[bool, Omit] |
| 102 | + max_tokens: Union[int, Omit] |
| 103 | + n: Union[int, Omit] |
| 104 | + stop: Union[List[str], str, Omit] |
| 105 | + temperature: Union[float, Omit] |
| 106 | + top_p: Union[float, Omit] |
107 | 107 |
|
108 | 108 |
|
class CreateOptions(APIOptions, total=False):
    """Optional request parameters for a (non-chat) text-completion call.

    ``total=False`` makes every key optional.  Fields typed
    ``Union[X, Omit]`` may be set to ``Omit()`` to leave the parameter
    out of the outgoing request entirely.
    """

    # Identifier of the model that should generate the completion.
    model: str
    # NOTE(review): presumably the number of server-side candidate
    # completions from which the best one is returned — confirm against
    # the completions API documentation.
    best_of: Union[int, Omit]
    # Maximum number of tokens to generate.
    max_tokens: Union[int, Omit]
    # Sampling seed — assumed to make generation reproducible; TODO confirm.
    random_seed: Union[int, Omit]
    # One sequence, or a list of sequences, where the API will stop
    # generating further tokens.
    stop: Union[List[str], str, Omit]
    # Controls the randomness/creativity of the model's responses: higher
    # values yield more varied text, lower values more deterministic output.
    temperature: Union[float, Omit]
    # Nucleus-sampling threshold: only tokens whose cumulative probability
    # falls within this mass are considered during generation.
    top_p: Union[float, Omit]
117 | 117 |
|
118 | 118 |
|
class APIListOptions(APIOptions, total=False):
    """Optional cursor-pagination parameters shared by list endpoints.

    ``total=False`` makes every key optional; setting a field to
    ``Omit()`` excludes it from the request.
    """

    # Cursor: only retrieve items created after this cursor.
    after: Union[str, Omit]
    # Cursor: only retrieve items created before this cursor.
    before: Union[str, Omit]
    # Number of items to retrieve.
    limit: Union[int, Omit]
    # Order in which to retrieve items (ascending or descending).
    order: Union[Literal["asc", "desc"], Omit]
124 | 124 |
|
125 | 125 |
|
class APIRetrieveJobsOptions(APIOptions, total=False):
    """Optional filters for retrieving asynchronous jobs.

    ``total=False`` makes every key optional; setting a field to
    ``Omit()`` excludes it from the request.
    """

    # Maximum number of jobs to retrieve.
    limit: Union[int, Omit]
    # Pagination offset into the job list — assumed zero-based; TODO confirm.
    offset: Union[int, Omit]
    # Restrict results to jobs in one of the given states.
    status: Union[
        Literal["completed", "failed", "in_progress"],
        Omit
    ]
133 | 133 |
|
134 | 134 |
|
@@ -479,8 +479,8 @@ def update( |
479 | 479 | graphs = self._retrieve_graphs_accessor() |
480 | 480 | response = graphs.update( |
481 | 481 | self.id, |
482 | | - name=payload.get("name", NotGiven()), |
483 | | - description=payload.get("description", NotGiven()), |
| 482 | + name=payload.get("name", Omit()), |
| 483 | + description=payload.get("description", Omit()), |
484 | 484 | **config |
485 | 485 | ) |
486 | 486 | Graph.stale_ids.add(self.id) |
@@ -744,7 +744,7 @@ def create_graph( |
744 | 744 | graphs = Graph._retrieve_graphs_accessor() |
745 | 745 | graph_object = graphs.create( |
746 | 746 | name=name, |
747 | | - description=description or NotGiven(), |
| 747 | + description=description or Omit(), |
748 | 748 | **config |
749 | 749 | ) |
750 | 750 | converted_object = cast(SDKGraph, graph_object) |
@@ -801,13 +801,13 @@ def list_graphs(config: Optional[APIListOptions] = None) -> List[Graph]: |
801 | 801 | Additional body parameters for the request. |
802 | 802 | - `timeout` (Union[float, httpx.Timeout, None, NotGiven]): |
803 | 803 | Timeout for the request in seconds. |
804 | | - - `after` (Union[str, NotGiven]): |
| 804 | + - `after` (Union[str, Omit]): |
805 | 805 | Filter to retrieve items created after a specific cursor. |
806 | | - - `before` (Union[str, NotGiven]): |
| 806 | + - `before` (Union[str, Omit]): |
807 | 807 | Filter to retrieve items created before a specific cursor. |
808 | | - - `limit` (Union[int, NotGiven]): |
| 808 | + - `limit` (Union[int, Omit]): |
809 | 809 | The number of items to retrieve. |
810 | | - - `order` (Union[Literal["asc", "desc"], NotGiven]): |
| 810 | + - `order` (Union[Literal["asc", "desc"], Omit]): |
811 | 811 | The order in which to retrieve items. |
812 | 812 | """ |
813 | 813 | config = config or {} |
@@ -942,13 +942,13 @@ def list_files(config: Optional[APIListOptions] = None) -> List[File]: |
942 | 942 | Additional body parameters for the request. |
943 | 943 | - `timeout` (Union[float, httpx.Timeout, None, NotGiven]): |
944 | 944 | Timeout for the request in seconds. |
945 | | - - `after` (Union[str, NotGiven]): |
| 945 | + - `after` (Union[str, Omit]): |
946 | 946 | Filter to retrieve items created after a specific cursor. |
947 | | - - `before` (Union[str, NotGiven]): |
| 947 | + - `before` (Union[str, Omit]): |
948 | 948 | Filter to retrieve items created before a specific cursor. |
949 | | - - `limit` (Union[int, NotGiven]): |
| 949 | + - `limit` (Union[int, Omit]): |
950 | 950 | The number of items to retrieve. |
951 | | - - `order` (Union[Literal["asc", "desc"], NotGiven]): |
| 951 | + - `order` (Union[Literal["asc", "desc"], Omit]): |
952 | 952 | The order in which to retrieve items. |
953 | 953 | """ |
954 | 954 | config = config or {} |
@@ -1127,23 +1127,23 @@ class Conversation: |
1127 | 1127 | Configure how the model will call functions: `auto` will allow the model |
1128 | 1128 | to automatically choose the best tool, `none` disables tool calling. |
1129 | 1129 | You can also pass a specific previously defined function. |
1130 | | - - `logprobs` (Union[bool, NotGiven]): |
| 1130 | + - `logprobs` (Union[bool, Omit]): |
1131 | 1131 | Specifies whether to return log probabilities of the output tokens. |
1132 | 1132 | - `tools` (Union[Iterable[Union[SDKGraphTool, |
1133 | | - SDKFunctionTool, SDKLlmTool]], NotGiven]): |
| 1133 | + SDKFunctionTool, SDKLlmTool]], Omit]): |
1134 | 1134 | Tools available for the model to use. |
1135 | | - - `max_tokens` (Union[int, NotGiven]): |
| 1135 | + - `max_tokens` (Union[int, Omit]): |
1136 | 1136 | Maximum number of tokens to generate. |
1137 | | - - `n` (Union[int, NotGiven]): |
| 1137 | + - `n` (Union[int, Omit]): |
1138 | 1138 | Number of completions to generate. |
1139 | | - - `stop` (Union[List[str], str, NotGiven]): |
| 1139 | + - `stop` (Union[List[str], str, Omit]): |
1140 | 1140 | Sequences where the API will stop generating tokens. |
1141 | | - - `temperature` (Union[float, NotGiven]): |
| 1141 | + - `temperature` (Union[float, Omit]): |
1142 | 1142 | Controls the randomness or creativity of the model's responses. |
1143 | 1143 | A higher temperature results in more varied and less predictable text, |
1144 | 1144 | while a lower temperature produces more deterministic |
1145 | 1145 | and conservative outputs. |
1146 | | - - `top_p` (Union[float, NotGiven]): |
| 1146 | + - `top_p` (Union[float, Omit]): |
1147 | 1147 | Sets the threshold for "nucleus sampling," a technique to focus the model's |
1148 | 1148 | token generation on the most likely subset of tokens. Only tokens with |
1149 | 1149 | cumulative probability above this threshold are considered, controlling the |
@@ -1910,25 +1910,25 @@ def _send_chat_request( |
1910 | 1910 | f"prepared messages – {prepared_messages}, " + |
1911 | 1911 | f"request_data – {request_data}" |
1912 | 1912 | ) |
1913 | | - tools = request_data.get('tools', NotGiven()) |
1914 | | - tool_choice: Union[ToolChoice, NotGiven] |
1915 | | - if isinstance(tools, NotGiven): |
1916 | | - tool_choice = NotGiven() |
| 1913 | + tools = request_data.get('tools', Omit()) |
| 1914 | + tool_choice: Union[ToolChoice, Omit] |
| 1915 | + if isinstance(tools, Omit): |
| 1916 | + tool_choice = Omit() |
1917 | 1917 | else: |
1918 | 1918 | tool_choice = request_data.get('tool_choice', cast(ToolChoice, 'auto')) |
1919 | 1919 | return client.chat.chat( |
1920 | 1920 | messages=prepared_messages, |
1921 | 1921 | model=request_model, |
1922 | 1922 | stream=stream, |
1923 | | - logprobs=request_data.get('logprobs', NotGiven()), |
| 1923 | + logprobs=request_data.get('logprobs', Omit()), |
1924 | 1924 | tools=tools, |
1925 | 1925 | tool_choice=tool_choice, |
1926 | | - response_format=request_data.get('response_format', NotGiven()), |
1927 | | - max_tokens=request_data.get('max_tokens', NotGiven()), |
1928 | | - n=request_data.get('n', NotGiven()), |
1929 | | - stop=request_data.get('stop', NotGiven()), |
1930 | | - temperature=request_data.get('temperature', NotGiven()), |
1931 | | - top_p=request_data.get('top_p', NotGiven()), |
| 1926 | + response_format=request_data.get('response_format', Omit()), |
| 1927 | + max_tokens=request_data.get('max_tokens', Omit()), |
| 1928 | + n=request_data.get('n', Omit()), |
| 1929 | + stop=request_data.get('stop', Omit()), |
| 1930 | + temperature=request_data.get('temperature', Omit()), |
| 1931 | + top_p=request_data.get('top_p', Omit()), |
1932 | 1932 | extra_headers=request_data.get('extra_headers'), |
1933 | 1933 | extra_query=request_data.get('extra_query'), |
1934 | 1934 | extra_body=request_data.get('extra_body'), |
@@ -2980,12 +2980,12 @@ def complete( |
2980 | 2980 | response_data: Completion = client.completions.create( |
2981 | 2981 | model=request_model, |
2982 | 2982 | prompt=initial_text, |
2983 | | - best_of=config.get("best_of", NotGiven()), |
2984 | | - max_tokens=config.get("max_tokens", NotGiven()), |
2985 | | - random_seed=config.get("random_seed", NotGiven()), |
2986 | | - stop=config.get("stop", NotGiven()), |
2987 | | - temperature=config.get("temperature", NotGiven()), |
2988 | | - top_p=config.get("top_p", NotGiven()), |
| 2983 | + best_of=config.get("best_of", Omit()), |
| 2984 | + max_tokens=config.get("max_tokens", Omit()), |
| 2985 | + random_seed=config.get("random_seed", Omit()), |
| 2986 | + stop=config.get("stop", Omit()), |
| 2987 | + temperature=config.get("temperature", Omit()), |
| 2988 | + top_p=config.get("top_p", Omit()), |
2989 | 2989 | extra_headers=config.get("extra_headers"), |
2990 | 2990 | extra_body=config.get("extra_body"), |
2991 | 2991 | extra_query=config.get("extra_query"), |
@@ -3026,12 +3026,12 @@ def stream_complete( |
3026 | 3026 | model=request_model, |
3027 | 3027 | prompt=initial_text, |
3028 | 3028 | stream=True, |
3029 | | - best_of=config.get("best_of", NotGiven()), |
3030 | | - max_tokens=config.get("max_tokens", NotGiven()), |
3031 | | - random_seed=config.get("random_seed", NotGiven()), |
3032 | | - stop=config.get("stop", NotGiven()), |
3033 | | - temperature=config.get("temperature", NotGiven()), |
3034 | | - top_p=config.get("top_p", NotGiven()), |
| 3029 | + best_of=config.get("best_of", Omit()), |
| 3030 | + max_tokens=config.get("max_tokens", Omit()), |
| 3031 | + random_seed=config.get("random_seed", Omit()), |
| 3032 | + stop=config.get("stop", Omit()), |
| 3033 | + temperature=config.get("temperature", Omit()), |
| 3034 | + top_p=config.get("top_p", Omit()), |
3035 | 3035 | extra_headers=config.get("extra_headers"), |
3036 | 3036 | extra_body=config.get("extra_body"), |
3037 | 3037 | extra_query=config.get("extra_query"), |
|
0 commit comments