@@ -24,9 +24,10 @@ class Models:
2424 def call (
2525 cls ,
2626 model : str ,
27- messages : List ,
27+ messages : List = None ,
2828 api_key : str = None ,
2929 workspace : str = None ,
30+ text : str = None ,
3031 ** kwargs
3132 ) -> Union [MultiModalConversationResponse , Generator [
3233 MultiModalConversationResponse , None , None ]]:
@@ -55,6 +56,7 @@ def call(
5556 if None, will retrieve by rule [1].
5657 [1]: https://help.aliyun.com/zh/dashscope/developer-reference/api-key-settings. # noqa E501
5758 workspace (str): The dashscope workspace id.
59+ text (str): The input text to synthesize into speech (used by qwen-tts models).
5860 **kwargs:
5961 stream(bool, `optional`): Enable server-sent events
6062 (ref: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) # noqa E501
@@ -68,8 +70,11 @@ def call(
6870 tokens with top_p probability mass. So 0.1 means only
6971 the tokens comprising the top 10% probability mass are
7072 considered[qwen-turbo,bailian-v1].
73+ voice(string, `optional`): The voice name for qwen-tts, e.g. 'Cherry', 'Ethan', 'Sunny', or 'Dylan';
74+ the full list of supported voices is available at https://help.aliyun.com/zh/model-studio/qwen-tts.
7175 top_k(float, `optional`):
7276
77+
7378 Raises:
7479 InvalidInput: The history and auto_history are mutually exclusive.
7580
@@ -78,18 +83,24 @@ def call(
7883 Generator[MultiModalConversationResponse, None, None]]: If
7984 stream is True, return Generator, otherwise MultiModalConversationResponse.
8085 """
81- if (messages is None or not messages ):
82- raise InputRequired ('prompt or messages is required!' )
8386 if model is None or not model :
8487 raise ModelRequired ('Model is required!' )
8588 task_group , _ = _get_task_group_and_task (__name__ )
86- msg_copy = copy .deepcopy (messages )
87- has_upload = cls ._preprocess_messages (model , msg_copy , api_key )
88- if has_upload :
89- headers = kwargs .pop ('headers' , {})
90- headers ['X-DashScope-OssResourceResolve' ] = 'enable'
91- kwargs ['headers' ] = headers
92- input = {'messages' : msg_copy }
89+ input = {}
90+ msg_copy = None
91+
92+ if messages is not None and messages :
93+ msg_copy = copy .deepcopy (messages )
94+ has_upload = cls ._preprocess_messages (model , msg_copy , api_key )
95+ if has_upload :
96+ headers = kwargs .pop ('headers' , {})
97+ headers ['X-DashScope-OssResourceResolve' ] = 'enable'
98+ kwargs ['headers' ] = headers
99+
100+ if text is not None and text :
101+ input .update ({'text' : text })
102+ if msg_copy is not None :
103+ input .update ({'messages' : msg_copy })
93104 response = super ().call (model = model ,
94105 task_group = task_group ,
95106 task = MultiModalConversation .task ,
@@ -145,9 +156,10 @@ class Models:
145156 async def call (
146157 cls ,
147158 model : str ,
148- messages : List ,
159+ messages : List = None ,
149160 api_key : str = None ,
150161 workspace : str = None ,
162+ text : str = None ,
151163 ** kwargs
152164 ) -> Union [MultiModalConversationResponse , Generator [
153165 MultiModalConversationResponse , None , None ]]:
@@ -176,6 +188,7 @@ async def call(
176188 if None, will retrieve by rule [1].
177189 [1]: https://help.aliyun.com/zh/dashscope/developer-reference/api-key-settings. # noqa E501
178190 workspace (str): The dashscope workspace id.
191+ text (str): The input text to synthesize into speech (used by qwen-tts models).
179192 **kwargs:
180193 stream(bool, `optional`): Enable server-sent events
181194 (ref: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) # noqa E501
@@ -189,6 +202,8 @@ async def call(
189202 tokens with top_p probability mass. So 0.1 means only
190203 the tokens comprising the top 10% probability mass are
191204 considered[qwen-turbo,bailian-v1].
205+ voice(string, `optional`): The voice name for qwen-tts, e.g. 'Cherry', 'Ethan', 'Sunny', or 'Dylan';
206+ the full list of supported voices is available at https://help.aliyun.com/zh/model-studio/qwen-tts.
192207 top_k(float, `optional`):
193208
194209 Raises:
@@ -199,18 +214,24 @@ async def call(
199214 Generator[MultiModalConversationResponse, None, None]]: If
200215 stream is True, return Generator, otherwise MultiModalConversationResponse.
201216 """
202- if (messages is None or not messages ):
203- raise InputRequired ('prompt or messages is required!' )
204217 if model is None or not model :
205218 raise ModelRequired ('Model is required!' )
206219 task_group , _ = _get_task_group_and_task (__name__ )
207- msg_copy = copy .deepcopy (messages )
208- has_upload = cls ._preprocess_messages (model , msg_copy , api_key )
209- if has_upload :
210- headers = kwargs .pop ('headers' , {})
211- headers ['X-DashScope-OssResourceResolve' ] = 'enable'
212- kwargs ['headers' ] = headers
213- input = {'messages' : msg_copy }
220+ input = {}
221+ msg_copy = None
222+
223+ if messages is not None and messages :
224+ msg_copy = copy .deepcopy (messages )
225+ has_upload = cls ._preprocess_messages (model , msg_copy , api_key )
226+ if has_upload :
227+ headers = kwargs .pop ('headers' , {})
228+ headers ['X-DashScope-OssResourceResolve' ] = 'enable'
229+ kwargs ['headers' ] = headers
230+
231+ if text is not None and text :
232+ input .update ({'text' : text })
233+ if msg_copy is not None :
234+ input .update ({'messages' : msg_copy })
214235 response = await super ().call (model = model ,
215236 task_group = task_group ,
216237 task = AioMultiModalConversation .task ,
0 commit comments