@@ -1112,9 +1112,9 @@ def generate_client_response_streaming(
     model_input: Messages,
 ) -> Generator[YieldMessage, Any, tuple[Message, Any]]:
     msg_stream: Generator[Message, Any, Any]
-    model_input_str = messages_to_str(block.model, model_input)
     match block:
         case BamModelBlock():
+            model_input_str = messages_to_str(block.model, model_input)
             msg_stream = BamModel.generate_text_stream(
                 model_id=block.model,
                 prompt_id=block.prompt_id,
@@ -1172,9 +1172,9 @@ def generate_client_response_single(
     model_input: Messages,
 ) -> Generator[YieldMessage, Any, tuple[Message, Any]]:
     msg: Message
-    model_input_str = messages_to_str(block.model, model_input)
     match block:
         case BamModelBlock():
+            model_input_str = messages_to_str(block.model, model_input)
             msg, raw_result = BamModel.generate_text(
                 model_id=block.model,
                 prompt_id=block.prompt_id,
@@ -1203,9 +1203,9 @@ def generate_client_response_batching(  # pylint: disable=too-many-arguments
     # model: str,
     model_input: Messages,
 ) -> Generator[YieldMessage, Any, Message]:
-    model_input_str = messages_to_str(block.model, model_input)
     match block:
         case BamModelBlock():
+            model_input_str = messages_to_str(block.model, model_input)
             msg = yield ModelCallMessage(
                 model_id=block.model,
                 prompt_id=block.prompt_id,
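All three hunks make the same change in the client-response helpers shown above (`generate_client_response_streaming`, `generate_client_response_single`, `generate_client_response_batching`): the `messages_to_str` flattening of `model_input` moves from before the `match block:` dispatch into the `case BamModelBlock():` branch, so the string form is only computed for the BAM path that actually consumes it. Below is a minimal, self-contained sketch of that pattern; `Messages`, `OtherModelBlock`, and the helper bodies are illustrative placeholders, not the repository's code.

```python
from dataclasses import dataclass

# Illustrative alias: the real Messages type may differ.
Messages = list[dict[str, str]]


@dataclass
class BamModelBlock:
    model: str
    prompt_id: str | None = None


@dataclass
class OtherModelBlock:
    # Hypothetical stand-in for the non-BAM cases in the real dispatch.
    model: str


def messages_to_str(model: str, messages: Messages) -> str:
    # Simplified stand-in for the real helper: flatten chat messages into one prompt string.
    return "\n".join(m["content"] for m in messages)


def dispatch(block: BamModelBlock | OtherModelBlock, model_input: Messages) -> str:
    match block:
        case BamModelBlock():
            # Flatten only here, where the string form is actually needed.
            model_input_str = messages_to_str(block.model, model_input)
            return f"BAM backend receives prompt: {model_input_str!r}"
        case OtherModelBlock():
            # Other backends keep the structured messages untouched.
            return f"other backend receives {len(model_input)} structured messages"
    raise TypeError(f"unsupported block: {block!r}")


msgs: Messages = [{"role": "user", "content": "Hello"}]
print(dispatch(BamModelBlock(model="example-bam-model"), msgs))
print(dispatch(OtherModelBlock(model="example-chat-model"), msgs))
```

One benefit of this scoping, beyond avoiding an unused computation on non-BAM paths, is that the other branches no longer depend on a string-flattened view of the conversation at all, which keeps structured message handling isolated to the backends that need it.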