
Commit 21d798a

[serving] fix schema parameters naming (#429)
1 parent e56d5ce commit 21d798a

File tree

1 file changed: +8 −8


dataflow/serving/api_google_vertexai_serving.py

Lines changed: 8 additions & 8 deletions
@@ -312,7 +312,7 @@ def generate_from_input(
         self,
         user_inputs: List[str],
         system_prompt: str = "",
-        response_schema: Optional[Union[type[BaseModel], dict]] = None,
+        json_schema: Optional[Union[type[BaseModel], dict]] = None,
         use_function_call: Optional[bool] = None,
         use_batch: Optional[bool] = None,
         batch_wait: Optional[bool] = None,
@@ -326,7 +326,7 @@ def generate_from_input(
         Args:
             user_inputs: List of user input strings to process.
             system_prompt: System prompt for the model.
-            response_schema: Optional Pydantic BaseModel or dict for structured output.
+            json_schema: Optional Pydantic BaseModel or dict for structured output.
             use_batch: If True, use batch processing via BigQuery. If False, use parallel real-time generation.
             batch_wait: If True (and use_batch=True), wait for batch job to complete and return results.
                 If False, return the batch job name immediately for later retrieval.
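
For reference, a call under the renamed keyword looks like the minimal sketch below. It is condensed from the usage examples later in this diff; gemini_server_json and UserDetails are stand-ins taken from those examples, and the prompt strings are illustrative only:

from pydantic import BaseModel

class UserDetails(BaseModel):
    name: str
    age: int
    city: str

# The keyword is now json_schema (was response_schema); it accepts a
# Pydantic BaseModel subclass, as here, or a plain JSON-schema dict.
results = gemini_server_json.generate_from_input(
    ["John Doe is 30 years old and lives in New York."],
    "Extract the user's name, age, and city as JSON.",
    json_schema=UserDetails,
)
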
@@ -357,7 +357,7 @@ def generate_from_input(
             return self._generate_with_batch(
                 user_inputs=user_inputs,
                 system_prompt=system_prompt,
-                response_schema=response_schema,
+                response_schema=json_schema,
                 use_function_call=use_function_call,
                 wait_for_completion=batch_wait,
                 dataset_name=batch_dataset,
@@ -368,7 +368,7 @@ def generate_from_input(
             return self._generate_with_parallel(
                 user_inputs=user_inputs,
                 system_prompt=system_prompt,
-                response_schema=response_schema,
+                response_schema=json_schema,
                 use_function_call=use_function_call,
             )
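
Design note: the rename is confined to the public signature; the private helpers keep their response_schema parameter, so generate_from_input simply forwards the new keyword under the old internal name, as the two hunks above show. A condensed sketch of that forwarding pattern (the class name is assumed from the file path, and unrelated parameters are collapsed into **kwargs):

class APIGoogleVertexAIServing:  # class name assumed from the file path
    def generate_from_input(self, user_inputs, system_prompt="", json_schema=None, **kwargs):
        # Public keyword json_schema is passed on as the internal response_schema.
        return self._generate_with_parallel(
            user_inputs=user_inputs,
            system_prompt=system_prompt,
            response_schema=json_schema,
            **kwargs,
        )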

@@ -947,7 +947,7 @@ class UserDetails(BaseModel):
         "John Doe is 30 years old and lives in New York.",
         "My name is Jane Smith, I am 25, and I reside in London."
     ]
-    results_json = gemini_server_json.generate_from_input(user_prompts_json, system_prompt_json, response_schema=UserDetails)  # Pass the schema here
+    results_json = gemini_server_json.generate_from_input(user_prompts_json, system_prompt_json, json_schema=UserDetails)  # Pass the schema here
     print("--- Generation Complete ---")
     for i, (prompt, result) in enumerate(zip(user_prompts_json, results_json)):
         print(f"\n[Prompt {i+1}]: {prompt}")
@@ -975,7 +975,7 @@ class UserDetails(BaseModel):
         "Alice Johnson is 28 years old and lives in San Francisco.",
         "Bob Brown, aged 35, resides in Toronto."
     ]
-    results_json_schema = gemini_server_json_schema.generate_from_input(user_prompts_json_schema, system_prompt_json_schema, response_schema=json_schema)
+    results_json_schema = gemini_server_json_schema.generate_from_input(user_prompts_json_schema, system_prompt_json_schema, json_schema=json_schema)
     print("--- Generation Complete ---")
     for i, (prompt, result) in enumerate(zip(user_prompts_json_schema, results_json_schema)):
         print(f"\n[Prompt {i+1}]: {prompt}")
@@ -1007,7 +1007,7 @@ class Capital(BaseModel):
     batch_job_name = gemini_server_batch.generate_from_input(
         user_inputs=user_prompts_batch,
         system_prompt=system_prompt_batch,
-        response_schema=Capital,
+        json_schema=Capital,
         use_batch=True,
         batch_wait=False  # Don't wait for completion
     )
@@ -1027,7 +1027,7 @@ class Capital(BaseModel):
     results_batch = gemini_server_batch.generate_from_input(
         user_inputs=user_prompts_batch,
         system_prompt=system_prompt_batch,
-        response_schema=Capital,
+        json_schema=Capital,
         use_batch=True,
         batch_wait=True  # Wait for completion
     )
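
Usage note: with use_batch=True and batch_wait=False, the call returns the batch job name immediately (per the updated docstring), so results can be fetched later. A hedged sketch of that flow; retrieve_batch_results is a hypothetical method name used only for illustration, since the actual retrieval API is not shown in this diff:

# Submit the batch job without blocking; only the job name comes back.
batch_job_name = gemini_server_batch.generate_from_input(
    user_inputs=user_prompts_batch,
    system_prompt=system_prompt_batch,
    json_schema=Capital,
    use_batch=True,
    batch_wait=False,
)

# Later: fetch results by job name. NOTE: retrieve_batch_results is a
# hypothetical name; the real retrieval method is not part of this diff.
results_batch = gemini_server_batch.retrieve_batch_results(batch_job_name)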
