@@ -86,6 +86,7 @@ def create_llm_inputs(
8686 add_model_name : bool = False ,
8787 add_stream : bool = False ,
8888 tokenizer : AutoTokenizer = DEFAULT_TOKENIZER ,
89+ extra_inputs : Dict = {},
8990 ) -> Dict :
9091 """
9192 Given an input type, input format, and output type. Output a string of LLM Inputs
@@ -109,9 +110,11 @@ def create_llm_inputs(
109110 length:
110111 Number of entries to gather
111112 add_model_name:
112- If true adds a model name field to each payload
113+ If true, adds a model name field to each payload
113114 add_stream:
114- If true adds a steam field to each payload
115+ If true, adds a stream field to each payload
116+ extra_inputs:
117+ If provided, appends these inputs to every request
115118
116119 Required Synthetic Prompt Generation Parameters
117120 -----------------------------------------------
@@ -164,7 +167,12 @@ def create_llm_inputs(
164167 )
165168
166169 json_in_pa_format = LlmInputs ._convert_generic_json_to_output_format (
167- output_format , generic_dataset_json , add_model_name , add_stream , model_name
170+ output_format ,
171+ generic_dataset_json ,
172+ add_model_name ,
173+ add_stream ,
174+ model_name ,
175+ extra_inputs ,
168176 )
169177 LlmInputs ._write_json_to_file (json_in_pa_format )
170178
@@ -309,24 +317,29 @@ def _convert_generic_json_to_output_format(
309317 add_model_name : bool ,
310318 add_stream : bool ,
311319 model_name : str = "" ,
320+ extra_inputs : Dict = {},
312321 ) -> Dict :
313322 if output_format == OutputFormat .OPENAI_CHAT_COMPLETIONS :
314323 output_json = (
315324 LlmInputs ._convert_generic_json_to_openai_chat_completions_format (
316- generic_dataset , add_model_name , add_stream , model_name
325+ generic_dataset ,
326+ add_model_name ,
327+ add_stream ,
328+ model_name ,
329+ extra_inputs ,
317330 )
318331 )
319332 elif output_format == OutputFormat .OPENAI_COMPLETIONS :
320333 output_json = LlmInputs ._convert_generic_json_to_openai_completions_format (
321- generic_dataset , add_model_name , add_stream , model_name
334+ generic_dataset , add_model_name , add_stream , model_name , extra_inputs
322335 )
323336 elif output_format == OutputFormat .VLLM :
324337 output_json = LlmInputs ._convert_generic_json_to_vllm_format (
325- generic_dataset , add_model_name , add_stream , model_name
338+ generic_dataset , add_model_name , add_stream , model_name , extra_inputs
326339 )
327340 elif output_format == OutputFormat .TRTLLM :
328341 output_json = LlmInputs ._convert_generic_json_to_trtllm_format (
329- generic_dataset , add_model_name , add_stream , model_name
342+ generic_dataset , add_model_name , add_stream , model_name , extra_inputs
330343 )
331344 else :
332345 raise GenAIPerfException (
@@ -342,6 +355,7 @@ def _convert_generic_json_to_openai_chat_completions_format(
342355 add_model_name : bool ,
343356 add_stream : bool ,
344357 model_name : str = "" ,
358+ extra_inputs : Dict = {},
345359 ) -> Dict :
346360 # TODO (TMA-1757): Implement a way to select a role for `text_input`
347361 (
@@ -356,6 +370,7 @@ def _convert_generic_json_to_openai_chat_completions_format(
356370 add_model_name ,
357371 add_stream ,
358372 model_name ,
373+ extra_inputs ,
359374 )
360375
361376 return pa_json
@@ -367,6 +382,7 @@ def _convert_generic_json_to_openai_completions_format(
367382 add_model_name : bool ,
368383 add_stream : bool ,
369384 model_name : str = "" ,
385+ extra_inputs : Dict = {},
370386 ) -> Dict :
371387 (
372388 system_role_headers ,
@@ -381,6 +397,7 @@ def _convert_generic_json_to_openai_completions_format(
381397 add_model_name ,
382398 add_stream ,
383399 model_name ,
400+ extra_inputs ,
384401 )
385402
386403 return pa_json
@@ -392,6 +409,7 @@ def _convert_generic_json_to_vllm_format(
392409 add_model_name : bool ,
393410 add_stream : bool ,
394411 model_name : str = "" ,
412+ extra_inputs : Dict = {},
395413 ) -> Dict :
396414 (
397415 system_role_headers ,
@@ -407,6 +425,7 @@ def _convert_generic_json_to_vllm_format(
407425 add_model_name ,
408426 add_stream ,
409427 model_name ,
428+ extra_inputs ,
410429 )
411430
412431 return pa_json
@@ -418,6 +437,7 @@ def _convert_generic_json_to_trtllm_format(
418437 add_model_name : bool ,
419438 add_stream : bool ,
420439 model_name : str = "" ,
440+ extra_inputs : Dict = {},
421441 ) -> Dict :
422442 (
423443 system_role_headers ,
@@ -433,6 +453,7 @@ def _convert_generic_json_to_trtllm_format(
433453 add_model_name ,
434454 add_stream ,
435455 model_name ,
456+ extra_inputs ,
436457 )
437458
438459 return pa_json
@@ -480,6 +501,7 @@ def _populate_openai_chat_completions_output_json(
480501 add_model_name : bool ,
481502 add_stream : bool ,
482503 model_name : str = "" ,
504+ extra_inputs : Dict = {},
483505 ) -> Dict :
484506 pa_json = LlmInputs ._create_empty_openai_pa_json ()
485507
@@ -497,7 +519,7 @@ def _populate_openai_chat_completions_output_json(
497519 )
498520
499521 pa_json = LlmInputs ._add_optional_tags_to_openai_json (
500- pa_json , index , add_model_name , add_stream , model_name
522+ pa_json , index , add_model_name , add_stream , model_name , extra_inputs
501523 )
502524
503525 return pa_json
@@ -512,6 +534,7 @@ def _populate_openai_completions_output_json(
512534 add_model_name : bool ,
513535 add_stream : bool ,
514536 model_name : str = "" ,
537+ extra_inputs : Dict = {},
515538 ) -> Dict :
516539 pa_json = LlmInputs ._create_empty_openai_pa_json ()
517540
@@ -531,7 +554,7 @@ def _populate_openai_completions_output_json(
531554 pa_json = LlmInputs ._add_new_prompt_to_json (pa_json , index , new_prompt )
532555
533556 pa_json = LlmInputs ._add_optional_tags_to_openai_json (
534- pa_json , index , add_model_name , add_stream , model_name
557+ pa_json , index , add_model_name , add_stream , model_name , extra_inputs
535558 )
536559
537560 return pa_json
@@ -546,6 +569,7 @@ def _populate_vllm_output_json(
546569 add_model_name : bool ,
547570 add_stream : bool ,
548571 model_name : str = "" ,
572+ extra_inputs : Dict = {},
549573 ) -> Dict :
550574 pa_json = LlmInputs ._create_empty_vllm_pa_json ()
551575
@@ -566,7 +590,7 @@ def _populate_vllm_output_json(
566590 )
567591
568592 pa_json = LlmInputs ._add_optional_tags_to_vllm_json (
569- pa_json , index , add_model_name , add_stream , model_name
593+ pa_json , index , add_model_name , add_stream , model_name , extra_inputs
570594 )
571595
572596 return pa_json
@@ -581,6 +605,7 @@ def _populate_trtllm_output_json(
581605 add_model_name : bool ,
582606 add_stream : bool ,
583607 model_name : str = "" ,
608+ extra_inputs : Dict = {},
584609 ) -> Dict :
585610 pa_json = LlmInputs ._create_empty_trtllm_pa_json ()
586611
@@ -602,7 +627,7 @@ def _populate_trtllm_output_json(
602627
603628 pa_json = LlmInputs ._add_required_tags_to_trtllm_json (pa_json , index )
604629 pa_json = LlmInputs ._add_optional_tags_to_trtllm_json (
605- pa_json , index , add_model_name , add_stream , model_name
630+ pa_json , index , add_model_name , add_stream , model_name , extra_inputs
606631 )
607632
608633 return pa_json
@@ -737,11 +762,14 @@ def _add_optional_tags_to_openai_json(
737762 add_model_name : bool ,
738763 add_stream : bool ,
739764 model_name : str = "" ,
765+ extra_inputs : Dict = {},
740766 ) -> Dict :
741767 if add_model_name :
742768 pa_json ["data" ][index ]["payload" ][0 ]["model" ] = model_name
743769 if add_stream :
744770 pa_json ["data" ][index ]["payload" ][0 ]["stream" ] = True
771+ for key , value in extra_inputs .items ():
772+ pa_json ["data" ][index ]["payload" ][0 ][key ] = value
745773
746774 return pa_json
747775
@@ -753,11 +781,14 @@ def _add_optional_tags_to_vllm_json(
753781 add_model_name : bool ,
754782 add_stream : bool ,
755783 model_name : str = "" ,
784+ extra_inputs : Dict = {},
756785 ) -> Dict :
757786 if add_model_name :
758787 pa_json ["data" ][index ]["model" ] = model_name
759788 if add_stream :
760789 pa_json ["data" ][index ]["stream" ] = [True ]
790+ for key , value in extra_inputs .items ():
791+ pa_json ["data" ][index ][key ] = [value ]
761792
762793 return pa_json
763794
@@ -769,11 +800,14 @@ def _add_optional_tags_to_trtllm_json(
769800 add_model_name : bool ,
770801 add_stream : bool ,
771802 model_name : str = "" ,
803+ extra_inputs : Dict = {},
772804 ) -> Dict :
773805 if add_model_name :
774806 pa_json ["data" ][index ]["model" ] = model_name
775807 if add_stream :
776808 pa_json ["data" ][index ]["stream" ] = [True ]
809+ for key , value in extra_inputs .items ():
810+ pa_json ["data" ][index ][key ] = [value ]
777811
778812 return pa_json
779813
0 commit comments