@@ -33,8 +33,6 @@ class AbstractMTBenchEvaluator(Evaluator):
         model_name                  Name of the model to evaluate
         judge_model_name            Name of the judge model
         output_dir                  The directory to use for evaluation output
-        max_workers                 Max parallel workers to run the evaluation with (int or "auto")
-        serving_gpus                Number of gpus allocated for serving. Used to tune with max_workers=auto.
         merge_system_user_message   Boolean indicating whether to merge system and user messages (required for Mistral based judges)
     """

@@ -43,16 +41,12 @@ def __init__(
         model_name: str,
         judge_model_name: str,
         output_dir: str = "eval_output",
-        max_workers: int | str = "auto",
-        serving_gpus: int | None = None,
         merge_system_user_message: bool = False,
     ) -> None:
         self.model_name = model_name
         self.judge_model_name = judge_model_name
         self.output_dir = output_dir
-        self.serving_gpus = serving_gpus
         self.merge_system_user_message = merge_system_user_message
-        self.max_workers = self._calc_max_workers(max_workers, serving_gpus)

     def _calc_max_workers(
         self, max_workers: int | str | None, serving_gpus: int | None
@@ -105,8 +99,6 @@ class MTBenchEvaluator(AbstractMTBenchEvaluator):
         model_name                  Name of the model to evaluate
         judge_model_name            Name of the judge model
         output_dir                  The directory to use for evaluation output
-        max_workers                 Max parallel workers to run the evaluation with (int or "auto")
-        serving_gpus                Number of gpus allocated for serving. Used to tune with max_workers=auto.
         merge_system_user_message   Boolean indicating whether to merge system and user messages (required for Mistral based judges)
     """

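With max_workers and serving_gpus dropped from the signature, constructing MTBenchEvaluator now takes only the model, judge, and output settings. A minimal sketch of the updated call follows; the import path and model names are assumptions for illustration, not taken from this diff:

```python
# Sketch of the simplified constructor; import path and model names are assumed.
from instructlab.eval.mt_bench import MTBenchEvaluator

evaluator = MTBenchEvaluator(
    model_name="granite-7b-lab",             # assumed example model
    judge_model_name="mistral-7b-instruct",  # assumed example judge
    output_dir="eval_output",
    merge_system_user_message=True,          # required for Mistral based judges
)
```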
@@ -180,8 +172,6 @@ class MTBenchBranchEvaluator(AbstractMTBenchEvaluator):
         taxonomy_git_repo_path      Taxonomy git repo path
         branch                      Branch of taxonomy repo to eval QNAs against model
         output_dir                  The directory to use for evaluation output
-        max_workers                 Max parallel workers to run the evaluation with (int or "auto")
-        serving_gpus                Number of gpus allocated for serving. Used to tune with max_workers=auto.
         merge_system_user_message   Boolean indicating whether to merge system and user messages (required for Mistral based judges)
     """

@@ -194,16 +184,12 @@ def __init__(
         taxonomy_git_repo_path: str,
         branch: str,
         output_dir: str = "eval_output",
-        max_workers: int | str = "auto",
-        serving_gpus: int | None = None,
         merge_system_user_message: bool = False,
     ) -> None:
         super().__init__(
             model_name,
             judge_model_name,
             output_dir,
-            max_workers,
-            serving_gpus,
             merge_system_user_message,
         )
         self.taxonomy_git_repo_path = taxonomy_git_repo_path
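MTBenchBranchEvaluator gets the same simplification, with the taxonomy arguments passed through as before. A sketch under the same assumptions (import path, model names, and paths are illustrative):

```python
# Sketch of the simplified branch evaluator; paths and names are assumed.
from instructlab.eval.mt_bench import MTBenchBranchEvaluator

branch_evaluator = MTBenchBranchEvaluator(
    model_name="granite-7b-lab",                 # assumed example model
    judge_model_name="mistral-7b-instruct",      # assumed example judge
    taxonomy_git_repo_path="/path/to/taxonomy",  # local taxonomy checkout (assumed)
    branch="my-branch",
    output_dir="eval_output",
    merge_system_user_message=True,              # required for Mistral based judges
)
```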