File tree Expand file tree Collapse file tree 3 files changed +5
-5
lines changed Expand file tree Collapse file tree 3 files changed +5
-5
lines changed Original file line number Diff line number Diff line change @@ -84,7 +84,7 @@ def convert_a2a_part_to_genai_part(
84
84
return None
85
85
86
86
if isinstance (part , a2a_types .DataPart ):
87
- # Convert the Data Part to funcall and function reponse .
87
+ # Convert the Data Part to funcall and function response .
88
88
# This is mainly for converting human in the loop and auth request and
89
89
# response.
90
90
# TODO once A2A defined how to surface such information, migrate below
@@ -179,7 +179,7 @@ def convert_genai_part_to_a2a_part(
179
179
180
180
return a2a_types .Part (root = a2a_part )
181
181
182
- # Convert the funcall and function reponse to A2A DataPart.
182
+ # Convert the funcall and function response to A2A DataPart.
183
183
# This is mainly for converting human in the loop and auth request and
184
184
# response.
185
185
# TODO once A2A defined how to surface such information, migrate below
Original file line number Diff line number Diff line change @@ -199,7 +199,7 @@ async def _evaluate_single_inference_result(
199
199
200
200
# We also keep track of the overall score for a metric, derived from all
201
201
# invocation. For example, if we were keeping track of the metric that compares
202
- # how well is the final resposne as compared to a golden answer, then each
202
+ # how well is the final response as compared to a golden answer, then each
203
203
# invocation will have the value of this metric. We will also have an
204
204
# overall score using aggregation strategy across all invocations. This
205
205
# would be the score for the eval case.
Original file line number Diff line number Diff line change @@ -36,7 +36,7 @@ class ResponseEvaluator(Evaluator):
36
36
37
37
This class supports two metrics:
38
38
1) response_evaluation_score
39
- This metric evaluates how coherent agent's resposne was.
39
+ This metric evaluates how coherent agent's response was.
40
40
41
41
Value range of this metric is [1,5], with values closer to 5 more desirable.
42
42
@@ -81,7 +81,7 @@ def get_metric_info(metric_name: str) -> MetricInfo:
81
81
return MetricInfo (
82
82
metric_name = PrebuiltMetrics .RESPONSE_EVALUATION_SCORE .value ,
83
83
description = (
84
- "This metric evaluates how coherent agent's resposne was. Value"
84
+ "This metric evaluates how coherent agent's response was. Value"
85
85
" range of this metric is [1,5], with values closer to 5 more"
86
86
" desirable."
87
87
),
You can’t perform that action at this time.
0 commit comments