Commit 5d37bb8

chore: Fix spelling: response
Signed-off-by: Josh Soref <[email protected]>
1 parent f767515

3 files changed: +5 -5 lines

src/google/adk/a2a/converters/part_converter.py

Lines changed: 2 additions & 2 deletions

@@ -84,7 +84,7 @@ def convert_a2a_part_to_genai_part(
     return None
 
   if isinstance(part, a2a_types.DataPart):
-    # Convert the Data Part to funcall and function reponse.
+    # Convert the Data Part to funcall and function response.
     # This is mainly for converting human in the loop and auth request and
     # response.
     # TODO once A2A defined how to suervice such information, migrate below
@@ -179,7 +179,7 @@ def convert_genai_part_to_a2a_part(
 
     return a2a_types.Part(root=a2a_part)
 
-  # Convert the funcall and function reponse to A2A DataPart.
+  # Convert the funcall and function response to A2A DataPart.
   # This is mainly for converting human in the loop and auth request and
   # response.
   # TODO once A2A defined how to suervice such information, migrate below
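For orientation, a minimal hypothetical round-trip sketch for the two converters touched above. Only Part(root=...), DataPart, and the two function names appear in this diff; the a2a_types import path and the DataPart payload shape are assumptions.

# Hypothetical usage sketch; the import paths and the payload shape are
# assumptions, not taken from this commit.
from a2a import types as a2a_types

from google.adk.a2a.converters.part_converter import (
    convert_a2a_part_to_genai_part,
    convert_genai_part_to_a2a_part,
)

# A DataPart carrying a function response, e.g. from a human-in-the-loop
# or auth flow (assumed payload shape).
a2a_part = a2a_types.Part(root=a2a_types.DataPart(data={"result": "approved"}))

genai_part = convert_a2a_part_to_genai_part(a2a_part)
if genai_part is not None:  # per the hunk above, None is returned for unsupported parts
  round_tripped = convert_genai_part_to_a2a_part(genai_part)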

src/google/adk/evaluation/local_eval_service.py

Lines changed: 1 addition & 1 deletion

@@ -199,7 +199,7 @@ async def _evaluate_single_inference_result(
 
     # We also keep track of the overall score for a metric, derived from all
     # invocation. For example, if we were keeping track the metric that compares
-    # how well is the final resposne as compared to a golden answer, then each
+    # how well is the final response as compared to a golden answer, then each
     # invocation will have the value of this metric. We will also have an
     # overall score using aggregation strategy across all invocations. This
     # would be the score for the eval case.
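The comment in this hunk describes rolling per-invocation metric values up into one overall score for the eval case. A toy sketch of that idea, assuming a plain mean as the aggregation strategy (the actual strategy is not shown in this diff):

# Toy sketch: aggregate one metric's per-invocation values into an overall
# eval-case score. Using a plain mean is an assumption; the real aggregation
# strategy is not visible in this hunk.
from statistics import mean

def overall_score(per_invocation_scores: list[float]) -> float:
  """Rolls per-invocation values of one metric into a single eval-case score."""
  return mean(per_invocation_scores)

# Three invocations scored against a golden answer, say 4.0, 5.0 and 3.0:
assert overall_score([4.0, 5.0, 3.0]) == 4.0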

src/google/adk/evaluation/response_evaluator.py

Lines changed: 2 additions & 2 deletions

@@ -36,7 +36,7 @@ class ResponseEvaluator(Evaluator):
 
   This class supports two metrics:
   1) response_evaluation_score
-  This metric evaluates how coherent agent's resposne was.
+  This metric evaluates how coherent agent's response was.
 
   Value range of this metric is [1,5], with values closer to 5 more desirable.
 
@@ -81,7 +81,7 @@ def get_metric_info(metric_name: str) -> MetricInfo:
     return MetricInfo(
         metric_name=PrebuiltMetrics.RESPONSE_EVALUATION_SCORE.value,
         description=(
-            "This metric evaluates how coherent agent's resposne was. Value"
+            "This metric evaluates how coherent agent's response was. Value"
            " range of this metric is [1,5], with values closer to 5 more"
            " desirable."
        ),
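A hypothetical caller's view of the metric metadata above. The get_metric_info signature and the [1,5] range come from the diff; the import path, the staticmethod placement on ResponseEvaluator, and the enum's string value are assumptions.

# Hypothetical usage sketch; import path, staticmethod placement, and the
# enum's string value are assumptions. The signature and [1,5] range are
# taken from the diff above.
from google.adk.evaluation.response_evaluator import ResponseEvaluator

info = ResponseEvaluator.get_metric_info("response_evaluation_score")
print(info.metric_name)  # assumed to be "response_evaluation_score"
print(info.description)  # values lie in [1,5]; closer to 5 is more desirable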
