
Commit 33d9f71

lint

1 parent cbf45e7 commit 33d9f71

File tree

2 files changed: +8, -6 lines changed

sdks/python/apache_beam/ml/inference/vertex_ai_inference_test.py

Lines changed: 6 additions & 5 deletions
@@ -50,12 +50,12 @@ def test_exception_on_private_without_network(self):

 class ParseInvokeResponseTest(unittest.TestCase):
   """Tests for _parse_invoke_response method."""
-
   def _create_handler_with_invoke_route(self, invoke_route="/test"):
     """Creates a mock handler with invoke_route for testing."""
     import unittest.mock as mock
-    with mock.patch.object(
-        VertexAIModelHandlerJSON, '_retrieve_endpoint', return_value=None):
+    with mock.patch.object(VertexAIModelHandlerJSON,
+                           '_retrieve_endpoint',
+                           return_value=None):
       handler = VertexAIModelHandlerJSON(
           endpoint_id="1",
           project="testproject",
@@ -67,7 +67,9 @@ def test_parse_invoke_response_with_predictions_key(self):
     """Test parsing response with standard 'predictions' key."""
     handler = self._create_handler_with_invoke_route()
     batch = [{"input": "test1"}, {"input": "test2"}]
-    response = b'{"predictions": ["result1", "result2"], "deployedModelId": "model123"}'
+    response = (
+        b'{"predictions": ["result1", "result2"], '
+        b'"deployedModelId": "model123"}')

     results = list(handler._parse_invoke_response(batch, response))

@@ -114,4 +116,3 @@ def test_parse_invoke_response_non_json(self):

 if __name__ == '__main__':
   unittest.main()
-
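
For context, the test above hands `_parse_invoke_response` the raw JSON bytes returned by `Endpoint.invoke()` together with the input batch, and expects one result per batch element. Below is a minimal sketch of that pairing logic, assuming the response layout shown in the test fixture; the helper name and return shape are hypothetical and are not Beam's actual implementation.

import json
from typing import Any, Iterable, Sequence


def parse_invoke_response_sketch(
    batch: Sequence[Any], response: bytes) -> Iterable[Any]:
  """Hypothetical helper: pairs each batch element with its prediction.

  Assumes the invoke() response is JSON with a top-level 'predictions'
  list, as in the fixture above; other layouts are not handled here.
  """
  payload = json.loads(response)
  predictions = payload["predictions"]
  for example, prediction in zip(batch, predictions):
    yield example, prediction


# Mirrors the fixture used in the test above.
batch = [{"input": "test1"}, {"input": "test2"}]
response = (
    b'{"predictions": ["result1", "result2"], '
    b'"deployedModelId": "model123"}')
print(list(parse_invoke_response_sketch(batch, response)))
# [({'input': 'test1'}, 'result1'), ({'input': 'test2'}, 'result2')]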

sdks/python/apache_beam/yaml/yaml_ml.py

Lines changed: 2 additions & 1 deletion
@@ -241,7 +241,8 @@ def __init__(
         endpoints with arbitrary prediction routes. When specified, uses
         `Endpoint.invoke()` instead of `Endpoint.predict()`. The route
         should start with a forward slash, e.g., "/predict/v1".
-        See https://cloud.google.com/vertex-ai/docs/predictions/use-arbitrary-custom-routes
+        See
+        https://cloud.google.com/vertex-ai/docs/predictions/use-arbitrary-custom-routes
         for more information.
       min_batch_size: The minimum batch size to use when batching
         inputs.
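
The docstring change only rewraps the long link for the linter; the `invoke_route` option it documents is the one exercised by the test above. A hedged usage sketch follows: the project, location, and endpoint_id values are placeholders, and passing `invoke_route` directly to the handler constructor is assumed from the test helper rather than from released API docs.

import apache_beam as beam
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.vertex_ai_inference import VertexAIModelHandlerJSON

# Placeholders: swap in a real project, region, and endpoint id.
model_handler = VertexAIModelHandlerJSON(
    endpoint_id="1234567890",
    project="my-project",
    location="us-central1",
    invoke_route="/predict/v1")  # route must start with a forward slash

with beam.Pipeline() as p:
  _ = (
      p
      | beam.Create([{"input": "test1"}, {"input": "test2"}])
      | RunInference(model_handler))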
