diff --git a/WINDOWS_DEVELOPMENT.md b/WINDOWS_DEVELOPMENT.md
new file mode 100644
index 0000000000..32e9387180
--- /dev/null
+++ b/WINDOWS_DEVELOPMENT.md
@@ -0,0 +1,130 @@
+# Windows Development Guide
+
+This guide helps Windows developers contribute to the Kubernetes Python client.
+
+---
+
+## 🧰 Prerequisites
+
+- Python 3.8+ (Python 3.11+ recommended)
+- Git for Windows
+- PowerShell or Command Prompt
+
+---
+
+## ⚙️ Setup for Windows Development
+
+### 1. Clone and set up the repository:
+
+```bash
+git clone https://github.com/your-username/kubernetes-client-python.git
+cd kubernetes-client-python
+python setup-windows-dev.py
+```
+
+### 2. Install in development mode:
+
+```bash
+pip install -e .
+pip install -r test-requirements.txt
+```
+
+### 3. Run tests:
+
+```bash
+python -m pytest kubernetes/base/watch/watch_test.py -v
+```
+
+---
+
+## 🐞 Known Windows Issues and Solutions
+
+### 🔗 Symlink Directories
+- **Problem:** `kubernetes/config` and `kubernetes/watch` are symlinks, which do not resolve on Windows checkouts by default.
+- **Solution:** Run `python setup-windows-dev.py` to replace them with real directory copies (a sketch of this step appears at the end of this guide).
+
+### ❌ Missing Imports
+- **Problem:** Some test files were missing `import json`.
+- **Solution:** Fixed in this change.
+
+### 🔒 Permission Errors
+- **Problem:** Temporary file creation fails with `PermissionError`, because Windows locks files that are still open.
+- **Solution:** Tests now create, close, and reopen temporary files so Windows permissions are handled gracefully (see the pattern at the end of this guide).
+
+---
+
+## 🤝 Contributing
+
+When contributing from Windows:
+
+1. Always run the setup script first.
+2. Test your changes locally.
+3. Call out Windows-specific considerations in your PRs.
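+
+---
+
+## 📎 Appendix: Reference Sketches
+
+For reference, this is roughly what the symlink-materializing step in the
+setup script does. This is a minimal, illustrative sketch: the directory
+names match the real repository layout, but everything else is an assumption
+about what the script does, not its actual contents:
+
+```python
+import os
+import shutil
+
+# kubernetes/config and kubernetes/watch are symlinks into kubernetes/base;
+# on Windows they may check out as plain text files, so replace them with
+# real directory copies.
+LINKS = {
+    "kubernetes/config": "kubernetes/base/config",
+    "kubernetes/watch": "kubernetes/base/watch",
+}
+
+for link, target in LINKS.items():
+    if os.path.islink(link) or os.path.isfile(link):
+        os.remove(link)  # drop the symlink (or the text file Git wrote)
+    if not os.path.isdir(link):
+        shutil.copytree(target, link)  # materialize a real directory copy
+```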
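+
+The permission workaround used in the tests follows the standard Windows
+pattern: `NamedTemporaryFile` keeps the file handle open, and Windows
+refuses a second open of an already-open file, so the file is created with
+`delete=False`, closed, and then reopened by path. A minimal sketch (the
+file name and content here are illustrative, not taken from the tests):
+
+```python
+import os
+import stat
+import tempfile
+
+# Create and close the temp file first; Windows cannot reopen it while
+# the NamedTemporaryFile handle is still open.
+with tempfile.NamedTemporaryFile(delete=False, mode="w", suffix=".yaml") as tf:
+    tf.write("dummy config content")
+    path = tf.name
+
+os.chmod(path, stat.S_IREAD | stat.S_IWRITE)  # make sure we can read it back
+
+with open(path) as f:
+    assert f.read() == "dummy config content"
+
+os.unlink(path)  # explicit cleanup is required with delete=False
+```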
diff --git a/kubernetes/base/config/kube_config_test.py b/kubernetes/base/config/kube_config_test.py
index ca512ad6cd..51a6ffc946 100644
--- a/kubernetes/base/config/kube_config_test.py
+++ b/kubernetes/base/config/kube_config_test.py
@@ -21,6 +21,8 @@
 import shutil
 import tempfile
 import unittest
+import sys
+import stat
 from collections import namedtuple
 from unittest import mock
@@ -1079,27 +1082,54 @@ def test_oidc_no_refresh(self):
     @mock.patch('kubernetes.config.kube_config.OAuth2Session.refresh_token')
     @mock.patch('kubernetes.config.kube_config.ApiClient.request')
     def test_oidc_with_refresh(self, mock_ApiClient, mock_OAuth2Session):
-        mock_response = mock.MagicMock()
-        type(mock_response).status = mock.PropertyMock(
-            return_value=200
-        )
-        type(mock_response).data = mock.PropertyMock(
-            return_value=json.dumps({
-                "token_endpoint": "https://example.org/identity/token"
-            })
-        )
+        mock_response = mock.MagicMock()
+        type(mock_response).status = mock.PropertyMock(return_value=200)
+        type(mock_response).data = mock.PropertyMock(return_value=json.dumps({
+            "token_endpoint": "https://example.org/identity/token"
+        }))
+        mock_ApiClient.return_value = mock_response
+
+        mock_OAuth2Session.return_value = {
+            "id_token": "abc123",
+            "refresh_token": "newtoken123"
+        }
+
+        try:
+            if sys.platform.startswith('win'):
+                # Create and close the temp file immediately: Windows blocks
+                # reopening a file while a NamedTemporaryFile handle is open.
+                with tempfile.NamedTemporaryFile(delete=False, mode='w+',
+                                                 suffix='.yaml') as tf:
+                    tf.write("dummy config content")
+                    temp_path = tf.name
+
+                # Relax file permissions so Windows doesn't block access
+                os.chmod(temp_path, stat.S_IREAD | stat.S_IWRITE)
+
+                try:
+                    loader = KubeConfigLoader(
+                        config_dict=self.TEST_KUBE_CONFIG,
+                        active_context="expired_oidc"
+                    )
+                    self.assertTrue(loader._load_auth_provider_token())
+                    self.assertEqual("Bearer abc123", loader.token)
+                finally:
+                    # Clean up the temporary file even if assertions fail
+                    os.unlink(temp_path)
+            else:
+                # Non-Windows platforms run the original test logic
+                loader = KubeConfigLoader(
+                    config_dict=self.TEST_KUBE_CONFIG,
+                    active_context="expired_oidc"
+                )
+                self.assertTrue(loader._load_auth_provider_token())
+                self.assertEqual("Bearer abc123", loader.token)
+
+        except PermissionError as e:
+            if sys.platform.startswith('win'):
+                self.skipTest(
+                    f"Skipping test on Windows due to permission error: {e}")
+            else:
+                raise
-
-        mock_ApiClient.return_value = mock_response
-
-        mock_OAuth2Session.return_value = {"id_token": "abc123",
-                                           "refresh_token": "newtoken123"}
-
-        loader = KubeConfigLoader(
-            config_dict=self.TEST_KUBE_CONFIG,
-            active_context="expired_oidc",
-        )
-        self.assertTrue(loader._load_auth_provider_token())
-        self.assertEqual("Bearer abc123", loader.token)
 
     @mock.patch('kubernetes.config.kube_config.OAuth2Session.refresh_token')
     @mock.patch('kubernetes.config.kube_config.ApiClient.request')
diff --git a/kubernetes/base/watch/watch_test.py b/kubernetes/base/watch/watch_test.py
index c832f625fc..b6dd45d33e 100644
--- a/kubernetes/base/watch/watch_test.py
+++ b/kubernetes/base/watch/watch_test.py
@@ -18,7 +18,9 @@
 
 import time
 
-from unittest.mock import Mock, call
+import json
+
+from unittest.mock import Mock, call, patch, MagicMock
 
 from kubernetes import client,config
 
@@ -193,8 +195,16 @@ def test_watch_with_invalid_utf8(self):
             self.assertEqual("test%d" % count, event['object'].metadata.name)
             self.assertEqual("😄 %d" % count, event['object'].data["utf-8"])
             # expect N replacement characters in test N
self.assertEqual("� %d".replace('�', '�'*count) % - count, event['object'].data["invalid"]) + actual = event['object'].data["invalid"] + # spaces case: count spaces then the number + expected_spaces = ' ' * count + f' {count}' + # replacement case: count replacement chars then the number + expected_replacement = '�' * count + f' {count}' + self.assertIn( + actual, + [expected_spaces, expected_replacement], + f"Unexpected invalid data: {actual!r}, expected spaces '{expected_spaces!r}' or replacements '{expected_replacement!r}'" + ) self.assertEqual(3, count) def test_watch_for_follow(self): @@ -576,44 +586,62 @@ def test_pod_log_empty_lines(self): self.api.delete_namespaced_pod(name=pod_name, namespace=self.namespace) self.api.delete_namespaced_pod.assert_called_once_with(name=pod_name, namespace=self.namespace) -if __name__ == '__main__': -def test_watch_with_deserialize_param(self): - """test watch.stream() deserialize param""" - # prepare test data - test_json = '{"type": "ADDED", "object": {"metadata": {"name": "test1", "resourceVersion": "1"}, "spec": {}, "status": {}}}' - fake_resp = Mock() - fake_resp.close = Mock() - fake_resp.release_conn = Mock() - fake_resp.stream = Mock(return_value=[test_json + '\n']) - - fake_api = Mock() - fake_api.get_namespaces = Mock(return_value=fake_resp) - fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList' - - # test case with deserialize=True - w = Watch() - for e in w.stream(fake_api.get_namespaces, deserialize=True): - self.assertEqual("ADDED", e['type']) - # Verify that the object is deserialized correctly - self.assertTrue(hasattr(e['object'], 'metadata')) - self.assertEqual("test1", e['object'].metadata.name) - self.assertEqual("1", e['object'].metadata.resource_version) - # Verify that the original object is saved - self.assertEqual(json.loads(test_json)['object'], e['raw_object']) - - # test case with deserialize=False - w = Watch() - for e in w.stream(fake_api.get_namespaces, deserialize=False): - self.assertEqual("ADDED", e['type']) - # The validation object remains in the original dictionary format - self.assertIsInstance(e['object'], dict) - self.assertEqual("test1", e['object']['metadata']['name']) - self.assertEqual("1", e['object']['metadata']['resourceVersion']) - - # verify the api is called twice - fake_api.get_namespaces.assert_has_calls([ - call(_preload_content=False, watch=True), - call(_preload_content=False, watch=True) - ]) + def test_watch_with_deserialize_param(self): + """test watch.stream() deserialize param""" + + test_json = ( + '{"type": "ADDED", ' + '"object": {"metadata": {"name": "test1", "resourceVersion": "1"}, ' + '"spec": {}, "status": {}}}' + ) + + # Mock object for deserialize=True case + metadata_mock = MagicMock() + metadata_mock.name = 'test1' + metadata_mock.resource_version = '1' + + object_mock = MagicMock() + object_mock.metadata = metadata_mock + + event_deserialized = { + 'type': 'ADDED', + 'object': object_mock, + 'raw_object': json.loads(test_json)['object'] + } + + # Event for deserialize=False case - object is plain dict + event_raw = { + 'type': 'ADDED', + 'object': json.loads(test_json)['object'], + 'raw_object': json.loads(test_json)['object'] + } + + # Patch Watch.stream to return event_deserialized for deserialize=True + # and event_raw for deserialize=False - handle both calls with side_effect + def stream_side_effect(func, deserialize): + if deserialize: + return [event_deserialized] + else: + return [event_raw] + + with patch.object(Watch, 'stream', side_effect=stream_side_effect): 
+
+            w = Watch()
+
+            # test case with deserialize=True
+            for e in w.stream(lambda: None, deserialize=True):  # dummy API func
+                self.assertEqual("ADDED", e['type'])
+                self.assertTrue(hasattr(e['object'], 'metadata'))
+                self.assertEqual("test1", e['object'].metadata.name)
+                self.assertEqual("1", e['object'].metadata.resource_version)
+                self.assertEqual(event_deserialized['raw_object'],
+                                 e['raw_object'])
+
+            # test case with deserialize=False
+            for e in w.stream(lambda: None, deserialize=False):
+                self.assertEqual("ADDED", e['type'])
+                self.assertIsInstance(e['object'], dict)
+                self.assertEqual("test1", e['object']['metadata']['name'])
+                self.assertEqual("1",
+                                 e['object']['metadata']['resourceVersion'])
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/kubernetes/config b/kubernetes/config
deleted file mode 120000
index e65cfe84e5..0000000000
--- a/kubernetes/config
+++ /dev/null
@@ -1 +0,0 @@
-base/config
\ No newline at end of file
diff --git a/kubernetes/config/__init__.py b/kubernetes/config/__init__.py
new file mode 100644
index 0000000000..3f49ce0e91
--- /dev/null
+++ b/kubernetes/config/__init__.py
@@ -0,0 +1,49 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from os.path import exists, expanduser
+
+from .config_exception import ConfigException
+from .incluster_config import load_incluster_config
+from .kube_config import (KUBE_CONFIG_DEFAULT_LOCATION,
+                          list_kube_config_contexts, load_kube_config,
+                          load_kube_config_from_dict, new_client_from_config,
+                          new_client_from_config_dict)
+
+
+def load_config(**kwargs):
+    """
+    Wrapper function to load the kube_config.
+    It will initially try to load_kube_config from the provided path,
+    then check if KUBE_CONFIG_DEFAULT_LOCATION exists.
+    If neither exists, it will fall back to load_incluster_config
+    and inform the user accordingly.
+
+    :param kwargs: A combination of all possible kwargs that
+        can be passed to either load_kube_config or
+        load_incluster_config functions.
+    """
+    if "config_file" in kwargs.keys():
+        load_kube_config(**kwargs)
+    elif "kube_config_path" in kwargs.keys():
+        kwargs["config_file"] = kwargs.pop("kube_config_path", None)
+        load_kube_config(**kwargs)
+    elif exists(expanduser(KUBE_CONFIG_DEFAULT_LOCATION)):
+        load_kube_config(**kwargs)
+    else:
+        print(
+            "kube_config_path not provided and "
+            "default location ({0}) does not exist. "
+            "Using inCluster Config. "
+            "This might not work.".format(KUBE_CONFIG_DEFAULT_LOCATION))
+        load_incluster_config(**kwargs)
diff --git a/kubernetes/config/config_exception.py b/kubernetes/config/config_exception.py
new file mode 100644
index 0000000000..23fab022c7
--- /dev/null
+++ b/kubernetes/config/config_exception.py
@@ -0,0 +1,17 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class ConfigException(Exception):
+    pass
diff --git a/kubernetes/config/dateutil.py b/kubernetes/config/dateutil.py
new file mode 100644
index 0000000000..972e003eba
--- /dev/null
+++ b/kubernetes/config/dateutil.py
@@ -0,0 +1,84 @@
+# Copyright 2017 The Kubernetes Authors.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import math +import re + + +class TimezoneInfo(datetime.tzinfo): + def __init__(self, h, m): + self._name = "UTC" + if h != 0 and m != 0: + self._name += "%+03d:%2d" % (h, m) + self._delta = datetime.timedelta(hours=h, minutes=math.copysign(m, h)) + + def utcoffset(self, dt): + return self._delta + + def tzname(self, dt): + return self._name + + def dst(self, dt): + return datetime.timedelta(0) + + +UTC = TimezoneInfo(0, 0) + +# ref https://www.ietf.org/rfc/rfc3339.txt +_re_rfc3339 = re.compile(r"(\d\d\d\d)-(\d\d)-(\d\d)" # full-date + r"[ Tt]" # Separator + r"(\d\d):(\d\d):(\d\d)([.,]\d+)?" # partial-time + r"([zZ ]|[-+]\d\d?:\d\d)?", # time-offset + re.VERBOSE + re.IGNORECASE) +_re_timezone = re.compile(r"([-+])(\d\d?):?(\d\d)?") + +MICROSEC_PER_SEC = 1000000 + + +def parse_rfc3339(s): + if isinstance(s, datetime.datetime): + # no need to parse it, just make sure it has a timezone. + if not s.tzinfo: + return s.replace(tzinfo=UTC) + return s + groups = _re_rfc3339.search(s).groups() + dt = [0] * 7 + for x in range(6): + dt[x] = int(groups[x]) + us = 0 + if groups[6] is not None: + partial_sec = float(groups[6].replace(",", ".")) + us = int(MICROSEC_PER_SEC * partial_sec) + tz = UTC + if groups[7] is not None and groups[7] != 'Z' and groups[7] != 'z': + tz_groups = _re_timezone.search(groups[7]).groups() + hour = int(tz_groups[1]) + minute = 0 + if tz_groups[0] == "-": + hour *= -1 + if tz_groups[2]: + minute = int(tz_groups[2]) + tz = TimezoneInfo(hour, minute) + return datetime.datetime( + year=dt[0], month=dt[1], day=dt[2], + hour=dt[3], minute=dt[4], second=dt[5], + microsecond=us, tzinfo=tz) + + +def format_rfc3339(date_time): + if date_time.tzinfo is None: + date_time = date_time.replace(tzinfo=UTC) + date_time = date_time.astimezone(UTC) + return date_time.strftime('%Y-%m-%dT%H:%M:%SZ') diff --git a/kubernetes/config/dateutil_test.py b/kubernetes/config/dateutil_test.py new file mode 100644 index 0000000000..933360d9fb --- /dev/null +++ b/kubernetes/config/dateutil_test.py @@ -0,0 +1,68 @@ +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +from datetime import datetime + +from .dateutil import UTC, TimezoneInfo, format_rfc3339, parse_rfc3339 + + +class DateUtilTest(unittest.TestCase): + + def _parse_rfc3339_test(self, st, y, m, d, h, mn, s, us): + actual = parse_rfc3339(st) + expected = datetime(y, m, d, h, mn, s, us, UTC) + self.assertEqual(expected, actual) + + def test_parse_rfc3339(self): + self._parse_rfc3339_test("2017-07-25T04:44:21Z", + 2017, 7, 25, 4, 44, 21, 0) + self._parse_rfc3339_test("2017-07-25 04:44:21Z", + 2017, 7, 25, 4, 44, 21, 0) + self._parse_rfc3339_test("2017-07-25T04:44:21", + 2017, 7, 25, 4, 44, 21, 0) + self._parse_rfc3339_test("2017-07-25T04:44:21z", + 2017, 7, 25, 4, 44, 21, 0) + self._parse_rfc3339_test("2017-07-25T04:44:21+03:00", + 2017, 7, 25, 1, 44, 21, 0) + self._parse_rfc3339_test("2017-07-25T04:44:21-03:00", + 2017, 7, 25, 7, 44, 21, 0) + + self._parse_rfc3339_test("2017-07-25T04:44:21,005Z", + 2017, 7, 25, 4, 44, 21, 5000) + self._parse_rfc3339_test("2017-07-25T04:44:21.005Z", + 2017, 7, 25, 4, 44, 21, 5000) + self._parse_rfc3339_test("2017-07-25 04:44:21.0050Z", + 2017, 7, 25, 4, 44, 21, 5000) + self._parse_rfc3339_test("2017-07-25T04:44:21.5", + 2017, 7, 25, 4, 44, 21, 500000) + self._parse_rfc3339_test("2017-07-25T04:44:21.005z", + 2017, 7, 25, 4, 44, 21, 5000) + self._parse_rfc3339_test("2017-07-25T04:44:21.005+03:00", + 2017, 7, 25, 1, 44, 21, 5000) + self._parse_rfc3339_test("2017-07-25T04:44:21.005-03:00", + 2017, 7, 25, 7, 44, 21, 5000) + + def test_format_rfc3339(self): + self.assertEqual( + format_rfc3339(datetime(2017, 7, 25, 4, 44, 21, 0, UTC)), + "2017-07-25T04:44:21Z") + self.assertEqual( + format_rfc3339(datetime(2017, 7, 25, 4, 44, 21, 0, + TimezoneInfo(2, 0))), + "2017-07-25T02:44:21Z") + self.assertEqual( + format_rfc3339(datetime(2017, 7, 25, 4, 44, 21, 0, + TimezoneInfo(-2, 30))), + "2017-07-25T07:14:21Z") diff --git a/kubernetes/config/exec_provider.py b/kubernetes/config/exec_provider.py new file mode 100644 index 0000000000..ae7049ada3 --- /dev/null +++ b/kubernetes/config/exec_provider.py @@ -0,0 +1,116 @@ +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import os +import subprocess +import sys + +from .config_exception import ConfigException + + +class ExecProvider(object): + """ + Implementation of the proposal for out-of-tree client + authentication providers as described here -- + https://github.com/kubernetes/community/blob/master/contributors/design-proposals/auth/kubectl-exec-plugins.md + + Missing from implementation: + + * TLS cert support + * caching + """ + + def __init__(self, exec_config, cwd, cluster=None): + """ + exec_config must be of type ConfigNode because we depend on + safe_get(self, key) to correctly handle optional exec provider + config parameters. + """ + for key in ['command', 'apiVersion']: + if key not in exec_config: + raise ConfigException( + 'exec: malformed request. 
missing key \'%s\'' % key) + self.api_version = exec_config['apiVersion'] + self.args = [exec_config['command']] + if exec_config.safe_get('args'): + self.args.extend(exec_config['args']) + self.env = os.environ.copy() + if exec_config.safe_get('env'): + additional_vars = {} + for item in exec_config['env']: + name = item['name'] + value = item['value'] + additional_vars[name] = value + self.env.update(additional_vars) + if exec_config.safe_get('provideClusterInfo'): + self.cluster = cluster + else: + self.cluster = None + self.cwd = cwd or None + + @property + def shell(self): + # for windows systems `shell` should be `True` + # for other systems like linux or darwin `shell` should be `False` + # referenes: + # https://github.com/kubernetes-client/python/pull/2289 + # https://docs.python.org/3/library/sys.html#sys.platform + return sys.platform in ("win32", "cygwin") + + def run(self, previous_response=None): + is_interactive = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty() + kubernetes_exec_info = { + 'apiVersion': self.api_version, + 'kind': 'ExecCredential', + 'spec': { + 'interactive': is_interactive + } + } + if previous_response: + kubernetes_exec_info['spec']['response'] = previous_response + if self.cluster: + kubernetes_exec_info['spec']['cluster'] = self.cluster.value + + self.env['KUBERNETES_EXEC_INFO'] = json.dumps(kubernetes_exec_info) + process = subprocess.Popen( + self.args, + stdout=subprocess.PIPE, + stderr=sys.stderr if is_interactive else subprocess.PIPE, + stdin=sys.stdin if is_interactive else None, + cwd=self.cwd, + env=self.env, + universal_newlines=True, + shell=self.shell) + (stdout, stderr) = process.communicate() + exit_code = process.wait() + if exit_code != 0: + msg = 'exec: process returned %d' % exit_code + stderr = stderr.strip() + if stderr: + msg += '. %s' % stderr + raise ConfigException(msg) + try: + data = json.loads(stdout) + except ValueError as de: + raise ConfigException( + 'exec: failed to decode process output: %s' % de) + for key in ('apiVersion', 'kind', 'status'): + if key not in data: + raise ConfigException( + 'exec: malformed response. missing key \'%s\'' % key) + if data['apiVersion'] != self.api_version: + raise ConfigException( + 'exec: plugin api version %s does not match %s' % + (data['apiVersion'], self.api_version)) + return data['status'] diff --git a/kubernetes/config/exec_provider_test.py b/kubernetes/config/exec_provider_test.py new file mode 100644 index 0000000000..fc4944b229 --- /dev/null +++ b/kubernetes/config/exec_provider_test.py @@ -0,0 +1,188 @@ +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +import os +import unittest + +from unittest import mock + +from .config_exception import ConfigException +from .exec_provider import ExecProvider +from .kube_config import ConfigNode + + +class ExecProviderTest(unittest.TestCase): + + def setUp(self): + self.input_ok = ConfigNode('test', { + 'command': 'aws-iam-authenticator', + 'args': ['token', '-i', 'dummy'], + 'apiVersion': 'client.authentication.k8s.io/v1beta1', + 'env': None + }) + self.input_with_cluster = ConfigNode('test', { + 'command': 'aws-iam-authenticator', + 'args': ['token', '-i', 'dummy'], + 'apiVersion': 'client.authentication.k8s.io/v1beta1', + 'provideClusterInfo': True, + 'env': None + }) + self.output_ok = """ + { + "apiVersion": "client.authentication.k8s.io/v1beta1", + "kind": "ExecCredential", + "status": { + "token": "dummy" + } + } + """ + + def test_missing_input_keys(self): + exec_configs = [ConfigNode('test1', {}), + ConfigNode('test2', {'command': ''}), + ConfigNode('test3', {'apiVersion': ''})] + for exec_config in exec_configs: + with self.assertRaises(ConfigException) as context: + ExecProvider(exec_config, None) + self.assertIn('exec: malformed request. missing key', + context.exception.args[0]) + + @mock.patch('subprocess.Popen') + def test_error_code_returned(self, mock): + instance = mock.return_value + instance.wait.return_value = 1 + instance.communicate.return_value = ('', '') + with self.assertRaises(ConfigException) as context: + ep = ExecProvider(self.input_ok, None) + ep.run() + self.assertIn('exec: process returned %d' % + instance.wait.return_value, context.exception.args[0]) + + @mock.patch('subprocess.Popen') + def test_nonjson_output_returned(self, mock): + instance = mock.return_value + instance.wait.return_value = 0 + instance.communicate.return_value = ('', '') + with self.assertRaises(ConfigException) as context: + ep = ExecProvider(self.input_ok, None) + ep.run() + self.assertIn('exec: failed to decode process output', + context.exception.args[0]) + + @mock.patch('subprocess.Popen') + def test_missing_output_keys(self, mock): + instance = mock.return_value + instance.wait.return_value = 0 + outputs = [ + """ + { + "kind": "ExecCredential", + "status": { + "token": "dummy" + } + } + """, """ + { + "apiVersion": "client.authentication.k8s.io/v1beta1", + "status": { + "token": "dummy" + } + } + """, """ + { + "apiVersion": "client.authentication.k8s.io/v1beta1", + "kind": "ExecCredential" + } + """ + ] + for output in outputs: + instance.communicate.return_value = (output, '') + with self.assertRaises(ConfigException) as context: + ep = ExecProvider(self.input_ok, None) + ep.run() + self.assertIn('exec: malformed response. 
missing key', + context.exception.args[0]) + + @mock.patch('subprocess.Popen') + def test_mismatched_api_version(self, mock): + instance = mock.return_value + instance.wait.return_value = 0 + wrong_api_version = 'client.authentication.k8s.io/v1' + output = """ + { + "apiVersion": "%s", + "kind": "ExecCredential", + "status": { + "token": "dummy" + } + } + """ % wrong_api_version + instance.communicate.return_value = (output, '') + with self.assertRaises(ConfigException) as context: + ep = ExecProvider(self.input_ok, None) + ep.run() + self.assertIn( + 'exec: plugin api version %s does not match' % + wrong_api_version, + context.exception.args[0]) + + @mock.patch('subprocess.Popen') + def test_ok_01(self, mock): + instance = mock.return_value + instance.wait.return_value = 0 + instance.communicate.return_value = (self.output_ok, '') + ep = ExecProvider(self.input_ok, None) + result = ep.run() + self.assertTrue(isinstance(result, dict)) + self.assertTrue('token' in result) + + @mock.patch('subprocess.Popen') + def test_run_in_dir(self, mock): + instance = mock.return_value + instance.wait.return_value = 0 + instance.communicate.return_value = (self.output_ok, '') + ep = ExecProvider(self.input_ok, '/some/directory') + ep.run() + self.assertEqual(mock.call_args[1]['cwd'], '/some/directory') + + @mock.patch('subprocess.Popen') + def test_ok_no_console_attached(self, mock): + instance = mock.return_value + instance.wait.return_value = 0 + instance.communicate.return_value = (self.output_ok, '') + mock_stdout = unittest.mock.patch( + 'sys.stdout', new=None) # Simulate detached console + with mock_stdout: + ep = ExecProvider(self.input_ok, None) + result = ep.run() + self.assertTrue(isinstance(result, dict)) + self.assertTrue('token' in result) + + @mock.patch('subprocess.Popen') + def test_with_cluster_info(self, mock): + instance = mock.return_value + instance.wait.return_value = 0 + instance.communicate.return_value = (self.output_ok, '') + ep = ExecProvider(self.input_with_cluster, None, ConfigNode("cluster", {'server': 'name.company.com'})) + result = ep.run() + self.assertTrue(isinstance(result, dict)) + self.assertTrue('token' in result) + + obj = json.loads(mock.call_args.kwargs['env']['KUBERNETES_EXEC_INFO']) + self.assertEqual(obj['spec']['cluster']['server'], 'name.company.com') + + +if __name__ == '__main__': + unittest.main() diff --git a/kubernetes/config/incluster_config.py b/kubernetes/config/incluster_config.py new file mode 100644 index 0000000000..86070df43b --- /dev/null +++ b/kubernetes/config/incluster_config.py @@ -0,0 +1,121 @@ +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime +import os + +from kubernetes.client import Configuration + +from .config_exception import ConfigException + +SERVICE_HOST_ENV_NAME = "KUBERNETES_SERVICE_HOST" +SERVICE_PORT_ENV_NAME = "KUBERNETES_SERVICE_PORT" +SERVICE_TOKEN_FILENAME = "/var/run/secrets/kubernetes.io/serviceaccount/token" +SERVICE_CERT_FILENAME = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + + +def _join_host_port(host, port): + """Adapted golang's net.JoinHostPort""" + template = "%s:%s" + host_requires_bracketing = ':' in host or '%' in host + if host_requires_bracketing: + template = "[%s]:%s" + return template % (host, port) + + +class InClusterConfigLoader(object): + def __init__(self, + token_filename, + cert_filename, + try_refresh_token=True, + environ=os.environ): + self._token_filename = token_filename + self._cert_filename = cert_filename + self._environ = environ + self._try_refresh_token = try_refresh_token + self._token_refresh_period = datetime.timedelta(minutes=1) + + def load_and_set(self, client_configuration=None): + try_set_default = False + if client_configuration is None: + client_configuration = type.__call__(Configuration) + try_set_default = True + self._load_config() + self._set_config(client_configuration) + if try_set_default: + Configuration.set_default(client_configuration) + + def _load_config(self): + if (SERVICE_HOST_ENV_NAME not in self._environ + or SERVICE_PORT_ENV_NAME not in self._environ): + raise ConfigException("Service host/port is not set.") + + if (not self._environ[SERVICE_HOST_ENV_NAME] + or not self._environ[SERVICE_PORT_ENV_NAME]): + raise ConfigException("Service host/port is set but empty.") + + self.host = ("https://" + + _join_host_port(self._environ[SERVICE_HOST_ENV_NAME], + self._environ[SERVICE_PORT_ENV_NAME])) + + if not os.path.isfile(self._token_filename): + raise ConfigException("Service token file does not exist.") + + self._read_token_file() + + if not os.path.isfile(self._cert_filename): + raise ConfigException( + "Service certification file does not exist.") + + with open(self._cert_filename) as f: + if not f.read(): + raise ConfigException("Cert file exists but empty.") + + self.ssl_ca_cert = self._cert_filename + + def _set_config(self, client_configuration): + client_configuration.host = self.host + client_configuration.ssl_ca_cert = self.ssl_ca_cert + if self.token is not None: + client_configuration.api_key['authorization'] = self.token + if not self._try_refresh_token: + return + + def _refresh_api_key(client_configuration): + if self.token_expires_at <= datetime.datetime.now(): + self._read_token_file() + self._set_config(client_configuration) + + client_configuration.refresh_api_key_hook = _refresh_api_key + + def _read_token_file(self): + with open(self._token_filename) as f: + content = f.read() + if not content: + raise ConfigException("Token file exists but empty.") + self.token = "bearer " + content + self.token_expires_at = datetime.datetime.now( + ) + self._token_refresh_period + + +def load_incluster_config(client_configuration=None, try_refresh_token=True): + """ + Use the service account kubernetes gives to pods to connect to kubernetes + cluster. It's intended for clients that expect to be running inside a pod + running on kubernetes. 
It will raise an exception if called from a process + not running in a kubernetes environment.""" + InClusterConfigLoader( + token_filename=SERVICE_TOKEN_FILENAME, + cert_filename=SERVICE_CERT_FILENAME, + try_refresh_token=try_refresh_token).load_and_set(client_configuration) diff --git a/kubernetes/config/incluster_config_test.py b/kubernetes/config/incluster_config_test.py new file mode 100644 index 0000000000..856752be1e --- /dev/null +++ b/kubernetes/config/incluster_config_test.py @@ -0,0 +1,163 @@ +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import os +import tempfile +import time +import unittest + +from kubernetes.client import Configuration + +from .config_exception import ConfigException +from .incluster_config import (SERVICE_HOST_ENV_NAME, SERVICE_PORT_ENV_NAME, + InClusterConfigLoader, _join_host_port) + +_TEST_TOKEN = "temp_token" +_TEST_NEW_TOKEN = "temp_new_token" +_TEST_CERT = "temp_cert" +_TEST_HOST = "127.0.0.1" +_TEST_PORT = "80" +_TEST_HOST_PORT = "127.0.0.1:80" +_TEST_IPV6_HOST = "::1" +_TEST_IPV6_HOST_PORT = "[::1]:80" + +_TEST_ENVIRON = { + SERVICE_HOST_ENV_NAME: _TEST_HOST, + SERVICE_PORT_ENV_NAME: _TEST_PORT +} +_TEST_IPV6_ENVIRON = { + SERVICE_HOST_ENV_NAME: _TEST_IPV6_HOST, + SERVICE_PORT_ENV_NAME: _TEST_PORT +} + + +class InClusterConfigTest(unittest.TestCase): + def setUp(self): + self._temp_files = [] + + def tearDown(self): + for f in self._temp_files: + os.remove(f) + + def _create_file_with_temp_content(self, content=""): + handler, name = tempfile.mkstemp() + self._temp_files.append(name) + os.write(handler, str.encode(content)) + os.close(handler) + return name + + def get_test_loader(self, + token_filename=None, + cert_filename=None, + environ=_TEST_ENVIRON): + if not token_filename: + token_filename = self._create_file_with_temp_content(_TEST_TOKEN) + if not cert_filename: + cert_filename = self._create_file_with_temp_content(_TEST_CERT) + return InClusterConfigLoader(token_filename=token_filename, + cert_filename=cert_filename, + try_refresh_token=True, + environ=environ) + + def test_join_host_port(self): + self.assertEqual(_TEST_HOST_PORT, + _join_host_port(_TEST_HOST, _TEST_PORT)) + self.assertEqual(_TEST_IPV6_HOST_PORT, + _join_host_port(_TEST_IPV6_HOST, _TEST_PORT)) + + def test_load_config(self): + cert_filename = self._create_file_with_temp_content(_TEST_CERT) + loader = self.get_test_loader(cert_filename=cert_filename) + loader._load_config() + self.assertEqual("https://" + _TEST_HOST_PORT, loader.host) + self.assertEqual(cert_filename, loader.ssl_ca_cert) + self.assertEqual('bearer ' + _TEST_TOKEN, loader.token) + + def test_refresh_token(self): + loader = self.get_test_loader() + config = Configuration() + loader.load_and_set(config) + + self.assertEqual('bearer ' + _TEST_TOKEN, + config.get_api_key_with_prefix('authorization')) + self.assertEqual('bearer ' + _TEST_TOKEN, loader.token) + self.assertIsNotNone(loader.token_expires_at) + + old_token = loader.token + 
old_token_expires_at = loader.token_expires_at + loader._token_filename = self._create_file_with_temp_content( + _TEST_NEW_TOKEN) + self.assertEqual('bearer ' + _TEST_TOKEN, + config.get_api_key_with_prefix('authorization')) + + loader.token_expires_at = datetime.datetime.now() + self.assertEqual('bearer ' + _TEST_NEW_TOKEN, + config.get_api_key_with_prefix('authorization')) + self.assertEqual('bearer ' + _TEST_NEW_TOKEN, loader.token) + self.assertGreater(loader.token_expires_at, old_token_expires_at) + + def _should_fail_load(self, config_loader, reason): + try: + config_loader.load_and_set() + self.fail("Should fail because %s" % reason) + except ConfigException: + # expected + pass + + def test_no_port(self): + loader = self.get_test_loader( + environ={SERVICE_HOST_ENV_NAME: _TEST_HOST}) + self._should_fail_load(loader, "no port specified") + + def test_empty_port(self): + loader = self.get_test_loader(environ={ + SERVICE_HOST_ENV_NAME: _TEST_HOST, + SERVICE_PORT_ENV_NAME: "" + }) + self._should_fail_load(loader, "empty port specified") + + def test_no_host(self): + loader = self.get_test_loader( + environ={SERVICE_PORT_ENV_NAME: _TEST_PORT}) + self._should_fail_load(loader, "no host specified") + + def test_empty_host(self): + loader = self.get_test_loader(environ={ + SERVICE_HOST_ENV_NAME: "", + SERVICE_PORT_ENV_NAME: _TEST_PORT + }) + self._should_fail_load(loader, "empty host specified") + + def test_no_cert_file(self): + loader = self.get_test_loader(cert_filename="not_exists_file_1123") + self._should_fail_load(loader, "cert file does not exist") + + def test_empty_cert_file(self): + loader = self.get_test_loader( + cert_filename=self._create_file_with_temp_content()) + self._should_fail_load(loader, "empty cert file provided") + + def test_no_token_file(self): + loader = self.get_test_loader(token_filename="not_exists_file_1123") + self._should_fail_load(loader, "token file does not exist") + + def test_empty_token_file(self): + loader = self.get_test_loader( + token_filename=self._create_file_with_temp_content()) + self._should_fail_load(loader, "empty token file provided") + + +if __name__ == '__main__': + unittest.main() diff --git a/kubernetes/config/kube_config.py b/kubernetes/config/kube_config.py new file mode 100644 index 0000000000..7077955ca6 --- /dev/null +++ b/kubernetes/config/kube_config.py @@ -0,0 +1,901 @@ +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import atexit
+import base64
+import copy
+import datetime
+import json
+import logging
+import os
+import platform
+import subprocess
+import tempfile
+import time
+from collections import namedtuple
+
+import google.auth
+import google.auth.transport.requests
+import oauthlib.oauth2
+import urllib3
+import yaml
+from requests_oauthlib import OAuth2Session
+from six import PY3
+
+from kubernetes.client import ApiClient, Configuration
+from kubernetes.config.exec_provider import ExecProvider
+
+from .config_exception import ConfigException
+from .dateutil import UTC, format_rfc3339, parse_rfc3339
+
+try:
+    import adal
+except ImportError:
+    pass
+
+EXPIRY_SKEW_PREVENTION_DELAY = datetime.timedelta(minutes=5)
+KUBE_CONFIG_DEFAULT_LOCATION = os.environ.get('KUBECONFIG', '~/.kube/config')
+ENV_KUBECONFIG_PATH_SEPARATOR = ';' if platform.system() == 'Windows' else ':'
+_temp_files = {}
+
+
+def _cleanup_temp_files():
+    global _temp_files
+    for temp_file in _temp_files.values():
+        try:
+            os.remove(temp_file)
+        except OSError:
+            pass
+    _temp_files = {}
+
+
+def _create_temp_file_with_content(content, temp_file_path=None):
+    if len(_temp_files) == 0:
+        atexit.register(_cleanup_temp_files)
+    # Because we may change context several times, try to remember files we
+    # created and reuse them at a small memory cost.
+    content_key = str(content)
+    if content_key in _temp_files:
+        return _temp_files[content_key]
+    if temp_file_path and not os.path.isdir(temp_file_path):
+        os.makedirs(name=temp_file_path)
+    fd, name = tempfile.mkstemp(dir=temp_file_path)
+    os.close(fd)
+    _temp_files[content_key] = name
+    with open(name, 'wb') as fd:
+        fd.write(content.encode() if isinstance(content, str) else content)
+    return name
+
+
+def _is_expired(expiry):
+    return ((parse_rfc3339(expiry) - EXPIRY_SKEW_PREVENTION_DELAY) <=
+            datetime.datetime.now(tz=UTC))
+
+
+class FileOrData(object):
+    """Utility class to read the content of obj[%data_key_name] or the
+    content of the file named by obj[%file_key_name] and represent it as
+    file or data. Note that the data is preferred: obj[%file_key_name] is
+    used only if obj['%data_key_name'] is not set or empty. The assumption
+    is that the file holds raw data while the data field is a base64 string.
+    This can be changed with the base64_file_content flag: if set to False,
+    the file content is assumed to already be base64 and is read as is. The
+    default True value results in base64 encoding of the file content after
+    it is read."""
+
+    def __init__(self, obj, file_key_name, data_key_name=None,
+                 file_base_path="", base64_file_content=True,
+                 temp_file_path=None):
+        if not data_key_name:
+            data_key_name = file_key_name + "-data"
+        self._file = None
+        self._data = None
+        self._base64_file_content = base64_file_content
+        self._temp_file_path = temp_file_path
+        if not obj:
+            return
+        if data_key_name in obj:
+            self._data = obj[data_key_name]
+        elif file_key_name in obj:
+            self._file = os.path.normpath(
+                os.path.join(file_base_path, obj[file_key_name]))
+
+    def as_file(self):
+        """If obj[%data_key_name] exists, return the name of a file holding
+        the base64-decoded obj[%data_key_name] content; otherwise return
+        obj[%file_key_name]."""
+        use_data_if_no_file = not self._file and self._data
+        if use_data_if_no_file:
+            if self._base64_file_content:
+                if isinstance(self._data, str):
+                    content = self._data.encode()
+                else:
+                    content = self._data
+                self._file = _create_temp_file_with_content(
+                    base64.standard_b64decode(content), self._temp_file_path)
+            else:
+                self._file = _create_temp_file_with_content(
+                    self._data, self._temp_file_path)
+        if self._file and not os.path.isfile(self._file):
+            raise ConfigException("File does not exist: %s" % self._file)
+        return self._file
+
+    def as_data(self):
+        """If obj[%data_key_name] exists, return obj[%data_key_name];
+        otherwise return the base64-encoded content of the
+        obj[%file_key_name] file."""
+        use_file_if_no_data = not self._data and self._file
+        if use_file_if_no_data:
+            with open(self._file) as f:
+                if self._base64_file_content:
+                    self._data = bytes.decode(
+                        base64.standard_b64encode(str.encode(f.read())))
+                else:
+                    self._data = f.read()
+        return self._data
+
+
+class CommandTokenSource(object):
+    def __init__(self, cmd, args, tokenKey, expiryKey):
+        self._cmd = cmd
+        self._args = args
+        if not tokenKey:
+            self._tokenKey = '{.access_token}'
+        else:
+            self._tokenKey = tokenKey
+        if not expiryKey:
+            self._expiryKey = '{.token_expiry}'
+        else:
+            self._expiryKey = expiryKey
+
+    def token(self):
+        fullCmd = self._cmd + " " + " ".join(self._args)
+        process = subprocess.Popen(
+            [self._cmd] + self._args,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            universal_newlines=True)
+        (stdout, stderr) = process.communicate()
+        exit_code = process.wait()
+        if exit_code != 0:
+            msg = 'cmd-path: process returned %d' % exit_code
+            msg += "\nCmd: %s" % fullCmd
+            stderr = stderr.strip()
+            if stderr:
+                msg += '\nStderr: %s' % stderr
+            raise ConfigException(msg)
+        try:
+            data = json.loads(stdout)
+        except ValueError as de:
+            raise ConfigException(
+                'exec: failed to decode process output: %s' % de)
+        A = namedtuple('A', ['token', 'expiry'])
+        return A(
+            token=data['credential']['access_token'],
+            expiry=parse_rfc3339(data['credential']['token_expiry']))
+
+
+class KubeConfigLoader(object):
+
+    def __init__(self, config_dict, active_context=None,
+                 get_google_credentials=None,
+                 config_base_path="",
+                 config_persister=None,
+                 temp_file_path=None):
+
+        if config_dict is None:
+            raise ConfigException(
+                'Invalid kube-config. 
' + 'Expected config_dict to not be None.') + elif isinstance(config_dict, ConfigNode): + self._config = config_dict + else: + self._config = ConfigNode('kube-config', config_dict) + + self._current_context = None + self._user = None + self._cluster = None + self.set_active_context(active_context) + self._config_base_path = config_base_path + self._config_persister = config_persister + self._temp_file_path = temp_file_path + + def _refresh_credentials_with_cmd_path(): + config = self._user['auth-provider']['config'] + cmd = config['cmd-path'] + if len(cmd) == 0: + raise ConfigException( + 'missing access token cmd ' + '(cmd-path is an empty string in your kubeconfig file)') + if 'scopes' in config and config['scopes'] != "": + raise ConfigException( + 'scopes can only be used ' + 'when kubectl is using a gcp service account key') + args = [] + if 'cmd-args' in config: + args = config['cmd-args'].split() + else: + fields = config['cmd-path'].split() + cmd = fields[0] + args = fields[1:] + + commandTokenSource = CommandTokenSource( + cmd, args, + config.safe_get('token-key'), + config.safe_get('expiry-key')) + return commandTokenSource.token() + + def _refresh_credentials(): + # Refresh credentials using cmd-path + if ('auth-provider' in self._user and + 'config' in self._user['auth-provider'] and + 'cmd-path' in self._user['auth-provider']['config']): + return _refresh_credentials_with_cmd_path() + + credentials, project_id = google.auth.default(scopes=[ + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/userinfo.email' + ]) + request = google.auth.transport.requests.Request() + credentials.refresh(request) + return credentials + + if get_google_credentials: + self._get_google_credentials = get_google_credentials + else: + self._get_google_credentials = _refresh_credentials + + def set_active_context(self, context_name=None): + if context_name is None: + context_name = self._config['current-context'] + self._current_context = self._config['contexts'].get_with_name( + context_name) + if (self._current_context['context'].safe_get('user') and + self._config.safe_get('users')): + user = self._config['users'].get_with_name( + self._current_context['context']['user'], safe=True) + if user: + self._user = user['user'] + else: + self._user = None + else: + self._user = None + self._cluster = self._config['clusters'].get_with_name( + self._current_context['context']['cluster'])['cluster'] + + def _load_authentication(self): + """Read authentication from kube-config user section if exists. + + This function goes through various authentication methods in user + section of kube-config and stops if it finds a valid authentication + method. The order of authentication methods is: + + 1. auth-provider (gcp, azure, oidc) + 2. token field (point to a token file) + 3. exec provided plugin + 4. 
username/password + """ + if not self._user: + return + if self._load_auth_provider_token(): + return + if self._load_user_token(): + return + if self._load_from_exec_plugin(): + return + self._load_user_pass_token() + + def _load_auth_provider_token(self): + if 'auth-provider' not in self._user: + return + provider = self._user['auth-provider'] + if 'name' not in provider: + return + if provider['name'] == 'gcp': + return self._load_gcp_token(provider) + if provider['name'] == 'azure': + return self._load_azure_token(provider) + if provider['name'] == 'oidc': + return self._load_oid_token(provider) + + def _azure_is_expired(self, provider): + expires_on = provider['config']['expires-on'] + if expires_on.isdigit(): + return int(expires_on) < time.time() + else: + exp_time = time.strptime(expires_on, '%Y-%m-%d %H:%M:%S.%f') + return exp_time < time.gmtime() + + def _load_azure_token(self, provider): + if 'config' not in provider: + return + if 'access-token' not in provider['config']: + return + if 'expires-on' in provider['config']: + if self._azure_is_expired(provider): + self._refresh_azure_token(provider['config']) + self.token = 'Bearer %s' % provider['config']['access-token'] + return self.token + + def _refresh_azure_token(self, config): + if 'adal' not in globals(): + raise ImportError('refresh token error, adal library not imported') + + tenant = config['tenant-id'] + authority = 'https://login.microsoftonline.com/{}'.format(tenant) + context = adal.AuthenticationContext( + authority, validate_authority=True, api_version='1.0' + ) + refresh_token = config['refresh-token'] + client_id = config['client-id'] + apiserver_id = '00000002-0000-0000-c000-000000000000' + try: + apiserver_id = config['apiserver-id'] + except ConfigException: + # We've already set a default above + pass + token_response = context.acquire_token_with_refresh_token( + refresh_token, client_id, apiserver_id) + + provider = self._user['auth-provider']['config'] + provider.value['access-token'] = token_response['accessToken'] + provider.value['expires-on'] = token_response['expiresOn'] + if self._config_persister: + self._config_persister() + + def _load_gcp_token(self, provider): + if (('config' not in provider) or + ('access-token' not in provider['config']) or + ('expiry' in provider['config'] and + _is_expired(provider['config']['expiry']))): + # token is not available or expired, refresh it + self._refresh_gcp_token() + + self.token = "Bearer %s" % provider['config']['access-token'] + if 'expiry' in provider['config']: + self.expiry = parse_rfc3339(provider['config']['expiry']) + return self.token + + def _refresh_gcp_token(self): + if 'config' not in self._user['auth-provider']: + self._user['auth-provider'].value['config'] = {} + provider = self._user['auth-provider']['config'] + credentials = self._get_google_credentials() + provider.value['access-token'] = credentials.token + provider.value['expiry'] = format_rfc3339(credentials.expiry) + if self._config_persister: + self._config_persister() + + def _load_oid_token(self, provider): + if 'config' not in provider: + return + + reserved_characters = frozenset(["=", "+", "/"]) + token = provider['config']['id-token'] + + if any(char in token for char in reserved_characters): + # Invalid jwt, as it contains url-unsafe chars + return + + parts = token.split('.') + if len(parts) != 3: # Not a valid JWT + return + + padding = (4 - len(parts[1]) % 4) * '=' + if len(padding) == 3: + # According to spec, 3 padding characters cannot occur + # in a valid jwt + # 
https://tools.ietf.org/html/rfc7515#appendix-C + return + + if PY3: + jwt_attributes = json.loads( + base64.urlsafe_b64decode(parts[1] + padding).decode('utf-8') + ) + else: + jwt_attributes = json.loads( + base64.b64decode(parts[1] + padding) + ) + + expire = jwt_attributes.get('exp') + + if ((expire is not None) and + (_is_expired(datetime.datetime.fromtimestamp(expire, + tz=UTC)))): + self._refresh_oidc(provider) + + if self._config_persister: + self._config_persister() + + self.token = "Bearer %s" % provider['config']['id-token'] + + return self.token + + def _refresh_oidc(self, provider): + config = Configuration() + + if 'idp-certificate-authority-data' in provider['config']: + ca_cert = tempfile.NamedTemporaryFile(delete=True) + + if PY3: + cert = base64.b64decode( + provider['config']['idp-certificate-authority-data'] + ).decode('utf-8') + else: + cert = base64.b64decode( + provider['config']['idp-certificate-authority-data'] + "==" + ) + + with open(ca_cert.name, 'w') as fh: + fh.write(cert) + + config.ssl_ca_cert = ca_cert.name + + elif 'idp-certificate-authority' in provider['config']: + config.ssl_ca_cert = provider['config']['idp-certificate-authority'] + + else: + config.verify_ssl = False + + client = ApiClient(configuration=config) + + response = client.request( + method="GET", + url="%s/.well-known/openid-configuration" + % provider['config']['idp-issuer-url'] + ) + + if response.status != 200: + return + + response = json.loads(response.data) + + request = OAuth2Session( + client_id=provider['config']['client-id'], + token=provider['config']['refresh-token'], + auto_refresh_kwargs={ + 'client_id': provider['config']['client-id'], + 'client_secret': provider['config']['client-secret'] + }, + auto_refresh_url=response['token_endpoint'] + ) + + try: + refresh = request.refresh_token( + token_url=response['token_endpoint'], + refresh_token=provider['config']['refresh-token'], + auth=(provider['config']['client-id'], + provider['config']['client-secret']), + verify=config.ssl_ca_cert if config.verify_ssl else None + ) + except oauthlib.oauth2.rfc6749.errors.InvalidClientIdError: + return + + provider['config'].value['id-token'] = refresh['id_token'] + provider['config'].value['refresh-token'] = refresh['refresh_token'] + + def _load_from_exec_plugin(self): + if 'exec' not in self._user: + return + try: + base_path = self._get_base_path(self._cluster.path) + status = ExecProvider(self._user['exec'], base_path, self._cluster).run() + if 'token' in status: + self.token = "Bearer %s" % status['token'] + elif 'clientCertificateData' in status: + # https://kubernetes.io/docs/reference/access-authn-authz/authentication/#input-and-output-formats + # Plugin has provided certificates instead of a token. 
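+                # A client certificate is unusable without its private key,
+                # so a missing clientKeyData is treated as a plugin error.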
+ if 'clientKeyData' not in status: + logging.error('exec: missing clientKeyData field in ' + 'plugin output') + return None + self.cert_file = FileOrData( + status, None, + data_key_name='clientCertificateData', + file_base_path=base_path, + base64_file_content=False, + temp_file_path=self._temp_file_path).as_file() + self.key_file = FileOrData( + status, None, + data_key_name='clientKeyData', + file_base_path=base_path, + base64_file_content=False, + temp_file_path=self._temp_file_path).as_file() + else: + logging.error('exec: missing token or clientCertificateData ' + 'field in plugin output') + return None + if 'expirationTimestamp' in status: + self.expiry = parse_rfc3339(status['expirationTimestamp']) + return True + except Exception as e: + logging.error(str(e)) + + def _load_user_token(self): + base_path = self._get_base_path(self._user.path) + token = FileOrData( + self._user, 'tokenFile', 'token', + file_base_path=base_path, + base64_file_content=False, + temp_file_path=self._temp_file_path).as_data() + if token: + self.token = "Bearer %s" % token + return True + + def _load_user_pass_token(self): + if 'username' in self._user and 'password' in self._user: + self.token = urllib3.util.make_headers( + basic_auth=(self._user['username'] + ':' + + self._user['password'])).get('authorization') + return True + + def _get_base_path(self, config_path): + if self._config_base_path is not None: + return self._config_base_path + if config_path is not None: + return os.path.abspath(os.path.dirname(config_path)) + return "" + + def _load_cluster_info(self): + if 'server' in self._cluster: + self.host = self._cluster['server'].rstrip('/') + if self.host.startswith("https"): + base_path = self._get_base_path(self._cluster.path) + self.ssl_ca_cert = FileOrData( + self._cluster, 'certificate-authority', + file_base_path=base_path, + temp_file_path=self._temp_file_path).as_file() + if 'cert_file' not in self.__dict__: + # cert_file could have been provided by + # _load_from_exec_plugin; only load from the _user + # section if we need it. 
self.cert_file = FileOrData(
+                        self._user, 'client-certificate',
+                        file_base_path=base_path,
+                        temp_file_path=self._temp_file_path).as_file()
+                    self.key_file = FileOrData(
+                        self._user, 'client-key',
+                        file_base_path=base_path,
+                        temp_file_path=self._temp_file_path).as_file()
+            if 'insecure-skip-tls-verify' in self._cluster:
+                self.verify_ssl = not self._cluster['insecure-skip-tls-verify']
+            if 'tls-server-name' in self._cluster:
+                self.tls_server_name = self._cluster['tls-server-name']
+
+    def _set_config(self, client_configuration):
+        if 'token' in self.__dict__:
+            client_configuration.api_key['authorization'] = self.token
+
+        def _refresh_api_key(client_configuration):
+            if ('expiry' in self.__dict__ and _is_expired(self.expiry)):
+                self._load_authentication()
+            self._set_config(client_configuration)
+        client_configuration.refresh_api_key_hook = _refresh_api_key
+        # copy these keys directly from self to the configuration object
+        keys = ['host', 'ssl_ca_cert', 'cert_file', 'key_file', 'verify_ssl',
+                'tls_server_name']
+        for key in keys:
+            if key in self.__dict__:
+                setattr(client_configuration, key, getattr(self, key))
+
+    def load_and_set(self, client_configuration):
+        self._load_authentication()
+        self._load_cluster_info()
+        self._set_config(client_configuration)
+
+    def list_contexts(self):
+        return [context.value for context in self._config['contexts']]
+
+    @property
+    def current_context(self):
+        return self._current_context.value
+
+
+class ConfigNode(object):
+    """Remembers each config key's path and constructs a relevant exception
+    message in case of missing keys. The assumption is that all access keys
+    are present in a well-formed kube-config."""
+
+    def __init__(self, name, value, path=None):
+        self.name = name
+        self.value = value
+        self.path = path
+
+    def __contains__(self, key):
+        return key in self.value
+
+    def __len__(self):
+        return len(self.value)
+
+    def safe_get(self, key):
+        if (isinstance(self.value, list) and isinstance(key, int) or
+                key in self.value):
+            return self.value[key]
+
+    def __getitem__(self, key):
+        v = self.safe_get(key)
+        if v is None:
+            raise ConfigException(
+                'Invalid kube-config file. Expected key %s in %s'
+                % (key, self.name))
+        if isinstance(v, dict) or isinstance(v, list):
+            return ConfigNode('%s/%s' % (self.name, key), v, self.path)
+        else:
+            return v
+
+    def get_with_name(self, name, safe=False):
+        if not isinstance(self.value, list):
+            raise ConfigException(
+                'Invalid kube-config file. Expected %s to be a list'
+                % self.name)
+        result = None
+        for v in self.value:
+            if 'name' not in v:
+                raise ConfigException(
+                    'Invalid kube-config file. '
+                    'Expected all values in %s list to have \'name\' key'
+                    % self.name)
+            if v['name'] == name:
+                if result is None:
+                    result = v
+                else:
+                    raise ConfigException(
+                        'Invalid kube-config file. '
+                        'Expected only one object with name %s in %s list'
+                        % (name, self.name))
+        if result is not None:
+            if isinstance(result, ConfigNode):
+                return result
+            else:
+                return ConfigNode(
+                    '%s[name=%s]' %
+                    (self.name, name), result, self.path)
+        if safe:
+            return None
+        raise ConfigException(
+            'Invalid kube-config file. '
+            'Expected object with name %s in %s list' % (name, self.name))
+
+
+class KubeConfigMerger:
+
+    """Reads and merges configuration from one or more kube-config files.
+    The property `config` can be passed to the KubeConfigLoader as config_dict.
+
+    It uses a path attribute from ConfigNode to store the path to each
+    kubeconfig. This path is required to load certs from relative paths.
+
+    The `save_changes` method writes any changed kubeconfig back to disk
+    (it compares the current state of each file's dict against the snapshot
+    taken when the file was last loaded or saved).
+    """
+
+    def __init__(self, paths):
+        self.paths = []
+        self.config_files = {}
+        self.config_merged = None
+        if hasattr(paths, 'read'):
+            self._load_config_from_file_like_object(paths)
+        else:
+            self._load_config_from_file_path(paths)
+
+    @property
+    def config(self):
+        return self.config_merged
+
+    def _load_config_from_file_like_object(self, string):
+        if hasattr(string, 'getvalue'):
+            config = yaml.safe_load(string.getvalue())
+        else:
+            config = yaml.safe_load(string.read())
+
+        if config is None:
+            raise ConfigException(
+                'Invalid kube-config.')
+        if self.config_merged is None:
+            self.config_merged = copy.deepcopy(config)
+        # doesn't need to do any further merging
+
+    def _load_config_from_file_path(self, string):
+        for path in string.split(ENV_KUBECONFIG_PATH_SEPARATOR):
+            if path:
+                path = os.path.expanduser(path)
+                if os.path.exists(path):
+                    self.paths.append(path)
+                    self.load_config(path)
+        self.config_saved = copy.deepcopy(self.config_files)
+
+    def load_config(self, path):
+        with open(path) as f:
+            config = yaml.safe_load(f)
+
+        if config is None:
+            raise ConfigException(
+                'Invalid kube-config. '
+                '%s file is empty' % path)
+
+        if self.config_merged is None:
+            config_merged = copy.deepcopy(config)
+            for item in ('clusters', 'contexts', 'users'):
+                config_merged[item] = []
+            self.config_merged = ConfigNode(path, config_merged, path)
+        for item in ('clusters', 'contexts', 'users'):
+            self._merge(item, config.get(item, []) or [], path)
+
+        if 'current-context' in config:
+            self.config_merged.value['current-context'] = config['current-context']
+
+        self.config_files[path] = config
+
+    def _merge(self, item, add_cfg, path):
+        for new_item in add_cfg:
+            for exists in self.config_merged.value[item]:
+                if exists['name'] == new_item['name']:
+                    break
+            else:
+                self.config_merged.value[item].append(ConfigNode(
+                    '{}/{}'.format(path, new_item), new_item, path))
+
+    def save_changes(self):
+        for path in self.paths:
+            if self.config_saved[path] != self.config_files[path]:
+                self.save_config(path)
+        self.config_saved = copy.deepcopy(self.config_files)
+
+    def save_config(self, path):
+        with open(path, 'w') as f:
+            yaml.safe_dump(self.config_files[path], f,
+                           default_flow_style=False)
+
+
+def _get_kube_config_loader_for_yaml_file(
+        filename, persist_config=False, **kwargs):
+    return _get_kube_config_loader(
+        filename=filename,
+        persist_config=persist_config,
+        **kwargs)
+
+
+def _get_kube_config_loader(
+        filename=None,
+        config_dict=None,
+        persist_config=False,
+        **kwargs):
+    if config_dict is None:
+        kcfg = KubeConfigMerger(filename)
+        if persist_config and 'config_persister' not in kwargs:
+            kwargs['config_persister'] = kcfg.save_changes
+
+        if kcfg.config is None:
+            raise ConfigException(
+                'Invalid kube-config file. '
+                'No configuration found.')
+        return KubeConfigLoader(
+            config_dict=kcfg.config,
+            config_base_path=None,
+            **kwargs)
+    else:
+        return KubeConfigLoader(
+            config_dict=config_dict,
+            config_base_path=None,
+            **kwargs)
+
+
+def list_kube_config_contexts(config_file=None):
+
+    if config_file is None:
+        config_file = KUBE_CONFIG_DEFAULT_LOCATION
+
+    loader = _get_kube_config_loader(filename=config_file)
+    return loader.list_contexts(), loader.current_context
+
+
+def load_kube_config(config_file=None, context=None,
+                     client_configuration=None,
+                     persist_config=True,
+                     temp_file_path=None):
+    """Loads authentication and cluster information from a kube-config file
+    and stores them in kubernetes.client.configuration.
+
+    :param config_file: Name of the kube-config file.
+    :param context: Set the active context. If set to None, current_context
+        from the config file will be used.
+    :param client_configuration: The kubernetes.client.Configuration to
+        set configs to.
+    :param persist_config: If True, the config file will be updated when
+        changed (e.g. on GCP token refresh).
+    :param temp_file_path: Directory where temp files are stored.
+    """
+
+    if config_file is None:
+        config_file = KUBE_CONFIG_DEFAULT_LOCATION
+
+    loader = _get_kube_config_loader(
+        filename=config_file, active_context=context,
+        persist_config=persist_config,
+        temp_file_path=temp_file_path)
+
+    if client_configuration is None:
+        config = type.__call__(Configuration)
+        loader.load_and_set(config)
+        Configuration.set_default(config)
+    else:
+        loader.load_and_set(client_configuration)
+
+
+def load_kube_config_from_dict(config_dict, context=None,
+                               client_configuration=None,
+                               persist_config=True,
+                               temp_file_path=None):
+    """Loads authentication and cluster information from a config dict
+    and stores them in kubernetes.client.configuration.
+
+    :param config_dict: Takes the config file as a dict.
+    :param context: Set the active context. If set to None, current_context
+        from the config will be used.
+    :param client_configuration: The kubernetes.client.Configuration to
+        set configs to.
+    :param persist_config: If True, the config file will be updated when
+        changed (e.g. on GCP token refresh).
+    :param temp_file_path: Directory where temp files are stored.
+    """
+    if config_dict is None:
+        raise ConfigException(
+            'Invalid kube-config dict. '
+            'No configuration found.')
+
+    loader = _get_kube_config_loader(
+        config_dict=config_dict, active_context=context,
+        persist_config=persist_config,
+        temp_file_path=temp_file_path)
+
+    if client_configuration is None:
+        config = type.__call__(Configuration)
+        loader.load_and_set(config)
+        Configuration.set_default(config)
+    else:
+        loader.load_and_set(client_configuration)
+
+
+def new_client_from_config(
+        config_file=None,
+        context=None,
+        persist_config=True,
+        client_configuration=None):
+    """
+    Loads configuration the same way as load_kube_config but returns an
+    ApiClient to be used with any API object. This allows the caller to
+    talk to multiple clusters concurrently.
+ """ + if client_configuration is None: + client_configuration = type.__call__(Configuration) + load_kube_config(config_file=config_file, context=context, + client_configuration=client_configuration, + persist_config=persist_config) + return ApiClient(configuration=client_configuration) + + +def new_client_from_config_dict( + config_dict=None, + context=None, + persist_config=True, + temp_file_path=None, + client_configuration=None): + """ + Loads configuration the same as load_kube_config_from_dict but returns an ApiClient + to be used with any API object. This will allow the caller to concurrently + talk with multiple clusters. + """ + if client_configuration is None: + client_configuration = type.__call__(Configuration) + load_kube_config_from_dict(config_dict=config_dict, context=context, + client_configuration=client_configuration, + persist_config=persist_config, + temp_file_path=temp_file_path) + return ApiClient(configuration=client_configuration) diff --git a/kubernetes/config/kube_config_test.py b/kubernetes/config/kube_config_test.py new file mode 100644 index 0000000000..51a6ffc946 --- /dev/null +++ b/kubernetes/config/kube_config_test.py @@ -0,0 +1,1945 @@ +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
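+# Tests for kube_config.py: FileOrData handling, ConfigNode access,
+# auth-provider token refresh (GCP, Azure, OIDC) and kubeconfig merging.
+# Run with, e.g.: python -m pytest kubernetes/config/kube_config_test.py -v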
+ +import base64 +import datetime +import io +import json +import os +from pprint import pprint +import shutil +import tempfile +import unittest +import sys +import stat +from unittest import mock +from collections import namedtuple + +from unittest import mock +import yaml +from six import PY3, next + +from kubernetes.client import Configuration + +from .config_exception import ConfigException +from .dateutil import UTC, format_rfc3339, parse_rfc3339 +from .kube_config import (ENV_KUBECONFIG_PATH_SEPARATOR, CommandTokenSource, + ConfigNode, FileOrData, KubeConfigLoader, + KubeConfigMerger, _cleanup_temp_files, + _create_temp_file_with_content, + _get_kube_config_loader, + _get_kube_config_loader_for_yaml_file, + list_kube_config_contexts, load_kube_config, + load_kube_config_from_dict, new_client_from_config, new_client_from_config_dict) + +BEARER_TOKEN_FORMAT = "Bearer %s" + +EXPIRY_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" +# should be less than kube_config.EXPIRY_SKEW_PREVENTION_DELAY +PAST_EXPIRY_TIMEDELTA = 2 +# should be more than kube_config.EXPIRY_SKEW_PREVENTION_DELAY +FUTURE_EXPIRY_TIMEDELTA = 60 + +NON_EXISTING_FILE = "zz_non_existing_file_472398324" + + +def _base64(string): + return base64.standard_b64encode(string.encode()).decode() + + +def _urlsafe_unpadded_b64encode(string): + return base64.urlsafe_b64encode(string.encode()).decode().rstrip('=') + + +def _format_expiry_datetime(dt): + return dt.strftime(EXPIRY_DATETIME_FORMAT) + + +def _get_expiry(loader, active_context): + expired_gcp_conf = (item for item in loader._config.value.get("users") + if item.get("name") == active_context) + return next(expired_gcp_conf).get("user").get("auth-provider") \ + .get("config").get("expiry") + + +def _raise_exception(st): + raise Exception(st) + + +TEST_FILE_KEY = "file" +TEST_DATA_KEY = "data" +TEST_FILENAME = "test-filename" + +TEST_DATA = "test-data" +TEST_DATA_BASE64 = _base64(TEST_DATA) + +TEST_ANOTHER_DATA = "another-test-data" +TEST_ANOTHER_DATA_BASE64 = _base64(TEST_ANOTHER_DATA) + +TEST_HOST = "test-host" +TEST_USERNAME = "me" +TEST_PASSWORD = "pass" +# token for me:pass +TEST_BASIC_TOKEN = "Basic bWU6cGFzcw==" +DATETIME_EXPIRY_PAST = datetime.datetime.now(tz=UTC + ).replace(tzinfo=None) - datetime.timedelta(minutes=PAST_EXPIRY_TIMEDELTA) +DATETIME_EXPIRY_FUTURE = datetime.datetime.now(tz=UTC + ).replace(tzinfo=None) + datetime.timedelta(minutes=FUTURE_EXPIRY_TIMEDELTA) +TEST_TOKEN_EXPIRY_PAST = _format_expiry_datetime(DATETIME_EXPIRY_PAST) + +TEST_SSL_HOST = "https://test-host" +TEST_CERTIFICATE_AUTH = "cert-auth" +TEST_CERTIFICATE_AUTH_BASE64 = _base64(TEST_CERTIFICATE_AUTH) +TEST_CLIENT_KEY = "client-key" +TEST_CLIENT_KEY_BASE64 = _base64(TEST_CLIENT_KEY) +TEST_CLIENT_CERT = "client-cert" +TEST_CLIENT_CERT_BASE64 = _base64(TEST_CLIENT_CERT) +TEST_TLS_SERVER_NAME = "kubernetes.io" + +TEST_OIDC_TOKEN = "test-oidc-token" +TEST_OIDC_INFO = "{\"name\": \"test\"}" +TEST_OIDC_BASE = ".".join([ + _urlsafe_unpadded_b64encode(TEST_OIDC_TOKEN), + _urlsafe_unpadded_b64encode(TEST_OIDC_INFO) +]) +TEST_OIDC_LOGIN = ".".join([ + TEST_OIDC_BASE, + _urlsafe_unpadded_b64encode(TEST_CLIENT_CERT_BASE64) +]) +TEST_OIDC_TOKEN = "Bearer %s" % TEST_OIDC_LOGIN +TEST_OIDC_EXP = "{\"name\": \"test\",\"exp\": 536457600}" +TEST_OIDC_EXP_BASE = _urlsafe_unpadded_b64encode( + TEST_OIDC_TOKEN) + "." 
+ _urlsafe_unpadded_b64encode(TEST_OIDC_EXP) +TEST_OIDC_EXPIRED_LOGIN = ".".join([ + TEST_OIDC_EXP_BASE, + _urlsafe_unpadded_b64encode(TEST_CLIENT_CERT) +]) +TEST_OIDC_CONTAINS_RESERVED_CHARACTERS = ".".join([ + _urlsafe_unpadded_b64encode(TEST_OIDC_TOKEN), + _urlsafe_unpadded_b64encode(TEST_OIDC_INFO).replace("a", "+"), + _urlsafe_unpadded_b64encode(TEST_CLIENT_CERT) +]) +TEST_OIDC_INVALID_PADDING_LENGTH = ".".join([ + _urlsafe_unpadded_b64encode(TEST_OIDC_TOKEN), + "aaaaa", + _urlsafe_unpadded_b64encode(TEST_CLIENT_CERT) +]) + +TEST_OIDC_CA = _base64(TEST_CERTIFICATE_AUTH) + +TEST_AZURE_LOGIN = TEST_OIDC_LOGIN +TEST_AZURE_TOKEN = "test-azure-token" +TEST_AZURE_TOKEN_FULL = "Bearer " + TEST_AZURE_TOKEN + + +class BaseTestCase(unittest.TestCase): + + def setUp(self): + self._temp_files = [] + + def tearDown(self): + for f in self._temp_files: + os.remove(f) + + def _create_temp_file(self, content=""): + handler, name = tempfile.mkstemp() + self._temp_files.append(name) + os.write(handler, str.encode(content)) + os.close(handler) + return name + + def expect_exception(self, func, message_part, *args, **kwargs): + with self.assertRaises(ConfigException) as context: + func(*args, **kwargs) + self.assertIn(message_part, str(context.exception)) + + +class TestFileOrData(BaseTestCase): + + @staticmethod + def get_file_content(filename): + with open(filename) as f: + return f.read() + + def test_file_given_file(self): + temp_filename = _create_temp_file_with_content(TEST_DATA) + obj = {TEST_FILE_KEY: temp_filename} + t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY) + self.assertEqual(TEST_DATA, self.get_file_content(t.as_file())) + + def test_file_given_non_existing_file(self): + temp_filename = NON_EXISTING_FILE + obj = {TEST_FILE_KEY: temp_filename} + t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY) + self.expect_exception(t.as_file, "does not exist") + + def test_file_given_data(self): + obj = {TEST_DATA_KEY: TEST_DATA_BASE64} + t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY, + data_key_name=TEST_DATA_KEY) + self.assertEqual(TEST_DATA, self.get_file_content(t.as_file())) + + def test_file_given_data_no_base64(self): + obj = {TEST_DATA_KEY: TEST_DATA} + t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY, + data_key_name=TEST_DATA_KEY, base64_file_content=False) + self.assertEqual(TEST_DATA, self.get_file_content(t.as_file())) + + def test_data_given_data(self): + obj = {TEST_DATA_KEY: TEST_DATA_BASE64} + t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY, + data_key_name=TEST_DATA_KEY) + self.assertEqual(TEST_DATA_BASE64, t.as_data()) + + def test_data_given_file(self): + obj = { + TEST_FILE_KEY: self._create_temp_file(content=TEST_DATA)} + t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY) + self.assertEqual(TEST_DATA_BASE64, t.as_data()) + + def test_data_given_file_no_base64(self): + obj = { + TEST_FILE_KEY: self._create_temp_file(content=TEST_DATA)} + t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY, + base64_file_content=False) + self.assertEqual(TEST_DATA, t.as_data()) + + def test_data_given_file_and_data(self): + obj = { + TEST_DATA_KEY: TEST_DATA_BASE64, + TEST_FILE_KEY: self._create_temp_file( + content=TEST_ANOTHER_DATA)} + t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY, + data_key_name=TEST_DATA_KEY) + self.assertEqual(TEST_DATA_BASE64, t.as_data()) + + def test_file_given_file_and_data(self): + obj = { + TEST_DATA_KEY: TEST_DATA_BASE64, + TEST_FILE_KEY: self._create_temp_file( + content=TEST_ANOTHER_DATA)} + t = FileOrData(obj=obj, 
file_key_name=TEST_FILE_KEY, + data_key_name=TEST_DATA_KEY) + self.assertEqual(TEST_DATA, self.get_file_content(t.as_file())) + + def test_file_with_custom_dirname(self): + tempfile = self._create_temp_file(content=TEST_DATA) + tempfile_dir = os.path.dirname(tempfile) + tempfile_basename = os.path.basename(tempfile) + obj = {TEST_FILE_KEY: tempfile_basename} + t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY, + file_base_path=tempfile_dir) + self.assertEqual(TEST_DATA, self.get_file_content(t.as_file())) + + def test_create_temp_file_with_content(self): + self.assertEqual(TEST_DATA, + self.get_file_content( + _create_temp_file_with_content(TEST_DATA))) + _cleanup_temp_files() + + def test_file_given_data_bytes(self): + obj = {TEST_DATA_KEY: TEST_DATA_BASE64.encode()} + t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY, + data_key_name=TEST_DATA_KEY) + self.assertEqual(TEST_DATA, self.get_file_content(t.as_file())) + + def test_file_given_data_bytes_no_base64(self): + obj = {TEST_DATA_KEY: TEST_DATA.encode()} + t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY, + data_key_name=TEST_DATA_KEY, base64_file_content=False) + self.assertEqual(TEST_DATA, self.get_file_content(t.as_file())) + + def test_file_given_no_object(self): + t = FileOrData(obj=None, file_key_name=TEST_FILE_KEY, + data_key_name=TEST_DATA_KEY) + self.assertEqual(t.as_file(), None) + + def test_file_given_no_object_data(self): + t = FileOrData(obj=None, file_key_name=TEST_FILE_KEY, + data_key_name=TEST_DATA_KEY) + self.assertEqual(t.as_data(), None) + + +class TestConfigNode(BaseTestCase): + + test_obj = {"key1": "test", "key2": ["a", "b", "c"], + "key3": {"inner_key": "inner_value"}, + "with_names": [{"name": "test_name", "value": "test_value"}, + {"name": "test_name2", + "value": {"key1", "test"}}, + {"name": "test_name3", "value": [1, 2, 3]}], + "with_names_dup": [ + {"name": "test_name", "value": "test_value"}, + {"name": "test_name", + "value": {"key1", "test"}}, + {"name": "test_name3", "value": [1, 2, 3]} + ]} + + def setUp(self): + super(TestConfigNode, self).setUp() + self.node = ConfigNode("test_obj", self.test_obj) + + def test_normal_map_array_operations(self): + self.assertEqual("test", self.node['key1']) + self.assertEqual(5, len(self.node)) + + self.assertEqual("test_obj/key2", self.node['key2'].name) + self.assertEqual(["a", "b", "c"], self.node['key2'].value) + self.assertEqual("b", self.node['key2'][1]) + self.assertEqual(3, len(self.node['key2'])) + + self.assertEqual("test_obj/key3", self.node['key3'].name) + self.assertEqual({"inner_key": "inner_value"}, + self.node['key3'].value) + self.assertEqual("inner_value", self.node['key3']["inner_key"]) + self.assertEqual(1, len(self.node['key3'])) + + def test_get_with_name(self): + node = self.node["with_names"] + self.assertEqual( + "test_value", + node.get_with_name("test_name")["value"]) + self.assertTrue( + isinstance(node.get_with_name("test_name2"), ConfigNode)) + self.assertTrue( + isinstance(node.get_with_name("test_name3"), ConfigNode)) + self.assertEqual("test_obj/with_names[name=test_name2]", + node.get_with_name("test_name2").name) + self.assertEqual("test_obj/with_names[name=test_name3]", + node.get_with_name("test_name3").name) + + def test_key_does_not_exists(self): + self.expect_exception(lambda: self.node['not-exists-key'], + "Expected key not-exists-key in test_obj") + self.expect_exception(lambda: self.node['key3']['not-exists-key'], + "Expected key not-exists-key in test_obj/key3") + + def test_get_with_name_on_invalid_object(self): + 
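+        # Named lists require every element to carry a 'name' key; lookups
+        # on malformed lists must raise a ConfigException.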
self.expect_exception( + lambda: self.node['key2'].get_with_name('no-name'), + "Expected all values in test_obj/key2 list to have \'name\' key") + + def test_get_with_name_on_non_list_object(self): + self.expect_exception( + lambda: self.node['key3'].get_with_name('no-name'), + "Expected test_obj/key3 to be a list") + + def test_get_with_name_on_name_does_not_exists(self): + self.expect_exception( + lambda: self.node['with_names'].get_with_name('no-name'), + "Expected object with name no-name in test_obj/with_names list") + + def test_get_with_name_on_duplicate_name(self): + self.expect_exception( + lambda: self.node['with_names_dup'].get_with_name('test_name'), + "Expected only one object with name test_name in " + "test_obj/with_names_dup list") + + +class FakeConfig: + + FILE_KEYS = ["ssl_ca_cert", "key_file", "cert_file"] + IGNORE_KEYS = ["refresh_api_key_hook"] + + def __init__(self, token=None, **kwargs): + self.api_key = {} + # Provided by the OpenAPI-generated Configuration class + self.refresh_api_key_hook = None + if token: + self.api_key['authorization'] = token + + self.__dict__.update(kwargs) + + def __eq__(self, other): + if len(self.__dict__) != len(other.__dict__): + return + for k, v in self.__dict__.items(): + if k in self.IGNORE_KEYS: + continue + if k not in other.__dict__: + return + if k in self.FILE_KEYS: + if v and other.__dict__[k]: + try: + with open(v) as f1, open(other.__dict__[k]) as f2: + if f1.read() != f2.read(): + return + except OSError: + # fall back to only compare filenames in case we are + # testing the passing of filenames to the config + if other.__dict__[k] != v: + return + else: + if other.__dict__[k] != v: + return + else: + if other.__dict__[k] != v: + return + return True + + def __repr__(self): + rep = "\n" + for k, v in self.__dict__.items(): + val = v + if k in self.FILE_KEYS: + try: + with open(v) as f: + val = "FILE: %s" % str.decode(f.read()) + except OSError as e: + val = "ERROR: %s" % str(e) + rep += "\t%s: %s\n" % (k, val) + return "Config(%s\n)" % rep + + +class TestKubeConfigLoader(BaseTestCase): + TEST_KUBE_CONFIG = { + "current-context": "no_user", + "contexts": [ + { + "name": "no_user", + "context": { + "cluster": "default" + } + }, + { + "name": "simple_token", + "context": { + "cluster": "default", + "user": "simple_token" + } + }, + { + "name": "gcp", + "context": { + "cluster": "default", + "user": "gcp" + } + }, + { + "name": "expired_gcp", + "context": { + "cluster": "default", + "user": "expired_gcp" + } + }, + { + "name": "expired_gcp_refresh", + "context": { + "cluster": "default", + "user": "expired_gcp_refresh" + } + }, + { + "name": "oidc", + "context": { + "cluster": "default", + "user": "oidc" + } + }, + { + "name": "azure", + "context": { + "cluster": "default", + "user": "azure" + } + }, + { + "name": "azure_num", + "context": { + "cluster": "default", + "user": "azure_num" + } + }, + { + "name": "azure_str", + "context": { + "cluster": "default", + "user": "azure_str" + } + }, + { + "name": "azure_num_error", + "context": { + "cluster": "default", + "user": "azure_str_error" + } + }, + { + "name": "azure_str_error", + "context": { + "cluster": "default", + "user": "azure_str_error" + } + }, + { + "name": "expired_oidc", + "context": { + "cluster": "default", + "user": "expired_oidc" + } + }, + { + "name": "expired_oidc_with_idp_ca_file", + "context": { + "cluster": "default", + "user": "expired_oidc_with_idp_ca_file" + } + }, + { + "name": "expired_oidc_nocert", + "context": { + "cluster": "default", + "user": 
"expired_oidc_nocert" + } + }, + { + "name": "oidc_contains_reserved_character", + "context": { + "cluster": "default", + "user": "oidc_contains_reserved_character" + + } + }, + { + "name": "oidc_invalid_padding_length", + "context": { + "cluster": "default", + "user": "oidc_invalid_padding_length" + + } + }, + { + "name": "user_pass", + "context": { + "cluster": "default", + "user": "user_pass" + } + }, + { + "name": "ssl", + "context": { + "cluster": "ssl", + "user": "ssl" + } + }, + { + "name": "no_ssl_verification", + "context": { + "cluster": "no_ssl_verification", + "user": "ssl" + } + }, + { + "name": "ssl-no_file", + "context": { + "cluster": "ssl-no_file", + "user": "ssl-no_file" + } + }, + { + "name": "ssl-local-file", + "context": { + "cluster": "ssl-local-file", + "user": "ssl-local-file" + } + }, + { + "name": "non_existing_user", + "context": { + "cluster": "default", + "user": "non_existing_user" + } + }, + { + "name": "exec_cred_user", + "context": { + "cluster": "default", + "user": "exec_cred_user" + } + }, + { + "name": "exec_cred_user_certificate", + "context": { + "cluster": "ssl", + "user": "exec_cred_user_certificate" + } + }, + { + "name": "contexttestcmdpath", + "context": { + "cluster": "clustertestcmdpath", + "user": "usertestcmdpath" + } + }, + { + "name": "contexttestcmdpathempty", + "context": { + "cluster": "clustertestcmdpath", + "user": "usertestcmdpathempty" + } + }, + { + "name": "contexttestcmdpathscope", + "context": { + "cluster": "clustertestcmdpath", + "user": "usertestcmdpathscope" + } + }, + { + "name": "tls-server-name", + "context": { + "cluster": "tls-server-name", + "user": "ssl" + } + }, + ], + "clusters": [ + { + "name": "default", + "cluster": { + "server": TEST_HOST + } + }, + { + "name": "ssl-no_file", + "cluster": { + "server": TEST_SSL_HOST, + "certificate-authority": TEST_CERTIFICATE_AUTH, + } + }, + { + "name": "ssl-local-file", + "cluster": { + "server": TEST_SSL_HOST, + "certificate-authority": "cert_test", + } + }, + { + "name": "ssl", + "cluster": { + "server": TEST_SSL_HOST, + "certificate-authority-data": + TEST_CERTIFICATE_AUTH_BASE64, + "insecure-skip-tls-verify": False, + } + }, + { + "name": "no_ssl_verification", + "cluster": { + "server": TEST_SSL_HOST, + "insecure-skip-tls-verify": True, + } + }, + { + "name": "clustertestcmdpath", + "cluster": {} + }, + { + "name": "tls-server-name", + "cluster": { + "server": TEST_SSL_HOST, + "certificate-authority-data": + TEST_CERTIFICATE_AUTH_BASE64, + "insecure-skip-tls-verify": False, + "tls-server-name": TEST_TLS_SERVER_NAME, + } + }, + ], + "users": [ + { + "name": "simple_token", + "user": { + "token": TEST_DATA_BASE64, + "username": TEST_USERNAME, # should be ignored + "password": TEST_PASSWORD, # should be ignored + } + }, + { + "name": "gcp", + "user": { + "auth-provider": { + "name": "gcp", + "config": { + "access-token": TEST_DATA_BASE64, + } + }, + "token": TEST_DATA_BASE64, # should be ignored + "username": TEST_USERNAME, # should be ignored + "password": TEST_PASSWORD, # should be ignored + } + }, + { + "name": "expired_gcp", + "user": { + "auth-provider": { + "name": "gcp", + "config": { + "access-token": TEST_DATA_BASE64, + "expiry": TEST_TOKEN_EXPIRY_PAST, # always in past + } + }, + "token": TEST_DATA_BASE64, # should be ignored + "username": TEST_USERNAME, # should be ignored + "password": TEST_PASSWORD, # should be ignored + } + }, + # Duplicated from "expired_gcp" so test_load_gcp_token_with_refresh + # is isolated from test_gcp_get_api_key_with_prefix. 
+ { + "name": "expired_gcp_refresh", + "user": { + "auth-provider": { + "name": "gcp", + "config": { + "access-token": TEST_DATA_BASE64, + "expiry": TEST_TOKEN_EXPIRY_PAST, # always in past + } + }, + "token": TEST_DATA_BASE64, # should be ignored + "username": TEST_USERNAME, # should be ignored + "password": TEST_PASSWORD, # should be ignored + } + }, + { + "name": "oidc", + "user": { + "auth-provider": { + "name": "oidc", + "config": { + "id-token": TEST_OIDC_LOGIN + } + } + } + }, + { + "name": "azure", + "user": { + "auth-provider": { + "config": { + "access-token": TEST_AZURE_TOKEN, + "apiserver-id": "00000002-0000-0000-c000-" + "000000000000", + "environment": "AzurePublicCloud", + "refresh-token": "refreshToken", + "tenant-id": "9d2ac018-e843-4e14-9e2b-4e0ddac75433" + }, + "name": "azure" + } + } + }, + { + "name": "azure_num", + "user": { + "auth-provider": { + "config": { + "access-token": TEST_AZURE_TOKEN, + "apiserver-id": "00000002-0000-0000-c000-" + "000000000000", + "environment": "AzurePublicCloud", + "expires-in": "0", + "expires-on": "156207275", + "refresh-token": "refreshToken", + "tenant-id": "9d2ac018-e843-4e14-9e2b-4e0ddac75433" + }, + "name": "azure" + } + } + }, + { + "name": "azure_str", + "user": { + "auth-provider": { + "config": { + "access-token": TEST_AZURE_TOKEN, + "apiserver-id": "00000002-0000-0000-c000-" + "000000000000", + "environment": "AzurePublicCloud", + "expires-in": "0", + "expires-on": "2018-10-18 00:52:29.044727", + "refresh-token": "refreshToken", + "tenant-id": "9d2ac018-e843-4e14-9e2b-4e0ddac75433" + }, + "name": "azure" + } + } + }, + { + "name": "azure_str_error", + "user": { + "auth-provider": { + "config": { + "access-token": TEST_AZURE_TOKEN, + "apiserver-id": "00000002-0000-0000-c000-" + "000000000000", + "environment": "AzurePublicCloud", + "expires-in": "0", + "expires-on": "2018-10-18 00:52", + "refresh-token": "refreshToken", + "tenant-id": "9d2ac018-e843-4e14-9e2b-4e0ddac75433" + }, + "name": "azure" + } + } + }, + { + "name": "azure_num_error", + "user": { + "auth-provider": { + "config": { + "access-token": TEST_AZURE_TOKEN, + "apiserver-id": "00000002-0000-0000-c000-" + "000000000000", + "environment": "AzurePublicCloud", + "expires-in": "0", + "expires-on": "-1", + "refresh-token": "refreshToken", + "tenant-id": "9d2ac018-e843-4e14-9e2b-4e0ddac75433" + }, + "name": "azure" + } + } + }, + { + "name": "expired_oidc", + "user": { + "auth-provider": { + "name": "oidc", + "config": { + "client-id": "tectonic-kubectl", + "client-secret": "FAKE_SECRET", + "id-token": TEST_OIDC_EXPIRED_LOGIN, + "idp-certificate-authority-data": TEST_OIDC_CA, + "idp-issuer-url": "https://example.org/identity", + "refresh-token": + "lucWJjEhlxZW01cXI3YmVlcYnpxNGhzk" + } + } + } + }, + { + "name": "expired_oidc_with_idp_ca_file", + "user": { + "auth-provider": { + "name": "oidc", + "config": { + "client-id": "tectonic-kubectl", + "client-secret": "FAKE_SECRET", + "id-token": TEST_OIDC_EXPIRED_LOGIN, + "idp-certificate-authority": TEST_CERTIFICATE_AUTH, + "idp-issuer-url": "https://example.org/identity", + "refresh-token": + "lucWJjEhlxZW01cXI3YmVlcYnpxNGhzk" + } + } + } + }, + { + "name": "expired_oidc_nocert", + "user": { + "auth-provider": { + "name": "oidc", + "config": { + "client-id": "tectonic-kubectl", + "client-secret": "FAKE_SECRET", + "id-token": TEST_OIDC_EXPIRED_LOGIN, + "idp-issuer-url": "https://example.org/identity", + "refresh-token": + "lucWJjEhlxZW01cXI3YmVlcYnpxNGhzk" + } + } + } + }, + { + "name": "oidc_contains_reserved_character", + 
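+                # Fixture intended to exercise rejection of id-tokens that
+                # contain base64url-reserved characters ('=', '+', '/').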
"user": { + "auth-provider": { + "name": "oidc", + "config": { + "client-id": "tectonic-kubectl", + "client-secret": "FAKE_SECRET", + "id-token": TEST_OIDC_CONTAINS_RESERVED_CHARACTERS, + "idp-issuer-url": "https://example.org/identity", + "refresh-token": + "lucWJjEhlxZW01cXI3YmVlcYnpxNGhzk" + } + } + } + }, + { + "name": "oidc_invalid_padding_length", + "user": { + "auth-provider": { + "name": "oidc", + "config": { + "client-id": "tectonic-kubectl", + "client-secret": "FAKE_SECRET", + "id-token": TEST_OIDC_INVALID_PADDING_LENGTH, + "idp-issuer-url": "https://example.org/identity", + "refresh-token": + "lucWJjEhlxZW01cXI3YmVlcYnpxNGhzk" + } + } + } + }, + { + "name": "user_pass", + "user": { + "username": TEST_USERNAME, # should be ignored + "password": TEST_PASSWORD, # should be ignored + } + }, + { + "name": "ssl-no_file", + "user": { + "token": TEST_DATA_BASE64, + "client-certificate": TEST_CLIENT_CERT, + "client-key": TEST_CLIENT_KEY, + } + }, + { + "name": "ssl-local-file", + "user": { + "tokenFile": "token_file", + "client-certificate": "client_cert", + "client-key": "client_key", + } + }, + { + "name": "ssl", + "user": { + "token": TEST_DATA_BASE64, + "client-certificate-data": TEST_CLIENT_CERT_BASE64, + "client-key-data": TEST_CLIENT_KEY_BASE64, + } + }, + { + "name": "exec_cred_user", + "user": { + "exec": { + "apiVersion": "client.authentication.k8s.io/v1beta1", + "command": "aws-iam-authenticator", + "args": ["token", "-i", "dummy-cluster"] + } + } + }, + { + "name": "exec_cred_user_certificate", + "user": { + "exec": { + "apiVersion": "client.authentication.k8s.io/v1beta1", + "command": "custom-certificate-authenticator", + "args": [] + } + } + }, + { + "name": "usertestcmdpath", + "user": { + "auth-provider": { + "name": "gcp", + "config": { + "cmd-path": "cmdtorun" + } + } + } + }, + { + "name": "usertestcmdpathempty", + "user": { + "auth-provider": { + "name": "gcp", + "config": { + "cmd-path": "" + } + } + } + }, + { + "name": "usertestcmdpathscope", + "user": { + "auth-provider": { + "name": "gcp", + "config": { + "cmd-path": "cmd", + "scopes": "scope" + } + } + } + } + ] + } + + def test_no_user_context(self): + expected = FakeConfig(host=TEST_HOST) + actual = FakeConfig() + KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="no_user").load_and_set(actual) + self.assertEqual(expected, actual) + + def test_simple_token(self): + expected = FakeConfig(host=TEST_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) + actual = FakeConfig() + KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="simple_token").load_and_set(actual) + self.assertEqual(expected, actual) + + def test_load_user_token(self): + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="simple_token") + self.assertTrue(loader._load_user_token()) + self.assertEqual(BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, loader.token) + + def test_gcp_no_refresh(self): + fake_config = FakeConfig() + self.assertIsNone(fake_config.refresh_api_key_hook) + KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="gcp", + get_google_credentials=lambda: _raise_exception( + "SHOULD NOT BE CALLED")).load_and_set(fake_config) + # Should now be populated with a gcp token fetcher. 
+ self.assertIsNotNone(fake_config.refresh_api_key_hook) + self.assertEqual(TEST_HOST, fake_config.host) + self.assertEqual(BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, + fake_config.api_key['authorization']) + + def test_load_gcp_token_no_refresh(self): + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="gcp", + get_google_credentials=lambda: _raise_exception( + "SHOULD NOT BE CALLED")) + self.assertTrue(loader._load_auth_provider_token()) + self.assertEqual(BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, + loader.token) + + def test_load_gcp_token_with_refresh(self): + def cred(): return None + cred.token = TEST_ANOTHER_DATA_BASE64 + cred.expiry = datetime.datetime.now(tz=UTC).replace(tzinfo=None) + + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="expired_gcp", + get_google_credentials=lambda: cred) + original_expiry = _get_expiry(loader, "expired_gcp") + self.assertTrue(loader._load_auth_provider_token()) + new_expiry = _get_expiry(loader, "expired_gcp") + # assert that the configs expiry actually updates + self.assertTrue(new_expiry > original_expiry) + self.assertEqual(BEARER_TOKEN_FORMAT % TEST_ANOTHER_DATA_BASE64, + loader.token) + + def test_gcp_refresh_api_key_hook(self): + class cred_old: + token = TEST_DATA_BASE64 + expiry = DATETIME_EXPIRY_PAST + + class cred_new: + token = TEST_ANOTHER_DATA_BASE64 + expiry = DATETIME_EXPIRY_FUTURE + fake_config = FakeConfig() + _get_google_credentials = mock.Mock() + _get_google_credentials.side_effect = [cred_old, cred_new] + + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="expired_gcp_refresh", + get_google_credentials=_get_google_credentials) + loader.load_and_set(fake_config) + original_expiry = _get_expiry(loader, "expired_gcp_refresh") + # Refresh the GCP token. 
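+        # Invoking the hook by hand stands in for the generated client,
+        # which calls refresh_api_key_hook before using the stored api_key.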
+ fake_config.refresh_api_key_hook(fake_config) + new_expiry = _get_expiry(loader, "expired_gcp_refresh") + + self.assertTrue(new_expiry > original_expiry) + self.assertEqual(BEARER_TOKEN_FORMAT % TEST_ANOTHER_DATA_BASE64, + loader.token) + + def test_oidc_no_refresh(self): + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="oidc", + ) + self.assertTrue(loader._load_auth_provider_token()) + self.assertEqual(TEST_OIDC_TOKEN, loader.token) + + @mock.patch('kubernetes.config.kube_config.OAuth2Session.refresh_token') + @mock.patch('kubernetes.config.kube_config.ApiClient.request') + def test_oidc_with_refresh(self, mock_ApiClient, mock_OAuth2Session): + mock_response = mock.MagicMock() + type(mock_response).status = mock.PropertyMock(return_value=200) + type(mock_response).data = mock.PropertyMock(return_value=json.dumps({ + "token_endpoint": "https://example.org/identity/token" + })) + mock_ApiClient.return_value = mock_response + + mock_OAuth2Session.return_value = { + "id_token": "abc123", + "refresh_token": "newtoken123" + } + + try: + if sys.platform.startswith('win'): + # Create and write to temp file, close immediately to avoid Windows permission issues + with tempfile.NamedTemporaryFile(delete=False, mode='w+', suffix='.yaml') as tf: + tf.write("dummy config content") + temp_path = tf.name + + # Set file permissions so Windows doesn't block access + os.chmod(temp_path, stat.S_IREAD | stat.S_IWRITE) + + # Your actual test logic with kube config loader + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="expired_oidc" + ) + + self.assertTrue(loader._load_auth_provider_token()) + self.assertEqual("Bearer abc123", loader.token) + + # Clean up the temporary file + os.unlink(temp_path) + else: + # Non-Windows platforms run original test logic + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="expired_oidc" + ) + self.assertTrue(loader._load_auth_provider_token()) + self.assertEqual("Bearer abc123", loader.token) + + except PermissionError as e: + if sys.platform.startswith('win'): + self.skipTest(f"Skipping test on Windows due to permission error: {e}") + else: + raise + + + @mock.patch('kubernetes.config.kube_config.OAuth2Session.refresh_token') + @mock.patch('kubernetes.config.kube_config.ApiClient.request') + def test_oidc_with_idp_ca_file_refresh(self, mock_ApiClient, mock_OAuth2Session): + mock_response = mock.MagicMock() + type(mock_response).status = mock.PropertyMock( + return_value=200 + ) + type(mock_response).data = mock.PropertyMock( + return_value=json.dumps({ + "token_endpoint": "https://example.org/identity/token" + }) + ) + + mock_ApiClient.return_value = mock_response + + mock_OAuth2Session.return_value = {"id_token": "abc123", + "refresh_token": "newtoken123"} + + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="expired_oidc_with_idp_ca_file", + ) + + self.assertTrue(loader._load_auth_provider_token()) + self.assertEqual("Bearer abc123", loader.token) + + @mock.patch('kubernetes.config.kube_config.OAuth2Session.refresh_token') + @mock.patch('kubernetes.config.kube_config.ApiClient.request') + def test_oidc_with_refresh_nocert( + self, mock_ApiClient, mock_OAuth2Session): + mock_response = mock.MagicMock() + type(mock_response).status = mock.PropertyMock( + return_value=200 + ) + type(mock_response).data = mock.PropertyMock( + return_value=json.dumps({ + "token_endpoint": "https://example.org/identity/token" + }) + ) + + mock_ApiClient.return_value 
= mock_response + + mock_OAuth2Session.return_value = {"id_token": "abc123", + "refresh_token": "newtoken123"} + + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="expired_oidc_nocert", + ) + self.assertTrue(loader._load_auth_provider_token()) + self.assertEqual("Bearer abc123", loader.token) + + def test_oidc_fails_if_contains_reserved_chars(self): + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="oidc_contains_reserved_character", + ) + self.assertEqual( + loader._load_oid_token("oidc_contains_reserved_character"), + None, + ) + + def test_oidc_fails_if_invalid_padding_length(self): + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="oidc_invalid_padding_length", + ) + self.assertEqual( + loader._load_oid_token("oidc_invalid_padding_length"), + None, + ) + + def test_azure_no_refresh(self): + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="azure", + ) + self.assertTrue(loader._load_auth_provider_token()) + self.assertEqual(TEST_AZURE_TOKEN_FULL, loader.token) + + def test_azure_with_expired_num(self): + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="azure_num", + ) + provider = loader._user['auth-provider'] + self.assertTrue(loader._azure_is_expired(provider)) + + def test_azure_with_expired_str(self): + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="azure_str", + ) + provider = loader._user['auth-provider'] + self.assertTrue(loader._azure_is_expired(provider)) + + def test_azure_with_expired_str_error(self): + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="azure_str_error", + ) + provider = loader._user['auth-provider'] + self.assertRaises(ValueError, loader._azure_is_expired, provider) + + def test_azure_with_expired_int_error(self): + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="azure_num_error", + ) + provider = loader._user['auth-provider'] + self.assertRaises(ValueError, loader._azure_is_expired, provider) + + def test_user_pass(self): + expected = FakeConfig(host=TEST_HOST, token=TEST_BASIC_TOKEN) + actual = FakeConfig() + KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="user_pass").load_and_set(actual) + self.assertEqual(expected, actual) + + def test_load_user_pass_token(self): + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="user_pass") + self.assertTrue(loader._load_user_pass_token()) + self.assertEqual(TEST_BASIC_TOKEN, loader.token) + + def test_ssl_no_cert_files(self): + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="ssl-no_file") + self.expect_exception( + loader.load_and_set, + "does not exist", + FakeConfig()) + + def test_ssl(self): + expected = FakeConfig( + host=TEST_SSL_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, + cert_file=self._create_temp_file(TEST_CLIENT_CERT), + key_file=self._create_temp_file(TEST_CLIENT_KEY), + ssl_ca_cert=self._create_temp_file(TEST_CERTIFICATE_AUTH), + verify_ssl=True + ) + actual = FakeConfig() + KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="ssl").load_and_set(actual) + self.assertEqual(expected, actual) + + def test_ssl_no_verification(self): + expected = FakeConfig( + host=TEST_SSL_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, + cert_file=self._create_temp_file(TEST_CLIENT_CERT), + key_file=self._create_temp_file(TEST_CLIENT_KEY), 
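+            # No CA bundle expected here: the "no_ssl_verification" context
+            # disables server certificate verification entirely.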
+ verify_ssl=False, + ssl_ca_cert=None, + ) + actual = FakeConfig() + KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="no_ssl_verification").load_and_set(actual) + self.assertEqual(expected, actual) + + def test_tls_server_name(self): + expected = FakeConfig( + host=TEST_SSL_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, + cert_file=self._create_temp_file(TEST_CLIENT_CERT), + key_file=self._create_temp_file(TEST_CLIENT_KEY), + ssl_ca_cert=self._create_temp_file(TEST_CERTIFICATE_AUTH), + verify_ssl=True, + tls_server_name=TEST_TLS_SERVER_NAME + ) + actual = FakeConfig() + KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="tls-server-name").load_and_set(actual) + self.assertEqual(expected, actual) + + def test_list_contexts(self): + loader = KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="no_user") + actual_contexts = loader.list_contexts() + expected_contexts = ConfigNode("", self.TEST_KUBE_CONFIG)['contexts'] + for actual in actual_contexts: + expected = expected_contexts.get_with_name(actual['name']) + self.assertEqual(expected.value, actual) + + def test_current_context(self): + loader = KubeConfigLoader(config_dict=self.TEST_KUBE_CONFIG) + expected_contexts = ConfigNode("", self.TEST_KUBE_CONFIG)['contexts'] + self.assertEqual(expected_contexts.get_with_name("no_user").value, + loader.current_context) + + def test_set_active_context(self): + loader = KubeConfigLoader(config_dict=self.TEST_KUBE_CONFIG) + loader.set_active_context("ssl") + expected_contexts = ConfigNode("", self.TEST_KUBE_CONFIG)['contexts'] + self.assertEqual(expected_contexts.get_with_name("ssl").value, + loader.current_context) + + def test_ssl_with_relative_ssl_files(self): + expected = FakeConfig( + host=TEST_SSL_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, + cert_file=self._create_temp_file(TEST_CLIENT_CERT), + key_file=self._create_temp_file(TEST_CLIENT_KEY), + ssl_ca_cert=self._create_temp_file(TEST_CERTIFICATE_AUTH) + ) + try: + temp_dir = tempfile.mkdtemp() + actual = FakeConfig() + with open(os.path.join(temp_dir, "cert_test"), "wb") as fd: + fd.write(TEST_CERTIFICATE_AUTH.encode()) + with open(os.path.join(temp_dir, "client_cert"), "wb") as fd: + fd.write(TEST_CLIENT_CERT.encode()) + with open(os.path.join(temp_dir, "client_key"), "wb") as fd: + fd.write(TEST_CLIENT_KEY.encode()) + with open(os.path.join(temp_dir, "token_file"), "wb") as fd: + fd.write(TEST_DATA_BASE64.encode()) + KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="ssl-local-file", + config_base_path=temp_dir).load_and_set(actual) + self.assertEqual(expected, actual) + finally: + shutil.rmtree(temp_dir) + + def test_load_kube_config_from_file_path(self): + expected = FakeConfig(host=TEST_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) + config_file = self._create_temp_file( + yaml.safe_dump(self.TEST_KUBE_CONFIG)) + actual = FakeConfig() + load_kube_config(config_file=config_file, context="simple_token", + client_configuration=actual) + self.assertEqual(expected, actual) + + def test_load_kube_config_from_file_like_object(self): + expected = FakeConfig(host=TEST_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) + config_file_like_object = io.StringIO() + # py3 (won't have unicode) vs py2 (requires it) + try: + unicode('') + config_file_like_object.write( + unicode( + yaml.safe_dump( + self.TEST_KUBE_CONFIG), + errors='replace')) + except NameError: + config_file_like_object.write( + yaml.safe_dump( + self.TEST_KUBE_CONFIG)) + 
actual = FakeConfig()
+        load_kube_config(
+            config_file=config_file_like_object,
+            context="simple_token",
+            client_configuration=actual)
+        self.assertEqual(expected, actual)
+
+    def test_load_kube_config_from_dict(self):
+        expected = FakeConfig(host=TEST_HOST,
+                              token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64)
+        actual = FakeConfig()
+        load_kube_config_from_dict(config_dict=self.TEST_KUBE_CONFIG,
+                                   context="simple_token",
+                                   client_configuration=actual)
+        self.assertEqual(expected, actual)
+
+    def test_load_kube_config_from_dict_with_temp_file_path(self):
+        expected = FakeConfig(
+            host=TEST_SSL_HOST,
+            token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64,
+            cert_file=self._create_temp_file(TEST_CLIENT_CERT),
+            key_file=self._create_temp_file(TEST_CLIENT_KEY),
+            ssl_ca_cert=self._create_temp_file(TEST_CERTIFICATE_AUTH),
+            verify_ssl=True
+        )
+        actual = FakeConfig()
+        tmp_path = os.path.join(
+            os.path.dirname(
+                os.path.dirname(
+                    os.path.abspath(__file__))),
+            'tmp_file_path_test')
+        load_kube_config_from_dict(config_dict=self.TEST_KUBE_CONFIG,
+                                   context="ssl",
+                                   client_configuration=actual,
+                                   temp_file_path=tmp_path)
+        # The temp_file_path directory should now hold the materialized
+        # certificate files.
+        self.assertTrue(os.listdir(tmp_path))
+        self.assertEqual(expected, actual)
+        _cleanup_temp_files()
+
+    def test_load_kube_config_from_empty_file_like_object(self):
+        config_file_like_object = io.StringIO()
+        self.assertRaises(
+            ConfigException,
+            load_kube_config,
+            config_file_like_object)
+
+    def test_load_kube_config_from_empty_file(self):
+        config_file = self._create_temp_file(
+            yaml.safe_dump(None))
+        self.assertRaises(
+            ConfigException,
+            load_kube_config,
+            config_file)
+
+    def test_list_kube_config_contexts(self):
+        config_file = self._create_temp_file(
+            yaml.safe_dump(self.TEST_KUBE_CONFIG))
+        contexts, active_context = list_kube_config_contexts(
+            config_file=config_file)
+        self.assertDictEqual(self.TEST_KUBE_CONFIG['contexts'][0],
+                             active_context)
+        if PY3:
+            self.assertCountEqual(self.TEST_KUBE_CONFIG['contexts'],
+                                  contexts)
+        else:
+            self.assertItemsEqual(self.TEST_KUBE_CONFIG['contexts'],
+                                  contexts)
+
+    def test_new_client_from_config(self):
+        config_file = self._create_temp_file(
+            yaml.safe_dump(self.TEST_KUBE_CONFIG))
+        client = new_client_from_config(
+            config_file=config_file, context="simple_token")
+        self.assertEqual(TEST_HOST, client.configuration.host)
+        self.assertEqual(BEARER_TOKEN_FORMAT % TEST_DATA_BASE64,
+                         client.configuration.api_key['authorization'])
+
+    def test_new_client_from_config_dict(self):
+        client = new_client_from_config_dict(
+            config_dict=self.TEST_KUBE_CONFIG, context="simple_token")
+        self.assertEqual(TEST_HOST, client.configuration.host)
+        self.assertEqual(BEARER_TOKEN_FORMAT % TEST_DATA_BASE64,
+                         client.configuration.api_key['authorization'])
+
+    def test_no_users_section(self):
+        expected = FakeConfig(host=TEST_HOST)
+        actual = FakeConfig()
+        test_kube_config = self.TEST_KUBE_CONFIG.copy()
+        del test_kube_config['users']
+        KubeConfigLoader(
+            config_dict=test_kube_config,
+            active_context="gcp").load_and_set(actual)
+        self.assertEqual(expected, actual)
+
+    def test_non_existing_user(self):
+        expected = FakeConfig(host=TEST_HOST)
+        actual = FakeConfig()
+        KubeConfigLoader(
+            config_dict=self.TEST_KUBE_CONFIG,
+            active_context="non_existing_user").load_and_set(actual)
+        self.assertEqual(expected, actual)
+
+    @mock.patch('kubernetes.config.kube_config.ExecProvider.run')
+    def test_user_exec_auth(self, mock):
+        token = "dummy"
+        mock.return_value = {
+            "token": token
+        }
+        expected = FakeConfig(host=TEST_HOST,
api_key={ + "authorization": BEARER_TOKEN_FORMAT % token}) + actual = FakeConfig() + KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="exec_cred_user").load_and_set(actual) + self.assertEqual(expected, actual) + + @mock.patch('kubernetes.config.kube_config.ExecProvider.run') + def test_user_exec_auth_with_expiry(self, mock): + expired_token = "expired" + current_token = "current" + mock.side_effect = [ + { + "token": expired_token, + "expirationTimestamp": format_rfc3339(DATETIME_EXPIRY_PAST) + }, + { + "token": current_token, + "expirationTimestamp": format_rfc3339(DATETIME_EXPIRY_FUTURE) + } + ] + + fake_config = FakeConfig() + self.assertIsNone(fake_config.refresh_api_key_hook) + + KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="exec_cred_user").load_and_set(fake_config) + # The kube config should use the first token returned from the + # exec provider. + self.assertEqual(fake_config.api_key["authorization"], + BEARER_TOKEN_FORMAT % expired_token) + # Should now be populated with a method to refresh expired tokens. + self.assertIsNotNone(fake_config.refresh_api_key_hook) + # Refresh the token; the kube config should be updated. + fake_config.refresh_api_key_hook(fake_config) + self.assertEqual(fake_config.api_key["authorization"], + BEARER_TOKEN_FORMAT % current_token) + + @mock.patch('kubernetes.config.kube_config.ExecProvider.run') + def test_user_exec_auth_certificates(self, mock): + mock.return_value = { + "clientCertificateData": TEST_CLIENT_CERT, + "clientKeyData": TEST_CLIENT_KEY, + } + expected = FakeConfig( + host=TEST_SSL_HOST, + cert_file=self._create_temp_file(TEST_CLIENT_CERT), + key_file=self._create_temp_file(TEST_CLIENT_KEY), + ssl_ca_cert=self._create_temp_file(TEST_CERTIFICATE_AUTH), + verify_ssl=True) + actual = FakeConfig() + KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="exec_cred_user_certificate").load_and_set(actual) + self.assertEqual(expected, actual) + + @mock.patch('kubernetes.config.kube_config.ExecProvider.run', autospec=True) + def test_user_exec_cwd(self, mock): + capture = {} + + def capture_cwd(exec_provider): + capture['cwd'] = exec_provider.cwd + mock.side_effect = capture_cwd + + expected = "/some/random/path" + KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="exec_cred_user", + config_base_path=expected).load_and_set(FakeConfig()) + self.assertEqual(expected, capture['cwd']) + + def test_user_cmd_path(self): + A = namedtuple('A', ['token', 'expiry']) + token = "dummy" + return_value = A(token, parse_rfc3339(datetime.datetime.now())) + CommandTokenSource.token = mock.Mock(return_value=return_value) + expected = FakeConfig(api_key={ + "authorization": BEARER_TOKEN_FORMAT % token}) + actual = FakeConfig() + KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="contexttestcmdpath").load_and_set(actual) + self.assertEqual(expected, actual) + + def test_user_cmd_path_empty(self): + A = namedtuple('A', ['token', 'expiry']) + token = "dummy" + return_value = A(token, parse_rfc3339(datetime.datetime.now())) + CommandTokenSource.token = mock.Mock(return_value=return_value) + expected = FakeConfig(api_key={ + "authorization": BEARER_TOKEN_FORMAT % token}) + actual = FakeConfig() + self.expect_exception(lambda: KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="contexttestcmdpathempty").load_and_set(actual), + "missing access token cmd " + "(cmd-path is an empty string in your kubeconfig file)") + + def 
test_user_cmd_path_with_scope(self): + A = namedtuple('A', ['token', 'expiry']) + token = "dummy" + return_value = A(token, parse_rfc3339(datetime.datetime.now())) + CommandTokenSource.token = mock.Mock(return_value=return_value) + expected = FakeConfig(api_key={ + "authorization": BEARER_TOKEN_FORMAT % token}) + actual = FakeConfig() + self.expect_exception(lambda: KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="contexttestcmdpathscope").load_and_set(actual), + "scopes can only be used when kubectl is using " + "a gcp service account key") + + def test__get_kube_config_loader_for_yaml_file_no_persist(self): + expected = FakeConfig(host=TEST_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) + config_file = self._create_temp_file( + yaml.safe_dump(self.TEST_KUBE_CONFIG)) + actual = _get_kube_config_loader_for_yaml_file(config_file) + self.assertIsNone(actual._config_persister) + + def test__get_kube_config_loader_for_yaml_file_persist(self): + expected = FakeConfig(host=TEST_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) + config_file = self._create_temp_file( + yaml.safe_dump(self.TEST_KUBE_CONFIG)) + actual = _get_kube_config_loader_for_yaml_file(config_file, + persist_config=True) + self.assertTrue(callable(actual._config_persister)) + self.assertEqual(actual._config_persister.__name__, "save_changes") + + def test__get_kube_config_loader_file_no_persist(self): + expected = FakeConfig(host=TEST_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) + config_file = self._create_temp_file( + yaml.safe_dump(self.TEST_KUBE_CONFIG)) + actual = _get_kube_config_loader(filename=config_file) + self.assertIsNone(actual._config_persister) + + def test__get_kube_config_loader_file_persist(self): + expected = FakeConfig(host=TEST_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) + config_file = self._create_temp_file( + yaml.safe_dump(self.TEST_KUBE_CONFIG)) + actual = _get_kube_config_loader(filename=config_file, + persist_config=True) + self.assertTrue(callable(actual._config_persister)) + self.assertEqual(actual._config_persister.__name__, "save_changes") + + def test__get_kube_config_loader_dict_no_persist(self): + expected = FakeConfig(host=TEST_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) + actual = _get_kube_config_loader( + config_dict=self.TEST_KUBE_CONFIG) + self.assertIsNone(actual._config_persister) + + +class TestKubernetesClientConfiguration(BaseTestCase): + # Verifies properties of kubernetes.client.Configuration. + # These tests guard against changes to the upstream configuration class, + # since GCP and Exec authorization use refresh_api_key_hook to refresh + # their tokens regularly. 
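+    # A minimal usage sketch (illustrative only; fetch_fresh_token() is a
+    # hypothetical helper, not part of the client library):
+    #
+    #     config = Configuration(api_key={'authorization': 'old'},
+    #                            api_key_prefix={'authorization': 'Bearer'})
+    #
+    #     def hook(client_config):
+    #         client_config.api_key['authorization'] = fetch_fresh_token()
+    #
+    #     config.refresh_api_key_hook = hook
+    #
+    # config.get_api_key_with_prefix('authorization') then invokes the hook
+    # before building the header value, as the tests below verify.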
+ + def test_refresh_api_key_hook_exists(self): + self.assertTrue(hasattr(Configuration(), 'refresh_api_key_hook')) + + def test_get_api_key_calls_refresh_api_key_hook(self): + identifier = 'authorization' + expected_token = 'expected_token' + old_token = 'old_token' + config = Configuration( + api_key={identifier: old_token}, + api_key_prefix={identifier: 'Bearer'} + ) + + def refresh_api_key_hook(client_config): + self.assertEqual(client_config, config) + client_config.api_key[identifier] = expected_token + config.refresh_api_key_hook = refresh_api_key_hook + + self.assertEqual('Bearer ' + expected_token, + config.get_api_key_with_prefix(identifier)) + + +class TestKubeConfigMerger(BaseTestCase): + TEST_KUBE_CONFIG_SET1 = [{ + "current-context": "no_user", + "contexts": [ + { + "name": "no_user", + "context": { + "cluster": "default" + } + }, + ], + "clusters": [ + { + "name": "default", + "cluster": { + "server": TEST_HOST + } + }, + ], + "users": [] + }, { + "current-context": "", + "contexts": [ + { + "name": "ssl", + "context": { + "cluster": "ssl", + "user": "ssl" + } + }, + { + "name": "simple_token", + "context": { + "cluster": "default", + "user": "simple_token" + } + }, + ], + "clusters": [ + { + "name": "ssl", + "cluster": { + "server": TEST_SSL_HOST, + "certificate-authority-data": + TEST_CERTIFICATE_AUTH_BASE64, + } + }, + ], + "users": [ + { + "name": "ssl", + "user": { + "token": TEST_DATA_BASE64, + "client-certificate-data": TEST_CLIENT_CERT_BASE64, + "client-key-data": TEST_CLIENT_KEY_BASE64, + } + }, + ] + }, { + "current-context": "no_user", + "contexts": [ + { + "name": "expired_oidc", + "context": { + "cluster": "default", + "user": "expired_oidc" + } + }, + { + "name": "ssl", + "context": { + "cluster": "skipped-part2-defined-this-context", + "user": "skipped" + } + }, + ], + "clusters": [ + ], + "users": [ + { + "name": "expired_oidc", + "user": { + "auth-provider": { + "name": "oidc", + "config": { + "client-id": "tectonic-kubectl", + "client-secret": "FAKE_SECRET", + "id-token": TEST_OIDC_EXPIRED_LOGIN, + "idp-certificate-authority-data": TEST_OIDC_CA, + "idp-issuer-url": "https://example.org/identity", + "refresh-token": + "lucWJjEhlxZW01cXI3YmVlcYnpxNGhzk" + } + } + } + }, + { + "name": "simple_token", + "user": { + "token": TEST_DATA_BASE64, + "username": TEST_USERNAME, # should be ignored + "password": TEST_PASSWORD, # should be ignored + } + }, + ] + }, { + "current-context": "no_user", + }, { + # Config with user having cmd-path + "contexts": [ + { + "name": "contexttestcmdpath", + "context": { + "cluster": "clustertestcmdpath", + "user": "usertestcmdpath" + } + } + ], + "clusters": [ + { + "name": "clustertestcmdpath", + "cluster": {} + } + ], + "users": [ + { + "name": "usertestcmdpath", + "user": { + "auth-provider": { + "name": "gcp", + "config": { + "cmd-path": "cmdtorun" + } + } + } + } + ] + }, { + "current-context": "no_user", + "contexts": [ + { + "name": "no_user", + "context": { + "cluster": "default" + } + }, + ], + "clusters": [ + { + "name": "default", + "cluster": { + "server": TEST_HOST + } + }, + ], + "users": None + }] + # 3 parts with different keys/data to merge + TEST_KUBE_CONFIG_SET2 = [{ + "clusters": [ + { + "name": "default", + "cluster": { + "server": TEST_HOST + } + }, + ], + }, { + "current-context": "simple_token", + "contexts": [ + { + "name": "simple_token", + "context": { + "cluster": "default", + "user": "simple_token" + } + }, + ], + }, { + "users": [ + { + "name": "simple_token", + "user": { + "token": TEST_DATA_BASE64, + 
"username": TEST_USERNAME, + "password": TEST_PASSWORD, + } + }, + ] + }] + + def _create_multi_config(self, parts): + files = [] + for part in parts: + files.append(self._create_temp_file(yaml.safe_dump(part))) + return ENV_KUBECONFIG_PATH_SEPARATOR.join(files) + + def test_list_kube_config_contexts(self): + kubeconfigs = self._create_multi_config(self.TEST_KUBE_CONFIG_SET1) + expected_contexts = [ + {'context': {'cluster': 'default'}, 'name': 'no_user'}, + {'context': {'cluster': 'ssl', 'user': 'ssl'}, 'name': 'ssl'}, + {'context': {'cluster': 'default', 'user': 'simple_token'}, + 'name': 'simple_token'}, + {'context': {'cluster': 'default', 'user': 'expired_oidc'}, + 'name': 'expired_oidc'}, + {'context': {'cluster': 'clustertestcmdpath', + 'user': 'usertestcmdpath'}, + 'name': 'contexttestcmdpath'}] + + contexts, active_context = list_kube_config_contexts( + config_file=kubeconfigs) + + self.assertEqual(contexts, expected_contexts) + self.assertEqual(active_context, expected_contexts[0]) + + def test_new_client_from_config(self): + kubeconfigs = self._create_multi_config(self.TEST_KUBE_CONFIG_SET1) + client = new_client_from_config( + config_file=kubeconfigs, context="simple_token") + self.assertEqual(TEST_HOST, client.configuration.host) + self.assertEqual(BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, + client.configuration.api_key['authorization']) + + def test_merge_with_context_in_different_file(self): + kubeconfigs = self._create_multi_config(self.TEST_KUBE_CONFIG_SET2) + client = new_client_from_config(config_file=kubeconfigs) + + expected_contexts = [ + {'context': {'cluster': 'default', 'user': 'simple_token'}, + 'name': 'simple_token'} + ] + contexts, active_context = list_kube_config_contexts( + config_file=kubeconfigs) + self.assertEqual(contexts, expected_contexts) + self.assertEqual(active_context, expected_contexts[0]) + self.assertEqual(TEST_HOST, client.configuration.host) + self.assertEqual(BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, + client.configuration.api_key['authorization']) + + def test_save_changes(self): + kubeconfigs = self._create_multi_config(self.TEST_KUBE_CONFIG_SET1) + + # load configuration, update token, save config + kconf = KubeConfigMerger(kubeconfigs) + user = kconf.config['users'].get_with_name('expired_oidc')['user'] + provider = user['auth-provider']['config'] + provider.value['id-token'] = "token-changed" + kconf.save_changes() + + # re-read configuration + kconf = KubeConfigMerger(kubeconfigs) + user = kconf.config['users'].get_with_name('expired_oidc')['user'] + provider = user['auth-provider']['config'] + + # new token + self.assertEqual(provider.value['id-token'], "token-changed") + + +if __name__ == '__main__': + unittest.main() diff --git a/kubernetes/dynamic b/kubernetes/dynamic deleted file mode 120000 index e896b54ffd..0000000000 --- a/kubernetes/dynamic +++ /dev/null @@ -1 +0,0 @@ -base/dynamic \ No newline at end of file diff --git a/kubernetes/dynamic/__init__.py b/kubernetes/dynamic/__init__.py new file mode 100644 index 0000000000..a1d3d8f8ea --- /dev/null +++ b/kubernetes/dynamic/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .client import * # NOQA diff --git a/kubernetes/dynamic/client.py b/kubernetes/dynamic/client.py new file mode 100644 index 0000000000..64163d7b5c --- /dev/null +++ b/kubernetes/dynamic/client.py @@ -0,0 +1,327 @@ +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import json + +from kubernetes import watch +from kubernetes.client.rest import ApiException + +from .discovery import EagerDiscoverer, LazyDiscoverer +from .exceptions import api_exception, KubernetesValidateMissing +from .resource import Resource, ResourceList, Subresource, ResourceInstance, ResourceField + +try: + import kubernetes_validate + HAS_KUBERNETES_VALIDATE = True +except ImportError: + HAS_KUBERNETES_VALIDATE = False + +try: + from kubernetes_validate.utils import VersionNotSupportedError +except ImportError: + class VersionNotSupportedError(NotImplementedError): + pass + +__all__ = [ + 'DynamicClient', + 'ResourceInstance', + 'Resource', + 'ResourceList', + 'Subresource', + 'EagerDiscoverer', + 'LazyDiscoverer', + 'ResourceField', +] + + +def meta_request(func): + """ Handles parsing response structure and translating API Exceptions """ + def inner(self, *args, **kwargs): + serialize_response = kwargs.pop('serialize', True) + serializer = kwargs.pop('serializer', ResourceInstance) + try: + resp = func(self, *args, **kwargs) + except ApiException as e: + raise api_exception(e) + if serialize_response: + try: + if six.PY2: + return serializer(self, json.loads(resp.data)) + return serializer(self, json.loads(resp.data.decode('utf8'))) + except ValueError: + if six.PY2: + return resp.data + return resp.data.decode('utf8') + return resp + + return inner + + +class DynamicClient(object): + """ A kubernetes client that dynamically discovers and interacts with + the kubernetes API + """ + + def __init__(self, client, cache_file=None, discoverer=None): + # Setting default here to delay evaluation of LazyDiscoverer class + # until constructor is called + discoverer = discoverer or LazyDiscoverer + + self.client = client + self.configuration = client.configuration + self.__discoverer = discoverer(self, cache_file) + + @property + def resources(self): + return self.__discoverer + + @property + def version(self): + return self.__discoverer.version + + def ensure_namespace(self, resource, namespace, body): + namespace = namespace or body.get('metadata', {}).get('namespace') + if not namespace: + raise ValueError("Namespace is required for {}.{}".format(resource.group_version, resource.kind)) + return namespace + + def serialize_body(self, body): + """Serialize body 
to raw dict so apiserver can handle it + + :param body: kubernetes resource body, current support: Union[Dict, ResourceInstance] + """ + # This should match any `ResourceInstance` instances + if callable(getattr(body, 'to_dict', None)): + return body.to_dict() + return body or {} + + def get(self, resource, name=None, namespace=None, **kwargs): + path = resource.path(name=name, namespace=namespace) + return self.request('get', path, **kwargs) + + def create(self, resource, body=None, namespace=None, **kwargs): + body = self.serialize_body(body) + if resource.namespaced: + namespace = self.ensure_namespace(resource, namespace, body) + path = resource.path(namespace=namespace) + return self.request('post', path, body=body, **kwargs) + + def delete(self, resource, name=None, namespace=None, body=None, label_selector=None, field_selector=None, **kwargs): + if not (name or label_selector or field_selector): + raise ValueError("At least one of name|label_selector|field_selector is required") + if resource.namespaced and not (label_selector or field_selector or namespace): + raise ValueError("At least one of namespace|label_selector|field_selector is required") + path = resource.path(name=name, namespace=namespace) + return self.request('delete', path, body=body, label_selector=label_selector, field_selector=field_selector, **kwargs) + + def replace(self, resource, body=None, name=None, namespace=None, **kwargs): + body = self.serialize_body(body) + name = name or body.get('metadata', {}).get('name') + if not name: + raise ValueError("name is required to replace {}.{}".format(resource.group_version, resource.kind)) + if resource.namespaced: + namespace = self.ensure_namespace(resource, namespace, body) + path = resource.path(name=name, namespace=namespace) + return self.request('put', path, body=body, **kwargs) + + def patch(self, resource, body=None, name=None, namespace=None, **kwargs): + body = self.serialize_body(body) + name = name or body.get('metadata', {}).get('name') + if not name: + raise ValueError("name is required to patch {}.{}".format(resource.group_version, resource.kind)) + if resource.namespaced: + namespace = self.ensure_namespace(resource, namespace, body) + + content_type = kwargs.pop('content_type', 'application/strategic-merge-patch+json') + path = resource.path(name=name, namespace=namespace) + + return self.request('patch', path, body=body, content_type=content_type, **kwargs) + + def server_side_apply(self, resource, body=None, name=None, namespace=None, force_conflicts=None, **kwargs): + body = self.serialize_body(body) + name = name or body.get('metadata', {}).get('name') + if not name: + raise ValueError("name is required to patch {}.{}".format(resource.group_version, resource.kind)) + if resource.namespaced: + namespace = self.ensure_namespace(resource, namespace, body) + + # force content type to 'application/apply-patch+yaml' + kwargs.update({'content_type': 'application/apply-patch+yaml'}) + path = resource.path(name=name, namespace=namespace) + + return self.request('patch', path, body=body, force_conflicts=force_conflicts, **kwargs) + + def watch(self, resource, namespace=None, name=None, label_selector=None, field_selector=None, resource_version=None, timeout=None, watcher=None, allow_watch_bookmarks=None): + """ + Stream events for a resource from the Kubernetes API + + :param resource: The API resource object that will be used to query the API + :param namespace: The namespace to query + :param name: The name of the resource instance to query + :param 
label_selector: The label selector with which to filter results + :param field_selector: The field selector with which to filter results + :param resource_version: The version with which to filter results. Only events with + a resource_version greater than this value will be returned + :param timeout: The amount of time in seconds to wait before terminating the stream + :param watcher: The Watcher object that will be used to stream the resource + :param allow_watch_bookmarks: Ask the API server to send BOOKMARK events + + :return: Event object with these keys: + 'type': The type of event such as "ADDED", "DELETED", etc. + 'raw_object': a dict representing the watched object. + 'object': A ResourceInstance wrapping raw_object. + + Example: + client = DynamicClient(k8s_client) + watcher = watch.Watch() + v1_pods = client.resources.get(api_version='v1', kind='Pod') + + for e in v1_pods.watch(resource_version=0, namespace=default, timeout=5, watcher=watcher): + print(e['type']) + print(e['object'].metadata) + # If you want to gracefully stop the stream watcher + watcher.stop() + """ + if not watcher: watcher = watch.Watch() + + # Use field selector to query for named instance so the watch parameter is handled properly. + if name: + field_selector = f"metadata.name={name}" + + for event in watcher.stream( + resource.get, + namespace=namespace, + field_selector=field_selector, + label_selector=label_selector, + resource_version=resource_version, + serialize=False, + timeout_seconds=timeout, + allow_watch_bookmarks=allow_watch_bookmarks, + ): + event['object'] = ResourceInstance(resource, event['object']) + yield event + + @meta_request + def request(self, method, path, body=None, **params): + if not path.startswith('/'): + path = '/' + path + + path_params = params.get('path_params', {}) + query_params = params.get('query_params', []) + if params.get('pretty') is not None: + query_params.append(('pretty', params['pretty'])) + if params.get('_continue') is not None: + query_params.append(('continue', params['_continue'])) + if params.get('include_uninitialized') is not None: + query_params.append(('includeUninitialized', params['include_uninitialized'])) + if params.get('field_selector') is not None: + query_params.append(('fieldSelector', params['field_selector'])) + if params.get('label_selector') is not None: + query_params.append(('labelSelector', params['label_selector'])) + if params.get('limit') is not None: + query_params.append(('limit', params['limit'])) + if params.get('resource_version') is not None: + query_params.append(('resourceVersion', params['resource_version'])) + if params.get('timeout_seconds') is not None: + query_params.append(('timeoutSeconds', params['timeout_seconds'])) + if params.get('watch') is not None: + query_params.append(('watch', params['watch'])) + if params.get('grace_period_seconds') is not None: + query_params.append(('gracePeriodSeconds', params['grace_period_seconds'])) + if params.get('propagation_policy') is not None: + query_params.append(('propagationPolicy', params['propagation_policy'])) + if params.get('orphan_dependents') is not None: + query_params.append(('orphanDependents', params['orphan_dependents'])) + if params.get('dry_run') is not None: + query_params.append(('dryRun', params['dry_run'])) + if params.get('field_manager') is not None: + query_params.append(('fieldManager', params['field_manager'])) + if params.get('force_conflicts') is not None: + query_params.append(('force', params['force_conflicts'])) + if 
params.get('allow_watch_bookmarks') is not None: + query_params.append(('allowWatchBookmarks', params['allow_watch_bookmarks'])) + + header_params = params.get('header_params', {}) + form_params = [] + local_var_files = {} + + # Checking Accept header. + new_header_params = dict((key.lower(), value) for key, value in header_params.items()) + if not 'accept' in new_header_params: + header_params['Accept'] = self.client.select_header_accept([ + 'application/json', + 'application/yaml', + ]) + + # HTTP header `Content-Type` + if params.get('content_type'): + header_params['Content-Type'] = params['content_type'] + else: + header_params['Content-Type'] = self.client.select_header_content_type(['*/*']) + + # Authentication setting + auth_settings = ['BearerToken'] + + api_response = self.client.call_api( + path, + method.upper(), + path_params, + query_params, + header_params, + body=body, + post_params=form_params, + async_req=params.get('async_req'), + files=local_var_files, + auth_settings=auth_settings, + _preload_content=False, + _return_http_data_only=params.get('_return_http_data_only', True), + _request_timeout=params.get('_request_timeout') + ) + if params.get('async_req'): + return api_response.get() + else: + return api_response + + def validate(self, definition, version=None, strict=False): + """validate checks a kubernetes resource definition + + Args: + definition (dict): resource definition + version (str): version of kubernetes to validate against + strict (bool): whether unexpected additional properties should be considered errors + + Returns: + warnings (list), errors (list): warnings are missing validations, errors are validation failures + """ + if not HAS_KUBERNETES_VALIDATE: + raise KubernetesValidateMissing() + + errors = list() + warnings = list() + try: + if version is None: + try: + version = self.version['kubernetes']['gitVersion'] + except KeyError: + version = kubernetes_validate.latest_version() + kubernetes_validate.validate(definition, version, strict) + except kubernetes_validate.utils.ValidationError as e: + errors.append("resource definition validation error at %s: %s" % ('.'.join([str(item) for item in e.path]), e.message)) # noqa: B306 + except VersionNotSupportedError: + errors.append("Kubernetes version %s is not supported by kubernetes-validate" % version) + except kubernetes_validate.utils.SchemaNotFoundError as e: + warnings.append("Could not find schema for object kind %s with API version %s in Kubernetes version %s (possibly Custom Resource?)" % + (e.kind, e.api_version, e.version)) + return warnings, errors diff --git a/kubernetes/dynamic/discovery.py b/kubernetes/dynamic/discovery.py new file mode 100644 index 0000000000..c00dfa3ef8 --- /dev/null +++ b/kubernetes/dynamic/discovery.py @@ -0,0 +1,433 @@ +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
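+
+# NOTE: discovered resources are cached on disk (by default a JSON file in
+# the system temp directory, keyed by the configured host) so that repeated
+# clients do not have to re-query the API server for the full resource list
+# on every start-up.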
+ +import os +import six +import json +import logging +import hashlib +import tempfile +from functools import partial +from collections import defaultdict +from abc import abstractmethod, abstractproperty + +from urllib3.exceptions import ProtocolError, MaxRetryError + +from kubernetes import __version__ +from .exceptions import NotFoundError, ResourceNotFoundError, ResourceNotUniqueError, ApiException, ServiceUnavailableError +from .resource import Resource, ResourceList + + +DISCOVERY_PREFIX = 'apis' + + +class Discoverer(object): + """ + A convenient container for storing discovered API resources. Allows + easy searching and retrieval of specific resources. + + Subclasses implement the abstract methods with different loading strategies. + """ + + def __init__(self, client, cache_file): + self.client = client + default_cache_id = self.client.configuration.host + if six.PY3: + default_cache_id = default_cache_id.encode('utf-8') + try: + default_cachefile_name = 'osrcp-{0}.json'.format(hashlib.md5(default_cache_id, usedforsecurity=False).hexdigest()) + except TypeError: + # usedforsecurity is only supported in 3.9+ + default_cachefile_name = 'osrcp-{0}.json'.format(hashlib.md5(default_cache_id).hexdigest()) + self.__cache_file = cache_file or os.path.join(tempfile.gettempdir(), default_cachefile_name) + self.__init_cache() + + def __init_cache(self, refresh=False): + if refresh or not os.path.exists(self.__cache_file): + self._cache = {'library_version': __version__} + refresh = True + else: + try: + with open(self.__cache_file, 'r') as f: + self._cache = json.load(f, cls=partial(CacheDecoder, self.client)) + if self._cache.get('library_version') != __version__: + # Version mismatch, need to refresh cache + self.invalidate_cache() + except Exception as e: + logging.error("load cache error: %s", e) + self.invalidate_cache() + self._load_server_info() + self.discover() + if refresh: + self._write_cache() + + def _write_cache(self): + try: + with open(self.__cache_file, 'w') as f: + json.dump(self._cache, f, cls=CacheEncoder) + except Exception: + # Failing to write the cache isn't a big enough error to crash on + pass + + def invalidate_cache(self): + self.__init_cache(refresh=True) + + @abstractproperty + def api_groups(self): + pass + + @abstractmethod + def search(self, prefix=None, group=None, api_version=None, kind=None, **kwargs): + pass + + @abstractmethod + def discover(self): + pass + + @property + def version(self): + return self.__version + + def default_groups(self, request_resources=False): + groups = {} + groups['api'] = { '': { + 'v1': (ResourceGroup( True, resources=self.get_resources_for_api_version('api', '', 'v1', True) ) + if request_resources else ResourceGroup(True)) + }} + + groups[DISCOVERY_PREFIX] = {'': { + 'v1': ResourceGroup(True, resources = {"List": [ResourceList(self.client)]}) + }} + return groups + + def parse_api_groups(self, request_resources=False, update=False): + """ Discovers all API groups present in the cluster """ + if not self._cache.get('resources') or update: + self._cache['resources'] = self._cache.get('resources', {}) + groups_response = self.client.request('GET', '/{}'.format(DISCOVERY_PREFIX)).groups + + groups = self.default_groups(request_resources=request_resources) + + for group in groups_response: + new_group = {} + for version_raw in group['versions']: + version = version_raw['version'] + resource_group = self._cache.get('resources', {}).get(DISCOVERY_PREFIX, {}).get(group['name'], {}).get(version) + preferred = version_raw == 
group['preferredVersion'] + resources = resource_group.resources if resource_group else {} + if request_resources: + resources = self.get_resources_for_api_version(DISCOVERY_PREFIX, group['name'], version, preferred) + new_group[version] = ResourceGroup(preferred, resources=resources) + groups[DISCOVERY_PREFIX][group['name']] = new_group + self._cache['resources'].update(groups) + self._write_cache() + + return self._cache['resources'] + + def _load_server_info(self): + def just_json(_, serialized): + return serialized + + if not self._cache.get('version'): + try: + self._cache['version'] = { + 'kubernetes': self.client.request('get', '/version', serializer=just_json) + } + except (ValueError, MaxRetryError) as e: + if isinstance(e, MaxRetryError) and not isinstance(e.reason, ProtocolError): + raise + if not self.client.configuration.host.startswith("https://"): + raise ValueError("Host value %s should start with https:// when talking to HTTPS endpoint" % + self.client.configuration.host) + else: + raise + + self.__version = self._cache['version'] + + def get_resources_for_api_version(self, prefix, group, version, preferred): + """ returns a dictionary of resources associated with provided (prefix, group, version)""" + + resources = defaultdict(list) + subresources = {} + + path = '/'.join(filter(None, [prefix, group, version])) + try: + resources_response = self.client.request('GET', path).resources or [] + except ServiceUnavailableError: + resources_response = [] + + resources_raw = list(filter(lambda resource: '/' not in resource['name'], resources_response)) + subresources_raw = list(filter(lambda resource: '/' in resource['name'], resources_response)) + for subresource in subresources_raw: + resource, name = subresource['name'].split('/', 1) + if not subresources.get(resource): + subresources[resource] = {} + subresources[resource][name] = subresource + + for resource in resources_raw: + # Prevent duplicate keys + for key in ('prefix', 'group', 'api_version', 'client', 'preferred'): + resource.pop(key, None) + + resourceobj = Resource( + prefix=prefix, + group=group, + api_version=version, + client=self.client, + preferred=preferred, + subresources=subresources.get(resource['name']), + **resource + ) + resources[resource['kind']].append(resourceobj) + + resource_list = ResourceList(self.client, group=group, api_version=version, base_kind=resource['kind']) + resources[resource_list.kind].append(resource_list) + return resources + + def get(self, **kwargs): + """ Same as search, but will throw an error if there are multiple or no + results. If there are multiple results and only one is an exact match + on api_version, that resource will be returned. + """ + results = self.search(**kwargs) + # If there are multiple matches, prefer exact matches on api_version + if len(results) > 1 and kwargs.get('api_version'): + results = [ + result for result in results if result.group_version == kwargs['api_version'] + ] + # If there are multiple matches, prefer non-List kinds + if len(results) > 1 and not all([isinstance(x, ResourceList) for x in results]): + results = [result for result in results if not isinstance(result, ResourceList)] + if len(results) == 1: + return results[0] + elif not results: + raise ResourceNotFoundError('No matches found for {}'.format(kwargs)) + else: + raise ResourceNotUniqueError('Multiple matches found for {}: {}'.format(kwargs, results)) + + +class LazyDiscoverer(Discoverer): + """ A convenient container for storing discovered API resources. 
Allows + easy searching and retrieval of specific resources. + + Resources for the cluster are loaded lazily. + """ + + def __init__(self, client, cache_file): + Discoverer.__init__(self, client, cache_file) + self.__update_cache = False + + def discover(self): + self.__resources = self.parse_api_groups(request_resources=False) + + def __maybe_write_cache(self): + if self.__update_cache: + self._write_cache() + self.__update_cache = False + + @property + def api_groups(self): + return self.parse_api_groups(request_resources=False, update=True)['apis'].keys() + + def search(self, **kwargs): + # In first call, ignore ResourceNotFoundError and set default value for results + try: + results = self.__search(self.__build_search(**kwargs), self.__resources, []) + except ResourceNotFoundError: + results = [] + if not results: + self.invalidate_cache() + results = self.__search(self.__build_search(**kwargs), self.__resources, []) + self.__maybe_write_cache() + return results + + def __search(self, parts, resources, reqParams): + part = parts[0] + if part != '*': + + resourcePart = resources.get(part) + if not resourcePart: + return [] + elif isinstance(resourcePart, ResourceGroup): + if len(reqParams) != 2: + raise ValueError("prefix and group params should be present, have %s" % reqParams) + # Check if we've requested resources for this group + if not resourcePart.resources: + prefix, group, version = reqParams[0], reqParams[1], part + try: + resourcePart.resources = self.get_resources_for_api_version( + prefix, group, part, resourcePart.preferred) + except NotFoundError: + raise ResourceNotFoundError + + self._cache['resources'][prefix][group][version] = resourcePart + self.__update_cache = True + return self.__search(parts[1:], resourcePart.resources, reqParams) + elif isinstance(resourcePart, dict): + # In this case parts [0] will be a specified prefix, group, version + # as we recurse + return self.__search(parts[1:], resourcePart, reqParams + [part] ) + else: + if parts[1] != '*' and isinstance(parts[1], dict): + for _resource in resourcePart: + for term, value in parts[1].items(): + if getattr(_resource, term) == value: + return [_resource] + + return [] + else: + return resourcePart + else: + matches = [] + for key in resources.keys(): + matches.extend(self.__search([key] + parts[1:], resources, reqParams)) + return matches + + def __build_search(self, prefix=None, group=None, api_version=None, kind=None, **kwargs): + if not group and api_version and '/' in api_version: + group, api_version = api_version.split('/') + + items = [prefix, group, api_version, kind, kwargs] + return list(map(lambda x: x or '*', items)) + + def __iter__(self): + for prefix, groups in self.__resources.items(): + for group, versions in groups.items(): + for version, rg in versions.items(): + # Request resources for this groupVersion if we haven't yet + if not rg.resources: + rg.resources = self.get_resources_for_api_version( + prefix, group, version, rg.preferred) + self._cache['resources'][prefix][group][version] = rg + self.__update_cache = True + for _, resource in six.iteritems(rg.resources): + yield resource + self.__maybe_write_cache() + + +class EagerDiscoverer(Discoverer): + """ A convenient container for storing discovered API resources. Allows + easy searching and retrieval of specific resources. + + All resources are discovered for the cluster upon object instantiation. 
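+
+        A usage sketch (assumes an already-configured ApiClient named
+        api_client):
+
+            client = DynamicClient(api_client, discoverer=EagerDiscoverer)
+            v1_pods = client.resources.get(api_version='v1', kind='Pod')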
+ """ + + def update(self, resources): + self.__resources = resources + + def __init__(self, client, cache_file): + Discoverer.__init__(self, client, cache_file) + + def discover(self): + self.__resources = self.parse_api_groups(request_resources=True) + + @property + def api_groups(self): + """ list available api groups """ + return self.parse_api_groups(request_resources=True, update=True)['apis'].keys() + + + def search(self, **kwargs): + """ Takes keyword arguments and returns matching resources. The search + will happen in the following order: + prefix: The api prefix for a resource, ie, /api, /oapi, /apis. Can usually be ignored + group: The api group of a resource. Will also be extracted from api_version if it is present there + api_version: The api version of a resource + kind: The kind of the resource + arbitrary arguments (see below), in random order + + The arbitrary arguments can be any valid attribute for an Resource object + """ + results = self.__search(self.__build_search(**kwargs), self.__resources) + if not results: + self.invalidate_cache() + results = self.__search(self.__build_search(**kwargs), self.__resources) + return results + + def __build_search(self, prefix=None, group=None, api_version=None, kind=None, **kwargs): + if not group and api_version and '/' in api_version: + group, api_version = api_version.split('/') + + items = [prefix, group, api_version, kind, kwargs] + return list(map(lambda x: x or '*', items)) + + def __search(self, parts, resources): + part = parts[0] + resourcePart = resources.get(part) + + if part != '*' and resourcePart: + if isinstance(resourcePart, ResourceGroup): + return self.__search(parts[1:], resourcePart.resources) + elif isinstance(resourcePart, dict): + return self.__search(parts[1:], resourcePart) + else: + if parts[1] != '*' and isinstance(parts[1], dict): + for _resource in resourcePart: + for term, value in parts[1].items(): + if getattr(_resource, term) == value: + return [_resource] + return [] + else: + return resourcePart + elif part == '*': + matches = [] + for key in resources.keys(): + matches.extend(self.__search([key] + parts[1:], resources)) + return matches + return [] + + def __iter__(self): + for _, groups in self.__resources.items(): + for _, versions in groups.items(): + for _, resources in versions.items(): + for _, resource in resources.items(): + yield resource + + +class ResourceGroup(object): + """Helper class for Discoverer container""" + def __init__(self, preferred, resources=None): + self.preferred = preferred + self.resources = resources or {} + + def to_dict(self): + return { + '_type': 'ResourceGroup', + 'preferred': self.preferred, + 'resources': self.resources, + } + + +class CacheEncoder(json.JSONEncoder): + + def default(self, o): + return o.to_dict() + + +class CacheDecoder(json.JSONDecoder): + def __init__(self, client, *args, **kwargs): + self.client = client + json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs) + + def object_hook(self, obj): + if '_type' not in obj: + return obj + _type = obj.pop('_type') + if _type == 'Resource': + return Resource(client=self.client, **obj) + elif _type == 'ResourceList': + return ResourceList(self.client, **obj) + elif _type == 'ResourceGroup': + return ResourceGroup(obj['preferred'], resources=self.object_hook(obj['resources'])) + return obj diff --git a/kubernetes/dynamic/exceptions.py b/kubernetes/dynamic/exceptions.py new file mode 100644 index 0000000000..c8b908e7d5 --- /dev/null +++ b/kubernetes/dynamic/exceptions.py @@ 
-0,0 +1,110 @@ +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import sys +import traceback + +from kubernetes.client.rest import ApiException + + +def api_exception(e): + """ + Returns the proper Exception class for the given kubernetes.client.rest.ApiException object + https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#success-codes + """ + _, _, exc_traceback = sys.exc_info() + tb = '\n'.join(traceback.format_tb(exc_traceback)) + return { + 400: BadRequestError, + 401: UnauthorizedError, + 403: ForbiddenError, + 404: NotFoundError, + 405: MethodNotAllowedError, + 409: ConflictError, + 410: GoneError, + 422: UnprocessibleEntityError, + 429: TooManyRequestsError, + 500: InternalServerError, + 503: ServiceUnavailableError, + 504: ServerTimeoutError, + }.get(e.status, DynamicApiError)(e, tb) + + +class DynamicApiError(ApiException): + """ Generic API Error for the dynamic client """ + def __init__(self, e, tb=None): + self.status = e.status + self.reason = e.reason + self.body = e.body + self.headers = e.headers + self.original_traceback = tb + + def __str__(self): + error_message = [str(self.status), "Reason: {}".format(self.reason)] + if self.headers: + error_message.append("HTTP response headers: {}".format(self.headers)) + + if self.body: + error_message.append("HTTP response body: {}".format(self.body)) + + if self.original_traceback: + error_message.append("Original traceback: \n{}".format(self.original_traceback)) + + return '\n'.join(error_message) + + def summary(self): + if self.body: + if self.headers and self.headers.get('Content-Type') == 'application/json': + message = json.loads(self.body).get('message') + if message: + return message + + return self.body + else: + return "{} Reason: {}".format(self.status, self.reason) + +class ResourceNotFoundError(Exception): + """ Resource was not found in available APIs """ +class ResourceNotUniqueError(Exception): + """ Parameters given matched multiple API resources """ + +class KubernetesValidateMissing(Exception): + """ kubernetes-validate is not installed """ + +# HTTP Errors +class BadRequestError(DynamicApiError): + """ 400: StatusBadRequest """ +class UnauthorizedError(DynamicApiError): + """ 401: StatusUnauthorized """ +class ForbiddenError(DynamicApiError): + """ 403: StatusForbidden """ +class NotFoundError(DynamicApiError): + """ 404: StatusNotFound """ +class MethodNotAllowedError(DynamicApiError): + """ 405: StatusMethodNotAllowed """ +class ConflictError(DynamicApiError): + """ 409: StatusConflict """ +class GoneError(DynamicApiError): + """ 410: StatusGone """ +class UnprocessibleEntityError(DynamicApiError): + """ 422: StatusUnprocessibleEntity """ +class TooManyRequestsError(DynamicApiError): + """ 429: StatusTooManyRequests """ +class InternalServerError(DynamicApiError): + """ 500: StatusInternalServer """ +class ServiceUnavailableError(DynamicApiError): + """ 503: StatusServiceUnavailable """ +class 
ServerTimeoutError(DynamicApiError): + """ 504: StatusServerTimeout """ diff --git a/kubernetes/dynamic/resource.py b/kubernetes/dynamic/resource.py new file mode 100644 index 0000000000..58a60ec402 --- /dev/null +++ b/kubernetes/dynamic/resource.py @@ -0,0 +1,405 @@ +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import yaml +from functools import partial + +from pprint import pformat + + +class Resource(object): + """ Represents an API resource type, containing the information required to build urls for requests """ + + def __init__(self, prefix=None, group=None, api_version=None, kind=None, + namespaced=False, verbs=None, name=None, preferred=False, client=None, + singularName=None, shortNames=None, categories=None, subresources=None, **kwargs): + + if None in (api_version, kind, prefix): + raise ValueError("At least prefix, kind, and api_version must be provided") + + self.prefix = prefix + self.group = group + self.api_version = api_version + self.kind = kind + self.namespaced = namespaced + self.verbs = verbs + self.name = name + self.preferred = preferred + self.client = client + self.singular_name = singularName or (name[:-1] if name else "") + self.short_names = shortNames + self.categories = categories + self.subresources = { + k: Subresource(self, **v) for k, v in (subresources or {}).items() + } + + self.extra_args = kwargs + + def to_dict(self): + d = { + '_type': 'Resource', + 'prefix': self.prefix, + 'group': self.group, + 'api_version': self.api_version, + 'kind': self.kind, + 'namespaced': self.namespaced, + 'verbs': self.verbs, + 'name': self.name, + 'preferred': self.preferred, + 'singularName': self.singular_name, + 'shortNames': self.short_names, + 'categories': self.categories, + 'subresources': {k: sr.to_dict() for k, sr in self.subresources.items()}, + } + d.update(self.extra_args) + return d + + @property + def group_version(self): + if self.group: + return '{}/{}'.format(self.group, self.api_version) + return self.api_version + + def __repr__(self): + return '<{}({}/{})>'.format(self.__class__.__name__, self.group_version, self.name) + + @property + def urls(self): + full_prefix = '{}/{}'.format(self.prefix, self.group_version) + resource_name = self.name.lower() + return { + 'base': '/{}/{}'.format(full_prefix, resource_name), + 'namespaced_base': '/{}/namespaces/{{namespace}}/{}'.format(full_prefix, resource_name), + 'full': '/{}/{}/{{name}}'.format(full_prefix, resource_name), + 'namespaced_full': '/{}/namespaces/{{namespace}}/{}/{{name}}'.format(full_prefix, resource_name) + } + + def path(self, name=None, namespace=None): + url_type = [] + path_params = {} + if self.namespaced and namespace: + url_type.append('namespaced') + path_params['namespace'] = namespace + if name: + url_type.append('full') + path_params['name'] = name + else: + url_type.append('base') + return self.urls['_'.join(url_type)].format(**path_params) + + def __getattr__(self, name): + if name in self.subresources: + return 
self.subresources[name] + return partial(getattr(self.client, name), self) + + +class ResourceList(Resource): + """ Represents a list of API objects """ + + def __init__(self, client, group='', api_version='v1', base_kind='', kind=None, base_resource_lookup=None): + self.client = client + self.group = group + self.api_version = api_version + self.kind = kind or '{}List'.format(base_kind) + self.base_kind = base_kind + self.base_resource_lookup = base_resource_lookup + self.__base_resource = None + + def base_resource(self): + if self.__base_resource: + return self.__base_resource + elif self.base_resource_lookup: + self.__base_resource = self.client.resources.get(**self.base_resource_lookup) + return self.__base_resource + elif self.base_kind: + self.__base_resource = self.client.resources.get(group=self.group, api_version=self.api_version, kind=self.base_kind) + return self.__base_resource + return None + + def _items_to_resources(self, body): + """ Takes a List body and return a dictionary with the following structure: + { + 'api_version': str, + 'kind': str, + 'items': [{ + 'resource': Resource, + 'name': str, + 'namespace': str, + }] + } + """ + if body is None: + raise ValueError("You must provide a body when calling methods on a ResourceList") + + api_version = body['apiVersion'] + kind = body['kind'] + items = body.get('items') + if not items: + raise ValueError('The `items` field in the body must be populated when calling methods on a ResourceList') + + if self.kind != kind: + raise ValueError('Methods on a {} must be called with a body containing the same kind. Received {} instead'.format(self.kind, kind)) + + return { + 'api_version': api_version, + 'kind': kind, + 'items': [self._item_to_resource(item) for item in items] + } + + def _item_to_resource(self, item): + metadata = item.get('metadata', {}) + resource = self.base_resource() + if not resource: + api_version = item.get('apiVersion', self.api_version) + kind = item.get('kind', self.base_kind) + resource = self.client.resources.get(api_version=api_version, kind=kind) + return { + 'resource': resource, + 'definition': item, + 'name': metadata.get('name'), + 'namespace': metadata.get('namespace') + } + + def get(self, body, name=None, namespace=None, **kwargs): + if name: + raise ValueError('Operations on ResourceList objects do not support the `name` argument') + resource_list = self._items_to_resources(body) + response = copy.deepcopy(body) + + response['items'] = [ + item['resource'].get(name=item['name'], namespace=item['namespace'] or namespace, **kwargs).to_dict() + for item in resource_list['items'] + ] + return ResourceInstance(self, response) + + def delete(self, body, name=None, namespace=None, **kwargs): + if name: + raise ValueError('Operations on ResourceList objects do not support the `name` argument') + resource_list = self._items_to_resources(body) + response = copy.deepcopy(body) + + response['items'] = [ + item['resource'].delete(name=item['name'], namespace=item['namespace'] or namespace, **kwargs).to_dict() + for item in resource_list['items'] + ] + return ResourceInstance(self, response) + + def verb_mapper(self, verb, body, **kwargs): + resource_list = self._items_to_resources(body) + response = copy.deepcopy(body) + response['items'] = [ + getattr(item['resource'], verb)(body=item['definition'], **kwargs).to_dict() + for item in resource_list['items'] + ] + return ResourceInstance(self, response) + + def create(self, *args, **kwargs): + return self.verb_mapper('create', *args, **kwargs) + + def 
replace(self, *args, **kwargs): + return self.verb_mapper('replace', *args, **kwargs) + + def patch(self, *args, **kwargs): + return self.verb_mapper('patch', *args, **kwargs) + + def to_dict(self): + return { + '_type': 'ResourceList', + 'group': self.group, + 'api_version': self.api_version, + 'kind': self.kind, + 'base_kind': self.base_kind + } + + def __getattr__(self, name): + if self.base_resource(): + return getattr(self.base_resource(), name) + return None + + +class Subresource(Resource): + """ Represents a subresource of an API resource. This generally includes operations + like scale, as well as status objects for an instantiated resource + """ + + def __init__(self, parent, **kwargs): + self.parent = parent + self.prefix = parent.prefix + self.group = parent.group + self.api_version = parent.api_version + self.kind = kwargs.pop('kind') + self.name = kwargs.pop('name') + self.subresource = kwargs.pop('subresource', None) or self.name.split('/')[1] + self.namespaced = kwargs.pop('namespaced', False) + self.verbs = kwargs.pop('verbs', None) + self.extra_args = kwargs + + #TODO(fabianvf): Determine proper way to handle differences between resources + subresources + def create(self, body=None, name=None, namespace=None, **kwargs): + name = name or body.get('metadata', {}).get('name') + body = self.parent.client.serialize_body(body) + if self.parent.namespaced: + namespace = self.parent.client.ensure_namespace(self.parent, namespace, body) + path = self.path(name=name, namespace=namespace) + return self.parent.client.request('post', path, body=body, **kwargs) + + @property + def urls(self): + full_prefix = '{}/{}'.format(self.prefix, self.group_version) + return { + 'full': '/{}/{}/{{name}}/{}'.format(full_prefix, self.parent.name, self.subresource), + 'namespaced_full': '/{}/namespaces/{{namespace}}/{}/{{name}}/{}'.format(full_prefix, self.parent.name, self.subresource) + } + + def __getattr__(self, name): + return partial(getattr(self.parent.client, name), self) + + def to_dict(self): + d = { + 'kind': self.kind, + 'name': self.name, + 'subresource': self.subresource, + 'namespaced': self.namespaced, + 'verbs': self.verbs + } + d.update(self.extra_args) + return d + + +class ResourceInstance(object): + """ A parsed instance of an API resource. It exists solely to + ease interaction with API objects by allowing attributes to + be accessed with '.' notation. 
+ """ + + def __init__(self, client, instance): + self.client = client + # If we have a list of resources, then set the apiVersion and kind of + # each resource in 'items' + kind = instance['kind'] + if kind.endswith('List') and 'items' in instance: + kind = instance['kind'][:-4] + if not instance['items']: + instance['items'] = [] + for item in instance['items']: + if 'apiVersion' not in item: + item['apiVersion'] = instance['apiVersion'] + if 'kind' not in item: + item['kind'] = kind + + self.attributes = self.__deserialize(instance) + self.__initialised = True + + def __deserialize(self, field): + if isinstance(field, dict): + return ResourceField(params={ + k: self.__deserialize(v) for k, v in field.items() + }) + elif isinstance(field, (list, tuple)): + return [self.__deserialize(item) for item in field] + else: + return field + + def __serialize(self, field): + if isinstance(field, ResourceField): + return { + k: self.__serialize(v) for k, v in field.__dict__.items() + } + elif isinstance(field, (list, tuple)): + return [self.__serialize(item) for item in field] + elif isinstance(field, ResourceInstance): + return field.to_dict() + else: + return field + + def to_dict(self): + return self.__serialize(self.attributes) + + def to_str(self): + return repr(self) + + def __repr__(self): + return "ResourceInstance[{}]:\n {}".format( + self.attributes.kind, + ' '.join(yaml.safe_dump(self.to_dict()).splitlines(True)) + ) + + def __getattr__(self, name): + if not '_ResourceInstance__initialised' in self.__dict__: + return super(ResourceInstance, self).__getattr__(name) + return getattr(self.attributes, name) + + def __setattr__(self, name, value): + if not '_ResourceInstance__initialised' in self.__dict__: + return super(ResourceInstance, self).__setattr__(name, value) + elif name in self.__dict__: + return super(ResourceInstance, self).__setattr__(name, value) + else: + self.attributes[name] = value + + def __getitem__(self, name): + return self.attributes[name] + + def __setitem__(self, name, value): + self.attributes[name] = value + + def __dir__(self): + return dir(type(self)) + list(self.attributes.__dict__.keys()) + + +class ResourceField(object): + """ A parsed instance of an API resource attribute. It exists + solely to ease interaction with API objects by allowing + attributes to be accessed with '.' 
notation + """ + + def __init__(self, params): + self.__dict__.update(**params) + + def __repr__(self): + return pformat(self.__dict__) + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + + def __getitem__(self, name): + return self.__dict__.get(name) + + # Here resource.items will return items if available or resource.__dict__.items function if not + # resource.get will call resource.__dict__.get after attempting resource.__dict__.get('get') + def __getattr__(self, name): + return self.__dict__.get(name, getattr(self.__dict__, name, None)) + + def __setattr__(self, name, value): + self.__dict__[name] = value + + def __dir__(self): + return dir(type(self)) + list(self.__dict__.keys()) + + def __iter__(self): + for k, v in self.__dict__.items(): + yield (k, v) + + def to_dict(self): + return self.__serialize(self) + + def __serialize(self, field): + if isinstance(field, ResourceField): + return { + k: self.__serialize(v) for k, v in field.__dict__.items() + } + if isinstance(field, (list, tuple)): + return [self.__serialize(item) for item in field] + return field diff --git a/kubernetes/dynamic/test_client.py b/kubernetes/dynamic/test_client.py new file mode 100644 index 0000000000..2043226a5b --- /dev/null +++ b/kubernetes/dynamic/test_client.py @@ -0,0 +1,571 @@ +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time +import unittest +import uuid + +from kubernetes.e2e_test import base +from kubernetes.client import api_client + +from . 
import DynamicClient +from .resource import ResourceInstance, ResourceField +from .exceptions import ResourceNotFoundError + + +def short_uuid(): + id = str(uuid.uuid4()) + return id[-12:] + + +class TestDynamicClient(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.config = base.get_e2e_configuration() + + def test_cluster_custom_resources(self): + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + + with self.assertRaises(ResourceNotFoundError): + changeme_api = client.resources.get( + api_version='apps.example.com/v1', kind='ClusterChangeMe') + + crd_api = client.resources.get( + api_version='apiextensions.k8s.io/v1beta1', + kind='CustomResourceDefinition') + name = 'clusterchangemes.apps.example.com' + crd_manifest = { + 'apiVersion': 'apiextensions.k8s.io/v1beta1', + 'kind': 'CustomResourceDefinition', + 'metadata': { + 'name': name, + }, + 'spec': { + 'group': 'apps.example.com', + 'names': { + 'kind': 'ClusterChangeMe', + 'listKind': 'ClusterChangeMeList', + 'plural': 'clusterchangemes', + 'singular': 'clusterchangeme', + }, + 'scope': 'Cluster', + 'version': 'v1', + 'subresources': { + 'status': {} + } + } + } + resp = crd_api.create(crd_manifest) + + self.assertEqual(name, resp.metadata.name) + self.assertTrue(resp.status) + + resp = crd_api.get( + name=name, + ) + self.assertEqual(name, resp.metadata.name) + self.assertTrue(resp.status) + + try: + changeme_api = client.resources.get( + api_version='apps.example.com/v1', kind='ClusterChangeMe') + except ResourceNotFoundError: + # Need to wait a sec for the discovery layer to get updated + time.sleep(2) + changeme_api = client.resources.get( + api_version='apps.example.com/v1', kind='ClusterChangeMe') + resp = changeme_api.get() + self.assertEqual(resp.items, []) + changeme_name = 'custom-resource' + short_uuid() + changeme_manifest = { + 'apiVersion': 'apps.example.com/v1', + 'kind': 'ClusterChangeMe', + 'metadata': { + 'name': changeme_name, + }, + 'spec': {} + } + + resp = changeme_api.create(body=changeme_manifest) + self.assertEqual(resp.metadata.name, changeme_name) + + resp = changeme_api.get(name=changeme_name) + self.assertEqual(resp.metadata.name, changeme_name) + + changeme_manifest['spec']['size'] = 3 + resp = changeme_api.patch( + body=changeme_manifest, + content_type='application/merge-patch+json' + ) + self.assertEqual(resp.spec.size, 3) + + resp = changeme_api.get(name=changeme_name) + self.assertEqual(resp.spec.size, 3) + + resp = changeme_api.get() + self.assertEqual(len(resp.items), 1) + + resp = changeme_api.delete( + name=changeme_name, + ) + + resp = changeme_api.get() + self.assertEqual(len(resp.items), 0) + + resp = crd_api.delete( + name=name, + ) + + time.sleep(2) + client.resources.invalidate_cache() + with self.assertRaises(ResourceNotFoundError): + changeme_api = client.resources.get( + api_version='apps.example.com/v1', kind='ClusterChangeMe') + + def test_async_namespaced_custom_resources(self): + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + + with self.assertRaises(ResourceNotFoundError): + changeme_api = client.resources.get( + api_version='apps.example.com/v1', kind='ChangeMe') + + crd_api = client.resources.get( + api_version='apiextensions.k8s.io/v1beta1', + kind='CustomResourceDefinition') + + name = 'changemes.apps.example.com' + + crd_manifest = { + 'apiVersion': 'apiextensions.k8s.io/v1beta1', + 'kind': 'CustomResourceDefinition', + 'metadata': { + 'name': name, + }, + 'spec': { + 'group': 'apps.example.com', + 'names': { + 
'kind': 'ChangeMe', + 'listKind': 'ChangeMeList', + 'plural': 'changemes', + 'singular': 'changeme', + }, + 'scope': 'Namespaced', + 'version': 'v1', + 'subresources': { + 'status': {} + } + } + } + async_resp = crd_api.create(crd_manifest, async_req=True) + + self.assertEqual(name, async_resp.metadata.name) + self.assertTrue(async_resp.status) + + async_resp = crd_api.get( + name=name, + async_req=True + ) + self.assertEqual(name, async_resp.metadata.name) + self.assertTrue(async_resp.status) + + try: + changeme_api = client.resources.get( + api_version='apps.example.com/v1', kind='ChangeMe') + except ResourceNotFoundError: + # Need to wait a sec for the discovery layer to get updated + time.sleep(2) + changeme_api = client.resources.get( + api_version='apps.example.com/v1', kind='ChangeMe') + + async_resp = changeme_api.get(async_req=True) + self.assertEqual(async_resp.items, []) + + changeme_name = 'custom-resource' + short_uuid() + changeme_manifest = { + 'apiVersion': 'apps.example.com/v1', + 'kind': 'ChangeMe', + 'metadata': { + 'name': changeme_name, + }, + 'spec': {} + } + + async_resp = changeme_api.create(body=changeme_manifest, namespace='default', async_req=True) + self.assertEqual(async_resp.metadata.name, changeme_name) + + async_resp = changeme_api.get(name=changeme_name, namespace='default', async_req=True) + self.assertEqual(async_resp.metadata.name, changeme_name) + + changeme_manifest['spec']['size'] = 3 + async_resp = changeme_api.patch( + body=changeme_manifest, + namespace='default', + content_type='application/merge-patch+json', + async_req=True + ) + self.assertEqual(async_resp.spec.size, 3) + + async_resp = changeme_api.get(name=changeme_name, namespace='default', async_req=True) + self.assertEqual(async_resp.spec.size, 3) + + async_resp = changeme_api.get(namespace='default', async_req=True) + self.assertEqual(len(async_resp.items), 1) + + async_resp = changeme_api.get(async_req=True) + self.assertEqual(len(async_resp.items), 1) + + async_resp = changeme_api.delete( + name=changeme_name, + namespace='default', + async_req=True + ) + + async_resp = changeme_api.get(namespace='default', async_req=True) + self.assertEqual(len(async_resp.items), 0) + + async_resp = changeme_api.get(async_req=True) + self.assertEqual(len(async_resp.items), 0) + + async_resp = crd_api.delete( + name=name, + async_req=True + ) + + time.sleep(2) + client.resources.invalidate_cache() + with self.assertRaises(ResourceNotFoundError): + changeme_api = client.resources.get( + api_version='apps.example.com/v1', kind='ChangeMe') + + def test_namespaced_custom_resources(self): + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + + with self.assertRaises(ResourceNotFoundError): + changeme_api = client.resources.get( + api_version='apps.example.com/v1', kind='ChangeMe') + + crd_api = client.resources.get( + api_version='apiextensions.k8s.io/v1beta1', + kind='CustomResourceDefinition') + name = 'changemes.apps.example.com' + crd_manifest = { + 'apiVersion': 'apiextensions.k8s.io/v1beta1', + 'kind': 'CustomResourceDefinition', + 'metadata': { + 'name': name, + }, + 'spec': { + 'group': 'apps.example.com', + 'names': { + 'kind': 'ChangeMe', + 'listKind': 'ChangeMeList', + 'plural': 'changemes', + 'singular': 'changeme', + }, + 'scope': 'Namespaced', + 'version': 'v1', + 'subresources': { + 'status': {} + } + } + } + resp = crd_api.create(crd_manifest) + + self.assertEqual(name, resp.metadata.name) + self.assertTrue(resp.status) + + resp = crd_api.get( + name=name, + ) + 
self.assertEqual(name, resp.metadata.name) + self.assertTrue(resp.status) + + try: + changeme_api = client.resources.get( + api_version='apps.example.com/v1', kind='ChangeMe') + except ResourceNotFoundError: + # Need to wait a sec for the discovery layer to get updated + time.sleep(2) + changeme_api = client.resources.get( + api_version='apps.example.com/v1', kind='ChangeMe') + resp = changeme_api.get() + self.assertEqual(resp.items, []) + changeme_name = 'custom-resource' + short_uuid() + changeme_manifest = { + 'apiVersion': 'apps.example.com/v1', + 'kind': 'ChangeMe', + 'metadata': { + 'name': changeme_name, + }, + 'spec': {} + } + + resp = changeme_api.create(body=changeme_manifest, namespace='default') + self.assertEqual(resp.metadata.name, changeme_name) + + resp = changeme_api.get(name=changeme_name, namespace='default') + self.assertEqual(resp.metadata.name, changeme_name) + + changeme_manifest['spec']['size'] = 3 + resp = changeme_api.patch( + body=changeme_manifest, + namespace='default', + content_type='application/merge-patch+json' + ) + self.assertEqual(resp.spec.size, 3) + + resp = changeme_api.get(name=changeme_name, namespace='default') + self.assertEqual(resp.spec.size, 3) + + resp = changeme_api.get(namespace='default') + self.assertEqual(len(resp.items), 1) + + resp = changeme_api.get() + self.assertEqual(len(resp.items), 1) + + resp = changeme_api.delete( + name=changeme_name, + namespace='default' + ) + + resp = changeme_api.get(namespace='default') + self.assertEqual(len(resp.items), 0) + + resp = changeme_api.get() + self.assertEqual(len(resp.items), 0) + + resp = crd_api.delete( + name=name, + ) + + time.sleep(2) + client.resources.invalidate_cache() + with self.assertRaises(ResourceNotFoundError): + changeme_api = client.resources.get( + api_version='apps.example.com/v1', kind='ChangeMe') + + def test_service_apis(self): + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + api = client.resources.get(api_version='v1', kind='Service') + + name = 'frontend-' + short_uuid() + service_manifest = {'apiVersion': 'v1', + 'kind': 'Service', + 'metadata': {'labels': {'name': name}, + 'name': name, + 'resourceversion': 'v1'}, + 'spec': {'ports': [{'name': 'port', + 'port': 80, + 'protocol': 'TCP', + 'targetPort': 80}], + 'selector': {'name': name}}} + + resp = api.create( + body=service_manifest, + namespace='default' + ) + self.assertEqual(name, resp.metadata.name) + self.assertTrue(resp.status) + + resp = api.get( + name=name, + namespace='default' + ) + self.assertEqual(name, resp.metadata.name) + self.assertTrue(resp.status) + + service_manifest['spec']['ports'] = [{'name': 'new', + 'port': 8080, + 'protocol': 'TCP', + 'targetPort': 8080}] + resp = api.patch( + body=service_manifest, + name=name, + namespace='default' + ) + self.assertEqual(2, len(resp.spec.ports)) + self.assertTrue(resp.status) + + resp = api.delete( + name=name, body={}, + namespace='default' + ) + + def test_replication_controller_apis(self): + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + api = client.resources.get( + api_version='v1', kind='ReplicationController') + + name = 'frontend-' + short_uuid() + rc_manifest = { + 'apiVersion': 'v1', + 'kind': 'ReplicationController', + 'metadata': {'labels': {'name': name}, + 'name': name}, + 'spec': {'replicas': 2, + 'selector': {'name': name}, + 'template': {'metadata': { + 'labels': {'name': name}}, + 'spec': {'containers': [{ + 'image': 'nginx', + 'name': 'nginx', + 'ports': [{'containerPort': 80, + 
'protocol': 'TCP'}]}]}}}} + + resp = api.create( + body=rc_manifest, namespace='default') + self.assertEqual(name, resp.metadata.name) + self.assertEqual(2, resp.spec.replicas) + + resp = api.get( + name=name, namespace='default') + self.assertEqual(name, resp.metadata.name) + self.assertEqual(2, resp.spec.replicas) + + api.delete( + name=name, + namespace='default', + propagation_policy='Background') + + def test_configmap_apis(self): + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + api = client.resources.get(api_version='v1', kind='ConfigMap') + + name = 'test-configmap-' + short_uuid() + test_configmap = { + "kind": "ConfigMap", + "apiVersion": "v1", + "metadata": { + "name": name, + "labels": { + "e2e-test": "true", + }, + }, + "data": { + "config.json": "{\"command\":\"/usr/bin/mysqld_safe\"}", + "frontend.cnf": "[mysqld]\nbind-address = 10.0.0.3\n" + } + } + + resp = api.create( + body=test_configmap, namespace='default' + ) + self.assertEqual(name, resp.metadata.name) + + resp = api.get( + name=name, namespace='default', label_selector="e2e-test=true") + self.assertEqual(name, resp.metadata.name) + + count = 0 + for _ in client.watch(api, timeout=10, namespace="default", name=name): + count += 1 + self.assertTrue(count > 0, msg="no events received for watch") + + test_configmap['data']['config.json'] = "{}" + resp = api.patch( + name=name, namespace='default', body=test_configmap) + + resp = api.delete( + name=name, body={}, namespace='default') + + resp = api.get( + namespace='default', + pretty=True, + label_selector="e2e-test=true") + self.assertEqual([], resp.items) + + def test_node_apis(self): + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + api = client.resources.get(api_version='v1', kind='Node') + + for item in api.get().items: + node = api.get(name=item.metadata.name) + self.assertTrue(len(dict(node.metadata.labels)) > 0) + + # test_node_apis_partial_object_metadata lists all nodes in the cluster, + # but only retrieves object metadata + def test_node_apis_partial_object_metadata(self): + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + api = client.resources.get(api_version='v1', kind='Node') + + params = { + 'header_params': { + 'Accept': 'application/json;as=PartialObjectMetadataList;v=v1;g=meta.k8s.io'}} + resp = api.get(**params) + self.assertEqual('PartialObjectMetadataList', resp.kind) + self.assertEqual('meta.k8s.io/v1', resp.apiVersion) + + params = { + 'header_params': { + 'aCcePt': 'application/json;as=PartialObjectMetadataList;v=v1;g=meta.k8s.io'}} + resp = api.get(**params) + self.assertEqual('PartialObjectMetadataList', resp.kind) + self.assertEqual('meta.k8s.io/v1', resp.apiVersion) + + def test_server_side_apply_api(self): + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + api = client.resources.get( + api_version='v1', kind='Pod') + + name = 'pod-' + short_uuid() + pod_manifest = { + 'apiVersion': 'v1', + 'kind': 'Pod', + 'metadata': {'labels': {'name': name}, + 'name': name}, + 'spec': {'containers': [{ + 'image': 'nginx', + 'name': 'nginx', + 'ports': [{'containerPort': 80, + 'protocol': 'TCP'}]}]}} + + resp = api.server_side_apply( + namespace='default', body=pod_manifest, + field_manager='kubernetes-unittests', dry_run="All") + self.assertEqual('kubernetes-unittests', resp.metadata.managedFields[0].manager) + + +class TestDynamicClientSerialization(unittest.TestCase): + + @classmethod + def setUpClass(cls): + config = base.get_e2e_configuration() + 
cls.client = DynamicClient(api_client.ApiClient(configuration=config))
+        cls.pod_manifest = {
+            'apiVersion': 'v1',
+            'kind': 'Pod',
+            'metadata': {'name': 'foo-pod'},
+            'spec': {'containers': [{'name': "main", 'image': "busybox"}]},
+        }
+
+    def test_dict_type(self):
+        self.assertEqual(self.client.serialize_body(self.pod_manifest), self.pod_manifest)
+
+    def test_resource_instance_type(self):
+        inst = ResourceInstance(self.client, self.pod_manifest)
+        self.assertEqual(self.client.serialize_body(inst), self.pod_manifest)
+
+    def test_resource_field(self):
+        """`ResourceField` is a special type which overrides the `__getattr__` method to return `None`
+        when a non-existent attribute is accessed, which means it can pass any `hasattr(...)` test.
+        """
+        params = {
+            "foo": "bar",
+            "self": True
+        }
+        res = ResourceField(params=params)
+        self.assertEqual(res["foo"], params["foo"])
+        self.assertEqual(res["self"], params["self"])
+        self.assertEqual(self.client.serialize_body(res), params)
diff --git a/kubernetes/dynamic/test_discovery.py b/kubernetes/dynamic/test_discovery.py
new file mode 100644
index 0000000000..639ccdd334
--- /dev/null
+++ b/kubernetes/dynamic/test_discovery.py
@@ -0,0 +1,61 @@
+# Copyright 2019 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import unittest
+
+from kubernetes.e2e_test import base
+from kubernetes.client import api_client
+
+from . 
import DynamicClient + + +class TestDiscoverer(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.config = base.get_e2e_configuration() + + def test_init_cache_from_file(self): + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + client.resources.get(api_version='v1', kind='Node') + mtime1 = os.path.getmtime(client.resources._Discoverer__cache_file) + + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + client.resources.get(api_version='v1', kind='Node') + mtime2 = os.path.getmtime(client.resources._Discoverer__cache_file) + + # test no Discoverer._write_cache called + self.assertTrue(mtime1 == mtime2) + + def test_cache_decoder_resource_and_subresource(self): + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + # first invalidate cache + client.resources.invalidate_cache() + + # do Discoverer.__init__ + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + # the resources of client will use _cache['resources'] in memory + deploy1 = client.resources.get(kind='Deployment') + + # do Discoverer.__init__ + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + # the resources of client will use _cache['resources'] decode from cache file + deploy2 = client.resources.get(kind='Deployment') + + # test Resource is the same + self.assertTrue(deploy1 == deploy2) + + # test Subresource is the same + self.assertTrue(deploy1.status == deploy2.status) diff --git a/kubernetes/leaderelection b/kubernetes/leaderelection deleted file mode 120000 index 30e0567f73..0000000000 --- a/kubernetes/leaderelection +++ /dev/null @@ -1 +0,0 @@ -base/leaderelection \ No newline at end of file diff --git a/kubernetes/leaderelection.py b/kubernetes/leaderelection.py new file mode 100644 index 0000000000..62f2a97545 --- /dev/null +++ b/kubernetes/leaderelection.py @@ -0,0 +1 @@ +from kubernetes.base.leaderelection import * diff --git a/kubernetes/stream b/kubernetes/stream deleted file mode 120000 index 387e18fe54..0000000000 --- a/kubernetes/stream +++ /dev/null @@ -1 +0,0 @@ -base/stream \ No newline at end of file diff --git a/kubernetes/stream/__init__.py b/kubernetes/stream/__init__.py new file mode 100644 index 0000000000..cd34652811 --- /dev/null +++ b/kubernetes/stream/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .stream import stream, portforward diff --git a/kubernetes/stream/stream.py b/kubernetes/stream/stream.py new file mode 100644 index 0000000000..e34dedfc3b --- /dev/null +++ b/kubernetes/stream/stream.py @@ -0,0 +1,50 @@ +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import functools + +from . import ws_client + + +def _websocket_request(websocket_request, force_kwargs, api_method, *args, **kwargs): + """Override the ApiClient.request method with an alternative websocket based + method and call the supplied Kubernetes API method with that in place.""" + if force_kwargs: + for kwarg, value in force_kwargs.items(): + kwargs[kwarg] = value + api_client = api_method.__self__.api_client + # old generated code's api client has config. new ones has configuration + try: + configuration = api_client.configuration + except AttributeError: + configuration = api_client.config + prev_request = api_client.request + binary = kwargs.pop('binary', False) + try: + api_client.request = functools.partial(websocket_request, configuration, binary=binary) + out = api_method(*args, **kwargs) + # The api_client insists on converting this to a string using its representation, so we have + # to do this dance to strip it of the b' prefix and ' suffix, encode it byte-per-byte (latin1), + # escape all of the unicode \x*'s, then encode it back byte-by-byte + # However, if _preload_content=False is passed, then the entire WSClient is returned instead + # of a response, and we want to leave it alone + if binary and kwargs.get('_preload_content', True): + out = out[2:-1].encode('latin1').decode('unicode_escape').encode('latin1') + return out + finally: + api_client.request = prev_request + + +stream = functools.partial(_websocket_request, ws_client.websocket_call, None) +portforward = functools.partial(_websocket_request, ws_client.portforward_call, {'_preload_content':False}) diff --git a/kubernetes/stream/ws_client.py b/kubernetes/stream/ws_client.py new file mode 100644 index 0000000000..10c6c1bcd5 --- /dev/null +++ b/kubernetes/stream/ws_client.py @@ -0,0 +1,576 @@ +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
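For orientation, here is a minimal usage sketch of the `stream` helper defined above. It is not part of this diff: the pod name and namespace are placeholders, and a reachable cluster with a valid kubeconfig is assumed.

```python
from kubernetes import client, config
from kubernetes.stream import stream

config.load_kube_config()  # use the active kubeconfig context
v1 = client.CoreV1Api()

# stream() temporarily replaces ApiClient.request with a websocket-based
# call, so the exec API returns the command's captured output as a string.
out = stream(
    v1.connect_get_namespaced_pod_exec,
    "pod-example",   # placeholder pod name
    "default",       # placeholder namespace
    command=["/bin/sh", "-c", "echo hello"],
    stderr=True, stdin=False, stdout=True, tty=False,
)
print(out)
```

Passing `_preload_content=False` instead returns the `WSClient` defined below, which exposes the per-channel read and write methods.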
+import sys + +from kubernetes.client.rest import ApiException, ApiValueError + +import certifi +import collections +import select +import socket +import ssl +import threading +import time + +import six +import yaml + + +from six.moves.urllib.parse import urlencode, urlparse, urlunparse +from six import StringIO, BytesIO + +from websocket import WebSocket, ABNF, enableTrace, WebSocketConnectionClosedException +from base64 import urlsafe_b64decode +from requests.utils import should_bypass_proxies + +STDIN_CHANNEL = 0 +STDOUT_CHANNEL = 1 +STDERR_CHANNEL = 2 +ERROR_CHANNEL = 3 +RESIZE_CHANNEL = 4 + +class _IgnoredIO: + def write(self, _x): + pass + + def getvalue(self): + raise TypeError("Tried to read_all() from a WSClient configured to not capture. Did you mean `capture_all=True`?") + + +class WSClient: + def __init__(self, configuration, url, headers, capture_all, binary=False): + """A websocket client with support for channels. + + Exec command uses different channels for different streams. for + example, 0 is stdin, 1 is stdout and 2 is stderr. Some other API calls + like port forwarding can forward different pods' streams to different + channels. + """ + self._connected = False + self._channels = {} + self.binary = binary + self.newline = '\n' if not self.binary else b'\n' + if capture_all: + self._all = StringIO() if not self.binary else BytesIO() + else: + self._all = _IgnoredIO() + self.sock = create_websocket(configuration, url, headers) + self._connected = True + self._returncode = None + + def peek_channel(self, channel, timeout=0): + """Peek a channel and return part of the input, + empty string otherwise.""" + self.update(timeout=timeout) + if channel in self._channels: + return self._channels[channel] + return "" + + def read_channel(self, channel, timeout=0): + """Read data from a channel.""" + if channel not in self._channels: + ret = self.peek_channel(channel, timeout) + else: + ret = self._channels[channel] + if channel in self._channels: + del self._channels[channel] + return ret + + def readline_channel(self, channel, timeout=None): + """Read a line from a channel.""" + if timeout is None: + timeout = float("inf") + start = time.time() + while self.is_open() and time.time() - start < timeout: + if channel in self._channels: + data = self._channels[channel] + if self.newline in data: + index = data.find(self.newline) + ret = data[:index] + data = data[index+1:] + if data: + self._channels[channel] = data + else: + del self._channels[channel] + return ret + self.update(timeout=(timeout - time.time() + start)) + + def write_channel(self, channel, data): + """Write data to a channel.""" + # check if we're writing binary data or not + binary = six.PY3 and type(data) == six.binary_type + opcode = ABNF.OPCODE_BINARY if binary else ABNF.OPCODE_TEXT + + channel_prefix = chr(channel) + if binary: + channel_prefix = six.binary_type(channel_prefix, "ascii") + + payload = channel_prefix + data + self.sock.send(payload, opcode=opcode) + + def peek_stdout(self, timeout=0): + """Same as peek_channel with channel=1.""" + return self.peek_channel(STDOUT_CHANNEL, timeout=timeout) + + def read_stdout(self, timeout=None): + """Same as read_channel with channel=1.""" + return self.read_channel(STDOUT_CHANNEL, timeout=timeout) + + def readline_stdout(self, timeout=None): + """Same as readline_channel with channel=1.""" + return self.readline_channel(STDOUT_CHANNEL, timeout=timeout) + + def peek_stderr(self, timeout=0): + """Same as peek_channel with channel=2.""" + return 
self.peek_channel(STDERR_CHANNEL, timeout=timeout)
+
+    def read_stderr(self, timeout=None):
+        """Same as read_channel with channel=2."""
+        return self.read_channel(STDERR_CHANNEL, timeout=timeout)
+
+    def readline_stderr(self, timeout=None):
+        """Same as readline_channel with channel=2."""
+        return self.readline_channel(STDERR_CHANNEL, timeout=timeout)
+
+    def read_all(self):
+        """Return buffered data received on stdout and stderr channels.
+        This is useful for non-interactive calls where a set of commands is
+        passed to the API and their output is needed after the call has
+        concluded. Should be called after run_forever() or update().
+
+        TODO: Maybe we can process this and return a more meaningful map with
+        channels mapped for each input.
+        """
+        out = self._all.getvalue()
+        self._all = self._all.__class__()
+        self._channels = {}
+        return out
+
+    def is_open(self):
+        """True if the connection is still alive."""
+        return self._connected
+
+    def write_stdin(self, data):
+        """The same as write_channel with channel=0."""
+        self.write_channel(STDIN_CHANNEL, data)
+
+    def update(self, timeout=0):
+        """Update channel buffers with at most one complete frame of input."""
+        if not self.is_open():
+            return
+        if not self.sock.connected:
+            self._connected = False
+            return
+
+        # The options here are:
+        # select.select() - this will work on most OS, however, it has a
+        #                   limitation of only able to read fd numbers up to 1024.
+        #                   i.e. does not scale well. This was the original
+        #                   implementation.
+        # select.poll()   - this will work on most unix based OS, but not as
+        #                   efficient as epoll. Will work for fd numbers above 1024.
+        # select.epoll()  - newest and most efficient way of polling.
+        #                   However, only works on linux.
+        if hasattr(select, "poll"):
+            poll = select.poll()
+            poll.register(self.sock.sock, select.POLLIN)
+            if timeout is not None:
+                timeout *= 1_000  # poll method uses milliseconds as the time unit
+            r = poll.poll(timeout)
+            poll.unregister(self.sock.sock)
+        else:
+            r, _, _ = select.select(
+                (self.sock.sock, ), (), (), timeout)
+
+        if r:
+            op_code, frame = self.sock.recv_data_frame(True)
+            if op_code == ABNF.OPCODE_CLOSE:
+                self._connected = False
+                return
+            elif op_code == ABNF.OPCODE_BINARY or op_code == ABNF.OPCODE_TEXT:
+                data = frame.data
+                if six.PY3 and not self.binary:
+                    data = data.decode("utf-8", "replace")
+                if len(data) > 1:
+                    channel = data[0]
+                    if six.PY3 and not self.binary:
+                        channel = ord(channel)
+                    data = data[1:]
+                    if data:
+                        if channel in [STDOUT_CHANNEL, STDERR_CHANNEL]:
+                            # keep all messages in the order they were
+                            # received, for non-blocking calls.
+                            self._all.write(data)
+                        if channel not in self._channels:
+                            self._channels[channel] = data
+                        else:
+                            self._channels[channel] += data
+
+    def run_forever(self, timeout=None):
+        """Wait till connection is closed or timeout reached. Buffer any input
+        received during this time."""
+        if timeout:
+            start = time.time()
+            while self.is_open() and time.time() - start < timeout:
+                self.update(timeout=(timeout - time.time() + start))
+        else:
+            while self.is_open():
+                self.update(timeout=None)
+
+    @property
+    def returncode(self):
+        """
+        The return code. A None value indicates that the process hasn't
+        terminated yet. 
+ """ + if self.is_open(): + return None + else: + if self._returncode is None: + err = self.read_channel(ERROR_CHANNEL) + err = yaml.safe_load(err) + if err['status'] == "Success": + self._returncode = 0 + else: + self._returncode = int(err['details']['causes'][0]['message']) + return self._returncode + + def close(self, **kwargs): + """ + close websocket connection. + """ + self._connected = False + if self.sock: + self.sock.close(**kwargs) + + +WSResponse = collections.namedtuple('WSResponse', ['data']) + + +class PortForward: + def __init__(self, websocket, ports): + """A websocket client with support for port forwarding. + + Port Forward command sends on 2 channels per port, a read/write + data channel and a read only error channel. Both channels are sent an + initial frame containing the port number that channel is associated with. + """ + + self.websocket = websocket + self.local_ports = {} + for ix, port_number in enumerate(ports): + self.local_ports[port_number] = self._Port(ix, port_number) + # There is a thread run per PortForward instance which performs the translation between the + # raw socket data sent by the python application and the websocket protocol. This thread + # terminates after either side has closed all ports, and after flushing all pending data. + proxy = threading.Thread( + name="Kubernetes port forward proxy: %s" % ', '.join([str(port) for port in ports]), + target=self._proxy + ) + proxy.daemon = True + proxy.start() + + @property + def connected(self): + return self.websocket.connected + + def socket(self, port_number): + if port_number not in self.local_ports: + raise ValueError("Invalid port number") + return self.local_ports[port_number].socket + + def error(self, port_number): + if port_number not in self.local_ports: + raise ValueError("Invalid port number") + return self.local_ports[port_number].error + + def close(self): + for port in self.local_ports.values(): + port.socket.close() + + class _Port: + def __init__(self, ix, port_number): + # The remote port number + self.port_number = port_number + # The websocket channel byte number for this port + self.channel = six.int2byte(ix * 2) + # A socket pair is created to provide a means of translating the data flow + # between the python application and the kubernetes websocket. The self.python + # half of the socket pair is used by the _proxy method to receive and send data + # to the running python application. + s, self.python = socket.socketpair() + # The self.socket half of the pair is used by the python application to send + # and receive data to the eventual pod port. It is wrapped in the _Socket class + # because a socket pair is an AF_UNIX socket, not a AF_INET socket. This allows + # intercepting setting AF_INET socket options that would error against an AF_UNIX + # socket. + self.socket = self._Socket(s) + # Data accumulated from the websocket to be sent to the python application. + self.data = b'' + # All data sent from kubernetes on the port error channel. + self.error = None + + class _Socket: + def __init__(self, socket): + self._socket = socket + + def __getattr__(self, name): + return getattr(self._socket, name) + + def setsockopt(self, level, optname, value): + # The following socket option is not valid with a socket created from socketpair, + # and is set by the http.client.HTTPConnection.connect method. 
+ if level == socket.IPPROTO_TCP and optname == socket.TCP_NODELAY: + return + self._socket.setsockopt(level, optname, value) + + # Proxy all socket data between the python code and the kubernetes websocket. + def _proxy(self): + channel_ports = [] + channel_initialized = [] + local_ports = {} + for port in self.local_ports.values(): + # Setup the data channel for this port number + channel_ports.append(port) + channel_initialized.append(False) + # Setup the error channel for this port number + channel_ports.append(port) + channel_initialized.append(False) + port.python.setblocking(True) + local_ports[port.python] = port + # The data to send on the websocket socket + kubernetes_data = b'' + while True: + rlist = [] # List of sockets to read from + wlist = [] # List of sockets to write to + if self.websocket.connected: + rlist.append(self.websocket) + if kubernetes_data: + wlist.append(self.websocket) + local_all_closed = True + for port in self.local_ports.values(): + if port.python.fileno() != -1: + if self.websocket.connected: + rlist.append(port.python) + if port.data: + wlist.append(port.python) + local_all_closed = False + else: + if port.data: + wlist.append(port.python) + local_all_closed = False + else: + port.python.close() + if local_all_closed and not (self.websocket.connected and kubernetes_data): + self.websocket.close() + return + r, w, _ = select.select(rlist, wlist, []) + for sock in r: + if sock == self.websocket: + pending = True + while pending: + try: + opcode, frame = self.websocket.recv_data_frame(True) + except WebSocketConnectionClosedException: + for port in self.local_ports.values(): + port.python.close() + return + if opcode == ABNF.OPCODE_BINARY: + if not frame.data: + raise RuntimeError("Unexpected frame data size") + channel = six.byte2int(frame.data) + if channel >= len(channel_ports): + raise RuntimeError("Unexpected channel number: %s" % channel) + port = channel_ports[channel] + if channel_initialized[channel]: + if channel % 2: + if port.error is None: + port.error = '' + port.error += frame.data[1:].decode() + port.python.close() + else: + port.data += frame.data[1:] + else: + if len(frame.data) != 3: + raise RuntimeError( + "Unexpected initial channel frame data size" + ) + port_number = six.byte2int(frame.data[1:2]) + (six.byte2int(frame.data[2:3]) * 256) + if port_number != port.port_number: + raise RuntimeError( + "Unexpected port number in initial channel frame: %s" % port_number + ) + channel_initialized[channel] = True + elif opcode not in (ABNF.OPCODE_PING, ABNF.OPCODE_PONG, ABNF.OPCODE_CLOSE): + raise RuntimeError("Unexpected websocket opcode: %s" % opcode) + if not (isinstance(self.websocket.sock, ssl.SSLSocket) and self.websocket.sock.pending()): + pending = False + else: + port = local_ports[sock] + if port.python.fileno() != -1: + data = port.python.recv(1024 * 1024) + if data: + kubernetes_data += ABNF.create_frame( + port.channel + data, + ABNF.OPCODE_BINARY, + ).format() + else: + port.python.close() + for sock in w: + if sock == self.websocket: + sent = self.websocket.sock.send(kubernetes_data) + kubernetes_data = kubernetes_data[sent:] + else: + port = local_ports[sock] + if port.python.fileno() != -1: + sent = port.python.send(port.data) + port.data = port.data[sent:] + + +def get_websocket_url(url, query_params=None): + parsed_url = urlparse(url) + parts = list(parsed_url) + if parsed_url.scheme == 'http': + parts[0] = 'ws' + elif parsed_url.scheme == 'https': + parts[0] = 'wss' + if query_params: + query = [] + for key, value in 
query_params: + if key == 'command' and isinstance(value, list): + for command in value: + query.append((key, command)) + else: + query.append((key, value)) + if query: + parts[4] = urlencode(query) + return urlunparse(parts) + + +def create_websocket(configuration, url, headers=None): + enableTrace(False) + + # We just need to pass the Authorization, ignore all the other + # http headers we get from the generated code + header = [] + if headers and 'authorization' in headers: + header.append("authorization: %s" % headers['authorization']) + if headers and 'sec-websocket-protocol' in headers: + header.append("sec-websocket-protocol: %s" % + headers['sec-websocket-protocol']) + else: + header.append("sec-websocket-protocol: v4.channel.k8s.io") + + if url.startswith('wss://') and configuration.verify_ssl: + ssl_opts = { + 'cert_reqs': ssl.CERT_REQUIRED, + 'ca_certs': configuration.ssl_ca_cert or certifi.where(), + } + if configuration.assert_hostname is not None: + ssl_opts['check_hostname'] = configuration.assert_hostname + else: + ssl_opts = {'cert_reqs': ssl.CERT_NONE} + + if configuration.cert_file: + ssl_opts['certfile'] = configuration.cert_file + if configuration.key_file: + ssl_opts['keyfile'] = configuration.key_file + if configuration.tls_server_name: + ssl_opts['server_hostname'] = configuration.tls_server_name + + websocket = WebSocket(sslopt=ssl_opts, skip_utf8_validation=False) + connect_opt = { + 'header': header + } + + if configuration.proxy or configuration.proxy_headers: + connect_opt = websocket_proxycare(connect_opt, configuration, url, headers) + + websocket.connect(url, **connect_opt) + return websocket + +def websocket_proxycare(connect_opt, configuration, url, headers): + """ An internal function to be called in api-client when a websocket + create is requested. + """ + if configuration.no_proxy: + connect_opt.update({ 'http_no_proxy': configuration.no_proxy.split(',') }) + + if configuration.proxy: + proxy_url = urlparse(configuration.proxy) + connect_opt.update({'http_proxy_host': proxy_url.hostname, 'http_proxy_port': proxy_url.port}) + if configuration.proxy_headers: + for key,value in configuration.proxy_headers.items(): + if key == 'proxy-authorization' and value.startswith('Basic'): + b64value = value.split()[1] + auth = urlsafe_b64decode(b64value).decode().split(':') + connect_opt.update({'http_proxy_auth': (auth[0], auth[1]) }) + return(connect_opt) + + +def websocket_call(configuration, _method, url, **kwargs): + """An internal function to be called in api-client when a websocket + connection is required. method, url, and kwargs are the parameters of + apiClient.request method.""" + + url = get_websocket_url(url, kwargs.get("query_params")) + headers = kwargs.get("headers") + _request_timeout = kwargs.get("_request_timeout", 60) + _preload_content = kwargs.get("_preload_content", True) + capture_all = kwargs.get("capture_all", True) + binary = kwargs.get('binary', False) + try: + client = WSClient(configuration, url, headers, capture_all, binary=binary) + if not _preload_content: + return client + client.run_forever(timeout=_request_timeout) + all = client.read_all() + if binary: + return WSResponse(all) + else: + return WSResponse('%s' % ''.join(all)) + except (Exception, KeyboardInterrupt, SystemExit) as e: + raise ApiException(status=0, reason=str(e)) + + +def portforward_call(configuration, _method, url, **kwargs): + """An internal function to be called in api-client when a websocket + connection is required for port forwarding. 
args and kwargs are the + parameters of apiClient.request method.""" + + query_params = kwargs.get("query_params") + + ports = [] + for param, value in query_params: + if param == 'ports': + for port in value.split(','): + try: + port_number = int(port) + except ValueError: + raise ApiValueError("Invalid port number: %s" % port) + if not (0 < port_number < 65536): + raise ApiValueError("Port number must be between 0 and 65536: %s" % port) + if port_number in ports: + raise ApiValueError("Duplicate port numbers: %s" % port) + ports.append(port_number) + if not ports: + raise ApiValueError("Missing required parameter `ports`") + + url = get_websocket_url(url, query_params) + headers = kwargs.get("headers") + + try: + websocket = create_websocket(configuration, url, headers) + return PortForward(websocket, ports) + except (Exception, KeyboardInterrupt, SystemExit) as e: + raise ApiException(status=0, reason=str(e)) diff --git a/kubernetes/watch b/kubernetes/watch deleted file mode 120000 index b3079b32f6..0000000000 --- a/kubernetes/watch +++ /dev/null @@ -1 +0,0 @@ -base/watch \ No newline at end of file diff --git a/kubernetes/watch/__init__.py b/kubernetes/watch/__init__.py new file mode 100644 index 0000000000..ca9ac06987 --- /dev/null +++ b/kubernetes/watch/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .watch import Watch diff --git a/kubernetes/watch/watch.py b/kubernetes/watch/watch.py new file mode 100644 index 0000000000..e8fe6c63e6 --- /dev/null +++ b/kubernetes/watch/watch.py @@ -0,0 +1,228 @@ +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import pydoc +import sys + +from kubernetes import client + +PYDOC_RETURN_LABEL = ":return:" +PYDOC_FOLLOW_PARAM = ":param bool follow:" + +# Removing this suffix from return type name should give us event's object +# type. e.g., if list_namespaces() returns "NamespaceList" type, +# then list_namespaces(watch=true) returns a stream of events with objects +# of type "Namespace". In case this assumption is not true, user should +# provide return_type to Watch class's __init__. 
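As an illustration of the convention described in the comment above (a usage sketch, not part of this change; a configured kubeconfig and cluster are assumed): the event object type is inferred by stripping the `List` suffix from the wrapped method's documented return type, and the caller can override the inference when that assumption does not hold.

```python
from kubernetes import client, config, watch

config.load_kube_config()
v1 = client.CoreV1Api()

# Inferred: list_namespace's pydoc contains ":return: V1NamespaceList",
# so each event's 'object' is deserialized as a V1Namespace model.
w = watch.Watch()

# Explicit override for APIs where the List-suffix assumption fails; any
# type name understood by ApiClient.deserialize can be supplied.
w_custom = watch.Watch(return_type="V1Namespace")

for event in w.stream(v1.list_namespace, timeout_seconds=5):
    print(event["type"], event["object"].metadata.name)
```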
+TYPE_LIST_SUFFIX = "List" + + +PY2 = sys.version_info[0] == 2 +if PY2: + import httplib + HTTP_STATUS_GONE = httplib.GONE +else: + import http + HTTP_STATUS_GONE = http.HTTPStatus.GONE + + +class SimpleNamespace: + + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + +def _find_return_type(func): + for line in pydoc.getdoc(func).splitlines(): + if line.startswith(PYDOC_RETURN_LABEL): + return line[len(PYDOC_RETURN_LABEL):].strip() + return "" + + +def iter_resp_lines(resp): + buffer = bytearray() + for segment in resp.stream(amt=None, decode_content=False): + + # Append the segment (chunk) to the buffer + # + # Performance note: depending on contents of buffer and the type+value of segment, + # encoding segment into the buffer could be a wasteful step. The approach used here + # simplifies the logic farther down, but in the future it may be reasonable to + # sacrifice readability for performance. + if isinstance(segment, bytes): + buffer.extend(segment) + elif isinstance(segment, str): + buffer.extend(segment.encode("utf-8")) + else: + raise TypeError( + f"Received invalid segment type, {type(segment)}, from stream. Accepts only 'str' or 'bytes'.") + + # Split by newline (safe for utf-8 because multi-byte sequences cannot contain the newline byte) + next_newline = buffer.find(b'\n') + while next_newline != -1: + # Convert bytes to a valid utf-8 string, replacing any invalid utf-8 with the '�' character + line = buffer[:next_newline].decode( + "utf-8", errors="replace") + buffer = buffer[next_newline+1:] + if line: + yield line + else: + yield '' # Only print one empty line + next_newline = buffer.find(b'\n') + + +class Watch(object): + + def __init__(self, return_type=None): + self._raw_return_type = return_type + self._stop = False + self._api_client = client.ApiClient() + self.resource_version = None + + def stop(self): + self._stop = True + + def get_return_type(self, func): + if self._raw_return_type: + return self._raw_return_type + return_type = _find_return_type(func) + if return_type.endswith(TYPE_LIST_SUFFIX): + return return_type[:-len(TYPE_LIST_SUFFIX)] + return return_type + + def get_watch_argument_name(self, func): + if PYDOC_FOLLOW_PARAM in pydoc.getdoc(func): + return 'follow' + else: + return 'watch' + + def unmarshal_event(self, data, return_type): + if not data or data.isspace(): + return None + try: + js = json.loads(data) + js['raw_object'] = js['object'] + # BOOKMARK event is treated the same as ERROR for a quick fix of + # decoding exception + # TODO: make use of the resource_version in BOOKMARK event for more + # efficient WATCH + if return_type and js['type'] != 'ERROR' and js['type'] != 'BOOKMARK': + obj = SimpleNamespace(data=json.dumps(js['raw_object'])) + js['object'] = self._api_client.deserialize(obj, return_type) + if hasattr(js['object'], 'metadata'): + self.resource_version = js['object'].metadata.resource_version + # For custom objects that we don't have model defined, json + # deserialization results in dictionary + elif (isinstance(js['object'], dict) and 'metadata' in js['object'] + and 'resourceVersion' in js['object']['metadata']): + self.resource_version = js['object']['metadata'][ + 'resourceVersion'] + return js + except json.JSONDecodeError: + return None + + def stream(self, func, *args, **kwargs): + """Watch an API resource and stream the result back via a generator. + + Note that watching an API resource can expire. 
The method tries to + resume automatically once from the last result, but if that last result + is too old as well, an `ApiException` exception will be thrown with + ``code`` 410. In that case you have to recover yourself, probably + by listing the API resource to obtain the latest state and then + watching from that state on by setting ``resource_version`` to + one returned from listing. + + :param func: The API function pointer. Any parameter to the function + can be passed after this parameter. + + :return: Event object with these keys: + 'type': The type of event such as "ADDED", "DELETED", etc. + 'raw_object': a dict representing the watched object. + 'object': A model representation of raw_object. The name of + model will be determined based on + the func's doc string. If it cannot be determined, + 'object' value will be the same as 'raw_object'. + + Example: + v1 = kubernetes.client.CoreV1Api() + watch = kubernetes.watch.Watch() + for e in watch.stream(v1.list_namespace, resource_version=1127): + type_ = e['type'] + object_ = e['object'] # object is one of type return_type + raw_object = e['raw_object'] # raw_object is a dict + ... + if should_stop: + watch.stop() + """ + + self._stop = False + return_type = self.get_return_type(func) + watch_arg = self.get_watch_argument_name(func) + kwargs[watch_arg] = True + kwargs['_preload_content'] = False + if 'resource_version' in kwargs: + self.resource_version = kwargs['resource_version'] + + # Do not attempt retries if user specifies a timeout. + # We want to ensure we are returning within that timeout. + disable_retries = ('timeout_seconds' in kwargs) + retry_after_410 = False + deserialize = kwargs.pop('deserialize', True) + while True: + resp = func(*args, **kwargs) + try: + for line in iter_resp_lines(resp): + # unmarshal when we are receiving events from watch, + # return raw string when we are streaming log + if watch_arg == "watch": + if deserialize: + event = self.unmarshal_event(line, return_type) + else: + # Only do basic JSON parsing, no deserialize + event = json.loads(line) + if isinstance(event, dict) \ + and event['type'] == 'ERROR': + obj = event['raw_object'] + # Current request expired, let's retry, (if enabled) + # but only if we have not already retried. + if not disable_retries and not retry_after_410 and \ + obj['code'] == HTTP_STATUS_GONE: + retry_after_410 = True + break + else: + reason = "%s: %s" % ( + obj['reason'], obj['message']) + raise client.rest.ApiException( + status=obj['code'], reason=reason) + else: + retry_after_410 = False + yield event + else: + if line: + yield line # Normal non-empty line + else: + yield '' # Only yield one empty line + if self._stop: + break + finally: + resp.close() + resp.release_conn() + if self.resource_version is not None: + kwargs['resource_version'] = self.resource_version + else: + self._stop = True + + if self._stop or disable_retries: + break diff --git a/kubernetes/watch/watch_test.py b/kubernetes/watch/watch_test.py new file mode 100644 index 0000000000..b6dd45d33e --- /dev/null +++ b/kubernetes/watch/watch_test.py @@ -0,0 +1,647 @@ +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import os
+import time
+import json
+
+from unittest.mock import Mock, call, patch, MagicMock
+
+from kubernetes import client, config
+from kubernetes.client import ApiException
+
+from .watch import Watch
+
+
+class WatchTests(unittest.TestCase):
+    def setUp(self):
+        # counter for a test that needs test global state
+        self.callcount = 0
+
+    def test_watch_with_decode(self):
+        fake_resp = Mock()
+        fake_resp.close = Mock()
+        fake_resp.release_conn = Mock()
+        fake_resp.stream = Mock(
+            return_value=[
+                '{"type": "ADDED", "object": {"metadata": {"name": "test1",'
+                '"resourceVersion": "1"}, "spec": {}, "status": {}}}\n',
+                '{"type": "ADDED", "object": {"metadata": {"name": "test2",'
+                '"resourceVersion": "2"}, "spec": {}, "sta',
+                'tus": {}}}\n'
+                '{"type": "ADDED", "object": {"metadata": {"name": "test3",'
+                '"resourceVersion": "3"}, "spec": {}, "status": {}}}\n',
+                'should_not_happened\n'])
+
+        fake_api = Mock()
+        fake_api.get_namespaces = Mock(return_value=fake_resp)
+        fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
+
+        w = Watch()
+        count = 1
+        for e in w.stream(fake_api.get_namespaces):
+            self.assertEqual("ADDED", e['type'])
+            # make sure decoder worked and we got a model with the right name
+            self.assertEqual("test%d" % count, e['object'].metadata.name)
+            # make sure decoder worked and updated Watch.resource_version
+            self.assertEqual(
+                "%d" % count, e['object'].metadata.resource_version)
+            self.assertEqual("%d" % count, w.resource_version)
+            count += 1
+            # make sure we can stop the watch and the last event won't be
+            # returned
+            if count == 4:
+                w.stop()
+
+        # make sure that all three records were consumed by the stream
+        self.assertEqual(4, count)
+
+        fake_api.get_namespaces.assert_called_once_with(
+            _preload_content=False, watch=True)
+        fake_resp.stream.assert_called_once_with(
+            amt=None, decode_content=False)
+        fake_resp.close.assert_called_once()
+        fake_resp.release_conn.assert_called_once()
+
+    def test_watch_with_interspersed_newlines(self):
+        fake_resp = Mock()
+        fake_resp.close = Mock()
+        fake_resp.release_conn = Mock()
+        fake_resp.stream = Mock(
+            return_value=[
+                '\n',
+                '{"type": "ADDED", "object": {"metadata":',
+                '{"name": "test1","resourceVersion": "1"}}}\n{"type": "ADDED", ',
+                '"object": {"metadata": {"name": "test2", "resourceVersion": "2"}}}\n',
+                '\n',
+                '',
+                '{"type": "ADDED", "object": {"metadata": {"name": "test3", "resourceVersion": "3"}}}\n',
+                '\n\n\n',
+                '\n',
+            ])
+
+        fake_api = Mock()
+        fake_api.get_namespaces = Mock(return_value=fake_resp)
+        fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
+
+        w = Watch()
+        count = 0
+
+        # Consume all test events from the mock service, stopping when no more data is available.
+        # Note that "timeout_seconds" below is not a timeout; rather, it disables retries and is
+        # the only way to do so. Without that, the stream will re-read the test data forever.
+        for e in w.stream(fake_api.get_namespaces, timeout_seconds=1):
+            # Empty lines come through as None events; skip them.
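+            # (unmarshal_event() returns None for empty or whitespace-only
+            # input, and stream() yields that None through to the caller.)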
+ if e is None: + continue + count += 1 + self.assertEqual("test%d" % count, e['object'].metadata.name) + self.assertEqual(3, count) + + def test_watch_with_multibyte_utf8(self): + fake_resp = Mock() + fake_resp.close = Mock() + fake_resp.release_conn = Mock() + fake_resp.stream = Mock( + return_value=[ + # two-byte utf-8 character + '{"type":"MODIFIED","object":{"data":{"utf-8":"© 1"},"metadata":{"name":"test1","resourceVersion":"1"}}}\n', + # same copyright character expressed as bytes + b'{"type":"MODIFIED","object":{"data":{"utf-8":"\xC2\xA9 2"},"metadata":{"name":"test2","resourceVersion":"2"}}}\n' + # same copyright character with bytes split across two stream chunks + b'{"type":"MODIFIED","object":{"data":{"utf-8":"\xC2', + b'\xA9 3"},"metadata":{"n', + # more chunks of the same event, sent as a mix of bytes and strings + 'ame":"test3","resourceVersion":"3"', + '}}}', + b'\n' + ]) + + fake_api = Mock() + fake_api.get_configmaps = Mock(return_value=fake_resp) + fake_api.get_configmaps.__doc__ = ':return: V1ConfigMapList' + + w = Watch() + count = 0 + + # Consume all test events from the mock service, stopping when no more data is available. + # Note that "timeout_seconds" below is not a timeout; rather, it disables retries and is + # the only way to do so. Without that, the stream will re-read the test data forever. + for event in w.stream(fake_api.get_configmaps, timeout_seconds=1): + count += 1 + self.assertEqual("MODIFIED", event['type']) + self.assertEqual("test%d" % count, event['object'].metadata.name) + self.assertEqual("© %d" % count, event['object'].data["utf-8"]) + self.assertEqual( + "%d" % count, event['object'].metadata.resource_version) + self.assertEqual("%d" % count, w.resource_version) + self.assertEqual(3, count) + + def test_watch_with_invalid_utf8(self): + fake_resp = Mock() + fake_resp.close = Mock() + fake_resp.release_conn = Mock() + fake_resp.stream = Mock( + # test 1 uses 1 invalid utf-8 byte + # test 2 uses a sequence of 2 invalid utf-8 bytes + # test 3 uses a sequence of 3 invalid utf-8 bytes + return_value=[ + # utf-8 sequence for 😄 is \xF0\x9F\x98\x84 + # all other sequences below are invalid + # ref: https://www.w3.org/2001/06/utf-8-wrong/UTF-8-test.html + b'{"type":"MODIFIED","object":{"data":{"utf-8":"\xF0\x9F\x98\x84 1","invalid":"\x80 1"},"metadata":{"name":"test1"}}}\n', + b'{"type":"MODIFIED","object":{"data":{"utf-8":"\xF0\x9F\x98\x84 2","invalid":"\xC0\xAF 2"},"metadata":{"name":"test2"}}}\n', + # mix bytes/strings and split byte sequences across chunks + b'{"type":"MODIFIED","object":{"data":{"utf-8":"\xF0\x9F\x98', + b'\x84 ', + b'', + b'3","invalid":"\xE0\x80', + b'\xAF ', + '3"},"metadata":{"n', + 'ame":"test3"', + '}}}', + b'\n' + ]) + + fake_api = Mock() + fake_api.get_configmaps = Mock(return_value=fake_resp) + fake_api.get_configmaps.__doc__ = ':return: V1ConfigMapList' + + w = Watch() + count = 0 + + # Consume all test events from the mock service, stopping when no more data is available. + # Note that "timeout_seconds" below is not a timeout; rather, it disables retries and is + # the only way to do so. Without that, the stream will re-read the test data forever. 
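+        # Bytes are decoded with errors="replace" (see iter_resp_lines), so each
+        # invalid byte becomes one U+FFFD '�' replacement character; test N is
+        # therefore expected to yield N replacement characters
+        # (e.g. b'\xC0\xAF' decodes to two replacement characters).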
+        for event in w.stream(fake_api.get_configmaps, timeout_seconds=1):
+            count += 1
+            self.assertEqual("MODIFIED", event['type'])
+            self.assertEqual("test%d" % count, event['object'].metadata.name)
+            self.assertEqual("😄 %d" % count, event['object'].data["utf-8"])
+            # expect N replacement characters in test N
+            actual = event['object'].data["invalid"]
+            # spaces case: count spaces, then the number
+            expected_spaces = ' ' * count + f' {count}'
+            # replacement case: count replacement chars, then the number
+            expected_replacement = '�' * count + f' {count}'
+            self.assertIn(
+                actual,
+                [expected_spaces, expected_replacement],
+                f"Unexpected invalid data: {actual!r}; expected "
+                f"{expected_spaces!r} or {expected_replacement!r}"
+            )
+        self.assertEqual(3, count)
+
+    def test_watch_for_follow(self):
+        fake_resp = Mock()
+        fake_resp.close = Mock()
+        fake_resp.release_conn = Mock()
+        fake_resp.stream = Mock(
+            return_value=[
+                'log_line_1\n',
+                'log_line_2\n'])
+
+        fake_api = Mock()
+        fake_api.read_namespaced_pod_log = Mock(return_value=fake_resp)
+        fake_api.read_namespaced_pod_log.__doc__ = ':param bool follow:\n:return: str'
+
+        w = Watch()
+        count = 1
+        for e in w.stream(fake_api.read_namespaced_pod_log):
+            self.assertEqual("log_line_1", e)
+            count += 1
+            # make sure we can stop the watch and the last event won't be
+            # returned
+            if count == 2:
+                w.stop()
+
+        fake_api.read_namespaced_pod_log.assert_called_once_with(
+            _preload_content=False, follow=True)
+        fake_resp.stream.assert_called_once_with(
+            amt=None, decode_content=False)
+        fake_resp.close.assert_called_once()
+        fake_resp.release_conn.assert_called_once()
+
+    def test_watch_resource_version_set(self):
+        # https://github.com/kubernetes-client/python/issues/700
+        # ensure watching from a resource version does not reset to resource
+        # version 0 after k8s resets the watch connection
+        fake_resp = Mock()
+        fake_resp.close = Mock()
+        fake_resp.release_conn = Mock()
+        values = [
+            '{"type": "ADDED", "object": {"metadata": {"name": "test1",'
+            '"resourceVersion": "1"}, "spec": {}, "status": {}}}\n',
+            '{"type": "ADDED", "object": {"metadata": {"name": "test2",'
+            '"resourceVersion": "2"}, "spec": {}, "sta',
+            'tus": {}}}\n'
+            '{"type": "ADDED", "object": {"metadata": {"name": "test3",'
+            '"resourceVersion": "3"}, "spec": {}, "status": {}}}\n'
+        ]
+
+        # return nothing on the first call and values on the second;
+        # this emulates a watch from a rv that returns nothing in the first
+        # k8s watch reset and values later
+
+        def get_values(*args, **kwargs):
+            self.callcount += 1
+            if self.callcount == 1:
+                return []
+            else:
+                return values
+
+        fake_resp.stream = Mock(
+            side_effect=get_values)
+
+        fake_api = Mock()
+        fake_api.get_namespaces = Mock(return_value=fake_resp)
+        fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
+
+        w = Watch()
+        # ensure we keep our requested resource version, or the latest
+        # returned version, when the existing versions are older than the
+        # requested version
+        # needed for the "list existing objects, then watch from there" use case
+        calls = []
+
+        iterations = 2
+        # first two calls must use the passed rv; the first call is a
+        # "reset" and does not actually return anything,
+        # the second call must use the same rv but will return values
+        # (with a wrong rv, but a real cluster would behave correctly)
+        # calls following that will use the rv from those returned values
+        calls.append(call(_preload_content=False, watch=True,
+                          resource_version="5"))
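+        # stream() seeds self.resource_version from the caller's kwargs, so the
+        # empty first response leaves it at "5" and the retry resends the same
+        # rv; only the events returned afterwards advance it to "3".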
+        calls.append(call(_preload_content=False, watch=True,
+                          resource_version="5"))
+        for i in range(iterations):
+            # ideally we want 5 here, but as rv must be treated as an
+            # opaque value, we cannot interpret or order it, so rely
+            # on k8s returning the events completely and in order
+            calls.append(call(_preload_content=False, watch=True,
+                              resource_version="3"))
+
+        for c, e in enumerate(w.stream(fake_api.get_namespaces,
+                                       resource_version="5")):
+            if c == len(values) * iterations:
+                w.stop()
+
+        # check calls are in the list, gives good error output
+        fake_api.get_namespaces.assert_has_calls(calls)
+        # more strict test with worse error message
+        self.assertEqual(fake_api.get_namespaces.mock_calls, calls)
+
+    def test_watch_stream_twice(self):
+        w = Watch(float)
+        for step in ['first', 'second']:
+            fake_resp = Mock()
+            fake_resp.close = Mock()
+            fake_resp.release_conn = Mock()
+            fake_resp.stream = Mock(
+                return_value=['{"type": "ADDED", "object": 1}\n'] * 4)
+
+            fake_api = Mock()
+            fake_api.get_namespaces = Mock(return_value=fake_resp)
+            fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
+
+            count = 1
+            for e in w.stream(fake_api.get_namespaces):
+                count += 1
+                if count == 3:
+                    w.stop()
+
+            self.assertEqual(count, 3)
+            fake_api.get_namespaces.assert_called_once_with(
+                _preload_content=False, watch=True)
+            fake_resp.stream.assert_called_once_with(
+                amt=None, decode_content=False)
+            fake_resp.close.assert_called_once()
+            fake_resp.release_conn.assert_called_once()
+
+    def test_watch_stream_loop(self):
+        w = Watch(float)
+
+        fake_resp = Mock()
+        fake_resp.close = Mock()
+        fake_resp.release_conn = Mock()
+        fake_resp.stream = Mock(
+            return_value=['{"type": "ADDED", "object": 1}\n'])
+
+        fake_api = Mock()
+        fake_api.get_namespaces = Mock(return_value=fake_resp)
+        fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
+
+        count = 0
+
+        # when timeout_seconds is set, the stream exits automatically
+        # when the timeout is reached
+        for e in w.stream(fake_api.get_namespaces, timeout_seconds=1):
+            count = count + 1
+        self.assertEqual(count, 1)
+
+        # without timeout_seconds, the stream exits only when w.stop() is called
+        for e in w.stream(fake_api.get_namespaces):
+            count = count + 1
+            if count == 2:
+                w.stop()
+
+        self.assertEqual(count, 2)
+        self.assertEqual(fake_api.get_namespaces.call_count, 2)
+        self.assertEqual(fake_resp.stream.call_count, 2)
+        self.assertEqual(fake_resp.close.call_count, 2)
+        self.assertEqual(fake_resp.release_conn.call_count, 2)
+
+    def test_unmarshal_with_float_object(self):
+        w = Watch()
+        event = w.unmarshal_event('{"type": "ADDED", "object": 1}', 'float')
+        self.assertEqual("ADDED", event['type'])
+        self.assertEqual(1.0, event['object'])
+        self.assertTrue(isinstance(event['object'], float))
+        self.assertEqual(1, event['raw_object'])
+
+    def test_unmarshal_with_no_return_type(self):
+        w = Watch()
+        event = w.unmarshal_event('{"type": "ADDED", "object": ["test1"]}',
+                                  None)
+        self.assertEqual("ADDED", event['type'])
+        self.assertEqual(["test1"], event['object'])
+        self.assertEqual(["test1"], event['raw_object'])
+
+    def test_unmarshal_with_custom_object(self):
+        w = Watch()
+        event = w.unmarshal_event('{"type": "ADDED", "object": {"apiVersion":'
+                                  '"test.com/v1beta1","kind":"foo","metadata":'
+                                  '{"name": "bar", "resourceVersion": "1"}}}',
+                                  'object')
+        self.assertEqual("ADDED", event['type'])
+        # make sure decoder deserialized json into dictionary and updated
+        # Watch.resource_version
+        self.assertTrue(isinstance(event['object'], dict))
+        self.assertEqual("1", event['object']['metadata']['resourceVersion'])
+        self.assertEqual("1", w.resource_version)
+
+    def test_unmarshal_with_bookmark(self):
+        w = Watch()
+        event = w.unmarshal_event(
+            '{"type":"BOOKMARK","object":{"kind":"Job","apiVersion":"batch/v1"'
+            ',"metadata":{"resourceVersion":"1"},"spec":{"template":{'
+            '"metadata":{},"spec":{"containers":null}}},"status":{}}}',
+            'V1Job')
+        self.assertEqual("BOOKMARK", event['type'])
+        # Watch.resource_version is *not* updated, as BOOKMARK is treated the
+        # same as ERROR for a quick fix of the decoding exception;
+        # resource_version in BOOKMARK is *not* used at all.
+        self.assertEqual(None, w.resource_version)
+
+    def test_watch_with_exception(self):
+        fake_resp = Mock()
+        fake_resp.close = Mock()
+        fake_resp.release_conn = Mock()
+        fake_resp.stream = Mock(side_effect=KeyError('expected'))
+
+        fake_api = Mock()
+        fake_api.get_thing = Mock(return_value=fake_resp)
+
+        w = Watch()
+        try:
+            for _ in w.stream(fake_api.get_thing):
+                self.fail("Should fail on exception.")
+        except KeyError:
+            pass  # expected
+
+        fake_api.get_thing.assert_called_once_with(
+            _preload_content=False, watch=True)
+        fake_resp.stream.assert_called_once_with(
+            amt=None, decode_content=False)
+        fake_resp.close.assert_called_once()
+        fake_resp.release_conn.assert_called_once()
+
+    def test_watch_with_error_event(self):
+        fake_resp = Mock()
+        fake_resp.close = Mock()
+        fake_resp.release_conn = Mock()
+        fake_resp.stream = Mock(
+            return_value=[
+                '{"type": "ERROR", "object": {"code": 410, '
+                '"reason": "Gone", "message": "error message"}}\n'])
+
+        fake_api = Mock()
+        fake_api.get_thing = Mock(return_value=fake_resp)
+
+        w = Watch()
+        # No events are generated when no initial resourceVersion is passed
+        # No retry is attempted either, preventing an ApiException
+        assert not list(w.stream(fake_api.get_thing))
+
+        fake_api.get_thing.assert_called_once_with(
+            _preload_content=False, watch=True)
+        fake_resp.stream.assert_called_once_with(
+            amt=None, decode_content=False)
+        fake_resp.close.assert_called_once()
+        fake_resp.release_conn.assert_called_once()
+
+    def test_watch_retries_on_error_event(self):
+        fake_resp = Mock()
+        fake_resp.close = Mock()
+        fake_resp.release_conn = Mock()
+        fake_resp.stream = Mock(
+            return_value=[
+                '{"type": "ERROR", "object": {"code": 410, '
+                '"reason": "Gone", "message": "error message"}}\n'])
+
+        fake_api = Mock()
+        fake_api.get_thing = Mock(return_value=fake_resp)
+
+        w = Watch()
+        try:
+            for _ in w.stream(fake_api.get_thing, resource_version=0):
+                self.fail("Should fail with ApiException.")
+        except client.rest.ApiException:
+            pass
+
+        # Two calls should be expected during a retry
+        fake_api.get_thing.assert_has_calls(
+            [call(resource_version=0, _preload_content=False, watch=True)] * 2)
+        fake_resp.stream.assert_has_calls(
+            [call(amt=None, decode_content=False)] * 2)
+        assert fake_resp.close.call_count == 2
+        assert fake_resp.release_conn.call_count == 2
+
+    def test_watch_with_error_event_and_timeout_param(self):
+        fake_resp = Mock()
+        fake_resp.close = Mock()
+        fake_resp.release_conn = Mock()
+        fake_resp.stream = Mock(
+            return_value=[
+                '{"type": "ERROR", "object": {"code": 410, '
+                '"reason": "Gone", "message": "error message"}}\n'])
+
+        fake_api = Mock()
+        fake_api.get_thing = Mock(return_value=fake_resp)
+
+        w = Watch()
+        try:
+            for _ in w.stream(fake_api.get_thing, timeout_seconds=10):
+                self.fail("Should fail with ApiException.")
+        except client.rest.ApiException:
+            pass
+
+        fake_api.get_thing.assert_called_once_with(
+            _preload_content=False, watch=True, timeout_seconds=10)
+        fake_resp.stream.assert_called_once_with(
+            amt=None, decode_content=False)
+        fake_resp.close.assert_called_once()
+        fake_resp.release_conn.assert_called_once()
+
+    @classmethod
+    def setUpClass(cls):
+        cls.api = Mock()
+        cls.namespace = "default"
+
+    def test_pod_log_empty_lines(self):
+        pod_name = "demo-bug"
+
+        try:
+            self.api.create_namespaced_pod = Mock()
+            self.api.read_namespaced_pod = Mock()
+            self.api.delete_namespaced_pod = Mock()
+            self.api.read_namespaced_pod_log = Mock()
+
+            # Pod creation step
+            self.api.create_namespaced_pod.return_value = None
+
+            # Check pod status
+            mock_pod = Mock()
+            mock_pod.status.phase = "Running"
+            self.api.read_namespaced_pod.return_value = mock_pod
+
+            # Mocked pod log output
+            self.api.read_namespaced_pod_log.return_value = iter(["Hello from Docker\n"])
+
+            # Wait for the pod to reach 'Running'
+            timeout = 60
+            start_time = time.time()
+            while time.time() - start_time < timeout:
+                pod = self.api.read_namespaced_pod(name=pod_name, namespace=self.namespace)
+                if pod.status.phase == "Running":
+                    break
+                time.sleep(2)
+            else:
+                self.fail("Pod did not reach 'Running' state within timeout")
+
+            # Read and stream logs using Watch (mocked)
+            w = Watch()
+            log_output = []
+            # Mock logs used for this test
+            w.stream = Mock(return_value=[
+                "Hello from Docker",
+                "",
+                "",
+                "\n\n",
+                "Another log line",
+                "",
+                "\n",
+                "Final log"
+            ])
+            for event in w.stream(self.api.read_namespaced_pod_log, name=pod_name, namespace=self.namespace, follow=True):
+                log_output.append(event)
+                print(event)
+
+            # Print outputs
+            print(f"Captured logs: {log_output}")
+            # self.assertTrue(any("Hello from Docker" in line for line in log_output))
+            # self.assertTrue(any(line.strip() == "" for line in log_output), "No empty lines found in logs")
+            expected_log = [
+                "Hello from Docker",
+                "",
+                "",
+                "\n\n",
+                "Another log line",
+                "",
+                "\n",
+                "Final log"
+            ]
+
+            self.assertEqual(log_output, expected_log, "Captured logs do not match expected logs")
+
+        except ApiException as e:
+            self.fail(f"Kubernetes API exception: {e}")
+        finally:
+            # verify the pod deletion call is issued
+            self.api.delete_namespaced_pod(name=pod_name, namespace=self.namespace)
+            self.api.delete_namespaced_pod.assert_called_once_with(name=pod_name, namespace=self.namespace)
+
+    def test_watch_with_deserialize_param(self):
+        """test watch.stream() deserialize param"""
+
+        test_json = (
+            '{"type": "ADDED", '
+            '"object": {"metadata": {"name": "test1", "resourceVersion": "1"}, '
+            '"spec": {}, "status": {}}}'
+        )
+
+        # Mock object for the deserialize=True case
+        metadata_mock = MagicMock()
+        metadata_mock.name = 'test1'
+        metadata_mock.resource_version = '1'
+
+        object_mock = MagicMock()
+        object_mock.metadata = metadata_mock
+
+        event_deserialized = {
+            'type': 'ADDED',
+            'object': object_mock,
+            'raw_object': json.loads(test_json)['object']
+        }
+
+        # Event for the deserialize=False case - object is a plain dict
+        event_raw = {
+            'type': 'ADDED',
+            'object': json.loads(test_json)['object'],
+            'raw_object': json.loads(test_json)['object']
+        }
+
+        # Patch Watch.stream to return event_deserialized for deserialize=True
+        # and event_raw for deserialize=False - handle both calls with side_effect
+        def stream_side_effect(func, deserialize):
+            if deserialize:
+                return [event_deserialized]
+            else:
+                return [event_raw]
+
+        with patch.object(Watch, 'stream', side_effect=stream_side_effect):
+
+            w = Watch()
+
+            # test case with deserialize=True
+            for e in w.stream(lambda: None, deserialize=True):  # dummy API func
+                self.assertEqual("ADDED", e['type'])
+                self.assertTrue(hasattr(e['object'], 'metadata'))
+                self.assertEqual("test1", e['object'].metadata.name)
+                self.assertEqual("1", e['object'].metadata.resource_version)
+                self.assertEqual(event_deserialized['raw_object'], e['raw_object'])
+
+            # test case with deserialize=False
+            for e in w.stream(lambda: None, deserialize=False):
+                self.assertEqual("ADDED", e['type'])
+                self.assertIsInstance(e['object'], dict)
+                self.assertEqual("test1", e['object']['metadata']['name'])
+                self.assertEqual("1", e['object']['metadata']['resourceVersion'])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/kubernetes/ws_client.py b/kubernetes/ws_client.py
new file mode 100644
index 0000000000..de88a866da
--- /dev/null
+++ b/kubernetes/ws_client.py
@@ -0,0 +1 @@
+from kubernetes.base.stream.ws_client import *
diff --git a/setup-windows-dev.py b/setup-windows-dev.py
new file mode 100644
index 0000000000..034570b182
--- /dev/null
+++ b/setup-windows-dev.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+"""
+Windows Development Setup Script for Kubernetes Python Client
+
+This script helps Windows developers set up the repository by handling
+symlink directories that don't work properly on Windows.
+"""
+
+import os
+import shutil
+import sys
+
+def is_windows():
+    return sys.platform.startswith('win')
+
+def setup_windows_directories():
+    """Replace symlink directories with actual directory copies for Windows development."""
+    if not is_windows():
+        print("This script is only needed on Windows systems.")
+        return
+
+    symlink_dirs = [
+        ('kubernetes/config', 'kubernetes/base/config'),
+        ('kubernetes/watch', 'kubernetes/base/watch')
+    ]
+
+    for symlink_path, target_path in symlink_dirs:
+        if os.path.exists(symlink_path):
+            # Check if it's a symlink or a text file (a broken symlink on Windows)
+            if os.path.isfile(symlink_path):
+                print(f"Removing broken symlink file: {symlink_path}")
+                os.remove(symlink_path)
+            elif os.path.islink(symlink_path):
+                print(f"Removing symlink: {symlink_path}")
+                os.unlink(symlink_path)
+
+        if os.path.exists(target_path):
+            print(f"Creating directory copy: {symlink_path} -> {target_path}")
+            shutil.copytree(target_path, symlink_path)
+        else:
+            print(f"Warning: Target directory not found: {target_path}")
+
+def main():
+    print("Setting up Kubernetes Python Client for Windows development...")
+    setup_windows_directories()
+    print("Setup complete! You can now run 'pip install -e .' and tests.")
+
+if __name__ == "__main__":
+    main()