From f597cf34c67b27bbd022b502a83b6fe2afb17161 Mon Sep 17 00:00:00 2001
From: "Bala.FA"
Date: Wed, 15 Oct 2025 06:16:57 +0530
Subject: [PATCH] Allow BaseException internals to be set

Fixes #1519

Signed-off-by: Bala.FA
---
 minio/error.py            | 107 +++++++--
 tests/functional/tests.py | 474 +++++++++++++++++++-------------------
 2 files changed, 327 insertions(+), 254 deletions(-)

diff --git a/minio/error.py b/minio/error.py
index b73bdc40..fd169beb 100644
--- a/minio/error.py
+++ b/minio/error.py
@@ -30,7 +30,6 @@
 
 from __future__ import absolute_import, annotations
 
-from dataclasses import dataclass
 from typing import Optional, Type, TypeVar
 from xml.etree import ElementTree as ET
 
@@ -80,7 +79,6 @@ def status_code(self) -> int:
 A = TypeVar("A", bound="S3Error")
 
 
-@dataclass(frozen=True)
 class S3Error(MinioException):
     """
     Raised to indicate that error response is received
@@ -92,24 +90,65 @@ class S3Error(MinioException):
     resource: Optional[str]
     request_id: Optional[str]
     host_id: Optional[str]
-    bucket_name: Optional[str] = None
-    object_name: Optional[str] = None
+    bucket_name: Optional[str]
+    object_name: Optional[str]
+
+    _EXC_MUTABLES = {"__traceback__", "__context__", "__cause__", "__notes__"}
+
+    def __init__(  # pylint: disable=too-many-positional-arguments
+        self,
+        response: BaseHTTPResponse,
+        code: Optional[str],
+        message: Optional[str],
+        resource: Optional[str],
+        request_id: Optional[str],
+        host_id: Optional[str],
+        bucket_name: Optional[str] = None,
+        object_name: Optional[str] = None,
+    ):
+        object.__setattr__(self, "response", response)
+        object.__setattr__(self, "code", code)
+        object.__setattr__(self, "message", message)
+        object.__setattr__(self, "resource", resource)
+        object.__setattr__(self, "request_id", request_id)
+        object.__setattr__(self, "host_id", host_id)
+        object.__setattr__(self, "bucket_name", bucket_name)
+        object.__setattr__(self, "object_name", object_name)
+
+        bucket_message = f", bucket_name: {bucket_name}" if bucket_name else ""
+        object_message = f", object_name: {object_name}" if object_name else ""
 
-    def __post_init__(self):
-        bucket_message = (
-            (", bucket_name: " + self.bucket_name)
-            if self.bucket_name else ""
-        )
-        object_message = (
-            (", object_name: " + self.object_name)
-            if self.object_name else ""
-        )
         super().__init__(
-            f"S3 operation failed; code: {self.code}, message: {self.message}, "
-            f"resource: {self.resource}, request_id: {self.request_id}, "
-            f"host_id: {self.host_id}{bucket_message}{object_message}"
+            f"S3 operation failed; code: {code}, message: {message}, "
+            f"resource: {resource}, request_id: {request_id}, "
+            f"host_id: {host_id}{bucket_message}{object_message}"
        )
 
+        # freeze after init
+        object.__setattr__(self, "_is_frozen", True)
+
+    def __setattr__(self, name, value):
+        if name in self._EXC_MUTABLES:
+            object.__setattr__(self, name, value)
+            return
+        if getattr(self, "_is_frozen", False):
+            raise AttributeError(
+                f"{self.__class__.__name__} is frozen and "
+                "does not allow attribute assignment"
+            )
+        object.__setattr__(self, name, value)
+
+    def __delattr__(self, name):
+        if name in self._EXC_MUTABLES:
+            object.__delattr__(self, name)
+            return
+        if getattr(self, "_is_frozen", False):
+            raise AttributeError(
+                f"{self.__class__.__name__} is frozen and "
+                "does not allow attribute deletion"
+            )
+        object.__delattr__(self, name)
+
     @classmethod
     def fromxml(cls: Type[A], response: BaseHTTPResponse) -> A:
         """Create new object with values from XML element."""
@@ -126,7 +165,7 @@ def fromxml(cls: Type[A], response: BaseHTTPResponse) -> A:
         )
def copy(self, code: str, message: str) -> S3Error: - """Make a copy with replace code and message.""" + """Make a copy with replaced code and message.""" return S3Error( response=self.response, code=code, @@ -138,6 +177,40 @@ def copy(self, code: str, message: str) -> S3Error: object_name=self.object_name, ) + def __repr__(self): + return ( + f"S3Error(code={self.code!r}, message={self.message!r}, " + f"resource={self.resource!r}, request_id={self.request_id!r}, " + f"host_id={self.host_id!r}, bucket_name={self.bucket_name!r}, " + f"object_name={self.object_name!r})" + ) + + def __eq__(self, other): + if not isinstance(other, S3Error): + return NotImplemented + return ( + self.code == other.code + and self.message == other.message + and self.resource == other.resource + and self.request_id == other.request_id + and self.host_id == other.host_id + and self.bucket_name == other.bucket_name + and self.object_name == other.object_name + ) + + def __hash__(self): + return hash( + ( + self.code, + self.message, + self.resource, + self.request_id, + self.host_id, + self.bucket_name, + self.object_name, + ) + ) + class MinioAdminException(Exception): """Raised to indicate admin API execution error.""" diff --git a/tests/functional/tests.py b/tests/functional/tests.py index 58935976..8d7f667c 100644 --- a/tests/functional/tests.py +++ b/tests/functional/tests.py @@ -53,10 +53,10 @@ from minio.time import to_http_header from minio.versioningconfig import SUSPENDED, VersioningConfig -_CLIENT = None # initialized in main(). -_TEST_FILE = None # initialized in main(). -_LARGE_FILE = None # initialized in main(). -_IS_AWS = None # initialized in main(). +_client = None # pylint: disable=invalid-name +_test_file = None # pylint: disable=invalid-name +_large_file = None # pylint: disable=invalid-name +_is_aws = None # pylint: disable=invalid-name KB = 1024 MB = 1024 * KB HTTP = urllib3.PoolManager( @@ -174,20 +174,20 @@ def test_make_bucket_default_region(log_entry): } # Create a bucket with default bucket location - _call(log_entry, _CLIENT.make_bucket, bucket_name=bucket_name) + _call(log_entry, _client.make_bucket, bucket_name=bucket_name) # Check if bucket was created properly - _call(log_entry, _CLIENT.bucket_exists, bucket_name=bucket_name) + _call(log_entry, _client.bucket_exists, bucket_name=bucket_name) # Remove bucket - _call(log_entry, _CLIENT.remove_bucket, bucket_name=bucket_name) + _call(log_entry, _client.remove_bucket, bucket_name=bucket_name) # Test passes - log_entry["method"] = _CLIENT.make_bucket + log_entry["method"] = _client.make_bucket def test_make_bucket_with_region(log_entry): """Test make_bucket() with region.""" # Only test make bucket with region against AWS S3 - if not _IS_AWS: + if not _is_aws: return # Get a unique bucket_name @@ -203,16 +203,16 @@ def test_make_bucket_with_region(log_entry): # Create a bucket with default bucket location _call( log_entry, - _CLIENT.make_bucket, + _client.make_bucket, bucket_name=bucket_name, location=location, ) # Check if bucket was created properly - _call(log_entry, _CLIENT.bucket_exists, bucket_name=bucket_name) + _call(log_entry, _client.bucket_exists, bucket_name=bucket_name) # Remove bucket - _call(log_entry, _CLIENT.remove_bucket, bucket_name=bucket_name) + _call(log_entry, _client.remove_bucket, bucket_name=bucket_name) # Test passes - log_entry["method"] = _CLIENT.make_bucket + log_entry["method"] = _client.make_bucket def test_negative_make_bucket_invalid_name( # pylint: disable=invalid-name @@ -235,15 +235,15 @@ def 
test_negative_make_bucket_invalid_name( # pylint: disable=invalid-name log_entry["args"]["bucket_name"] = name try: # Create a bucket with default bucket location - _call(log_entry, _CLIENT.make_bucket, bucket_name=name) + _call(log_entry, _client.make_bucket, bucket_name=name) # Check if bucket was created properly - _call(log_entry, _CLIENT.bucket_exists, bucket_name=name) + _call(log_entry, _client.bucket_exists, bucket_name=name) # Remove bucket - _call(log_entry, _CLIENT.remove_bucket, bucket_name=name) + _call(log_entry, _client.remove_bucket, bucket_name=name) except ValueError: pass # Test passes - log_entry["method"] = _CLIENT.make_bucket + log_entry["method"] = _client.make_bucket log_entry["args"]['bucket_name'] = invalid_bucket_name_list @@ -254,10 +254,10 @@ def test_list_buckets(log_entry): bucket_name = _gen_bucket_name() # Create a bucket with default bucket location - _call(log_entry, _CLIENT.make_bucket, bucket_name=bucket_name) + _call(log_entry, _client.make_bucket, bucket_name=bucket_name) try: - buckets = _CLIENT.list_buckets() + buckets = _client.list_buckets() for bucket in buckets: # bucket object should be of a valid value. if bucket.name and bucket.creation_date: @@ -265,7 +265,7 @@ def test_list_buckets(log_entry): raise ValueError('list_bucket api failure') finally: # Remove bucket - _call(log_entry, _CLIENT.remove_bucket, bucket_name=bucket_name) + _call(log_entry, _client.remove_bucket, bucket_name=bucket_name) def test_select_object_content(log_entry): @@ -281,9 +281,9 @@ def test_select_object_content(log_entry): } try: - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) content = io.BytesIO(b"col1,col2,col3\none,two,three\nX,Y,Z\n") - _CLIENT.put_object( + _client.put_object( bucket_name=bucket_name, object_name=csvfile, data=content, @@ -296,7 +296,7 @@ def test_select_object_content(log_entry): CSVOutputSerialization(), request_progress=True, ) - data = _CLIENT.select_object_content( + data = _client.select_object_content( bucket_name=bucket_name, object_name=csvfile, request=request, @@ -314,16 +314,16 @@ def test_select_object_content(log_entry): '"col1,col2,col3\none,two,three\nX,Y,Z\n"', f"Received {records.getvalue().decode()}") finally: - _CLIENT.remove_object(bucket_name=bucket_name, object_name=csvfile) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_object(bucket_name=bucket_name, object_name=csvfile) + _client.remove_bucket(bucket_name=bucket_name) def _test_fput_object(bucket_name, object_name, filename, metadata, sse): """Test fput_object().""" try: - _CLIENT.make_bucket(bucket_name=bucket_name) - if _IS_AWS: - _CLIENT.fput_object( + _client.make_bucket(bucket_name=bucket_name) + if _is_aws: + _client.fput_object( bucket_name=bucket_name, object_name=object_name, file_path=filename, @@ -331,21 +331,21 @@ def _test_fput_object(bucket_name, object_name, filename, metadata, sse): sse=sse, ) else: - _CLIENT.fput_object( + _client.fput_object( bucket_name=bucket_name, object_name=object_name, file_path=filename, sse=sse, ) - _CLIENT.stat_object( + _client.stat_object( bucket_name=bucket_name, object_name=object_name, ssec=sse, ) finally: - _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_object(bucket_name=bucket_name, object_name=object_name) + _client.remove_bucket(bucket_name=bucket_name) def test_fput_object_small_file(log_entry, sse=None): @@ -362,11 +362,11 @@ def 
test_fput_object_small_file(log_entry, sse=None): log_entry["args"] = { "bucket_name": bucket_name, "object_name": object_name, - "file_path": _TEST_FILE, + "file_path": _test_file, "metadata": _serialize(metadata), } - _test_fput_object(bucket_name, object_name, _TEST_FILE, metadata, sse) + _test_fput_object(bucket_name, object_name, _test_file, metadata, sse) def test_fput_object_large_file(log_entry, sse=None): @@ -383,12 +383,12 @@ def test_fput_object_large_file(log_entry, sse=None): log_entry["args"] = { "bucket_name": bucket_name, "object_name": object_name, - "file_path": _LARGE_FILE, + "file_path": _large_file, "metadata": _serialize(metadata), } # upload local large file through multipart. - _test_fput_object(bucket_name, object_name, _LARGE_FILE, metadata, sse) + _test_fput_object(bucket_name, object_name, _large_file, metadata, sse) def test_fput_object_with_content_type( # pylint: disable=invalid-name @@ -404,12 +404,12 @@ def test_fput_object_with_content_type( # pylint: disable=invalid-name log_entry["args"] = { "bucket_name": bucket_name, "object_name": object_name, - "file_path": _TEST_FILE, + "file_path": _test_file, "metadata": _serialize(metadata), "content_type": content_type, } - _test_fput_object(bucket_name, object_name, _TEST_FILE, metadata, None) + _test_fput_object(bucket_name, object_name, _test_file, metadata, None) def _validate_stat(st_obj, expected_size, expected_meta, version_id=None): @@ -475,18 +475,18 @@ def test_copy_object_no_copy_condition( # pylint: disable=invalid-name } try: - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) # Upload a streaming object of 1 KiB size = 1 * KB reader = LimitedRandomReader(size) - _CLIENT.put_object( + _client.put_object( bucket_name=bucket_name, object_name=object_source, data=reader, length=size, sse=ssec, ) - _CLIENT.copy_object( + _client.copy_object( bucket_name=bucket_name, object_name=object_copy, sse=ssec, @@ -496,19 +496,19 @@ def test_copy_object_no_copy_condition( # pylint: disable=invalid-name ssec=ssec_copy, ), ) - st_obj = _CLIENT.stat_object( + st_obj = _client.stat_object( bucket_name=bucket_name, object_name=object_copy, ssec=ssec, ) _validate_stat(st_obj, size, {}) finally: - _CLIENT.remove_object( + _client.remove_object( bucket_name=bucket_name, object_name=object_source, ) - _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_copy) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_object(bucket_name=bucket_name, object_name=object_copy) + _client.remove_bucket(bucket_name=bucket_name) def test_copy_object_with_metadata(log_entry): @@ -533,18 +533,18 @@ def test_copy_object_with_metadata(log_entry): } try: - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) # Upload a streaming object of 1 KiB size = 1 * KB reader = LimitedRandomReader(size) - _CLIENT.put_object( + _client.put_object( bucket_name=bucket_name, object_name=object_source, data=reader, length=size, ) # Perform a server side copy of an object - _CLIENT.copy_object( + _client.copy_object( bucket_name=bucket_name, object_name=object_copy, source=CopySource( @@ -555,7 +555,7 @@ def test_copy_object_with_metadata(log_entry): metadata_directive=REPLACE, ) # Verification - st_obj = _CLIENT.stat_object( + st_obj = _client.stat_object( bucket_name=bucket_name, object_name=object_copy, ) @@ -564,12 +564,12 @@ def test_copy_object_with_metadata(log_entry): 'x-amz-meta-10': 'value'} _validate_stat(st_obj, size, expected_metadata) 
finally: - _CLIENT.remove_object( + _client.remove_object( bucket_name=bucket_name, object_name=object_source, ) - _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_copy) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_object(bucket_name=bucket_name, object_name=object_copy) + _client.remove_bucket(bucket_name=bucket_name) def test_copy_object_etag_match(log_entry): @@ -588,18 +588,18 @@ def test_copy_object_etag_match(log_entry): } try: - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) # Upload a streaming object of 1 KiB size = 1 * KB reader = LimitedRandomReader(size) - _CLIENT.put_object( + _client.put_object( bucket_name=bucket_name, object_name=object_source, data=reader, length=size, ) # Perform a server side copy of an object - _CLIENT.copy_object( + _client.copy_object( bucket_name=bucket_name, object_name=object_copy, source=CopySource( @@ -608,12 +608,12 @@ def test_copy_object_etag_match(log_entry): ), ) # Verification - source_etag = _CLIENT.stat_object( + source_etag = _client.stat_object( bucket_name=bucket_name, object_name=object_source, ).etag log_entry["args"]["conditions"] = {'set_match_etag': source_etag} - _CLIENT.copy_object( + _client.copy_object( bucket_name=bucket_name, object_name=object_copy, source=CopySource( @@ -623,12 +623,12 @@ def test_copy_object_etag_match(log_entry): ), ) finally: - _CLIENT.remove_object( + _client.remove_object( bucket_name=bucket_name, object_name=object_source, ) - _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_copy) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_object(bucket_name=bucket_name, object_name=object_copy) + _client.remove_bucket(bucket_name=bucket_name) def test_copy_object_negative_etag_match( # pylint: disable=invalid-name @@ -648,11 +648,11 @@ def test_copy_object_negative_etag_match( # pylint: disable=invalid-name } try: - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) # Upload a streaming object of 1 KiB size = 1 * KB reader = LimitedRandomReader(size) - _CLIENT.put_object( + _client.put_object( bucket_name=bucket_name, object_name=object_source, data=reader, @@ -663,7 +663,7 @@ def test_copy_object_negative_etag_match( # pylint: disable=invalid-name # with incorrect pre-conditions and fail etag = 'test-etag' log_entry["args"]["conditions"] = {'set_match_etag': etag} - _CLIENT.copy_object( + _client.copy_object( bucket_name=bucket_name, object_name=object_copy, source=CopySource( @@ -676,12 +676,12 @@ def test_copy_object_negative_etag_match( # pylint: disable=invalid-name if exc.code != "PreconditionFailed": raise finally: - _CLIENT.remove_object( + _client.remove_object( bucket_name=bucket_name, object_name=object_source, ) - _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_copy) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_object(bucket_name=bucket_name, object_name=object_copy) + _client.remove_bucket(bucket_name=bucket_name) def test_copy_object_modified_since(log_entry): @@ -700,11 +700,11 @@ def test_copy_object_modified_since(log_entry): } try: - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) # Upload a streaming object of 1 KiB size = 1 * KB reader = LimitedRandomReader(size) - _CLIENT.put_object( + _client.put_object( bucket_name=bucket_name, object_name=object_source, data=reader, @@ -716,7 +716,7 @@ def test_copy_object_modified_since(log_entry): 
'set_modified_since': to_http_header(mod_since)} # Perform a server side copy of an object # and expect the copy to complete successfully - _CLIENT.copy_object( + _client.copy_object( bucket_name=bucket_name, object_name=object_copy, source=CopySource( @@ -726,12 +726,12 @@ def test_copy_object_modified_since(log_entry): ), ) finally: - _CLIENT.remove_object( + _client.remove_object( bucket_name=bucket_name, object_name=object_source, ) - _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_copy) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_object(bucket_name=bucket_name, object_name=object_copy) + _client.remove_bucket(bucket_name=bucket_name) def test_copy_object_unmodified_since( # pylint: disable=invalid-name @@ -751,11 +751,11 @@ def test_copy_object_unmodified_since( # pylint: disable=invalid-name } try: - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) # Upload a streaming object of 1 KiB size = 1 * KB reader = LimitedRandomReader(size) - _CLIENT.put_object( + _client.put_object( bucket_name=bucket_name, object_name=object_source, data=reader, @@ -769,7 +769,7 @@ def test_copy_object_unmodified_since( # pylint: disable=invalid-name # Perform a server side copy of an object and expect # the copy to fail since the creation/modification # time is now, way later than unmodification time, April 1st, 2014 - _CLIENT.copy_object( + _client.copy_object( bucket_name=bucket_name, object_name=object_copy, source=CopySource( @@ -782,12 +782,12 @@ def test_copy_object_unmodified_since( # pylint: disable=invalid-name if exc.code != "PreconditionFailed": raise finally: - _CLIENT.remove_object( + _client.remove_object( bucket_name=bucket_name, object_name=object_source, ) - _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_copy) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_object(bucket_name=bucket_name, object_name=object_copy) + _client.remove_bucket(bucket_name=bucket_name) def test_put_object(log_entry, sse=None): @@ -809,17 +809,17 @@ def test_put_object(log_entry, sse=None): } try: - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) # Put/Upload a streaming object of 1 MiB reader = LimitedRandomReader(length) - _CLIENT.put_object( + _client.put_object( bucket_name=bucket_name, object_name=object_name, data=reader, length=length, sse=sse, ) - _CLIENT.stat_object( + _client.stat_object( bucket_name=bucket_name, object_name=object_name, ssec=sse, @@ -839,7 +839,7 @@ def test_put_object(log_entry, sse=None): log_entry["args"]["content_type"] = content_type = ( "application/octet-stream") log_entry["args"]["object_name"] = object_name + "-metadata" - _CLIENT.put_object( + _client.put_object( bucket_name=bucket_name, object_name=object_name + "-metadata", data=reader, @@ -851,7 +851,7 @@ def test_put_object(log_entry, sse=None): # Stat on the uploaded object to check if it exists # Fetch saved stat metadata on a previously uploaded object with # metadata. 
- st_obj = _CLIENT.stat_object( + st_obj = _client.stat_object( bucket_name=bucket_name, object_name=object_name + "-metadata", ssec=sse, @@ -868,12 +868,12 @@ def test_put_object(log_entry, sse=None): if 'x-amz-meta-test-key' not in normalized_meta: raise ValueError("Metadata key 'x-amz-meta-test-key' not found") finally: - _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) - _CLIENT.remove_object( + _client.remove_object(bucket_name=bucket_name, object_name=object_name) + _client.remove_object( bucket_name=bucket_name, object_name=object_name+'-metadata', ) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_bucket(bucket_name=bucket_name) def test_negative_put_object_with_path_segment( # pylint: disable=invalid-name @@ -893,19 +893,19 @@ def test_negative_put_object_with_path_segment( # pylint: disable=invalid-name } try: - _CLIENT.make_bucket(bucket_name=bucket_name) - _CLIENT.put_object( + _client.make_bucket(bucket_name=bucket_name) + _client.put_object( bucket_name=bucket_name, object_name=object_name, data=io.BytesIO(b''), length=0, ) - _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) + _client.remove_object(bucket_name=bucket_name, object_name=object_name) except S3Error as err: if err.code != 'XMinioInvalidObjectName': raise finally: - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_bucket(bucket_name=bucket_name) def _test_stat_object(log_entry, sse=None, version_check=False): @@ -929,16 +929,16 @@ def _test_stat_object(log_entry, sse=None, version_check=False): version_id1 = None version_id2 = None - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) try: if version_check: - _CLIENT.set_bucket_versioning( + _client.set_bucket_versioning( bucket_name=bucket_name, config=VersioningConfig(ENABLED), ) # Put/Upload a streaming object of 1 MiB reader = LimitedRandomReader(length) - result = _CLIENT.put_object( + result = _client.put_object( bucket_name=bucket_name, object_name=object_name, data=reader, @@ -946,7 +946,7 @@ def _test_stat_object(log_entry, sse=None, version_check=False): sse=sse, ) version_id1 = result.version_id - _CLIENT.stat_object( + _client.stat_object( bucket_name=bucket_name, object_name=object_name, ssec=sse, @@ -962,7 +962,7 @@ def _test_stat_object(log_entry, sse=None, version_check=False): log_entry["args"]["content_type"] = content_type = ( "application/octet-stream") log_entry["args"]["object_name"] = object_name + "-metadata" - result = _CLIENT.put_object( + result = _client.put_object( bucket_name=bucket_name, object_name=object_name + "-metadata", data=reader, @@ -975,7 +975,7 @@ def _test_stat_object(log_entry, sse=None, version_check=False): # Stat on the uploaded object to check if it exists # Fetch saved stat metadata on a previously uploaded object with # metadata. 
- st_obj = _CLIENT.stat_object( + st_obj = _client.stat_object( bucket_name=bucket_name, object_name=object_name + "-metadata", ssec=sse, @@ -986,17 +986,17 @@ def _test_stat_object(log_entry, sse=None, version_check=False): st_obj, length, metadata, version_id=version_id2, ) finally: - _CLIENT.remove_object( + _client.remove_object( bucket_name=bucket_name, object_name=object_name, version_id=version_id1, ) - _CLIENT.remove_object( + _client.remove_object( bucket_name=bucket_name, object_name=object_name+'-metadata', version_id=version_id2, ) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_bucket(bucket_name=bucket_name) def test_stat_object(log_entry, sse=None): @@ -1022,26 +1022,26 @@ def _test_remove_object(log_entry, version_check=False): "object_name": object_name, } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) try: if version_check: - _CLIENT.set_bucket_versioning( + _client.set_bucket_versioning( bucket_name=bucket_name, config=VersioningConfig(ENABLED), ) - result = _CLIENT.put_object( + result = _client.put_object( bucket_name=bucket_name, object_name=object_name, data=LimitedRandomReader(length), length=length, ) - _CLIENT.remove_object( + _client.remove_object( bucket_name=bucket_name, object_name=object_name, version_id=result.version_id, ) finally: - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_bucket(bucket_name=bucket_name) def test_remove_object(log_entry): @@ -1070,15 +1070,15 @@ def _test_get_object(log_entry, sse=None, version_check=False): "object_name": object_name, } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) version_id = None try: if version_check: - _CLIENT.set_bucket_versioning( + _client.set_bucket_versioning( bucket_name=bucket_name, config=VersioningConfig(ENABLED), ) - result = _CLIENT.put_object( + result = _client.put_object( bucket_name=bucket_name, object_name=object_name, data=LimitedRandomReader(length), @@ -1087,7 +1087,7 @@ def _test_get_object(log_entry, sse=None, version_check=False): ) version_id = result.version_id # Get/Download a full object, iterate on response to save to disk - object_data = _CLIENT.get_object( + object_data = _client.get_object( bucket_name=bucket_name, object_name=object_name, ssec=sse, @@ -1098,12 +1098,12 @@ def _test_get_object(log_entry, sse=None, version_check=False): shutil.copyfileobj(object_data, file_data) os.remove(newfile) finally: - _CLIENT.remove_object( + _client.remove_object( bucket_name=bucket_name, object_name=object_name, version_id=version_id, ) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_bucket(bucket_name=bucket_name) def test_get_object(log_entry, sse=None): @@ -1135,15 +1135,15 @@ def _test_fget_object(log_entry, sse=None, version_check=False): "file_path": tmpfile } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) version_id = None try: if version_check: - _CLIENT.set_bucket_versioning( + _client.set_bucket_versioning( bucket_name=bucket_name, config=VersioningConfig(ENABLED), ) - result = _CLIENT.put_object( + result = _client.put_object( bucket_name=bucket_name, object_name=object_name, data=LimitedRandomReader(length), @@ -1152,7 +1152,7 @@ def _test_fget_object(log_entry, sse=None, version_check=False): ) version_id = result.version_id # Get/Download a full object and save locally at path - _CLIENT.fget_object( + _client.fget_object( bucket_name=bucket_name, object_name=object_name, 
file_path=tmpfile, @@ -1161,12 +1161,12 @@ def _test_fget_object(log_entry, sse=None, version_check=False): ) os.remove(tmpfile) finally: - _CLIENT.remove_object( + _client.remove_object( bucket_name=bucket_name, object_name=object_name, version_id=version_id, ) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_bucket(bucket_name=bucket_name) def test_fget_object(log_entry, sse=None): @@ -1199,9 +1199,9 @@ def test_get_object_with_default_length( # pylint: disable=invalid-name "offset": offset } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) try: - _CLIENT.put_object( + _client.put_object( bucket_name=bucket_name, object_name=object_name, data=LimitedRandomReader(size), @@ -1209,7 +1209,7 @@ def test_get_object_with_default_length( # pylint: disable=invalid-name sse=sse, ) # Get half of the object - object_data = _CLIENT.get_object( + object_data = _client.get_object( bucket_name=bucket_name, object_name=object_name, offset=offset, @@ -1225,8 +1225,8 @@ def test_get_object_with_default_length( # pylint: disable=invalid-name if new_file_size != length: raise ValueError('Unexpected file size after running ') finally: - _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_object(bucket_name=bucket_name, object_name=object_name) + _client.remove_bucket(bucket_name=bucket_name) def test_get_partial_object(log_entry, sse=None): @@ -1248,9 +1248,9 @@ def test_get_partial_object(log_entry, sse=None): "offset": offset } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) try: - _CLIENT.put_object( + _client.put_object( bucket_name=bucket_name, object_name=object_name, data=LimitedRandomReader(size), @@ -1258,7 +1258,7 @@ def test_get_partial_object(log_entry, sse=None): sse=sse, ) # Get half of the object - object_data = _CLIENT.get_object( + object_data = _client.get_object( bucket_name=bucket_name, object_name=object_name, offset=offset, @@ -1275,8 +1275,8 @@ def test_get_partial_object(log_entry, sse=None): if new_file_size != length: raise ValueError('Unexpected file size after running ') finally: - _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_object(bucket_name=bucket_name, object_name=object_name) + _client.remove_bucket(bucket_name=bucket_name) def _test_list_objects(log_entry, use_api_v1=False, version_check=False): @@ -1293,24 +1293,24 @@ def _test_list_objects(log_entry, use_api_v1=False, version_check=False): "recursive": is_recursive, } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) version_id1 = None version_id2 = None try: if version_check: - _CLIENT.set_bucket_versioning( + _client.set_bucket_versioning( bucket_name=bucket_name, config=VersioningConfig(ENABLED), ) size = 1 * KB - result = _CLIENT.put_object( + result = _client.put_object( bucket_name=bucket_name, object_name=object_name + "-1", data=LimitedRandomReader(size), length=size, ) version_id1 = result.version_id - result = _CLIENT.put_object( + result = _client.put_object( bucket_name=bucket_name, object_name=object_name + "-2", data=LimitedRandomReader(size), @@ -1318,7 +1318,7 @@ def _test_list_objects(log_entry, use_api_v1=False, version_check=False): ) version_id2 = result.version_id # List all object paths in bucket. 
- objects = _CLIENT.list_objects( + objects = _client.list_objects( bucket_name=bucket_name, prefix='', recursive=is_recursive, @@ -1335,17 +1335,17 @@ def _test_list_objects(log_entry, use_api_v1=False, version_check=False): f"got:{obj.version_id}" ) finally: - _CLIENT.remove_object( + _client.remove_object( bucket_name=bucket_name, object_name=object_name + "-1", version_id=version_id1, ) - _CLIENT.remove_object( + _client.remove_object( bucket_name=bucket_name, object_name=object_name + "-2", version_id=version_id2, ) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_bucket(bucket_name=bucket_name) def test_list_objects_v1(log_entry): @@ -1363,7 +1363,7 @@ def _test_list_objects_api(bucket_name, expected_no, **kwargs): # argv is composed of prefix and recursive arguments of # list_objects api. They are both supposed to be passed as strings. - objects = _CLIENT.list_objects(bucket_name=bucket_name, **kwargs) + objects = _client.list_objects(bucket_name=bucket_name, **kwargs) # expect all objects to be listed no_of_files = 0 @@ -1391,14 +1391,14 @@ def test_list_objects_with_prefix(log_entry): "object_name": object_name, } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) try: size = 1 * KB no_of_created_files = 4 path_prefix = "" # Create files and directories for i in range(no_of_created_files): - _CLIENT.put_object( + _client.put_object( bucket_name=bucket_name, object_name=f"{path_prefix}{i}_{object_name}", data=LimitedRandomReader(size), @@ -1455,12 +1455,12 @@ def test_list_objects_with_prefix(log_entry): finally: path_prefix = "" for i in range(no_of_created_files): - _CLIENT.remove_object( + _client.remove_object( bucket_name=bucket_name, object_name=f"{path_prefix}{i}_{object_name}", ) path_prefix = f"{path_prefix}{i}/" - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_bucket(bucket_name=bucket_name) # Test passes log_entry["args"]["prefix"] = ( "Several prefix/recursive combinations are tested") @@ -1481,13 +1481,13 @@ def test_list_objects_with_1001_files( # pylint: disable=invalid-name "object_name": f"{object_name}_0 ~ {0}_1000", } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) try: size = 1 * KB no_of_created_files = 2000 # Create files and directories for i in range(no_of_created_files): - _CLIENT.put_object( + _client.put_object( bucket_name=bucket_name, object_name=f"{object_name}_{i}", data=LimitedRandomReader(size), @@ -1498,11 +1498,11 @@ def test_list_objects_with_1001_files( # pylint: disable=invalid-name _test_list_objects_api(bucket_name, no_of_created_files) finally: for i in range(no_of_created_files): - _CLIENT.remove_object( + _client.remove_object( bucket_name=bucket_name, object_name=f"{object_name}_{i}", ) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_bucket(bucket_name=bucket_name) def test_list_objects(log_entry): @@ -1528,16 +1528,16 @@ def test_presigned_get_object_default_expiry( # pylint: disable=invalid-name "object_name": object_name, } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) try: size = 1 * KB - _CLIENT.put_object( + _client.put_object( bucket_name=bucket_name, object_name=object_name, data=LimitedRandomReader(size), length=size, ) - presigned_get_object_url = _CLIENT.presigned_get_object( + presigned_get_object_url = _client.presigned_get_object( bucket_name=bucket_name, object_name=object_name, ) @@ -1548,8 +1548,8 @@ def 
test_presigned_get_object_default_expiry( # pylint: disable=invalid-name f"code: {response.code}, error: {response.data}" ) finally: - _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_object(bucket_name=bucket_name, object_name=object_name) + _client.remove_bucket(bucket_name=bucket_name) def test_presigned_get_object_expiry( # pylint: disable=invalid-name @@ -1565,16 +1565,16 @@ def test_presigned_get_object_expiry( # pylint: disable=invalid-name "object_name": object_name, } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) try: size = 1 * KB - _CLIENT.put_object( + _client.put_object( bucket_name=bucket_name, object_name=object_name, data=LimitedRandomReader(size), length=size, ) - presigned_get_object_url = _CLIENT.presigned_get_object( + presigned_get_object_url = _client.presigned_get_object( bucket_name=bucket_name, object_name=object_name, expires=timedelta(seconds=120), @@ -1604,7 +1604,7 @@ def test_presigned_get_object_expiry( # pylint: disable=invalid-name f"code: {response.code}, error: {response.data}" ) - presigned_get_object_url = _CLIENT.presigned_get_object( + presigned_get_object_url = _client.presigned_get_object( bucket_name=bucket_name, object_name=object_name, expires=timedelta(seconds=1), @@ -1624,8 +1624,8 @@ def test_presigned_get_object_expiry( # pylint: disable=invalid-name if response.status == 200: raise ValueError('Presigned get url failed to expire!') finally: - _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_object(bucket_name=bucket_name, object_name=object_name) + _client.remove_bucket(bucket_name=bucket_name) def test_presigned_get_object_response_headers( # pylint: disable=invalid-name @@ -1645,10 +1645,10 @@ def test_presigned_get_object_response_headers( # pylint: disable=invalid-name "content_language": content_language, } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) try: size = 1 * KB - _CLIENT.put_object( + _client.put_object( bucket_name=bucket_name, object_name=object_name, data=LimitedRandomReader(size), @@ -1658,7 +1658,7 @@ def test_presigned_get_object_response_headers( # pylint: disable=invalid-name 'response-content-type': content_type, 'response-content-language': content_language, }) - presigned_get_object_url = _CLIENT.presigned_get_object( + presigned_get_object_url = _client.presigned_get_object( bucket_name=bucket_name, object_name=object_name, expires=timedelta(seconds=120), @@ -1690,8 +1690,8 @@ def test_presigned_get_object_response_headers( # pylint: disable=invalid-name "code: {response.code}, error: {response.data}" ) finally: - _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_object(bucket_name=bucket_name, object_name=object_name) + _client.remove_bucket(bucket_name=bucket_name) def test_presigned_get_object_range( # pylint: disable=invalid-name @@ -1707,17 +1707,17 @@ def test_presigned_get_object_range( # pylint: disable=invalid-name "object_name": object_name, } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) try: size = 556433 # on purpose its unaligned - _CLIENT.put_object( + _client.put_object( bucket_name=bucket_name, object_name=object_name, data=LimitedRandomReader(size), length=size, ) - presigned_get_object_url = 
_CLIENT.presigned_get_object( + presigned_get_object_url = _client.presigned_get_object( bucket_name=bucket_name, object_name=object_name, expires=timedelta(seconds=120), @@ -1742,8 +1742,8 @@ def test_presigned_get_object_range( # pylint: disable=invalid-name "code: {response.code}, error: {response.data}" ) finally: - _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_object(bucket_name=bucket_name, object_name=object_name) + _client.remove_bucket(bucket_name=bucket_name) def test_presigned_get_object_version( # pylint: disable=invalid-name @@ -1759,22 +1759,22 @@ def test_presigned_get_object_version( # pylint: disable=invalid-name "object_name": object_name, } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) version_id = None try: - _CLIENT.set_bucket_versioning( + _client.set_bucket_versioning( bucket_name=bucket_name, config=VersioningConfig(ENABLED), ) size = 1 * KB - result = _CLIENT.put_object( + result = _client.put_object( bucket_name=bucket_name, object_name=object_name, data=LimitedRandomReader(size), length=size, ) version_id = result.version_id - presigned_get_object_url = _CLIENT.presigned_get_object( + presigned_get_object_url = _client.presigned_get_object( bucket_name=bucket_name, object_name=object_name, version_id=version_id, @@ -1786,12 +1786,12 @@ def test_presigned_get_object_version( # pylint: disable=invalid-name f"code: {response.code}, error: {response.data}" ) finally: - _CLIENT.remove_object( + _client.remove_object( bucket_name=bucket_name, object_name=object_name, version_id=version_id, ) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_bucket(bucket_name=bucket_name) def test_presigned_put_object_default_expiry( # pylint: disable=invalid-name @@ -1807,9 +1807,9 @@ def test_presigned_put_object_default_expiry( # pylint: disable=invalid-name "object_name": object_name, } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) try: - presigned_put_object_url = _CLIENT.presigned_put_object( + presigned_put_object_url = _client.presigned_put_object( bucket_name=bucket_name, object_name=object_name, ) @@ -1821,13 +1821,13 @@ def test_presigned_put_object_default_expiry( # pylint: disable=invalid-name f"Presigned PUT object URL {presigned_put_object_url} failed; " f"code: {response.code}, error: {response.data}" ) - _CLIENT.stat_object( + _client.stat_object( bucket_name=bucket_name, object_name=object_name, ) finally: - _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_object(bucket_name=bucket_name, object_name=object_name) + _client.remove_bucket(bucket_name=bucket_name) def test_presigned_put_object_expiry( # pylint: disable=invalid-name @@ -1843,9 +1843,9 @@ def test_presigned_put_object_expiry( # pylint: disable=invalid-name "object_name": object_name, } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) try: - presigned_put_object_url = _CLIENT.presigned_put_object( + presigned_put_object_url = _client.presigned_put_object( bucket_name=bucket_name, object_name=object_name, expires=timedelta(seconds=1), @@ -1858,8 +1858,8 @@ def test_presigned_put_object_expiry( # pylint: disable=invalid-name if response.status == 200: raise ValueError('Presigned put url failed to expire!') finally: - _CLIENT.remove_object(bucket_name=bucket_name, 
object_name=object_name) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_object(bucket_name=bucket_name, object_name=object_name) + _client.remove_bucket(bucket_name=bucket_name) def test_presigned_post_policy(log_entry): @@ -1872,7 +1872,7 @@ def test_presigned_post_policy(log_entry): "bucket_name": bucket_name, } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) try: no_of_days = 10 prefix = 'objectPrefix/' @@ -1889,9 +1889,9 @@ def test_presigned_post_policy(log_entry): "content_length_range": "64KiB to 10MiB", "Content-Type": "image/", } - _CLIENT.presigned_post_policy(policy=policy) + _client.presigned_post_policy(policy=policy) finally: - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_bucket(bucket_name=bucket_name) def test_thread_safe(log_entry): @@ -1902,15 +1902,15 @@ def test_thread_safe(log_entry): "bucket_name": bucket_name, "object_name": object_name, } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) - test_file_sha256sum = _get_sha256sum(_LARGE_FILE) + test_file_sha256sum = _get_sha256sum(_large_file) exceptions = [] def get_object_and_check(index): local_file = f"copied_file_{index}" try: - _CLIENT.fget_object( + _client.fget_object( bucket_name=bucket_name, object_name=object_name, file_path=local_file, @@ -1924,10 +1924,10 @@ def get_object_and_check(index): _ = os.path.isfile(local_file) and os.remove(local_file) try: - _CLIENT.fput_object( + _client.fput_object( bucket_name=bucket_name, object_name=object_name, - file_path=_LARGE_FILE, + file_path=_large_file, ) thread_count = 5 @@ -1944,8 +1944,8 @@ def get_object_and_check(index): if exceptions: raise exceptions[0] finally: - _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_object(bucket_name=bucket_name, object_name=object_name) + _client.remove_bucket(bucket_name=bucket_name) def test_get_bucket_policy(log_entry): @@ -1956,14 +1956,14 @@ def test_get_bucket_policy(log_entry): log_entry["args"] = { "bucket_name": bucket_name, } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) try: - _CLIENT.get_bucket_policy(bucket_name=bucket_name) + _client.get_bucket_policy(bucket_name=bucket_name) except S3Error as exc: if exc.code != "NoSuchBucketPolicy": raise finally: - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_bucket(bucket_name=bucket_name) def _get_policy_actions(stat): @@ -1982,7 +1982,7 @@ def listit(value): def _validate_policy(bucket_name, policy): """Validate policy.""" policy_dict = json.loads( - _CLIENT.get_bucket_policy(bucket_name=bucket_name)) + _client.get_bucket_policy(bucket_name=bucket_name)) actions = _get_policy_actions(policy_dict.get('Statement')) expected_actions = _get_policy_actions(policy.get('Statement')) return expected_actions == actions @@ -1997,16 +1997,16 @@ def test_get_bucket_notification(log_entry): "bucket_name": bucket_name, } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) try: - config = _CLIENT.get_bucket_notification(bucket_name=bucket_name) + config = _client.get_bucket_notification(bucket_name=bucket_name) if ( config.cloud_func_config_list or config.queue_config_list or config.topic_config_list ): raise ValueError("Failed to receive an empty bucket notification") finally: - _CLIENT.remove_bucket(bucket_name=bucket_name) + 
_client.remove_bucket(bucket_name=bucket_name) def test_set_bucket_policy_readonly(log_entry): @@ -2018,7 +2018,7 @@ def test_set_bucket_policy_readonly(log_entry): "bucket_name": bucket_name, } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) try: # read-only policy policy = { @@ -2048,7 +2048,7 @@ def test_set_bucket_policy_readonly(log_entry): ] } # Set read-only policy - _CLIENT.set_bucket_policy( + _client.set_bucket_policy( bucket_name=bucket_name, policy=json.dumps(policy), ) @@ -2056,7 +2056,7 @@ def test_set_bucket_policy_readonly(log_entry): if not _validate_policy(bucket_name, policy): raise ValueError('Failed to set ReadOnly bucket policy') finally: - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_bucket(bucket_name=bucket_name) def test_set_bucket_policy_readwrite( # pylint: disable=invalid-name @@ -2069,7 +2069,7 @@ def test_set_bucket_policy_readwrite( # pylint: disable=invalid-name "bucket_name": bucket_name, } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) try: # Read-write policy policy = { @@ -2110,7 +2110,7 @@ def test_set_bucket_policy_readwrite( # pylint: disable=invalid-name ] } # Set read-write policy - _CLIENT.set_bucket_policy( + _client.set_bucket_policy( bucket_name=bucket_name, policy=json.dumps(policy), ) @@ -2118,7 +2118,7 @@ def test_set_bucket_policy_readwrite( # pylint: disable=invalid-name if not _validate_policy(bucket_name, policy): raise ValueError('Failed to set ReadOnly bucket policy') finally: - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_bucket(bucket_name=bucket_name) def _test_remove_objects(log_entry, version_check=False): @@ -2130,12 +2130,12 @@ def _test_remove_objects(log_entry, version_check=False): "bucket_name": bucket_name, } - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) object_names = [] delete_object_list = [] try: if version_check: - _CLIENT.set_bucket_versioning( + _client.set_bucket_versioning( bucket_name=bucket_name, config=VersioningConfig(ENABLED), ) @@ -2143,7 +2143,7 @@ def _test_remove_objects(log_entry, version_check=False): # Upload some new objects to prepare for multi-object delete test. for i in range(10): object_name = f"prefix-{i}" - result = _CLIENT.put_object( + result = _client.put_object( bucket_name=bucket_name, object_name=object_name, data=LimitedRandomReader(size), @@ -2161,7 +2161,7 @@ def _test_remove_objects(log_entry, version_check=False): else DeleteObject(args[0], args[1]) ) # delete the objects in a single library call. 
- errs = _CLIENT.remove_objects( + errs = _client.remove_objects( bucket_name=bucket_name, delete_object_list=delete_object_list, ) @@ -2169,13 +2169,13 @@ def _test_remove_objects(log_entry, version_check=False): raise ValueError(f"Remove objects err: {err}") finally: # Try to clean everything to keep our server intact - errs = _CLIENT.remove_objects( + errs = _client.remove_objects( bucket_name=bucket_name, delete_object_list=delete_object_list, ) for err in errs: raise ValueError(f"Remove objects err: {err}") - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_bucket(bucket_name=bucket_name) def test_remove_objects(log_entry): @@ -2193,21 +2193,21 @@ def test_remove_bucket(log_entry): # Get a unique bucket_name bucket_name = _gen_bucket_name() - if _IS_AWS: + if _is_aws: bucket_name += ".unique" log_entry["args"] = { "bucket_name": bucket_name, } - if _IS_AWS: + if _is_aws: log_entry["args"]["location"] = location = "us-east-1" - _CLIENT.make_bucket(bucket_name=bucket_name, location=location) + _client.make_bucket(bucket_name=bucket_name, location=location) else: - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) # Removing bucket. This operation will only work if your bucket is empty. - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_bucket(bucket_name=bucket_name) def _test_upload_snowball_objects(log_entry, staging_filename=None): @@ -2221,11 +2221,11 @@ def _test_upload_snowball_objects(log_entry, staging_filename=None): } try: - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) size = 3 * MB reader1 = LimitedRandomReader(size) reader2 = LimitedRandomReader(size) - _CLIENT.upload_snowball_objects( + _client.upload_snowball_objects( bucket_name=bucket_name, objects=[ SnowballObject("my-object1", data=io.BytesIO(b"py"), length=2), @@ -2241,13 +2241,13 @@ def _test_upload_snowball_objects(log_entry, staging_filename=None): ) _test_list_objects_api(bucket_name, 3) finally: - _CLIENT.remove_object(bucket_name=bucket_name, + _client.remove_object(bucket_name=bucket_name, object_name="my-object1") - _CLIENT.remove_object(bucket_name=bucket_name, + _client.remove_object(bucket_name=bucket_name, object_name="my-object2") - _CLIENT.remove_object(bucket_name=bucket_name, + _client.remove_object(bucket_name=bucket_name, object_name="my-object3") - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_bucket(bucket_name=bucket_name) if staging_filename and os.path.exists(staging_filename): os.remove(staging_filename) @@ -2276,18 +2276,18 @@ def test_set_get_bucket_versioning(log_entry): excl_prefixes = ['prefix1', 'prefix2'] - _CLIENT.make_bucket(bucket_name=bucket_name) + _client.make_bucket(bucket_name=bucket_name) try: # Test all fields of versioning configuration - _CLIENT.set_bucket_versioning( + _client.set_bucket_versioning( bucket_name=bucket_name, config=VersioningConfig(status=ENABLED, exclude_folders=True, excluded_prefixes=excl_prefixes), ) - vcfg = _CLIENT.get_bucket_versioning(bucket_name=bucket_name) + vcfg = _client.get_bucket_versioning(bucket_name=bucket_name) if vcfg.status != ENABLED: raise ValueError(f'(1) unexpected get_bucket_versioning result: ' f'status: {vcfg.status}') @@ -2299,12 +2299,12 @@ def test_set_get_bucket_versioning(log_entry): f'excluded_prefixes: {vcfg.excluded_prefixes}') # Disable all fields of versioning configuration - _CLIENT.set_bucket_versioning( + _client.set_bucket_versioning( bucket_name=bucket_name, 
config=VersioningConfig(status=SUSPENDED), ) - vcfg = _CLIENT.get_bucket_versioning(bucket_name=bucket_name) + vcfg = _client.get_bucket_versioning(bucket_name=bucket_name) if vcfg.status != SUSPENDED: raise ValueError(f'(2) unexpected get_bucket_versioning result: ' f'status: {vcfg.status}') @@ -2316,7 +2316,7 @@ def test_set_get_bucket_versioning(log_entry): f'excluded_prefixes: {vcfg.excluded_prefixes}') finally: - _CLIENT.remove_bucket(bucket_name=bucket_name) + _client.remove_bucket(bucket_name=bucket_name) def main(): @@ -2324,7 +2324,7 @@ def main(): Functional testing of minio python library. """ # pylint: disable=global-statement - global _CLIENT, _TEST_FILE, _LARGE_FILE, _IS_AWS + global _client, _test_file, _large_file, _is_aws access_key = os.getenv('ACCESS_KEY') secret_key = os.getenv('SECRET_KEY') @@ -2336,13 +2336,13 @@ def main(): secret_key = 'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG' secure = True - _CLIENT = Minio( + _client = Minio( endpoint=server_endpoint, access_key=access_key, secret_key=secret_key, secure=secure, ) - _IS_AWS = ".amazonaws.com" in server_endpoint + _is_aws = ".amazonaws.com" in server_endpoint # Check if we are running in the mint environment. data_dir = os.getenv('DATA_DIR', '/mint/data') @@ -2354,18 +2354,18 @@ def main(): ) # Enable trace - # _CLIENT.trace_on(sys.stderr) + # _client.trace_on(sys.stderr) - _TEST_FILE = 'datafile-1-MB' - _LARGE_FILE = 'datafile-11-MB' + _test_file = 'datafile-1-MB' + _large_file = 'datafile-11-MB' if is_mint_env: # Choose data files - _TEST_FILE = os.path.join(data_dir, 'datafile-1-MB') - _LARGE_FILE = os.path.join(data_dir, 'datafile-11-MB') + _test_file = os.path.join(data_dir, 'datafile-1-MB') + _large_file = os.path.join(data_dir, 'datafile-11-MB') else: - with open(_TEST_FILE, 'wb') as file_data: + with open(_test_file, 'wb') as file_data: shutil.copyfileobj(LimitedRandomReader(1 * MB), file_data) - with open(_LARGE_FILE, 'wb') as file_data: + with open(_large_file, 'wb') as file_data: shutil.copyfileobj(LimitedRandomReader(11 * MB), file_data) ssec = None @@ -2469,8 +2469,8 @@ def main(): # Remove temporary files. if not is_mint_env: - os.remove(_TEST_FILE) - os.remove(_LARGE_FILE) + os.remove(_test_file) + os.remove(_large_file) if __name__ == "__main__":
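
Illustrative sketch (not part of the patch): the snippet below walks through the semantics the hand-rolled freeze in minio/error.py provides, assuming the S3Error constructor shown in the diff above. response=None is a stand-in for the BaseHTTPResponse a real caller would pass; all field values are made up for the example.

    import sys

    from minio.error import S3Error

    # Build an error the way the client does internally (response stubbed).
    err = S3Error(
        response=None,
        code="NoSuchKey",
        message="Object does not exist",
        resource="/my-bucket/my-object",
        request_id="REQ123",
        host_id="HOST123",
        bucket_name="my-bucket",
        object_name="my-object",
    )

    # Value semantics are preserved without @dataclass(frozen=True):
    # __eq__ and __hash__ compare the S3 fields and ignore the response.
    same = err.copy(code="NoSuchKey", message="Object does not exist")
    assert err == same and hash(err) == hash(same)

    # Application-level attributes stay frozen after __init__ ...
    try:
        err.code = "AccessDenied"
    except AttributeError:
        pass  # expected: S3Error forbids attribute assignment

    # ... while BaseException internals remain assignable; the frozen
    # dataclass rejected these with FrozenInstanceError (issue #1519).
    err.__context__ = ValueError("earlier failure")
    err.__cause__ = None

    if sys.version_info >= (3, 11):
        # add_note() assigns __notes__ through normal setattr, so it
        # only works because "__notes__" is in the _EXC_MUTABLES set.
        err.add_note("bucket was created moments before this GET")

In CPython the raise machinery attaches __traceback__ at the C level, bypassing __setattr__, so the whitelist matters chiefly for Python-level assignments like the ones above and for libraries that rewrite __context__ or __traceback__ on captured exceptions.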