Skip to content

Commit 09decd7

Browse files
meyertst-awsford-at-aws
authored and committed
Editing changes
1 parent 975ceb0 commit 09decd7

File tree

1 file changed

+57
-78
lines changed

1 file changed

+57
-78
lines changed

python/example_code/s3-directory-buckets/s3_express_getting_started.py

Lines changed: 57 additions & 78 deletions
Original file line numberDiff line numberDiff line change
@@ -72,8 +72,8 @@ def s3_express_scenario(self):
7272
print(
7373
"""
7474
Let's get started! First, please note that S3 Express One Zone works best when working within the AWS infrastructure,
75-
specifically when working in the same Availability Zone. To see the best results in this example, and when you implement
76-
Directory buckets into your infrastructure, it is best to put your Compute resources in the same AZ as your Directory
75+
specifically when working in the same Availability Zone. To see the best results in this example and when you implement
76+
Directory buckets into your infrastructure, it is best to put your compute resources in the same AZ as your Directory
7777
bucket.
7878
"""
7979
)
@@ -110,12 +110,10 @@ def create_vpc_and_users(self) -> None:
110110
"""
111111
# Configure a gateway VPC endpoint. This is the recommended method to allow S3 Express One Zone traffic without
112112
# the need to pass through an internet gateway or NAT device.
113-
print("")
114-
print(
115-
"1. First, we'll set up a new VPC and VPC Endpoint if this program is running in an EC2 instance in the same AZ as your Directory buckets will be."
116-
)
117-
print(
118-
"Are you running this in an EC2 instance located in the same AZ as your intended Directory buckets?"
113+
print("""
114+
1. First, we'll set up a new VPC and VPC Endpoint if this program is running in an EC2 instance in the same AZ as your
115+
Directory buckets will be. Are you running this in an EC2 instance located in the same AZ as your intended Directory buckets?
116+
"""
119117
)
120118
if q.ask("Do you want to setup a VPC Endpoint? (y/n) ", q.is_yesno):
121119
print(
@@ -125,10 +123,11 @@ def create_vpc_and_users(self) -> None:
125123
press_enter_to_continue()
126124
else:
127125
print("Skipping the VPC setup. Don't forget to use this in production!")
128-
print("")
129-
print("2. Policies, users, and roles with CDK.")
130126
print(
131-
"Now, we'll set up some policies, roles, and a user. This user will only have permissions to do S3 Express One Zone actions."
127+
"""
128+
2. Policies, users, and roles with CDK.
129+
Now, we'll set up some policies, roles, and a user. This user will only have permissions to do S3 Express One Zone actions.
130+
"""
132131
)
133132
press_enter_to_continue()
134133
stack_name = f"cfn-stack-s3-express-basics--{uuid.uuid4()}"
@@ -162,12 +161,11 @@ def setup_clients_and_buckets(
162161
regular_credentials = self.create_access_key(regular_user_name)
163162
express_credentials = self.create_access_key(express_user_name)
164163
# 3. Create an additional client using the credentials with S3 Express permissions.
165-
print("")
166164
print(
167-
"3. Create an additional client using the credentials with S3 Express permissions."
168-
)
169-
print(
170-
"This client is created with the credentials associated with the user account with the S3 Express policy attached, so it can perform S3 Express operations."
165+
"""
166+
3. Create an additional client using the credentials with S3 Express permissions. This client is created with the
167+
credentials associated with the user account with the S3 Express policy attached, so it can perform S3 Express operations.
168+
"""
171169
)
172170
press_enter_to_continue()
173171
self.s3_regular_client = self.create_s3__client_with_access_key_credentials(
@@ -177,23 +175,21 @@ def setup_clients_and_buckets(
177175
express_credentials
178176
)
179177
print(
180-
"All the roles and policies were created an attached to the user. Then, a new S3 Client and Service were created using that user's credentials."
181-
)
182-
print(
183-
"We can now use this client to make calls to S3 Express operations. Keeping permissions in mind (and adhering to least-privilege) is crucial to S3 Express."
178+
"""
179+
All the roles and policies were created and attached to the user. Then a new S3 Client was created using
180+
that user's credentials. We can now use this client to make calls to S3 Express operations. Keeping permissions in mind
181+
(and adhering to least-privilege) is crucial to S3 Express.
182+
"""
184183
)
185184
press_enter_to_continue()
186185
# 4. Create two buckets.
187-
print("")
188-
print("3. Create two buckets.")
189186
print(
190-
"Now we will create a Directory bucket, which is the linchpin of the S3 Express One Zone service."
191-
)
192-
print(
193-
"Directory buckets behave in different ways from regular S3 buckets, which we will explore here."
194-
)
195-
print(
196-
"We'll also create a normal bucket, put an object into the normal bucket, and copy it over to the Directory bucket."
187+
"""
188+
3. Create two buckets.
189+
Now we will create a Directory bucket which is the linchpin of the S3 Express One Zone service. Directory buckets
190+
behave in different ways from regular S3 buckets which we will explore here. We'll also create a normal bucket, put
191+
an object into the normal bucket, and copy it over to the Directory bucket.
192+
"""
197193
)
198194
# Create a directory bucket. These are different from normal S3 buckets in subtle ways.
199195
bucket_prefix = q.ask(
@@ -234,17 +230,12 @@ def create_session_and_add_objects(self) -> None:
234230
"""
235231
Create a session for the express S3 client and add objects to the buckets.
236232
"""
237-
print("")
238-
print("5. Create an object and copy it over.")
239-
print(
240-
"We'll create a basic object consisting of some text and upload it to the normal bucket."
241-
)
242-
print(
243-
"Next, we'll copy the object into the Directory bucket using the regular client."
244-
)
245-
print(
246-
"This works fine, because copy operations are not restricted for Directory buckets."
247-
)
233+
print("""
234+
5. Create an object and copy it over.
235+
We'll create a basic object consisting of some text and upload it to the normal bucket. Next we'll copy the object
236+
into the Directory bucket using the regular client. This works fine because copy operations are not restricted for
237+
Directory buckets.
238+
""")
248239
press_enter_to_continue()
249240
bucket_object = "basic-text-object"
250241
S3ExpressScenario.put_object(
@@ -262,13 +253,12 @@ def create_session_and_add_objects(self) -> None:
262253
bucket_object,
263254
)
264255
print(
265-
"It worked! It's important to remember the user permissions when interacting with Directory buckets."
266-
)
267-
print(
268-
"Instead of validating permissions on every call as normal buckets do, Directory buckets utilize the user credentials and session token to validate."
269-
)
270-
print(
271-
"This allows for much faster connection speeds on every call. For single calls, this is low, but for many concurrent calls, this adds up to a lot of time saved."
256+
"""
257+
It worked! It's important to remember the user permissions when interacting with Directory buckets. Instead of validating
258+
permissions on every call as normal buckets do, Directory buckets utilize the user credentials and session token to validate.
259+
This allows for much faster connection speeds on every call. For single calls, this is low, but for many concurrent calls
260+
this adds up to a lot of time saved.
261+
"""
272262
)
273263
press_enter_to_continue()
274264
return bucket_object
@@ -281,7 +271,11 @@ def demonstrate_performance(self, bucket_object: str) -> None:
281271
print("")
282272
print("6. Demonstrate performance difference.")
283273
print(
284-
"Now, let's do a performance test. We'll download the same object from each bucket $downloads times and compare the total time needed. Note: the performance difference will be much more pronounced if this example is run in an EC2 instance in the same AZ as the bucket."
274+
"""
275+
Now, let's do a performance test. We'll download the same object from each bucket 'downloads' times
276+
and compare the total time needed. Note: the performance difference will be much more pronounced if this
277+
example is run in an EC2 instance in the same Availability Zone as the bucket.
278+
"""
285279
)
286280
downloads = 1000
287281
print(
@@ -294,7 +288,7 @@ def demonstrate_performance(self, bucket_object: str) -> None:
294288
q.is_int,
295289
q.in_range(1, max_downloads),
296290
)
297-
# Download the object $downloads times from each bucket and time it to demonstrate the speed difference.
291+
# Download the object 'downloads' times from each bucket and time it to demonstrate the speed difference.
298292
print("Downloading from the Directory bucket.")
299293
directory_time_start = time.time_ns()
300294
for index in range(downloads):
@@ -331,27 +325,15 @@ def show_lexicographical_differences(self, bucket_object: str) -> None:
331325
This is done by creating a few objects in each bucket and listing them to show the difference.
332326
:param bucket_object: The object to use for the listing operations.
333327
"""
334-
print("")
335-
print("7. Populate the buckets to show the lexicographical difference.")
336-
print(
337-
"Now let's explore how Directory buckets store objects in a different manner to regular buckets."
338-
)
339-
print('The key is in the name "Directory!"')
340-
print(
341-
"Where regular buckets store their key/value pairs in a flat manner, Directory buckets use actual directories/folders."
342-
)
343-
print(
344-
"This allows for more rapid indexing, traversing, and therefore retrieval times!"
345-
)
346-
print(
347-
"The more segmented your bucket is, with lots of directories, sub-directories, and objects, the more efficient it becomes."
348-
)
349-
print(
350-
"This structural difference also causes ListObjects to behave differently, which can cause unexpected results."
351-
)
352-
print(
353-
"Let's add a few more objects with layered directories as see how the output of ListObjects changes."
354-
)
328+
print("""
329+
7. Populate the buckets to show the lexicographical difference.
330+
Now let's explore how Directory buckets store objects in a different manner to regular buckets. The key is in the name
331+
"Directory". Where regular buckets store their key/value pairs in a flat manner, Directory buckets use actual
332+
directories/folders. This allows for more rapid indexing, traversing, and therefore retrieval times! The more segmented
333+
your bucket is, with lots of directories, sub-directories, and objects, the more efficient it becomes. This structural
334+
difference also causes ListObjects to behave differently, which can cause unexpected results. Let's add a few more
335+
objects with layered directories to see how the output of ListObjects changes.
336+
""")
355337
press_enter_to_continue()
356338
# Populate a few more files in each bucket so that we can use ListObjects and show the difference.
357339
other_object = f"other/{bucket_object}"
@@ -387,14 +369,11 @@ def show_lexicographical_differences(self, bucket_object: str) -> None:
387369
print("Normal bucket content")
388370
for bucket_object in regular_bucket_objects:
389371
print(f" {bucket_object['Key']}")
390-
print(
391-
"Notice how the normal bucket lists objects in lexicographical order, while the directory bucket does not."
392-
)
393-
print(
394-
'This is because the normal bucket considers the whole "key" to be the object identifies, while the'
395-
)
396-
print(
397-
'directory bucket actually creates directories and uses the object "key" as a path to the object.'
372+
print("""
373+
Notice how the normal bucket lists objects in lexicographical order, while the directory bucket does not. This is
374+
because the normal bucket considers the whole "key" to be the object identifier, while the directory bucket actually
375+
creates directories and uses the object "key" as a path to the object.
376+
"""
398377
)
399378
press_enter_to_continue()
400379

0 commit comments

Comments
 (0)