Skip to content

Commit b34a0e3

Browse files
authored
Issue 59: the gettingData example was overwriting parts of objects when retrieving multi-blob objects from BP. Updated how the file is opened so that zero padding no longer overwrites existing data on seek. (#60)
1 parent 426e836 commit b34a0e3

File tree

2 files changed

+5
-4
lines changed

2 files changed

+5
-4
lines changed

samples/gettingData.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -80,8 +80,9 @@
8080
# For each blob within this chunk, retrieve the data and land it on the destination.
8181
for obj in chunk['ObjectList']:
8282
# Open the destination file and seek to the offset corresponding with this blob.
83-
objectStream = open(objectNameToDestinationPathMap[obj['Name']], "wb")
84-
objectStream.seek(int(obj['Offset']))
83+
fd = os.open(objectNameToDestinationPathMap[obj['Name']], os.O_CREAT | os.O_WRONLY)
84+
objectStream = os.fdopen(fd, 'wb')
85+
objectStream.seek(int(obj['Offset']), 0)
8586

8687
# Get the blob for the current object and write it to the destination.
8788
client.get_object(ds3.GetObjectRequest(bucketName,

samples/puttingData.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -69,10 +69,10 @@ def pathForResource(resourceName):
6969
for chunk in chunks:
7070
if not chunk['ChunkId'] in chunkIds:
7171
continue
72-
72+
7373
chunkIds.remove(chunk['ChunkId'])
7474
for obj in chunk['ObjectList']:
75-
# it is possible that if we start resending a chunk, due to the program crashing, that
75+
# it is possible that if we start resending a chunk, due to the program crashing, that
7676
# some objects will already be in cache. Check to make sure that they are not, and then
7777
# send the object to Spectra S3
7878
if obj['InCache'] == 'false':

0 commit comments

Comments (0)