# specific language governing permissions and limitations under the License.

import os
-import tempfile
+import time

from ds3 import ds3

    "folder/folder2/ulysses.txt": "resources/ulysses.txt"
}

-# this method is used to get the size of the files
+# this method is used to map a file path to a Ds3PutObject
# we need two parameters because the S3 API wants the name that the object will take on the server, but the size obviously needs to come from the file on the current file system
-def getSize(fileName, realFileName):
+def fileNameToDs3PutObject(fileName, realFileName):
    size = os.stat(realFileName).st_size
-    return ds3.FileObject(fileName, size)
+    return ds3.Ds3PutObject(fileName, size)

# get the sizes for each file
-fileList = ds3.FileObjectList([getSize(key, fileListMapping[key]) for key in list(fileListMapping.keys())])
+fileList = [fileNameToDs3PutObject(key, fileListMapping[key]) for key in fileListMapping.keys()]
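+# note: the put-bulk request below now takes a plain Python list of
+# Ds3PutObjects rather than a ds3.FileObjectList; e.g. the mapping above
+# yields an object named "folder/folder2/ulysses.txt" whose size is read
+# from resources/ulysses.txt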

# submit the put bulk request to DS3
bulkResult = client.put_bulk_job_spectra_s3(ds3.PutBulkJobSpectraS3Request(bucketName, fileList))
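+# the response includes the JobId for the new bulk job; the transfer loop
+# below passes it with every put_object call so the puts are tied to this job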
@@ -81,17 +81,21 @@ def getSize(fileName, realFileName):
            # if the program crashed and we are resending a chunk, some objects
            # may already be in cache; check that an object is not cached before
            # sending it to Spectra S3
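+            # note: InCache comes back from the server's XML response as the
+            # string 'true' or 'false', so it must be compared as a string;
+            # a bare truthiness test would treat 'false' as True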
-            if not obj['InCache']:
-                client.put_object(PutObjectRequest(bucketName,
-                                                   obj['Name'],
-                                                   obj['Offset'],
-                                                   obj['Length'],
-                                                   bulkResult.result['JobId'],
-                                                   real_file_name=fileListMapping[obj.name]))
+            if obj['InCache'] == 'false':
+                # open the local file, seek to the chunk's offset, and stream
+                # just this chunk to the server
+                objectDataStream = open(fileListMapping[obj['Name']], "rb")
+                objectDataStream.seek(int(obj['Offset']), 0)
+                client.put_object(ds3.PutObjectRequest(bucket_name=bucketName,
+                                                       object_name=obj['Name'],
+                                                       offset=obj['Offset'],
+                                                       length=obj['Length'],
+                                                       stream=objectDataStream,
+                                                       job=bulkResult.result['JobId']))
+                # close the stream so we do not leak a file handle per chunk
+                objectDataStream.close()

# we now verify that all our objects have been sent to DS3
bucketResponse = client.get_bucket(ds3.GetBucketRequest(bucketName))

+print("\nFiles in bucket:")
for obj in bucketResponse.result['ContentsList']:
    print(obj['Key'])

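+# with the mapping above, this listing should include keys such as
+# folder/folder2/ulysses.txt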
@@ -100,15 +104,15 @@ def getSize(fileName, realFileName):

client.delete_folder_recursively_spectra_s3(ds3.DeleteFolderRecursivelySpectraS3Request(bucketName, "folder/folder2"))
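+# note: the recursive folder delete above removes every object stored under
+# the 'folder/folder2' prefix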

-print("\nAfter deletion number 1:")
+print("\nAfter deleting 'folder/folder2':")
bucketResponse = client.get_bucket(ds3.GetBucketRequest(bucketName))

for obj in bucketResponse.result['ContentsList']:
    print(obj['Key'])

client.delete_folder_recursively_spectra_s3(ds3.DeleteFolderRecursivelySpectraS3Request(bucketName, "folder"))

-print("\nAfter deletion number 2:")
+print("\nAfter deleting 'folder':")
bucketResponse = client.get_bucket(ds3.GetBucketRequest(bucketName))

for obj in bucketResponse.result['ContentsList']: