1+ from __future__ import print_function
12import os , sys
23import boto , boto3
34import datetime
@@ -41,16 +42,16 @@ def killdeadAlarms(fleetId,monitorapp):
4142 cmd = 'aws cloudwatch delete-alarms --alarm-name ' + monitorapp + '_' + eachmachine
4243 subprocess .Popen (cmd .split ())
4344 time .sleep (3 ) #Avoid Rate exceeded error
44- print 'Deleted' , monitorapp + '_' + eachmachine , 'if it existed'
45- print 'Old alarms deleted'
45+ print ( 'Deleted' , monitorapp + '_' + eachmachine , 'if it existed' )
46+ print ( 'Old alarms deleted' )
4647
def seeIfLogExportIsDone(logExportId, pollInterval=30):
    """Block until a CloudWatch Logs export task leaves the PENDING/RUNNING states.

    Polls ``aws logs describe-export-tasks`` for the given task and returns once
    the task reports a terminal status, printing that final status code.

    Parameters:
        logExportId: the export task id returned by ``create-export-task``.
        pollInterval: seconds to sleep between polls (default 30, the original
            hard-coded value).

    NOTE(review): loops forever if the task never reaches a terminal state —
    no timeout, matching the original behavior.
    """
    while True:
        cmd = 'aws logs describe-export-tasks --task-id ' + logExportId
        result = getAWSJsonOutput(cmd)
        # One lookup instead of three; the task is done once it is neither
        # queued (PENDING) nor actively exporting (RUNNING).
        code = result['exportTasks'][0]['status']['code']
        if code not in ('PENDING', 'RUNNING'):
            print(code)
            break
        time.sleep(pollInterval)
5657
@@ -132,7 +133,7 @@ def __init__(self,name=None):
def scheduleBatch(self, data):
    """Serialize one job batch to JSON and push it onto the SQS queue.

    Prints the SQS MessageId of the sent message as confirmation.
    """
    reply = self.queue.send_message(MessageBody=json.dumps(data))
    print('Batch sent. Message ID:', reply.get('MessageId'))
136137
137138 def pendingLoad (self ):
138139 self .queue .load ()
@@ -142,7 +143,7 @@ def pendingLoad(self):
142143 self .pending = visible
143144 self .inProcess = nonVis
144145 d = datetime .datetime .now ()
145- print d ,'In process:' ,nonVis ,'Pending' ,visible
146+ print ( d ,'In process:' ,nonVis ,'Pending' ,visible )
146147 if visible + nonVis > 0 :
147148 return True
148149 else :
@@ -155,7 +156,7 @@ def pendingLoad(self):
155156
156157def submitJob ():
157158 if len (sys .argv ) < 3 :
158- print 'Use: run.py submitJob jobfile'
159+ print ( 'Use: run.py submitJob jobfile' )
159160 sys .exit ()
160161
161162 # Step 1: Read the job configuration file
@@ -171,25 +172,25 @@ def submitJob():
171172 }
172173
173174 # Step 2: Reach the queue and schedule tasks
174- print 'Contacting queue'
175+ print ( 'Contacting queue' )
175176 queue = JobQueue ()
176- print 'Scheduling tasks'
177+ print ( 'Scheduling tasks' )
177178 for batch in jobInfo ["groups" ]:
178179 #support Metadata passed as either a single string or as a list
179180 try : #single string ('canonical' DCP)
180181 templateMessage ["Metadata" ] = batch ["Metadata" ]
181182 except KeyError : #list of parameters (cellprofiler --print-groups)
182183 templateMessage ["Metadata" ] = batch
183184 queue .scheduleBatch (templateMessage )
184- print 'Job submitted. Check your queue'
185+ print ( 'Job submitted. Check your queue' )
185186
186187#################################
187188# SERVICE 2: START CLUSTER
188189#################################
189190
190191def startCluster ():
191192 if len (sys .argv ) < 3 :
192- print 'Use: run.py startCluster configFile'
193+ print ( 'Use: run.py startCluster configFile' )
193194 sys .exit ()
194195
195196 #Step 1: set up the configuration files
@@ -204,8 +205,8 @@ def startCluster():
204205 # Step 2: make the spot fleet request
205206 ec2client = boto3 .client ('ec2' )
206207 requestInfo = ec2client .request_spot_fleet (SpotFleetRequestConfig = spotfleetConfig )
207- print 'Request in process. Wait until your machines are available in the cluster.'
208- print 'SpotFleetRequestId' ,requestInfo ['SpotFleetRequestId' ]
208+ print ( 'Request in process. Wait until your machines are available in the cluster.' )
209+ print ( 'SpotFleetRequestId' ,requestInfo ['SpotFleetRequestId' ])
209210
210211 # Step 3: Make the monitor
211212 starttime = str (int (time .time ()* 1000 ))
@@ -231,12 +232,12 @@ def startCluster():
231232 logclient .put_retention_policy (logGroupName = LOG_GROUP_NAME + '_perInstance' , retentionInDays = 60 )
232233
233234 # Step 5: update the ECS service to be ready to inject docker containers in EC2 instances
234- print 'Updating service'
235+ print ( 'Updating service' )
235236 cmd = 'aws ecs update-service --cluster ' + ECS_CLUSTER + \
236237 ' --service ' + APP_NAME + 'Service' + \
237238 ' --desired-count ' + str (CLUSTER_MACHINES * TASKS_PER_MACHINE )
238239 update = getAWSJsonOutput (cmd )
239- print 'Service updated.'
240+ print ( 'Service updated.' )
240241
241242 # Step 6: Monitor the creation of the instances until all are present
242243 cmd = 'aws ec2 describe-spot-fleet-instances --spot-fleet-request-id ' + requestInfo ['SpotFleetRequestId' ]
@@ -247,25 +248,25 @@ def startCluster():
247248 # First check to make sure there's not a problem
248249 errorcheck = getAWSJsonOutput (cmd_tbl )
249250 if len (errorcheck ['HistoryRecords' ]) != 0 :
250- print 'Your spot fleet request is causing an error and is now being cancelled. Please check your configuration and try again'
251+ print ( 'Your spot fleet request is causing an error and is now being cancelled. Please check your configuration and try again' )
251252 for eacherror in errorcheck ['HistoryRecords' ]:
252- print eacherror ['EventInformation' ]['EventSubType' ] + ' : ' + eacherror ['EventInformation' ]['EventDescription' ]
253+ print ( eacherror ['EventInformation' ]['EventSubType' ] + ' : ' + eacherror ['EventInformation' ]['EventDescription' ])
253254 cmd = 'aws ec2 cancel-spot-fleet-requests --spot-fleet-request-ids ' + requestInfo ['SpotFleetRequestId' ] + ' --terminate-instances'
254255 result = getAWSJsonOutput (cmd )
255256 return
256257 # If everything seems good, just bide your time until you're ready to go
257258 time .sleep (20 )
258- print '.' ,
259+ print ( '.' , end = ' ' )
259260 status = getAWSJsonOutput (cmd )
260- print 'Spot fleet successfully created. Your job should start in a few minutes.'
261+ print ( 'Spot fleet successfully created. Your job should start in a few minutes.' )
261262
262263#################################
263264# SERVICE 3: MONITOR JOB
264265#################################
265266
266267def monitor ():
267268 if len (sys .argv ) < 3 :
268- print 'Use: run.py monitor spotFleetIdFile'
269+ print ( 'Use: run.py monitor spotFleetIdFile' )
269270 sys .exit ()
270271
271272 monitorInfo = loadConfig (sys .argv [2 ])
@@ -300,7 +301,7 @@ def monitor():
300301 ' --service ' + monitorapp + 'Service' + \
301302 ' --desired-count 0'
302303 update = getAWSJsonOutput (cmd )
303- print 'Service has been downscaled'
304+ print ( 'Service has been downscaled' )
304305
305306 # Step3: Delete the alarms from active machines and machines that have died since the last sweep
306307 cmd = 'aws ec2 describe-spot-fleet-instances --spot-fleet-request-id ' + fleetId + " --output json"
@@ -311,37 +312,37 @@ def monitor():
311312 killdeadAlarms (fleetId ,monitorapp )
312313
313314 # Step 4: Read spot fleet id and terminate all EC2 instances
314- print 'Shutting down spot fleet' ,fleetId
315+ print ( 'Shutting down spot fleet' ,fleetId )
315316 cmd = 'aws ec2 cancel-spot-fleet-requests --spot-fleet-request-ids ' + fleetId + ' --terminate-instances'
316317 result = getAWSJsonOutput (cmd )
317- print 'Job done.'
318+ print ( 'Job done.' )
318319
319320 # Step 5. Release other resources
320321 # Remove SQS queue, ECS Task Definition, ECS Service
321322 ECS_TASK_NAME = monitorapp + 'Task'
322323 ECS_SERVICE_NAME = monitorapp + 'Service'
323- print 'Deleting existing queue.'
324+ print ( 'Deleting existing queue.' )
324325 removequeue (queueId )
325- print 'Deleting service'
326+ print ( 'Deleting service' )
326327 cmd = 'aws ecs delete-service --cluster ' + monitorcluster + ' --service ' + ECS_SERVICE_NAME
327328 result = getAWSJsonOutput (cmd )
328- print 'De-registering task'
329+ print ( 'De-registering task' )
329330 deregistertask (ECS_TASK_NAME )
330- print "Removing cluster if it's not the default and not otherwise in use"
331+ print ( "Removing cluster if it's not the default and not otherwise in use" )
331332 removeClusterIfUnused (monitorcluster )
332333
333334 #Step 6: Export the logs to S3
334335 cmd = 'aws logs create-export-task --task-name "' + loggroupId + '" --log-group-name ' + loggroupId + \
335336 ' --from ' + starttime + ' --to ' + '%d' % (time .time ()* 1000 )+ ' --destination ' + bucketId + ' --destination-prefix exportedlogs/' + loggroupId
336337 result = getAWSJsonOutput (cmd )
337- print 'Log transfer 1 to S3 initiated'
338+ print ( 'Log transfer 1 to S3 initiated' )
338339 seeIfLogExportIsDone (result ['taskId' ])
339340 cmd = 'aws logs create-export-task --task-name "' + loggroupId + '_perInstance" --log-group-name ' + loggroupId + '_perInstance ' + \
340341 '--from ' + starttime + ' --to ' + '%d' % (time .time ()* 1000 )+ ' --destination ' + bucketId + ' --destination-prefix exportedlogs/' + loggroupId
341342 result = getAWSJsonOutput (cmd )
342- print 'Log transfer 2 to S3 initiated'
343+ print ( 'Log transfer 2 to S3 initiated' )
343344 seeIfLogExportIsDone (result ['taskId' ])
344- print 'All export tasks done'
345+ print ( 'All export tasks done' )
345346
346347
347348#################################
@@ -350,7 +351,7 @@ def monitor():
350351
351352if __name__ == '__main__' :
352353 if len (sys .argv ) < 2 :
353- print 'Use: run.py submitJob | startCluster | monitor'
354+ print ( 'Use: run.py submitJob | startCluster | monitor' )
354355 sys .exit ()
355356 if sys .argv [1 ] == 'submitJob' :
356357 submitJob ()
0 commit comments