Commit 6e9135d

cclauss authored and bethac07 committed

print() is a function in Python 3 (#81)

1 parent 275902e commit 6e9135d

File tree

4 files changed: +48 -44 lines changed

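The change is the same in all four files: add from __future__ import print_function at the top of the module and rewrite each print statement as a function call, so the code runs unchanged under both Python 2 and Python 3. A minimal sketch of the resulting style (the variable below is illustrative, not taken from the repository):

from __future__ import print_function  # makes print() a function under Python 2 as well

status = 'Service exists. Removing'
print(status)                              # was: print status
print('Deleted', status, 'if it existed')  # multiple arguments are joined with single spaces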

fabfile.py

Lines changed: 5 additions & 4 deletions
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import boto
 import boto.s3
 import json
@@ -103,7 +104,7 @@ def generate_dockerfile():


 def show_dockerfile():
-    print generate_dockerfile()
+    print(generate_dockerfile())


 def generate_task_definition():
@@ -159,7 +160,7 @@ def generate_task_definition():


 def show_task_definition():
-    print json.dumps(generate_task_definition(), indent=4)
+    print(json.dumps(generate_task_definition(), indent=4))


 def update_ecs_task_definition():
@@ -187,13 +188,13 @@ def create_or_update_ecs_service():
     data = json.loads(info)
     service = [srv for srv in data['serviceArns'] if srv.endswith(ECS_SERVICE_NAME)]
     if len(service) > 0:
-        print 'Service exists. Removing'
+        print('Service exists. Removing')
         local('aws ecs delete-service --cluster ' + ECS_CLUSTER +
               ' --service ' + ECS_SERVICE_NAME,
               capture=True)
         time.sleep(WAIT_TIME)

-    print 'Creating new service'
+    print('Creating new service')
     local('aws ecs create-service --cluster ' + ECS_CLUSTER +
           ' --service-name ' + ECS_SERVICE_NAME +
           ' --task-definition ' + ECS_TASK_NAME +

files/ManualMetadata.py

Lines changed: 3 additions & 2 deletions
@@ -2,6 +2,7 @@
 This is designed to be called from the command line with
 $ python ManualMetadata.py pathtocsv/csvfile.csv "['Metadata_Metadata1','Metadata_Metadata2']"
 '''
+from __future__ import print_function

 import pandas as pd
 import sys
@@ -13,7 +14,7 @@
 def manualmetadata():
     incsv=pd.read_csv(csv)
     manmet=open(csv[:-4]+'batch.txt','w')
-    print incsv.shape
+    print(incsv.shape)
     done=[]
     for i in range(incsv.shape[0]):
         metadatatext='{"Metadata": "'
@@ -24,5 +25,5 @@ def manualmetadata():
         manmet.write(metadatatext)
         done.append(metadatatext)
     manmet.close()
-    print str(len(done)), 'batches found'
+    print(str(len(done)), 'batches found')
 manualmetadata()
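With the future import in place, the multi-argument call at the end of ManualMetadata.py prints its arguments separated by a single space, exactly like the old comma-separated statement, so the output is unchanged. A small sketch with made-up data:

from __future__ import print_function

done = ['batch_1', 'batch_2', 'batch_3']  # illustrative stand-in for the batches collected above
print(str(len(done)), 'batches found')    # prints: 3 batches found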

run.py

Lines changed: 31 additions & 30 deletions
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import os, sys
 import boto, boto3
 import datetime
@@ -41,16 +42,16 @@ def killdeadAlarms(fleetId,monitorapp):
         cmd='aws cloudwatch delete-alarms --alarm-name '+monitorapp+'_'+eachmachine
         subprocess.Popen(cmd.split())
         time.sleep(3) #Avoid Rate exceeded error
-        print 'Deleted', monitorapp+'_'+eachmachine, 'if it existed'
-    print 'Old alarms deleted'
+        print('Deleted', monitorapp+'_'+eachmachine, 'if it existed')
+    print('Old alarms deleted')

 def seeIfLogExportIsDone(logExportId):
     while True:
         cmd='aws logs describe-export-tasks --task-id '+logExportId
         result =getAWSJsonOutput(cmd)
         if result['exportTasks'][0]['status']['code']!='PENDING':
             if result['exportTasks'][0]['status']['code']!='RUNNING':
-                print result['exportTasks'][0]['status']['code']
+                print(result['exportTasks'][0]['status']['code'])
                 break
         time.sleep(30)

@@ -132,7 +133,7 @@ def __init__(self,name=None):
     def scheduleBatch(self, data):
         msg = json.dumps(data)
         response = self.queue.send_message(MessageBody=msg)
-        print 'Batch sent. Message ID:',response.get('MessageId')
+        print('Batch sent. Message ID:',response.get('MessageId'))

     def pendingLoad(self):
         self.queue.load()
@@ -142,7 +143,7 @@ def pendingLoad(self):
         self.pending = visible
         self.inProcess = nonVis
         d = datetime.datetime.now()
-        print d,'In process:',nonVis,'Pending',visible
+        print(d,'In process:',nonVis,'Pending',visible)
         if visible + nonVis > 0:
             return True
         else:
@@ -155,7 +156,7 @@ def pendingLoad(self):

 def submitJob():
     if len(sys.argv) < 3:
-        print 'Use: run.py submitJob jobfile'
+        print('Use: run.py submitJob jobfile')
         sys.exit()

     # Step 1: Read the job configuration file
@@ -171,25 +172,25 @@ def submitJob():
     }

     # Step 2: Reach the queue and schedule tasks
-    print 'Contacting queue'
+    print('Contacting queue')
     queue = JobQueue()
-    print 'Scheduling tasks'
+    print('Scheduling tasks')
     for batch in jobInfo["groups"]:
         #support Metadata passed as either a single string or as a list
         try: #single string ('canonical' DCP)
             templateMessage["Metadata"] = batch["Metadata"]
         except KeyError: #list of parameters (cellprofiler --print-groups)
             templateMessage["Metadata"] = batch
         queue.scheduleBatch(templateMessage)
-    print 'Job submitted. Check your queue'
+    print('Job submitted. Check your queue')

 #################################
 # SERVICE 2: START CLUSTER
 #################################

 def startCluster():
     if len(sys.argv) < 3:
-        print 'Use: run.py startCluster configFile'
+        print('Use: run.py startCluster configFile')
         sys.exit()

     #Step 1: set up the configuration files
@@ -204,8 +205,8 @@ def startCluster():
     # Step 2: make the spot fleet request
     ec2client=boto3.client('ec2')
     requestInfo = ec2client.request_spot_fleet(SpotFleetRequestConfig=spotfleetConfig)
-    print 'Request in process. Wait until your machines are available in the cluster.'
-    print 'SpotFleetRequestId',requestInfo['SpotFleetRequestId']
+    print('Request in process. Wait until your machines are available in the cluster.')
+    print('SpotFleetRequestId',requestInfo['SpotFleetRequestId'])

     # Step 3: Make the monitor
     starttime=str(int(time.time()*1000))
@@ -231,12 +232,12 @@ def startCluster():
     logclient.put_retention_policy(logGroupName=LOG_GROUP_NAME+'_perInstance', retentionInDays=60)

     # Step 5: update the ECS service to be ready to inject docker containers in EC2 instances
-    print 'Updating service'
+    print('Updating service')
     cmd = 'aws ecs update-service --cluster ' + ECS_CLUSTER + \
           ' --service ' + APP_NAME + 'Service' + \
           ' --desired-count ' + str(CLUSTER_MACHINES*TASKS_PER_MACHINE)
     update = getAWSJsonOutput(cmd)
-    print 'Service updated.'
+    print('Service updated.')

     # Step 6: Monitor the creation of the instances until all are present
     cmd = 'aws ec2 describe-spot-fleet-instances --spot-fleet-request-id ' + requestInfo['SpotFleetRequestId']
@@ -247,25 +248,25 @@ def startCluster():
         # First check to make sure there's not a problem
         errorcheck = getAWSJsonOutput(cmd_tbl)
         if len(errorcheck['HistoryRecords']) != 0:
-            print 'Your spot fleet request is causing an error and is now being cancelled. Please check your configuration and try again'
+            print('Your spot fleet request is causing an error and is now being cancelled. Please check your configuration and try again')
             for eacherror in errorcheck['HistoryRecords']:
-                print eacherror['EventInformation']['EventSubType'] + ' : ' + eacherror['EventInformation']['EventDescription']
+                print(eacherror['EventInformation']['EventSubType'] + ' : ' + eacherror['EventInformation']['EventDescription'])
             cmd = 'aws ec2 cancel-spot-fleet-requests --spot-fleet-request-ids ' + requestInfo['SpotFleetRequestId'] + ' --terminate-instances'
             result = getAWSJsonOutput(cmd)
             return
         # If everything seems good, just bide your time until you're ready to go
         time.sleep(20)
-        print '.',
+        print('.', end=' ')
         status = getAWSJsonOutput(cmd)
-    print 'Spot fleet successfully created. Your job should start in a few minutes.'
+    print('Spot fleet successfully created. Your job should start in a few minutes.')

 #################################
 # SERVICE 3: MONITOR JOB
 #################################

 def monitor():
     if len(sys.argv) < 3:
-        print 'Use: run.py monitor spotFleetIdFile'
+        print('Use: run.py monitor spotFleetIdFile')
         sys.exit()

     monitorInfo = loadConfig(sys.argv[2])
@@ -300,7 +301,7 @@ def monitor():
           ' --service ' + monitorapp + 'Service' + \
           ' --desired-count 0'
     update = getAWSJsonOutput(cmd)
-    print 'Service has been downscaled'
+    print('Service has been downscaled')

     # Step3: Delete the alarms from active machines and machines that have died since the last sweep
     cmd= 'aws ec2 describe-spot-fleet-instances --spot-fleet-request-id '+fleetId+" --output json"
@@ -311,37 +312,37 @@ def monitor():
     killdeadAlarms(fleetId,monitorapp)

     # Step 4: Read spot fleet id and terminate all EC2 instances
-    print 'Shutting down spot fleet',fleetId
+    print('Shutting down spot fleet',fleetId)
     cmd = 'aws ec2 cancel-spot-fleet-requests --spot-fleet-request-ids '+ fleetId +' --terminate-instances'
     result = getAWSJsonOutput(cmd)
-    print 'Job done.'
+    print('Job done.')

     # Step 5. Release other resources
     # Remove SQS queue, ECS Task Definition, ECS Service
     ECS_TASK_NAME = monitorapp + 'Task'
     ECS_SERVICE_NAME = monitorapp + 'Service'
-    print 'Deleting existing queue.'
+    print('Deleting existing queue.')
     removequeue(queueId)
-    print 'Deleting service'
+    print('Deleting service')
     cmd='aws ecs delete-service --cluster '+monitorcluster+' --service '+ECS_SERVICE_NAME
     result=getAWSJsonOutput(cmd)
-    print 'De-registering task'
+    print('De-registering task')
     deregistertask(ECS_TASK_NAME)
-    print "Removing cluster if it's not the default and not otherwise in use"
+    print("Removing cluster if it's not the default and not otherwise in use")
     removeClusterIfUnused(monitorcluster)

     #Step 6: Export the logs to S3
     cmd = 'aws logs create-export-task --task-name "'+loggroupId+'" --log-group-name '+loggroupId+ \
           ' --from '+starttime+' --to '+'%d' %(time.time()*1000)+' --destination '+bucketId+' --destination-prefix exportedlogs/'+loggroupId
     result =getAWSJsonOutput(cmd)
-    print 'Log transfer 1 to S3 initiated'
+    print('Log transfer 1 to S3 initiated')
     seeIfLogExportIsDone(result['taskId'])
     cmd = 'aws logs create-export-task --task-name "'+loggroupId+'_perInstance" --log-group-name '+loggroupId+'_perInstance '+ \
           '--from '+starttime+' --to '+'%d' %(time.time()*1000)+' --destination '+bucketId+' --destination-prefix exportedlogs/'+loggroupId
     result =getAWSJsonOutput(cmd)
-    print 'Log transfer 2 to S3 initiated'
+    print('Log transfer 2 to S3 initiated')
     seeIfLogExportIsDone(result['taskId'])
-    print 'All export tasks done'
+    print('All export tasks done')


 #################################
@@ -350,7 +351,7 @@ def monitor():

 if __name__ == '__main__':
     if len(sys.argv) < 2:
-        print 'Use: run.py submitJob | startCluster | monitor'
+        print('Use: run.py submitJob | startCluster | monitor')
         sys.exit()
     if sys.argv[1] == 'submitJob':
         submitJob()
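The one conversion in run.py that is not purely mechanical is the progress indicator: the Python 2 trailing comma in print '.', suppresses the newline, and its nearest function-form equivalent is end=' ', which is what this commit uses. A minimal sketch of that idiom; the sys.stdout.flush() call is an assumption added here so the dots appear promptly and is not part of the original loop:

from __future__ import print_function
import sys
import time

for _ in range(3):
    time.sleep(1)          # the original polls every 20 seconds
    print('.', end=' ')    # replacement for the Python 2 trailing-comma form
    sys.stdout.flush()     # not in the original; without it stdout may buffer the dots
print('done')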

worker/cp-worker.py

Lines changed: 9 additions & 8 deletions
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import boto3
 import glob
 import json
@@ -60,11 +61,11 @@ def monitorAndLog(process,logger):
         if output== '' and process.poll() is not None:
             break
         if output:
-            print output.strip()
+            print(output.strip())
             logger.info(output)

 def printandlog(text,logger):
-    print text
+    print(text)
     logger.info(text)

 #################################
@@ -148,7 +149,7 @@ def runCellProfiler(message):
     else:
         cmd = 'cellprofiler -c -r -b -p %(DATA)s/%(PL)s -o %(OUT)s -d ' + cpDone + ' --data-file=%(DATA)s/%(FL)s -g %(Metadata)s'
     cmd = cmd % replaceValues
-    print 'Running', cmd
+    print('Running', cmd)
     logger.info(cmd)

     subp = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
@@ -206,13 +207,13 @@ def main():
         if msg is not None:
             result = runCellProfiler(msg)
             if result == 'SUCCESS':
-                print 'Batch completed successfully.'
+                print('Batch completed successfully.')
                 queue.deleteMessage(handle)
             else:
-                print 'Returning message to the queue.'
+                print('Returning message to the queue.')
                 queue.returnMessage(handle)
         else:
-            print 'No messages in the queue'
+            print('No messages in the queue')
             break

 #################################
@@ -221,7 +222,7 @@ def main():

 if __name__ == '__main__':
     logging.basicConfig(level=logging.INFO)
-    print 'Worker started'
+    print('Worker started')
     main()
-    print 'Worker finished'
+    print('Worker finished')
