 # Format of the secret should be:
 # {"fsId": "fsxadmin-password", "fsId": "fsxadmin-password", ...}
 #secretRegion = "us-west-2"
-#secretArn = "arn:aws:secretsmanager:us-west-2:759995470648:secret:FSXN_passwords-MJixz9"
+#secretArn = ""
 #
 # Where to store last read stats.
 #s3BucketRegion = "us-west-2"
-#s3BucketName = "keith-test-mon-ems-events"
+#s3BucketName = ""
 #statsName = "lastFileRead"
 #
 # The region to process the FSxNs in.
 #
 # The name of the volume that holds the audit logs. Assumed to be the same on
 # all FSxNs.
 #volumeName = "audit_logs"
 #
-# The name of the vserver that holds the audit logs. Assumed to be the same on
-# all FSxNs.
-#vserverName = "fsx"
-#
 # The CloudWatch log group to store the audit logs in.
 #logGroupName = "/fsx/audit_logs"

@@ -118,7 +114,7 @@ def processFile(ontapAdminServer, headers, volumeUUID, filePath):
             else:
                 f.write(part.content)
         else:
-            print(f'API call to {endpoint} failed. HTTP status code: {response.status}.')
+            print(f'Warning: API call to {endpoint} failed. HTTP status code: {response.status}.')
             break

     f.close()
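For context, a simplified sketch of the kind of call processFile() makes: requesting a file from the ONTAP REST files endpoint and warning when the call fails. downloadFile and localPath are hypothetical names, the exact endpoint path is an assumption, and the real function iterates over multipart parts rather than writing the raw response body:

```python
import urllib3

def downloadFile(ontapAdminServer, headers, volumeUUID, filePath, localPath):
    # Hypothetical helper, not the script's processFile(); shown only to illustrate
    # the endpoint shape and the status check used above.
    http = urllib3.PoolManager(cert_reqs='CERT_NONE')  # assumption: FSxN presents a self-signed certificate
    endpoint = f"https://{ontapAdminServer}/api/storage/volumes/{volumeUUID}/files/{filePath}"
    response = http.request('GET', endpoint, headers=headers, timeout=5.0)
    if response.status != 200:
        print(f'Warning: API call to {endpoint} failed. HTTP status code: {response.status}.')
        return False
    with open(localPath, 'wb') as f:
        f.write(response.data)  # the real code writes each multipart part's content instead
    return True
```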
@@ -204,7 +200,7 @@ def ingestAuditFile(auditLogPath, auditLogName):
     dictData = xmltodict.parse(data)

     if dictData.get('Events') == None or dictData['Events'].get('Event') == None:
-        print(f"No events found in {auditLogName}")
+        print(f"Info: No events found in {auditLogName}.")
         return
     #
     # Ensure the logstream exists.
@@ -214,7 +210,7 @@ def ingestAuditFile(auditLogPath, auditLogName):
         #
         # This really shouldn't happen, since we should only be processing
         # each file once, but during testing it happens all the time.
-        print(f"Log stream {auditLogName} already exists")
+        print(f"Info: Log stream {auditLogName} already exists.")
     #
     # If there is only one event, then the dict['Events']['Event'] will be a
     # dictionary, otherwise it will be a list of dictionaries.
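The comment above is the reason for the branch that follows; a small standalone illustration (not from the script) of xmltodict's single-element behavior, and of its force_list option, which is one way to avoid the special case:

```python
import xmltodict

one = xmltodict.parse("<Events><Event id='1'/></Events>")
many = xmltodict.parse("<Events><Event id='1'/><Event id='2'/></Events>")
print(isinstance(one['Events']['Event'], list))   # False: a single Event parses to a mapping
print(isinstance(many['Events']['Event'], list))  # True: repeated Events parse to a list

# Passing force_list to xmltodict.parse() always yields a list for the named element,
# which would let the caller loop unconditionally.
always = xmltodict.parse("<Events><Event id='1'/></Events>", force_list=('Event',))
print(isinstance(always['Events']['Event'], list))  # True
```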
@@ -223,25 +219,25 @@ def ingestAuditFile(auditLogPath, auditLogName):
         for event in dictData['Events']['Event']:
             cwEvents.append(createCWEvent(event))
             if len(cwEvents) == 5000: # The real maximum is 10000 events, but there is also a size limit, so we will use 5000.
-                print("Putting 5000 events")
+                print("Info: Putting 5000 events")
                 response = cwLogsClient.put_log_events(logGroupName=config['logGroupName'], logStreamName=auditLogName, logEvents=cwEvents)
                 if response.get('rejectedLogEventsInfo') != None:
-                    if response['rejectedLogEventsInfo'].get('tooNewLogEventStartIndex') > 0:
+                    if response['rejectedLogEventsInfo'].get('tooNewLogEventStartIndex') is not None:
                         print(f"Warning: Too new log event start index: {response['rejectedLogEventsInfo']['tooNewLogEventStartIndex']}")
-                    if response['rejectedLogEventsInfo'].get('tooOldLogEventStartIndex') > 0:
-                        print(f"Warning: Too old log event start index: {response['rejectedLogEventsInfo']['tooOldLogEventStartIndex']}")
+                    if response['rejectedLogEventsInfo'].get('tooOldLogEventEndIndex') is not None:
+                        print(f"Warning: Too old log event end index: {response['rejectedLogEventsInfo']['tooOldLogEventEndIndex']}")
                 cwEvents = []
     else:
         cwEvents = [createCWEvent(dictData['Events']['Event'])]

     if len(cwEvents) > 0:
-        print(f"Putting {len(cwEvents)} events")
+        print(f"Info: Putting {len(cwEvents)} events")
         response = cwLogsClient.put_log_events(logGroupName=config['logGroupName'], logStreamName=auditLogName, logEvents=cwEvents)
         if response.get('rejectedLogEventsInfo') != None:
-            if response['rejectedLogEventsInfo'].get('tooNewLogEventStartIndex') > 0:
+            if response['rejectedLogEventsInfo'].get('tooNewLogEventStartIndex') is not None:
                 print(f"Warning: Too new log event start index: {response['rejectedLogEventsInfo']['tooNewLogEventStartIndex']}")
-            if response['rejectedLogEventsInfo'].get('tooOldLogEventStartIndex') > 0:
-                print(f"Warning: Too old log event start index: {response['rejectedLogEventsInfo']['tooOldLogEventStartIndex']}")
+            if response['rejectedLogEventsInfo'].get('tooOldLogEventEndIndex') is not None:
+                print(f"Warning: Too old log event end index: {response['rejectedLogEventsInfo']['tooOldLogEventEndIndex']}")

 ################################################################################
 # This function checks that all the required configuration variables are set.
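A self-contained sketch of the batching pattern used in ingestAuditFile() above; putEventsInBatches is a hypothetical helper, not part of the script. CloudWatch Logs caps PutLogEvents at 10,000 events and roughly 1 MB per call, which is why the code flushes every 5,000 events, and rejectedLogEventsInfo only carries an index when events were actually rejected:

```python
def putEventsInBatches(cwLogsClient, logGroupName, logStreamName, events, batchSize=5000):
    # cwLogsClient is a boto3 CloudWatch Logs client, as in the script; events is a list of
    # {'timestamp': <ms since epoch>, 'message': <str>} dicts (presumably what createCWEvent() builds).
    for i in range(0, len(events), batchSize):
        response = cwLogsClient.put_log_events(logGroupName=logGroupName,
                                               logStreamName=logStreamName,
                                               logEvents=events[i:i + batchSize])
        rejected = response.get('rejectedLogEventsInfo')
        if rejected is not None:
            for key in ('tooNewLogEventStartIndex', 'tooOldLogEventEndIndex', 'expiredLogEventEndIndex'):
                if rejected.get(key) is not None:
                    print(f"Warning: {key}: {rejected[key]}")
```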
@@ -257,8 +253,7 @@ def checkConfig():
         'secretArn': secretArn if 'secretArn' in globals() else None, # pylint: disable=E0602
         's3BucketRegion': s3BucketRegion if 's3BucketRegion' in globals() else None, # pylint: disable=E0602
         's3BucketName': s3BucketName if 's3BucketName' in globals() else None, # pylint: disable=E0602
-        'statsName': statsName if 'statsName' in globals() else None, # pylint: disable=E0602
-        'vserverName': vserverName if 'vserverName' in globals() else None # pylint: disable=E0602
+        'statsName': statsName if 'statsName' in globals() else None # pylint: disable=E0602
     }

     for item in config:
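The dictionary above works by referencing module-level settings only when they are actually defined; a minimal standalone illustration of that idiom (not from the script):

```python
# Only logGroupName is defined here; statsName is deliberately left unset.
logGroupName = "/fsx/audit_logs"

# The conditional checks globals() first, so the undefined name is never evaluated
# and no NameError is raised (pylint still flags it, hence the disable comments above).
config = {
    'logGroupName': logGroupName if 'logGroupName' in globals() else None,
    'statsName': statsName if 'statsName' in globals() else None,  # pylint: disable=E0602
}
print(config)  # {'logGroupName': '/fsx/audit_logs', 'statsName': None}
```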
@@ -341,39 +336,64 @@ def lambda_handler(event, context): # pylint: disable=W0613
         headersDownload = {**auth, 'Accept': 'multipart/form-data'}
         headersQuery = {**auth}
         #
-        # Get the volume UUID for the audit_logs volume.
-        volumeUUID = None
-        endpoint = f"https://{fsxn}/api/storage/volumes?name={config['volumeName']}&svm={config['vserverName']}"
+        # Get the list of SVMs on the FSxN.
+        endpoint = f"https://{fsxn}/api/svm/svms?return_timeout=4"
         response = http.request('GET', endpoint, headers=headersQuery, timeout=5.0)
         if response.status == 200:
-            data = json.loads(response.data.decode('utf-8'))
-            if data['num_records'] > 0:
-                volumeUUID = data['records'][0]['uuid'] # Since we specified the volume, and vserver name, there should only be one record.
+            svmsData = json.loads(response.data.decode('utf-8'))
+            numSvms = svmsData['num_records']
+            #
+            # Loop over all the SVMs.
+            while numSvms > 0:
+                for record in svmsData['records']:
+                    vserverName = record['name']
+                    #
+                    # Get the volume UUID for the audit_logs volume.
+                    volumeUUID = None
+                    endpoint = f"https://{fsxn}/api/storage/volumes?name={config['volumeName']}&svm={vserverName}"
+                    response = http.request('GET', endpoint, headers=headersQuery, timeout=5.0)
+                    if response.status == 200:
+                        data = json.loads(response.data.decode('utf-8'))
+                        if data['num_records'] > 0:
+                            volumeUUID = data['records'][0]['uuid'] # Since we specified the volume, and vserver name, there should only be one record.

-        if volumeUUID == None:
-            print(f"Warning: Volume {config['volumeName']} not found for {fsId} under SVM: {config['vserverName']}.")
-            continue
-        #
-        # Get all the files in the volume that match the audit file pattern.
-        endpoint = f"https://{fsxn}/api/storage/volumes/{volumeUUID}/files?name=audit_{config['vserverName']}_D*.xml&order_by=name%20asc&fields=name"
-        response = http.request('GET', endpoint, headers=headersQuery, timeout=5.0)
-        data = json.loads(response.data.decode('utf-8'))
-        if data.get('num_records') == 0:
-            print(f"Warning: No XML audit log files found on FsID: {fsId}; SvmID: {config['vserverName']}; Volume: {config['volumeName']}.")
-            continue
+                    if volumeUUID == None:
+                        print(f"Warning: Volume {config['volumeName']} not found for {fsId} under SVM: {vserverName}.")
+                        continue
+                    #
+                    # Get all the files in the volume that match the audit file pattern.
+                    endpoint = f"https://{fsxn}/api/storage/volumes/{volumeUUID}/files?name=audit_{vserverName}_D*.xml&order_by=name%20asc&fields=name"
+                    response = http.request('GET', endpoint, headers=headersQuery, timeout=5.0)
+                    data = json.loads(response.data.decode('utf-8'))
+                    if data.get('num_records') == 0:
+                        print(f"Warning: No XML audit log files found on FsID: {fsId}; SvmID: {vserverName}; Volume: {config['volumeName']}.")
+                        continue

-        for file in data['records']:
-            filePath = file['name']
-            if lastFileRead.get(fsxn) == None or getEpoch(filePath) > lastFileRead[fsxn]:
+                    for file in data['records']:
+                        filePath = file['name']
+                        if lastFileRead.get(fsxn) is None or lastFileRead[fsxn].get(vserverName) is None or getEpoch(filePath) > lastFileRead[fsxn][vserverName]:
+                            #
+                            # Process the file.
+                            processFile(fsxn, headersDownload, volumeUUID, filePath)
+                            lastFileRead.setdefault(fsxn, {})[vserverName] = getEpoch(filePath) # Don't overwrite entries for the FSxN's other SVMs.
+                            s3Client.put_object(Key=config['statsName'], Bucket=config['s3BucketName'], Body=json.dumps(lastFileRead).encode('UTF-8'))
                 #
-                # Process the file.
-                processFile(fsxn, headersDownload, volumeUUID, filePath)
-                lastFileRead[fsxn] = getEpoch(filePath)
-                s3Client.put_object(Key=config['statsName'], Bucket=config['s3BucketName'], Body=json.dumps(lastFileRead).encode('UTF-8'))
+                # Get the next set of SVMs.
+                if svmsData.get('_links', {}).get('next') != None:
+                    endpoint = f"https://{fsxn}{svmsData['_links']['next']['href']}"
+                    response = http.request('GET', endpoint, headers=headersQuery, timeout=5.0)
+                    if response.status == 200:
+                        svmsData = json.loads(response.data.decode('utf-8'))
+                        numSvms = svmsData['num_records']
+                    else:
+                        print(f"Warning: API call to {endpoint} failed. HTTP status code: {response.status}.")
+                        break # Break out of the for all SVMs loop. Maybe the call to the next FSxN will work.
+                else:
+                    numSvms = 0
+        else:
+            print(f"Warning: API call to {endpoint} failed. HTTP status code: {response.status}.")
+            break # Break out of the for all FSxNs loop.
 #
 # If this script is not running as a Lambda function, then call the lambda_handler function.
 if os.environ.get('AWS_LAMBDA_FUNCTION_NAME') == None:
-    lambdaFunction = False
     lambda_handler(None, None)
-else:
-    lambdaFunction = True
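The SVM loop added above follows ONTAP's _links.next pagination inline; a hedged sketch of the same pattern factored into a generator (iterRecords is hypothetical, not part of the script), using only the response fields the diff already relies on (records and _links):

```python
import json

def iterRecords(http, fsxn, endpoint, headers):
    # Yield every record of an ONTAP REST collection, following _links.next between pages.
    while endpoint is not None:
        response = http.request('GET', endpoint, headers=headers, timeout=5.0)
        if response.status != 200:
            print(f"Warning: API call to {endpoint} failed. HTTP status code: {response.status}.")
            return
        data = json.loads(response.data.decode('utf-8'))
        for record in data.get('records', []):
            yield record
        nextLink = data.get('_links', {}).get('next')
        endpoint = f"https://{fsxn}{nextLink['href']}" if nextLink else None

# Example use, mirroring the SVM loop above (http being a urllib3.PoolManager as in the script):
# for svm in iterRecords(http, fsxn, f"https://{fsxn}/api/svm/svms?return_timeout=4", headersQuery):
#     print(svm['name'])
```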