Skip to content

Commit af35eb6

Browse files
admacdonald1 and Eric Lei
authored and committed
Created consolidated_csv_risk_report and source_report_for_sub_projects scripts.
1 parent ece3750 commit af35eb6

File tree

2 files changed

+229
-0
lines changed

2 files changed

+229
-0
lines changed
Lines changed: 92 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,92 @@
"""
Created on December 6, 2019

@author: AMacDonald

A report generator that allows for user to input date range and get information on how many Code Locations have been
created in this time, as well as the counts of high, medium, and low vulnerabilities for the related scans.

Script requires input "start date" and "end date" (though default for end date is today's date), and will document how
many scans were run in that time, specifying the number that were mapped to a project, as well as the counts of how many
components contain at least 1 critical, high, medium and low vulnerability, based on the highest vulnerability score.

CSV report will be generated in the working directory

For this script to run, the hub-rest-api-python library and pandas library will need to be installed.
"""

import argparse
import datetime
import time

import pandas as pd

from blackduck.HubRestApi import HubInstance

# Hub credentials/URL come from the .restconfig.json convention of
# hub-rest-api-python; instantiating HubInstance performs the authentication.
hub = HubInstance()
today = datetime.date.today()
timestamp = time.strftime('%m_%d_%Y_%H_%M')
# Timestamped output file name so repeated runs do not overwrite each other.
file_out = ('consolidated_risk_report-' + timestamp + '.csv')

# BUG FIX: the original passed the usage text as the first positional argument
# of ArgumentParser, which sets `prog` (the displayed program name), not the
# parser description.
parser = argparse.ArgumentParser(
    description="Input start date and end date for range you would like report to be generated")
parser.add_argument("--start_date", type=str, help="start date")
parser.add_argument("--end_date", type=str,
                    help="end date, default value is today's date",
                    default=today.strftime('%Y-%m-%d'))
args = parser.parse_args()

# Accumulators filled in by get_scan_counts():
scancount = []           # createdAt timestamps of every scan inside the date range
bomcount = []            # creation dates of in-range scans mapped to a project version
aggregatedlow = {}       # running total of components with at least one LOW vulnerability
aggregatedmedium = {}    # ... at least one MEDIUM ...
aggregatedhigh = {}      # ... at least one HIGH ...
aggregatedcritical = {}  # ... at least one CRITICAL ...
def get_scan_counts():
    """Tally scans created inside (start_date, end_date) and write a one-row CSV summary.

    Walks every code location (scan) on the Hub; counts those created inside the
    requested window, and -- for scans mapped to a project version -- fetches that
    version's risk profile and accumulates the per-severity component counts.
    The resulting single-row DataFrame is printed and written to `file_out`.
    Reads module globals: hub, args, scancount, bomcount, aggregated* dicts.
    """
    start = datetime.datetime.strptime(args.start_date, '%Y-%m-%d').date()
    end = datetime.datetime.strptime(args.end_date, '%Y-%m-%d').date()
    scans = hub.get_codelocations(limit=10000, parameters={})

    for scan in scans['items']:
        created = scan['createdAt']
        scan_date = datetime.datetime.strptime(created, '%Y-%m-%dT%H:%M:%S.%fZ').date()
        # NOTE: strict comparison -- scans created exactly on a boundary date
        # are excluded (preserves the original behaviour).
        in_range = start < scan_date < end

        if in_range and 'mappedProjectVersion' in scan:
            bomcount.append(scan_date)
            url = scan['mappedProjectVersion'] + '/risk-profile'
            risk_profile = hub.execute_get(url)
            data = risk_profile.json()
            # Only the VULNERABILITY category feeds the severity tallies.
            vuln = data['categories'].get('VULNERABILITY')
            if vuln is not None:
                aggregatedcritical['CRITICAL'] = aggregatedcritical.get('CRITICAL', 0) + vuln['CRITICAL']
                aggregatedhigh['HIGH'] = aggregatedhigh.get('HIGH', 0) + vuln['HIGH']
                aggregatedmedium['MEDIUM'] = aggregatedmedium.get('MEDIUM', 0) + vuln['MEDIUM']
                aggregatedlow['LOW'] = aggregatedlow.get('LOW', 0) + vuln['LOW']

        if in_range:
            # Every in-range scan counts toward the total, mapped or not.
            scancount.append(created)

    # BUG FIXES vs the original:
    #  * 'Dnd Date' column header corrected to 'End Date'
    #  * severity counts default to 0 (the original raised KeyError, and
    #    NameError on `scanlist`, when no scans fell inside the window)
    df = pd.DataFrame({'Start Date': [start],
                       'End Date': [end],
                       'Mapped Scans': [len(bomcount)],
                       'Total Scans': [len(scancount)],
                       'Critical Vuln Components': [aggregatedcritical.get('CRITICAL', 0)],
                       'High Vuln Components': [aggregatedhigh.get('HIGH', 0)],
                       'Medium Vuln Components': [aggregatedmedium.get('MEDIUM', 0)],
                       'Low Vuln Components': [aggregatedlow.get('LOW', 0)]})
    df.to_csv(file_out, encoding='utf-8', index=False)
    print(df)
87+
88+
def main():
    """Script entry point: generate the consolidated scan/risk CSV report."""
    get_scan_counts()


# Guard so importing this module for reuse does not trigger report generation.
if __name__ == "__main__":
    main()
Lines changed: 137 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,137 @@
"""
generate_source_report_for_sub_projects

Created on November 6, 2019

@author: AMacDonald

Script designed to generate and collate Source reports for sub-projects that are part of a master project.

To run this script, you will need to pass arguments for the master project name and the master project version. Once
they are specified, the script will investigate to see if the master project contains sub-projects and will generate
reports for all sub-projects it discovers. Finally, it will combine them into a single report, saving it to a "results"
sub-directory.

For this script to run, the hub-rest-api-python library and pandas library will need to be installed.
"""

import argparse
import glob
import os
import shutil
import time
from zipfile import ZipFile

import pandas

from blackduck.HubRestApi import HubInstance

# BUG FIX: the original passed the usage text positionally to ArgumentParser,
# which sets `prog` (the displayed program name) rather than the description.
parser = argparse.ArgumentParser(
    description="A program to create consolidated Source report for sub projects")
parser.add_argument("project_name")
parser.add_argument("version_name")

args = parser.parse_args()
hub = HubInstance()
FILES = ["FILES"]  # hub.create_version_reports requires the parameters to be an array/list
projversion = hub.get_project_version_by_name(args.project_name, args.version_name)
components = hub.get_version_components(projversion)
csv_list = []  # filled by concat() with the paths of every extracted CSV
projname = args.project_name
timestamp = time.strftime('%m_%d_%Y_%H_%M')
# Timestamped output name so repeated runs do not overwrite each other.
file_out = projname + '_' + "Consolidated_src_report-" + timestamp + ".csv"
43+
class FailedReportDownload(Exception):
    """Raised when a report zip cannot be downloaded after all retries."""
    pass
46+
47+
def download_report(location, filename, retries=4):
    """Download the report zip generated at `location` into `filename`.

    The report id is the last path segment of `location`. Each attempt waits
    5 seconds after a non-200 response before trying again; after `retries`
    failed attempts a FailedReportDownload is raised.
    """
    report_id = location.split("/")[-1]

    attempts_left = retries
    while attempts_left:
        print("Retrieving generated report from {}".format(location))
        response = hub.download_report(report_id)
        if response.status_code == 200:
            with open(filename, "wb") as zip_out:
                zip_out.write(response.content)
            print("Successfully downloaded zip file to {} for report {}".format(filename, report_id))
            return
        print("Failed to retrieve report {}".format(report_id))
        print("Probably not ready yet, waiting 5 seconds then retrying...")
        time.sleep(5)
        attempts_left -= 1

    raise FailedReportDownload("Failed to retrieve report {} after multiple retries".format(report_id))
67+
68+
def genreport():
    """Generate and download a FILES (source) report zip for every sub-project.

    Iterates the master version's BOM (module global `components`). A component
    whose 'activityData' is empty is treated as a sub-project (KB components
    carry activity data); for each sub-project a CSV FILES report is requested
    from the Hub and the resulting zip is downloaded into the working directory
    as <componentName>_<componentVersionName>.zip with spaces stripped.
    """
    for component in components['items']:
        subname = (component['componentName'] + '_' + component['componentVersionName'] + '.zip')
        subname = subname.replace(" ", "")
        # Empty activityData distinguishes a sub-project from a KB component.
        if not component['activityData']:
            print('activityData is empty, is subproject')
            # Resolve the sub-project's own version object so reports can be
            # generated against it.
            version = hub.get_project_version_by_name(component['componentName'],
                                                      component['componentVersionName'])
            # Request generation of the FILES (source) report in CSV format;
            # 201 means the Hub accepted the job and returns its location.
            result = hub.create_version_reports(version=version, report_list=FILES, format="CSV")
            if result.status_code == 201:
                print("Successfully created reports ({}) for project {} and version {}".format(
                    FILES, args.project_name, args.version_name))
                location = result.headers['Location']
                download_report(location, subname)
            else:
                print("Failed to create reports for project {} version {}, status code returned {}".format(
                    args.project_name, args.version_name, result.status_code))
        else:
            # Non-empty activityData => ordinary KB (OSS) component.
            print('is OSS component, no report to download')
95+
96+
def checkdirs():
    """Ensure ./temp (scratch) and ./results (output) directories exist in cwd.

    Idempotent: prints whether each directory was created or already present.
    """
    # Idiomatic truthiness tests replace the original `== False` comparisons.
    if not os.path.isdir('./temp'):
        os.makedirs('./temp')
        print('made temp directory')
    else:
        print('temp directory already exists')
    if not os.path.isdir('./results'):
        os.makedirs('./results')
        print('made results directory')
    else:
        print('results directory already exists')
108+
109+
def unzip():
    """Move downloaded report zips into ./temp, chdir there, and extract them all.

    NOTE: intentionally leaves the process cwd inside ./temp so that the
    subsequent concat() step can glob the extracted CSVs relative to it.
    """
    for filename in os.listdir("."):
        if filename.endswith(".zip"):
            shutil.move(filename, './temp/')
    # BUG FIX: the original concatenated strings (os.getcwd() + './temp/'),
    # yielding an invalid path such as '/work./temp/'; os.path.join builds
    # the correct '<cwd>/temp' path.
    temp_dir = os.path.join(os.getcwd(), 'temp')
    os.chdir(temp_dir)
    for archive in os.listdir(temp_dir):
        # Robustness: only open actual zip archives; the original attempted
        # to open every file in the directory as a zip.
        if archive.endswith('.zip'):
            with ZipFile(archive, 'r') as zip_obj:
                zip_obj.extractall()
119+
120+
def concat():
    """Merge every extracted CSV into one report and move it to ../results.

    Relies on unzip() having left the working directory inside ./temp, so the
    output and cleanup paths are relative to that directory.
    """
    # BUG FIX: without recursive=True, '**' in glob behaves like '*' and only
    # matches CSVs exactly one directory level down; recursive=True matches
    # CSVs at any depth (including the top level) under the temp directory.
    for csv in glob.iglob('**/*.csv', recursive=True):
        csv_list.append(csv)
    consolidated = pandas.concat([pandas.read_csv(csv) for csv in csv_list])
    consolidated.to_csv(file_out, index=False, encoding="utf-8")
    shutil.move(file_out, '../results/')
    # Best-effort cleanup of the scratch directory; ignore_errors because the
    # process cwd is still inside ../temp at this point.
    shutil.rmtree('../temp', ignore_errors=True)
128+
129+
def main():
    """Run the full pipeline: prepare dirs, fetch reports, extract, consolidate."""
    checkdirs()
    genreport()
    unzip()
    concat()


# Guard so importing this module does not immediately run the pipeline.
if __name__ == "__main__":
    main()

0 commit comments

Comments
 (0)