Skip to content

Commit f58b172

Browse files
Code cleanup
1 parent 3716f49 commit f58b172

11 files changed

+85
-86
lines changed

examples/add_notification_email.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,6 @@
3232
# Return the result
3333
#
3434
if res[0]:
35-
print 'Recipient added successfully'
35+
print 'Recipient added successfully'
3636
else:
37-
print res[1]
37+
print res[1]

examples/create_alert.py

Lines changed: 14 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
#!/usr/bin/env python
22
#
33
# This script shows how to use the create_alert() call to create the following
4-
# Sysdig Cloud alert: 'send an email notification when the CPU of any tomcat
4+
# Sysdig Cloud alert: 'send an email notification when the CPU of any tomcat
55
# process running on any of the instrumented machines goes over 80%'
66
#
77
#
@@ -29,24 +29,23 @@
2929
# Create the alert.
3030
#
3131
res = sdclient.create_alert('tomcat cpu > 80% on any host', # Alert name.
32-
'this alert was automatically created using the python Sysdig Cloud library', # Alert description.
33-
6, # Syslog-encoded severity. 6 means 'info'.
34-
60, # The alert will fire if the condition is met for at least 60 seconds.
35-
'avg(cpu.used.percent) > 80', # The condition.
36-
['host.mac', 'proc.name'], # Segmentation. We want to check this metric for every process on every machine.
37-
'ANY', # in case there is more than one tomcat process, this alert will fire when a single one of them crosses the 80% threshold.
38-
'proc.name = "tomcat"', # Filter. We want to receive a notification only if the name of the process meeting the condition is 'tomcat'.
39-
['EMAIL'], # Notification target. We want an email to be sent. Alerts email recipients can be defined here: https://app.sysdigcloud.com/#/settings/notifications
40-
False) # This alert will be disabled when it's created.
41-
32+
'this alert was automatically created using the python Sysdig Cloud library', # Alert description.
33+
6, # Syslog-encoded severity. 6 means 'info'.
34+
60, # The alert will fire if the condition is met for at least 60 seconds.
35+
'avg(cpu.used.percent) > 80', # The condition.
36+
['host.mac', 'proc.name'], # Segmentation. We want to check this metric for every process on every machine.
37+
'ANY', # in case there is more than one tomcat process, this alert will fire when a single one of them crosses the 80% threshold.
38+
'proc.name = "tomcat"', # Filter. We want to receive a notification only if the name of the process meeting the condition is 'tomcat'.
39+
['EMAIL'], # Notification target. We want an email to be sent. Alerts email recipients can be defined here: https://app.sysdigcloud.com/#/settings/notifications
40+
False) # This alert will be disabled when it's created.
4241

4342
#
4443
# Validate and print the results.
4544
#
4645
if res[0]:
47-
data = res[1]
46+
data = res[1]
4847
else:
49-
print res[1]
50-
sys.exit(0)
48+
print res[1]
49+
sys.exit(0)
5150

52-
print res
51+
print res

examples/create_dashboard.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -29,32 +29,32 @@
2929
# Create the new dashboard, applying to cassandra in production
3030
#
3131
res = sdclient.create_dashboard_from_view("API test - cassandra in prod", # The name we're giving to the new dashboard.
32-
"Overview by Process", # The view we're copying.
33-
'kubernetes.namespace.name=prod and proc.name = cassandra') # the filter specifying what this dasboard appies to.
34-
# Remember that here you can use combinations of any
35-
# segmentation criteria that you find in the Sysdig
36-
# Cloud explore page.
32+
"Overview by Process", # The view we're copying.
33+
"kubernetes.namespace.name=prod and proc.name = cassandra") # the filter specifying what this dashboard applies to.
34+
# Remember that here you can use combinations of any
35+
# segmentation criteria that you find in the Sysdig
36+
# Cloud explore page.
3737
#
3838
# Check the result
3939
#
4040
if res[0]:
41-
print 'Dashboard created successfully'
41+
print 'Dashboard created successfully'
4242
else:
43-
print res[1]
44-
sys.exit(0)
43+
print res[1]
44+
sys.exit(0)
4545

4646
#
4747
# Make a copy of the just-created dashboard, this time applying it to cassandra in the dev namespace
4848
#
4949
res = sdclient.create_dashboard_from_dashboard("API test - cassandra in dev", # The name we're giving to the new dashboard.
50-
"API test - cassandra in prod", # The view we're copying.
51-
'kubernetes.namespace.name=dev and proc.name = cassandra') # the filter specifying what this dasboard appies to.
50+
"API test - cassandra in prod", # The view we're copying.
51+
"kubernetes.namespace.name=dev and proc.name = cassandra") # the filter specifying what this dashboard applies to.
5252

5353
#
5454
# Check the result
5555
#
5656
if res[0]:
57-
print 'Dashboard copied successfully'
57+
print 'Dashboard copied successfully'
5858
else:
59-
print res[1]
59+
print res[1]
6060

examples/get_data_advanced.py

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
#!/usr/bin/env python
22
#
3-
# This script shows an advanced Sysdig Cloud data request that leverages
3+
# This script shows an advanced Sysdig Cloud data request that leverages
44
# filtering and segmentation.
55
#
66
# The request returns the last 10 minutes of CPU utilization for all of the
@@ -28,20 +28,20 @@
2828
sdclient = SdcClient(sdc_token)
2929

3030
#
31-
# Prepare the metrics list.
31+
# Prepare the metrics list.
3232
#
3333
metrics = [
3434
# The first metric we request is the container name. This is a segmentation
35-
# metric, and you can tell by the fact that we don't specify any aggregation
35+
# metric, and you can tell by the fact that we don't specify any aggregation
3636
# criteria. This entry tells Sysdig Cloud that we want to see the CPU
3737
# utilization for each container separately.
3838
{"id": "container.name"},
3939
# The second metric we request is the CPU. We aggregate it as an average.
4040
{"id": "cpu.used.percent",
41-
"aggregations": {
42-
"time": "avg",
43-
"group": "avg"
44-
}
41+
"aggregations": {
42+
"time": "avg",
43+
"group": "avg"
44+
}
4545
}
4646
]
4747

@@ -53,20 +53,20 @@
5353
#
5454
# Fire the query.
5555
#
56-
res = sdclient.get_data(metrics, # metrics list
57-
-600, # start_ts = 600 seconds ago
58-
0, # end_ts = now
59-
60, # 1 data point per minute
60-
filter, # The filter
61-
'container') # The source for our metrics is the container
56+
res = sdclient.get_data(metrics, # metrics list
57+
-600, # start_ts = 600 seconds ago
58+
0, # end_ts = now
59+
60, # 1 data point per minute
60+
filter, # The filter
61+
'container') # The source for our metrics is the container
6262

6363
#
6464
# Show the result!
6565
#
6666
if res[0]:
67-
data = res[1]
67+
data = res[1]
6868
else:
69-
print res[1]
70-
sys.exit(0)
69+
print res[1]
70+
sys.exit(0)
7171

7272
print data

examples/get_data_simple.py

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
11
#!/usr/bin/env python
22
#
3-
# This script shows the basics of getting data out of Sysdig Cloud by crating a
3+
# This script shows the basics of getting data out of Sysdig Cloud by creating a
44
# very simple request that has no filter and no segmentation.
55
#
6-
# The request queries for the average CPU across all of the instrumented hosts for
6+
# The request queries for the average CPU across all of the instrumented hosts for
77
# the last 10 minutes, with 1 minute data granularity
88
#
99

@@ -27,33 +27,33 @@
2727
sdclient = SdcClient(sdc_token)
2828

2929
#
30-
# Prepare the metrics list. In this case, we have just one metric: cpu.used.percent,
30+
# Prepare the metrics list. In this case, we have just one metric: cpu.used.percent,
3131
# without segmentation.
3232
# The data will be aggregated as average both in time and across the hosts.
3333
#
3434
metrics = [
3535
{"id": "cpu.used.percent",
36-
"aggregations": {
37-
"time": "avg",
38-
"group": "avg"
39-
}
40-
}]
36+
"aggregations": {
37+
"time": "avg",
38+
"group": "avg"
39+
}
40+
}]
4141

4242
#
4343
# Fire the query.
4444
#
45-
res = sdclient.get_data(metrics, # metrics list
46-
-600, # start_ts = 600 seconds ago
47-
0, # end_ts = now
48-
60) # 1 data point per minute
45+
res = sdclient.get_data(metrics, # metrics list
46+
-600, # start_ts = 600 seconds ago
47+
0, # end_ts = now
48+
60) # 1 data point per minute
4949

5050
#
5151
# Show the result
5252
#
5353
if res[0]:
54-
data = res[1]
54+
data = res[1]
5555
else:
56-
print res[1]
57-
sys.exit(0)
56+
print res[1]
57+
sys.exit(0)
5858

5959
print data

examples/list_alerts.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -31,10 +31,10 @@
3131
# Show the list of alerts
3232
#
3333
if res[0]:
34-
data = res[1]
34+
data = res[1]
3535
else:
36-
print res[1]
37-
sys.exit(0)
36+
print res[1]
37+
sys.exit(0)
3838

3939
for alert in data['alerts']:
4040
print 'enabled: %s, name: %s' % (str(alert['enabled']), alert['name'])

examples/list_dashboards.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -31,10 +31,10 @@
3131
# Show the list of alerts
3232
#
3333
if res[0]:
34-
data = res[1]
34+
data = res[1]
3535
else:
36-
print res[1]
37-
sys.exit(0)
36+
print res[1]
37+
sys.exit(0)
3838

3939
for db in data['dashboards']:
4040
print "Name: %s, # Charts: %d" % (db['name'], len(db['items']))

examples/list_hosts.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
sdclient = SdcClient(sdc_token)
2525

2626
#
27-
# Prepare the query's metrics list. In this case, we have just one metric:
27+
# Prepare the query's metrics list. In this case, we have just one metric:
2828
# host.hostName. This is a 'key' metric, and we don't include any number metric.
2929
# Essentially, we create an 'enumeration' of hostnames.
3030
#
@@ -35,18 +35,18 @@
3535
# Note: there's no sampling time. This means that we're requesting the result to
3636
# come as a single sample.
3737
#
38-
res = sdclient.get_data(metrics, # metrics list
39-
-600, # cover the last 300 seconds...
40-
0, # ... ending now...
41-
600) # ... with just one 300s sample
38+
res = sdclient.get_data(metrics, # metrics list
39+
-600, # cover the last 600 seconds...
40+
0, # ... ending now...
41+
600) # ... with just one 600s sample
4242

4343
#
4444
# Show the results!
4545
#
4646
if res[0]:
47-
data = res[1]
47+
data = res[1]
4848
else:
49-
print res[1]
50-
sys.exit(0)
49+
print res[1]
50+
sys.exit(0)
5151

5252
print data

examples/post_event.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121

2222
severity = 6
2323
if len(sys.argv) < 4:
24-
severity = int(sys.argv[4])
24+
severity = int(sys.argv[4])
2525

2626
#
2727
# Instantiate the SDC client
@@ -37,6 +37,6 @@
3737
# Return the result
3838
#
3939
if res[0]:
40-
print 'Event Posted Successfully'
40+
print 'Event Posted Successfully'
4141
else:
42-
print res[1]
42+
print res[1]

examples/print_data_retention_info.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -31,9 +31,9 @@
3131
# Show the list of retention intervals
3232
#
3333
if res[0]:
34-
data = res[1]
34+
data = res[1]
3535
else:
36-
print res[1]
37-
sys.exit(0)
36+
print res[1]
37+
sys.exit(0)
3838

3939
print data['agents']

0 commit comments

Comments
 (0)