|
22 | 22 |
|
#
# Authenticate: the Sysdig Monitor API token is taken from the first
# command-line argument.
#
sdc_token = sys.argv[1]

#
# Instantiate the SDC client
#
sdclient = SdcClient(sdc_token)
|
#
# List of metrics to export. Imagine a SQL data table, with key columns and
# value columns. You just need to specify the ID for keys, and ID with
# aggregation for values.
#
metrics = [
    # { "id": "agent.tag.team" },
    # { "id": "kubernetes.pod.label.name" },
    # { "id": "agent.tag.env", "aggregations": { "time": "concat", "group": "concat" } },
    { "id": "cpu.used.percent", "aggregations": { "time": "timeAvg", "group": "avg" } }
    ]
# NOTE: 'filter' shadows the Python builtin; the name is kept to mirror the
# get_data() keyword argument it is passed to.
filter = None    # e.g. 'host.hostName = "my-host"', or None for no filtering
start = -600     # start timestamp, or a value lower than 0 for "last X seconds"
end = 0          # end timestamp, or 0 to mean "now"
sampling = 60    # sampling time in seconds for time series, or 0 for aggregated data
42 | 41 |
|
#
# Load the data from the backend with a single query
#
res = sdclient.get_data(metrics, start, end, sampling, filter=filter)
50 | 46 |
|
#
# Show the result
#
if res[0]:
    #
    # Read response. The JSON looks like this:
    #
    # {
    #   start: timestamp,
    #   end: timestamp,
    #   data: [
    #     {
    #       t: timestamp,
    #       d: [ value1, value2, value3, ... ]
    #     },
    #     ...
    #   ]
    # }
    #
    response = res[1]

    colLen = 25  # fixed width of every printed column

    def _fit(text, width):
        # Left-justify text into a fixed-width column, truncating with '...'
        # when it does not fit. Uses <= so a string of exactly `width`
        # characters is printed whole instead of being needlessly truncated.
        text = str(text)
        if len(text) <= width:
            return text.ljust(width)
        return text[:width - 3] + '...'

    #
    # Print summary (what, when)
    #
    start = response['start']
    end = response['end']
    data = response['data']

    print('Data for %s from %d to %d' % (filter if filter else 'everything', start, end))
    print('')

    #
    # Print table headers: one column per requested metric, plus a leading
    # timestamp column when the data is a time series (sampling > 0)
    #
    header = ' '.join([_fit(x['id'], colLen) for x in metrics])
    print('%s %s' % ('timestamp'.ljust(colLen), header) if sampling > 0 else header)
    print('')

    #
    # Print table body
    #
    for d in data:
        # With aggregated data (sampling == 0) there is a single sample whose
        # timestamp is the start of the requested interval.
        timestamp = d['t'] if sampling > 0 else start
        row = ' '.join([_fit(v, colLen) for v in d['d']])
        print('%s %s' % (('<t: %d>' % timestamp).ljust(colLen), row) if sampling > 0 else row)
else:
    #
    # The query failed: res[1] carries the error description
    #
    print(res[1])
    sys.exit(1)
|
0 commit comments