Skip to content

Commit 2460fa2

Browse files
committed
Refactored metrics extractors
1 parent 3407d3a commit 2460fa2

25 files changed

+691
-691
lines changed

README.md

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,10 @@ source ./pure-fa-ome-build/bin/activate
4747
python -m pip install --upgrade pip
4848
pip install build
4949

50+
# optionally install pytest and sanic_testing if you want to run tests
51+
pip install pytest
52+
pip install sanic_testing
53+
5054
# clone the repository
5155
git clone [email protected]:PureStorage-OpenConnect/pure-fa-openmetrics-exporter.git
5256

@@ -73,15 +77,11 @@ docker build --build-arg exporter_version=$VERSION -t pure-fa-ome:$VERSION .
7377

7478
### Scraping endpoints
7579

76-
The exporter uses a RESTful API schema to provide Prometheus scraping endpoints.
80+
The exporter uses a RESTful schema to provide scraping endpoints to OpenMetrics clients.
7781

7882
**Authentication**
7983

80-
Authentication is used by the exporter as the mechanism to cross authenticate to the scraped appliance, therefore for each array it is required to provide the REST API token for an account that has a 'readonly' role. The api-token must be provided in the http request using the HTTP Authorization header of type 'Bearer'. This is achieved by specifying the api-token value as the authorization parameter of the specific job in the Prometheus configuration file.
81-
82-
### Scraping endpoints
83-
84-
The exporter uses a RESTful API schema to provide Prometheus scraping endpoints.
84+
Authentication is used by the exporter as the mechanism to cross-authenticate to the scraped appliance; therefore, for each array, it is required to provide the REST API token for an account that has a 'readonly' role. The api-token must be provided in the HTTP request using the HTTP Authorization header of type 'Bearer'. If Prometheus is used, this is achieved by specifying the api-token value as the authorization parameter of the specific job in the Prometheus configuration file.
8585

8686

8787
URL | Description
@@ -99,4 +99,3 @@ Depending on the target array, scraping for the whole set of metrics could resul
9999
### Prometheus configuration examples
100100

101101
The [examples](examples) directory provides an example of deployment of a Prometheus Grafana stack on k8s that can be used as the starting point to build your own solution.
102-

setup.cfg

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -51,8 +51,7 @@ install_requires =
5151
importlib-metadata; python_version<"3.8"
5252
sanic
5353
prometheus-client
54-
# temporary workaround for Pure Python SDK
55-
py-pure-client @ git+https://github.com/genegr/py-pure-client.git
54+
py-pure-client
5655

5756
[options.packages.find]
5857
where = src

src/pure_fa_openmetrics_exporter/flasharray_collector/flasharray_metrics/array_events_metrics.py

Lines changed: 7 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -5,25 +5,23 @@ class ArrayEventsMetrics():
55
Base class for FlashArray Prometheus events metrics
66
"""
77
def __init__(self, fa_client):
8-
self.open_events = None
98
self.alerts = fa_client.alerts()
10-
11-
def _open_events(self):
12-
"""
13-
Create a metric of gauge type for the open alerts:
14-
critical, warning and info, with the severity as label.
15-
"""
169
self.open_events = GaugeMetricFamily('purefa_alerts_open',
1710
'Open alert events',
1811
labels=['severity',
1912
'component_type',
2013
'component_name'])
21-
14+
15+
def _build_metrics(self):
16+
cnt_e = 0
2217
for a in self.alerts:
2318
self.open_events.add_metric([a.severity,
2419
a.component_type,
2520
a.component_name], 1.0)
21+
cnt_e += 1
22+
if cnt_e == 0 :
23+
self.open_events = None
2624

2725
def get_metrics(self):
28-
self._open_events()
26+
self._build_metrics()
2927
yield self.open_events

src/pure_fa_openmetrics_exporter/flasharray_collector/flasharray_metrics/array_hardware_metrics.py

Lines changed: 18 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -5,12 +5,7 @@ class ArrayHardwareMetrics:
55
Base class for FlashArray OpenMetrics hardware metrics
66
"""
77
def __init__(self, fa_client):
8-
self.hardware_status = None
9-
self.temperature = None
10-
self.power = None
118
self.hardware = fa_client.hardware()
12-
13-
def _hardware(self):
149
self.hardware_status = GaugeMetricFamily(
1510
'purefa_hardware_health',
1611
'FlashArray hardware component health status',
@@ -19,31 +14,41 @@ def _hardware(self):
1914
self.temperature = GaugeMetricFamily(
2015
'purefa_hardware_temperature_celsius',
2116
'FlashArray hardware temperature sensors',
22-
labels=['name',
23-
'type'])
17+
labels=['name', 'type'])
2418

2519
self.power = GaugeMetricFamily(
2620
'purefa_hardware_power_volts',
2721
'FlashArray hardware power supply voltage',
2822
labels=['name', 'type'])
2923

30-
24+
def _build_metrics(self):
25+
cnt_s = 0
26+
cnt_t = 0
27+
cnt_p = 0
3128
for comp in self.hardware:
3229
if (comp.status == 'not_installed'):
3330
continue
3431
status = 1 if comp.status == 'ok' else 0
3532
self.hardware_status.add_metric([comp.name, comp.type], status)
36-
33+
cnt_s += 1
3734
if comp.type == 'temp_sensor':
38-
self.power.add_metric([comp.name, comp.type],
35+
self.temperature.add_metric([comp.name, comp.type],
3936
float(comp.temperature))
37+
cnt_t += 1
4038
elif comp.type == 'power_supply':
4139
if comp.voltage is not None:
42-
self.power.add_metric([comp.name, comp.type],
43-
float(comp.voltage))
40+
self.power.add_metric([comp.name, comp.type],
41+
float(comp.voltage))
42+
cnt_p += 1
43+
if cnt_s == 0:
44+
self.hardware_status = None
45+
if cnt_t == 0:
46+
self.temperature = None
47+
if cnt_p == 0:
48+
self.power = None
4449

4550
def get_metrics(self):
46-
self._hardware()
51+
self._build_metrics()
4752
yield self.hardware_status
4853
yield self.temperature
4954
yield self.power

src/pure_fa_openmetrics_exporter/flasharray_collector/flasharray_metrics/array_info_metrics.py

Lines changed: 9 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -5,22 +5,18 @@ class ArrayInfoMetrics():
55
Base class for FlashArray OpenMetrics array info
66
"""
77
def __init__(self, fa_client):
8-
self.array_info = None
98
self.array = fa_client.arrays()[0]
9+
self.array_info = InfoMetricFamily('purefa',
10+
'FlashArray system information',
11+
labels = [])
1012

11-
def _array(self):
12-
"""Assemble a simple information metric defining the scraped system."""
13-
13+
def _build_metrics(self):
1414
array = self.array['array']
15-
self.array_info = InfoMetricFamily(
16-
'purefa',
17-
'FlashArray system information',
18-
value={'array_name': array.name,
19-
'system_id': array.id,
20-
'os': array.os,
21-
'version': array.version
22-
})
15+
self.array_info.add_metric([], {'array_name': array.name,
16+
'system_id': array.id,
17+
'os' : array.os,
18+
'version': array.version})
2319

2420
def get_metrics(self):
25-
self._array()
21+
self._build_metrics()
2622
yield self.array_info
Lines changed: 87 additions & 80 deletions
Original file line numberDiff line numberDiff line change
@@ -1,97 +1,104 @@
11
from prometheus_client.core import GaugeMetricFamily
22

3+
protocols = ['all', 'nfs', 'smb']
4+
5+
performance_latency_kpis = ['local_queue_usec_per_op',
6+
'queue_usec_per_read_op',
7+
'queue_usec_per_write_op',
8+
'queue_usec_per_mirrored_write_op',
9+
'san_usec_per_read_op',
10+
'san_usec_per_write_op',
11+
'san_usec_per_mirrored_write_op',
12+
'service_usec_per_mirrored_write_op',
13+
'service_usec_per_read_op',
14+
'service_usec_per_write_op',
15+
'usec_per_read_op',
16+
'usec_per_write_op',
17+
'usec_per_mirrored_write_op',
18+
'usec_per_other_op']
19+
20+
performance_bandwidth_kpis = ['read_bytes_per_sec',
21+
'write_bytes_per_sec',
22+
'mirrored_write_bytes_per_sec']
23+
24+
performance_iops_kpis = ['reads_per_sec',
25+
'writes_per_sec',
26+
'mirrored_writes_per_sec',
27+
'others_per_sec']
28+
29+
performance_avg_size_kpis = ['bytes_per_read',
30+
'bytes_per_write',
31+
'bytes_per_op']
32+
333
class ArrayPerformanceMetrics():
434
"""
535
Base class for FlashArray OpenMetrics array performance metrics
636
"""
737

838
def __init__(self, fa_client):
9-
self.latency = None
10-
self.iops = None
11-
self.bandwidth = None
12-
self.avg_bsz = None
1339
self.array = fa_client.arrays()[0]
14-
15-
def _performance(self):
16-
"""
17-
Create array performance metrics of gauge type.
18-
"""
1940
self.latency = GaugeMetricFamily(
20-
'purefa_array_performance_latency_usec',
21-
'FlashArray array latency',
22-
labels=['protocol', 'dimension'])
23-
24-
self.iops = GaugeMetricFamily('purefa_array_performance_iops',
25-
'FlashArray IOPS',
26-
labels=['protocol', 'dimension'])
27-
41+
'purefa_array_performance_latency',
42+
'FlashArray array latency',
43+
labels=['protocol', 'dimension'],
44+
unit='usec')
2845
self.bandwidth = GaugeMetricFamily(
29-
'purefa_array_performance_bandwidth_bytes',
30-
'FlashArray bandwidth',
31-
labels=['protocol', 'dimension'])
32-
33-
self.avg_bsz = GaugeMetricFamily(
34-
'purefa_array_performance_average_block_bytes',
35-
'FlashArray array average block size',
36-
labels=['protocol', 'dimension'])
37-
38-
array_perf = self.array['performance']
39-
for p in ['all', 'nfs', 'smb']:
40-
self.latency.add_metric([p, 'local_queue_usec_per_op'],
41-
array_perf[p].local_queue_usec_per_op or 0)
42-
self.latency.add_metric([p, 'queue_usec_per_read_op'],
43-
array_perf[p].queue_usec_per_read_op or 0)
44-
self.latency.add_metric([p, 'queue_usec_per_write_op'],
45-
array_perf[p].queue_usec_per_write_op or 0)
46-
self.latency.add_metric([p, 'queue_usec_per_mirrored_write_op'],
47-
array_perf[p].queue_usec_per_mirrored_write_op or 0)
48-
self.latency.add_metric([p, 'san_usec_per_read_op'],
49-
array_perf[p].san_usec_per_read_op or 0)
50-
self.latency.add_metric([p, 'san_usec_per_write_op'],
51-
array_perf[p].san_usec_per_write_op or 0)
52-
self.latency.add_metric([p, 'san_usec_per_mirrored_write_op'],
53-
array_perf[p].san_usec_per_mirrored_write_op or 0)
54-
self.latency.add_metric([p, 'service_usec_per_mirrored_write_op'],
55-
array_perf[p].service_usec_per_mirrored_write_op or 0)
56-
self.latency.add_metric([p, 'service_usec_per_read_op'],
57-
array_perf[p].service_usec_per_read_op or 0)
58-
self.latency.add_metric([p, 'service_usec_per_write_op'],
59-
array_perf[p].service_usec_per_write_op or 0)
60-
self.latency.add_metric([p, 'usec_per_read_op'],
61-
array_perf[p].usec_per_read_op or 0)
62-
self.latency.add_metric([p, 'usec_per_write_op'],
63-
array_perf[p].usec_per_write_op or 0)
64-
self.latency.add_metric([p, 'usec_per_mirrored_write_op'],
65-
array_perf[p].usec_per_mirrored_write_op or 0)
66-
self.latency.add_metric([p, 'usec_per_other_op'],
67-
array_perf[p].usec_per_other_op or 0)
68-
69-
self.bandwidth.add_metric([p, 'read_bytes_per_sec'],
70-
array_perf[p].read_bytes_per_sec or 0)
71-
self.bandwidth.add_metric([p, 'write_bytes_per_sec'],
72-
array_perf[p].write_bytes_per_sec or 0)
73-
self.bandwidth.add_metric([p, 'mirrored_write_bytes_per_sec'],
74-
array_perf[p].mirrored_write_bytes_per_sec or 0)
75-
76-
self.iops.add_metric([p, 'reads_per_sec'],
77-
array_perf[p].reads_per_sec or 0)
78-
self.iops.add_metric([p, 'writes_per_sec'],
79-
array_perf[p].writes_per_sec or 0)
80-
self.iops.add_metric([p, 'mirrored_writes_per_sec'],
81-
array_perf[p].mirrored_writes_per_sec or 0)
82-
self.iops.add_metric([p, 'others_per_sec'],
83-
array_perf[p].others_per_sec or 0)
46+
'purefa_array_performance_bandwidth',
47+
'FlashArray bandwidth',
48+
labels=['protocol', 'dimension'],
49+
unit='bytes')
50+
self.iops = GaugeMetricFamily(
51+
'purefa_array_performance_iops',
52+
'FlashArray IOPS',
53+
labels=['protocol', 'dimension'])
54+
self.avg_size = GaugeMetricFamily(
55+
'purefa_array_performance_average_block',
56+
'FlashArray array average block size',
57+
labels=['protocol', 'dimension'],
58+
unit='bytes')
8459

85-
self.avg_bsz.add_metric([p, 'bytes_per_read'],
86-
array_perf[p].bytes_per_read or 0)
87-
self.avg_bsz.add_metric([p, 'bytes_per_write'],
88-
array_perf[p].bytes_per_write or 0)
89-
self.avg_bsz.add_metric([p, 'bytes_per_op'],
90-
array_perf[p].bytes_per_op or 0)
60+
def _build_metrics(self):
61+
cnt_l = 0
62+
cnt_b = 0
63+
cnt_i = 0
64+
cnt_a = 0
65+
for p in protocols:
66+
for k in performance_latency_kpis:
67+
v = getattr(self.array['performance'][p], k)
68+
if v is None:
69+
continue
70+
cnt_l += 1
71+
self.latency.add_metric([p, k], v)
72+
for k in performance_bandwidth_kpis:
73+
v = getattr(self.array['performance'][p], k)
74+
if v is None:
75+
continue
76+
cnt_b += 1
77+
self.bandwidth.add_metric([p, k], v)
78+
for k in performance_iops_kpis:
79+
v = getattr(self.array['performance'][p], k)
80+
if v is None:
81+
continue
82+
cnt_i += 1
83+
self.iops.add_metric([p, k], v)
84+
for k in performance_avg_size_kpis:
85+
v = getattr(self.array['performance'][p], k)
86+
if v is None:
87+
continue
88+
cnt_a += 1
89+
self.avg_size.add_metric([p, k], v)
90+
if cnt_l == 0:
91+
self.latency = None
92+
if cnt_b == 0:
93+
self.bandwidth = None
94+
if cnt_i == 0:
95+
self.iops = None
96+
if cnt_a == 0:
97+
self.avg_size = None
9198

9299
def get_metrics(self):
93-
self._performance()
100+
self._build_metrics()
94101
yield self.latency
95102
yield self.bandwidth
96103
yield self.iops
97-
yield self.avg_bsz
104+
yield self.avg_size

0 commit comments

Comments
 (0)