Skip to content

Commit a54adaa

Browse files
authored
Merge pull request #171 from PureStorage-OpenConnect/170-consider-backporting-new-metrics---qos-metrics
Add QoS Metrics
2 parents 9979cbc + 1244198 commit a54adaa

File tree

8 files changed

+106
-40
lines changed

8 files changed

+106
-40
lines changed

internal/openmetrics-exporter/collector.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,7 @@ func Collector(ctx context.Context, metrics string, registry *prometheus.Registr
7373
if metrics == "all" || metrics == "volumes" {
7474
vols := faclient.GetVolumes()
7575
volperfcoll := NewVolumesPerformanceCollector(faclient, vols)
76-
volspacecoll := NewVolumesSpaceCollector(vols)
76+
volspacecoll := NewVolumesCollector(vols)
7777
registry.MustRegister(
7878
volperfcoll,
7979
volspacecoll,

internal/openmetrics-exporter/volumes_space_collector.go renamed to internal/openmetrics-exporter/volumes_collector.go

Lines changed: 38 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -6,23 +6,41 @@ import (
66
"github.com/prometheus/client_golang/prometheus"
77
)
88

9-
type VolumesSpaceCollector struct {
10-
ReductionDesc *prometheus.Desc
11-
SpaceDesc *prometheus.Desc
12-
Volumes *client.VolumesList
9+
type VolumesCollector struct {
10+
QoSBandwidthLimitDesc *prometheus.Desc
11+
QoSIPOSLimitDesc *prometheus.Desc
12+
ReductionDesc *prometheus.Desc
13+
SpaceDesc *prometheus.Desc
14+
Volumes *client.VolumesList
1315
}
1416

15-
func (c *VolumesSpaceCollector) Describe(ch chan<- *prometheus.Desc) {
17+
func (c *VolumesCollector) Describe(ch chan<- *prometheus.Desc) {
1618
prometheus.DescribeByCollect(c, ch)
1719
}
1820

19-
func (c *VolumesSpaceCollector) Collect(ch chan<- prometheus.Metric) {
21+
func (c *VolumesCollector) Collect(ch chan<- prometheus.Metric) {
2022
purenaa := "naa.624a9370"
2123
volumes := c.Volumes
2224
if len(volumes.Items) == 0 {
2325
return
2426
}
2527
for _, v := range volumes.Items {
28+
if v.QoS.BandwidthLimit != nil {
29+
ch <- prometheus.MustNewConstMetric(
30+
c.QoSBandwidthLimitDesc,
31+
prometheus.GaugeValue,
32+
float64(*v.QoS.BandwidthLimit),
33+
purenaa+v.Serial, v.Name, v.Pod.Name, v.VolumeGroup.Name,
34+
)
35+
}
36+
if v.QoS.IopsLimit != nil {
37+
ch <- prometheus.MustNewConstMetric(
38+
c.QoSIPOSLimitDesc,
39+
prometheus.GaugeValue,
40+
float64(*v.QoS.IopsLimit),
41+
purenaa+v.Serial, v.Name, v.Pod.Name, v.VolumeGroup.Name,
42+
)
43+
}
2644
if v.Space.DataReduction != nil {
2745
ch <- prometheus.MustNewConstMetric(
2846
c.ReductionDesc,
@@ -146,8 +164,20 @@ func (c *VolumesSpaceCollector) Collect(ch chan<- prometheus.Metric) {
146164
}
147165
}
148166

149-
func NewVolumesSpaceCollector(volumes *client.VolumesList) *VolumesSpaceCollector {
150-
return &VolumesSpaceCollector{
167+
func NewVolumesCollector(volumes *client.VolumesList) *VolumesCollector {
168+
return &VolumesCollector{
169+
QoSBandwidthLimitDesc: prometheus.NewDesc(
170+
"purefa_volume_qos_bandwidth_bytes_per_sec_limit",
171+
"FlashArray volume maximum QoS bandwidth limit in bytes per second",
172+
[]string{"naa_id", "name", "pod", "volume_group"},
173+
prometheus.Labels{},
174+
),
175+
QoSIPOSLimitDesc: prometheus.NewDesc(
176+
"purefa_volume_qos_iops_limit",
177+
"FlashArray volume QoS IOPs limit",
178+
[]string{"naa_id", "name", "pod", "volume_group"},
179+
prometheus.Labels{},
180+
),
151181
ReductionDesc: prometheus.NewDesc(
152182
"purefa_volume_space_data_reduction_ratio",
153183
"FlashArray volume space data reduction",

internal/openmetrics-exporter/volumes_space_collector_test.go renamed to internal/openmetrics-exporter/volumes_collector_test.go

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,12 @@ func TestVolumesSpaceCollector(t *testing.T) {
3535
e := endp[len(endp)-1]
3636
want := make(map[string]bool)
3737
for _, v := range volumes.Items {
38+
if v.QoS.BandwidthLimit != nil {
39+
want[fmt.Sprintf("label:{name:\"naa_id\" value:\"%s\"} label:{name:\"name\" value:\"%s\"} label:{name:\"pod\" value:\"%s\"} label:{name:\"volume_group\" value:\"%s\"} gauge:{value:%g}", purenaa+v.Serial, v.Name, v.Pod.Name, v.VolumeGroup.Name, float64(*v.QoS.BandwidthLimit))] = true
40+
}
41+
if v.QoS.IopsLimit != nil {
42+
want[fmt.Sprintf("label:{name:\"naa_id\" value:\"%s\"} label:{name:\"name\" value:\"%s\"} label:{name:\"pod\" value:\"%s\"} label:{name:\"volume_group\" value:\"%s\"} gauge:{value:%g}", purenaa+v.Serial, v.Name, v.Pod.Name, v.VolumeGroup.Name, float64(*v.QoS.IopsLimit))] = true
43+
}
3844
if v.Space.DataReduction != nil {
3945
want[fmt.Sprintf("label:{name:\"naa_id\" value:\"%s\"} label:{name:\"name\" value:\"%s\"} label:{name:\"pod\" value:\"%s\"} label:{name:\"volume_group\" value:\"%s\"} gauge:{value:%g}", purenaa+v.Serial, v.Name, v.Pod.Name, v.VolumeGroup.Name, *v.Space.DataReduction)] = true
4046
}
@@ -84,6 +90,6 @@ func TestVolumesSpaceCollector(t *testing.T) {
8490
defer server.Close()
8591
c := client.NewRestClient(e, "fake-api-token", "latest", "test-user-agent-string", "test-X-Request-Id-string", false, false)
8692
vl := c.GetVolumes()
87-
pc := NewVolumesSpaceCollector(vl)
93+
pc := NewVolumesCollector(vl)
8894
metricsCheck(t, pc, want)
8995
}

internal/openmetrics-exporter/volumes_performance_collector.go

Lines changed: 20 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
11
package collectors
22

3-
43
import (
4+
client "purestorage/fa-openmetrics-exporter/internal/rest-client"
5+
56
"github.com/prometheus/client_golang/prometheus"
6-
"purestorage/fa-openmetrics-exporter/internal/rest-client"
77
)
88

99
type VolumesPerformanceCollector struct {
@@ -103,6 +103,24 @@ func (c *VolumesPerformanceCollector) Collect(ch chan<- prometheus.Metric) {
103103
vp.ServiceUsecPerReadOpCacheReduction,
104104
c.NAAids[vp.Name], vp.Name, "service_usec_per_read_op_cache_reduction",
105105
)
106+
ch <- prometheus.MustNewConstMetric(
107+
c.LatencyDesc,
108+
prometheus.GaugeValue,
109+
float64(*vp.QosRateLimitUsecPerMirroredWriteOp),
110+
c.NAAids[vp.Name], vp.Name, "qos_rate_limit_usec_per_mirrored_write_op",
111+
)
112+
ch <- prometheus.MustNewConstMetric(
113+
c.LatencyDesc,
114+
prometheus.GaugeValue,
115+
float64(*vp.QosRateLimitUsecPerReadOp),
116+
c.NAAids[vp.Name], vp.Name, "qos_rate_limit_usec_per_read_op",
117+
)
118+
ch <- prometheus.MustNewConstMetric(
119+
c.LatencyDesc,
120+
prometheus.GaugeValue,
121+
float64(*vp.QosRateLimitUsecPerWriteOp),
122+
c.NAAids[vp.Name], vp.Name, "qos_rate_limit_usec_per_write_op",
123+
)
106124
ch <- prometheus.MustNewConstMetric(
107125
c.BandwidthDesc,
108126
prometheus.GaugeValue,

internal/openmetrics-exporter/volumes_performance_collector_test.go

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,9 @@ func TestVolumesPerformanceCollector(t *testing.T) {
5050
}
5151
want := make(map[string]bool)
5252
for _, p := range volumesperf.Items {
53+
want[fmt.Sprintf("label:{name:\"dimension\" value:\"qos_rate_limit_usec_per_mirrored_write_op\"} label:{name:\"naa_id\" value:\"%s\"} label:{name:\"name\" value:\"%s\"} gauge:{value:%g}", naaid[p.Name], p.Name, float64(*p.QosRateLimitUsecPerMirroredWriteOp))] = true
54+
want[fmt.Sprintf("label:{name:\"dimension\" value:\"qos_rate_limit_usec_per_read_op\"} label:{name:\"naa_id\" value:\"%s\"} label:{name:\"name\" value:\"%s\"} gauge:{value:%g}", naaid[p.Name], p.Name, float64(*p.QosRateLimitUsecPerReadOp))] = true
55+
want[fmt.Sprintf("label:{name:\"dimension\" value:\"qos_rate_limit_usec_per_write_op\"} label:{name:\"naa_id\" value:\"%s\"} label:{name:\"name\" value:\"%s\"} gauge:{value:%g}", naaid[p.Name], p.Name, float64(*p.QosRateLimitUsecPerWriteOp))] = true
5356
want[fmt.Sprintf("label:{name:\"dimension\" value:\"queue_usec_per_mirrored_write_op\"} label:{name:\"naa_id\" value:\"%s\"} label:{name:\"name\" value:\"%s\"} gauge:{value:%g}", naaid[p.Name], p.Name, p.QueueUsecPerMirroredWriteOp)] = true
5457
want[fmt.Sprintf("label:{name:\"dimension\" value:\"queue_usec_per_read_op\"} label:{name:\"naa_id\" value:\"%s\"} label:{name:\"name\" value:\"%s\"} gauge:{value:%g}", naaid[p.Name], p.Name, p.QueueUsecPerReadOp)] = true
5558
want[fmt.Sprintf("label:{name:\"dimension\" value:\"queue_usec_per_write_op\"} label:{name:\"naa_id\" value:\"%s\"} label:{name:\"name\" value:\"%s\"} gauge:{value:%g}", naaid[p.Name], p.Name, p.QueueUsecPerWriteOp)] = true

internal/rest-client/volumes.go

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
package client
22

33
type Qos struct {
4-
BandwidthLimit int `json:"bandwidth_limit"`
5-
IopsLimit int `json:"iops_limit"`
4+
BandwidthLimit *int64 `json:"bandwidth_limit"`
5+
IopsLimit *int64 `json:"iops_limit"`
66
}
77

88
type PriorityAdjustment struct {
@@ -29,6 +29,7 @@ type Volume struct {
2929
HostEncryptionKeyStatus string `json:"host_encryption_key_status"`
3030
PriorityAdjustment PriorityAdjustment `json:"priority_adjustment"`
3131
Provisioned int `json:"provisioned"`
32+
QoS Qos `json:"qos"`
3233
Serial string `json:"serial"`
3334
Space Space `json:"space"`
3435
TimeRemaining int `json:"time_remaining"`

internal/rest-client/volumes_performance.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,9 +9,9 @@ type VolumePerformance struct {
99
BytesPerWrite float64 `json:"bytes_per_write"`
1010
MirroredWriteBytesPerSec float64 `json:"mirrored_write_bytes_per_sec"`
1111
MirroredWritesPerSec float64 `json:"mirrored_writes_per_sec"`
12-
QosRateLimitUsecPerMirroredWriteOp float64 `json:"qos_rate_limit_usec_per_mirrored_write_op"`
13-
QosRateLimitUsecPerReadOp float64 `json:"qos_rate_limit_usec_per_read_op"`
14-
QosRateLimitUsecPerWriteOp float64 `json:"qos_rate_limit_usec_per_write_op"`
12+
QosRateLimitUsecPerMirroredWriteOp *int64 `json:"qos_rate_limit_usec_per_mirrored_write_op"`
13+
QosRateLimitUsecPerReadOp *int64 `json:"qos_rate_limit_usec_per_read_op"`
14+
QosRateLimitUsecPerWriteOp *int64 `json:"qos_rate_limit_usec_per_write_op"`
1515
QueueUsecPerMirroredWriteOp float64 `json:"queue_usec_per_mirrored_write_op"`
1616
QueueUsecPerReadOp float64 `json:"queue_usec_per_read_op"`
1717
QueueUsecPerWriteOp float64 `json:"queue_usec_per_write_op"`

0 commit comments

Comments (0)