Skip to content

Commit 147f128

Browse files
authored
Merge pull request #53 from snetsystems/release-1.23-snet-yongsik
Release 1.23 snet yongsik
2 parents cad1530 + 7fefb54 commit 147f128

File tree

13 files changed

+1456
-60
lines changed

13 files changed

+1456
-60
lines changed

plugins/inputs/all/all.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -146,7 +146,8 @@ import (
146146
_ "github.com/influxdata/telegraf/plugins/inputs/openstack"
147147
_ "github.com/influxdata/telegraf/plugins/inputs/opentelemetry"
148148
_ "github.com/influxdata/telegraf/plugins/inputs/openweathermap"
149-
_ "github.com/influxdata/telegraf/plugins/inputs/oracledb"
149+
150+
// _ "github.com/influxdata/telegraf/plugins/inputs/oracledb"
150151
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
151152
_ "github.com/influxdata/telegraf/plugins/inputs/pf"
152153
_ "github.com/influxdata/telegraf/plugins/inputs/pgbouncer"

plugins/inputs/kube_inventory/README.md

Lines changed: 36 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ resources:
1313
- pods (containers)
1414
- services
1515
- statefulsets
16+
- resourcequotas
1617

1718
Kubernetes is a fast moving project, with a new minor release every 3 months. As
1819
such, we will aim to maintain support only for versions that are supported by
@@ -61,7 +62,7 @@ avoid cardinality issues:
6162
## Optional Resources to exclude from gathering
6263
## Leave them blank to try to gather everything available.
6364
## Values can be - "daemonsets", deployments", "endpoints", "ingress", "nodes",
64-
## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets"
65+
## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets", "resourcequotas"
6566
# resource_exclude = [ "deployments", "nodes", "statefulsets" ]
6667

6768
## Optional Resources to include when gathering
@@ -97,7 +98,6 @@ list "persistentvolumes" and "nodes". You will then need to make an [aggregated
9798
ClusterRole][agg] that will eventually be bound to a user or group.
9899

99100
[rbac]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/
100-
101101
[agg]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles
102102

103103
```yaml
@@ -107,11 +107,11 @@ apiVersion: rbac.authorization.k8s.io/v1
107107
metadata:
108108
name: influx:cluster:viewer
109109
labels:
110-
rbac.authorization.k8s.io/aggregate-view-telegraf: "true"
110+
rbac.authorization.k8s.io/aggregate-view-telegraf: 'true'
111111
rules:
112-
- apiGroups: [""]
113-
resources: ["persistentvolumes", "nodes"]
114-
verbs: ["get", "list"]
112+
- apiGroups: ['']
113+
resources: ['persistentvolumes', 'nodes']
114+
verbs: ['get', 'list']
115115

116116
---
117117
kind: ClusterRole
@@ -121,9 +121,9 @@ metadata:
121121
aggregationRule:
122122
clusterRoleSelectors:
123123
- matchLabels:
124-
rbac.authorization.k8s.io/aggregate-view-telegraf: "true"
124+
rbac.authorization.k8s.io/aggregate-view-telegraf: 'true'
125125
- matchLabels:
126-
rbac.authorization.k8s.io/aggregate-to-view: "true"
126+
rbac.authorization.k8s.io/aggregate-to-view: 'true'
127127
rules: [] # Rules are automatically filled in by the controller manager.
128128
```
129129
@@ -149,7 +149,7 @@ subjects:
149149
## Quickstart in k3s
150150
151151
When monitoring [k3s](https://k3s.io) server instances one can re-use already
152-
generated administration token. This is less secure than using the more
152+
generated administration token. This is less secure than using the more
153153
restrictive dedicated telegraf user but more convenient to set up.
154154
155155
```console
@@ -170,6 +170,7 @@ tls_key = "/run/telegraf-kubernetes-key"
170170
## Metrics
171171

172172
- kubernetes_daemonset
173+
173174
- tags:
174175
- daemonset_name
175176
- namespace
@@ -185,6 +186,7 @@ tls_key = "/run/telegraf-kubernetes-key"
185186
- updated_number_scheduled
186187

187188
- kubernetes_deployment
189+
188190
- tags:
189191
- deployment_name
190192
- namespace
@@ -195,6 +197,7 @@ tls_key = "/run/telegraf-kubernetes-key"
195197
- created
196198

197199
- kubernetes_endpoints
200+
198201
- tags:
199202
- endpoint_name
200203
- namespace
@@ -210,6 +213,7 @@ tls_key = "/run/telegraf-kubernetes-key"
210213
- port
211214

212215
- kubernetes_ingress
216+
213217
- tags:
214218
- ingress_name
215219
- namespace
@@ -225,6 +229,7 @@ tls_key = "/run/telegraf-kubernetes-key"
225229
- tls
226230

227231
- kubernetes_node
232+
228233
- tags:
229234
- node_name
230235
- fields:
@@ -238,6 +243,7 @@ tls_key = "/run/telegraf-kubernetes-key"
238243
- allocatable_pods
239244

240245
- kubernetes_persistentvolume
246+
241247
- tags:
242248
- pv_name
243249
- phase
@@ -246,6 +252,7 @@ tls_key = "/run/telegraf-kubernetes-key"
246252
- phase_type (int, [see below](#pv-phase_type))
247253

248254
- kubernetes_persistentvolumeclaim
255+
249256
- tags:
250257
- pvc_name
251258
- namespace
@@ -256,6 +263,7 @@ tls_key = "/run/telegraf-kubernetes-key"
256263
- phase_type (int, [see below](#pvc-phase_type))
257264

258265
- kubernetes_pod_container
266+
259267
- tags:
260268
- container_name
261269
- namespace
@@ -277,6 +285,7 @@ tls_key = "/run/telegraf-kubernetes-key"
277285
- resource_limits_memory_bytes
278286

279287
- kubernetes_service
288+
280289
- tags:
281290
- service_name
282291
- namespace
@@ -292,6 +301,7 @@ tls_key = "/run/telegraf-kubernetes-key"
292301
- target_port
293302

294303
- kubernetes_statefulset
304+
295305
- tags:
296306
- statefulset_name
297307
- namespace
@@ -306,6 +316,22 @@ tls_key = "/run/telegraf-kubernetes-key"
306316
- spec_replicas
307317
- observed_generation
308318

319+
- kubernetes_resourcequota
320+
- tags:
321+
- resource
322+
- namespace
323+
- fields:
324+
- hard_cpu_cores_limit
325+
- hard_cpu_cores_request
326+
- hard_memory_bytes_limit
327+
- hard_memory_bytes_request
328+
- hard_storage_bytes_request
329+
- used_cpu_cores_limit
330+
- used_cpu_cores_request
331+
- used_memory_bytes_limit
332+
- used_memory_bytes_request
333+
- used_storage_bytes_request
334+
309335
### pv `phase_type`
310336

311337
The persistentvolume "phase" is saved in the `phase` tag with a correlated
@@ -345,6 +371,7 @@ kubernetes_pod,namespace=default,node_name=ip-172-17-0-2.internal,pod_name=tick1
345371
kubernetes_service,cluster_ip=172.29.61.80,namespace=redis-cache-0001,port_name=redis,port_protocol=TCP,selector_app=myapp,selector_io.kompose.service=redis,selector_role=slave,service_name=redis-slave created=1588690034000000000i,generation=0i,port=6379i,target_port=0i 1547597616000000000
346372
kubernetes_pod_container,container_name=telegraf,namespace=default,node_name=ip-172-17-0-2.internal,node_selector_node-role.kubernetes.io/compute=true,pod_name=tick1,phase=Running,state=running,readiness=ready resource_requests_cpu_units=0.1,resource_limits_memory_bytes=524288000,resource_limits_cpu_units=0.5,restarts_total=0i,state_code=0i,state_reason="",phase_reason="",resource_requests_memory_bytes=524288000 1547597616000000000
347373
kubernetes_statefulset,namespace=default,selector_select1=s1,statefulset_name=etcd replicas_updated=3i,spec_replicas=3i,observed_generation=1i,created=1544101669000000000i,generation=1i,replicas=3i,replicas_current=3i,replicas_ready=3i 1547597616000000000
374+
kubernetes_resourcequota,host=S2100113,namespace=test-prj0,resource=default-resource-quota hard_cpu_cores_limit=16i,hard_cpu_cores_request=16i,hard_memory_bytes_limit=34359738368i,hard_memory_bytes_request=34359738368i,hard_storage_bytes_request=107374182400i,used_cpu_cores_limit=1i,used_cpu_cores_request=1i,used_memory_bytes_limit=2684354560i,used_memory_bytes_request=2684354560i,used_storage_bytes_request=3221225472i 1683003895000000000
348375
```
349376

350377
[metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering

plugins/inputs/kube_inventory/client.go

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -110,3 +110,9 @@ func (c *client) getStatefulSets(ctx context.Context) (*appsv1.StatefulSetList,
110110
defer cancel()
111111
return c.AppsV1().StatefulSets(c.namespace).List(ctx, metav1.ListOptions{})
112112
}
113+
114+
func (c *client) getResourceQuotas(ctx context.Context) (*corev1.ResourceQuotaList, error) {
115+
ctx, cancel := context.WithTimeout(ctx, c.timeout)
116+
defer cancel()
117+
return c.CoreV1().ResourceQuotas(c.namespace).List(ctx, metav1.ListOptions{})
118+
}

plugins/inputs/kube_inventory/kube_inventory.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -113,6 +113,7 @@ var availableCollectors = map[string]func(ctx context.Context, acc telegraf.Accu
113113
"statefulsets": collectStatefulSets,
114114
"persistentvolumes": collectPersistentVolumes,
115115
"persistentvolumeclaims": collectPersistentVolumeClaims,
116+
"resourcequotas": collectResourceQuotas,
116117
}
117118

118119
func atoi(s string) int64 {
@@ -160,6 +161,7 @@ var (
160161
podContainerMeasurement = "kubernetes_pod_container"
161162
serviceMeasurement = "kubernetes_service"
162163
statefulSetMeasurement = "kubernetes_statefulset"
164+
resourcequotaMeasurement = "kubernetes_resourcequota"
163165
)
164166

165167
func init() {
Lines changed: 60 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,60 @@
1+
package kube_inventory
2+
3+
import (
4+
"context"
5+
6+
corev1 "k8s.io/api/core/v1"
7+
8+
"github.com/influxdata/telegraf"
9+
)
10+
11+
func collectResourceQuotas(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) {
12+
list, err := ki.client.getResourceQuotas(ctx)
13+
if err != nil {
14+
acc.AddError(err)
15+
return
16+
}
17+
for _, i := range list.Items {
18+
ki.gatherResourceQuota(i, acc)
19+
}
20+
}
21+
22+
func (ki *KubernetesInventory) gatherResourceQuota(r corev1.ResourceQuota, acc telegraf.Accumulator) {
23+
fields := map[string]interface{}{}
24+
tags := map[string]string{
25+
"resource": r.Name,
26+
"namespace": r.Namespace,
27+
}
28+
29+
for resourceName, val := range r.Status.Hard {
30+
switch resourceName {
31+
case "limits.cpu":
32+
fields["hard_cpu_cores_limit"] = ki.convertQuantity(val.String(), 1)
33+
case "limits.memory":
34+
fields["hard_memory_bytes_limit"] = ki.convertQuantity(val.String(), 1)
35+
case "requests.cpu":
36+
fields["hard_cpu_cores_request"] = ki.convertQuantity(val.String(), 1)
37+
case "requests.memory":
38+
fields["hard_memory_bytes_request"] = ki.convertQuantity(val.String(), 1)
39+
case "requests.storage":
40+
fields["hard_storage_bytes_request"] = ki.convertQuantity(val.String(), 1)
41+
}
42+
}
43+
44+
for resourceName, val := range r.Status.Used {
45+
switch resourceName {
46+
case "limits.cpu":
47+
fields["used_cpu_cores_limit"] = ki.convertQuantity(val.String(), 1)
48+
case "limits.memory":
49+
fields["used_memory_bytes_limit"] = ki.convertQuantity(val.String(), 1)
50+
case "requests.cpu":
51+
fields["used_cpu_cores_request"] = ki.convertQuantity(val.String(), 1)
52+
case "requests.memory":
53+
fields["used_memory_bytes_request"] = ki.convertQuantity(val.String(), 1)
54+
case "requests.storage":
55+
fields["used_storage_bytes_request"] = ki.convertQuantity(val.String(), 1)
56+
}
57+
}
58+
59+
acc.AddFields(resourcequotaMeasurement, fields, tags)
60+
}
Lines changed: 121 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,121 @@
1+
package kube_inventory
2+
3+
import (
4+
"testing"
5+
"time"
6+
7+
corev1 "k8s.io/api/core/v1"
8+
"k8s.io/apimachinery/pkg/api/resource"
9+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
10+
11+
"github.com/influxdata/telegraf"
12+
"github.com/influxdata/telegraf/testutil"
13+
"github.com/stretchr/testify/require"
14+
)
15+
16+
func TestResourceQuota(t *testing.T) {
17+
cli := &client{}
18+
now := time.Now()
19+
20+
tests := []struct {
21+
name string
22+
handler *mockHandler
23+
output []telegraf.Metric
24+
hasError bool
25+
}{
26+
{
27+
name: "no ressourcequota",
28+
handler: &mockHandler{
29+
responseMap: map[string]interface{}{
30+
"/resourcequotas/": corev1.ResourceQuotaList{},
31+
},
32+
},
33+
output: []telegraf.Metric{},
34+
hasError: false,
35+
},
36+
{
37+
name: "collect resourceqota",
38+
handler: &mockHandler{
39+
responseMap: map[string]interface{}{
40+
"/resourcequotas/": corev1.ResourceQuotaList{
41+
Items: []corev1.ResourceQuota{
42+
{
43+
Status: corev1.ResourceQuotaStatus{
44+
Hard: corev1.ResourceList{
45+
"limits.cpu": resource.MustParse("16"),
46+
"limits.memory": resource.MustParse("34359738368"),
47+
"requests.cpu": resource.MustParse("16"),
48+
"requests.memory": resource.MustParse("34359738368"),
49+
"requests.storage": resource.MustParse("107374182400"),
50+
},
51+
Used: corev1.ResourceList{
52+
"limits.cpu": resource.MustParse("1"),
53+
"limits.memory": resource.MustParse("1610612736"),
54+
"requests.cpu": resource.MustParse("1"),
55+
"requests.memory": resource.MustParse("1610612736"),
56+
"requests.storage": resource.MustParse("1073741824"),
57+
},
58+
},
59+
ObjectMeta: metav1.ObjectMeta{
60+
Generation: 11232,
61+
Namespace: "ns1",
62+
Name: "rs1",
63+
Labels: map[string]string{
64+
"lab1": "v1",
65+
"lab2": "v2",
66+
},
67+
CreationTimestamp: metav1.Time{Time: now},
68+
},
69+
},
70+
},
71+
},
72+
},
73+
},
74+
output: []telegraf.Metric{
75+
testutil.MustMetric(
76+
resourcequotaMeasurement,
77+
map[string]string{
78+
"resource": "rs1",
79+
"namespace": "ns1",
80+
},
81+
map[string]interface{}{
82+
"hard_cpu_cores_limit": int64(16),
83+
"hard_memory_bytes_limit": int64(34359738368),
84+
"hard_cpu_cores_request": int64(16),
85+
"hard_memory_bytes_request": int64(34359738368),
86+
"hard_storage_bytes_request": int64(107374182400),
87+
"used_cpu_cores_limit": int64(1),
88+
"used_memory_bytes_limit": int64(1610612736),
89+
"used_cpu_cores_request": int64(1),
90+
"used_memory_bytes_request": int64(1610612736),
91+
"used_storage_bytes_request": int64(1073741824),
92+
},
93+
time.Unix(0, 0),
94+
),
95+
},
96+
hasError: false,
97+
},
98+
}
99+
100+
for _, v := range tests {
101+
ks := &KubernetesInventory{
102+
client: cli,
103+
}
104+
acc := new(testutil.Accumulator)
105+
for _, quota := range ((v.handler.responseMap["/resourcequotas/"]).(corev1.ResourceQuotaList)).Items {
106+
ks.gatherResourceQuota(quota, acc)
107+
}
108+
109+
err := acc.FirstError()
110+
if v.hasError {
111+
require.Errorf(t, err, "%s failed, should have error", v.name)
112+
continue
113+
}
114+
115+
// No error case
116+
require.NoErrorf(t, err, "%s failed, err: %v", v.name, err)
117+
118+
require.Len(t, acc.Metrics, len(v.output))
119+
testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime())
120+
}
121+
}

0 commit comments

Comments
 (0)