Skip to content

Commit 7b97ed4

Browse files
committed
0.9.2
Signed-off-by: peekjf72 <jfpik78@gmail.com>
1 parent d3fdedf commit 7b97ed4

File tree

17 files changed

+443
-284
lines changed

17 files changed

+443
-284
lines changed

.env_oracle

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
export GO111MODULE=on
22
export GOSUMDB=off
33
export GOFLAGS="-tags=oracle"
4-
export CGO_CFLAGS="-I/usr/include/oracle/19.23/client64/"
5-
export CGO_LDFLAGS="-L/usr/lib/oracle/19.23/client64/lib"
4+
export CGO_CFLAGS="-I/usr/include/oracle/19.18/client64/"
5+
export CGO_LDFLAGS="-L/usr/lib/oracle/19.18/client64/lib"
66
export PKG_CONFIG_PATH="${HOME}/go/src/sql_exporter"

CHANGELOG.md

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,15 @@ All notable changes to this project will be documented in this file.
33
This project adheres to [Semantic Versioning](http://semver.org/) and [Keep a changelog](https://github.com/olivierlacan/keep-a-changelog).
44

55
<!--next-version-placeholder-->
6+
## 0.9.2 / 2025-02-25
7+
- fixed: label set uppercase on config: converted to lower case, both in config and in query results.
8+
- fixed: panic when label name set for value is not found in query results.
9+
- fixed: allows spaces between the operator and the pattern in a target's collector list (e.g.: - ~ oracle_standard.*)
10+
- fixed: adds a log message when an error is found while parsing target or collector files.
11+
- added: new parameter for the /metric endpoint: collector. Allows collecting the target only for that collector list (&collector=name1&collector=nameX...)
12+
- fixed: now constant labels set for targets are used.
13+
- upgrade to go 1.24
14+
615
## 0.9.1 / 2024-12-14
716

817
- add for dynamic target a validation of the dsn format, so that invalid one are rejected.

README.md

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,12 @@ Here a small summary for linux:
6565
export LD_LIBRARY_PATH=$IBM_DB_HOME/lib:$LD_LIBRARY_PATH
6666
```
6767

68+
for RH 10, libcrypt.so.1 is required and may need to install libxcrypt-compat:
69+
70+
```bash
71+
dnf install libxcrypt-compat
72+
```
73+
6874
If you have root access you can set path to DB2 dynamic library via ld.so.conf:
6975

7076
```bash
@@ -402,3 +408,4 @@ The entrypoint "/metrics" accepts the following argument:
402408
* auth_name=&lt;auth_name&gt; the authentication parameters to use to connect with data_source_name
403409
* auth_key=&lt;auth_key&gt; the shared key used to decipher encrypted password.
404410
* health=&lt;true&gt; alter scraping behavior: only return the target connection status metrics; Use to determine if the connection to target is OK or not 1|0.
411+
* collector=&lt;collector_name&gt;[&amp;collector=&lt;coll_name2&gt;&amp;...] alters scraping behavior; collects the specified collector list instead of the default defined for the target; useful, for example, to build a specific job with custom metrics using a different scraping interval.

VERSION

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
0.9.1
1+
0.9.2

config.go

Lines changed: 31 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,7 @@ type Config struct {
5454
logger *slog.Logger
5555
// collectorName is a restriction: collectors set for a target are replaced by this only one.
5656
collectorName string
57+
collectors map[string]*CollectorConfig
5758

5859
// Catches all undefined fields and must be empty after parsing.
5960
XXX map[string]interface{} `yaml:",inline" json:"-"`
@@ -228,6 +229,10 @@ collectors:
228229
}
229230
tnames[t.Name] = nil
230231

232+
if t.ScrapeTimeout == 0 {
233+
t.ScrapeTimeout = c.Globals.ScrapeTimeout
234+
}
235+
231236
// skip targets with DSN "template"
232237
if t.DSN == "template" {
233238
continue
@@ -236,11 +241,9 @@ collectors:
236241
return fmt.Errorf("duplicate data source definition %q in target %+v", t.Name, t)
237242
}
238243
dsns[string(t.DSN)] = nil
239-
240-
if t.ScrapeTimeout == 0 {
241-
t.ScrapeTimeout = c.Globals.ScrapeTimeout
242-
}
243244
}
245+
// reserve collector ref;
246+
c.collectors = colls
244247

245248
return checkOverflow(c.XXX, "config")
246249
}
@@ -254,6 +257,15 @@ func (c *Config) FindAuthConfig(auth_name string) *AuthConfig {
254257
return auth
255258
}
256259

260+
func (c *Config) FindCollector(collector_name string) *CollectorConfig {
261+
var coll *CollectorConfig
262+
coll, found := c.collectors[collector_name]
263+
if !found {
264+
return nil
265+
}
266+
return coll
267+
}
268+
257269
type dumpConfig struct {
258270
Globals *GlobalConfig `yaml:"global" json:"global"`
259271
CollectorFiles []string `yaml:"collector_files,omitempty" json:"collector_files,omitempty"`
@@ -316,7 +328,7 @@ func (c *Config) loadCollectorFiles() error {
316328
cc := CollectorConfig{}
317329
err = yaml.Unmarshal(buf, &cc)
318330
if err != nil {
319-
return err
331+
return fmt.Errorf("collector '%s': %s", cf, err)
320332
}
321333
cc.fromFile = cf
322334
c.Collectors = append(c.Collectors, &cc)
@@ -355,7 +367,7 @@ func (c *Config) loadTargetsFiles(targetFilepath []string) error {
355367
target := TargetConfig{}
356368
err = yaml.Unmarshal(buf, &target)
357369
if err != nil {
358-
return err
370+
return fmt.Errorf("target file '%s': %s", tf, err)
359371
}
360372
target.setFromFile(tf)
361373
c.Targets = append(c.Targets, &target)
@@ -522,12 +534,16 @@ func (t *TargetConfig) buildDumpTargetconfig() *dumpTargetConfig {
522534
if t.targetType == TargetTypeDynamic {
523535
name = dsn
524536
}
537+
collectors := make([]string, len(t.collectors))
538+
for idx, coll := range t.collectors {
539+
collectors[idx] = coll.Name
540+
}
525541
return &dumpTargetConfig{
526542
Name: name,
527543
DSN: dsn,
528544
ScrapeTimeout: t.ScrapeTimeout,
529545
Labels: t.Labels,
530-
CollectorRefs: t.CollectorRefs,
546+
CollectorRefs: collectors,
531547
TargetsFiles: t.TargetsFiles,
532548
AuthName: t.AuthName,
533549
AuthConfig: t.AuthConfig,
@@ -560,6 +576,7 @@ func (t *TargetConfig) Clone(dsn string, auth_name string) (*TargetConfig, error
560576
new := &TargetConfig{
561577
Name: dsn,
562578
DSN: Secret(dsn),
579+
CollectorRefs: t.CollectorRefs,
563580
AuthConfig: t.AuthConfig,
564581
Labels: t.Labels,
565582
collectors: t.collectors,
@@ -734,10 +751,15 @@ func (m *MetricConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
734751
return fmt.Errorf("unsupported metric type: %s", m.TypeString)
735752
}
736753

754+
m.ValueLabel = strings.ToLower(m.ValueLabel)
755+
737756
// Check for duplicate key labels
738757
for i, li := range m.KeyLabels {
739758
checkLabel(li, "metric", m.Name)
759+
m.KeyLabels[i] = strings.ToLower(li)
760+
li = m.KeyLabels[i]
740761
for _, lj := range m.KeyLabels[i+1:] {
762+
741763
if li == lj {
742764
return fmt.Errorf("duplicate key label %q for metric %q", li, m.Name)
743765
}
@@ -879,14 +901,14 @@ func resolveCollectorRefs(
879901
for _, cref := range collectorRefs {
880902
// check if cref(a collector name) is a pattern or not
881903
if strings.HasPrefix(cref, "~") {
882-
pat := regexp.MustCompile(cref[1:])
904+
pat := regexp.MustCompile(strings.TrimSpace(cref[1:]))
883905
for c_name, c := range collectors {
884906
if pat.MatchString(c_name) {
885907
resolved = append(resolved, c)
886908
}
887909
}
888910
} else if strings.HasPrefix(cref, "!~") {
889-
pat := regexp.MustCompile(cref[2:])
911+
pat := regexp.MustCompile(strings.TrimSpace(cref[2:]))
890912
for c_name, c := range collectors {
891913
if !pat.MatchString(c_name) {
892914
resolved = append(resolved, c)
Lines changed: 210 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,210 @@
1+
collector_name: oracle_rman
2+
namespace: oracledb_rman
3+
metrics:
4+
#backup
5+
- metric_name: backup_duration_seconds
6+
type: gauge
7+
help: 'db backup duration in seconds by backup id and session type. labeled by start and end time'
8+
key_labels:
9+
- session_key
10+
- BACKUP_TYPE
11+
- start_time
12+
- end_time
13+
values:
14+
- backup_duration_seconds
15+
query_ref: rman_backup
16+
17+
- metric_name: backup_size
18+
type: gauge
19+
help: 'db backup size in bytes by backup id and session type'
20+
key_labels:
21+
- session_key
22+
- BACKUP_TYPE
23+
values:
24+
- out_size_bytes
25+
query_ref: rman_backup
26+
27+
- metric_name: backup_status
28+
type: gauge
29+
help: 'db backup status by backup id and session type: 0:FAILED - 1:COMPLETED - 2:COMPLETED WITH WARNINGS - 3:COMPLETED WITH ERRORS - 4:RUNNING WITH WARNINGS - 5:RUNNING WITH ERRORS'
30+
key_labels:
31+
- session_key
32+
- BACKUP_TYPE
33+
values:
34+
- status
35+
query_ref: rman_backup
36+
37+
# *****************
38+
- metric_name: backup_last_timestamp
39+
type: gauge
40+
help: 'unix timestamp in second of the last backup indexed by backup type'
41+
key_labels:
42+
- backup_type
43+
values:
44+
- last_start_ts
45+
query_ref: last_backup
46+
47+
# - metric_name: backup_last_duration_seconds
48+
# type: gauge
49+
# help: 'number of second since last backup indexed by backup type'
50+
# key_labels:
51+
# - backup_type
52+
# values:
53+
# - last_backup_seconds
54+
# query_ref: last_backup
55+
# with cte as (
56+
# select
57+
# max(START_TIME) last_bckup,
58+
# INPUT_TYPE as BACKUP_TYPE
59+
# from V$RMAN_BACKUP_JOB_DETAILS
60+
# group by INPUT_TYPE
61+
# )
62+
# select (sysdate - last_bckup) * 86400 as last_backup_seconds, BACKUP_TYPE
63+
# from cte
64+
65+
- metric_name: backup_last_size_bytes
66+
type: gauge
67+
help: 'number of bytes output in last backup indexed by backup type'
68+
key_labels:
69+
- session_key
70+
- backup_type
71+
values:
72+
- last_backup_bytes
73+
query_ref:
74+
last_backup
75+
# query:
76+
# with cte as (
77+
# select
78+
# SESSION_KEY,
79+
# RANK () OVER( PARTITION by INPUT_TYPE order by START_TIME DESC) as R_NUM
80+
# from V$RMAN_BACKUP_JOB_DETAILS
81+
# )
82+
# select OUTPUT_BYTES AS last_backup_bytes,
83+
# cte.SESSION_KEY,
84+
# INPUT_TYPE as BACKUP_TYPE
85+
# from cte
86+
# join V$RMAN_BACKUP_JOB_DETAILS bck on cte.SESSION_KEY = bck.SESSION_KEY
87+
# where cte.R_NUM = 1
88+
89+
- metric_name: backup_last_status
90+
type: gauge
91+
help: 'db backup status by backup id and session type: 0:FAILED - 1:COMPLETED - 2:COMPLETED WITH WARNINGS - 3:COMPLETED WITH ERRORS - 4:RUNNING WITH WARNINGS - 5:RUNNING WITH ERRORS'
92+
key_labels:
93+
- session_key
94+
- BACKUP_TYPE
95+
values:
96+
- last_status
97+
query_ref:
98+
last_backup
99+
100+
101+
102+
103+
104+
# backup status
105+
# 0 FAILED
106+
# 1 COMPLETED
107+
# 2 COMPLETED WITH WARNINGS
108+
# 3 COMPLETED WITH ERRORS
109+
# 4 RUNNING WITH WARNINGS
110+
# 5 RUNNING WITH ERRORS
111+
queries:
112+
# Populates rman_backup: last 10 backup from each type
113+
- query_name: rman_backup
114+
query: |
115+
with backups as (
116+
select
117+
SESSION_KEY,
118+
RANK () OVER( PARTITION by INPUT_TYPE order by START_TIME DESC) as R_NUM
119+
from V$RMAN_BACKUP_JOB_DETAILS
120+
)
121+
SELECT bck.session_key,
122+
input_type as BACKUP_TYPE,
123+
CASE status
124+
WHEN 'COMPLETED' THEN 1
125+
WHEN 'COMPLETED WITH WARNINGS' THEN 2
126+
WHEN 'COMPLETED WITH ERRORS' THEN 3
127+
WHEN 'FAILED' THEN 0
128+
WHEN 'RUNNING WITH WARNINGS' THEN 4
129+
WHEN 'RUNNING WITH ERRORS' THEN 5
130+
END AS status,
131+
TO_CHAR(start_time,'YYYY/MM/DD hh24:mi') AS start_time,
132+
TO_CHAR(end_time, 'YYYY/MM/DD hh24:mi') AS end_time,
133+
-- output_device_type,
134+
elapsed_seconds AS backup_duration_seconds,
135+
-- input_bytes AS in_size_bytes,
136+
output_bytes AS out_size_bytes
137+
from backups bck
138+
join V$RMAN_BACKUP_JOB_DETAILS rawb on bck.session_key = rawb.session_key
139+
where bck.R_NUM <= 10
140+
141+
# with bck_type as (
142+
# select distinct input_type as BACKUP_TYPE
143+
# from v$rman_backup_job_details
144+
# ),
145+
# backups as (
146+
# SELECT session_key,
147+
# input_type as BACKUP_TYPE,
148+
# CASE status
149+
# WHEN 'COMPLETED' THEN 1
150+
# WHEN 'COMPLETED WITH WARNINGS' THEN 2
151+
# WHEN 'COMPLETED WITH ERRORS' THEN 3
152+
# WHEN 'FAILED' THEN 0
153+
# WHEN 'RUNNING WITH WARNINGS' THEN 4
154+
# WHEN 'RUNNING WITH ERRORS' THEN 5
155+
# END AS status,
156+
# TO_CHAR(start_time,'YYYY/MM/DD hh24:mi') AS start_time,
157+
# TO_CHAR(end_time, 'YYYY/MM/DD hh24:mi') AS end_time,
158+
# output_device_type,
159+
# elapsed_seconds AS backup_duration_seconds,
160+
# input_bytes AS in_size_bytes,
161+
# output_bytes AS out_size_bytes
162+
# FROM v$rman_backup_job_details
163+
# WHERE
164+
# rownum <= 10
165+
# ORDER BY start_time desc
166+
# )
167+
# SELECT *
168+
# FROM bck_type bt
169+
# JOIN backups b on bt.backup_type = b.backup_type
170+
171+
# WHERE
172+
173+
- query_name: last_backup
174+
query: |
175+
with cte as (
176+
select
177+
SESSION_KEY,
178+
RANK () OVER( PARTITION by INPUT_TYPE order by START_TIME DESC) as R_NUM
179+
from V$RMAN_BACKUP_JOB_DETAILS
180+
)
181+
select
182+
cte.SESSION_KEY,
183+
INPUT_TYPE as BACKUP_TYPE,
184+
(cast( (cast(start_time as timestamp with time zone) at time zone 'UTC') as date)
185+
- date '1970-01-01') * 86400 as last_start_ts,
186+
-- (sysdate - start_time) * 86400 as last_backup_seconds
187+
OUTPUT_BYTES AS last_backup_bytes,
188+
CASE status
189+
WHEN 'COMPLETED' THEN 1
190+
WHEN 'COMPLETED WITH WARNINGS' THEN 2
191+
WHEN 'COMPLETED WITH ERRORS' THEN 3
192+
WHEN 'FAILED' THEN 0
193+
WHEN 'RUNNING WITH WARNINGS' THEN 4
194+
WHEN 'RUNNING WITH ERRORS' THEN 5
195+
END AS last_status
196+
from cte
197+
join V$RMAN_BACKUP_JOB_DETAILS bck on cte.SESSION_KEY = bck.SESSION_KEY
198+
where cte.R_NUM = 1
199+
200+
201+
# (cast( SYS_EXTRACT_UTC(FROM_TZ( cast(start_time as timestamp), 'Europe/Paris')) at time zone 'UTC' as dat
202+
# e) - date '1970-01-01') * 86400 as utc_ts,
203+
204+
205+
# select
206+
# ((cast(start_time as timestamp with time zone) at time zone 'UTC') - date '1970-01-01') * 86400 as utc_ts,
207+
# start_time
208+
# from V$RMAN_BACKUP_JOB_DETAILS
209+
# where SESSION_KEY = 8940
210+
# /

0 commit comments

Comments
 (0)