Skip to content

Commit 45d47e8

Browse files
authored
refactor: reducing duplicated code (trickstercache#890)
* refactor: reducing duplicated code --------- Signed-off-by: Chris Randles <randles.chris@gmail.com>
1 parent f9b5114 commit 45d47e8

File tree

26 files changed

+388
-473
lines changed

26 files changed

+388
-473
lines changed

.golangci.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,8 @@ linters:
2727
disable:
2828
- errcheck
2929
settings:
30+
dupl:
31+
threshold: 150 # default is 100; have brought current codebase down to 75, but leaving default at 150 for now
3032
lll:
3133
line-length: 180
3234
staticcheck:

pkg/backends/clickhouse/model/marshal_csv.go

Lines changed: 11 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -82,41 +82,32 @@ func marshalTimeseriesXSV(w io.Writer, ds *dataset.DataSet,
8282
// at this point, we're going to write the TSV/CSV
8383
cw := csv.NewWriter(w)
8484
cw.Comma = rune(separator)
85-
if writeNames || writeTypes {
85+
86+
// Helper function to write a row with field data
87+
writeFieldRow := func(getValue func(timeseries.FieldDefinition) string) {
8688
row := make([]string, fieldCount)
8789
fd := tfd
88-
row[fd.OutputPosition] = fd.Name
90+
row[fd.OutputPosition] = getValue(fd)
8991
for _, fd = range tags {
9092
if fd.Name == tfd.Name {
9193
continue
9294
}
9395
if fd.OutputPosition > fieldCount {
9496
continue
9597
}
96-
row[fd.OutputPosition] = fd.Name
98+
row[fd.OutputPosition] = getValue(fd)
9799
}
98100
for _, fd = range vals {
99-
row[fd.OutputPosition] = fd.Name
101+
row[fd.OutputPosition] = getValue(fd)
100102
}
101103
cw.Write(row)
102104
}
105+
106+
if writeNames || writeTypes {
107+
writeFieldRow(func(fd timeseries.FieldDefinition) string { return fd.Name })
108+
}
103109
if writeTypes {
104-
row := make([]string, fieldCount)
105-
fd := tfd
106-
row[fd.OutputPosition] = fd.SDataType
107-
for _, fd = range tags {
108-
if fd.Name == tfd.Name {
109-
continue
110-
}
111-
if fd.OutputPosition > fieldCount {
112-
continue
113-
}
114-
row[fd.OutputPosition] = fd.SDataType
115-
}
116-
for _, fd = range vals {
117-
row[fd.OutputPosition] = fd.SDataType
118-
}
119-
cw.Write(row)
110+
writeFieldRow(func(fd timeseries.FieldDefinition) string { return fd.SDataType })
120111
}
121112
for _, s := range ds.Results[0].SeriesList {
122113
for _, p := range s.Points {

pkg/backends/clickhouse/url.go

Lines changed: 13 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -51,14 +51,9 @@ func (c *Client) SetExtent(r *http.Request, trq *timeseries.TimeRangeQuery,
5151
}
5252
}
5353

54-
func interpolateTimeQuery(template string, tfd timeseries.FieldDefinition,
55-
extent *timeseries.Extent,
56-
) string {
57-
var start, end, tStart, tEnd string
58-
59-
// tfd.DataType holds the database internal format for the timestamp used
60-
// when setting extents
61-
switch tfd.DataType {
54+
// formatTimestampValues formats start and end timestamps based on the data type
55+
func formatTimestampValues(dataType timeseries.FieldDataType, extent *timeseries.Extent) (start, end string) {
56+
switch dataType {
6257
case timeseries.DateTimeUnixMilli: // epoch millisecs
6358
start = strconv.FormatInt(extent.Start.UnixMilli(), 10)
6459
end = strconv.FormatInt(extent.End.UnixMilli(), 10)
@@ -72,22 +67,18 @@ func interpolateTimeQuery(template string, tfd timeseries.FieldDefinition,
7267
start = strconv.FormatInt(extent.Start.Unix(), 10)
7368
end = strconv.FormatInt(extent.End.Unix(), 10)
7469
}
70+
return start, end
71+
}
72+
73+
func interpolateTimeQuery(template string, tfd timeseries.FieldDefinition,
74+
extent *timeseries.Extent,
75+
) string {
76+
// tfd.DataType holds the database internal format for the timestamp used
77+
// when setting extents
78+
start, end := formatTimestampValues(tfd.DataType, extent)
7579

7680
// ProviderData1 holds the format of a secondary time field
77-
switch timeseries.FieldDataType(tfd.ProviderData1) {
78-
case timeseries.DateTimeUnixMilli: // epoch millisecs
79-
tStart = strconv.FormatInt(extent.Start.UnixMilli(), 10)
80-
tEnd = strconv.FormatInt(extent.End.UnixMilli(), 10)
81-
case timeseries.DateTimeUnixNano: // epoch nanosecs
82-
tStart = strconv.FormatInt(extent.Start.UnixNano(), 10)
83-
tEnd = strconv.FormatInt(extent.End.UnixNano(), 10)
84-
case timeseries.DateTimeSQL: // '2025-05-01 11:39:18'
85-
tStart = "'" + extent.Start.Format(sql.SQLDateTimeLayout) + "'"
86-
tEnd = "'" + extent.End.Format(sql.SQLDateTimeLayout) + "'"
87-
default: // epoch secs
88-
tStart = strconv.FormatInt(extent.Start.Unix(), 10)
89-
tEnd = strconv.FormatInt(extent.End.Unix(), 10)
90-
}
81+
tStart, tEnd := formatTimestampValues(timeseries.FieldDataType(tfd.ProviderData1), extent)
9182
trange := fmt.Sprintf("%s BETWEEN %s AND %s", tfd.Name, start, end)
9283
out := strings.NewReplacer(
9384
tkRange, trange,

pkg/backends/influxdb/flux/marshal_csv.go

Lines changed: 17 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -72,20 +72,31 @@ func marshalTimeseriesCSVWriter(ds *dataset.DataSet, frb *JSONRequestBody,
7272
return nil
7373
}
7474

75-
func printCsvDatatypeAnnotationRow(w *csv.Writer,
75+
// printCsvAnnotationRow is a generic helper function for printing CSV annotation rows
76+
func printCsvAnnotationRow(w *csv.Writer,
7677
fds timeseries.FieldDefinitions,
78+
annotationType string,
79+
getValue func(timeseries.FieldDefinition) string,
7780
) error {
7881
cells := make([]string, len(fds))
7982
for i, fd := range fds {
8083
if i == 0 {
81-
cells[i] = "#datatype"
84+
cells[i] = annotationType
8285
continue
8386
}
84-
cells[i] = fd.SDataType
87+
cells[i] = getValue(fd)
8588
}
8689
return w.Write(cells)
8790
}
8891

92+
func printCsvDatatypeAnnotationRow(w *csv.Writer,
93+
fds timeseries.FieldDefinitions,
94+
) error {
95+
return printCsvAnnotationRow(w, fds, "#datatype", func(fd timeseries.FieldDefinition) string {
96+
return fd.SDataType
97+
})
98+
}
99+
89100
func printCsvGroupAnnotationRow(w *csv.Writer,
90101
fds timeseries.FieldDefinitions,
91102
) error {
@@ -109,15 +120,9 @@ func printCsvGroupAnnotationRow(w *csv.Writer,
109120
func printCsvDefaultAnnotationRow(w *csv.Writer,
110121
fds timeseries.FieldDefinitions,
111122
) error {
112-
cells := make([]string, len(fds))
113-
for i, fd := range fds {
114-
if i == 0 {
115-
cells[i] = "#default"
116-
continue
117-
}
118-
cells[i] = fd.DefaultValue
119-
}
120-
return w.Write(cells)
123+
return printCsvAnnotationRow(w, fds, "#default", func(fd timeseries.FieldDefinition) string {
124+
return fd.DefaultValue
125+
})
121126
}
122127

123128
func printCsvHeaderRow(w *csv.Writer, fds timeseries.FieldDefinitions) error {

pkg/backends/prometheus/handler_labels.go

Lines changed: 1 addition & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -18,8 +18,6 @@ package prometheus
1818

1919
import (
2020
"net/http"
21-
"strconv"
22-
"time"
2321

2422
"github.com/trickstercache/trickster/v2/pkg/backends/prometheus/model"
2523
"github.com/trickstercache/trickster/v2/pkg/proxy/engines"
@@ -40,17 +38,7 @@ func (c *Client) LabelsHandler(w http.ResponseWriter, r *http.Request) {
4038
qp, _, _ := params.GetRequestValues(r)
4139

4240
// Round Start and End times down to top of most recent minute for cacheability
43-
if p := qp.Get(upStart); p != "" {
44-
if i, err := strconv.ParseInt(p, 10, 64); err == nil {
45-
qp.Set(upStart, strconv.FormatInt(time.Unix(i, 0).Truncate(time.Second*time.Duration(60)).Unix(), 10))
46-
}
47-
}
48-
49-
if p := qp.Get(upEnd); p != "" {
50-
if i, err := strconv.ParseInt(p, 10, 64); err == nil {
51-
qp.Set(upEnd, strconv.FormatInt(time.Unix(i, 0).Truncate(time.Second*time.Duration(60)).Unix(), 10))
52-
}
53-
}
41+
roundTimestampsToMinute(qp)
5442

5543
r.URL = u
5644
params.SetRequestValues(r, qp)

pkg/backends/prometheus/handler_series.go

Lines changed: 1 addition & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -18,8 +18,6 @@ package prometheus
1818

1919
import (
2020
"net/http"
21-
"strconv"
22-
"time"
2321

2422
"github.com/trickstercache/trickster/v2/pkg/backends/prometheus/model"
2523
"github.com/trickstercache/trickster/v2/pkg/proxy/engines"
@@ -40,17 +38,7 @@ func (c *Client) SeriesHandler(w http.ResponseWriter, r *http.Request) {
4038
qp, _, _ := params.GetRequestValues(r)
4139

4240
// Round Start and End times down to top of most recent minute for cacheability
43-
if p := qp.Get(upStart); p != "" {
44-
if i, err := strconv.ParseInt(p, 10, 64); err == nil {
45-
qp.Set(upStart, strconv.FormatInt(time.Unix(i, 0).Truncate(time.Second*time.Duration(60)).Unix(), 10))
46-
}
47-
}
48-
49-
if p := qp.Get(upEnd); p != "" {
50-
if i, err := strconv.ParseInt(p, 10, 64); err == nil {
51-
qp.Set(upEnd, strconv.FormatInt(time.Unix(i, 0).Truncate(time.Second*time.Duration(60)).Unix(), 10))
52-
}
53-
}
41+
roundTimestampsToMinute(qp)
5442

5543
r.URL = u
5644
params.SetRequestValues(r, qp)

pkg/backends/prometheus/model/alerts.go

Lines changed: 53 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -98,27 +98,54 @@ func (a *WFAlerts) Merge(results ...*WFAlerts) {
9898
a.Data.Alerts = alerts
9999
}
100100

101-
// MergeAndWriteAlerts merges the provided Responses into a single prometheus Alerts data object,
102-
// and writes it to the provided ResponseWriter
103-
func MergeAndWriteAlerts(w http.ResponseWriter, r *http.Request, rgs merge.ResponseGates) {
104-
var a *WFAlerts
101+
// Mergeable represents types that can merge with other instances of the same type
102+
type Mergeable[T any] interface {
103+
*T
104+
Merge(...*T)
105+
}
106+
107+
// MarshallerPtr represents pointer types that can start marshaling with an envelope
108+
type MarshallerPtr[T any] interface {
109+
*T
110+
StartMarshal(w io.Writer, httpStatus int)
111+
}
112+
113+
// unmarshalAndMerge is a generic function that handles the common pattern of
114+
// unmarshaling JSON responses and merging them using a Merge method
115+
func unmarshalAndMerge[T any, PT Mergeable[T]](
116+
r *http.Request,
117+
rgs merge.ResponseGates,
118+
errorType string,
119+
newInstance func() PT,
120+
) (PT, []int, *http.Response) {
121+
var result PT
105122
responses, bestResp := gatherResponses(r, rgs, func(rg *merge.ResponseGate) bool {
106-
a1 := &WFAlerts{}
107-
err := json.Unmarshal(rg.Body(), &a1)
123+
instance := newInstance()
124+
err := json.Unmarshal(rg.Body(), instance)
108125
if err != nil {
109-
logger.Error("alerts unmarshaling error",
126+
logger.Error(errorType+" unmarshaling error",
110127
logging.Pairs{"provider": providers.Prometheus, "detail": err.Error()})
111128
return false
112129
}
113-
if a == nil {
114-
a = a1
130+
if result == nil {
131+
result = instance
115132
} else {
116-
a.Merge(a1)
133+
result.Merge(instance)
117134
}
118135
return true
119136
})
137+
return result, responses, bestResp
138+
}
120139

121-
if a == nil || len(responses) == 0 {
140+
// handleMergeResult handles the common error pattern and starts marshaling if successful
141+
func handleMergeResult[T any, PT MarshallerPtr[T]](
142+
w http.ResponseWriter,
143+
r *http.Request,
144+
result PT,
145+
responses []int,
146+
bestResp *http.Response,
147+
) bool {
148+
if result == nil || len(responses) == 0 {
122149
if bestResp != nil {
123150
h := w.Header()
124151
headers.Merge(h, bestResp.Header)
@@ -127,12 +154,25 @@ func MergeAndWriteAlerts(w http.ResponseWriter, r *http.Request, rgs merge.Respo
127154
} else {
128155
failures.HandleBadGateway(w, r)
129156
}
130-
return
157+
return false
131158
}
132159

133160
sort.Ints(responses)
134161
statusCode := responses[0]
135-
a.StartMarshal(w, statusCode)
162+
result.StartMarshal(w, statusCode)
163+
return true
164+
}
165+
166+
// MergeAndWriteAlerts merges the provided Responses into a single prometheus Alerts data object,
167+
// and writes it to the provided ResponseWriter
168+
func MergeAndWriteAlerts(w http.ResponseWriter, r *http.Request, rgs merge.ResponseGates) {
169+
a, responses, bestResp := unmarshalAndMerge(r, rgs, "alerts", func() *WFAlerts {
170+
return &WFAlerts{}
171+
})
172+
173+
if !handleMergeResult(w, r, a, responses, bestResp) {
174+
return
175+
}
136176

137177
var sep string
138178
w.Write([]byte(`,"data":{"alerts":[`))

pkg/backends/prometheus/model/labels.go

Lines changed: 3 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -17,19 +17,11 @@
1717
package model
1818

1919
import (
20-
"encoding/json"
2120
"fmt"
22-
"io"
2321
"net/http"
24-
"slices"
2522
"sort"
2623
"strings"
2724

28-
"github.com/trickstercache/trickster/v2/pkg/backends/providers"
29-
"github.com/trickstercache/trickster/v2/pkg/observability/logging"
30-
"github.com/trickstercache/trickster/v2/pkg/observability/logging/logger"
31-
"github.com/trickstercache/trickster/v2/pkg/proxy/handlers/trickster/failures"
32-
"github.com/trickstercache/trickster/v2/pkg/proxy/headers"
3325
"github.com/trickstercache/trickster/v2/pkg/proxy/response/merge"
3426
"github.com/trickstercache/trickster/v2/pkg/util/sets"
3527
)
@@ -59,39 +51,14 @@ func (ld *WFLabelData) Merge(results ...*WFLabelData) {
5951
// MergeAndWriteLabelData merges the provided Responses into a single prometheus basic data object,
6052
// and writes it to the provided ResponseWriter
6153
func MergeAndWriteLabelData(w http.ResponseWriter, r *http.Request, rgs merge.ResponseGates) {
62-
var ld *WFLabelData
63-
responses, bestResp := gatherResponses(r, rgs, func(rg *merge.ResponseGate) bool {
64-
ld1 := &WFLabelData{}
65-
err := json.Unmarshal(rg.Body(), &ld1)
66-
if err != nil {
67-
logger.Error("labels unmarshaling error",
68-
logging.Pairs{"provider": providers.Prometheus, "detail": err.Error()})
69-
return false
70-
}
71-
if ld == nil {
72-
ld = ld1
73-
} else {
74-
ld.Merge(ld1)
75-
}
76-
return true
54+
ld, responses, bestResp := unmarshalAndMerge(r, rgs, "labels", func() *WFLabelData {
55+
return &WFLabelData{}
7756
})
7857

79-
if ld == nil || len(responses) == 0 {
80-
if bestResp != nil {
81-
h := w.Header()
82-
headers.Merge(h, bestResp.Header)
83-
w.WriteHeader(bestResp.StatusCode)
84-
io.Copy(w, bestResp.Body)
85-
} else {
86-
failures.HandleBadGateway(w, r)
87-
}
58+
if !handleMergeResult(w, r, ld, responses, bestResp) {
8859
return
8960
}
9061

91-
slices.Sort(responses)
92-
statusCode := responses[0]
93-
ld.StartMarshal(w, statusCode)
94-
9562
if len(ld.Data) > 0 {
9663
sort.Strings(ld.Data)
9764
fmt.Fprintf(w, `,"data":["%s"]`, strings.Join(ld.Data, `","`))

0 commit comments

Comments (0)