 	"context"
 	"time"
 
-	"github.com/cockroachdb/cockroach/pkg/jobs"
-	"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
 	"github.com/cockroachdb/cockroach/pkg/multitenant/tenantcapabilities"
 	"github.com/cockroachdb/cockroach/pkg/server/serverpb"
 	"github.com/cockroachdb/cockroach/pkg/settings"
@@ -73,13 +71,11 @@ type HotRangeGetter interface {
 	HotRangesV2(ctx context.Context, req *serverpb.HotRangesRequest) (*serverpb.HotRangesResponseV2, error)
 }
 
-// hotRangesLoggingScheduler is responsible for logging index usage stats
+// hotRangesLogger is responsible for logging hot ranges stats
 // on a scheduled interval.
-type hotRangesLoggingScheduler struct {
+type hotRangesLogger struct {
 	sServer     HotRangeGetter
 	st          *cluster.Settings
-	stopper     *stop.Stopper
-	job         *jobs.Job
 	multiTenant bool
 	lastLogged  time.Time
 }
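The HotRangeGetter interface above is the logger's only dependency on the status server, which makes it easy to substitute in tests. A minimal sketch of such a test double, assuming it lives in the same package as the logger (the canned response is illustrative, not part of this change):

```go
package structlogging // assumption: same package as the logger

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/server/serverpb"
)

// fakeHotRangeGetter is a hypothetical test double satisfying HotRangeGetter.
// It returns a fixed response instead of querying the status server.
type fakeHotRangeGetter struct {
	resp *serverpb.HotRangesResponseV2
}

// HotRangesV2 returns the canned response and never errors.
func (f fakeHotRangeGetter) HotRangesV2(
	_ context.Context, _ *serverpb.HotRangesRequest,
) (*serverpb.HotRangesResponseV2, error) {
	return f.resp, nil
}
```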
@@ -90,8 +86,8 @@ type hotRangesLoggingScheduler struct {
 // For system tenants, or single tenant deployments, it runs as
 // a task on each node, logging only the ranges on the node in
 // which it runs. For app tenants in a multi-tenant deployment,
-// it runs on a single node in the sql cluster, applying a fanout
-// to the kv layer to collect the hot ranges from all nodes.
+// it does nothing, allowing the hot range logging job to be the
+// entrypoint.
 func StartHotRangesLoggingScheduler(
 	ctx context.Context,
 	stopper *stop.Stopper,
@@ -100,42 +96,30 @@ func StartHotRangesLoggingScheduler(
 	ti *tenantcapabilities.Entry,
 ) error {
 	multiTenant := ti != nil && ti.TenantID.IsSet() && !ti.TenantID.IsSystem()
-	scheduler := hotRangesLoggingScheduler{
+
+	if multiTenant {
+		return nil
+	}
+
+	logger := hotRangesLogger{
 		sServer:     sServer,
 		st:          st,
-		stopper:     stopper,
-		multiTenant: multiTenant,
+		multiTenant: false,
 		lastLogged:  timeutil.Now(),
 	}
 
-	if multiTenant {
-		return scheduler.startJob()
-	}
-
-	return scheduler.startTask(ctx, stopper)
+	return logger.startTask(ctx, stopper)
 }
 
 // startTask is for usage in a system-tenant or non-multi-tenant
 // installation.
-func (s *hotRangesLoggingScheduler) startTask(ctx context.Context, stopper *stop.Stopper) error {
+func (s *hotRangesLogger) startTask(ctx context.Context, stopper *stop.Stopper) error {
 	return stopper.RunAsyncTask(ctx, "hot-ranges-stats", func(ctx context.Context) {
 		s.start(ctx, stopper)
 	})
 }
 
-func (s *hotRangesLoggingScheduler) startJob() error {
-	jobs.RegisterConstructor(
-		jobspb.TypeHotRangesLogger,
-		func(job *jobs.Job, settings *cluster.Settings) jobs.Resumer {
-			s.job = job
-			return s
-		},
-		jobs.DisablesTenantCostControl,
-	)
-	return nil
-}
-
-func (s *hotRangesLoggingScheduler) start(ctx context.Context, stopper *stop.Stopper) {
+func (s *hotRangesLogger) start(ctx context.Context, stopper *stop.Stopper) {
 	for {
 		ci := CheckInterval
 		if s.multiTenant {
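The body of the start loop continues beyond this hunk. As a hypothetical sketch (not the actual body of start), a stopper-aware polling loop of this shape typically wakes on the check interval, logs if warranted, and exits when the stopper quiesces or the context is canceled; the helper name pollLoop below is illustrative only:

```go
// pollLoop is a hypothetical illustration of the polling pattern: wake
// every CheckInterval, delegate the log decision to maybeLogHotRanges,
// and exit cleanly on quiesce or context cancellation.
func (s *hotRangesLogger) pollLoop(ctx context.Context, stopper *stop.Stopper) {
	for {
		select {
		case <-stopper.ShouldQuiesce():
			return
		case <-ctx.Done():
			return
		case <-time.After(CheckInterval):
			s.maybeLogHotRanges(ctx, stopper)
		}
	}
}
```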
@@ -156,7 +140,7 @@ func (s *hotRangesLoggingScheduler) start(ctx context.Context, stopper *stop.Sto
 
 // maybeLogHotRanges is a small helper function which couples the
 // functionality of checking whether to log and logging.
-func (s *hotRangesLoggingScheduler) maybeLogHotRanges(ctx context.Context, stopper *stop.Stopper) {
+func (s *hotRangesLogger) maybeLogHotRanges(ctx context.Context, stopper *stop.Stopper) {
 	if s.shouldLog(ctx) {
 		s.logHotRanges(ctx, stopper)
 		s.lastLogged = timeutil.Now()
@@ -171,7 +155,7 @@ func (s *hotRangesLoggingScheduler) maybeLogHotRanges(ctx context.Context, stopp
 // - One of the following conditions is met:
 // -- It's been greater than the log interval since we last logged.
 // -- One of the replicas see exceeds our cpu threshold.
-func (s *hotRangesLoggingScheduler) shouldLog(ctx context.Context) bool {
+func (s *hotRangesLogger) shouldLog(ctx context.Context) bool {
 
 	enabled := TelemetryHotRangesStatsEnabled.Get(&s.st.SV)
 	if !enabled {
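The rest of shouldLog falls outside this hunk. A hedged sketch of how the two documented conditions (log interval elapsed, CPU threshold exceeded) could be expressed; logIntervalSetting and cpuThresholdSetting are placeholder names, not settings this package necessarily defines, and the helper name is illustrative:

```go
// shouldLogSketch is a hypothetical illustration of the documented checks,
// not the function's actual body. Setting names are placeholders.
func (s *hotRangesLogger) shouldLogSketch(ctx context.Context) bool {
	if !TelemetryHotRangesStatsEnabled.Get(&s.st.SV) {
		return false
	}
	// Condition 1: the configured interval has passed since the last log.
	if timeutil.Since(s.lastLogged) > logIntervalSetting.Get(&s.st.SV) {
		return true
	}
	// Condition 2: some hot replica exceeds the CPU threshold.
	resp, err := s.getHotRanges(ctx, true /* statsOnly */)
	if err != nil {
		return false
	}
	return maxCPU(resp.Ranges) > cpuThresholdSetting.Get(&s.st.SV)
}
```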
@@ -210,7 +194,7 @@ func maxCPU(ranges []*serverpb.HotRangesResponseV2_HotRange) time.Duration {
 // stats for ranges requested, or everything. It also determines
 // whether to limit the request to only the local node, or to
 // issue a fanout for multi-tenant apps.
-func (s *hotRangesLoggingScheduler) getHotRanges(
+func (s *hotRangesLogger) getHotRanges(
 	ctx context.Context, statsOnly bool,
 ) (*serverpb.HotRangesResponseV2, error) {
 	req := &serverpb.HotRangesRequest{
@@ -228,7 +212,7 @@ func (s *hotRangesLoggingScheduler) getHotRanges(
 
 // logHotRanges collects the hot ranges from this node's status server and
 // sends them to the HEALTH log channel.
-func (s *hotRangesLoggingScheduler) logHotRanges(ctx context.Context, stopper *stop.Stopper) {
+func (s *hotRangesLogger) logHotRanges(ctx context.Context, stopper *stop.Stopper) {
 	resp, err := s.getHotRanges(ctx, false)
 	if err != nil {
 		log.Warningf(ctx, "failed to get hot ranges: %s", err)
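For context, a hedged sketch of how a node's startup path might invoke the scheduler after this change. The argument order is inferred from the struct fields shown above, and statusServer and settings are placeholder names; passing a nil tenant entry keeps multiTenant false and starts the per-node task:

```go
// Hypothetical call site during node startup (variable names are placeholders).
if err := structlogging.StartHotRangesLoggingScheduler(
	ctx, stopper, statusServer, settings, nil /* ti */,
); err != nil {
	return err
}
```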