
Commit d9f6b1a

server: unexport Server
We should really only look at top level servers through an interface.

Release note: None
1 parent a462321 commit d9f6b1a

29 files changed: +271 -205 lines
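
Read together, the per-file diffs below replace direct references to the concrete (now unexported) server type with the interfaces that live in the new pkg/server/serverctl package. A minimal single-file sketch of that pattern, assuming illustrative names (StartupInterface, topLevelThing, newServer) rather than the real serverctl definitions:

    // Single-file sketch of the "unexported concrete type behind an exported
    // interface" pattern this commit applies. All names here are illustrative
    // stand-ins, not CockroachDB identifiers.
    package main

    import (
        "context"
        "fmt"
    )

    // StartupInterface is the abridged contract callers program against.
    type StartupInterface interface {
        PreStart(ctx context.Context) error
        AcceptClients(ctx context.Context) error
    }

    // topLevelThing is the concrete implementation; because it is unexported,
    // code outside its package can only hold it through StartupInterface.
    type topLevelThing struct{ started bool }

    func (t *topLevelThing) PreStart(ctx context.Context) error {
        t.started = true
        return nil
    }

    func (t *topLevelThing) AcceptClients(ctx context.Context) error {
        if !t.started {
            return fmt.Errorf("PreStart must run first")
        }
        return nil
    }

    // newServer mirrors the newServerFn shape in the diff: build the concrete
    // type, hand it back as an interface.
    func newServer() (StartupInterface, error) {
        return &topLevelThing{}, nil
    }

    func main() {
        ctx := context.Background()
        s, err := newServer()
        if err != nil {
            panic(err)
        }
        if err := s.PreStart(ctx); err != nil {
            panic(err)
        }
        if err := s.AcceptClients(ctx); err != nil {
            panic(err)
        }
        fmt.Println("started and accepting clients")
    }

The point of the pattern is the one stated in the commit message: callers outside pkg/server program against the interface, so the concrete type can stay unexported and change freely.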

pkg/BUILD.bazel

Lines changed: 1 addition & 0 deletions
@@ -1544,6 +1544,7 @@ GO_TARGETS = [
 "//pkg/server/profiler:profiler",
 "//pkg/server/profiler:profiler_test",
 "//pkg/server/rangetestutils:rangetestutils",
+"//pkg/server/serverctl:serverctl",
 "//pkg/server/serverpb:serverpb",
 "//pkg/server/serverpb:serverpb_test",
 "//pkg/server/serverrules:serverrules",

pkg/cli/BUILD.bazel

Lines changed: 1 addition & 0 deletions
@@ -148,6 +148,7 @@ go_library(
 "//pkg/server/autoconfig/acprovider",
 "//pkg/server/pgurl",
 "//pkg/server/profiler",
+"//pkg/server/serverctl",
 "//pkg/server/serverpb",
 "//pkg/server/status",
 "//pkg/server/status/statuspb",

pkg/cli/mt_start_sql.go

Lines changed: 2 additions & 1 deletion
@@ -16,6 +16,7 @@ import (
 "github.com/cockroachdb/cockroach/pkg/cli/clierrorplus"
 "github.com/cockroachdb/cockroach/pkg/clusterversion"
 "github.com/cockroachdb/cockroach/pkg/server"
+"github.com/cockroachdb/cockroach/pkg/server/serverctl"
 "github.com/cockroachdb/cockroach/pkg/util/stop"
 "github.com/cockroachdb/redact"
 "github.com/spf13/cobra"
@@ -78,7 +79,7 @@ func runStartSQL(cmd *cobra.Command, args []string) error {
 )
 }

-newServerFn := func(ctx context.Context, serverCfg server.Config, stopper *stop.Stopper) (serverStartupInterface, error) {
+newServerFn := func(ctx context.Context, serverCfg server.Config, stopper *stop.Stopper) (serverctl.ServerStartupInterface, error) {
 // Beware of not writing simply 'return server.NewServer()'. This is
 // because it would cause the serverStartupInterface reference to
 // always be non-nil, even if NewServer returns a nil pointer (and

pkg/cli/start.go

Lines changed: 16 additions & 59 deletions
@@ -38,6 +38,7 @@ import (
 "github.com/cockroachdb/cockroach/pkg/security/clientsecopts"
 "github.com/cockroachdb/cockroach/pkg/security/username"
 "github.com/cockroachdb/cockroach/pkg/server"
+"github.com/cockroachdb/cockroach/pkg/server/serverctl"
 "github.com/cockroachdb/cockroach/pkg/server/status"
 "github.com/cockroachdb/cockroach/pkg/settings"
 "github.com/cockroachdb/cockroach/pkg/settings/cluster"
@@ -312,45 +313,7 @@ func initTempStorageConfig(
 return tempStorageConfig, nil
 }

-type newServerFn func(ctx context.Context, serverCfg server.Config, stopper *stop.Stopper) (serverStartupInterface, error)
-
-type serverStartupInterface interface {
-serverShutdownInterface
-
-// ClusterSettings retrieves this server's settings.
-ClusterSettings() *cluster.Settings
-
-// LogicalClusterID retrieves this server's logical cluster ID.
-LogicalClusterID() uuid.UUID
-
-// PreStart starts the server on the specified port(s) and
-// initializes subsystems.
-// It does not activate the pgwire listener over the network / unix
-// socket, which is done by the AcceptClients() method. The separation
-// between the two exists so that SQL initialization can take place
-// before the first client is accepted.
-PreStart(ctx context.Context) error
-
-// AcceptClients starts listening for incoming SQL clients over the network.
-AcceptClients(ctx context.Context) error
-// AcceptInternalClients starts listening for incoming internal SQL clients over the
-// loopback interface.
-AcceptInternalClients(ctx context.Context) error
-
-// InitialStart returns whether this node is starting for the first time.
-// This is (currently) used when displaying the server status report
-// on the terminal & in logs. We know that some folk have automation
-// that depend on certain strings displayed from this when orchestrating
-// KV-only nodes.
-InitialStart() bool
-
-// RunInitialSQL runs the SQL initialization for brand new clusters,
-// if the cluster is being started for the first time.
-// The arguments are:
-// - startSingleNode is used by 'demo' and 'start-single-node'.
-// - adminUser/adminPassword is used for 'demo'.
-RunInitialSQL(ctx context.Context, startSingleNode bool, adminUser, adminPassword string) error
-}
+type newServerFn func(ctx context.Context, serverCfg server.Config, stopper *stop.Stopper) (serverctl.ServerStartupInterface, error)

 var errCannotUseJoin = errors.New("cannot use --join with 'cockroach start-single-node' -- use 'cockroach start' instead")

@@ -398,9 +361,9 @@ func runStartJoin(cmd *cobra.Command, args []string) error {
 func runStart(cmd *cobra.Command, args []string, startSingleNode bool) error {
 const serverType redact.SafeString = "node"

-newServerFn := func(_ context.Context, serverCfg server.Config, stopper *stop.Stopper) (serverStartupInterface, error) {
+newServerFn := func(_ context.Context, serverCfg server.Config, stopper *stop.Stopper) (serverctl.ServerStartupInterface, error) {
 // Beware of not writing simply 'return server.NewServer()'. This is
-// because it would cause the serverStartupInterface reference to
+// because it would cause the serverctl.ServerStartupInterface reference to
 // always be non-nil, even if NewServer returns a nil pointer (and
 // an error). The code below is dependent on the interface
 // reference remaining nil in case of error.
@@ -757,10 +720,10 @@ func createAndStartServerAsync(
 newServerFn newServerFn,
 startSingleNode bool,
 serverType redact.SafeString,
-) (srvStatus *serverStatus, serverShutdownReqC <-chan server.ShutdownRequest) {
+) (srvStatus *serverStatus, serverShutdownReqC <-chan serverctl.ShutdownRequest) {
 var serverStatusMu serverStatus
-var s serverStartupInterface
-shutdownReqC := make(chan server.ShutdownRequest, 1)
+var s serverctl.ServerStartupInterface
+shutdownReqC := make(chan serverctl.ShutdownRequest, 1)

 log.Ops.Infof(ctx, "starting cockroach %s", serverType)

@@ -851,8 +814,8 @@
 return reportServerInfo(ctx, tBegin, serverCfg, s.ClusterSettings(),
 serverType, s.InitialStart(), s.LogicalClusterID())
 }(); err != nil {
-shutdownReqC <- server.MakeShutdownRequest(
-server.ShutdownReasonServerStartupError, errors.Wrapf(err, "server startup failed"))
+shutdownReqC <- serverctl.MakeShutdownRequest(
+serverctl.ShutdownReasonServerStartupError, errors.Wrapf(err, "server startup failed"))
 } else {
 // Start a goroutine that watches for shutdown requests and notifies
 // errChan.
@@ -885,7 +848,7 @@ type serverStatus struct {
 // s is a reference to the server, to be used by the shutdown process. This
 // starts as nil, and is set by setStarted(). Once set, a graceful shutdown
 // should use a soft drain.
-s serverShutdownInterface
+s serverctl.ServerShutdownInterface
 // stopper is the server's stopper. This is set in setStarted(), together with
 // `s`. The stopper is handed out to callers of startShutdown(), who will
 // Stop() it.
@@ -904,7 +867,9 @@ type serverStatus struct {
 // setStarted returns whether shutdown has been requested already. If it has,
 // then the serverStatus does not take ownership of the stopper; the caller is
 // responsible for calling stopper.Stop().
-func (s *serverStatus) setStarted(server serverShutdownInterface, stopper *stop.Stopper) bool {
+func (s *serverStatus) setStarted(
+server serverctl.ServerShutdownInterface, stopper *stop.Stopper,
+) bool {
 s.Lock()
 defer s.Unlock()
 if s.shutdownRequested {
@@ -927,21 +892,13 @@ func (s *serverStatus) shutdownInProgress() bool {
 // was started already. If the server started, a reference to the server is also
 // returned, and a reference to the stopper that the caller needs to eventually
 // Stop().
-func (s *serverStatus) startShutdown() (bool, serverShutdownInterface, *stop.Stopper) {
+func (s *serverStatus) startShutdown() (bool, serverctl.ServerShutdownInterface, *stop.Stopper) {
 s.Lock()
 defer s.Unlock()
 s.shutdownRequested = true
 return s.s != nil, s.s, s.stopper
 }

-// serverShutdownInterface is the subset of the APIs on a server
-// object that's sufficient to run a server shutdown.
-type serverShutdownInterface interface {
-AnnotateCtx(context.Context) context.Context
-Drain(ctx context.Context, verbose bool) (uint64, redact.RedactableString, error)
-ShutdownRequested() <-chan server.ShutdownRequest
-}
-
 // waitForShutdown blocks until interrupted by a shutdown signal, which can come
 // in several forms:
 // - a shutdown request coming from an internal module being signaled on
@@ -953,7 +910,7 @@ type serverShutdownInterface interface {
 // before shutting down.
 func waitForShutdown(
 stopper *stop.Stopper,
-shutdownC <-chan server.ShutdownRequest,
+shutdownC <-chan serverctl.ShutdownRequest,
 signalCh <-chan os.Signal,
 serverStatusMu *serverStatus,
 ) (returnErr error) {
@@ -966,7 +923,7 @@
 case shutdownRequest := <-shutdownC:
 returnErr = shutdownRequest.ShutdownCause()
 // There's no point in draining if the server didn't even fully start.
-drain := shutdownRequest.Reason != server.ShutdownReasonServerStartupError
+drain := shutdownRequest.Reason != serverctl.ShutdownReasonServerStartupError
 startShutdownAsync(serverStatusMu, stopWithoutDrain, drain)

 case sig := <-signalCh:
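
The "Beware of not writing simply 'return server.NewServer()'" comments kept in these hunks refer to a general Go pitfall rather than anything CockroachDB-specific: storing a typed nil pointer in an interface produces a non-nil interface value. A small self-contained illustration, with hypothetical names (starter, impl, newImpl):

    // Hypothetical demonstration of the typed-nil-in-interface pitfall.
    package main

    import "fmt"

    type starter interface{ Start() error }

    type impl struct{}

    func (*impl) Start() error { return nil }

    // newImpl fails: it returns a nil *impl alongside the error.
    func newImpl() (*impl, error) { return nil, fmt.Errorf("boom") }

    func main() {
        // Wrong: assigning the typed nil pointer wraps it in the interface,
        // so s != nil even though construction failed.
        var s starter
        p, err := newImpl()
        s = p
        fmt.Println(err != nil, s != nil) // true true -- surprising

        // Right: only assign to the interface when construction succeeded,
        // leaving the interface itself nil on error.
        var s2 starter
        if p2, err2 := newImpl(); err2 == nil {
            s2 = p2
        }
        fmt.Println(s2 != nil) // false
    }

This is why, as the surrounding comment explains, the closures above must leave the serverctl.ServerStartupInterface reference nil when NewServer returns an error.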

pkg/server/BUILD.bazel

Lines changed: 1 addition & 0 deletions
@@ -186,6 +186,7 @@ go_library(
 "//pkg/server/pgurl",
 "//pkg/server/privchecker",
 "//pkg/server/profiler",
+"//pkg/server/serverctl",
 "//pkg/server/serverpb",
 "//pkg/server/serverrules",
 "//pkg/server/settingswatcher",

pkg/server/admin.go

Lines changed: 2 additions & 2 deletions
@@ -130,7 +130,7 @@ type systemAdminServer struct {
 *adminServer

 nodeLiveness *liveness.NodeLiveness
-server *Server
+server *topLevelServer
 }

 // noteworthyAdminMemoryUsageBytes is the minimum size tracked by the
@@ -161,7 +161,7 @@ func newSystemAdminServer(
 distSender *kvcoord.DistSender,
 grpc *grpcServer,
 drainServer *drainServer,
-s *Server,
+s *topLevelServer,
 ) *systemAdminServer {
 adminServer := newAdminServer(
 sqlServer,

pkg/server/auto_upgrade.go

Lines changed: 3 additions & 3 deletions
@@ -24,7 +24,7 @@ import (
 )

 // startAttemptUpgrade attempts to upgrade cluster version.
-func (s *Server) startAttemptUpgrade(ctx context.Context) error {
+func (s *topLevelServer) startAttemptUpgrade(ctx context.Context) error {
 return s.stopper.RunAsyncTask(ctx, "auto-upgrade", func(ctx context.Context) {
 ctx, cancel := s.stopper.WithCancelOnQuiesce(ctx)
 defer cancel()
@@ -120,7 +120,7 @@ const (

 // upgradeStatus lets the main checking loop know if we should do upgrade,
 // keep checking upgrade status, or stop attempting upgrade.
-func (s *Server) upgradeStatus(
+func (s *topLevelServer) upgradeStatus(
 ctx context.Context, clusterVersion string,
 ) (st upgradeStatus, err error) {
 nodes, err := s.status.ListNodesInternal(ctx, nil)
@@ -205,7 +205,7 @@ func (s *Server) upgradeStatus(
 // clusterVersion returns the current cluster version from the SQL subsystem
 // (which returns the version from the KV store as opposed to the possibly
 // lagging settings subsystem).
-func (s *Server) clusterVersion(ctx context.Context) (string, error) {
+func (s *topLevelServer) clusterVersion(ctx context.Context) (string, error) {
 row, err := s.sqlServer.internalExecutor.QueryRowEx(
 ctx, "show-version", nil, /* txn */
 sessiondata.RootUserSessionDataOverride,

pkg/server/clock_monotonicity.go

Lines changed: 5 additions & 3 deletions
@@ -45,7 +45,7 @@ var (

 // startMonitoringForwardClockJumps starts a background task to monitor forward
 // clock jumps based on a cluster setting.
-func (s *Server) startMonitoringForwardClockJumps(ctx context.Context) error {
+func (s *topLevelServer) startMonitoringForwardClockJumps(ctx context.Context) error {
 forwardJumpCheckEnabled := make(chan bool, 1)
 s.stopper.AddCloser(stop.CloserFn(func() { close(forwardJumpCheckEnabled) }))

@@ -69,7 +69,7 @@ func (s *Server) startMonitoringForwardClockJumps(ctx context.Context) error {
 // checkHLCUpperBoundExists determines whether there's an HLC
 // upper bound that will need to refreshed/persisted after
 // the server has initialized.
-func (s *Server) checkHLCUpperBoundExistsAndEnsureMonotonicity(
+func (s *topLevelServer) checkHLCUpperBoundExistsAndEnsureMonotonicity(
 ctx context.Context, initialStart bool,
 ) (hlcUpperBoundExists bool, err error) {
 if initialStart {
@@ -238,7 +238,9 @@ func periodicallyPersistHLCUpperBound(
 //
 // tickCallback is called whenever persistHLCUpperBoundCh or a ticker tick is
 // processed
-func (s *Server) startPersistingHLCUpperBound(ctx context.Context, hlcUpperBoundExists bool) error {
+func (s *topLevelServer) startPersistingHLCUpperBound(
+ctx context.Context, hlcUpperBoundExists bool,
+) error {
 tickerFn := time.NewTicker
 persistHLCUpperBoundFn := func(t int64) error { /* function to persist upper bound of HLC to all stores */
 return s.node.SetHLCUpperBound(context.Background(), t)
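
startPersistingHLCUpperBound wires an injectable ticker constructor to a persist callback (s.node.SetHLCUpperBound above); the loop that consumes the ticks lies outside this hunk. A hedged sketch of that general ticker-plus-callback shape, with the loop structure, intervals, and stop condition assumed for illustration rather than taken from the CockroachDB source:

    // Hypothetical ticker-plus-callback loop; interval, slack, and stop
    // condition are invented for illustration.
    package main

    import (
        "fmt"
        "time"
    )

    // persistUpperBound stands in for node.SetHLCUpperBound, which persists
    // the bound to the stores.
    func persistUpperBound(t int64) error {
        fmt.Println("persisted HLC upper bound:", t)
        return nil
    }

    func main() {
        // As in the diff, the ticker constructor and persist function are
        // injected values, which lets tests substitute fakes.
        tickerFn := time.NewTicker
        persistFn := persistUpperBound

        ticker := tickerFn(50 * time.Millisecond)
        defer ticker.Stop()

        done := time.After(200 * time.Millisecond) // stand-in for stopper quiescence
        for {
            select {
            case now := <-ticker.C:
                // Persist a bound ahead of the current wall time so the clock
                // can keep advancing while the next persist is in flight.
                if err := persistFn(now.Add(100 * time.Millisecond).UnixNano()); err != nil {
                    fmt.Println("persist failed:", err)
                    return
                }
            case <-done:
                return
            }
        }
    }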

pkg/server/decommission.go

Lines changed: 3 additions & 3 deletions
@@ -141,7 +141,7 @@ func getPingCheckDecommissionFn(
 // or remove actions. If maxErrors >0, range checks will stop once maxError is
 // reached.
 // The error returned is a gRPC error.
-func (s *Server) DecommissionPreCheck(
+func (s *topLevelServer) DecommissionPreCheck(
 ctx context.Context,
 nodeIDs []roachpb.NodeID,
 strictReadiness bool,
@@ -314,7 +314,7 @@ func evaluateRangeCheckResult(

 // Decommission idempotently sets the decommissioning flag for specified nodes.
 // The error return is a gRPC error.
-func (s *Server) Decommission(
+func (s *topLevelServer) Decommission(
 ctx context.Context, targetStatus livenesspb.MembershipStatus, nodeIDs []roachpb.NodeID,
 ) error {
 // If we're asked to decommission ourself we may lose access to cluster RPC,
@@ -396,7 +396,7 @@ func (s *Server) Decommission(

 // DecommissioningNodeMap returns the set of node IDs that are decommissioning
 // from the perspective of the server.
-func (s *Server) DecommissioningNodeMap() map[roachpb.NodeID]interface{} {
+func (s *topLevelServer) DecommissioningNodeMap() map[roachpb.NodeID]interface{} {
 s.decomNodeMap.RLock()
 defer s.decomNodeMap.RUnlock()
 nodes := make(map[roachpb.NodeID]interface{})

pkg/server/drain.go

Lines changed: 2 additions & 1 deletion
@@ -19,6 +19,7 @@ import (

 "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness"
 "github.com/cockroachdb/cockroach/pkg/roachpb"
+"github.com/cockroachdb/cockroach/pkg/server/serverctl"
 "github.com/cockroachdb/cockroach/pkg/server/serverpb"
 "github.com/cockroachdb/cockroach/pkg/server/srverrors"
 "github.com/cockroachdb/cockroach/pkg/settings"
@@ -197,7 +198,7 @@ func (s *drainServer) maybeShutdownAfterDrain(
 // away (and who knows whether gRPC-goroutines are tied up in some
 // stopper task somewhere).
 s.grpc.Stop()
-s.stopTrigger.signalStop(ctx, MakeShutdownRequest(ShutdownReasonDrainRPC, nil /* err */))
+s.stopTrigger.signalStop(ctx, serverctl.MakeShutdownRequest(serverctl.ShutdownReasonDrainRPC, nil /* err */))
 }()

 select {
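
The drain path now signals shutdown through serverctl as well. Going only by what is visible in this commit (MakeShutdownRequest, the Reason field, ShutdownCause(), and the <-chan ShutdownRequest used by waitForShutdown), here is a hedged sketch of how such a request is produced and consumed; the type definitions are assumptions, not the serverctl source:

    // Approximation of the shutdown-request flow visible in this commit.
    // The real serverctl definitions may differ.
    package main

    import (
        "errors"
        "fmt"
    )

    type ShutdownReason int

    const (
        ShutdownReasonDrainRPC ShutdownReason = iota
        ShutdownReasonServerStartupError
    )

    type ShutdownRequest struct {
        Reason ShutdownReason
        cause  error
    }

    func MakeShutdownRequest(reason ShutdownReason, err error) ShutdownRequest {
        return ShutdownRequest{Reason: reason, cause: err}
    }

    func (r ShutdownRequest) ShutdownCause() error { return r.cause }

    func main() {
        shutdownC := make(chan ShutdownRequest, 1)

        // A failed startup posts a request, as createAndStartServerAsync does.
        shutdownC <- MakeShutdownRequest(
            ShutdownReasonServerStartupError, errors.New("server startup failed"))

        // The wait loop inspects the reason to decide whether draining is
        // worthwhile, mirroring waitForShutdown in the diff.
        req := <-shutdownC
        drain := req.Reason != ShutdownReasonServerStartupError
        fmt.Println("drain before stopping:", drain, "cause:", req.ShutdownCause())
    }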
