Commit 2415bcd

log,*: find and replace log.*Info* with log.Dev.*Info*
Epic: CRDB-53410
Release note: None
Parent: d1af030

529 files changed (+1874, -1874 lines)
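
At each call site the change is mechanical: the package-level log.Info/log.Infof helpers are swapped for the same methods on log.Dev, with the context and arguments left untouched. A minimal sketch of the pattern is below; the package name, the logPullStart function, and its parameters are hypothetical and only illustrate the rewrite shown in the diffs, while the import path and the log.Dev.Infof call match what the changed files use.

package example // illustration only, not part of this commit

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

func logPullStart(ctx context.Context, ref string) {
	// Before this commit the call site used the package-level helper:
	//   log.Infof(ctx, "ImagePull %s starting", ref)
	// After the rewrite, the Dev channel is named explicitly:
	log.Dev.Infof(ctx, "ImagePull %s starting", ref)
}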


pkg/acceptance/cluster/docker.go

Lines changed: 7 additions & 7 deletions

@@ -125,12 +125,12 @@ func pullImage(
 	// acceptance test even though that image is already present. So we first
 	// check to see if our image is present in order to avoid this slowness.
 	if hasImage(ctx, l, ref) == nil {
-		log.Infof(ctx, "ImagePull %s already exists", ref)
+		log.Dev.Infof(ctx, "ImagePull %s already exists", ref)
 		return nil
 	}

-	log.Infof(ctx, "ImagePull %s starting", ref)
-	defer log.Infof(ctx, "ImagePull %s complete", ref)
+	log.Dev.Infof(ctx, "ImagePull %s starting", ref)
+	defer log.Dev.Infof(ctx, "ImagePull %s complete", ref)

 	rc, err := l.client.ImagePull(ctx, ref, options)
 	if err != nil {
@@ -424,13 +424,13 @@ func (cli resilientDockerClient) ContainerCreate(
 		ctx, config, hostConfig, networkingConfig, platformSpec, containerName,
 	)
 	if err != nil && strings.Contains(err.Error(), "already in use") {
-		log.Infof(ctx, "unable to create container %s: %v", containerName, err)
+		log.Dev.Infof(ctx, "unable to create container %s: %v", containerName, err)
 		containers, cerr := cli.ContainerList(ctx, types.ContainerListOptions{
 			All:   true,
 			Limit: -1, // no limit, see docker/docker/client/container_list.go
 		})
 		if cerr != nil {
-			log.Infof(ctx, "unable to list containers: %v", cerr)
+			log.Dev.Infof(ctx, "unable to list containers: %v", cerr)
 			return container.CreateResponse{}, err
 		}
 		for _, c := range containers {
@@ -440,13 +440,13 @@ func (cli resilientDockerClient) ContainerCreate(
 			if n != containerName {
 				continue
 			}
-			log.Infof(ctx, "trying to remove %s", c.ID)
+			log.Dev.Infof(ctx, "trying to remove %s", c.ID)
 			options := types.ContainerRemoveOptions{
 				RemoveVolumes: true,
 				Force:         true,
 			}
 			if rerr := cli.ContainerRemove(ctx, c.ID, options); rerr != nil {
-				log.Infof(ctx, "unable to remove container: %v", rerr)
+				log.Dev.Infof(ctx, "unable to remove container: %v", rerr)
 				return container.CreateResponse{}, err
 			}
 			return cli.ContainerCreate(ctx, config, hostConfig, networkingConfig, platformSpec, containerName)

pkg/acceptance/cluster/dockercluster.go

Lines changed: 23 additions & 23 deletions

@@ -181,7 +181,7 @@ func CreateDocker(
 		volumesDir = filepath.Join(pwd, volumesDir)
 	}
 	maybePanic(os.MkdirAll(volumesDir, 0755))
-	log.Infof(ctx, "cluster volume directory: %s", volumesDir)
+	log.Dev.Infof(ctx, "cluster volume directory: %s", volumesDir)

 	return &DockerCluster{
 		clusterID: clusterIDS,
@@ -281,7 +281,7 @@ func (l *DockerCluster) createNetwork(ctx context.Context) {
 	l.panicOnStop()

 	l.networkName = fmt.Sprintf("%s-%s", networkPrefix, l.clusterID)
-	log.Infof(ctx, "creating docker network with name: %s", l.networkName)
+	log.Dev.Infof(ctx, "creating docker network with name: %s", l.networkName)
 	net, err := l.client.NetworkInspect(ctx, l.networkName, types.NetworkInspectOptions{})
 	if err == nil {
 		// We need to destroy the network and any running containers inside of it.
@@ -313,7 +313,7 @@ func (l *DockerCluster) createNetwork(ctx context.Context) {
 func (l *DockerCluster) initCluster(ctx context.Context) {
 	configJSON, err := json.Marshal(l.config)
 	maybePanic(err)
-	log.Infof(ctx, "Initializing Cluster %s:\n%s", l.config.Name, configJSON)
+	log.Dev.Infof(ctx, "Initializing Cluster %s:\n%s", l.config.Name, configJSON)
 	l.panicOnStop()

 	pwd, err := os.Getwd()
@@ -417,7 +417,7 @@ func (l *DockerCluster) createRoach(
 	if node.index >= 0 {
 		hostname = fmt.Sprintf("roach-%s-%d", l.clusterID, node.index)
 	}
-	log.Infof(ctx, "creating docker container with name: %s", hostname)
+	log.Dev.Infof(ctx, "creating docker container with name: %s", hostname)
 	var err error
 	node.Container, err = createContainer(
 		ctx,
@@ -516,7 +516,7 @@ func (l *DockerCluster) startNode(ctx context.Context, node *testNode, singleNod
 	maybePanic(node.Start(ctx))
 	httpAddr := node.Addr(ctx, defaultHTTP)

-	log.Infof(ctx, `*** started %[1]s ***
+	log.Dev.Infof(ctx, `*** started %[1]s ***
 ui: %[2]s
 trace: %[2]s/debug/requests
 logs: %[3]s/cockroach.INFO
@@ -551,10 +551,10 @@ func (l *DockerCluster) RunInitCommand(ctx context.Context, nodeIdx int) {
 		},
 	}

-	log.Infof(ctx, "trying to initialize via %v", containerConfig.Cmd)
+	log.Dev.Infof(ctx, "trying to initialize via %v", containerConfig.Cmd)
 	maybePanic(l.OneShot(ctx, defaultImage, types.ImagePullOptions{},
 		containerConfig, container.HostConfig{}, platforms.DefaultSpec(), "init-command-"+randomID))
-	log.Info(ctx, "cluster successfully initialized")
+	log.Dev.Info(ctx, "cluster successfully initialized")
 }

 // returns false is the event
@@ -564,12 +564,12 @@ func (l *DockerCluster) processEvent(ctx context.Context, event events.Message)

 	// Logging everything we get from Docker in service of finding the root
 	// cause of #58955.
-	log.Infof(ctx, "processing event from Docker: %+v", event)
+	log.Dev.Infof(ctx, "processing event from Docker: %+v", event)

 	// If there's currently a oneshot container, ignore any die messages from
 	// it because those are expected.
 	if l.oneshot != nil && event.ID == l.oneshot.id && event.Status == eventDie {
-		log.Infof(ctx, "Docker event was: the oneshot container terminated")
+		log.Dev.Infof(ctx, "Docker event was: the oneshot container terminated")
 		return true
 	}

@@ -587,7 +587,7 @@ func (l *DockerCluster) processEvent(ctx context.Context, event events.Message)
 		}
 	}

-	log.Infof(ctx, "received docker event for unrecognized container: %+v",
+	log.Dev.Infof(ctx, "received docker event for unrecognized container: %+v",
 		event)

 	// An event on any other container is unexpected. Die.
@@ -604,7 +604,7 @@ func (l *DockerCluster) processEvent(ctx context.Context, event events.Message)
 	}); err == nil {
 		defer rc.Close()
 		if _, err := io.Copy(os.Stderr, rc); err != nil {
-			log.Infof(ctx, "error listing logs: %s", err)
+			log.Dev.Infof(ctx, "error listing logs: %s", err)
 		}
 	}
 }
@@ -615,8 +615,8 @@ func (l *DockerCluster) monitor(ctx context.Context, monitorDone chan struct{})
 	defer close(monitorDone)

 	if log.V(1) {
-		log.Infof(ctx, "events monitor starts")
-		defer log.Infof(ctx, "events monitor exits")
+		log.Dev.Infof(ctx, "events monitor starts")
+		defer log.Dev.Infof(ctx, "events monitor exits")
 	}
 	longPoll := func() bool {
 		// If our context was canceled, it's time to go home.
@@ -632,10 +632,10 @@ func (l *DockerCluster) monitor(ctx context.Context, monitorDone chan struct{})
 		for {
 			select {
 			case <-l.monitorCtx.Done():
-				log.Infof(ctx, "monitor shutting down")
+				log.Dev.Infof(ctx, "monitor shutting down")
 				return false
 			case err := <-errq:
-				log.Infof(ctx, "event stream done, resetting...: %s", err)
+				log.Dev.Infof(ctx, "event stream done, resetting...: %s", err)
 				// Sometimes we get a random string-wrapped EOF error back.
 				// Hard to assert on, so we just let this goroutine spin.
 				return true
@@ -666,10 +666,10 @@ func (l *DockerCluster) Start(ctx context.Context) {

 	l.createNetwork(ctx)
 	l.initCluster(ctx)
-	log.Infof(ctx, "creating node certs (%dbit) in: %s", keyLen, certsDir)
+	log.Dev.Infof(ctx, "creating node certs (%dbit) in: %s", keyLen, certsDir)
 	l.createNodeCerts()

-	log.Infof(ctx, "starting %d nodes", len(l.Nodes))
+	log.Dev.Infof(ctx, "starting %d nodes", len(l.Nodes))
 	l.monitorCtx, l.monitorCtxCancelFunc = context.WithCancel(context.Background())
 	l.monitorDone = make(chan struct{})
 	go l.monitor(ctx, l.monitorDone)
@@ -730,7 +730,7 @@ func (l *DockerCluster) Assert(ctx context.Context, t testing.TB) {
 		t.Fatalf("unexpected extra event %v (after %v)", cur, events)
 	}
 	if log.V(2) {
-		log.Infof(ctx, "asserted %v", events)
+		log.Dev.Infof(ctx, "asserted %v", events)
 	}
 }

@@ -744,11 +744,11 @@ func (l *DockerCluster) AssertAndStop(ctx context.Context, t testing.TB) {
 // stop stops the cluster.
 func (l *DockerCluster) stop(ctx context.Context) {
 	if *waitOnStop {
-		log.Infof(ctx, "waiting for interrupt")
+		log.Dev.Infof(ctx, "waiting for interrupt")
 		<-l.stopper.ShouldQuiesce()
 	}

-	log.Infof(ctx, "stopping")
+	log.Dev.Infof(ctx, "stopping")

 	l.mu.Lock()
 	defer l.mu.Unlock()
@@ -778,9 +778,9 @@ func (l *DockerCluster) stop(ctx context.Context) {
 			defer w.Close()
 			maybePanic(n.Logs(ctx, w))
 		}()
-		log.Infof(ctx, "node %d: stderr at %s", i, file)
+		log.Dev.Infof(ctx, "node %d: stderr at %s", i, file)
 		if crashed {
-			log.Infof(ctx, "~~~ node %d CRASHED ~~~~", i)
+			log.Dev.Infof(ctx, "~~~ node %d CRASHED ~~~~", i)
 		}
 		maybePanic(n.Remove(ctx))
 		n.Container = nil
@@ -925,7 +925,7 @@ func (l *DockerCluster) Cleanup(ctx context.Context, preserveLogs bool) {
 	}
 	for _, v := range volumes {
 		if preserveLogs && v.Name() == "logs" {
-			log.Infof(ctx, "preserving log directory: %s", l.volumesDir)
+			log.Dev.Infof(ctx, "preserving log directory: %s", l.volumesDir)
 			continue
 		}
 		if err := os.RemoveAll(filepath.Join(l.volumesDir, v.Name())); err != nil {

pkg/acceptance/localcluster/cluster.go

Lines changed: 12 additions & 12 deletions

@@ -197,13 +197,13 @@ func (c *Cluster) Start(ctx context.Context) {
 		}
 	}

-	log.Infof(context.Background(), "started %.3fs", timeutil.Since(c.started).Seconds())
+	log.Dev.Infof(context.Background(), "started %.3fs", timeutil.Since(c.started).Seconds())

 	if c.Cfg.NumNodes > 1 || !c.Cfg.NoWait {
 		c.waitForFullReplication()
 	} else {
 		// NB: This is useful for TestRapidRestarts.
-		log.Infof(ctx, "not waiting for initial replication")
+		log.Dev.Infof(ctx, "not waiting for initial replication")
 	}
 }

@@ -314,15 +314,15 @@ func (c *Cluster) waitForFullReplication() {
 		done, detail := c.isReplicated()
 		if (done && i >= 50) || (i%50) == 0 {
 			fmt.Print(detail)
-			log.Infof(context.Background(), "waiting for replication")
+			log.Dev.Infof(context.Background(), "waiting for replication")
 		}
 		if done {
 			break
 		}
 		time.Sleep(100 * time.Millisecond)
 	}

-	log.Infof(context.Background(), "replicated %.3fs", timeutil.Since(c.started).Seconds())
+	log.Dev.Infof(context.Background(), "replicated %.3fs", timeutil.Since(c.started).Seconds())
 }

 func (c *Cluster) isReplicated() (bool, string) {
@@ -572,7 +572,7 @@ func (n *Node) startAsyncInnerLocked(ctx context.Context, joins ...string) error
 		return errors.Wrapf(err, "running %s %v", n.cmd.Path, n.cmd.Args)
 	}

-	log.Infof(ctx, "process %d starting: %s", n.cmd.Process.Pid, n.cmd.Args)
+	log.Dev.Infof(ctx, "process %d starting: %s", n.cmd.Process.Pid, n.cmd.Args)

 	go func(cmd *exec.Cmd) {
 		waitErr := cmd.Wait()
@@ -586,7 +586,7 @@ func (n *Node) startAsyncInnerLocked(ctx context.Context, joins ...string) error
 			log.Warningf(ctx, "%v", err)
 		}

-		log.Infof(ctx, "process %d: %s", cmd.Process.Pid, cmd.ProcessState)
+		log.Dev.Infof(ctx, "process %d: %s", cmd.Process.Pid, cmd.ProcessState)

 		var execErr *exec.ExitError
 		_ = errors.As(waitErr, &execErr)
@@ -701,20 +701,20 @@ func (n *Node) waitUntilLive(dur time.Duration) error {
 		}
 		n.Unlock()
 		if pid == 0 {
-			log.Info(ctx, "process already quit")
+			log.Dev.Info(ctx, "process already quit")
 			return nil
 		}

 		urlBytes, err := os.ReadFile(n.listeningURLFile())
 		if err != nil {
-			log.Infof(ctx, "%v", err)
+			log.Dev.Infof(ctx, "%v", err)
 			continue
 		}

 		var pgURL *url.URL
 		_, pgURL, err = portFromURL(string(urlBytes))
 		if err != nil {
-			log.Infof(ctx, "%v", err)
+			log.Dev.Infof(ctx, "%v", err)
 			continue
 		}

@@ -750,12 +750,12 @@ func (n *Node) waitUntilLive(dur time.Duration) error {
 		if err := n.db.QueryRow(
 			`SELECT value FROM crdb_internal.node_runtime_info WHERE component='UI' AND field = 'URL'`,
 		).Scan(&uiStr); err != nil {
-			log.Infof(ctx, "%v", err)
+			log.Dev.Infof(ctx, "%v", err)
 		} else if _, uiURL, err = portFromURL(uiStr); err != nil {
-			log.Infof(ctx, "%v", err)
+			log.Dev.Infof(ctx, "%v", err)
 			// TODO(tschottdorf): see above.
 		}
-		log.Infof(ctx, "process %d started (db: %s ui: %s)", pid, pgURL, uiURL)
+		log.Dev.Infof(ctx, "process %d started (db: %s ui: %s)", pid, pgURL, uiURL)
 		return nil
 	}
 	return errors.Errorf("node %+v was unable to join cluster within %s", n.Cfg, dur)

pkg/acceptance/util_cluster.go

Lines changed: 2 additions & 2 deletions

@@ -100,7 +100,7 @@ func StartCluster(ctx context.Context, t *testing.T, cfg cluster.TestConfig) (c
 	// We actually start zero-node clusters in the reference tests. For one-node
 	// clusters, no replication is possible, so we can also skip this step.
 	if wantedReplicas > 1 {
-		log.Infof(ctx, "waiting for first range to have %d replicas", wantedReplicas)
+		log.Dev.Infof(ctx, "waiting for first range to have %d replicas", wantedReplicas)

 		testutils.SucceedsSoon(t, func() error {
 			select {
@@ -131,7 +131,7 @@ func StartCluster(ctx context.Context, t *testing.T, cfg cluster.TestConfig) (c
 				t.Fatalf("unable to scan for length of replicas array: %s", err)
 			}
 			if log.V(1) {
-				log.Infof(ctx, "found %d replicas", foundReplicas)
+				log.Dev.Infof(ctx, "found %d replicas", foundReplicas)
 			}
 		} else {
 			return errors.Errorf("no ranges listed")

pkg/acceptance/util_docker.go

Lines changed: 4 additions & 4 deletions

@@ -146,20 +146,20 @@ func testDocker(
 			// Otherwise, the directory removal will cause the cluster nodes
 			// to crash and report abnormal termination, even when the test
 			// succeeds otherwise.
-			log.Infof(ctx, "cleaning up docker volume")
+			log.Dev.Infof(ctx, "cleaning up docker volume")
 			l.Cleanup(ctx, preserveLogs)
 		}()

 		if len(l.Nodes) > 0 {
 			containerConfig.Env = append(containerConfig.Env, "PGHOST="+l.Hostname(0))
 		}

-		log.Infof(ctx, "starting one-shot container")
+		log.Dev.Infof(ctx, "starting one-shot container")
 		err = l.OneShot(
 			ctx, acceptanceImage, types.ImagePullOptions{}, containerConfig, hostConfig,
 			platforms.DefaultSpec(), "docker-"+name,
 		)
-		log.Infof(ctx, "one-shot container terminated: %v", err)
+		log.Dev.Infof(ctx, "one-shot container terminated: %v", err)
 		preserveLogs = err != nil
 	})
 	return err
@@ -233,7 +233,7 @@ func runTestDockerCLI(t *testing.T, testNameSuffix, testFilePath string) {
 	testFile := filepath.Base(testFilePath)
 	testPath := filepath.Join(containerPath, testFile)
 	t.Run(testFile, func(t *testing.T) {
-		log.Infof(ctx, "-- starting tests from: %s", testFile)
+		log.Dev.Infof(ctx, "-- starting tests from: %s", testFile)

 		// Symlink the logs directory to /logs, which is visible outside of the
 		// container and preserved if the test fails. (They don't write to /logs

pkg/backup/backup_job.go

Lines changed: 2 additions & 2 deletions

@@ -796,7 +796,7 @@ func (b *backupResumer) Resume(ctx context.Context, execCtx interface{}) error {
 			// If we made decent progress with the BACKUP, reset the last
 			// progress state.
 			if madeProgress := curProgress - lastProgress; madeProgress >= 0.01 {
-				log.Infof(ctx, "backport made %d%% progress, resetting retry duration", int(math.Round(float64(100*madeProgress))))
+				log.Dev.Infof(ctx, "backport made %d%% progress, resetting retry duration", int(math.Round(float64(100*madeProgress))))
 				lastProgress = curProgress
 				r.Reset()
 			}
@@ -1364,7 +1364,7 @@ func maybeRelocateJobExecution(
 		return err
 	}
 	if ok, missedTier := current.Locality.Matches(locality); !ok {
-		log.Infof(ctx,
+		log.Dev.Infof(ctx,
 			"%s job %d initially adopted on instance %d but it does not match locality filter %s, finding a new coordinator",
 			jobDesc, jobID, current.NodeID, missedTier.String(),
 		)
