Skip to content

Commit 65fe8d6

Browse files
committed
roachtestutil: use quiet logger for disk staller
1 parent 3091388 commit 65fe8d6

File tree

2 files changed

+35
-11
lines changed

2 files changed

+35
-11
lines changed

pkg/cmd/roachtest/roachtestutil/disk_stall.go

Lines changed: 31 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -75,22 +75,25 @@ func (s *cgroupDiskStaller) Setup(ctx context.Context) {
7575
// Safety measure.
7676
s.f.Fatalf("cluster needs ReusePolicyNone to support disk stalls")
7777
}
78-
if err := s.Failer.Setup(ctx, s.f.L(), failures.DiskStallArgs{
78+
l := newDiskStallLogger(s.f.L(), s.c.CRDBNodes(), "Setup")
79+
if err := s.Failer.Setup(ctx, l, failures.DiskStallArgs{
7980
StallLogs: s.stallLogs,
8081
Nodes: s.c.CRDBNodes().InstallNodes(),
8182
}); err != nil {
8283
s.f.Fatalf("failed to setup disk stall: %s", err)
8384
}
8485
}
8586
// Cleanup removes the cgroup disk stall configuration from all CRDB nodes
// via the underlying Failer, routing verbose command output to a quiet
// per-operation child logger.
func (s *cgroupDiskStaller) Cleanup(ctx context.Context) {
	l := newDiskStallLogger(s.f.L(), s.c.CRDBNodes(), "Cleanup")
	// Scope err to the if statement, matching the error-handling style of
	// the other disk staller methods in this file.
	if err := s.Failer.Cleanup(ctx, l); err != nil {
		s.f.Fatalf("failed to cleanup disk stall: %s", err)
	}
}
9193

9294
func (s *cgroupDiskStaller) Stall(ctx context.Context, nodes option.NodeListOption) {
93-
if err := s.Failer.Inject(ctx, s.f.L(), failures.DiskStallArgs{
95+
l := newDiskStallLogger(s.f.L(), nodes, "Stall")
96+
if err := s.Failer.Inject(ctx, l, failures.DiskStallArgs{
9497
StallLogs: s.stallLogs,
9598
StallWrites: true,
9699
StallReads: s.stallReads,
@@ -103,7 +106,8 @@ func (s *cgroupDiskStaller) Stall(ctx context.Context, nodes option.NodeListOpti
103106
func (s *cgroupDiskStaller) Slow(
104107
ctx context.Context, nodes option.NodeListOption, bytesPerSecond int,
105108
) {
106-
if err := s.Failer.Inject(ctx, s.f.L(), failures.DiskStallArgs{
109+
l := newDiskStallLogger(s.f.L(), nodes, "Slow")
110+
if err := s.Failer.Inject(ctx, l, failures.DiskStallArgs{
107111
StallLogs: s.stallLogs,
108112
StallWrites: true,
109113
StallReads: s.stallReads,
@@ -115,7 +119,8 @@ func (s *cgroupDiskStaller) Slow(
115119
}
116120

117121
func (s *cgroupDiskStaller) Unstall(ctx context.Context, nodes option.NodeListOption) {
118-
if err := s.Failer.Recover(ctx, s.f.L()); err != nil {
122+
l := newDiskStallLogger(s.f.L(), nodes, "Unstall")
123+
if err := s.Failer.Recover(ctx, l); err != nil {
119124
s.f.Fatalf("failed to unstall disk: %s", err)
120125
}
121126
}
@@ -141,19 +146,22 @@ func (s *dmsetupDiskStaller) Setup(ctx context.Context) {
141146
// We disable journaling and do all kinds of things below.
142147
s.f.Fatalf("cluster needs ReusePolicyNone to support disk stalls")
143148
}
144-
if err := s.Failer.Setup(ctx, s.f.L(), failures.DiskStallArgs{Nodes: s.c.CRDBNodes().InstallNodes()}); err != nil {
149+
l := newDiskStallLogger(s.f.L(), s.c.CRDBNodes(), "Setup")
150+
if err := s.Failer.Setup(ctx, l, failures.DiskStallArgs{Nodes: s.c.CRDBNodes().InstallNodes()}); err != nil {
145151
s.f.Fatalf("failed to setup disk stall: %s", err)
146152
}
147153
}
148154

149155
func (s *dmsetupDiskStaller) Cleanup(ctx context.Context) {
150-
if err := s.Failer.Cleanup(ctx, s.f.L()); err != nil {
156+
l := newDiskStallLogger(s.f.L(), s.c.CRDBNodes(), "Cleanup")
157+
if err := s.Failer.Cleanup(ctx, l); err != nil {
151158
s.f.Fatalf("failed to cleanup disk stall: %s", err)
152159
}
153160
}
154161

155162
func (s *dmsetupDiskStaller) Stall(ctx context.Context, nodes option.NodeListOption) {
156-
if err := s.Failer.Inject(ctx, s.f.L(), failures.DiskStallArgs{
163+
l := newDiskStallLogger(s.f.L(), nodes, "Stall")
164+
if err := s.Failer.Inject(ctx, l, failures.DiskStallArgs{
157165
Nodes: nodes.InstallNodes(),
158166
}); err != nil {
159167
s.f.Fatalf("failed to stall disk: %s", err)
@@ -168,10 +176,24 @@ func (s *dmsetupDiskStaller) Slow(
168176
}
169177

170178
func (s *dmsetupDiskStaller) Unstall(ctx context.Context, nodes option.NodeListOption) {
171-
if err := s.Failer.Recover(ctx, s.f.L()); err != nil {
179+
l := newDiskStallLogger(s.f.L(), nodes, "Unstall")
180+
if err := s.Failer.Recover(ctx, l); err != nil {
172181
s.f.Fatalf("failed to unstall disk: %s", err)
173182
}
174183
}
175184

176185
// DataDir returns "{store-dir}" — presumably a roachprod template token
// expanded to the node's store directory; NOTE(review): confirm expansion site.
func (s *dmsetupDiskStaller) DataDir() string { return "{store-dir}" }
177186
// LogDir returns the relative log directory ("logs") used by the dmsetup
// disk staller.
func (s *dmsetupDiskStaller) LogDir() string { return "logs" }
187+
188+
// newDiskStallLogger attempts to create a quiet child logger for a given
189+
// disk staller method. If the child logger cannot be created, it logs
190+
// a warning and continues with the parent logger.
191+
func newDiskStallLogger(l *logger.Logger, nodes option.NodeListOption, name string) *logger.Logger {
192+
quietLogger, file, err := LoggerForCmd(l, nodes, name)
193+
if err != nil {
194+
l.Printf("WARN: failed to create child logger for %s(): %s", name, err)
195+
return l
196+
}
197+
l.Printf("%s() details in %s.log", name, file)
198+
return quietLogger
199+
}

pkg/roachprod/failureinjection/failures/reset.go

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,8 +32,10 @@ func registerResetVM(r *FailureRegistry) {
3232
r.add(ResetVMFailureName, ResetVMArgs{}, MakeResetVMFailure)
3333
}
3434

35-
func MakeResetVMFailure(clusterName string, l *logger.Logger, secure bool) (FailureMode, error) {
36-
c, err := roachprod.GetClusterFromCache(l, clusterName, install.SecureOption(secure))
35+
func MakeResetVMFailure(
36+
clusterName string, l *logger.Logger, clusterOpts ClusterOptions,
37+
) (FailureMode, error) {
38+
c, err := roachprod.GetClusterFromCache(l, clusterName, install.SecureOption(clusterOpts.secure))
3739
if err != nil {
3840
return nil, err
3941
}

0 commit comments

Comments
 (0)