@@ -26,6 +26,7 @@ import (
26
26
"math"
27
27
"os"
28
28
"path"
29
+ "regexp"
29
30
"strings"
30
31
"sync"
31
32
"testing"
@@ -38,6 +39,7 @@ import (
38
39
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
39
40
"k8s.io/apimachinery/pkg/labels"
40
41
"k8s.io/apimachinery/pkg/runtime/schema"
42
+ "k8s.io/apimachinery/pkg/util/runtime"
41
43
"k8s.io/apimachinery/pkg/util/wait"
42
44
utilfeature "k8s.io/apiserver/pkg/util/feature"
43
45
cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
@@ -48,6 +50,7 @@ import (
48
50
"k8s.io/client-go/restmapper"
49
51
"k8s.io/component-base/featuregate"
50
52
featuregatetesting "k8s.io/component-base/featuregate/testing"
53
+ logsapi "k8s.io/component-base/logs/api/v1"
51
54
"k8s.io/component-base/metrics/legacyregistry"
52
55
"k8s.io/klog/v2"
53
56
"k8s.io/kubernetes/pkg/scheduler/apis/config"
@@ -94,6 +97,29 @@ const (
94
97
pluginLabelName = "plugin"
95
98
)
96
99
100
+ // Run with -v=2, this is the default log level in production.
101
+ //
102
+ // In a PR this can be bumped up temporarily to run pull-kubernetes-scheduler-perf
103
+ // with more log output.
104
+ const DefaultLoggingVerbosity = 2
105
+
106
+ var LoggingFeatureGate FeatureGateFlag
107
+ var LoggingConfig * logsapi.LoggingConfiguration
108
+
109
+ type FeatureGateFlag interface {
110
+ featuregate.FeatureGate
111
+ flag.Value
112
+ }
113
+
114
+ func init () {
115
+ f := featuregate .NewFeatureGate ()
116
+ runtime .Must (logsapi .AddFeatureGates (f ))
117
+ LoggingFeatureGate = f
118
+
119
+ LoggingConfig = logsapi .NewLoggingConfiguration ()
120
+ LoggingConfig .Verbosity = DefaultLoggingVerbosity
121
+ }
122
+
97
123
var (
98
124
defaultMetricsCollectorConfig = metricsCollectorConfig {
99
125
Metrics : map [string ][]* labelValues {
@@ -764,8 +790,62 @@ func initTestOutput(tb testing.TB) io.Writer {
764
790
765
791
// perfSchedulingLabelFilter selects which test cases BenchmarkPerfScheduling
// runs, based on their labels. Defaults to the "performance" label.
var perfSchedulingLabelFilter = flag.String("perf-scheduling-label-filter", "performance", "comma-separated list of labels which a testcase must have (no prefix or +) or must not have (-), used by BenchmarkPerfScheduling")
766
792
793
// specialFilenameChars matches every character that is not safe to use in a
// file name; such characters get replaced before building per-test log file
// names. Compiled once at package scope, as regexp best practice dictates.
var specialFilenameChars = regexp.MustCompile(`[^a-zA-Z0-9-_]`)
794
+
767
795
func setupTestCase (t testing.TB , tc * testCase , output io.Writer , outOfTreePluginRegistry frameworkruntime.Registry ) (informers.SharedInformerFactory , ktesting.TContext ) {
768
796
tCtx := ktesting .Init (t , initoption .PerTestOutput (* useTestingLog ))
797
+ artifacts , doArtifacts := os .LookupEnv ("ARTIFACTS" )
798
+ if ! * useTestingLog && doArtifacts {
799
+ // Reconfigure logging so that it goes to a separate file per
800
+ // test instead of stderr. If the test passes, the file gets
801
+ // deleted. The overall output can be very large (> 200 MB for
802
+ // ci-benchmark-scheduler-perf-master). With this approach, we
803
+ // have log output for failures without having to store large
804
+ // amounts of data that no-one is looking at. The performance
805
+ // is the same as writing to stderr.
806
+ if err := logsapi .ResetForTest (LoggingFeatureGate ); err != nil {
807
+ t .Fatalf ("Failed to reset the logging configuration: %v" , err )
808
+ }
809
+ logfileName := path .Join (artifacts , specialFilenameChars .ReplaceAllString (t .Name (), "_" )+ ".log" )
810
+ out , err := os .Create (logfileName )
811
+ if err != nil {
812
+ t .Fatalf ("Failed to create per-test log output file: %v" , err )
813
+ }
814
+ t .Cleanup (func () {
815
+ // Everything should have stopped by now, checked below
816
+ // by GoleakCheck (which runs first during test
817
+ // shutdown!). Therefore we can clean up. Errors get logged
818
+ // and fail the test, but cleanup tries to continue.
819
+ //
820
+ // Note that the race detector will flag any goroutine
821
+ // as causing a race if there is no explicit wait for
822
+ // that goroutine to stop. We know that they must have
823
+ // stopped (GoLeakCheck!) but the race detector
824
+ // doesn't.
825
+ //
826
+ // This is a major issue because many Kubernetes goroutines get
827
+ // started without waiting for them to stop :-(
828
+ if err := logsapi .ResetForTest (LoggingFeatureGate ); err != nil {
829
+ t .Errorf ("Failed to reset the logging configuration: %v" , err )
830
+ }
831
+ if err := out .Close (); err != nil {
832
+ t .Errorf ("Failed to close the per-test log output file: %s: %v" , logfileName , err )
833
+ }
834
+ if ! t .Failed () {
835
+ if err := os .Remove (logfileName ); err != nil {
836
+ t .Errorf ("Failed to remove the per-test log output file: %v" , err )
837
+ }
838
+ }
839
+ })
840
+ opts := & logsapi.LoggingOptions {
841
+ ErrorStream : out ,
842
+ InfoStream : out ,
843
+ }
844
+ if err := logsapi .ValidateAndApplyWithOptions (LoggingConfig , opts , LoggingFeatureGate ); err != nil {
845
+ t .Fatalf ("Failed to apply the per-test logging configuration: %v" , err )
846
+ }
847
+
848
+ }
769
849
770
850
// Ensure that there are no leaked
771
851
// goroutines. They could influence
0 commit comments