@@ -26,6 +26,7 @@ import (
26
26
"math"
27
27
"os"
28
28
"path"
29
+ "regexp"
29
30
"strings"
30
31
"sync"
31
32
"testing"
@@ -38,6 +39,7 @@ import (
38
39
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
39
40
"k8s.io/apimachinery/pkg/labels"
40
41
"k8s.io/apimachinery/pkg/runtime/schema"
42
+ "k8s.io/apimachinery/pkg/util/runtime"
41
43
"k8s.io/apimachinery/pkg/util/wait"
42
44
utilfeature "k8s.io/apiserver/pkg/util/feature"
43
45
cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
@@ -48,6 +50,7 @@ import (
48
50
"k8s.io/client-go/restmapper"
49
51
"k8s.io/component-base/featuregate"
50
52
featuregatetesting "k8s.io/component-base/featuregate/testing"
53
+ logsapi "k8s.io/component-base/logs/api/v1"
51
54
"k8s.io/component-base/metrics/legacyregistry"
52
55
"k8s.io/klog/v2"
53
56
"k8s.io/kubernetes/pkg/scheduler/apis/config"
@@ -94,6 +97,29 @@ const (
94
97
pluginLabelName = "plugin"
95
98
)
96
99
100
+ // Run with -v=2, this is the default log level in production.
101
+ //
102
+ // In a PR this can be bumped up temporarily to run pull-kubernetes-scheduler-perf
103
+ // with more log output.
104
+ const DefaultLoggingVerbosity = 2
105
+
106
+ var LoggingFeatureGate FeatureGateFlag
107
+ var LoggingConfig * logsapi.LoggingConfiguration
108
+
109
+ type FeatureGateFlag interface {
110
+ featuregate.FeatureGate
111
+ flag.Value
112
+ }
113
+
114
+ func init () {
115
+ f := featuregate .NewFeatureGate ()
116
+ runtime .Must (logsapi .AddFeatureGates (f ))
117
+ LoggingFeatureGate = f
118
+
119
+ LoggingConfig = logsapi .NewLoggingConfiguration ()
120
+ LoggingConfig .Verbosity = DefaultLoggingVerbosity
121
+ }
122
+
97
123
var (
98
124
defaultMetricsCollectorConfig = metricsCollectorConfig {
99
125
Metrics : map [string ][]* labelValues {
@@ -760,8 +786,62 @@ func initTestOutput(tb testing.TB) io.Writer {
760
786
761
787
var (
	// perfSchedulingLabelFilter selects the test cases that
	// BenchmarkPerfScheduling runs.
	perfSchedulingLabelFilter = flag.String("perf-scheduling-label-filter", "performance", "comma-separated list of labels which a testcase must have (no prefix or +) or must not have (-), used by BenchmarkPerfScheduling")

	// specialFilenameChars matches every character that is not safe to
	// use in a file name; callers replace such characters before
	// building per-test log file names.
	specialFilenameChars = regexp.MustCompile(`[^a-zA-Z0-9-_]`)
)
791
func setupTestCase (t testing.TB , tc * testCase , output io.Writer , outOfTreePluginRegistry frameworkruntime.Registry ) (informers.SharedInformerFactory , ktesting.TContext ) {
764
792
tCtx := ktesting .Init (t , initoption .PerTestOutput (* useTestingLog ))
793
+ artifacts , doArtifacts := os .LookupEnv ("ARTIFACTS" )
794
+ if ! * useTestingLog && doArtifacts {
795
+ // Reconfigure logging so that it goes to a separate file per
796
+ // test instead of stderr. If the test passes, the file gets
797
+ // deleted. The overall output can be very large (> 200 MB for
798
+ // ci-benchmark-scheduler-perf-master). With this approach, we
799
+ // have log output for failures without having to store large
800
+ // amounts of data that no-one is looking at. The performance
801
+ // is the same as writing to stderr.
802
+ if err := logsapi .ResetForTest (LoggingFeatureGate ); err != nil {
803
+ t .Fatalf ("Failed to reset the logging configuration: %v" , err )
804
+ }
805
+ logfileName := path .Join (artifacts , specialFilenameChars .ReplaceAllString (t .Name (), "_" )+ ".log" )
806
+ out , err := os .Create (logfileName )
807
+ if err != nil {
808
+ t .Fatalf ("Failed to create per-test log output file: %v" , err )
809
+ }
810
+ t .Cleanup (func () {
811
+ // Everything should have stopped by now, checked below
812
+ // by GoleakCheck (which runs first during test
813
+ // shutdown!). Therefore we can clean up. Errors get logged
814
+ // and fail the test, but cleanup tries to continue.
815
+ //
816
+ // Note that the race detector will flag any goroutine
817
+ // as causing a race if there is no explicit wait for
818
+ // that goroutine to stop. We know that they must have
819
+ // stopped (GoLeakCheck!) but the race detector
820
+ // doesn't.
821
+ //
822
+ // This is a major issue because many Kubernetes goroutines get
823
+ // started without waiting for them to stop :-(
824
+ if err := logsapi .ResetForTest (LoggingFeatureGate ); err != nil {
825
+ t .Errorf ("Failed to reset the logging configuration: %v" , err )
826
+ }
827
+ if err := out .Close (); err != nil {
828
+ t .Errorf ("Failed to close the per-test log output file: %s: %v" , logfileName , err )
829
+ }
830
+ if ! t .Failed () {
831
+ if err := os .Remove (logfileName ); err != nil {
832
+ t .Errorf ("Failed to remove the per-test log output file: %v" , err )
833
+ }
834
+ }
835
+ })
836
+ opts := & logsapi.LoggingOptions {
837
+ ErrorStream : out ,
838
+ InfoStream : out ,
839
+ }
840
+ if err := logsapi .ValidateAndApplyWithOptions (LoggingConfig , opts , LoggingFeatureGate ); err != nil {
841
+ t .Fatalf ("Failed to apply the per-test logging configuration: %v" , err )
842
+ }
843
+
844
+ }
765
845
766
846
// Ensure that there are no leaked
767
847
// goroutines. They could influence
0 commit comments