@@ -46,7 +46,6 @@ import (
	"k8s.io/client-go/rest"
	"k8s.io/client-go/restmapper"
	scaleclient "k8s.io/client-go/scale"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epsp "k8s.io/kubernetes/test/e2e/framework/psp"
@@ -225,7 +224,7 @@ func (f *Framework) BeforeEach() {
			err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
			ExpectNoError(err)
		} else {
-			e2elog.Logf("Skipping waiting for service account")
+			Logf("Skipping waiting for service account")
		}
		f.UniqueName = f.Namespace.GetName()
	} else {
@@ -253,7 +252,7 @@ func (f *Framework) BeforeEach() {
			PrintVerboseLogs: false,
		}, nil)
		if err != nil {
-			e2elog.Logf("Error while creating NewResourceUsageGatherer: %v", err)
+			Logf("Error while creating NewResourceUsageGatherer: %v", err)
		} else {
			go f.gatherer.StartGatheringData()
		}
@@ -274,13 +273,13 @@ func (f *Framework) BeforeEach() {
	if gatherMetricsAfterTest && TestContext.IncludeClusterAutoscalerMetrics {
		grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, !ProviderIs("kubemark"), false, false, false, TestContext.IncludeClusterAutoscalerMetrics)
		if err != nil {
-			e2elog.Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
+			Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
		} else {
			f.clusterAutoscalerMetricsBeforeTest, err = grabber.Grab()
			if err != nil {
-				e2elog.Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
+				Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
			} else {
-				e2elog.Logf("Gathered ClusterAutoscaler metrics before test")
+				Logf("Gathered ClusterAutoscaler metrics before test")
			}
		}
	}
@@ -311,15 +310,15 @@ func (f *Framework) AfterEach() {
					if !apierrors.IsNotFound(err) {
						nsDeletionErrors[ns.Name] = err
					} else {
-						e2elog.Logf("Namespace %v was already deleted", ns.Name)
+						Logf("Namespace %v was already deleted", ns.Name)
					}
				}
			}
		} else {
			if !TestContext.DeleteNamespace {
-				e2elog.Logf("Found DeleteNamespace=false, skipping namespace deletion!")
+				Logf("Found DeleteNamespace=false, skipping namespace deletion!")
			} else {
-				e2elog.Logf("Found DeleteNamespaceOnFailure=false and current test failed, skipping namespace deletion!")
+				Logf("Found DeleteNamespaceOnFailure=false and current test failed, skipping namespace deletion!")
			}
		}
@@ -334,7 +333,7 @@ func (f *Framework) AfterEach() {
			for namespaceKey, namespaceErr := range nsDeletionErrors {
				messages = append(messages, fmt.Sprintf("Couldn't delete ns: %q: %s (%#v)", namespaceKey, namespaceErr, namespaceErr))
			}
-			e2elog.Failf(strings.Join(messages, ","))
+			Failf(strings.Join(messages, ","))
		}
	}()
@@ -366,11 +365,11 @@ func (f *Framework) AfterEach() {
		grabMetricsFromKubelets := TestContext.GatherMetricsAfterTest != "master" && !ProviderIs("kubemark")
		grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, grabMetricsFromKubelets, true, true, true, TestContext.IncludeClusterAutoscalerMetrics)
		if err != nil {
-			e2elog.Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
+			Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
		} else {
			received, err := grabber.Grab()
			if err != nil {
-				e2elog.Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
+				Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
			}
			(*e2emetrics.ComponentCollection)(&received).ComputeClusterAutoscalerMetricsDelta(f.clusterAutoscalerMetricsBeforeTest)
			f.TestSummaries = append(f.TestSummaries, (*e2emetrics.ComponentCollection)(&received))
@@ -391,7 +390,7 @@ func (f *Framework) AfterEach() {
	// This is explicitly done at the very end of the test, to avoid
	// e.g. not removing namespace in case of this failure.
	if err := AllNodesReady(f.ClientSet, 3*time.Minute); err != nil {
-		e2elog.Failf("All nodes should be ready after test, %v", err)
+		Failf("All nodes should be ready after test, %v", err)
	}
}
@@ -490,7 +489,7 @@ func (f *Framework) WriteFileViaContainer(podName, containerName string, path st
	command := fmt.Sprintf("echo '%s' > '%s'", contents, path)
	stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "/bin/sh", "-c", command)
	if err != nil {
-		e2elog.Logf("error running kubectl exec to write file: %v\n stdout=%v\n stderr=%v)", err, string(stdout), string(stderr))
+		Logf("error running kubectl exec to write file: %v\n stdout=%v\n stderr=%v)", err, string(stdout), string(stderr))
	}
	return err
}
@@ -501,7 +500,7 @@ func (f *Framework) ReadFileViaContainer(podName, containerName string, path str

	stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "cat", path)
	if err != nil {
-		e2elog.Logf("error running kubectl exec to read file: %v\n stdout=%v\n stderr=%v)", err, string(stdout), string(stderr))
+		Logf("error running kubectl exec to read file: %v\n stdout=%v\n stderr=%v)", err, string(stdout), string(stderr))
	}
	return string(stdout), err
}
@@ -512,7 +511,7 @@ func (f *Framework) CheckFileSizeViaContainer(podName, containerName, path strin

	stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "ls", "-l", path)
	if err != nil {
-		e2elog.Logf("error running kubectl exec to read file: %v\n stdout=%v\n stderr=%v)", err, string(stdout), string(stderr))
+		Logf("error running kubectl exec to read file: %v\n stdout=%v\n stderr=%v)", err, string(stdout), string(stderr))
	}
	return string(stdout), err
}
@@ -549,7 +548,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
			TargetPort: intstr.FromInt(contPort),
		}}
	}
-	e2elog.Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
+	Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
	service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "service-for-" + appName,
@@ -575,7 +574,7 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n
	for i, node := range nodes.Items {
		// one per node, but no more than maxCount.
		if i <= maxCount {
-			e2elog.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
+			Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
			_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(&v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: fmt.Sprintf(appName+"-pod-%v", i),
@@ -646,19 +645,19 @@ func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
func kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
	for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ {
		if numRetries > 0 {
-			e2elog.Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
+			Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
		}

		stdOutBytes, stdErrBytes, err := kubectlExec(namespace, podName, containerName, args...)
		if err != nil {
			if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") {
				// Retry on "i/o timeout" errors
-				e2elog.Logf("Warning: kubectl exec encountered i/o timeout.\n err=%v\n stdout=%v\n stderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
+				Logf("Warning: kubectl exec encountered i/o timeout.\n err=%v\n stdout=%v\n stderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
				continue
			}
			if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") {
				// Retry on "container not found" errors
-				e2elog.Logf("Warning: kubectl exec encountered container not found.\n err=%v\n stdout=%v\n stderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
+				Logf("Warning: kubectl exec encountered container not found.\n err=%v\n stdout=%v\n stderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
				time.Sleep(2 * time.Second)
				continue
			}
@@ -683,7 +682,7 @@ func kubectlExec(namespace string, podName, containerName string, args ...string
	cmd := KubectlCmd(cmdArgs...)
	cmd.Stdout, cmd.Stderr = &stdout, &stderr

-	e2elog.Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
+	Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
	err := cmd.Run()
	return stdout.Bytes(), stderr.Bytes(), err
}
@@ -790,7 +789,7 @@ func (p *PodStateVerification) filter(c clientset.Interface, namespace *v1.Names

	ns := namespace.Name
	pl, err := filterLabels(p.Selectors, c, ns) // Build an v1.PodList to operate against.
-	e2elog.Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
+	Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
	if len(pl.Items) == 0 || err != nil {
		return pl.Items, err
	}
@@ -805,7 +804,7 @@ ReturnPodsSoFar:
		}
		passesVerify, err := passesVerifyFilter(pod, p.Verify)
		if err != nil {
-			e2elog.Logf("Error detected on %v : %v !", pod.Name, err)
+			Logf("Error detected on %v : %v !", pod.Name, err)
			break ReturnPodsSoFar
		}
		if passesVerify {
@@ -826,12 +825,12 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1

		// Failure
		if returnedErr != nil {
-			e2elog.Logf("Cutting polling short: We got an error from the pod filtering layer.")
+			Logf("Cutting polling short: We got an error from the pod filtering layer.")
			// stop polling if the pod filtering returns an error. that should never happen.
			// it indicates, for example, that the client is broken or something non-pod related.
			return false, returnedErr
		}
-		e2elog.Logf("Found %v / %v", len(pods), atLeast)
+		Logf("Found %v / %v", len(pods), atLeast)

		// Success
		if len(pods) >= atLeast {
@@ -840,15 +839,15 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1
		// Keep trying...
		return false, nil
	})
-	e2elog.Logf("WaitFor completed with timeout %v. Pods found = %v out of %v", timeout, len(pods), atLeast)
+	Logf("WaitFor completed with timeout %v. Pods found = %v out of %v", timeout, len(pods), atLeast)
	return pods, err
}

// WaitForOrFail provides a shorthand WaitFor with failure as an option if anything goes wrong.
func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration) {
	pods, err := cl.WaitFor(atLeast, timeout)
	if err != nil || len(pods) < atLeast {
-		e2elog.Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
+		Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
	}
}
@@ -861,14 +860,14 @@ func (cl *ClusterVerification) ForEach(podFunc func(v1.Pod)) error {
	pods, err := cl.podState.filter(cl.client, cl.namespace)
	if err == nil {
		if len(pods) == 0 {
-			e2elog.Failf("No pods matched the filter.")
+			Failf("No pods matched the filter.")
		}
-		e2elog.Logf("ForEach: Found %v pods from the filter. Now looping through them.", len(pods))
+		Logf("ForEach: Found %v pods from the filter. Now looping through them.", len(pods))
		for _, p := range pods {
			podFunc(p)
		}
	} else {
-		e2elog.Logf("ForEach: Something went wrong when filtering pods to execute against: %v", err)
+		Logf("ForEach: Something went wrong when filtering pods to execute against: %v", err)
	}

	return err
@@ -880,7 +879,7 @@ func GetLogToFileFunc(file *os.File) func(format string, args ...interface{}) {
	return func(format string, args ...interface{}) {
		writer := bufio.NewWriter(file)
		if _, err := fmt.Fprintf(writer, format, args...); err != nil {
-			e2elog.Logf("Failed to write file %v with test performance data: %v", file.Name(), err)
+			Logf("Failed to write file %v with test performance data: %v", file.Name(), err)
		}
		writer.Flush()
	}