Commit 4d4e4f4

Modify apimachinery, apps, and instrumentation tests to import framework/log

This is part of the transition to using framework/log instead of Logf from the framework package. It helps with import size and import cycles when importing the framework or its subpackages.
1 parent 71bbabc commit 4d4e4f4
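
As context for the diffs below, here is a minimal sketch of the pattern this commit applies (the file and helper below are hypothetical and not part of the commit): each test file adds the aliased import of the log subpackage and switches its logging call sites from framework.Logf to e2elog.Logf, which keeps the same printf-style signature.

package apimachinery

import (
	// The aliased log subpackage replaces framework.Logf at call sites;
	// the framework package is still imported where other helpers are needed.
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// logCreateError is a hypothetical helper illustrating the mechanical change.
// Before this commit, the call below would have been framework.Logf(...).
func logCreateError(name string, err error) {
	e2elog.Logf("failed to create %q: %v", name, err)
}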

Note: large commits have some content hidden by default, so only part of the diff is shown below.

42 files changed: +321 −280 lines

test/e2e/apimachinery/BUILD

Lines changed: 1 addition & 0 deletions
@@ -81,6 +81,7 @@ go_library(
         "//test/e2e/apps:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/deployment:go_default_library",
+        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
         "//test/e2e/framework/ssh:go_default_library",
         "//test/utils:go_default_library",

test/e2e/apimachinery/aggregator.go

Lines changed: 6 additions & 5 deletions
@@ -41,6 +41,7 @@ import (
 	rbacv1beta1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
 	"k8s.io/utils/pointer"
@@ -373,16 +374,16 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 	}, "Waited %s for the sample-apiserver to be ready to handle requests.")
 	if err != nil {
 		currentAPIServiceJSON, _ := json.Marshal(currentAPIService)
-		framework.Logf("current APIService: %s", string(currentAPIServiceJSON))
+		e2elog.Logf("current APIService: %s", string(currentAPIServiceJSON))

 		currentPodsJSON, _ := json.Marshal(currentPods)
-		framework.Logf("current pods: %s", string(currentPodsJSON))
+		e2elog.Logf("current pods: %s", string(currentPodsJSON))

 		if currentPods != nil {
 			for _, pod := range currentPods.Items {
 				for _, container := range pod.Spec.Containers {
 					logs, err := framework.GetPodLogs(client, namespace, pod.Name, container.Name)
-					framework.Logf("logs of %s/%s (error: %v): %s", pod.Name, container.Name, err, logs)
+					e2elog.Logf("logs of %s/%s (error: %v): %s", pod.Name, container.Name, err, logs)
 				}
 			}
 		}
@@ -485,12 +486,12 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 }

 // pollTimed will call Poll but time how long Poll actually took.
-// It will then framework.logf the msg with the duration of the Poll.
+// It will then e2elog.Logf the msg with the duration of the Poll.
 // It is assumed that msg will contain one %s for the elapsed time.
 func pollTimed(interval, timeout time.Duration, condition wait.ConditionFunc, msg string) error {
 	defer func(start time.Time, msg string) {
 		elapsed := time.Since(start)
-		framework.Logf(msg, elapsed)
+		e2elog.Logf(msg, elapsed)
 	}(time.Now(), msg)
 	return wait.Poll(interval, timeout, condition)
 }

test/e2e/apimachinery/chunking.go

Lines changed: 8 additions & 7 deletions
@@ -33,6 +33,7 @@ import (
 	"k8s.io/apiserver/pkg/storage/storagebackend"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )

 const numberOfTotalResources = 400
@@ -62,7 +63,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 				if err == nil {
 					return
 				}
-				framework.Logf("Got an error creating template %d: %v", i, err)
+				e2elog.Logf("Got an error creating template %d: %v", i, err)
 			}
 			ginkgo.Fail("Unable to create template %d, exiting", i)
 		})
@@ -81,7 +82,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 				opts.Limit = int64(rand.Int31n(numberOfTotalResources/10) + 1)
 				list, err := client.List(opts)
 				gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
-				framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
+				e2elog.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
 				gomega.Expect(len(list.Items)).To(gomega.BeNumerically("<=", opts.Limit))

 				if len(lastRV) == 0 {
@@ -122,21 +123,21 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 		firstToken := list.Continue
 		firstRV := list.ResourceVersion
 		gomega.Expect(int(list.RemainingItemCount) + len(list.Items)).To(gomega.BeNumerically("==", numberOfTotalResources))
-		framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, firstToken)
+		e2elog.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, firstToken)

 		ginkgo.By("retrieving the second page until the token expires")
 		opts.Continue = firstToken
 		var inconsistentToken string
 		wait.Poll(20*time.Second, 2*storagebackend.DefaultCompactInterval, func() (bool, error) {
 			_, err := client.List(opts)
 			if err == nil {
-				framework.Logf("Token %s has not expired yet", firstToken)
+				e2elog.Logf("Token %s has not expired yet", firstToken)
 				return false, nil
 			}
 			if err != nil && !errors.IsResourceExpired(err) {
 				return false, err
 			}
-			framework.Logf("got error %s", err)
+			e2elog.Logf("got error %s", err)
 			status, ok := err.(errors.APIStatus)
 			if !ok {
 				return false, fmt.Errorf("expect error to implement the APIStatus interface, got %v", reflect.TypeOf(err))
@@ -145,7 +146,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 			if len(inconsistentToken) == 0 {
 				return false, fmt.Errorf("expect non empty continue token")
 			}
-			framework.Logf("Retrieved inconsistent continue %s", inconsistentToken)
+			e2elog.Logf("Retrieved inconsistent continue %s", inconsistentToken)
 			return true, nil
 		})

@@ -169,7 +170,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 			list, err := client.List(opts)
 			gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
 			gomega.Expect(int(list.RemainingItemCount) + len(list.Items) + found).To(gomega.BeNumerically("==", numberOfTotalResources))
-			framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
+			e2elog.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
 			gomega.Expect(len(list.Items)).To(gomega.BeNumerically("<=", opts.Limit))
 			gomega.Expect(list.ResourceVersion).To(gomega.Equal(lastRV))
 			for _, item := range list.Items {

test/e2e/apimachinery/crd_conversion_webhook.go

Lines changed: 2 additions & 1 deletion
@@ -33,6 +33,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/utils/crd"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	"k8s.io/utils/pointer"
@@ -169,7 +170,7 @@ func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespa
 		},
 	})
 	if err != nil && errors.IsAlreadyExists(err) {
-		framework.Logf("role binding %s already exists", roleBindingCRDName)
+		e2elog.Logf("role binding %s already exists", roleBindingCRDName)
 	} else {
 		framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace)
 	}

test/e2e/apimachinery/etcd_failure.go

Lines changed: 3 additions & 2 deletions
@@ -25,6 +25,7 @@ import (
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/test/e2e/apps"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -114,7 +115,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
 		options := metav1.ListOptions{LabelSelector: rcSelector.String()}
 		pods, err := podClient.List(options)
 		if err != nil {
-			framework.Logf("apiserver returned error, as expected before recovery: %v", err)
+			e2elog.Logf("apiserver returned error, as expected before recovery: %v", err)
 			return false, nil
 		}
 		if len(pods.Items) == 0 {
@@ -124,7 +125,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
 			err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
 			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
 		}
-		framework.Logf("apiserver has recovered")
+		e2elog.Logf("apiserver has recovered")
 		return true, nil
 	}))

test/e2e/apimachinery/garbage_collector.go

Lines changed: 26 additions & 25 deletions
@@ -38,6 +38,7 @@ import (
 	"k8s.io/apiserver/pkg/storage/names"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/framework/metrics"

 	"github.com/onsi/ginkgo"
@@ -244,14 +245,14 @@ func gatherMetrics(f *framework.Framework) {
 	var summary framework.TestDataSummary
 	grabber, err := metrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, false, false, true, false, false)
 	if err != nil {
-		framework.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.")
+		e2elog.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.")
 	} else {
 		received, err := grabber.Grab()
 		if err != nil {
-			framework.Logf("MetricsGrabber failed grab metrics. Skipping metrics gathering.")
+			e2elog.Logf("MetricsGrabber failed grab metrics. Skipping metrics gathering.")
 		} else {
 			summary = (*framework.MetricsForE2E)(&received)
-			framework.Logf(summary.PrintHumanReadable())
+			e2elog.Logf(summary.PrintHumanReadable())
 		}
 	}
 }
@@ -653,15 +654,15 @@ var _ = SIGDescribe("Garbage collector", func() {
 			_, err := rcClient.Get(rc.Name, metav1.GetOptions{})
 			if err == nil {
 				pods, _ := podClient.List(metav1.ListOptions{})
-				framework.Logf("%d pods remaining", len(pods.Items))
+				e2elog.Logf("%d pods remaining", len(pods.Items))
 				count := 0
 				for _, pod := range pods.Items {
 					if pod.ObjectMeta.DeletionTimestamp == nil {
 						count++
 					}
 				}
-				framework.Logf("%d pods has nil DeletionTimestamp", count)
-				framework.Logf("")
+				e2elog.Logf("%d pods has nil DeletionTimestamp", count)
+				e2elog.Logf("")
 				return false, nil
 			}
 			if errors.IsNotFound(err) {
@@ -673,10 +674,10 @@ var _ = SIGDescribe("Garbage collector", func() {
 			if err2 != nil {
 				framework.Failf("%v", err2)
 			}
-			framework.Logf("%d remaining pods are:", len(pods.Items))
-			framework.Logf("The ObjectMeta of the remaining pods are:")
+			e2elog.Logf("%d remaining pods are:", len(pods.Items))
+			e2elog.Logf("The ObjectMeta of the remaining pods are:")
 			for _, pod := range pods.Items {
-				framework.Logf("%#v", pod.ObjectMeta)
+				e2elog.Logf("%#v", pod.ObjectMeta)
 			}
 			framework.Failf("failed to delete the rc: %v", err)
 		}
@@ -755,15 +756,15 @@ var _ = SIGDescribe("Garbage collector", func() {
 			_, err := rcClient.Get(rc1.Name, metav1.GetOptions{})
 			if err == nil {
 				pods, _ := podClient.List(metav1.ListOptions{})
-				framework.Logf("%d pods remaining", len(pods.Items))
+				e2elog.Logf("%d pods remaining", len(pods.Items))
 				count := 0
 				for _, pod := range pods.Items {
 					if pod.ObjectMeta.DeletionTimestamp == nil {
 						count++
 					}
 				}
-				framework.Logf("%d pods has nil DeletionTimestamp", count)
-				framework.Logf("")
+				e2elog.Logf("%d pods has nil DeletionTimestamp", count)
+				e2elog.Logf("")
 				return false, nil
 			}
 			if errors.IsNotFound(err) {
@@ -775,10 +776,10 @@ var _ = SIGDescribe("Garbage collector", func() {
 			if err2 != nil {
 				framework.Failf("%v", err2)
 			}
-			framework.Logf("%d remaining pods are:", len(pods.Items))
-			framework.Logf("ObjectMeta of remaining pods are:")
+			e2elog.Logf("%d remaining pods are:", len(pods.Items))
+			e2elog.Logf("ObjectMeta of remaining pods are:")
 			for _, pod := range pods.Items {
-				framework.Logf("%#v", pod.ObjectMeta)
+				e2elog.Logf("%#v", pod.ObjectMeta)
 			}
 			framework.Failf("failed to delete rc %s, err: %v", rc1Name, err)
 		}
@@ -830,15 +831,15 @@ var _ = SIGDescribe("Garbage collector", func() {
 		patch1 := addRefPatch(pod3.Name, pod3.UID)
 		pod1, err = podClient.Patch(pod1.Name, types.StrategicMergePatchType, patch1)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1)
-		framework.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences)
+		e2elog.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences)
 		patch2 := addRefPatch(pod1.Name, pod1.UID)
 		pod2, err = podClient.Patch(pod2.Name, types.StrategicMergePatchType, patch2)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2)
-		framework.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences)
+		e2elog.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences)
 		patch3 := addRefPatch(pod2.Name, pod2.UID)
 		pod3, err = podClient.Patch(pod3.Name, types.StrategicMergePatchType, patch3)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3)
-		framework.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences)
+		e2elog.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences)
 		// delete one pod, should result in the deletion of all pods
 		deleteOptions := getForegroundOptions()
 		deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod1.UID))
@@ -858,7 +859,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 			}
 			return false, nil
 		}); err != nil {
-			framework.Logf("pods are %#v", pods.Items)
+			e2elog.Logf("pods are %#v", pods.Items)
 			framework.Failf("failed to wait for all pods to be deleted: %v", err)
 		}
 	})
@@ -909,7 +910,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 		if err != nil {
 			framework.Failf("failed to create owner resource %q: %v", ownerName, err)
 		}
-		framework.Logf("created owner resource %q", ownerName)
+		e2elog.Logf("created owner resource %q", ownerName)

 		// Create a custom dependent resource.
 		dependentName := names.SimpleNameGenerator.GenerateName("dependent")
@@ -934,7 +935,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 		if err != nil {
 			framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
 		}
-		framework.Logf("created dependent resource %q", dependentName)
+		e2elog.Logf("created dependent resource %q", dependentName)

 		// Delete the owner.
 		background := metav1.DeletePropagationBackground
@@ -948,8 +949,8 @@ var _ = SIGDescribe("Garbage collector", func() {
 			_, err := resourceClient.Get(dependentName, metav1.GetOptions{})
 			return errors.IsNotFound(err), nil
 		}); err != nil {
-			framework.Logf("owner: %#v", persistedOwner)
-			framework.Logf("dependent: %#v", persistedDependent)
+			e2elog.Logf("owner: %#v", persistedOwner)
+			e2elog.Logf("dependent: %#v", persistedDependent)
 			framework.Failf("failed waiting for dependent resource %q to be deleted", dependentName)
 		}

@@ -1010,7 +1011,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 		if err != nil {
 			framework.Failf("failed to create owner resource %q: %v", ownerName, err)
 		}
-		framework.Logf("created owner resource %q", ownerName)
+		e2elog.Logf("created owner resource %q", ownerName)

 		// Create a custom dependent resource.
 		dependentName := names.SimpleNameGenerator.GenerateName("dependent")
@@ -1035,7 +1036,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 		if err != nil {
 			framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
 		}
-		framework.Logf("created dependent resource %q", dependentName)
+		e2elog.Logf("created dependent resource %q", dependentName)

 		// Delete the owner and orphan the dependent.
 		err = resourceClient.Delete(ownerName, getOrphanOptions())

test/e2e/apimachinery/namespace.go

Lines changed: 2 additions & 1 deletion
@@ -28,6 +28,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	imageutils "k8s.io/kubernetes/test/utils/image"

 	"github.com/onsi/ginkgo"
@@ -74,7 +75,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
 				}
 			}
 			if cnt > maxAllowedAfterDel {
-				framework.Logf("Remaining namespaces : %v", cnt)
+				e2elog.Logf("Remaining namespaces : %v", cnt)
 				return false, nil
 			}
 			return true, nil

test/e2e/apimachinery/resource_quota.go

Lines changed: 2 additions & 1 deletion
@@ -33,6 +33,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/pkg/quota/v1/evaluator/core"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/utils/crd"
 	imageutils "k8s.io/kubernetes/test/utils/image"

@@ -1591,7 +1592,7 @@ func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used v1.R
 		// verify that the quota shows the expected used resource values
 		for k, v := range used {
 			if actualValue, found := resourceQuota.Status.Used[k]; !found || (actualValue.Cmp(v) != 0) {
-				framework.Logf("resource %s, expected %s, actual %s", k, v.String(), actualValue.String())
+				e2elog.Logf("resource %s, expected %s, actual %s", k, v.String(), actualValue.String())
 				return false, nil
 			}
 		}
