
Commit e123922

Trace by opentelemetry
Signed-off-by: zouyu <[email protected]>
1 parent 901911d commit e123922

14 files changed: 370 additions, 35 deletions

cmd/kube-apiserver/apiserver.go

Lines changed: 4 additions & 0 deletions
@@ -27,13 +27,17 @@ import (
 	_ "k8s.io/component-base/metrics/prometheus/clientgo" // load all the prometheus client-go plugins
 	_ "k8s.io/component-base/metrics/prometheus/version" // for version metric registration
 	"k8s.io/kubernetes/cmd/kube-apiserver/app"
+	"k8s.io/kubernetes/pkg/util/httptrace"
 )

 func main() {
 	rand.Seed(time.Now().UnixNano())

 	command := app.NewAPIServerCommand()

+	shutdown := httptrace.InitTracer()
+	defer shutdown()
+
 	// TODO: once we switch everything over to Cobra commands, we can go back to calling
 	// utilflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the
 	// normalize func and add the go flag set by hand.
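The pkg/util/httptrace package this hunk imports is one of the 14 changed files not shown in this section. As orientation only, here is a minimal sketch of what an InitTracer that returns a shutdown function might look like with the OpenTelemetry Go SDK; the stdout exporter and every identifier other than httptrace.InitTracer are assumptions, not the commit's actual code.

package httptrace

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// InitTracer registers a global tracer provider and returns a function
// that flushes and stops it; callers defer the returned function (as in
// the apiserver main above) so buffered spans are exported on exit.
func InitTracer() func() {
	// A stdout exporter keeps the sketch self-contained; the real
	// package presumably exports to a collector instead (assumption).
	exporter, err := stdouttrace.New()
	if err != nil {
		log.Fatalf("creating trace exporter: %v", err)
	}
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exporter))
	otel.SetTracerProvider(tp)
	return func() {
		if err := tp.Shutdown(context.Background()); err != nil {
			log.Printf("shutting down tracer provider: %v", err)
		}
	}
}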

pkg/controller/controller_utils.go

Lines changed: 8 additions & 8 deletions
@@ -453,7 +453,7 @@ type PodControlInterface interface {
 	// and sets the ControllerRef.
 	CreatePodsOnNode(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error
 	// CreatePodsWithControllerRef creates new pods according to the spec, and sets object as the pod's controller.
-	CreatePodsWithControllerRef(namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error
+	CreatePodsWithControllerRef(ctx context.Context, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error
 	// DeletePod deletes the pod identified by podID.
 	DeletePod(namespace string, podID string, object runtime.Object) error
 	// PatchPod patches the pod.
@@ -519,21 +519,21 @@ func validateControllerRef(controllerRef *metav1.OwnerReference) error {
 }

 func (r RealPodControl) CreatePods(namespace string, template *v1.PodTemplateSpec, object runtime.Object) error {
-	return r.createPods("", namespace, template, object, nil)
+	return r.createPods(context.Background(), "", namespace, template, object, nil)
 }

-func (r RealPodControl) CreatePodsWithControllerRef(namespace string, template *v1.PodTemplateSpec, controllerObject runtime.Object, controllerRef *metav1.OwnerReference) error {
+func (r RealPodControl) CreatePodsWithControllerRef(ctx context.Context, namespace string, template *v1.PodTemplateSpec, controllerObject runtime.Object, controllerRef *metav1.OwnerReference) error {
 	if err := validateControllerRef(controllerRef); err != nil {
 		return err
 	}
-	return r.createPods("", namespace, template, controllerObject, controllerRef)
+	return r.createPods(ctx, "", namespace, template, controllerObject, controllerRef)
 }

 func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
 	if err := validateControllerRef(controllerRef); err != nil {
 		return err
 	}
-	return r.createPods(nodeName, namespace, template, object, controllerRef)
+	return r.createPods(context.Background(), nodeName, namespace, template, object, controllerRef)
 }

 func (r RealPodControl) PatchPod(namespace, name string, data []byte) error {
@@ -566,7 +566,7 @@ func GetPodFromTemplate(template *v1.PodTemplateSpec, parentObject runtime.Objec
 	return pod, nil
 }

-func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
+func (r RealPodControl) createPods(ctx context.Context, nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
 	pod, err := GetPodFromTemplate(template, object, controllerRef)
 	if err != nil {
 		return err
@@ -577,7 +577,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodT
 	if len(labels.Set(pod.Labels)) == 0 {
 		return fmt.Errorf("unable to create pods, no labels")
 	}
-	newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
+	newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
 	if err != nil {
 		// only send an event if the namespace isn't terminating
 		if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
@@ -652,7 +652,7 @@ func (f *FakePodControl) CreatePods(namespace string, spec *v1.PodTemplateSpec,
 	return nil
 }

-func (f *FakePodControl) CreatePodsWithControllerRef(namespace string, spec *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
+func (f *FakePodControl) CreatePodsWithControllerRef(ctx context.Context, namespace string, spec *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
 	f.Lock()
 	defer f.Unlock()
 	f.CreateCallCount++
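Threading a context.Context through PodControlInterface only yields cross-process traces if client-go's transport actually injects the span from that ctx into outgoing request headers. That wiring is not visible in this section; the sketch below shows one plausible way to do it with the otelhttp contrib instrumentation. The helper name and the use of otelhttp are assumptions, not code from this commit.

package httptrace

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"k8s.io/client-go/rest"
)

// WithTracing returns a copy of the rest.Config whose round tripper
// starts a client span and injects W3C trace-context headers, so the
// ctx passed to Create/Update/Delete calls reaches the apiserver.
func WithTracing(cfg *rest.Config) *rest.Config {
	cfg = rest.CopyConfig(cfg)
	cfg.Wrap(func(rt http.RoundTripper) http.RoundTripper {
		return otelhttp.NewTransport(rt)
	})
	return cfg
}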

pkg/controller/daemon/daemon_controller.go

Lines changed: 1 addition & 1 deletion
@@ -968,7 +968,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
 			podTemplate.Spec.Affinity = util.ReplaceDaemonSetPodNodeNameNodeAffinity(
 				podTemplate.Spec.Affinity, nodesNeedingDaemonPods[ix])

-			err := dsc.podControl.CreatePodsWithControllerRef(ds.Namespace, podTemplate,
+			err := dsc.podControl.CreatePodsWithControllerRef(context.Background(), ds.Namespace, podTemplate,
 				ds, metav1.NewControllerRef(ds, controllerKind))

 			if err != nil {

pkg/controller/daemon/daemon_controller_test.go

Lines changed: 2 additions & 2 deletions
@@ -267,10 +267,10 @@ func (f *fakePodControl) CreatePodsOnNode(nodeName, namespace string, template *
 	return nil
 }

-func (f *fakePodControl) CreatePodsWithControllerRef(namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
+func (f *fakePodControl) CreatePodsWithControllerRef(ctx context.Context, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
 	f.Lock()
 	defer f.Unlock()
-	if err := f.FakePodControl.CreatePodsWithControllerRef(namespace, template, object, controllerRef); err != nil {
+	if err := f.FakePodControl.CreatePodsWithControllerRef(context.Background(), namespace, template, object, controllerRef); err != nil {
 		return fmt.Errorf("failed to create pod for DaemonSet")
 	}


pkg/controller/deployment/sync.go

Lines changed: 27 additions & 11 deletions
@@ -24,12 +24,13 @@ import (
 	"strconv"

 	apps "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/controller"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
+	"k8s.io/kubernetes/pkg/util/httptrace"
 	labelsutil "k8s.io/kubernetes/pkg/util/labels"
 )

@@ -72,6 +73,9 @@ func (dc *DeploymentController) sync(d *apps.Deployment, rsList []*apps.ReplicaS
 // These conditions are needed so that we won't accidentally report lack of progress for resumed deployments
 // that were paused for longer than progressDeadlineSeconds.
 func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error {
+	// Get span from annotations and set to ctx
+	ctx := httptrace.SpanContextFromAnnotations(context.Background(), d.GetAnnotations())
+
 	if !deploymentutil.HasProgressDeadline(d) {
 		return nil
 	}
@@ -98,7 +102,7 @@ func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error
 	}

 	var err error
-	_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
+	_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(ctx, d, metav1.UpdateOptions{})
 	return err
 }

@@ -136,6 +140,9 @@ const (
 // 3. If there's no existing new RS and createIfNotExisted is true, create one with appropriate revision number (maxOldRevision + 1) and replicas.
 // Note that the pod-template-hash will be added to adopted RSes and pods.
 func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, oldRSs []*apps.ReplicaSet, createIfNotExisted bool) (*apps.ReplicaSet, error) {
+	// Get span from annotations and set to ctx
+	ctx := httptrace.SpanContextFromAnnotations(context.Background(), d.GetAnnotations())
+
 	existingNewRS := deploymentutil.FindNewReplicaSet(d, rsList)

 	// Calculate the max revision number among all old RSes
@@ -155,7 +162,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 	minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds
 	if annotationsUpdated || minReadySecondsNeedsUpdate {
 		rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds
-		return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(context.TODO(), rsCopy, metav1.UpdateOptions{})
+		return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(ctx, rsCopy, metav1.UpdateOptions{})
 	}

 	// Should use the revision in existingNewRS's annotation, since it set by before
@@ -173,7 +180,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old

 	if needsUpdate {
 		var err error
-		if _, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{}); err != nil {
+		if _, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(ctx, d, metav1.UpdateOptions{}); err != nil {
 			return nil, err
 		}
 	}
@@ -220,7 +227,8 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 	// hash collisions. If there is any other error, we need to report it in the status of
 	// the Deployment.
 	alreadyExists := false
-	createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(context.TODO(), &newRS, metav1.CreateOptions{})
+
+	createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(ctx, &newRS, metav1.CreateOptions{})
 	switch {
 	// We may end up hitting this due to a slow cache or a fast resync of the Deployment.
 	case errors.IsAlreadyExists(err):
@@ -252,7 +260,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 		*d.Status.CollisionCount++
 		// Update the collisionCount for the Deployment and let it requeue by returning the original
 		// error.
-		_, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
+		_, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(ctx, d, metav1.UpdateOptions{})
 		if dErr == nil {
 			klog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
 		}
@@ -268,7 +276,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 		// We don't really care about this error at this point, since we have a bigger issue to report.
 		// TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account
 		// these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568
-		_, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
+		_, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(ctx, d, metav1.UpdateOptions{})
 	}
 	dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
 	return nil, err
@@ -285,7 +293,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 		needsUpdate = true
 	}
 	if needsUpdate {
-		_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
+		_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(ctx, d, metav1.UpdateOptions{})
 	}
 	return createdRS, err
 }
@@ -409,6 +417,8 @@ func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *apps.ReplicaSe
 }

 func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment, scalingOperation string) (bool, *apps.ReplicaSet, error) {
+	// Get span from annotations and set to ctx
+	ctx := httptrace.SpanContextFromAnnotations(context.Background(), rs.GetAnnotations())

 	sizeNeedsUpdate := *(rs.Spec.Replicas) != newScale

@@ -420,7 +430,7 @@ func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale in
 		rsCopy := rs.DeepCopy()
 		*(rsCopy.Spec.Replicas) = newScale
 		deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))
-		rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(context.TODO(), rsCopy, metav1.UpdateOptions{})
+		rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(ctx, rsCopy, metav1.UpdateOptions{})
 		if err == nil && sizeNeedsUpdate {
 			scaled = true
 			dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
@@ -433,6 +443,9 @@ func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale in
 // where N=d.Spec.RevisionHistoryLimit. Old replica sets are older versions of the podtemplate of a deployment kept
 // around by default 1) for historical reasons and 2) for the ability to rollback a deployment.
 func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) error {
+	// Get span from annotations and set to ctx
+	ctx := httptrace.SpanContextFromAnnotations(context.Background(), deployment.GetAnnotations())
+
 	if !deploymentutil.HasRevisionHistoryLimit(deployment) {
 		return nil
 	}
@@ -458,7 +471,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, dep
 			continue
 		}
 		klog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
-		if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(context.TODO(), rs.Name, metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) {
+		if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(ctx, rs.Name, metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) {
 			// Return error instead of aggregating and continuing DELETEs on the theory
 			// that we may be overloading the api server.
 			return err
@@ -470,6 +483,9 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, dep

 // syncDeploymentStatus checks if the status is up-to-date and sync it if necessary
 func (dc *DeploymentController) syncDeploymentStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, d *apps.Deployment) error {
+	// Get span from annotations and set to ctx
+	ctx := httptrace.SpanContextFromAnnotations(context.Background(), d.GetAnnotations())
+
 	newStatus := calculateStatus(allRSs, newRS, d)

 	if reflect.DeepEqual(d.Status, newStatus) {
@@ -478,7 +494,7 @@ func (dc *DeploymentController) syncDeploymentStatus(allRSs []*apps.ReplicaSet,

 	newDeployment := d
 	newDeployment.Status = newStatus
-	_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment, metav1.UpdateOptions{})
+	_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(ctx, newDeployment, metav1.UpdateOptions{})
 	return err
 }

pkg/controller/job/job_controller.go

Lines changed: 2 additions & 2 deletions
@@ -26,7 +26,7 @@ import (
 	"time"

 	batch "k8s.io/api/batch/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -776,7 +776,7 @@ func (jm *Controller) manageJob(activePods []*v1.Pod, succeeded int32, job *batc
 		for i := int32(0); i < batchSize; i++ {
 			go func() {
 				defer wait.Done()
-				err := jm.podControl.CreatePodsWithControllerRef(job.Namespace, &job.Spec.Template, job, metav1.NewControllerRef(job, controllerKind))
+				err := jm.podControl.CreatePodsWithControllerRef(context.Background(), job.Namespace, &job.Spec.Template, job, metav1.NewControllerRef(job, controllerKind))
 				if err != nil {
 					if errors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
 						// If the namespace is being torn down, we can safely ignore

pkg/controller/replicaset/replica_set.go

Lines changed: 11 additions & 3 deletions
@@ -37,7 +37,7 @@ import (
 	"time"

 	apps "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -60,6 +60,7 @@ import (
 	"k8s.io/klog/v2"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/controller"
+	"k8s.io/kubernetes/pkg/util/httptrace"
 	"k8s.io/utils/integer"
 )

@@ -539,6 +540,9 @@ func (rsc *ReplicaSetController) processNextWorkItem() bool {
 // Does NOT modify <filteredPods>.
 // It will requeue the replica set in case of an error while creating/deleting pods.
 func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps.ReplicaSet) error {
+	// Get span from annotations and set to ctx
+	ctx := httptrace.SpanContextFromAnnotations(context.Background(), rs.GetAnnotations())
+
 	diff := len(filteredPods) - int(*(rs.Spec.Replicas))
 	rsKey, err := controller.KeyFunc(rs)
 	if err != nil {
@@ -566,7 +570,8 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps
 		// after one of its pods fails. Conveniently, this also prevents the
 		// event spam that those failures would generate.
 		successfulCreations, err := slowStartBatch(diff, controller.SlowStartInitialBatchSize, func() error {
-			err := rsc.podControl.CreatePodsWithControllerRef(rs.Namespace, &rs.Spec.Template, rs, metav1.NewControllerRef(rs, rsc.GroupVersionKind))
+
+			err = rsc.podControl.CreatePodsWithControllerRef(ctx, rs.Namespace, &rs.Spec.Template, rs, metav1.NewControllerRef(rs, rsc.GroupVersionKind))
 			if err != nil {
 				if errors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
 					// if the namespace is being terminated, we don't have to do
@@ -711,10 +716,13 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
 }

 func (rsc *ReplicaSetController) claimPods(rs *apps.ReplicaSet, selector labels.Selector, filteredPods []*v1.Pod) ([]*v1.Pod, error) {
+	// Get span from annotations and set to ctx
+	ctx := httptrace.SpanContextFromAnnotations(context.Background(), rs.GetAnnotations())
+
 	// If any adoptions are attempted, we should first recheck for deletion with
 	// an uncached quorum read sometime after listing Pods (see #42639).
 	canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
-		fresh, err := rsc.kubeClient.AppsV1().ReplicaSets(rs.Namespace).Get(context.TODO(), rs.Name, metav1.GetOptions{})
+		fresh, err := rsc.kubeClient.AppsV1().ReplicaSets(rs.Namespace).Get(ctx, rs.Name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
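For these Extract calls to find anything, something must have injected the span into the ReplicaSet's annotations when the owning object was created or updated; that inject half presumably lives in the files omitted from this section. A hypothetical counterpart, mirroring the extract sketch above and reusing its package and imports (the name and behavior are assumptions, not the commit's code):

// SpanContextToAnnotations stores the current span context into an
// object's annotations so a later, asynchronous reconcile can rejoin
// the trace via SpanContextFromAnnotations. The annotations map must
// be non-nil, since Inject writes into it.
func SpanContextToAnnotations(ctx context.Context, annotations map[string]string) {
	propagation.TraceContext{}.Inject(ctx, propagation.MapCarrier(annotations))
}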

pkg/controller/replication/conversion.go

Lines changed: 3 additions & 3 deletions
@@ -29,7 +29,7 @@ import (

 	apps "k8s.io/api/apps/v1"
 	autoscalingv1 "k8s.io/api/autoscaling/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -333,12 +333,12 @@ func (pc podControlAdapter) CreatePodsOnNode(nodeName, namespace string, templat
 	return errors.New("CreatePodsOnNode() is not implemented for podControlAdapter")
 }

-func (pc podControlAdapter) CreatePodsWithControllerRef(namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
+func (pc podControlAdapter) CreatePodsWithControllerRef(ctx context.Context, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
 	rc, err := convertRStoRC(object.(*apps.ReplicaSet))
 	if err != nil {
 		return err
 	}
-	return pc.PodControlInterface.CreatePodsWithControllerRef(namespace, template, rc, controllerRef)
+	return pc.PodControlInterface.CreatePodsWithControllerRef(ctx, namespace, template, rc, controllerRef)
 }

 func (pc podControlAdapter) DeletePod(namespace string, podID string, object runtime.Object) error {
