
Commit dec4684

Remove NodeGetInfo kubeclient calls
1 parent 18c35d2

4 files changed: +2 -43 lines changed

cmd/gce-pd-csi-driver/main.go

Lines changed: 0 additions & 10 deletions
@@ -280,16 +280,6 @@ func handle() {
 		DeviceInUseTimeout:       *deviceInUseTimeout,
 		EnableDataCache:          *enableDataCacheFlag,
 		DataCacheEnabledNodePool: isDataCacheEnabledNodePool,
-		EnableDiskTopology:       *diskTopology,
-	}
-
-	if *diskTopology {
-		klog.V(2).Infof("Setting up kubeClient")
-		kubeClient, err := instantiateKubeClient()
-		if err != nil {
-			klog.Fatalf("Failed to instantiate Kubernetes client: %v", err)
-		}
-		nsArgs.KubeClient = kubeClient
 	}
 
 	nodeServer = driver.NewNodeServer(gceDriver, mounter, deviceUtils, meta, statter, nsArgs)
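
The instantiateKubeClient helper that this hunk stops calling is not shown in the diff. For orientation only, a typical in-cluster clientset setup with k8s.io/client-go looks roughly like the sketch below; this is an assumption about the helper's general shape, not the driver's actual implementation.

	// Sketch only: conventional in-cluster clientset setup, assumed to be
	// close to what instantiateKubeClient wrapped before this commit.
	// imports: "k8s.io/client-go/kubernetes", "k8s.io/client-go/rest", "k8s.io/klog/v2"
	cfg, err := rest.InClusterConfig()
	if err != nil {
		klog.Fatalf("Failed to build in-cluster config: %v", err)
	}
	kubeClient, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		klog.Fatalf("Failed to create Kubernetes clientset: %v", err)
	}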

pkg/gce-pd-csi-driver/gce-pd-driver.go

Lines changed: 0 additions & 2 deletions
@@ -157,8 +157,6 @@ func NewNodeServer(gceDriver *GCEDriver, mounter *mount.SafeFormatAndMount, devi
 		deviceInUseErrors:        newDeviceErrMap(args.DeviceInUseTimeout),
 		EnableDataCache:          args.EnableDataCache,
 		DataCacheEnabledNodePool: args.DataCacheEnabledNodePool,
-		KubeClient:               args.KubeClient,
-		EnableDiskTopology:       args.EnableDiskTopology,
 	}
 }

pkg/gce-pd-csi-driver/node.go

Lines changed: 1 addition & 23 deletions
@@ -35,7 +35,6 @@ import (
 	"k8s.io/klog/v2"
 	"k8s.io/mount-utils"
 
-	"k8s.io/client-go/kubernetes"
 	"sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common"
 	"sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/deviceutils"
 	metadataservice "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/metadata"
@@ -52,14 +51,11 @@ type GCENodeServer struct {
 	EnableDataCache          bool
 	DataCacheEnabledNodePool bool
 
-	KubeClient         kubernetes.Interface
-	EnableDiskTopology bool
-
 	// A map storing all volumes with ongoing operations so that additional operations
 	// for that same volume (as defined by VolumeID) return an Aborted error
 	volumeLocks *common.VolumeLocks
 
-	// enableDeviceInUseCheck, if true, will block NodeUnstageVolume requests if the specified
+	// enableDeviceInUseCheck, if true, will block NodeUnstageVolume request if the specified
 	// device is still in use (or until --device-in-use-timeout is reached, if specified)
 	enableDeviceInUseCheck bool
 	// deviceInUseErrors keeps tracks of device names and a timestamp for when an error is
@@ -90,10 +86,6 @@ type NodeServerArgs struct {
 	EnableDataCache bool
 
 	DataCacheEnabledNodePool bool
-
-	KubeClient kubernetes.Interface
-
-	EnableDiskTopology bool
 }
 
 var _ csi.NodeServer = &GCENodeServer{}
@@ -580,20 +572,6 @@ func (ns *GCENodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRe
 		},
 	}
 
-	if ns.EnableDiskTopology {
-		labels, err := ns.fetchGKETopologyLabels(ctx, ns.MetadataService.GetName())
-		if err != nil {
-			// Perhaps we don't want to fail here. We are introducing a new
-			// dependency and we might be better off allowing this failure to
-			// happen and moving on to retrieve the zone from GCE MDS.
-			return nil, err
-		}
-
-		for k, v := range labels {
-			top.Segments[k] = v
-		}
-	}
-
 	nodeID := common.CreateNodeID(ns.MetadataService.GetProject(), ns.MetadataService.GetZone(), ns.MetadataService.GetName())
 	volumeLimits, err := ns.GetVolumeLimits()
 	if err != nil {
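
For context, the accessible topology that NodeGetInfo keeps reporting after this removal is the zone-derived segment built just above this hunk. A minimal sketch, assuming the common.TopologyKeyZone constant and the metadata service accessor used elsewhere in the driver (neither appears in this diff):

	// Sketch only: with the GKE label lookup gone, the reported topology is
	// derived solely from the node's zone via the GCE metadata service.
	top := &csi.Topology{
		Segments: map[string]string{common.TopologyKeyZone: ns.MetadataService.GetZone()},
	}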

pkg/gce-pd-csi-driver/node_test.go

Lines changed: 1 addition & 8 deletions
@@ -65,13 +65,6 @@ func getTestGCEDriverWithCustomMounter(t *testing.T, mounter *mount.SafeFormatAn
 	return getCustomTestGCEDriver(t, mounter, deviceutils.NewFakeDeviceUtils(false), metadataservice.NewFakeService(), &NodeServerArgs{})
 }
 
-func getTestGCEDriverWithMockKubeClient(t *testing.T, kubeClient kubernetes.Interface) *GCEDriver {
-	args := &NodeServerArgs{
-		KubeClient: kubeClient,
-	}
-	return getCustomTestGCEDriver(t, mountmanager.NewFakeSafeMounter(), deviceutils.NewFakeDeviceUtils(false), metadataservice.NewFakeService(), args)
-}
-
 func getCustomTestGCEDriver(t *testing.T, mounter *mount.SafeFormatAndMount, deviceUtils deviceutils.DeviceUtils, metaService metadataservice.MetadataService, args *NodeServerArgs) *GCEDriver {
 	gceDriver := GetGCEDriver()
 	nodeServer := NewNodeServer(gceDriver, mounter, deviceUtils, metaService, mountmanager.NewFakeStatter(mounter), args)
@@ -384,7 +377,7 @@ func TestNodeGetInfo_Topologies(t *testing.T) {
 			},
 		},
 	}
-	gceDriver := getTestGCEDriverWithMockKubeClient(t, NewFakeKubeClient([]*corev1.Node{nodeA, nodeB}))
+	gceDriver := getTestGCEDriver(t)
 	ns := gceDriver.ns
 
 	volumeLimit, err := ns.GetVolumeLimits()
