Commit 089d3e6

Merge pull request kubernetes#86910 from RaunakShah/golint_vspherestorage
Fix golint errors in test/e2e/storage/vsphere
2 parents 426b353 + 18f05ef commit 089d3e6

18 files changed (+183, -168 lines)

hack/.golint_failures
Lines changed: 0 additions & 1 deletion

@@ -514,5 +514,4 @@ staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1
 staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/fischer
 staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/flunder
 test/e2e/common
-test/e2e/storage/vsphere
 test/utils

test/e2e/storage/vsphere/bootstrap.go
Lines changed: 1 addition & 1 deletion

@@ -49,7 +49,7 @@ func bootstrapOnce() {
     if err != nil {
         framework.Failf("Failed to get nodes: %v", err)
     }
-    TestContext = VSphereContext{NodeMapper: &NodeMapper{}, VSphereInstances: vsphereInstances}
+    TestContext = Context{NodeMapper: &NodeMapper{}, VSphereInstances: vsphereInstances}
     // 3. Get Node to VSphere mapping
     err = TestContext.NodeMapper.GenerateNodeMap(vsphereInstances, *nodeList)
     if err != nil {

test/e2e/storage/vsphere/config.go
Lines changed: 1 addition & 1 deletion

@@ -99,7 +99,7 @@ func GetVSphereInstances() (map[string]*VSphere, error) {
 func getConfig() (*ConfigFile, error) {
     if confFileLocation == "" {
         if framework.TestContext.CloudConfig.ConfigFile == "" {
-            return nil, fmt.Errorf("Env variable 'VSPHERE_CONF_FILE' is not set, and no config-file specified")
+            return nil, fmt.Errorf("env variable 'VSPHERE_CONF_FILE' is not set, and no config-file specified")
         }
         confFileLocation = framework.TestContext.CloudConfig.ConfigFile
     }
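A note on the golint rule behind this hunk: error strings should not start with a capital letter, because callers usually wrap them and the message then reads mid-sentence. The sketch below is illustrative only; loadConfPath is a hypothetical stand-in for the real getConfig and is not part of this PR.

package main

import (
    "errors"
    "fmt"
    "os"
)

// loadConfPath is a hypothetical helper showing the convention: the error
// string starts lower-case and carries no trailing punctuation.
func loadConfPath() (string, error) {
    path := os.Getenv("VSPHERE_CONF_FILE")
    if path == "" {
        return "", errors.New("env variable 'VSPHERE_CONF_FILE' is not set")
    }
    return path, nil
}

func main() {
    if _, err := loadConfPath(); err != nil {
        // Wrapped, the lower-case message reads naturally in a sentence.
        fmt.Println(fmt.Errorf("loading vSphere config: %w", err))
    }
}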

test/e2e/storage/vsphere/context.go
Lines changed: 2 additions & 2 deletions

@@ -17,10 +17,10 @@ limitations under the License.
 package vsphere
 
 // Context holds common information for vSphere tests
-type VSphereContext struct {
+type Context struct {
     NodeMapper       *NodeMapper
     VSphereInstances map[string]*VSphere
 }
 
 // TestContext should be used by all tests to access common context data. It should be initialized only once, during bootstrapping the tests.
-var TestContext VSphereContext
+var TestContext Context
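The rename addresses golint's name-stutter check: from outside the package the old type read as vsphere.VSphereContext, and golint suggests dropping the repeated package name. A hedged sketch of the same fix in a hypothetical package (none of these names come from the PR):

// Package widget is a hypothetical package illustrating the stutter rule.
package widget

// Config holds widget settings. Named WidgetConfig it would read as
// widget.WidgetConfig from other packages, which golint flags as stutter;
// Config gives callers the natural widget.Config, just as VSphereContext
// became vsphere.Context above.
type Config struct {
    Name string
}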

test/e2e/storage/vsphere/nodemapper.go
Lines changed: 13 additions & 11 deletions

@@ -33,9 +33,11 @@ import (
     neturl "net/url"
 )
 
+// NodeMapper contains information to generate nameToNodeInfo and vcToZoneDatastore maps
 type NodeMapper struct {
 }
 
+// NodeInfo contains information about vcenter nodes
 type NodeInfo struct {
     Name          string
     DataCenterRef types.ManagedObjectReference
@@ -46,9 +48,9 @@ type NodeInfo struct {
 }
 
 const (
-    DatacenterType             = "Datacenter"
-    ClusterComputeResourceType = "ClusterComputeResource"
-    HostSystemType             = "HostSystem"
+    datacenterType             = "Datacenter"
+    clusterComputeResourceType = "ClusterComputeResource"
+    hostSystemType             = "HostSystem"
 )
 
 var (
@@ -58,13 +60,13 @@ var (
 
 // GenerateNodeMap populates node name to node info map
 func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, nodeList v1.NodeList) error {
-    type VmSearch struct {
+    type VMSearch struct {
         vs         *VSphere
         datacenter *object.Datacenter
     }
 
     var wg sync.WaitGroup
-    var queueChannel []*VmSearch
+    var queueChannel []*VMSearch
 
     var datacenters []*object.Datacenter
     var err error
@@ -99,7 +101,7 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node
 
         for _, dc := range datacenters {
             framework.Logf("Search candidates vc=%s and datacenter=%s", vs.Config.Hostname, dc.Name())
-            queueChannel = append(queueChannel, &VmSearch{vs: vs, datacenter: dc})
+            queueChannel = append(queueChannel, &VMSearch{vs: vs, datacenter: dc})
         }
     }
 
@@ -170,7 +172,7 @@ func retrieveZoneInformationForNode(nodeName string, connection *VSphere, hostSy
     // zone precedence will be received by the HostSystem type.
     for _, ancestor := range ancestors {
         moType := ancestor.ExtensibleManagedObject.Self.Type
-        if moType == DatacenterType || moType == ClusterComputeResourceType || moType == HostSystemType {
+        if moType == datacenterType || moType == clusterComputeResourceType || moType == hostSystemType {
             validAncestors = append(validAncestors, ancestor)
         }
     }
@@ -208,7 +210,7 @@ func retrieveZoneInformationForNode(nodeName string, connection *VSphere, hostSy
     return zones
 }
 
-// Generate zone to datastore mapping for easily verifying volume placement
+// GenerateZoneToDatastoreMap generates a mapping of zone to datastore for easily verifying volume placement
 func (nm *NodeMapper) GenerateZoneToDatastoreMap() error {
     // 1. Create zone to hosts map for each VC
     var vcToZoneHostsMap = make(map[string](map[string][]string))
@@ -254,7 +256,7 @@ func (nm *NodeMapper) GenerateZoneToDatastoreMap() error {
     return nil
 }
 
-// Retrieves the common datastores from the specified hosts
+// retrieveCommonDatastoresAmongHosts retrieves the common datastores from the specified hosts
 func retrieveCommonDatastoresAmongHosts(hosts []string, hostToDatastoresMap map[string][]string) []string {
     var datastoreCountMap = make(map[string]int)
     for _, host := range hosts {
@@ -272,12 +274,12 @@ func retrieveCommonDatastoresAmongHosts(hosts []string, hostToDatastoresMap map[
     return commonDatastores
 }
 
-// Get all the datastores in the specified zone
+// GetDatastoresInZone returns all the datastores in the specified zone
 func (nm *NodeMapper) GetDatastoresInZone(vc string, zone string) []string {
     return vcToZoneDatastoresMap[vc][zone]
 }
 
-// GetNodeInfo return NodeInfo for given nodeName
+// GetNodeInfo returns NodeInfo for given nodeName
 func (nm *NodeMapper) GetNodeInfo(nodeName string) *NodeInfo {
     return nameToNodeInfo[nodeName]
 }
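A note on the golint rules behind these hunks: exported identifiers need a doc comment beginning with their own name (the *Type constants were unexported instead, since nothing outside the package uses them), and initialisms such as VM are written in capitals (VMSearch, not VmSearch). A small sketch under those assumptions, with hypothetical names not taken from this PR:

// Package inventory is a hypothetical package illustrating the rules above.
package inventory

// virtualMachineType stays unexported: golint would otherwise require a doc
// comment on the exported name, and no other package needs the constant.
const virtualMachineType = "VirtualMachine"

// VMRecord describes one virtual machine; the comment starts with "VMRecord"
// and the initialism "VM" is fully capitalized (VMRecord, not VmRecord).
type VMRecord struct {
    Name string
    Type string
}

// IsVM reports whether the record refers to a virtual machine object.
func IsVM(r VMRecord) bool {
    return r.Type == virtualMachineType
}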

test/e2e/storage/vsphere/pvc_label_selector.go
Lines changed: 35 additions & 35 deletions

@@ -36,23 +36,23 @@ import (
    ----------
    1. Create VMDK.
    2. Create pv with label volume-type:ssd, volume path set to vmdk created in previous step, and PersistentVolumeReclaimPolicy is set to Delete.
-   3. Create PVC (pvc_vvol) with label selector to match with volume-type:vvol
-   4. Create PVC (pvc_ssd) with label selector to match with volume-type:ssd
-   5. Wait and verify pvc_ssd is bound with PV.
-   6. Verify Status of pvc_vvol is still pending.
-   7. Delete pvc_ssd.
+   3. Create PVC (pvcVvol) with label selector to match with volume-type:vvol
+   4. Create PVC (pvcSsd) with label selector to match with volume-type:ssd
+   5. Wait and verify pvSsd is bound with PV.
+   6. Verify Status of pvcVvol is still pending.
+   7. Delete pvcSsd.
    8. verify associated pv is also deleted.
-   9. delete pvc_vvol
+   9. delete pvcVvol
 
 */
 var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
     f := framework.NewDefaultFramework("pvclabelselector")
     var (
         c          clientset.Interface
         ns         string
-        pv_ssd     *v1.PersistentVolume
-        pvc_ssd    *v1.PersistentVolumeClaim
-        pvc_vvol   *v1.PersistentVolumeClaim
+        pvSsd      *v1.PersistentVolume
+        pvcSsd     *v1.PersistentVolumeClaim
+        pvcVvol    *v1.PersistentVolumeClaim
         volumePath string
         ssdlabels  map[string]string
         vvollabels map[string]string
@@ -77,35 +77,35 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
     ginkgo.AfterEach(func() {
         ginkgo.By("Running clean up actions")
         if framework.ProviderIs("vsphere") {
-            testCleanupVSpherePVClabelselector(c, ns, nodeInfo, volumePath, pv_ssd, pvc_ssd, pvc_vvol)
+            testCleanupVSpherePVClabelselector(c, ns, nodeInfo, volumePath, pvSsd, pvcSsd, pvcVvol)
         }
     })
     ginkgo.It("should bind volume with claim for given label", func() {
-        volumePath, pv_ssd, pvc_ssd, pvc_vvol, err = testSetupVSpherePVClabelselector(c, nodeInfo, ns, ssdlabels, vvollabels)
+        volumePath, pvSsd, pvcSsd, pvcVvol, err = testSetupVSpherePVClabelselector(c, nodeInfo, ns, ssdlabels, vvollabels)
         framework.ExpectNoError(err)
 
-        ginkgo.By("wait for the pvc_ssd to bind with pv_ssd")
-        framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv_ssd, pvc_ssd))
+        ginkgo.By("wait for the pvcSsd to bind with pvSsd")
+        framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pvSsd, pvcSsd))
 
-        ginkgo.By("Verify status of pvc_vvol is pending")
-        err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvc_vvol.Name, 3*time.Second, 300*time.Second)
+        ginkgo.By("Verify status of pvcVvol is pending")
+        err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvcVvol.Name, 3*time.Second, 300*time.Second)
         framework.ExpectNoError(err)
 
-        ginkgo.By("delete pvc_ssd")
-        framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)
+        ginkgo.By("delete pvcSsd")
+        framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvcSsd.Name, ns), "Failed to delete PVC ", pvcSsd.Name)
 
-        ginkgo.By("verify pv_ssd is deleted")
-        err = framework.WaitForPersistentVolumeDeleted(c, pv_ssd.Name, 3*time.Second, 300*time.Second)
+        ginkgo.By("verify pvSsd is deleted")
+        err = framework.WaitForPersistentVolumeDeleted(c, pvSsd.Name, 3*time.Second, 300*time.Second)
         framework.ExpectNoError(err)
         volumePath = ""
 
-        ginkgo.By("delete pvc_vvol")
-        framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name)
+        ginkgo.By("delete pvcVvol")
+        framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvcVvol.Name, ns), "Failed to delete PVC ", pvcVvol.Name)
     })
   })
 })
 
-func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim, err error) {
+func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pvSsd *v1.PersistentVolume, pvcSsd *v1.PersistentVolumeClaim, pvcVvol *v1.PersistentVolumeClaim, err error) {
     ginkgo.By("creating vmdk")
     volumePath = ""
     volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
@@ -114,37 +114,37 @@ func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo,
     }
 
     ginkgo.By("creating the pv with label volume-type:ssd")
-    pv_ssd = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimDelete, ssdlabels)
-    pv_ssd, err = c.CoreV1().PersistentVolumes().Create(pv_ssd)
+    pvSsd = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimDelete, ssdlabels)
+    pvSsd, err = c.CoreV1().PersistentVolumes().Create(pvSsd)
     if err != nil {
         return
     }
 
     ginkgo.By("creating pvc with label selector to match with volume-type:vvol")
-    pvc_vvol = getVSpherePersistentVolumeClaimSpec(ns, vvollabels)
-    pvc_vvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc_vvol)
+    pvcVvol = getVSpherePersistentVolumeClaimSpec(ns, vvollabels)
+    pvcVvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvcVvol)
     if err != nil {
         return
     }
 
     ginkgo.By("creating pvc with label selector to match with volume-type:ssd")
-    pvc_ssd = getVSpherePersistentVolumeClaimSpec(ns, ssdlabels)
-    pvc_ssd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc_ssd)
+    pvcSsd = getVSpherePersistentVolumeClaimSpec(ns, ssdlabels)
+    pvcSsd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvcSsd)
     return
 }
 
-func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, nodeInfo *NodeInfo, volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim) {
+func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, nodeInfo *NodeInfo, volumePath string, pvSsd *v1.PersistentVolume, pvcSsd *v1.PersistentVolumeClaim, pvcVvol *v1.PersistentVolumeClaim) {
     ginkgo.By("running testCleanupVSpherePVClabelselector")
     if len(volumePath) > 0 {
         nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
     }
-    if pvc_ssd != nil {
-        framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)
+    if pvcSsd != nil {
+        framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvcSsd.Name, ns), "Failed to delete PVC ", pvcSsd.Name)
     }
-    if pvc_vvol != nil {
-        framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name)
+    if pvcVvol != nil {
+        framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvcVvol.Name, ns), "Failed to delete PVC ", pvcVvol.Name)
     }
-    if pv_ssd != nil {
-        framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv_ssd.Name), "Failed to delete PV ", pv_ssd.Name)
+    if pvSsd != nil {
+        framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pvSsd.Name), "Failed to delete PV ", pvSsd.Name)
     }
 }
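Every change in this file is the same golint fix: Go names use mixedCaps rather than underscores (golint reports, for instance, "don't use underscores in Go names; var pvc_ssd should be pvcSsd"). A trivial runnable sketch of the convention, using hypothetical values rather than the PR's actual objects:

package main

import "fmt"

func main() {
    // golint flags snake_case identifiers such as pvc_ssd; mixedCaps is idiomatic.
    pvcSsd := "pvc-ssd"
    pvcVvol := "pvc-vvol"
    fmt.Println(pvcSsd, pvcVvol)
}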

test/e2e/storage/vsphere/vsphere.go
Lines changed: 16 additions & 16 deletions

@@ -34,14 +34,14 @@ import (
 )
 
 const (
-    VolDir                     = "kubevols"
-    DefaultDiskCapacityKB      = 2097152
-    DefaultDiskFormat          = "thin"
-    DefaultSCSIControllerType  = "lsiLogic"
-    VirtualMachineType         = "VirtualMachine"
+    volDir                    = "kubevols"
+    defaultDiskCapacityKB     = 2097152
+    defaultDiskFormat         = "thin"
+    defaultSCSIControllerType = "lsiLogic"
+    virtualMachineType        = "VirtualMachine"
 )
 
-// Represents a vSphere instance where one or more kubernetes nodes are running.
+// VSphere represents a vSphere instance where one or more kubernetes nodes are running.
 type VSphere struct {
     Config *Config
     Client *govmomi.Client
@@ -63,7 +63,7 @@ func (vs *VSphere) GetDatacenter(ctx context.Context, datacenterPath string) (*o
     return finder.Datacenter(ctx, datacenterPath)
 }
 
-// GetDatacenter returns the DataCenter Object for the given datacenterPath
+// GetDatacenterFromObjectReference returns the DataCenter Object for the given datacenter reference
 func (vs *VSphere) GetDatacenterFromObjectReference(ctx context.Context, dc object.Reference) *object.Datacenter {
     Connect(ctx, vs)
     return object.NewDatacenter(vs.Client.Client, dc.Reference())
@@ -76,7 +76,7 @@ func (vs *VSphere) GetAllDatacenter(ctx context.Context) ([]*object.Datacenter,
     return finder.DatacenterList(ctx, "*")
 }
 
-// GetVMByUUID gets the VM object Reference from the given vmUUID
+// GetVMByUUID returns the VM object Reference from the given vmUUID
 func (vs *VSphere) GetVMByUUID(ctx context.Context, vmUUID string, dc object.Reference) (object.Reference, error) {
     Connect(ctx, vs)
     datacenter := vs.GetDatacenterFromObjectReference(ctx, dc)
@@ -85,7 +85,7 @@ func (vs *VSphere) GetVMByUUID(ctx context.Context, vmUUID string, dc object.Ref
     return s.FindByUuid(ctx, datacenter, vmUUID, true, nil)
 }
 
-// Get host object reference of the host on which the specified VM resides
+// GetHostFromVMReference returns host object reference of the host on which the specified VM resides
 func (vs *VSphere) GetHostFromVMReference(ctx context.Context, vm types.ManagedObjectReference) types.ManagedObjectReference {
     Connect(ctx, vs)
     var vmMo mo.VirtualMachine
@@ -94,15 +94,15 @@ func (vs *VSphere) GetHostFromVMReference(ctx context.Context, vm types.ManagedO
     return host
 }
 
-// Get the datastore references of all the datastores mounted on the specified host
+// GetDatastoresMountedOnHost returns the datastore references of all the datastores mounted on the specified host
 func (vs *VSphere) GetDatastoresMountedOnHost(ctx context.Context, host types.ManagedObjectReference) []types.ManagedObjectReference {
     Connect(ctx, vs)
     var hostMo mo.HostSystem
     vs.Client.RetrieveOne(ctx, host, []string{"datastore"}, &hostMo)
     return hostMo.Datastore
 }
 
-// Get the datastore reference of the specified datastore
+// GetDatastoreRefFromName returns the datastore reference of the specified datastore
 func (vs *VSphere) GetDatastoreRefFromName(ctx context.Context, dc object.Reference, datastoreName string) (types.ManagedObjectReference, error) {
     Connect(ctx, vs)
     datacenter := object.NewDatacenter(vs.Client.Client, dc.Reference())
@@ -148,7 +148,7 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions, dataCenterRef type
     if err != nil {
         return "", fmt.Errorf("Failed while searching for datastore: %s. err: %+v", volumeOptions.Datastore, err)
     }
-    directoryPath := filepath.Clean(ds.Path(VolDir)) + "/"
+    directoryPath := filepath.Clean(ds.Path(volDir)) + "/"
     fileManager := object.NewFileManager(ds.Client())
     err = fileManager.MakeDirectory(ctx, directoryPath, datacenter, false)
     if err != nil {
@@ -237,7 +237,7 @@ func (vs *VSphere) IsVMPresent(vmName string, dataCenterRef types.ManagedObjectR
         return
     }
     for _, vmFoldersChild := range vmFoldersChildren {
-        if vmFoldersChild.Reference().Type == VirtualMachineType {
+        if vmFoldersChild.Reference().Type == virtualMachineType {
             if object.NewVirtualMachine(vs.Client.Client, vmFoldersChild.Reference()).Name() == vmName {
                 return true, nil
             }
@@ -255,15 +255,15 @@ func (vs *VSphere) initVolumeOptions(volumeOptions *VolumeOptions) {
         volumeOptions.Datastore = vs.Config.DefaultDatastore
     }
     if volumeOptions.CapacityKB == 0 {
-        volumeOptions.CapacityKB = DefaultDiskCapacityKB
+        volumeOptions.CapacityKB = defaultDiskCapacityKB
     }
     if volumeOptions.Name == "" {
        volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
     }
     if volumeOptions.DiskFormat == "" {
-        volumeOptions.DiskFormat = DefaultDiskFormat
+        volumeOptions.DiskFormat = defaultDiskFormat
     }
     if volumeOptions.SCSIControllerType == "" {
-        volumeOptions.SCSIControllerType = DefaultSCSIControllerType
+        volumeOptions.SCSIControllerType = defaultSCSIControllerType
     }
 }
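The vsphere.go edits follow the same pattern as nodemapper.go: doc comments on exported identifiers are rewritten to start with the identifier's name ("VSphere represents ...", "GetHostFromVMReference returns ..."), and package-internal constants are unexported. A hedged sketch of the comment rule on a hypothetical type not taken from this PR:

// Package disks is a hypothetical package illustrating the doc-comment rule.
package disks

// Volume describes a disk created by the test helpers.
type Volume struct {
    Format string
}

// FormatOrDefault returns v's disk format, falling back to "thin" when unset.
// A comment such as "// Returns the format ..." would instead draw golint's
// warning that the comment should be of the form "FormatOrDefault ...".
func (v Volume) FormatOrDefault() string {
    if v.Format == "" {
        return "thin"
    }
    return v.Format
}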
