Skip to content

Commit 63ffa3c

Browse files
authored
Merge pull request kubernetes#75623 from oomichi/golint-e2e-framework-k-l
Fix golint failures of e2e/framework/[k-l]*.go
2 parents 1cb5502 + 2007488 commit 63ffa3c

File tree

2 files changed

+41
-13
lines changed

2 files changed

+41
-13
lines changed

test/e2e/framework/kubelet_stats.go

Lines changed: 23 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ import (
4343
"github.com/prometheus/common/model"
4444
)
4545

46-
// KubeletMetric stores metrics scraped from the kubelet server's /metric endpoint.
46+
// KubeletLatencyMetric stores metrics scraped from the kubelet server's /metric endpoint.
4747
// TODO: Get some more structure around the metrics and this type
4848
type KubeletLatencyMetric struct {
4949
// eg: list, info, create
@@ -55,7 +55,7 @@ type KubeletLatencyMetric struct {
5555
Latency time.Duration
5656
}
5757

58-
// KubeletMetricByLatency implements sort.Interface for []KubeletMetric based on
58+
// KubeletLatencyMetrics implements sort.Interface for []KubeletLatencyMetric based on
5959
// the latency field.
6060
type KubeletLatencyMetrics []KubeletLatencyMetric
6161

@@ -159,6 +159,7 @@ type RuntimeOperationErrorRate struct {
159159
TimeoutRate float64
160160
}
161161

162+
// NewRuntimeOperationMonitor returns a new RuntimeOperationMonitor.
162163
func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor {
163164
m := &RuntimeOperationMonitor{
164165
client: c,
@@ -433,7 +434,7 @@ const (
433434
rootContainerName = "/"
434435
)
435436

436-
// A list of containers for which we want to collect resource usage.
437+
// TargetContainers returns a list of containers for which we want to collect resource usage.
437438
func TargetContainers() []string {
438439
return []string{
439440
rootContainerName,
@@ -442,6 +443,7 @@ func TargetContainers() []string {
442443
}
443444
}
444445

446+
// ContainerResourceUsage is a structure for gathering container resource usage.
445447
type ContainerResourceUsage struct {
446448
Name string
447449
Timestamp time.Time
@@ -457,7 +459,10 @@ func (r *ContainerResourceUsage) isStrictlyGreaterThan(rhs *ContainerResourceUsa
457459
return r.CPUUsageInCores > rhs.CPUUsageInCores && r.MemoryWorkingSetInBytes > rhs.MemoryWorkingSetInBytes
458460
}
459461

462+
// ResourceUsagePerContainer is a map of ContainerResourceUsage keyed by container name.
460463
type ResourceUsagePerContainer map[string]*ContainerResourceUsage
464+
465+
// ResourceUsagePerNode is a map of ResourceUsagePerContainer keyed by node name.
461466
type ResourceUsagePerNode map[string]ResourceUsagePerContainer
462467

463468
func formatResourceUsageStats(nodeName string, containerStats ResourceUsagePerContainer) string {
@@ -491,6 +496,7 @@ type usageDataPerContainer struct {
491496
memWorkSetData []uint64
492497
}
493498

499+
// GetKubeletHeapStats returns stats of kubelet heap.
494500
func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error) {
495501
client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap", ports.KubeletPort)
496502
if err != nil {
@@ -507,6 +513,7 @@ func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error)
507513
return strings.Join(lines[len(lines)-numLines:], "\n"), nil
508514
}
509515

516+
// PrintAllKubeletPods outputs status of all kubelet pods into log.
510517
func PrintAllKubeletPods(c clientset.Interface, nodeName string) {
511518
podList, err := GetKubeletPods(c, nodeName)
512519
if err != nil {
@@ -661,6 +668,7 @@ type ResourceMonitor struct {
661668
collectors map[string]*resourceCollector
662669
}
663670

671+
// NewResourceMonitor returns a new ResourceMonitor.
664672
func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingInterval time.Duration) *ResourceMonitor {
665673
return &ResourceMonitor{
666674
containers: containerNames,
@@ -669,6 +677,7 @@ func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingI
669677
}
670678
}
671679

680+
// Start starts collectors.
672681
func (r *ResourceMonitor) Start() {
673682
// It should be OK to monitor unschedulable Nodes
674683
nodes, err := r.client.CoreV1().Nodes().List(metav1.ListOptions{})
@@ -683,18 +692,21 @@ func (r *ResourceMonitor) Start() {
683692
}
684693
}
685694

695+
// Stop stops collectors.
686696
func (r *ResourceMonitor) Stop() {
687697
for _, collector := range r.collectors {
688698
collector.Stop()
689699
}
690700
}
691701

702+
// Reset resets collectors.
692703
func (r *ResourceMonitor) Reset() {
693704
for _, collector := range r.collectors {
694705
collector.Reset()
695706
}
696707
}
697708

709+
// LogLatest outputs the latest resource usage into log.
698710
func (r *ResourceMonitor) LogLatest() {
699711
summary, err := r.GetLatest()
700712
if err != nil {
@@ -703,6 +715,8 @@ func (r *ResourceMonitor) LogLatest() {
703715
Logf("%s", r.FormatResourceUsage(summary))
704716
}
705717

718+
// FormatResourceUsage returns the formatted string for LogLatest().
719+
// TODO(oomichi): This can be made to local function after making test/e2e/node/kubelet_perf.go use LogLatest directly instead.
706720
func (r *ResourceMonitor) FormatResourceUsage(s ResourceUsagePerNode) string {
707721
summary := []string{}
708722
for node, usage := range s {
@@ -711,6 +725,7 @@ func (r *ResourceMonitor) FormatResourceUsage(s ResourceUsagePerNode) string {
711725
return strings.Join(summary, "\n")
712726
}
713727

728+
// GetLatest returns the latest resource usage.
714729
func (r *ResourceMonitor) GetLatest() (ResourceUsagePerNode, error) {
715730
result := make(ResourceUsagePerNode)
716731
errs := []error{}
@@ -725,6 +740,7 @@ func (r *ResourceMonitor) GetLatest() (ResourceUsagePerNode, error) {
725740
return result, utilerrors.NewAggregate(errs)
726741
}
727742

743+
// GetMasterNodeLatest returns the latest resource usage of master and node.
728744
func (r *ResourceMonitor) GetMasterNodeLatest(usagePerNode ResourceUsagePerNode) ResourceUsagePerNode {
729745
result := make(ResourceUsagePerNode)
730746
var masterUsage ResourceUsagePerContainer
@@ -767,6 +783,7 @@ type ContainersCPUSummary map[string]map[float64]float64
767783
// ContainersCPUSummary map.
768784
type NodesCPUSummary map[string]ContainersCPUSummary
769785

786+
// FormatCPUSummary returns the string of human-readable CPU summary from the specified summary data.
770787
func (r *ResourceMonitor) FormatCPUSummary(summary NodesCPUSummary) string {
771788
// Example output for a node (the percentiles may differ):
772789
// CPU usage of containers on node "e2e-test-foo-node-0vj7":
@@ -804,11 +821,13 @@ func (r *ResourceMonitor) FormatCPUSummary(summary NodesCPUSummary) string {
804821
return strings.Join(summaryStrings, "\n")
805822
}
806823

824+
// LogCPUSummary outputs summary of CPU into log.
807825
func (r *ResourceMonitor) LogCPUSummary() {
808826
summary := r.GetCPUSummary()
809827
Logf("%s", r.FormatCPUSummary(summary))
810828
}
811829

830+
// GetCPUSummary returns summary of CPU.
812831
func (r *ResourceMonitor) GetCPUSummary() NodesCPUSummary {
813832
result := make(NodesCPUSummary)
814833
for nodeName, collector := range r.collectors {
@@ -821,6 +840,7 @@ func (r *ResourceMonitor) GetCPUSummary() NodesCPUSummary {
821840
return result
822841
}
823842

843+
// GetMasterNodeCPUSummary returns summary of master node CPUs.
824844
func (r *ResourceMonitor) GetMasterNodeCPUSummary(summaryPerNode NodesCPUSummary) NodesCPUSummary {
825845
result := make(NodesCPUSummary)
826846
var masterSummary ContainersCPUSummary

test/e2e/framework/log_size_monitoring.go

Lines changed: 18 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -75,16 +75,20 @@ type LogsSizeVerifier struct {
7575
workers []*LogSizeGatherer
7676
}
7777

78+
// SingleLogSummary is a structure for handling average generation rate and number of probes.
7879
type SingleLogSummary struct {
7980
AverageGenerationRate int
8081
NumberOfProbes int
8182
}
8283

84+
// LogSizeDataTimeseries is a map of timestamped log sizes.
8385
type LogSizeDataTimeseries map[string]map[string][]TimestampedSize
8486

87+
// LogsSizeDataSummary is a map of log summaries.
8588
// node -> file -> data
8689
type LogsSizeDataSummary map[string]map[string]SingleLogSummary
8790

91+
// PrintHumanReadable returns a human-readable string of the log size data summary.
8892
// TODO: make sure that we don't need locking here
8993
func (s *LogsSizeDataSummary) PrintHumanReadable() string {
9094
buf := &bytes.Buffer{}
@@ -100,14 +104,17 @@ func (s *LogsSizeDataSummary) PrintHumanReadable() string {
100104
return buf.String()
101105
}
102106

107+
// PrintJSON returns the summary of log size data with JSON format.
103108
func (s *LogsSizeDataSummary) PrintJSON() string {
104109
return PrettyPrintJSON(*s)
105110
}
106111

112+
// SummaryKind returns the kind of the log size data summary.
107113
func (s *LogsSizeDataSummary) SummaryKind() string {
108114
return "LogSizeSummary"
109115
}
110116

117+
// LogsSizeData is a structure for handling timeseries of log size data and lock.
111118
type LogsSizeData struct {
112119
data LogSizeDataTimeseries
113120
lock sync.Mutex
@@ -133,7 +140,7 @@ func prepareData(masterAddress string, nodeAddresses []string) *LogsSizeData {
133140
}
134141
}
135142

136-
func (d *LogsSizeData) AddNewData(ip, path string, timestamp time.Time, size int) {
143+
func (d *LogsSizeData) addNewData(ip, path string, timestamp time.Time, size int) {
137144
d.lock.Lock()
138145
defer d.lock.Unlock()
139146
d.data[ip][path] = append(
@@ -197,26 +204,27 @@ func (s *LogsSizeVerifier) GetSummary() *LogsSizeDataSummary {
197204
}
198205

199206
// Run starts log size gathering. It starts a goroutine for every worker and then blocks until stopChannel is closed
200-
func (v *LogsSizeVerifier) Run() {
201-
v.workChannel <- WorkItem{
202-
ip: v.masterAddress,
207+
func (s *LogsSizeVerifier) Run() {
208+
s.workChannel <- WorkItem{
209+
ip: s.masterAddress,
203210
paths: masterLogsToCheck,
204211
backoffMultiplier: 1,
205212
}
206-
for _, node := range v.nodeAddresses {
207-
v.workChannel <- WorkItem{
213+
for _, node := range s.nodeAddresses {
214+
s.workChannel <- WorkItem{
208215
ip: node,
209216
paths: nodeLogsToCheck,
210217
backoffMultiplier: 1,
211218
}
212219
}
213-
for _, worker := range v.workers {
220+
for _, worker := range s.workers {
214221
go worker.Run()
215222
}
216-
<-v.stopChannel
217-
v.wg.Wait()
223+
<-s.stopChannel
224+
s.wg.Wait()
218225
}
219226

227+
// Run starts log size gathering.
220228
func (g *LogSizeGatherer) Run() {
221229
for g.Work() {
222230
}
@@ -270,7 +278,7 @@ func (g *LogSizeGatherer) Work() bool {
270278
Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
271279
continue
272280
}
273-
g.data.AddNewData(workItem.ip, path, now, size)
281+
g.data.addNewData(workItem.ip, path, now, size)
274282
}
275283
go g.pushWorkItem(workItem)
276284
return true

0 commit comments

Comments
 (0)