
Commit 95360ea

Author: Jonathan S. Katz (committed)
Fix misspellings caught by misspell
It's inevitable that they get into the codebase, but it's good to clean them up every once in a while.
1 parent f8e515e commit 95360ea
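For context, a sweep like this one is typically generated by running the misspell linter over the tree. The commands below are a minimal sketch, not part of this commit: the install path and flags are those of the upstream github.com/client9/misspell CLI, and the file selection is only an example.

    # install the misspell CLI (requires a Go toolchain)
    go install github.com/client9/misspell/cmd/misspell@latest

    # report misspellings in Go sources and shell scripts; -error exits non-zero on findings
    find . -type f \( -name '*.go' -o -name '*.sh' \) | xargs misspell -error

    # or apply the suggested corrections in place
    find . -type f \( -name '*.go' -o -name '*.sh' \) | xargs misspell -w

Running the read-only form in CI keeps new misspellings from accumulating between cleanups like this one.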

10 files changed, +14 -14 lines changed

apiserver/clusterservice/clusterimpl.go

Lines changed: 1 addition & 1 deletion
@@ -1297,7 +1297,7 @@ func getClusterParams(request *msgs.CreateClusterRequest, name string, userLabel
 annotations := map[string]string{}
 // store the default current primary value as an annotation
 annotations[config.ANNOTATION_CURRENT_PRIMARY] = spec.Name
-// store the inital deployment value, which will match the
+// store the initial deployment value, which will match the
 // cluster name initially
 annotations[config.ANNOTATION_PRIMARY_DEPLOYMENT] = spec.Name

apiserver/pgbouncerservice/pgbouncerimpl.go

Lines changed: 1 addition & 1 deletion
@@ -187,7 +187,7 @@ func DeletePgbouncer(request *msgs.DeletePgbouncerRequest, ns string) msgs.Delet
 }
 }

-// Disable the pgBouncer Deploymnet, whcih means setting Replicas to 0
+// Disable the pgBouncer Deploymnet, which means setting Replicas to 0
 cluster.Spec.PgBouncer.Replicas = 0
 // Set the MemoryLimit of pgBouncer to false as well, as this is the default
 // setting

bin/pgo-backrest-repo-sync/pgo-backrest-repo-sync.sh

Lines changed: 1 addition & 1 deletion
@@ -47,7 +47,7 @@ rsync_repo() {
 }

 # Use the aws cli sync command to sync files from a source location to a target
-# location. The this inlcudes syncing files between who s3 locations,
+# location. The this includes syncing files between who s3 locations,
 # syncing a local directory to s3, or syncing from s3 to a local directory.
 aws_sync_repo() {
 export AWS_CA_BUNDLE="${PGBACKREST_REPO1_S3_CA_FILE}"
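For readers unfamiliar with the command that comment refers to, the three usage patterns of aws s3 sync look roughly like the sketch below; the bucket names and local paths are placeholders, not values taken from this script.

    # sync between two s3 locations
    aws s3 sync s3://source-bucket/backrestrepo s3://target-bucket/backrestrepo

    # sync a local directory to s3
    aws s3 sync /pgdata/backrestrepo s3://target-bucket/backrestrepo

    # sync from s3 to a local directory
    aws s3 sync s3://source-bucket/backrestrepo /pgdata/backrestrepo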

controller/configmap/configmapcontroller.go

Lines changed: 2 additions & 2 deletions
@@ -94,7 +94,7 @@ func (c *Controller) RunWorker(stopCh <-chan struct{}, doneCh chan<- struct{}) {
 func (c *Controller) waitForShutdown(stopCh <-chan struct{}) {
 <-stopCh
 c.workqueue.ShutDown()
-log.Debug("ConfigMap Contoller: recieved stop signal, worker queue told to shutdown")
+log.Debug("ConfigMap Contoller: received stop signal, worker queue told to shutdown")
 }

 // ShutdownWorker shuts down the work queue
@@ -158,7 +158,7 @@ func (c *Controller) processNextWorkItem() bool {
 key, err)
 }

-// Finally if no error has occured forget this item
+// Finally if no error has occurred forget this item
 c.workqueue.Forget(obj)

 return true

controller/pgcluster/pgclustercontroller.go

Lines changed: 2 additions & 2 deletions
@@ -85,7 +85,7 @@ func (c *Controller) RunWorker(stopCh <-chan struct{}, doneCh chan<- struct{}) {
 func (c *Controller) waitForShutdown(stopCh <-chan struct{}) {
 <-stopCh
 c.Queue.ShutDown()
-log.Debug("pgcluster Contoller: recieved stop signal, worker queue told to shutdown")
+log.Debug("pgcluster Contoller: received stop signal, worker queue told to shutdown")
 }

 func (c *Controller) processNextItem() bool {
@@ -145,7 +145,7 @@ func (c *Controller) processNextItem() bool {
 clusteroperator.AddClusterBase(c.PgclusterClientset, c.PgclusterClient, &cluster, cluster.ObjectMeta.Namespace)

 // Now scale the repo deployment only to ensure it is initialized prior to the primary DB.
-// Once the repo is ready, the primary datbase deployment will then also be scaled to 1.
+// Once the repo is ready, the primary database deployment will then also be scaled to 1.
 clusterInfo, err := clusteroperator.ScaleClusterDeployments(c.PgclusterClientset,
 cluster, 1, false, false, true, false)
 if err != nil {

controller/pgreplica/pgreplicacontroller.go

Lines changed: 1 addition & 1 deletion
@@ -57,7 +57,7 @@ func (c *Controller) RunWorker(stopCh <-chan struct{}, doneCh chan<- struct{}) {
 func (c *Controller) waitForShutdown(stopCh <-chan struct{}) {
 <-stopCh
 c.Queue.ShutDown()
-log.Debug("pgreplica Contoller: recieved stop signal, worker queue told to shutdown")
+log.Debug("pgreplica Contoller: received stop signal, worker queue told to shutdown")
 }

 func (c *Controller) processNextItem() bool {

controller/pgtask/pgtaskcontroller.go

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@ func (c *Controller) RunWorker(stopCh <-chan struct{}, doneCh chan<- struct{}) {
 func (c *Controller) waitForShutdown(stopCh <-chan struct{}) {
 <-stopCh
 c.Queue.ShutDown()
-log.Debug("pgtask Contoller: recieved stop signal, worker queue told to shutdown")
+log.Debug("pgtask Contoller: received stop signal, worker queue told to shutdown")
 }

 func (c *Controller) processNextItem() bool {

controller/pod/podcontroller.go

Lines changed: 1 addition & 1 deletion
@@ -144,7 +144,7 @@ func (c *Controller) onUpdate(oldObj, newObj interface{}) {
 // setCurrentPrimary checks whether the newly promoted primary value differs from the pgcluster's
 // current primary value. If different, patch the CRD's annotation to match the new value
 func setCurrentPrimary(restclient *rest.RESTClient, newPod *apiv1.Pod, cluster *crv1.Pgcluster) {
-// if a failover has occured and the current primary has changed, update the pgcluster CRD's annotation accordingly
+// if a failover has occurred and the current primary has changed, update the pgcluster CRD's annotation accordingly
 if cluster.Annotations[config.ANNOTATION_CURRENT_PRIMARY] != newPod.ObjectMeta.Labels[config.LABEL_DEPLOYMENT_NAME] {
 err := util.CurrentPrimaryUpdate(restclient, cluster, newPod.ObjectMeta.Labels[config.LABEL_DEPLOYMENT_NAME], newPod.Namespace)
 if err != nil {

operator/cluster/upgrade.go

Lines changed: 2 additions & 2 deletions
@@ -250,7 +250,7 @@ func getCurrentPrimary(clusterName, podPrimary, crPrimary, labelPrimary string)
 return clusterName
 }

-// handleReplicas deletes all pgreplicas related to the pgcluster to be ugpraded, then returns the number
+// handleReplicas deletes all pgreplicas related to the pgcluster to be upgraded, then returns the number
 // of pgreplicas that were found. This will delete any PVCs that match the existing pgreplica CRs, but
 // will leave any other PVCs, whether they are from the current primary, previous primaries that are now
 // unassociated because of a failover or the backrest-shared-repo PVC. The total number of current replicas
@@ -349,7 +349,7 @@ func deleteBeforeUpgrade(clientset *kubernetes.Clientset, restclient *rest.RESTC
 }

 // deploymentWait is modified from cluster.waitForDeploymentDelete. It simply waits for the current primary deployment
-// deletion to complete before proceding with the rest of the pgcluster upgrade.
+// deletion to complete before proceeding with the rest of the pgcluster upgrade.
 func deploymentWait(clientset *kubernetes.Clientset, namespace, deploymentName string, timeoutSecs, periodSecs time.Duration) string {
 timeout := time.After(timeoutSecs * time.Second)
 tick := time.Tick(periodSecs * time.Second)

operator/common.go

Lines changed: 2 additions & 2 deletions
@@ -228,7 +228,7 @@ func initializeContainerImageOverrides() {
 }

 // initControllerRefreshIntervals initializes the refresh intervals for any informers
-// created by the Operator requiring a refresh interval. This inlcudes first attempting
+// created by the Operator requiring a refresh interval. This includes first attempting
 // to utilize the refresh interval(s) defined in the pgo.yaml config file, and if not
 // present then falling back to a default value.
 func initializeControllerRefreshIntervals() {
@@ -256,7 +256,7 @@ func initializeControllerRefreshIntervals() {
 }

 // initControllerWorkerCounts sets the number of workers that will be created for any worker
-// queues created within the various controllers created by the Operator. This inlcudes first
+// queues created within the various controllers created by the Operator. This includes first
 // attempting to utilize the worker counts defined in the pgo.yaml config file, and if not
 // present then falling back to a default value.
 func initializeControllerWorkerCounts() {
