Skip to content

Commit 4b22ef4

Browse files
MLE-14506: removed scaleDown test as deletion of node from ML cluster is not needed to be tested (#248)
1 parent 1884b39 commit 4b22ef4

File tree

1 file changed

+0
-116
lines changed

1 file changed

+0
-116
lines changed

test/e2e/scaling_test.go

Lines changed: 0 additions & 116 deletions
Original file line numberDiff line numberDiff line change
@@ -129,119 +129,3 @@ func TestHelmScaleUp(t *testing.T) {
129129
t.Errorf("Incorrect number of MarkLogic hosts")
130130
}
131131
}
132-
133-
func TestHelmScaleDown(t *testing.T) {
134-
// Path to the helm chart we will test
135-
helmChartPath, e := filepath.Abs("../../charts")
136-
if e != nil {
137-
t.Fatalf(e.Error())
138-
}
139-
imageRepo, repoPres := os.LookupEnv("dockerRepository")
140-
imageTag, tagPres := os.LookupEnv("dockerVersion")
141-
username := "admin"
142-
password := "admin"
143-
144-
if !repoPres {
145-
imageRepo = "marklogicdb/marklogic-db"
146-
t.Logf("No imageRepo variable present, setting to default value: " + imageRepo)
147-
}
148-
149-
if !tagPres {
150-
imageTag = "latest"
151-
t.Logf("No imageTag variable present, setting to default value: " + imageTag)
152-
}
153-
154-
namespaceName := "ml-" + strings.ToLower(random.UniqueId())
155-
kubectlOptions := k8s.NewKubectlOptions("", "", namespaceName)
156-
options := &helm.Options{
157-
KubectlOptions: kubectlOptions,
158-
SetValues: map[string]string{
159-
"persistence.enabled": "false",
160-
"replicaCount": "2",
161-
"image.repository": imageRepo,
162-
"image.tag": imageTag,
163-
"auth.adminUsername": username,
164-
"auth.adminPassword": password,
165-
"logCollection.enabled": "false",
166-
},
167-
}
168-
169-
t.Logf("====Creating namespace: " + namespaceName)
170-
k8s.CreateNamespace(t, kubectlOptions, namespaceName)
171-
defer t.Logf("====Deleting namespace: " + namespaceName)
172-
defer k8s.DeleteNamespace(t, kubectlOptions, namespaceName)
173-
174-
t.Logf("====Installing Helm Chart")
175-
releaseName := "test-scale-down"
176-
helm.Install(t, options, helmChartPath, releaseName)
177-
178-
podName1 := releaseName + "-1"
179-
180-
// wait until the pod is in Ready status
181-
k8s.WaitUntilPodAvailable(t, kubectlOptions, podName1, 15, 20*time.Second)
182-
183-
newOptions := &helm.Options{
184-
KubectlOptions: kubectlOptions,
185-
SetValues: map[string]string{
186-
"persistence.enabled": "false",
187-
"replicaCount": "1",
188-
"image.repository": imageRepo,
189-
"image.tag": imageTag,
190-
"logCollection.enabled": "false",
191-
},
192-
}
193-
194-
t.Logf("====Upgrading Helm Chart")
195-
helm.Upgrade(t, newOptions, helmChartPath, releaseName)
196-
197-
time.Sleep(20 * time.Second)
198-
199-
podName0 := releaseName + "-0"
200-
201-
tunnel := k8s.NewTunnel(
202-
kubectlOptions, k8s.ResourceTypePod, podName0, 8002, 8002)
203-
defer tunnel.Close()
204-
tunnel.ForwardPort(t)
205-
206-
numOfHostsOffline := 1
207-
client := req.C()
208-
_, err := client.R().
209-
SetDigestAuth(username, password).
210-
SetRetryCount(3).
211-
SetRetryFixedInterval(10 * time.Second).
212-
AddRetryCondition(func(resp *req.Response, err error) bool {
213-
if resp == nil || err != nil {
214-
t.Logf("error in AddRetryCondition: %s", err.Error())
215-
return true
216-
}
217-
if resp.Response == nil {
218-
t.Log("Could not get the Response Object, Retrying...")
219-
return true
220-
}
221-
if resp.Body == nil {
222-
t.Log("Could not get the body for the response, Retrying...")
223-
return true
224-
}
225-
body, err := io.ReadAll(resp.Body)
226-
if body == nil || err != nil {
227-
t.Logf("error in read response body: %s", err.Error())
228-
return true
229-
}
230-
totalOfflineHosts := gjson.Get(string(body), `host-status-list.status-list-summary.total-hosts-offline.value`)
231-
numOfHostsOffline = int(totalOfflineHosts.Num)
232-
if numOfHostsOffline != 1 {
233-
t.Log("Waiting for second host to shutdown")
234-
}
235-
return numOfHostsOffline != 1
236-
}).
237-
Get("http://localhost:8002/manage/v2/hosts?view=status&format=json")
238-
239-
if err != nil {
240-
t.Fatalf(err.Error())
241-
}
242-
243-
// verify total number of hosts on the cluster after scaling down
244-
if numOfHostsOffline != 1 {
245-
t.Errorf("Incorrect number of offline hosts")
246-
}
247-
}

0 commit comments

Comments
 (0)