Skip to content

Commit 4fdb065

Browse files
committed
update integration tests with new headless service name
1 parent b42c075 commit 4fdb065

File tree

2 files changed

+13
-11
lines changed

2 files changed

+13
-11
lines changed

tests/templates/kuttl/commons/healthcheck.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,9 @@
2424
}
2525

2626
for role, port in druid_role_ports.items():
27-
url = f"https://{druid_cluster_name}-{role}-default:{port}/status/health"
27+
url = (
28+
f"https://{druid_cluster_name}-{role}-default-metrics:{port}/status/health"
29+
)
2830
count = 1
2931

3032
# As this script is intended to be executed by Kuttl which is in charge of overall test timeouts it is ok

tests/templates/kuttl/commons/ingestioncheck-tls.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,7 @@ def query_datasource(self, url, sql, expected, iterations):
9494
Query tasks
9595
===========""")
9696
tasks = druid.get_tasks(
97-
url=f"{protocol}://{druid_cluster_name}-coordinator-default-0.{druid_cluster_name}-coordinator-default.{namespace}.svc.cluster.local:{coordinator_port}/druid/indexer/v1/tasks",
97+
url=f"{protocol}://{druid_cluster_name}-coordinator-default-0.{druid_cluster_name}-coordinator-default-metrics.{namespace}.svc.cluster.local:{coordinator_port}/druid/indexer/v1/tasks",
9898
)
9999
task_count = len(json.loads(tasks))
100100
print(f"existing tasks: {task_count}")
@@ -103,7 +103,7 @@ def query_datasource(self, url, sql, expected, iterations):
103103
Start ingestion task
104104
====================""")
105105
ingestion = druid.post_task(
106-
url=f"{protocol}://{druid_cluster_name}-coordinator-default-0.{druid_cluster_name}-coordinator-default.{namespace}.svc.cluster.local:{coordinator_port}/druid/indexer/v1/task",
106+
url=f"{protocol}://{druid_cluster_name}-coordinator-default-0.{druid_cluster_name}-coordinator-default-metrics.{namespace}.svc.cluster.local:{coordinator_port}/druid/indexer/v1/task",
107107
input="/tmp/druid-quickstartimport.json",
108108
)
109109
task_id = json.loads(ingestion)["task"]
@@ -113,11 +113,11 @@ def query_datasource(self, url, sql, expected, iterations):
113113
Re-query tasks
114114
==============""")
115115
tasks = druid.get_tasks(
116-
url=f"{protocol}://{druid_cluster_name}-coordinator-default-0.{druid_cluster_name}-coordinator-default.{namespace}.svc.cluster.local:{coordinator_port}/druid/indexer/v1/tasks",
116+
url=f"{protocol}://{druid_cluster_name}-coordinator-default-0.{druid_cluster_name}-coordinator-default-metrics.{namespace}.svc.cluster.local:{coordinator_port}/druid/indexer/v1/tasks",
117117
)
118118
new_task_count = len(json.loads(tasks))
119119
print(f"new tasks: {new_task_count}")
120-
print(f"assert {new_task_count} == {task_count+1}")
120+
print(f"assert {new_task_count} == {task_count + 1}")
121121
assert new_task_count == task_count + 1
122122

123123
print("""
@@ -127,13 +127,13 @@ def query_datasource(self, url, sql, expected, iterations):
127127
while not job_finished:
128128
time.sleep(5)
129129
task = druid.get(
130-
url=f"{protocol}://{druid_cluster_name}-coordinator-default-0.{druid_cluster_name}-coordinator-default.{namespace}.svc.cluster.local:{coordinator_port}/druid/indexer/v1/task/{url_encoded_taskid}/status",
130+
url=f"{protocol}://{druid_cluster_name}-coordinator-default-0.{druid_cluster_name}-coordinator-default-metrics.{namespace}.svc.cluster.local:{coordinator_port}/druid/indexer/v1/task/{url_encoded_taskid}/status",
131131
)
132132
task_status = json.loads(task)["status"]["statusCode"]
133133
print(f"Current task status: [{task_status}]")
134-
assert (
135-
task_status == "RUNNING" or task_status == "SUCCESS"
136-
), f"Task status not running or succeeded: {task_status}"
134+
assert task_status == "RUNNING" or task_status == "SUCCESS", (
135+
f"Task status not running or succeeded: {task_status}"
136+
)
137137
job_finished = task_status == "SUCCESS"
138138

139139
print("""
@@ -143,7 +143,7 @@ def query_datasource(self, url, sql, expected, iterations):
143143
while not broker_ready:
144144
time.sleep(2)
145145
broker_ready_rc = druid.check_rc(
146-
f"{protocol}://{druid_cluster_name}-broker-default-0.{druid_cluster_name}-broker-default.{namespace}.svc.cluster.local:{broker_port}/druid/broker/v1/readiness"
146+
f"{protocol}://{druid_cluster_name}-broker-default-0.{druid_cluster_name}-broker-default-metrics.{namespace}.svc.cluster.local:{broker_port}/druid/broker/v1/readiness"
147147
)
148148
broker_ready = broker_ready_rc == 200
149149
print(f"Broker responded with [{broker_ready_rc}] to readiness check")
@@ -153,7 +153,7 @@ def query_datasource(self, url, sql, expected, iterations):
153153
==============""")
154154
sample_data_size = 39244
155155
result = druid.query_datasource(
156-
url=f"{protocol}://{druid_cluster_name}-broker-default-0.{druid_cluster_name}-broker-default.{namespace}.svc.cluster.local:{broker_port}/druid/v2/sql",
156+
url=f"{protocol}://{druid_cluster_name}-broker-default-0.{druid_cluster_name}-broker-default-metrics.{namespace}.svc.cluster.local:{broker_port}/druid/v2/sql",
157157
sql={"query": 'select count(*) as c from "wikipedia-2015-09-12"'},
158158
expected=sample_data_size,
159159
iterations=12,

0 commit comments

Comments
 (0)