-
-
Couldn't load subscription status.
- Fork 155
Scale Dask worker group from Cluster spec #720
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 1 commit
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -143,6 +143,48 @@ async def test_scalesimplecluster(k8s_cluster, kopf_runner, gen_cluster): | |
| await client.wait_for_workers(3) | ||
|
|
||
|
|
||
| @pytest.mark.asyncio | ||
| async def test_scalesimplecluster_from_cluster_spec( | ||
| k8s_cluster, kopf_runner, gen_cluster | ||
| ): | ||
| with kopf_runner as runner: | ||
| async with gen_cluster() as cluster_name: | ||
| scheduler_pod_name = "simple-scheduler" | ||
| worker_pod_name = "simple-default-worker" | ||
| service_name = "simple-scheduler" | ||
| while scheduler_pod_name not in k8s_cluster.kubectl("get", "pods"): | ||
| await asyncio.sleep(0.1) | ||
| while service_name not in k8s_cluster.kubectl("get", "svc"): | ||
| await asyncio.sleep(0.1) | ||
| while worker_pod_name not in k8s_cluster.kubectl("get", "pods"): | ||
| await asyncio.sleep(0.1) | ||
| k8s_cluster.kubectl( | ||
| "wait", | ||
| "pods", | ||
| "--for=condition=Ready", | ||
| scheduler_pod_name, | ||
| "--timeout=120s", | ||
| ) | ||
| with k8s_cluster.port_forward(f"service/{service_name}", 8786) as port: | ||
| async with Client( | ||
| f"tcp://localhost:{port}", asynchronous=True | ||
| ) as client: | ||
| k8s_cluster.kubectl( | ||
| "scale", | ||
| "--replicas=5", | ||
| "daskcluster.kubernetes.dask.org", | ||
| cluster_name, | ||
| ) | ||
| await client.wait_for_workers(5) | ||
| k8s_cluster.kubectl( | ||
| "scale", | ||
| "--replicas=3", | ||
| "daskcluster.kubernetes.dask.org", | ||
| cluster_name, | ||
| ) | ||
| await client.wait_for_workers(3) | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This won't actually test anything because I opened dask/distributed#6377 a while ago to allow you to modify the behaviour for cases like this. I should nudge that PR along. Maybe we should have a |
||
|
|
||
|
|
||
| @pytest.mark.timeout(180) | ||
| @pytest.mark.asyncio | ||
| async def test_simplecluster(k8s_cluster, kopf_runner, gen_cluster): | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I'm excited about the
kr8s changes in #696 because this will be hugely simplified. Happy to merge this as-is though and I'll update it in a follow-up once
kr8s is integrated nicely. There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Oh, wow! Definitely looking forward to that!