@@ -626,12 +626,14 @@ def _fetch_cluster_data(namespace):
626626 if item .worker_extended_resources else "nvidia.com/gpu: 0"
627627 for item in rayclusters
628628 ]
629- head_cpus = [item .head_cpus if item .head_cpus else 0 for item in rayclusters ]
630- head_mem = [item .head_mem if item .head_mem else 0 for item in rayclusters ]
631- worker_cpu_min = [item .worker_cpu_min if item .worker_cpu_min else 0 for item in rayclusters ]
632- worker_cpu_max = [item .worker_cpu_max if item .worker_cpu_max else 0 for item in rayclusters ]
633- worker_mem_min = [item .worker_mem_min if item .worker_mem_min else 0 for item in rayclusters ]
634- worker_mem_max = [item .worker_mem_max if item .worker_mem_max else 0 for item in rayclusters ]
629+ head_cpu_requests = [item .head_cpu_requests if item .head_cpu_requests else 0 for item in rayclusters ]
630+ head_cpu_limits = [item .head_cpu_limits if item .head_cpu_limits else 0 for item in rayclusters ]
631+ head_mem_requests = [item .head_mem_requests if item .head_mem_requests else 0 for item in rayclusters ]
632+ head_mem_limits = [item .head_mem_limits if item .head_mem_limits else 0 for item in rayclusters ]
633+ worker_cpu_requests = [item .worker_cpu_requests if item .worker_cpu_requests else 0 for item in rayclusters ]
634+ worker_cpu_limits = [item .worker_cpu_limits if item .worker_cpu_limits else 0 for item in rayclusters ]
635+ worker_mem_requests = [item .worker_mem_requests if item .worker_mem_requests else 0 for item in rayclusters ]
636+ worker_mem_limits = [item .worker_mem_limits if item .worker_mem_limits else 0 for item in rayclusters ]
635637 status = [item .status .name for item in rayclusters ]
636638
637639 status = [format_status (item .status ) for item in rayclusters ]
@@ -641,12 +643,14 @@ def _fetch_cluster_data(namespace):
641643 "namespace" : namespaces ,
642644 "head gpus" : head_extended_resources ,
643645 "worker gpus" : worker_extended_resources ,
644- "head cpus" : head_cpus ,
645- "head memory" : head_mem ,
646- "worker cpu requests" : worker_cpu_min ,
647- "worker cpu limits" : worker_cpu_max ,
648- "worker memory requests" : worker_mem_min ,
649- "worker memory limits" : worker_mem_max ,
646+ "head cpu requests" : head_cpu_requests ,
647+ "head cpu limits" : head_cpu_limits ,
648+ "head memory requests" : head_mem_requests ,
649+ "head memory limits" : head_mem_limits ,
650+ "worker cpu requests" : worker_cpu_requests ,
651+ "worker cpu limits" : worker_cpu_limits ,
652+ "worker memory requests" : worker_mem_requests ,
653+ "worker memory limits" : worker_mem_limits ,
650654 "status" : status
651655 }
652656 return pd .DataFrame (data )
@@ -668,7 +672,7 @@ def on_cluster_click(change):
668672 new_value = change ["new" ]
669673 my_output .clear_output ()
670674 with my_output :
671- display (HTML (df [df ["name" ]== new_value ][["name" , "namespace" , "head gpus" , "worker gpus" , "head cpus " , "head memory" , "worker memory requests" , "worker memory limits" , "status" ]].to_html (escape = False , index = False , border = 2 )))
675+ display (HTML (df [df ["name" ]== new_value ][["name" , "namespace" , "head gpus" , "worker gpus" , "head cpu requests" , "head cpu limits" , "head memory requests" , "head memory limits" , "worker memory requests" , "worker memory limits" , "status" ]].to_html (escape = False , index = False , border = 2 )))
672676
673677 classification_widget .observe (on_cluster_click , names = "value" )
674678 display (widgets .VBox ([classification_widget , my_output ]))
@@ -1041,8 +1045,8 @@ def _map_to_ray_cluster(rc) -> Optional[RayCluster]:
10411045 worker_mem_requests = rc ["spec" ]["workerGroupSpecs" ][0 ]["template" ]["spec" ][
10421046 "containers"
10431047 ][0 ]["resources" ]["requests" ]["memory" ],
1044- worker_cpu_min = rc ["spec" ]["workerGroupSpecs" ][0 ]["template" ]["spec" ]["containers" ][0 ]["resources" ]["requests" ]["cpu" ],
1045- worker_cpu_max = rc ["spec" ]["workerGroupSpecs" ][0 ]["template" ]["spec" ]["containers" ][
1048+ worker_cpu_requests = rc ["spec" ]["workerGroupSpecs" ][0 ]["template" ]["spec" ]["containers" ][0 ]["resources" ]["requests" ]["cpu" ],
1049+ worker_cpu_limits = rc ["spec" ]["workerGroupSpecs" ][0 ]["template" ]["spec" ]["containers" ][
10461050 0
10471051 ]["resources" ]["limits" ]["cpu" ],
10481052 worker_extended_resources = worker_extended_resources ,
@@ -1083,8 +1087,8 @@ def _copy_to_ray(cluster: Cluster) -> RayCluster:
10831087 workers = cluster .config .num_workers ,
10841088 worker_mem_requests = cluster .config .worker_memory_requests ,
10851089 worker_mem_limits = cluster .config .worker_memory_limits ,
1086- worker_cpu_min = cluster .config .worker_cpu_requests ,
1087- worker_cpu_max = cluster .config .worker_cpu_limits ,
1090+ worker_cpu_requests = cluster .config .worker_cpu_requests ,
1091+ worker_cpu_limits = cluster .config .worker_cpu_limits ,
10881092 worker_extended_resources = cluster .config .worker_extended_resource_requests ,
10891093 namespace = cluster .config .namespace ,
10901094 dashboard = cluster .cluster_dashboard_uri (),
0 commit comments