|
5 | 5 | import os
|
6 | 6 | import sys
|
7 | 7 |
|
8 |
| -from notebook.base.handlers import IPythonHandler |
9 |
| -from notebook.nbextensions import install_nbextension |
| 8 | +from notebook.base.handlers import APIHandler |
10 | 9 | from notebook.utils import url_path_join as ujoin
|
11 | 10 | from tornado import web
|
12 | 11 |
|
13 |
| -from .clustermanager import ClusterManager |
| 12 | +from ..cluster import ClusterManager |
| 13 | +from ..util import abbreviate_profile_dir |
14 | 14 |
|
15 | 15 |
|
# Absolute path to the static assets bundled next to this module.
static = os.path.join(os.path.dirname(__file__), 'static')
|
17 | 17 |
|
18 | 18 |
|
class ClusterHandler(APIHandler):
    """Base handler for the cluster API.

    Exposes the shared ClusterManager from the application settings and
    builds JSONable models of individual clusters.
    """

    @property
    def cluster_manager(self):
        """The ClusterManager instance stored in the app settings."""
        return self.settings['cluster_manager']

    def cluster_model(self, cluster):
        """Create a JSONable cluster model"""
        model = cluster.to_dict()
        # provide abbreviated profile info alongside the full profile_dir
        model['cluster']['profile'] = abbreviate_profile_dir(
            model['cluster']['profile_dir']
        )
        return model
| 32 | + |
class ClusterListHandler(ClusterHandler):
    """List and create new clusters

    GET /clusters : list current clusters
    POST /clusters (JSON body) : create a new cluster
    """

    @web.authenticated
    def get(self):
        """Return a model for every known cluster, keyed by cluster key."""
        # currently reloads everything from disk. Is that what we want?
        clusters = self.cluster_manager.load_clusters()
        self.finish(
            {key: self.cluster_model(cluster) for key, cluster in clusters.items()}
        )

    @web.authenticated
    def post(self):
        """Create a new cluster.

        The optional JSON body is passed through as keyword arguments to
        ClusterManager.new_cluster (e.g. 'profile', 'cluster_id').
        """
        body = self.get_json_body() or {}
        cluster_id, cluster = self.cluster_manager.new_cluster(**body)
        # Respond with the model of the newly created cluster instead of an
        # empty object, so clients can learn the assigned cluster_id and
        # initial state (previously the result of new_cluster was discarded).
        self.write(json.dumps(self.cluster_model(cluster)))
35 | 54 |
|
36 | 55 |
|
class ClusterActionHandler(ClusterHandler):
    """Actions on a single cluster

    GET: read single cluster model
    POST: start
    PATCH: engines?
    DELETE: stop
    """

    def get_cluster(self, cluster_key):
        """Return the cluster registered under `cluster_key`, or raise 404."""
        try:
            return self.cluster_manager.get_cluster(cluster_key)
        except KeyError:
            # suppress the KeyError context; the 404 is the whole story
            raise web.HTTPError(404, f"No such cluster: {cluster_key}") from None

    @web.authenticated
    async def post(self, cluster_key):
        """Start the cluster, with `n` engines if the `n` argument is given."""
        cluster = self.get_cluster(cluster_key)
        n = self.get_argument('n', default=None)
        if n is not None:
            # Query arguments arrive as strings; start_cluster expects an
            # integer engine count (the pre-rewrite code called int(n) here).
            try:
                n = int(n)
            except ValueError:
                raise web.HTTPError(
                    400, f"n must be an integer, got {n!r}"
                ) from None
        await cluster.start_cluster(n=n)
        self.write(json.dumps(self.cluster_model(cluster)))

    @web.authenticated
    async def get(self, cluster_key):
        """Return the model for a single cluster."""
        cluster = self.get_cluster(cluster_key)
        self.write(json.dumps(self.cluster_model(cluster)))

    @web.authenticated
    async def delete(self, cluster_key):
        """Stop the cluster and remove it from the manager."""
        cluster = self.get_cluster(cluster_key)
        await cluster.stop_cluster()
        self.cluster_manager.remove_cluster(cluster_key)
        # respond with the final model of the (now stopped) cluster
        self.write(json.dumps(self.cluster_model(cluster)))
50 | 89 |
|
51 | 90 |
|
52 | 91 | # -----------------------------------------------------------------------------
|
53 | 92 | # URL to handler mappings
|
54 | 93 | # -----------------------------------------------------------------------------
|
55 | 94 |
|
56 | 95 |
|
57 |
| -_cluster_action_regex = r"(?P<action>start|stop)" |
58 |
| -_profile_regex = r"(?P<profile>[^\/]+)" # there is almost no text that is invalid |
| 96 | +_cluster_action_regex = r"(?P<action>start|stop|create)" |
| 97 | +_cluster_key_regex = ( |
| 98 | + r"(?P<cluster_key>[^\/]+)" # there is almost no text that is invalid |
| 99 | +) |
59 | 100 |
|
default_handlers = [
    # list current clusters / create a new one
    (r"/clusters", ClusterListHandler),
    # read / start / stop a single cluster identified by its key
    (rf"/clusters/{_cluster_key_regex}", ClusterActionHandler),
]
|
68 | 108 |
|
69 | 109 |
|
70 | 110 | def load_jupyter_server_extension(nbapp):
|
71 | 111 | """Load the nbserver extension"""
|
72 |
| - from distutils.version import LooseVersion as V |
73 | 112 | import notebook
|
74 | 113 |
|
75 | 114 | nbapp.log.info("Loading IPython parallel extension")
|
76 | 115 | webapp = nbapp.web_app
|
77 | 116 | webapp.settings['cluster_manager'] = ClusterManager(parent=nbapp)
|
78 | 117 |
|
79 |
| - if V(notebook.__version__) < V('4.2'): |
80 |
| - windows = sys.platform.startswith('win') |
81 |
| - install_nbextension( |
82 |
| - static, destination='ipyparallel', symlink=not windows, user=True |
83 |
| - ) |
84 |
| - cfgm = nbapp.config_manager |
85 |
| - cfgm.update( |
86 |
| - 'tree', |
87 |
| - { |
88 |
| - 'load_extensions': { |
89 |
| - 'ipyparallel/main': True, |
90 |
| - } |
91 |
| - }, |
92 |
| - ) |
93 | 118 | base_url = webapp.settings['base_url']
|
94 | 119 | webapp.add_handlers(
|
95 | 120 | ".*$", [(ujoin(base_url, pat), handler) for pat, handler in default_handlers]
|
|
0 commit comments