@@ -4,9 +4,12 @@ name = "notebooks"
version = "2025.1"
description = "Open Data Hub / OpenShift AI Notebook / Workbench images, and tests for the same in Python."
readme = "README.md"
- package-mode = false
requires-python = ">=3.11,<3.13"

+ # WARNING: Do NOT attempt `uv lock --universal` (the default) resolution on this pyproject.toml.
+ # It would work (if the `conflicts` section were defined), but it would run for 10+ minutes and produce a huge `uv.lock` file (60MiB+).
+ # Instead, use the `scripts/sync-requirements-txt.sh` script, which runs in seconds.
+
[dependency-groups]

# ###########################
@@ -35,26 +38,24 @@ dev = [
]

base = [
-
"wheel~=0.45.1",
- "setuptools~=78.1.1; python_version == '3.11'",
- "setuptools~=75.8.2; python_version == '3.12'",
+ "setuptools~=78.1.1",
]

jupyter-base = [
- "jupyterlab==4.2.7",
- "jupyter-server~=2.15.0",
+ "jupyterlab==4.4.4",
+ "jupyter-server~=2.16.0",
"jupyter-server-proxy~=4.4.0",
"jupyter-server-terminals~=0.5.3",
- "jupyterlab-git~=0.50.1",
+ "jupyterlab-git~=0.51.1",
"nbdime~=4.0.2",
"nbgitpuller~=1.2.2",
]

elyra-base = [
- "odh-elyra==4.2.0",
- "jupyterlab-lsp~=5.1.0",
- "jupyterlab-widgets~=3.0.13",
+ "odh-elyra==4.2.3",
+ "jupyterlab-lsp~=5.1.1",
+ "jupyterlab-widgets~=3.0.15",
"jupyter-resource-usage~=1.1.1",
]

@@ -63,15 +64,14 @@ elyra-preferred = [
]

elyra-trustyai = [
- "jupyter-bokeh~=3.0.5",
+ "jupyter-bokeh~=3.0.5", # trustyai 0.6.1 depends on jupyter-bokeh~=3.0.5
]

db-connectors = [
"pymongo~=4.11.2",
"psycopg~=3.2.5",
"pyodbc~=5.2.0",
- "mysql-connector-python~=9.3.0; python_version == '3.11'",
- "mysql-connector-python~=9.2.0; python_version == '3.12'",
+ "mysql-connector-python~=9.3.0",
]

# onnxconverter-common ~=1.13.0 required for skl2onnx, as upgraded version is not compatible with protobuf
@@ -87,8 +87,7 @@ datascience-base = [
]

codeflare = [
- "codeflare-sdk~=0.29.0; python_version == '3.11'",
- "codeflare-sdk~=0.28.1; python_version == '3.12'",
+ "codeflare-sdk~=0.30.0",
]

datascience-preferred = [
@@ -109,7 +108,7 @@ datascience-trustyai = [
"matplotlib~=3.6.3",
"numpy~=1.24.1",
"pandas~=1.5.3",
- "scikit-learn~=1.2.1"
+ "scikit-learn~=1.7.0"
]

tensorflowcuda = [
@@ -118,8 +117,8 @@ tensorflowcuda= [
"tf2onnx~=1.16.1",
]
tensorflowrocm = [
- "tensorflow-rocm~=2.14.0.600",
- "tensorboard~=2.14.0",
+ "tensorflow-rocm~=2.18.1",
+ "tensorboard~=2.18.0",
"tf2onnx~=1.16.1",
]
pytorchcuda = [
@@ -135,7 +134,7 @@ pytorchrocm = [
]
llmcompressor = [
"vllm~=0.8.5",
- "llmcompressor~=0.5.1; python_version == '3.11'",
+ "llmcompressor~=0.6.0",
"lm-eval~=0.4.8",
"loguru",
"pyyaml>=5.0.0",
@@ -150,7 +149,8 @@ llmcompressor = [
]
trustyai = [
"torch==2.6.0",
- "transformers~=4.49.0",
+ "transformers~=4.53.0; python_version == '3.11'",
+ "transformers~=4.55.0; python_version == '3.12'",
"datasets~=3.4.1",
"accelerate~=1.5.2",
"trustyai~=0.6.1",
@@ -160,14 +160,14 @@ trustyai = [
# Workbench Image Groups #
# ########################

+ # https://docs.astral.sh/uv/concepts/projects/workspaces/#when-not-to-use-workspaces
+
jupyter-minimal-image = [
- { include-group = "dev" },
{ include-group = "base" },
{ include-group = "jupyter-base" },
]

jupyter-datascience-image = [
- { include-group = "dev" },
{ include-group = "base" },
{ include-group = "jupyter-base" },
{ include-group = "elyra-base" },
@@ -179,7 +179,6 @@ jupyter-datascience-image = [
]

jupyter-tensorflow-image = [
- { include-group = "dev" },
{ include-group = "base" },
{ include-group = "jupyter-base" },
{ include-group = "elyra-base" },
@@ -192,7 +191,6 @@ jupyter-tensorflow-image = [
]

jupyter-tensorflow-rocm-image = [
- { include-group = "dev" },
{ include-group = "base" },
{ include-group = "jupyter-base" },
{ include-group = "elyra-base" },
@@ -205,7 +203,6 @@ jupyter-tensorflow-rocm-image = [
]

jupyter-pytorch-image = [
- { include-group = "dev" },
{ include-group = "base" },
{ include-group = "jupyter-base" },
{ include-group = "elyra-base" },
@@ -218,7 +215,6 @@ jupyter-pytorch-image = [
]

jupyter-pytorch-rocm-image = [
- { include-group = "dev" },
{ include-group = "base" },
{ include-group = "jupyter-base" },
{ include-group = "elyra-base" },
@@ -231,7 +227,6 @@ jupyter-pytorch-rocm-image = [
]

jupyter-pytorch-llmcompressor-image = [
- { include-group = "dev" },
{ include-group = "base" },
{ include-group = "jupyter-base" },
{ include-group = "elyra-base" },
@@ -240,10 +235,10 @@ jupyter-pytorch-llmcompressor-image = [
{ include-group = "datascience-tensorflow" },
{ include-group = "db-connectors" },
{ include-group = "llmcompressor" },
+ { include-group = "pytorchcuda" },
]

jupyter-trustyai-image = [
- { include-group = "dev" },
{ include-group = "base" },
{ include-group = "jupyter-base" },
{ include-group = "elyra-base" },
@@ -253,23 +248,40 @@ jupyter-trustyai-image = [
{ include-group = "codeflare" },
{ include-group = "db-connectors" },
{ include-group = "trustyai" },
+ { include-group = "pytorchcuda" },
]

+ # https://docs.astral.sh/uv/concepts/projects/dependencies/#dependency-sources
[tool.uv.sources]
+
+ # NOTE: it is important to specify the `index` for the top-level groups, i.e. the ones used in the final resolution.
+ # Index values do not carry over from an included (lower-level) group to the group that includes it.
+
+ # https://docs.astral.sh/uv/guides/integration/pytorch/#using-uv-with-pytorch
torch = [
- { index = "pytorch-cuda", group = "pytorchcuda" },
- { index = "pytorch-cuda", group = "trustyai" },
- { index = "pytorch-rocm", group = "pytorchrocm" },
+ { index = "pytorch-cuda", group = "jupyter-pytorch-image" },
+ { index = "pytorch-cuda", group = "jupyter-pytorch-llmcompressor-image" },
+ { index = "pytorch-cuda", group = "jupyter-trustyai-image" },
+
+ { index = "pytorch-rocm", group = "jupyter-pytorch-rocm-image" },
]
torchvision = [
- { index = "pytorch-cuda", group = "pytorchcuda" },
- { index = "pytorch-rocm", group = "pytorchrocm" },
+ { index = "pytorch-cuda", group = "jupyter-pytorch-image" },
+ { index = "pytorch-cuda", group = "jupyter-pytorch-llmcompressor-image" },
+ { index = "pytorch-cuda", group = "jupyter-trustyai-image" },
+
+ { index = "pytorch-rocm", group = "jupyter-pytorch-rocm-image" },
]
pytorch-triton-rocm = [
- { index = "pytorch-rocm" },
+ { index = "pytorch-rocm", group = "jupyter-pytorch-rocm-image" },
+ ]
+ tensorflow-rocm = [
+ { url = "https://repo.radeon.com/rocm/manylinux/rocm-rel-6.4/tensorflow_rocm-2.18.1-cp312-cp312-manylinux_2_28_x86_64.whl", group = "jupyter-tensorflow-rocm-image" },
]

-
+ # https://docs.astral.sh/uv/concepts/indexes/#package-indexes
+ # TODO(jdanek): explicit = false, otherwise `uv pip compile --emit-index-url` won't emit it
+ # also see https://github.com/astral-sh/uv/issues/10008, https://github.com/astral-sh/uv/issues/15534
[[tool.uv.index]]
name = "pytorch-cuda"
url = "https://download.pytorch.org/whl/cu126"
@@ -283,7 +295,7 @@ explicit = true
[[tool.uv.index]]
name = "pypi"
url = "https://pypi.org/simple/"
- explicit = true
+ explicit = true

[[tool.uv.dependency-metadata]]
name = "tf2onnx"
@@ -297,68 +309,15 @@ requires-dist = ["compressed-tensors"]

[[tool.uv.dependency-metadata]]
name = "tensorflow-rocm"
- version = "2.14.0.600"
- requires-dist = [
- "tensorflow-cpu-aws; platform_machine != 'x86_64'",
- "tensorflow-estimator",
- "tensorflow-io-gcs-filesystem",
- ]
+ version = "2.18.1"
+ requires-dist = []

[tool.uv]
package = false
- required-environments = [
- "sys_platform == 'linux'"
- ]
-
- conflicts = [
- [
- { group = "elyra-preferred" },
- { group = "elyra-trustyai" }
- ],
- [
- { group = "elyra-preferred" },
- { group = "trustyai" }
- ],
- [
- { group = "datascience-preferred" },
- { group = "trustyai" },
- { group = "tensorflowcuda" },
- { group = "llmcompressor" },
- ],
- [
- { group = "datascience-tensorflow" },
- { group = "trustyai" }
- ],
- [
- { group = "datascience-preferred" },
- { group = "datascience-trustyai" },
- { group = "datascience-tensorflow" },
- ],
- [
- { group = "tensorflowcuda" },
- { group = "tensorflowrocm" },
- { group = "pytorchcuda" },
- { group = "pytorchrocm" },
- { group = "trustyai" },
- ],
- [
- { group = "tensorflowcuda" },
- { group = "tensorflowrocm" },
- { group = "datascience-base" },
- ],
- [
- { group = "tensorflowcuda" },
- { group = "trustyai" },
- { group = "datascience-trustyai" },
- ],
- [
- { group = "llmcompressor" },
- { group = "trustyai" },
- ],
- [
- { group = "llmcompressor" },
- { group = "codeflare" },
- ]
+
+ # https://docs.astral.sh/uv/concepts/resolution/#platform-specific-resolution
+ environments = [
+ "sys_platform == 'linux' and implementation_name == 'cpython'",
]

# https://github.com/astral-sh/uv/issues/3957#issuecomment-2659350181
@@ -477,4 +436,4 @@ indent-style = "space"
skip-magic-trailing-comma = false

docstring-code-format = true
- docstring-code-line-length = "dynamic"
+ docstring-code-line-length = "dynamic"
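For orientation, a rough sketch (not part of the diff) of the workflow the new comments describe: regenerating the per-image requirements files via the repository's sync script instead of a universal `uv lock`. The `uv pip compile` invocation, the group name, and the output path below are illustrative assumptions; `scripts/sync-requirements-txt.sh` is the authoritative entry point.

    # Preferred: regenerate the per-image requirements.txt files (runs in seconds).
    ./scripts/sync-requirements-txt.sh

    # Roughly the kind of call such a script would make for one image group
    # (illustrative only; flags, group name, and output path are assumptions,
    # not taken from this commit):
    uv pip compile pyproject.toml \
        --group jupyter-datascience-image \
        --emit-index-url \
        --output-file requirements.txt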