-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpyproject.toml
More file actions
76 lines (73 loc) · 2.57 KB
/
pyproject.toml
File metadata and controls
76 lines (73 loc) · 2.57 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
[project]
name = "txt2img-unsupervised"
version = "0.1.0"
description = ""
authors = [{ name = "Echo Nolan", email = "echo@echonolan.net" }]
license = { text = "BSD-3-Clause" }
readme = "README.md"
# Capped below 3.12: the infinidata dependency is a prebuilt cp311-only wheel
# (see [tool.uv.sources]).
requires-python = ">=3.11.14,<3.12"
# Sorted alphabetically (case-insensitive). Unversioned entries
# (flash-attention-jax, infinidata, optax, torch) are pinned via
# [tool.uv.sources] instead of a version range.
dependencies = [
    "CloseableQueue-py3>=0.9.2,<0.10",
    "dacite>=1.8.1,<2",
    "datasets>=3.0.1,<4",
    "einops>=0.7.0,<0.8",
    # NOTE(review): both flash-attention-jax (git source, see [tool.uv.sources])
    # and flash-attn-jax (PyPI) are listed -- confirm both are intentional.
    "flash-attention-jax",
    "flash-attn-jax>=0.2.2,<0.3",
    "flax>=0.8.3,<0.9",
    "imageio-ffmpeg>=0.4.8,<0.5",
    "infinidata",
    "internetarchive>=3.5.0,<4",
    "numpy>=1,<2",
    "omegaconf>=2.3.0,<3",
    "optax",
    "orbax-checkpoint>=0.5.10,<0.6",
    "pillow>=9.5.0,<10",
    "pytest>=7.3.1,<8",
    "pytorch-lightning>=2.0.2,<3",
    "rasterio>=1.4.3,<2",
    "seaborn>=0.13.2,<0.14",
    "sortedcontainers>=2.4.0,<3",
    "torch",
    "transformers>=4.34.1,<5",
    "wandb>=0.17.9,<0.18",
    "warcat>=2.2.5,<3",
    "xdg-base-dirs>=6.0.1,<7",
]

# PEP 735 dependency groups (optional installs, not part of the base package).
[dependency-groups]
# CUDA 12 runtime stack for GPU use. The base install stays CPU-only:
# torch is pinned to a +cpu wheel in [tool.uv.sources].
cuda = [
    "jax[cuda12]>=0.4.33,<0.4.34",
    "nvidia-cublas-cu12>=12.1.3.1,<12.7",
    "nvidia-cuda-cupti-cu12>=12.1.105,<12.7",
    "nvidia-cuda-nvcc-cu12>=12.1.105,<12.7",
    "nvidia-cuda-runtime-cu12>=12.1.105,<12.7",
    "nvidia-cudnn-cu12>=9.1,<10.0",
    "nvidia-cufft-cu12>=11.0.2.54,<11.3",
    "nvidia-cusolver-cu12>=11.4.5.107,<11.7",
    "nvidia-cusparse-cu12>=12.1.0.106,<12.4",
    "nvidia-nccl-cu12>=2.18.1,<2.24",
    "nvidia-nvjitlink-cu12>=12.1.105,<12.7",
]

# Development tooling (sorted alphabetically).
dev = [
    "black>=23.3.0,<24",
    "flameprof>=0.4,<0.5",
    "hypothesis[numpy]>=6.92.2,<7",
    "ipython>=8.14.0,<9",
    "matplotlib>=3.7.1,<4",
    "memray>=1.11.0,<2",
    "mypy>=1.3.0,<2",
    "pre-commit>=3.3.0,<4",
    "py-spy>=0.4.0,<0.5",
    "types-pillow>=9.5.0.4,<10",
    "types-tqdm>=4.65.0.1,<5",
]

# Non-registry sources: uv resolves these packages from the given git commits
# and wheel URLs instead of PyPI.
[tool.uv.sources]
# Fork pinned to an exact commit for reproducibility.
flash-attention-jax = { git = "https://github.com/enolan/flash-attention-jax.git", rev = "20621388795614de137c159a5c07abec5c475388" }
# TODO: use regular release of optax
# we can get rid of my special optax branch and use optax.contrib.schedule_free_adamw instead of the use_first_moment=False hack
# but the schedule_free_adamw function isn't in a release yet, and my code that makes donating the train state work isn't either.
optax = { git = "https://github.com/enolan/optax.git", rev = "b60629df6d0323db984a425ca114a11ee1f012d9" }
# Prebuilt cp311 manylinux wheel -- this is why requires-python caps at <3.12.
infinidata = { url = "https://github.com/enolan/infinidata/releases/download/v0.0.1-alpha2/infinidata-0.0.1a2-cp311-cp311-manylinux_2_34_x86_64.whl" }
# CPU-only torch build (note the /whl/cpu/ path), pinned by direct wheel URL.
torch = { url = "https://download.pytorch.org/whl/cpu/torch-2.0.1%2Bcpu-cp311-cp311-linux_x86_64.whl" }
# PEP 517 build configuration: build with hatchling.
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"