# pyproject.toml — project metadata and uv configuration for autokernel
[project]
name = "autokernel"
version = "1.0.0"
description = "Autonomous AI agents optimizing GPU kernels overnight -- for any PyTorch model"
readme = "README.md"
requires-python = ">=3.10"
# Core runtime dependencies (PEP 508 specifiers), sorted alphabetically.
dependencies = [
    "matplotlib>=3.10.0",
    "numpy>=2.2.0",
    "pandas>=2.2.0",
    "torch>=2.4.0",
    "triton>=3.3.0",
]

[project.optional-dependencies]
# Model ingestion: profile and optimize any PyTorch model
models = [
    "accelerate>=1.5.0",
    "safetensors>=0.5.0",
    "transformers>=4.50.0",
]

# CUDA C++ backend: native CUDA kernel compilation (speeds up build from ~30s to ~5s)
cuda = [
    "ninja>=1.11.0",
]

# KernelBench integration: benchmark against 250+ standardized GPU kernel problems
kernelbench = [
    "datasets>=2.16.0",
]

# Advanced profiling: HTA, memory analysis
profiling = [
    "HolisticTraceAnalysis>=0.2.0",
]

# HuggingFace Kernels: export and distribute optimized kernels via the Hub
hf-kernels = [
    "huggingface-hub>=0.20.0",
    "kernels>=0.4.0",
]

# Resolve torch from the CUDA 12.8 wheel index (declared in [[tool.uv.index]])
# instead of PyPI. uv accepts either a single source table or a list of
# conditional sources; the plain inline table is the simpler form for a
# single unconditional source.
[tool.uv.sources]
torch = { index = "pytorch-cu128" }

# PyTorch wheel index serving CUDA 12.8 builds.
[[tool.uv.index]]
name = "pytorch-cu128"
url = "https://download.pytorch.org/whl/cu128"
# NOTE(review): per uv's index docs, `explicit = true` restricts this index to
# packages that select it via [tool.uv.sources] (torch, above) — it is never
# used as a fallback for other dependencies.
explicit = true