-
Notifications
You must be signed in to change notification settings - Fork 2.7k
Expand file tree
/
Copy pathMakefile
More file actions
146 lines (116 loc) · 5.04 KB
/
Makefile
File metadata and controls
146 lines (116 loc) · 5.04 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
# This is not the build system, just a helper to run common development commands.
# Make sure to first initialize the build system with:
# make dev-install
# Python interpreter used for all python/pytest invocations (override: make PYTHON=python3).
PYTHON ?= python
# Ask python/build_helpers.py where the CMake build tree lives (expanded once via :=).
BUILD_DIR := $(shell cd python; $(PYTHON) -c 'from build_helpers import get_cmake_dir; print(get_cmake_dir())')
# Default install prefix for `make install`: an "install" dir next to the build tree.
INSTALL_DIR ?= $(dir $(BUILD_DIR))install
# triton-opt binary produced by the build; used by golden-samples.
TRITON_OPT := $(BUILD_DIR)/bin/triton-opt
PYTEST := $(PYTHON) -m pytest
# LLVM build tree used by dev-install-llvm, defaulting to .llvm-project/build
# next to this Makefile. NOTE(review): the double quotes become part of the
# variable's value — harmless when expanded unquoted in recipes below, but
# would break any use that quotes the variable again; confirm intended.
LLVM_BUILD_PATH ?= "$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))/.llvm-project/build"
# pytest-xdist worker count (-n) for most test targets.
NUM_PROCS ?= 8
# Incremental builds
.PHONY: all triton-opt

# Rebuild whatever is out of date in the ninja build tree.
all:
	ninja -C $(BUILD_DIR)

# Build only the triton-opt tool.
triton-opt:
	ninja -C $(BUILD_DIR) triton-opt
# Testing
.PHONY: test-lit test-cpp

# MLIR lit/FileCheck tests.
test-lit:
	ninja -C $(BUILD_DIR) check-triton-lit-tests

# C++ unit tests.
test-cpp:
	ninja -C $(BUILD_DIR) check-triton-unit-tests
.PHONY: test-unit
# Python unit tests. The bulk of python/test/unit runs in parallel; suites
# that need special handling follow as separate recipe lines (each line runs
# in its own shell, and a failing line aborts the target).
test-unit: all
# plugins/* and test_debug.py are excluded here and run explicitly below.
	cd python/test/unit && $(PYTEST) -n $(NUM_PROCS) --ignore-glob='plugins/*' --ignore=test_debug.py
	$(PYTEST) -n $(NUM_PROCS) python/test/unit/test_debug.py
# NOTE(review): -n 6 instead of $(NUM_PROCS) — presumably to bound GPU memory; confirm.
	$(PYTEST) -n 6 python/triton_kernels/tests/
# Run attention separately to avoid out of gpu memory
	$(PYTEST) python/tutorials/06-fused-attention.py
# Instrumentation test needs the LLVM pass plugin loaded and line info kept.
	TRITON_ALWAYS_COMPILE=1 TRITON_DISABLE_LINE_INFO=0 LLVM_PASS_PLUGIN_PATH=python/triton/instrumentation/libGPUInstrumentationTestLib.so \
	$(PYTEST) --capture=tee-sys -rfs -vvv python/test/unit/instrumentation/test_gpuhello.py
# Plugin tests each load their own shared library via TRITON_PASS_PLUGIN_PATH.
	TRITON_PASS_PLUGIN_PATH=python/triton/plugins/libTritonPluginsTestLib.so \
	$(PYTEST) -vvv python/test/unit/plugins/test_plugin.py
	TRITON_PASS_PLUGIN_PATH=python/triton/plugins/libMLIRDialectPlugin.so \
	$(PYTEST) -vvv python/test/unit/plugins/test_dialect_plugin.py
.PHONY: test-gluon
# Gluon tests: main suites in parallel, examples with fewer workers
# (NOTE(review): -n 2 is presumably a resource limit — confirm).
test-gluon: all
	$(PYTEST) -n $(NUM_PROCS) python/test/gluon/ python/tutorials/gluon/
	$(PYTEST) -n 2 python/examples/gluon/
.PHONY: test-regression test-microbenchmark

# Regression test suite.
test-regression: all
	$(PYTEST) -n $(NUM_PROCS) python/test/regression

# Kernel-launch overhead microbenchmark.
test-microbenchmark: all
	$(PYTHON) python/test/microbenchmark/launch_overhead.py
.PHONY: test-interpret
# Run a subset of the unit tests under the Triton interpreter
# (TRITON_INTERPRET=1, --device=cpu, tests marked "interpreter").
test-interpret: all
	cd python/test/unit && TRITON_INTERPRET=1 $(PYTEST) -n 16 -m interpreter cuda language/test_core.py language/test_standard.py \
	language/test_random.py language/test_block_pointer.py language/test_subprocess.py language/test_line_info.py \
	language/test_tuple.py runtime/test_launch.py runtime/test_autotuner.py::test_kwargs[False] \
	../../tutorials/06-fused-attention.py::test_op --device=cpu
.PHONY: test-proton
# Proton profiler tests. The bulk runs in parallel; override, hw-trace and
# overhead tests are excluded there and run serially below (overhead/trace
# measurements would be skewed by concurrent workers).
test-proton: all
	$(PYTEST) -n $(NUM_PROCS) third_party/proton/test --ignore=third_party/proton/test/test_override.py -k "not test_overhead and not test_hw_trace"
	$(PYTEST) third_party/proton/test/test_profile.py::test_hw_trace
	$(PYTEST) third_party/proton/test/test_override.py
	$(PYTEST) third_party/proton/test/test_instrumentation.py::test_overhead
.PHONY: test-python test-nogpu test

# Aggregate of all python-side test suites.
test-python: test-unit test-regression test-interpret test-proton

# Tests that do not require a GPU.
test-nogpu: test-lit test-cpp
	$(PYTEST) python/test/gluon/test_frontend.py
	$(PYTEST) python/test/unit/language/test_frontend.py

# Everything.
test: test-lit test-cpp test-python
# pip install-ing
.PHONY: dev-install-requires dev-install-torch

# Install python build and test requirements.
dev-install-requires:
	$(PYTHON) -m pip install -r python/requirements.txt
	$(PYTHON) -m pip install -r python/test-requirements.txt

# install torch but ensure pytorch-triton isn't installed
dev-install-torch:
	$(PYTHON) -m pip install torch
	$(PYTHON) -m pip uninstall triton pytorch-triton -y
.PHONY: dev-install-triton
# Editable (pip -e) install of triton itself; assumes requirements are present.
dev-install-triton:
	$(PYTHON) -m pip install -e . --no-build-isolation -v
.PHONY: dev-install
# Serialize dev-install's prerequisites: requirements must be installed before
# the editable triton build. Fixed: ".NOPARALLEL" is not a GNU Make special
# target and was silently ignored; the real one is .NOTPARALLEL (per-target
# prerequisite serialization needs GNU Make >= 4.4; older makes fall back to
# disabling parallelism file-wide, which is safe for this helper Makefile).
.NOTPARALLEL: dev-install
dev-install: dev-install-requires dev-install-triton
.PHONY: dev-install-llvm
# Build LLVM from source, then dev-install triton against that build.
# Fixed: ".NOPARALLEL" is not a GNU Make special target and was silently
# ignored; .NOTPARALLEL is the real one (prerequisite serialization for a
# listed target needs GNU Make >= 4.4).
.NOTPARALLEL: dev-install-llvm
dev-install-llvm:
	LLVM_BUILD_PATH=$(LLVM_BUILD_PATH) scripts/build-llvm-project.sh
# Point the triton build at the freshly built LLVM; recurse via $(MAKE) so
# -j/-n and the jobserver propagate.
	TRITON_BUILD_WITH_CLANG_LLD=1 TRITON_BUILD_WITH_CCACHE=0 \
	LLVM_INCLUDE_DIRS=$(LLVM_BUILD_PATH)/include \
	LLVM_LIBRARY_DIR=$(LLVM_BUILD_PATH)/lib \
	LLVM_SYSPATH=$(LLVM_BUILD_PATH) \
	$(MAKE) dev-install
# Package C++ artifacts
.PHONY: install
# Install the CMake build products into INSTALL_DIR (override with
# make install INSTALL_DIR=/path).
install:
	cmake --install $(BUILD_DIR) --prefix $(INSTALL_DIR)
# Updating lit tests
.PHONY: golden-samples
# Regenerate the checked-in golden .mlir samples from their .mlir.in sources:
# run the pipeline through triton-opt, then pipe the output into
# generate-test-checks.py to produce the CHECK lines.
golden-samples: triton-opt
	$(TRITON_OPT) test/TritonGPU/samples/simulated-grouped-gemm.mlir.in -tritongpu-pipeline -canonicalize | \
	$(PYTHON) utils/generate-test-checks.py --source test/TritonGPU/samples/simulated-grouped-gemm.mlir.in --source_delim_regex="\bmodule" \
	-o test/TritonGPU/samples/simulated-grouped-gemm.mlir
	$(TRITON_OPT) test/TritonGPU/samples/descriptor-matmul-pipeline.mlir.in -tritongpu-assign-latencies -tritongpu-schedule-loops -tritongpu-pipeline -canonicalize | \
	$(PYTHON) utils/generate-test-checks.py --source test/TritonGPU/samples/descriptor-matmul-pipeline.mlir.in --source_delim_regex="\bmodule" \
	-o test/TritonGPU/samples/descriptor-matmul-pipeline.mlir
# Documentation
#
.PHONY: docs-requirements
docs-requirements:
	$(PYTHON) -m pip install -r docs/requirements.txt -q
.PHONY: docs-only
# Build the HTML docs; BUILD_DIR is prepended to PATH so sphinx can invoke
# freshly built tools from the build tree.
docs-only:
	cd docs; PATH="$(BUILD_DIR):$(PATH)" $(PYTHON) -m sphinx . _build/html/main
.PHONY: docs
# Install doc requirements before building. Fixed: ".NOPARALLEL" is not a GNU
# Make special target and was silently ignored; .NOTPARALLEL is the real one
# (per-target prerequisite serialization needs GNU Make >= 4.4).
.NOTPARALLEL: docs
docs: docs-requirements docs-only