Commit e808515

add 'preparatory' changes for binary releases
1 parent 8addc3e commit e808515

File tree

11 files changed: +300 −37 lines


.dev-scripts/README.md

Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
+# Development Scripts
+
+To publish a binary release with docker (for CUDA), run (replace engine version and cuda version):
+```bash
+./.dev-scripts/publish-with-docker.sh v0.2.3 11.8
+```

.dev-scripts/extract_install_cmd.py

Lines changed: 6 additions & 14 deletions
@@ -7,7 +7,7 @@
 parser.add_argument("custom_pytorch_path", help="Path to custom PyTorch wheel")
 args = parser.parse_args()
 
-BLOCK_HEADER_START = "### Conda on Linux"
+BLOCK_HEADER_START = "#### Conda on Linux"
 
 with open("README.md") as infile:
     content = infile.readlines()
@@ -22,18 +22,10 @@
 
 FILE_INTRO = """#!/usr/bin/env bash
 
-function check_error() {
-    # shows and then runs a command. if the exit code is not zero, aborts the script
-    # usage: check_error mv foo bar
+trap exit INT
+set -o errexit
+set -o xtrace
 
-    echo + $@
-    "$@"
-    local exit_code=$?
-    if [ "${exit_code}" -ne 0 ]; then
-        echo "! > An error occured, aborting."
-        exit 1
-    fi
-}
 """
 EXTRA_CONDA_INSTRUCTION = """# extra step for bash script (not required in a proper command line):
 eval "$(conda shell.bash hook)"
@@ -71,14 +63,14 @@
 
     # replace some line contents and add some lines
     if "conda activate" in line:
-        line = EXTRA_CONDA_INSTRUCTION + "check_error " + line
+        line = EXTRA_CONDA_INSTRUCTION + line
    if "export BITORCH_WORKSPACE" in line:
        line = line.replace("${HOME}", "$(pwd)")
    if line.startswith("pip install torch-"):
        line = "pip install {}\n".format(args.custom_pytorch_path)

    # decide how to write line
-    line_format = "check_error {line}"
+    line_format = "{line}"
    if line.startswith("#"):
        line_format = "{line}"
    if insert_block_pause:
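
With this change, the generated install script no longer wraps each command in check_error; instead its header (a sketch reconstructed from the new FILE_INTRO above, with explanatory comments added) relies on the shell's own error handling:

```bash
#!/usr/bin/env bash

trap exit INT    # exit cleanly on Ctrl+C instead of continuing with the next command
set -o errexit   # abort the script as soon as any command fails
set -o xtrace    # print each command before it is executed

# ... install commands extracted from README.md are appended below this header ...
```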
.dev-scripts/publish-docker-internal.sh

Lines changed: 130 additions & 0 deletions
@@ -0,0 +1,130 @@
+#!/usr/bin/env bash
+
+function usage() {
+    echo "./.dev-scripts/publish-docker-internal.sh OPTION"
+    echo "builds a package and publishes it to (test-)pypi"
+    echo
+    echo "OPTION must be either 'pre-release' or 'release'."
+}
+
+if ! [ "$#" = "1" ] || [ "${1}" = "-h" ]; then
+    usage
+    exit
+fi
+
+if ! [ "${1}" = "release" ] && ! [ "${1}" = "pre-release" ]; then
+    usage
+    exit
+fi
+
+trap exit INT
+
+function check_yes() {
+    # asks the given yes or no question, returns true if they answer YES
+    # usage:
+    # if check_yes "Do you really want to delete foo?"; then
+    #     rm foo
+    # fi
+
+    local prompt="${1}"
+    read -p "${prompt} [y/N] " REPLY
+    echo ""
+    if [[ ! "${REPLY}" =~ ^[Yy]$ ]]; then
+        return 1
+    fi
+    return 0
+}
+
+function check_no() {
+    # asks the given yes or no question, returns false if they answer NO
+    # usage:
+    # if check_no "Do you want to exit the script?"; then
+    #     exit 0
+    # fi
+
+    local prompt="${1}"
+    read -p "${prompt} [Y/n] " REPLY
+    echo ""
+    if [[ "${REPLY}" =~ ^[Nn]$ ]]; then
+        return 1
+    fi
+    return 0
+}
+
+function check_error() {
+    # shows and then runs a command. if the exit code is not zero, asks the user whether to continue
+    # usage: check_error mv foo bar
+
+    echo + $@
+    "$@"
+    local exit_code=$?
+    if [ "${exit_code}" -ne 0 ]; then
+        if ! check_yes "! > An error occurred, continue with the script?"; then
+            if [ "${1}" = "pre-release" ]; then
+                git checkout "${version_file}"
+            fi
+            exit 1
+        fi
+    fi
+}
+
+SRC_ROOT="${BITORCH_ENGINE_ROOT:-/bitorch-engine}"
+check_error [ -f "${SRC_ROOT}/setup.py" ]
+cd "${SRC_ROOT}"
+
+# main script content
+
+if [ -z "$(git status --porcelain)" ]; then
+    echo "Git seems clean."
+else
+    if check_yes "Git not clean. Do you want to see the diff to proceed?"; then
+        git diff
+        if ! check_yes "Proceed with these differences?"; then
+            echo "There are uncommitted changes, aborting."
+            exit 1
+        fi
+    else
+        echo "Git not clean, aborting."
+        exit 1
+    fi
+fi
+
+if [ "${1}" = "release" ]; then
+    version_file="${SRC_ROOT}/version.txt"
+    version_content="$(cat "${version_file}")"
+    major_minor_patch="$(cut -d '.' -f 1,2,3 <<< "${version_content}")"
+    version_str="${major_minor_patch}"
+else
+    version_file="${SRC_ROOT}/version.txt"
+    version_content="$(cat "${version_file}")"
+    major_minor_patch="$(cut -d '.' -f 1,2,3 <<< "${version_content}")"
+    date_str="$(date +"%Y%m%d")"
+    git_ref="$(git rev-parse --short HEAD)"
+    version_str="${major_minor_patch}.dev${date_str}+${git_ref}"
+fi
+
+if [ "${1}" = "release" ] && ! [ "${version_content}" = "${version_str}" ]; then
+    echo "The file version.txt does not seem to contain a release version."
+    exit 1
+fi
+
+echo "Building version ${version_str}."
+
+if [ "${1}" = "pre-release" ]; then
+    echo "${version_str}" > "${version_file}"
+fi
+
+pip install build
+
+#check_error pytest .
+
+check_error python3 -m build --no-isolation --wheel
+
+echo "To publish to real PyPI use:"
+echo " python3 -m twine dist-${PUBLISH_BIE_VERSION}/*"
+echo "To publish to test PyPI use:"
+echo " python3 -m twine upload --repository testpypi dist-${PUBLISH_BIE_VERSION}/*"
+
+if [ "${1}" = "pre-release" ]; then
+    git checkout "${version_file}"
+fi
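
To illustrate the version logic in the script above (the values below are hypothetical example inputs, not taken from the commit): a release build uses the contents of version.txt as-is, while a pre-release build appends the current date and the short git hash:

```bash
# hypothetical example values for a pre-release build:
version_content="0.2.5"                                               # contents of version.txt
major_minor_patch="$(cut -d '.' -f 1,2,3 <<< "${version_content}")"   # -> 0.2.5
date_str="20240524"                                                   # output of date +"%Y%m%d"
git_ref="e808515"                                                     # output of git rev-parse --short HEAD
version_str="${major_minor_patch}.dev${date_str}+${git_ref}"
echo "${version_str}"                                                 # -> 0.2.5.dev20240524+e808515
```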
.dev-scripts/publish-with-docker.sh

Lines changed: 85 additions & 0 deletions
@@ -0,0 +1,85 @@
+#!/usr/bin/env bash
+
+set -o xtrace
+
+function usage() {
+    echo "./.dev-scripts/publish-with-docker.sh BIE_VERSION [CUDA_VERSION]"
+    echo "builds a package and publishes it to (test-)pypi"
+    echo
+    echo "BIE_VERSION must be a version string like 'v1.2.3'."
+    echo "optional: CUDA_VERSION can be either '11.8' (default) or '12.1'."
+}
+
+trap exit INT
+
+if ! ((1 <= $# && $# <= 2)) || [ "${1}" = "-h" ]; then
+    usage
+    exit
+fi
+
+export PUBLISH_BIE_VERSION="${1}"
+CUDA_VERSION="${2:-11.8}"
+
+if ! [[ "${PUBLISH_BIE_VERSION}" =~ ^v[0-9].[0-9].[0-9]$ ]]; then
+    echo "Invalid BIE_VERSION '${PUBLISH_BIE_VERSION}' given."
+    echo
+    usage
+    exit
+fi
+
+cuda_known="false"
+add_build_arg=""
+cuda_abbrev="unknown"
+if [ "${CUDA_VERSION}" = "11.8" ]; then
+    cuda_known="true"
+    cuda_abbrev="cu118"
+    torch_requirement="torch==2.2.2"
+fi
+if [ "${CUDA_VERSION}" = "12.1" ]; then
+    cuda_known="true"
+    cuda_abbrev="cu121"
+    add_build_arg="--build-arg FROM_IMAGE=pytorch/pytorch:2.2.0-cuda12.1-cudnn8-devel"
+    torch_requirement="torch==2.2.2"
+fi
+if [ "${cuda_known}" = "false" ]; then
+    echo "Unknown CUDA_VERSION '${CUDA_VERSION}' given."
+    echo
+    usage
+    exit
+fi
+
+echo "building bitorch engine ${PUBLISH_BIE_VERSION}"
+echo "building for cuda ${CUDA_VERSION}"
+
+bie_image_tag="bitorch/engine:publish-${cuda_abbrev}-${PUBLISH_BIE_VERSION}"
+bie_container_name="bie-${cuda_abbrev}-${PUBLISH_BIE_VERSION}"
+output_folder="./dist/${cuda_abbrev}"
+
+# build/tag docker image
+pushd docker
+docker build --target no-examples ${add_build_arg} --build-arg GIT_BRANCH="${PUBLISH_BIE_VERSION}" -t "${bie_image_tag}" .
+popd
+
+docker container create -it \
+    --rm \
+    -it \
+    -v "${output_folder}:/bitorch-engine/dist" \
+    --name "${bie_container_name}" \
+    -e PUBLISH_BIE_VERSION \
+    -e BIE_FORCE_CUDA="true" \
+    -e BIE_SKIP_BUILD="true" \
+    -e BIE_TORCH_REQUIREMENT="${torch_requirement}" \
+    -e BIE_WHEEL_PLATFORM="linux_x86_64" \
+    -w /bitorch-engine \
+    "${bie_image_tag}" \
+    /workspace/publish-docker-internal.sh release
+
+# make sure correct version is set
+echo "${PUBLISH_BIE_VERSION#v}" > version.txt && docker container cp version.txt "${bie_container_name}":/bitorch-engine
+docker container cp .dev-scripts/publish-docker-internal.sh "${bie_container_name}":/workspace
+
+# for previous versions, we need to manually overwrite setup.py:
+# TODO: can (hopefully) be removed later on
+docker container cp setup.py "${bie_container_name}":/bitorch-engine
+
+docker start -ai "${bie_container_name}"
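
Taken together with publish-docker-internal.sh, a typical host-side invocation looks like the sketch below (the version tag is illustrative); the finished wheel lands in ./dist/<cuda_abbrev> via the mounted volume:

```bash
# example invocation (assumes the git tag exists); builds a cu121 wheel into ./dist/cu121
./.dev-scripts/publish-with-docker.sh v0.2.5 12.1
```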

CHANGELOG.md

Lines changed: 15 additions & 0 deletions
@@ -5,6 +5,21 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/)
 and this project adheres to [Semantic Versioning](http://semver.org/).
 
 
+## [0.2.5] - 2024/05/24
+
+### Added
+
+- Development scripts for preparing binary releases
+
+### Changed
+
+- Updated build instructions to clarify torchvision installation
+- Adapted `setup.py` logic for preparing binary releases
+
+### Fixed
+
+- Broken build process by setting setuptools version
+
 ## [0.2.4] - 2024/05/23
 
 ### Added

README.md

Lines changed: 31 additions & 14 deletions
@@ -49,7 +49,14 @@ In addition, for layers to speed up on specific hardware (such as CUDA devices,
 - **[MLX](https://github.com/ml-explore/mlx)** for mlx-based layers on MacOS
 - **[CUTLASS](https://github.com/NVIDIA/cutlass)** for cutlass-based layers
 
-Currently, the engine **needs to be built from source**.
+### Binary Releases (coming soon)
+
+We are currently preparing experimental binary releases.
+Their installation will be documented in this section.
+For now, please follow the guide below to build from source.
+
+### Build From Source
+
 We provide instructions for the following options:
 
 - Conda + Linux (with CUDA and cutlass)
@@ -60,7 +67,7 @@ We recommend managing your BITorch Engine installation in a conda environment (o
 You may want to keep everything (environment, code, etc.) in one directory or use the default directory for conda environments.
 You may wish to adapt the CUDA version to 12.1 where applicable.
 
-### Conda on Linux (with CUDA)
+#### Conda on Linux (with CUDA)
 
 To use these instructions, you need to have [conda](https://conda.io/projects/conda/en/latest/user-guide/getting-started.html) and a suitable C++ compiler installed.
 
@@ -73,11 +80,12 @@ conda activate bitorch-engine
 ```bash
 conda install -y -c "nvidia/label/cuda-11.8.0" cuda-toolkit
 ```
-3. [Download customized Torch 2.1.0](https://drive.google.com/drive/folders/1T22b8JhN-E3xbn3h332rI1VjqXONZeB7?usp=sharing) (it allows gradients on INT tensors, built for Python 3.9 and CUDA 11.8) and install it with pip:
+3. Download our customized torch for CUDA 11.8 and Python 3.9, it allows gradients on INT tensors and install it with pip (you can find other versions [here](https://packages.greenbit.ai/whl/)):
 ```bash
-pip install torch-2.1.0-cp39-cp39-linux_x86_64.whl
-# optional: install corresponding torchvision (check https://github.com/pytorch/vision?tab=readme-ov-file#installation in the future)
+pip install "https://packages.greenbit.ai/whl/cu118/torch/torch-2.1.0-cp39-cp39-linux_x86_64.whl"
+# as bitorch currently requires torchvision, we need to install a version for our correct CUDA (otherwise it will reinstall torch)
 pip install "torchvision==0.16.0" --index-url https://download.pytorch.org/whl/cu118
+# (check https://github.com/pytorch/vision?tab=readme-ov-file#installation in the future)
 ```
 4. To use cutlass layers, you should also install CUTLASS 2.8.0 (from source), adjust `CUTLASS_HOME` (this is where we clone and install cutlass)
 (if you have older or newer GPUs you may need to add your [CUDA compute capability](https://developer.nvidia.com/cuda-gpus) in `CUTLASS_NVCC_ARCHS`):
@@ -112,13 +120,12 @@ conda activate ./conda-env
 ```bash
 conda install -y -c "nvidia/label/cuda-11.8.0" cuda-toolkit
 ```
-3. [Download customized Torch 2.1.0](https://drive.google.com/drive/folders/1T22b8JhN-E3xbn3h332rI1VjqXONZeB7?usp=sharing),
-select the package fit for the cuda version you installed in the previous step
-(it allows gradients on INT tensors, built for Python 3.9 and CUDA 11.8) and install it with pip:
+3. Download our customized torch for CUDA 11.8 and Python 3.9, it allows gradients on INT tensors and install it with pip (you can find other versions [here](https://packages.greenbit.ai/whl/)):
 ```bash
-pip install torch-2.1.0-cp39-cp39-linux_x86_64.whl
-# optional: install corresponding torchvision (check https://github.com/pytorch/vision?tab=readme-ov-file#installation in the future)
+pip install "https://packages.greenbit.ai/whl/cu118/torch/torch-2.1.0-cp39-cp39-linux_x86_64.whl"
+# as bitorch currently requires torchvision, we need to install a version for our correct CUDA (otherwise it will reinstall torch)
 pip install "torchvision==0.16.0" --index-url https://download.pytorch.org/whl/cu118
+# (check https://github.com/pytorch/vision?tab=readme-ov-file#installation in the future)
 ```
 4. To use cutlass layers, you should also install CUTLASS 2.8.0
 (if you have older or newer GPUs you may need to add your [CUDA compute capability](https://developer.nvidia.com/cuda-gpus) in `CUTLASS_NVCC_ARCHS`):
@@ -149,7 +156,7 @@ cd bitorch-engine
 CPATH="${CUTLASS_HOME}/install/include" CUDA_HOME="${CONDA_PREFIX}" pip install -e . -v
 ```
 
-### Docker (with CUDA)
+#### Docker (with CUDA)
 
 You can also use our prepared Dockerfile to build a docker image (which includes building the engine under `/bitorch-engine`):
 
@@ -161,7 +168,7 @@ docker run -it --rm --gpus all --volume "/path/to/your/project":"/workspace" bit
 
 Check the [docker readme](docker/README.md) for options and more details.
 
-### Conda on MacOS (with MLX)
+#### Conda on MacOS (with MLX)
 
 1. We recommend to create a virtual environment for and activate it. In the following example we use a conda environment for python 3.9,
 but virtualenv should work as well.
@@ -171,9 +178,11 @@ conda activate bitorch-engine
 ```
 2. Download [customized Torch for MacOS/arm](https://drive.google.com/drive/folders/1T22b8JhN-E3xbn3h332rI1VjqXONZeB7?usp=sharing) (it allows gradients on INT tensors, built for Python 3.9 and CUDA 11.8) and install it with pip:
 ```bash
-pip install path/to/torch-2.2.1-cp39-none-macosx_11_0_arm64.whl
-# optional: install corresponding torchvision (check https://github.com/pytorch/vision?tab=readme-ov-file#installation in the future)
+pip install "https://packages.greenbit.ai/whl/macosx/torch/torch-2.2.1-cp39-none-macosx_11_0_arm64.whl"
+# as bitorch currently requires torchvision, we need to install a version for our correct CUDA (otherwise it will reinstall torch)
 pip install "torchvision==0.17.1"
+# (check https://github.com/pytorch/vision?tab=readme-ov-file#installation in the future)
+
 ```
 3. For MacOS users and to use OpenMP acceleration, install OpenMP with Homebrew and configure the environment:
 ```bash
@@ -224,6 +233,14 @@ you can still build the extensions that depend on CUDA, by setting `BIE_FORCE_CU
 BIE_FORCE_CUDA="true" pip install -e . -v
 ```
 
+### Skip Library File Building
+
+If you just want to avoid rebuilding any files, you can set `BIE_SKIP_BUILD`:
+```bash
+BIE_SKIP_BUILD="true" python3 -m build --no-isolation --wheel
+```
+This would create a wheel and package `.so` files without trying to rebuild them.
+
 ## Development
 
 To adjust the build options or address build failures, modify the configurations in
