Skip to content

Commit 994d5c3

Browse files
authored
Merge pull request #12 from stratika/feat/windows-support
Windows support for GPULlama3.java
2 parents fd7fa8e + b299d18 commit 994d5c3

File tree

4 files changed

+109
-6
lines changed

4 files changed

+109
-6
lines changed

Makefile

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,6 @@ clean:
1414
# Package the project without running tests
1515
package:
1616
mvn package -DskipTests
17-
. ./set_paths
1817

1918

2019
# Combined clean and package

README.md

Lines changed: 37 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,10 @@ cd GPULlama3.java
9191

9292
# Update the submodules to match the exact commit point recorded in this repository
9393
git submodule update --recursive
94+
```
9495

96+
#### On Linux or macOS
97+
```bash
9598
# Enter the TornadoVM submodule directory
9699
cd external/tornadovm
97100

@@ -110,9 +113,6 @@ source setvars.sh
110113
# Navigate back to the project root directory
111114
cd ../../
112115

113-
# Make the llama-tornado script executable
114-
chmod +x llama-tornado
115-
116116
# Source the project-specific environment paths -> this will ensure the correct paths are set for the project and the TornadoVM SDK
117117
# Expect to see: [INFO] Environment configured for Llama3 with TornadoVM at: /home/YOUR_PATH_TO_TORNADOVM
118118
source set_paths
@@ -124,6 +124,39 @@ make
124124
# Run the model (make sure you have downloaded the model file first - see below)
125125
./llama-tornado --gpu --verbose-init --opencl --model beehive-llama-3.2-1b-instruct-fp16.gguf --prompt "tell me a joke"
126126
```
127+
128+
#### On Windows
129+
```cmd
130+
# Enter the TornadoVM submodule directory
131+
cd external/tornadovm
132+
133+
# Optional: Create and activate a Python virtual environment if needed
134+
python -m venv .venv
135+
.venv\Scripts\activate.bat
136+
.\bin\windowsMicrosoftStudioTools2022.cmd
137+
138+
# Install TornadoVM with a supported JDK 21 and select the backends (--backend opencl,ptx).
139+
# To see the compatible JDKs run: ./bin/tornadovm-installer --listJDKs
140+
# For example, to install with OpenJDK 21 and build the OpenCL backend, run:
141+
python bin\tornadovm-installer --jdk jdk21 --backend opencl
142+
143+
# Source the TornadoVM environment variables
144+
setvars.cmd
145+
146+
# Navigate back to the project root directory
147+
cd ../../
148+
149+
# Source the project-specific environment paths -> this will ensure the correct paths are set for the project and the TornadoVM SDK
150+
# Expect to see: [INFO] Environment configured for Llama3 with TornadoVM at: C:\Users\YOUR_PATH_TO_TORNADOVM
151+
set_paths.cmd
152+
153+
# Build the project using Maven (skip tests for faster build)
154+
# mvn clean package -DskipTests or just make
155+
make
156+
157+
# Run the model (make sure you have downloaded the model file first - see below)
158+
python llama-tornado --gpu --verbose-init --opencl --model beehive-llama-3.2-1b-instruct-fp16.gguf --prompt "tell me a joke"
159+
```
127160
-----------
128161

129162
The above model can be swapped with one of the other models, such as `beehive-llama-3.2-3b-instruct-fp16.gguf` or `beehive-llama-3.2-8b-instruct-fp16.gguf`, depending on your needs.
@@ -182,7 +215,7 @@ Run a model with a text prompt:
182215
#### GPU Execution (FP16 Model)
183216
Enable GPU acceleration with the FP16 model:
184217
```bash
185-
llama-tornado --gpu --verbose-init --model beehive-llama-3.2-1b-instruct-fp16.gguf --prompt "tell me a joke"
218+
./llama-tornado --gpu --verbose-init --model beehive-llama-3.2-1b-instruct-fp16.gguf --prompt "tell me a joke"
186219
```
187220

188221
-----------

llama-tornado

Lines changed: 43 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ import os
99
import subprocess
1010
import sys
1111
import time
12+
import platform
1213
from pathlib import Path
1314
from typing import List, Optional, Dict, Any
1415
from enum import Enum
@@ -44,6 +45,11 @@ class LlamaRunner:
4445
print(f"Error: {name} path does not exist: {path}")
4546
sys.exit(1)
4647

48+
@staticmethod
49+
def module_path_colon_sep(paths: List[str]) -> str:
50+
"""Return OS-specific separator for Java module paths."""
51+
return ";".join(paths) if platform.system() == "Windows" else ":".join(paths)
52+
4753
def _build_base_command(self, args: argparse.Namespace) -> List[str]:
4854
"""Build the base Java command with JVM options."""
4955
cmd = [
@@ -56,7 +62,7 @@ class LlamaRunner:
5662
"--enable-preview",
5763
f"-Djava.library.path={self.tornado_sdk}/lib",
5864
"-Djdk.module.showModuleResolution=false",
59-
"--module-path", f".:{self.tornado_sdk}/share/java/tornado",
65+
"--module-path", self.module_path_colon_sep([".", f"{self.tornado_sdk}/share/java/tornado"]),
6066
]
6167

6268
# TornadoVM configuration
@@ -221,6 +227,41 @@ class LlamaRunner:
221227
print(f"Error: {e}")
222228
return 1
223229

230+
def _apply_env_lines(lines):
    """Copy KEY=VALUE pairs from *lines* into os.environ.

    Splits on the first '=' only, so values may themselves contain '='.
    Lines whose parsed name is empty are skipped — Windows' `set` output
    includes hidden drive variables such as "=C:=C:\\..." whose name
    starts with '=', and os.environ rejects an empty variable name.
    """
    for line in lines:
        if '=' in line:
            key, value = line.strip().split('=', 1)
            if key:
                os.environ[key] = value


def load_env_from_script():
    """Run the platform's set_paths script and absorb its environment.

    The script is executed in a child shell; the environment the shell
    prints afterwards is parsed and copied into this process so that
    later subprocess invocations inherit the TornadoVM/LLaMA paths.
    Exits the process with status 1 if the script fails or the OS is
    unsupported.
    """
    system = platform.system()

    if system == "Windows":
        # Run set_paths.cmd, then `set` prints the resulting environment.
        result = subprocess.run(
            ["cmd.exe", "/c", "set_paths.cmd && set"],
            capture_output=True, text=True, shell=False,
        )
        failure_msg = "Failed to run set_paths.cmd"
    elif system in ("Linux", "Darwin"):
        # Source the set_paths file, then `env` prints the environment.
        result = subprocess.run(
            ["bash", "-c", "source ./set_paths && env"],
            capture_output=True, text=True,
        )
        failure_msg = "Failed to source set_paths"
    else:
        print(f"Unsupported OS: {system}")
        sys.exit(1)

    if result.returncode != 0:
        print(failure_msg)
        sys.exit(1)

    _apply_env_lines(result.stdout.splitlines())
264+
224265
def create_parser() -> argparse.ArgumentParser:
225266
"""Create and configure the argument parser."""
226267
parser = argparse.ArgumentParser(
@@ -317,6 +358,7 @@ def create_parser() -> argparse.ArgumentParser:
317358

318359
def main():
319360
"""Main entry point."""
361+
load_env_from_script()
320362
parser = create_parser()
321363
args = parser.parse_args()
322364

set_paths.cmd

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,29 @@
1+
@echo off
REM ============================================
REM Environment setup script for LLaMA3 + TornadoVM (Windows)
REM ============================================

REM Resolve the absolute path to this script's directory
REM (%~dp0 expands to the script's drive + path, with a trailing backslash)
set "LLAMA_ROOT=%~dp0"
REM Strip the trailing backslash so later path concatenations stay clean
set "LLAMA_ROOT=%LLAMA_ROOT:~0,-1%"

REM Set TornadoVM root and SDK paths
set "TORNADO_ROOT=%LLAMA_ROOT%\external\tornadovm"
set "TORNADO_SDK=%TORNADO_ROOT%\bin\sdk"

REM Add TornadoVM SDK and LLaMA3 bin to PATH (prepended so they win)
set "PATH=%TORNADO_SDK%;%LLAMA_ROOT%\bin;%PATH%"

REM Optional: Set JAVA_HOME if needed
REM set "JAVA_HOME=C:\Path\To\GraalVM"
REM set "PATH=%JAVA_HOME%\bin;%PATH%"

echo [INFO] Environment configured for LLaMA3 with TornadoVM at: %TORNADO_ROOT%

REM ===== Notes =====
REM After running this script:
REM 1. TornadoVM will be available for GPU computation
REM 2. LLaMA3 command-line tools will be in your PATH
REM 3. You can run LLaMA3 with GPU acceleration using TornadoVM
REM
REM To use this script: call set_paths.cmd
REM (use `call` so the environment changes persist in the calling shell)

0 commit comments

Comments
 (0)