Skip to content

Commit 98097a8

Browse files
committed
Try windows workflow
Add Windows workflow. Try adding vulkan to Windows build. Try to switch to newer fedora for newer vulkan. Try to add explicit vulkan dep to meson. Try to switch win64 build to dynamic llama.cpp. Fix llama linkage? Link against common statically again. Missing comma. Further workflow adjustments. Tweak flags.
1 parent eb1aaf6 commit 98097a8

File tree

4 files changed

+125
-2
lines changed

4 files changed

+125
-2
lines changed

.github/workflows/windows.yml

Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,67 @@
# Nightly cross-compile of Autopen for Windows x86_64 using the Fedora MinGW
# toolchain inside a Fedora 40 container, packaged as a zip artifact.
name: MinGW Windows Nightly Build

on:
  push:
    branches:
      - master

jobs:
  mingw:
    runs-on: ubuntu-latest
    container:
      image: registry.fedoraproject.org/fedora:40
      options: --privileged
    steps:
      - uses: actions/checkout@v4
      - name: Download dependencies 📥
        run: |
          # NOTE(review): added 'zip' here — the packaging step below calls
          # 'zip -9r' but the Fedora container image does not ship zip by default.
          dnf install -y "dnf-command(config-manager)" curl git patch zip
          dnf install -y gcc-c++ cmake meson
          dnf install -y mingw64-filesystem mingw64-gcc-c++
          dnf install -y mingw64-winpthreads-static
          dnf install -y mingw64-gtkmm30 mingw64-gtksourceviewmm3 mingw64-jsoncpp mingw64-zlib mingw64-fontconfig mingw64-librsvg2
          dnf install -y mingw64-vulkan-headers mingw64-vulkan-loader mingw64-vulkan-tools
          dnf install -y adwaita-icon-theme gtk-update-icon-cache
      - name: Fetch and build llama.cpp
        run: |
          git clone https://github.com/ggerganov/llama.cpp.git
          pushd llama.cpp/
          # Pin to a known-good llama.cpp revision so nightly builds stay reproducible.
          git reset --hard 88540445
          patch -p1 < ../llama.patch
          # Shared libllama: the application links against the DLL that is copied
          # into the distribution folder in the packaging step below.
          cmake -DCMAKE_TOOLCHAIN_FILE=/usr/share/mingw/toolchain-mingw64.cmake -DBUILD_SHARED_LIBS=ON .
          make llama && make common
          popd
      - name: Configure 🔧
        run: meson --prefix=/ --cross-file=/usr/share/mingw/toolchain-mingw64.meson --default-library shared bin-x86_64-w64-mingw32
      - name: Compile 🎲
        run: ninja -C bin-x86_64-w64-mingw32
      - name: Build Windows-compatible directory 📁
        run: |
          mkdir dist
          DESTDIR="$PWD/dist/" ninja -C bin-x86_64-w64-mingw32 install
          # we'll ship the whole bin/ folder for now
          cp -r /usr/x86_64-w64-mingw32/sys-root/mingw/bin/* dist/bin/
          mkdir dist/bin/share
          mkdir dist/bin/data
          # copy share dir from mingw root to release
          cp -r /usr/x86_64-w64-mingw32/sys-root/mingw/share/* dist/bin/share/
          # add gtk-pixbuf libs
          mkdir dist/bin/lib/
          cp -r /usr/x86_64-w64-mingw32/sys-root/mingw/lib/gdk-pixbuf-2.0 dist/bin/lib/
          # I'd advise against patching auto-generated files, however to run gtk-pixbuf-query-loaders.exe we'd need wine
          sed 's|^"\.\./lib/|"./lib/|' -i dist/bin/lib/gdk-pixbuf-2.0/2.10.0/loaders.cache
          # add Adwaita icons
          cp -r /usr/share/icons/Adwaita/ dist/bin/data/icons/
          cp LICENSE dist/
          # copy llama dll
          cp llama.cpp/bin/*.dll dist/bin/
          mv dist/bin/ dist/autopen/
          pushd dist/
          zip -9r autopen_mingw.zip autopen/ LICENSE
          popd
      - name: Upload nightly Windows build 📤
        uses: actions/upload-artifact@v4
        with:
          name: Autopen - Windows x86_64 nightly build
          path: dist/autopen_mingw.zip

main.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ int main (int argc, char *argv[])
1414
#endif
1515
{
1616
#ifdef _WIN32
17-
if (g_getenv("NK_WIN32_DEBUG") && AllocConsole()) {
17+
if (AllocConsole()) {
1818
freopen("CONOUT$", "w", stdout);
1919
freopen("CONOUT$", "w", stderr);
2020
}

meson.build

Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
# Build definition for autopen: a GTK front-end linked against llama.cpp
# (shared libllama, static libcommon).
project('autopen', 'cpp',
  version : '0.1.0',
  default_options : ['warning_level=3', 'cpp_std=c++14']
)

# meson.add_devenv() is only available from meson 0.58.0 onwards.
has_devenv = meson.version().version_compare('>=0.58.0')
if has_devenv
  devenv = environment()
endif

conf_data = configuration_data()
conf_data.set('version', meson.project_version())

gnome = import('gnome')

llama_root = meson.current_source_dir() + '/llama.cpp'

# libllama is pulled in dynamically (a DLL on the Windows builds)...
llama_shared_dep = declare_dependency(
  link_args : ['-Wl,-Bdynamic', '-L' + llama_root + '/bin', '-lllama']
)
# ...while llama.cpp's "common" helper library is linked in statically.
llama_common_dep = declare_dependency(
  link_args : [llama_root + '/common/libcommon.a']
)

deps = [
  dependency('gtkmm-3.0'),
  dependency('gtksourceviewmm-3.0'),
  dependency('zlib'),
  dependency('fontconfig'),
  dependency('vulkan'),
  # X11 GDK backend is optional — absent on the Windows cross-build.
  dependency('gdk-x11-3.0', required : false),
  llama_shared_dep,
  llama_common_dep,
]

src = [
  'main.cpp',
  'mainwindow.cpp',
  'tokentree.cpp',
]

incdir = include_directories('llama.cpp', 'llama.cpp/common/')

# win_subsystem was introduced in meson 0.56.0; older meson builds a console
# subsystem binary instead.
if meson.version().version_compare('>=0.56.0')
  executable('autopen', src,
    dependencies : deps,
    include_directories : incdir,
    install : true,
    win_subsystem : 'windows'
  )
else
  executable('autopen', src,
    dependencies : deps,
    include_directories : incdir,
    install : true
  )
endif

if has_devenv
  meson.add_devenv(devenv)
endif

tokentree.cpp

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,8 @@ void LLMBuffer::init()
1010
/* init llama.cpp */
1111
gpt_params params;
1212

13+
params.n_gpu_layers = 99;
14+
1315
llama_backend_init();
1416
llama_numa_init(params.numa);
1517

@@ -21,7 +23,9 @@ void LLMBuffer::init()
2123
//model = llama_load_model_from_file("llama.cpp/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf", model_params);
2224
//model = llama_load_model_from_file("llama.cpp/openhermes-2-mistral-7b.Q4_K_M.gguf", model_params);
2325
//model = llama_load_model_from_file("llama.cpp/stablelm-zephyr-3b.Q4_K_M.gguf", model_params);
24-
model = llama_load_model_from_file("qwen2.5-1.5b-instruct-q4_k_m.gguf", model_params);
26+
//model = llama_load_model_from_file("qwen2.5-1.5b-instruct-q4_k_m.gguf", model_params);
27+
model = llama_load_model_from_file("Qwen2.5-3B.Q4_K_M.gguf", model_params);
28+
//model = llama_load_model_from_file("Phi-3.5-mini-instruct-Q4_K_M.gguf", model_params);
2529

2630
if (model == NULL) {
2731
fprintf(stderr , "%s: error: unable to load model\n" , __func__);

0 commit comments

Comments
 (0)