Commit e42ca2b

[mac-ai] Work on the releasing process (#785)
## Summary by CodeRabbit

* **New Features**
  * Added new environment variables for logging and provider selection in Podman machine startup scripts.
  * Expanded support for multiple GGML backend libraries in remoting environments and packaging.
* **Bug Fixes**
  * Corrected handling of version and branch references in configuration and version link formatting.
  * Fixed use of mutable default arguments in remote access utilities.
* **Refactor**
  * Simplified and unified handling of backend libraries in packaging and preparation scripts.
  * Modularized and improved error handling in update scripts for krunkit and related binaries.
  * Removed legacy `git_ref` logic in favor of streamlined version-based workflows.
* **Style**
  * Standardized informational and error messages in scripts for improved clarity.
* **Chores**
  * Updated test platform configurations and build flags for improved compatibility and maintainability.
Merge commit e42ca2b, 2 parents: fa53bfb + 4905ef6

9 files changed, +284 −136 lines

projects/mac_ai/testing/config.yaml

Lines changed: 12 additions & 15 deletions
```diff
@@ -63,8 +63,7 @@ ci_presets:
 
   cpt_ramalama:
     extends: [use_intlab_os, analyze_kpis, export_kpis]
-    prepare.ramalama.repo.version: null
-    prepare.ramalama.repo.git_ref: main
+    prepare.ramalama.repo.version: main
     prepare.ramalama.build_image.enabled: true
 
     test.platform: [podman/ramalama]
@@ -95,22 +94,20 @@ ci_presets:
   remoting_publish:
     extends: [remoting]
     prepare.remoting.publish: true
-    prepare.llama_cpp.source.repo.version: b5709-v250627
-    prepare.virglrenderer.repo.branch: v25-06-18
-    prepare.ramalama.build_image.enabled: "remoting"
+    prepare.llama_cpp.source.repo.version: b5709-v250701.0
+    prepare.virglrenderer.repo.branch: v1.1.1-remoting-0.1
+    prepare.ramalama.build_image.enabled: true
     prepare.ramalama.build_image.publish.enabled: true
     prepare.ramalama.build_image.registry_path: quay.io/crcont
     prepare.ramalama.build_image.name: remoting
     prepare.ramalama.repo.url: https://github.com/kpouget/ramalama
-    prepare.ramalama.repo.git_ref: v0.9.3-remoting-rc5
-    prepare.ramalama.repo.version: null
+    prepare.ramalama.repo.version: v0.10.0-remoting-0.2.0
 
     test.platform:
     - podman/ramalama/remoting
     - podman/llama_cpp/remoting
     - macos/llama_cpp/metal
-    - macos/llama_cpp/vulkan
-    # - podman/llama_cpp/vulkan # not working today
+    - podman/llama_cpp/vulkan
 
   remoting_testing:
     extends: [remoting]
@@ -310,11 +307,11 @@ prepare:
     repo:
       url: https://github.com/containers/ramalama
       version: latest
-      git_ref:
     build_image:
      enabled: false
      name: ramalama
      registry_path: localhost
+      debug: false
      publish:
        enabled: false
        credentials: "*[email protected]_registry"
@@ -345,7 +342,7 @@ prepare:
      url: https://gitlab.freedesktop.org/kpouget/virglrenderer
      branch: main
    build:
-      flags: -Dvenus=true -Dc_link_args=-L/opt/homebrew/lib/
+      flags: -Dapir=true -Dc_link_args=-L/opt/homebrew/lib/
 
  podman:
    repo:
@@ -385,10 +382,10 @@ prepare:
        CONTAINERS_HELPER_BINARY_DIR: /opt/homebrew/bin/
      remoting_env:
        enabled: false
-        apir_lib:
-          name: libggml-remotingbackend.dylib
-        ggml_lib:
-          name: libggml-metal.dylib
+        ggml_libs:
+        - libggml-remotingbackend.dylib # keep the APIR backend first
+        - libggml-metal.dylib # keep the GGML backend second
+        - libggml-base.dylib
        env:
          APIR_LLAMA_CPP_GGML_LIBRARY_REG: ggml_backend_metal_reg
          APIR_LLAMA_CPP_GGML_LIBRARY_INIT: ggml_backend_metal_init
```
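
Note on the `ci_presets` entries above: each preset is a flat mapping of dotted keys that override nested values in the main configuration tree, and `extends` pulls in parent presets first. Here is a minimal sketch of how such dotted-key overrides can be applied; the helper names are illustrative, not the project's actual config implementation.

```python
# Minimal sketch of dotted-key preset overrides, as used by ci_presets above.
# set_dotted/apply_preset are hypothetical helpers, not the project's real code.
def set_dotted(config: dict, dotted_key: str, value):
    """Set config['a']['b']['c'] = value for the dotted key 'a.b.c'."""
    *parents, leaf = dotted_key.split(".")
    node = config
    for key in parents:
        node = node.setdefault(key, {})
    node[leaf] = value

def apply_preset(config: dict, presets: dict, name: str):
    """Apply a preset, recursing into its 'extends' parents first."""
    preset = presets[name]
    for parent in preset.get("extends", []):
        apply_preset(config, presets, parent)
    for key, value in preset.items():
        if key != "extends":
            set_dotted(config, key, value)

config = {"prepare": {"ramalama": {"repo": {"version": "latest"}}}}
presets = {
    "remoting": {"prepare.ramalama.repo.url": "https://github.com/kpouget/ramalama"},
    "remoting_publish": {
        "extends": ["remoting"],
        "prepare.ramalama.repo.version": "v0.10.0-remoting-0.2.0",
    },
}
apply_preset(config, presets, "remoting_publish")
assert config["prepare"]["ramalama"]["repo"]["version"] == "v0.10.0-remoting-0.2.0"
```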

projects/mac_ai/testing/podman_machine.py

Lines changed: 3 additions & 3 deletions
```diff
@@ -47,14 +47,14 @@ def start(base_work_dir, use_remoting=None):
     if use_remoting:
         env["DYLD_LIBRARY_PATH"] = prepare_virglrenderer.get_dyld_library_path(base_work_dir) # not working ... (blocked by MacOS when SSHing ...)
         llama_remoting_backend_build_dir = prepare_llama_cpp.get_remoting_build_dir(base_work_dir)
-        env["VIRGL_APIR_BACKEND_LIBRARY"] = llama_remoting_backend_build_dir / config.project.get_config("prepare.podman.machine.remoting_env.apir_lib.name")
-        env["APIR_LLAMA_CPP_GGML_LIBRARY_PATH"] = llama_remoting_backend_build_dir / config.project.get_config("prepare.podman.machine.remoting_env.ggml_lib.name")
+        env["VIRGL_APIR_BACKEND_LIBRARY"] = llama_remoting_backend_build_dir / config.project.get_config("prepare.podman.machine.remoting_env.ggml_libs[0]")
+        env["APIR_LLAMA_CPP_GGML_LIBRARY_PATH"] = llama_remoting_backend_build_dir / config.project.get_config("prepare.podman.machine.remoting_env.ggml_libs[1]")
         env |= config.project.get_config("prepare.podman.machine.remoting_env.env")
         prepare_virglrenderer.configure(base_work_dir, use_custom=True)
     else:
         prepare_virglrenderer.configure(base_work_dir, use_custom=False)
 
-    ret = _run(base_work_dir, f"start {name}", env, print_cmd=True)
+    ret = _run(base_work_dir, f"start {name} --no-info", env, print_cmd=True)
 
     if use_remoting and config.project.get_config("prepare.podman.machine.remoting_env.enabled"):
         if not config.project.get_config("prepare.virglrenderer.enabled"):
```
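
The switch from `apir_lib.name`/`ggml_lib.name` to `ggml_libs[0]`/`ggml_libs[1]` assumes the config layer can index into lists inside a key path, and it leans on the ordering contract spelled out in the YAML comments (APIR backend first, GGML backend second). A hedged sketch of how such an indexed lookup could be resolved; the project's real `config.project.get_config` may work differently.

```python
import re

# Hypothetical resolution of a key path such as
# "remoting_env.ggml_libs[1]" against nested config data.
def get_config(data, key_path: str):
    for part in key_path.split("."):
        match = re.fullmatch(r"(\w+)\[(\d+)\]", part)
        if match:  # list entry, e.g. "ggml_libs[0]"
            data = data[match.group(1)][int(match.group(2))]
        else:      # plain mapping key
            data = data[part]
    return data

machine_cfg = {"remoting_env": {"ggml_libs": [
    "libggml-remotingbackend.dylib",  # APIR backend, kept first on purpose
    "libggml-metal.dylib",            # GGML backend, kept second
    "libggml-base.dylib",
]}}
assert get_config(machine_cfg, "remoting_env.ggml_libs[0]") == "libggml-remotingbackend.dylib"
```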

projects/mac_ai/testing/prepare_release.py

Lines changed: 14 additions & 19 deletions
```diff
@@ -10,26 +10,24 @@
 import llama_cpp, ollama, ramalama
 
 def create_remoting_tarball(base_work_dir):
+    package_libs = []
     virglrenderer_lib = prepare_virglrenderer.get_dyld_library_path(base_work_dir, with_lib=True)
 
     if not remote_access.exists(virglrenderer_lib):
         raise ValueError(f"Cannot publish the remoting libraries, {virglrenderer_lib} does not exist")
+    package_libs.append(virglrenderer_lib)
 
     llama_remoting_backend_build_dir = prepare_llama_cpp.get_remoting_build_dir(base_work_dir)
-    apir_backend_lib = llama_remoting_backend_build_dir / config.project.get_config("prepare.podman.machine.remoting_env.apir_lib.name")
-    if not remote_access.exists(apir_backend_lib):
-        raise ValueError(f"Cannot publish the remoting libraries, {apir_backend_lib} does not exist")
 
-    llama_cpp_backend_lib = llama_remoting_backend_build_dir / config.project.get_config("prepare.podman.machine.remoting_env.ggml_lib.name")
-    if not remote_access.exists(llama_cpp_backend_lib):
-        raise ValueError(f"Cannot publish the remoting libraries, {llama_cpp_backend_lib} does not exist")
-
-    virglrenderer_branch = config.project.get_config("prepare.virglrenderer.repo.branch")
-    if not virglrenderer_branch.startswith("v"):
-        raise ValueError("Cannot publish the remoting libraries, virglrenderer not built from a released version")
+    ggml_backend_libs = config.project.get_config("prepare.podman.machine.remoting_env.ggml_libs")
+    for libname in ggml_backend_libs:
+        backend_lib = llama_remoting_backend_build_dir / libname
+        if not remote_access.exists(backend_lib):
+            raise ValueError(f"Cannot publish the remoting libraries, {backend_lib} does not exist")
+        package_libs.append(backend_lib)
 
     with env.NextArtifactDir("build_remoting_tarball"):
-        return build_remoting_tarball(base_work_dir, virglrenderer_lib, llama_cpp_backend_lib, apir_backend_lib)
+        return build_remoting_tarball(base_work_dir, package_libs)
 
 
 def add_string_file(dest, content):
@@ -68,7 +66,7 @@ def add_remote_git_status(base_work_dir, src_dir, dest):
 
 
 def get_version_link(repo_url, version, git_rev, github=False, gitlab=False):
-    url = f"{repo_url}/-/commit/{git_rev} ({version})" if gitlab \
+    url = f"{repo_url}/-/commit/{git_rev}" if gitlab \
         else f"{repo_url}/commit/{git_rev}"
 
     if version:
@@ -77,11 +75,11 @@ def get_version_link(repo_url, version, git_rev, github=False, gitlab=False):
     return url
 
 
-def build_remoting_tarball(base_work_dir, virglrenderer_lib, llama_cpp_backend_lib, apir_backend_lib):
+def build_remoting_tarball(base_work_dir, package_libs):
     llama_cpp_version = config.project.get_config("prepare.llama_cpp.source.repo.version")
     llama_cpp_url = config.project.get_config("prepare.llama_cpp.source.repo.url")
 
-    ramalama_version = config.project.get_config("prepare.ramalama.repo.git_ref")
+    ramalama_version = config.project.get_config("prepare.ramalama.repo.version")
 
     virglrenderer_version = config.project.get_config("prepare.virglrenderer.repo.branch")
     virglrenderer_url = config.project.get_config("prepare.virglrenderer.repo.url")
@@ -96,8 +94,6 @@ def build_remoting_tarball(base_work_dir, virglrenderer_lib, llama_cpp_backend_lib, apir_backend_lib):
     src_info_dir = tarball_dir / "src_info"
     src_info_dir.mkdir()
 
-    add_remote_file(base_work_dir, virglrenderer_lib, bin_dir / virglrenderer_lib.name)
-
     virglrenderer_src_dir = prepare_virglrenderer.get_build_dir(base_work_dir) / ".." / "src"
     virglrenderer_git_rev = add_remote_git_status(base_work_dir, virglrenderer_src_dir,
                                                   src_info_dir / "virglrenderer.git-commit.txt")
@@ -110,9 +106,8 @@ def build_remoting_tarball(base_work_dir, virglrenderer_lib, llama_cpp_backend_lib, apir_backend_lib):
 
     llama_cpp_version_link = get_version_link(llama_cpp_url, llama_cpp_version, llama_cpp_git_rev, github=True)
 
-    add_remote_file(base_work_dir, llama_cpp_backend_lib, bin_dir / llama_cpp_backend_lib.name)
-
-    add_remote_file(base_work_dir, apir_backend_lib, bin_dir / apir_backend_lib.name)
+    for backend_lib in package_libs:
+        add_remote_file(base_work_dir, backend_lib, bin_dir / backend_lib.name)
 
     machine_script_file = pathlib.Path("projects/mac_ai/testing/scripts/podman_start_machine.api_remoting.sh")
     add_local_file(machine_script_file, tarball_dir / machine_script_file.name)
```
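
The `get_version_link` fix stops embedding ` ({version})` inside the GitLab commit URL itself. Presumably the version annotation is appended uniformly afterwards, in the `if version:` branch that the diff does not show; a sketch of the fixed behavior under that assumption:

```python
# Hedged reconstruction of get_version_link after the fix. The body of the
# `if version:` branch is not visible in the diff; the append is an assumption.
def get_version_link(repo_url, version, git_rev, github=False, gitlab=False):
    url = f"{repo_url}/-/commit/{git_rev}" if gitlab \
        else f"{repo_url}/commit/{git_rev}"

    if version:
        url += f" ({version})"  # assumed: annotate the link with the version

    return url

print(get_version_link("https://gitlab.freedesktop.org/kpouget/virglrenderer",
                       "v1.1.1-remoting-0.1", "abc123", gitlab=True))
# https://gitlab.freedesktop.org/kpouget/virglrenderer/-/commit/abc123 (v1.1.1-remoting-0.1)
```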

projects/mac_ai/testing/prepare_virglrenderer.py

Lines changed: 8 additions & 0 deletions
```diff
@@ -7,6 +7,7 @@
 
 def get_build_dir(base_work_dir):
     version = config.project.get_config("prepare.virglrenderer.repo.branch")
+
     return base_work_dir / "virglrenderer" / version / "build"
 
 
@@ -28,6 +29,12 @@ def prepare(base_work_dir):
     repo_url = config.project.get_config("prepare.virglrenderer.repo.url")
     build_flags = config.project.get_config("prepare.virglrenderer.build.flags")
     version = config.project.get_config("prepare.virglrenderer.repo.branch")
+    refspec = None
+    if version.startswith("pr-"):
+        pr_number = version.removeprefix("pr-")
+        refspec = f"refs/merge-requests/{pr_number}/head"
+        version = None
+
     build_dir = get_build_dir(base_work_dir)
     src_dir = build_dir.parent / "src"
 
@@ -36,6 +43,7 @@ def prepare(base_work_dir):
         repo_url=repo_url,
         dest=src_dir,
         version=version,
+        refspec=refspec,
         artifact_dir_suffix="__virglrenderer",
         force=True,
     )
```
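
virglrenderer is hosted on GitLab, so a `pr-<N>` pseudo-version maps to `refs/merge-requests/<N>/head`, whereas the ramalama change below uses GitHub's `refs/pull/<N>/head`. A small hypothetical helper condensing the two conventions (not part of the actual change):

```python
# Hypothetical helper unifying the two "pr-" refspec conventions in this commit.
def pr_refspec(version: str, gitlab: bool = False) -> str | None:
    """Return a fetchable refspec for a 'pr-<N>' pseudo-version, else None."""
    if not version.startswith("pr-"):
        return None
    pr_number = version.removeprefix("pr-")
    return (f"refs/merge-requests/{pr_number}/head" if gitlab  # GitLab MRs
            else f"refs/pull/{pr_number}/head")                # GitHub PRs

assert pr_refspec("pr-42", gitlab=True) == "refs/merge-requests/42/head"
assert pr_refspec("pr-42") == "refs/pull/42/head"
assert pr_refspec("v1.1.1-remoting-0.1") is None
```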

projects/mac_ai/testing/ramalama.py

Lines changed: 49 additions & 59 deletions
```diff
@@ -17,30 +17,23 @@ def prepare_test(base_work_dir, platform):
 
 
 def _get_binary_path(base_work_dir, platform):
-    git_ref = config.project.get_config("prepare.ramalama.repo.git_ref")
     version = config.project.get_config("prepare.ramalama.repo.version")
 
-    if git_ref and version:
-        raise ValueError(f"Cannot have Ramalama git_ref={git_ref} and version={version} set together.")
-
     if version == "latest":
         repo_url = config.project.get_config("prepare.ramalama.repo.url")
         version = utils.get_latest_release(repo_url)
         config.project.set_config("prepare.ramalama.repo.version", version)
 
-    system_file = f"{version}.zip"
-
     dest_base = base_work_dir / "ramalama-ai"
-    if version:
-        # don't use 'ramalama' in the base_work_dir, otherwise Python
-        # takes it (invalidly) for the package `ramalama` package
-        dest = dest_base / system_file
-        dest_dir = dest.parent / f"ramalama-{version.removeprefix('v')}"
-    else:
-        dest = dest_dir = dest_base / f"ramalama-{git_ref}"
 
-    ramalama_path = dest_dir / "bin" / "ramalama"
-    return ramalama_path, dest, (version, git_ref)
+    # don't use 'ramalama' in the base_work_dir, otherwise Python
+    # takes it (invalidly) for the package `ramalama` package
+
+    dest_dir = dest_base / f"ramalama-{version.removeprefix('v')}"
+
+    ramalama_path = dest_dir / "bin" / "ramalama"
+
+    return ramalama_path, dest_dir, version
 
 
 def get_binary_path(base_work_dir, platform):
@@ -52,40 +45,28 @@ def get_binary_path(base_work_dir, platform):
 
     return ramalama_path
 
-def download_ramalama(base_work_dir, dest, version, git_ref):
+def download_ramalama(base_work_dir, dest, version):
     repo_url = config.project.get_config("prepare.ramalama.repo.url")
 
-    if version:
-        source = "/".join([
-            repo_url,
-            "archive/refs/tags",
-            f"{version}.zip",
-        ])
-        run.run_toolbox(
-            "remote", "download",
-            source=source, dest=dest,
-            tarball=True,
-        )
+    kwargs = dict(
+        repo_url=repo_url,
+        dest=dest,
+    )
+    if version.startswith("pr-"):
+        pr_number = version.removeprefix("pr-")
+        kwargs["refspec"] = f"refs/pull/{pr_number}/head"
     else:
-        kwargs = dict(
-            repo_url=repo_url,
-            dest=dest,
-        )
-        if git_ref.startswith("pr-"):
-            pr_number = git_ref.removeprefix("pr-")
-            kwargs["refspec"] = f"refs/pull/{pr_number}/head"
-        else:
-            kwargs["refspec"] = git_ref
-
-        run.run_toolbox(
-            "remote", "clone",
-            **kwargs,
-            artifact_dir_suffix="_llama_cpp",
-            force=True,
-        )
+        kwargs["version"] = version
 
-        remote_access.run_with_ansible_ssh_conf(base_work_dir, "git show -s --format='%cd%n%s%n%H' --date=format:'%y%m%d.%H%M' > ramalama-commit.info",
-                                                chdir=dest)
+    run.run_toolbox(
+        "remote", "clone",
+        **kwargs,
+        artifact_dir_suffix="_llama_cpp",
+        force=True,
+    )
+
+    remote_access.run_with_ansible_ssh_conf(base_work_dir, "git show -s --format='%cd%n%s%n%H' --date=format:'%y%m%d.%H%M' > ramalama-commit.info",
+                                            chdir=dest)
 
 
 def build_container_image(base_work_dir, ramalama_path):
@@ -100,6 +81,7 @@ def build_container_image(base_work_dir, ramalama_path):
 
     chdir = ramalama_path.parent.parent
 
+    logging.info("Building the ramalama image ...")
     with env.NextArtifactDir(f"build_ramalama_{image_name}_image"):
         cmd = f"env PATH=$PATH:{podman_mod.get_podman_binary(base_work_dir).parent}"
         cmd += f" time ./container_build.sh build {image_name}"
@@ -111,6 +93,10 @@ def build_container_image(base_work_dir, ramalama_path):
             LLAMA_CPP_REPO=config.project.get_config("prepare.llama_cpp.source.repo.url"),
         )
 
+        if config.project.get_config("prepare.ramalama.build_image.debug"):
+            extra_env["RAMALAMA_IMAGE_BUILD_DEBUG"] = "y"
+            extra_env["RAMALAMA_IMAGE_INCLUDE_DEBUG"] = "y"
+
         ret = remote_access.run_with_ansible_ssh_conf(base_work_dir, cmd,
                                                       extra_env=extra_env,
                                                       chdir=chdir, check=False, capture_stdout=True)
@@ -125,11 +111,10 @@ def build_container_image(base_work_dir, ramalama_path):
 
 
 def prepare_binary(base_work_dir, platform):
-    ramalama_path, dest, (version, git_ref) = _get_binary_path(base_work_dir, platform)
-    system_file = dest.name
+    ramalama_path, dest, version = _get_binary_path(base_work_dir, platform)
 
-    if git_ref or not remote_access.exists(ramalama_path):
-        download_ramalama(base_work_dir, dest, version, git_ref)
+    if version.startswith("pr-") or not remote_access.exists(ramalama_path):
+        download_ramalama(base_work_dir, dest, version)
     else:
         logging.info(f"ramalama {platform.name} already exists, not downloading it.")
 
@@ -138,11 +123,17 @@ def prepare_binary(base_work_dir, platform):
         image_name = build_container_image(base_work_dir, ramalama_path)
 
     if config.project.get_config("prepare.ramalama.build_image.publish.enabled"):
-        version = config.project.get_config("prepare.llama_cpp.source.repo.version")
         podman_mod.login(base_work_dir, "prepare.ramalama.build_image.publish.credentials")
+
+        version = config.project.get_config("prepare.llama_cpp.source.repo.version")
+
         dest_image_name = image_name.partition(":")[0] + f":{version}"
-        dest_image_latest = image_name.partition(":")[0] + f":latest"
-        logging.info(f"Pushing the image to {dest_image_name} and latest")
+        dest_image_latest = image_name.partition(":")[0] + ":latest"
+        if config.project.get_config("prepare.ramalama.build_image.debug"):
+            dest_image_latest = dest_image_latest.removesuffix(":latest") + ":debug"
+            dest_image_name += "-debug"
+
+        logging.info(f"Pushing the image to {dest_image_name} and {dest_image_latest}")
 
         podman_mod.push_image(base_work_dir, image_name, dest_image_name)
         podman_mod.push_image(base_work_dir, image_name, dest_image_latest)
@@ -186,13 +177,12 @@ def stop_server(base_work_dir, ramalama_path):
 def run_model(base_work_dir, platform, ramalama_path, model, unload=False):
     inference_server_port = config.project.get_config("test.inference_server.port")
 
-    if config.project.get_config("prepare.ramalama.repo.git_ref"):
-        commit_date_cmd = remote_access.run_with_ansible_ssh_conf(base_work_dir, f"cat ramalama-commit.info", chdir=ramalama_path.parent.parent, check=False, capture_stdout=True)
-        if commit_date_cmd.returncode != 0:
-            logging.warning("Couldn't find the Ramalama commit info file ...")
-        else:
-            logging.warning(f"Ramalama commit info: {commit_date_cmd.stdout}")
-            (env.ARTIFACT_DIR / "ramalama-commit.info").write_text(commit_date_cmd.stdout)
+    commit_date_cmd = remote_access.run_with_ansible_ssh_conf(base_work_dir, f"cat ramalama-commit.info", chdir=ramalama_path.parent.parent, check=False, capture_stdout=True)
+    if commit_date_cmd.returncode != 0:
+        logging.warning("Couldn't find the Ramalama commit info file ...")
+    else:
+        logging.warning(f"Ramalama commit info: {commit_date_cmd.stdout}")
+        (env.ARTIFACT_DIR / "ramalama-commit.info").write_text(commit_date_cmd.stdout)
 
     artifact_dir_suffix=None
     if unload:
```
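
After this refactor a single `version` string drives three behaviors: `latest` resolves to the newest release, `pr-<N>` forces a fresh clone of the pull-request head, and anything else is treated as a release tag whose existing checkout can be reused. A condensed, illustrative sketch of that dispatch (not a drop-in replacement for the code above):

```python
# Illustrative summary of the version dispatch introduced by this refactor,
# assuming only the semantics visible in the diff.
def resolve_ramalama_source(version: str, latest_release: str) -> dict:
    if version == "latest":
        version = latest_release            # e.g. utils.get_latest_release(...)
    if version.startswith("pr-"):
        pr_number = version.removeprefix("pr-")
        return {"refspec": f"refs/pull/{pr_number}/head",
                "always_download": True}    # PR heads move, so always re-clone
    return {"version": version,             # e.g. v0.10.0-remoting-0.2.0
            "always_download": False}       # an existing checkout can be reused

assert resolve_ramalama_source("pr-1234", "v0.10.0")["always_download"]
assert resolve_ramalama_source("latest", "v0.10.0")["version"] == "v0.10.0"
```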

projects/mac_ai/testing/remote_access.py

Lines changed: 4 additions & 1 deletion
```diff
@@ -82,7 +82,7 @@ def prepare():
 
 def run_with_ansible_ssh_conf(
     base_work_dir, cmd,
-    extra_env={},
+    extra_env=None,
     check=True,
     capture_stdout=False,
     capture_stderr=False,
@@ -92,6 +92,9 @@ def run_with_ansible_ssh_conf(
     decode_stdout=True,
     decode_stderr=True,
 ):
+    if extra_env is None:
+        extra_env = {}
+
     run_kwargs = dict(
         log_command=False,
```
