From f7394cdaf4814d6cdc38d38bd98120818f4dd5ce Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Mon, 1 Sep 2025 20:45:57 -0400 Subject: [PATCH 01/13] convert : use reflinks for faster conversion --- convert_hf_to_gguf.py | 185 +++++++++++++++++++++++++++--------- ggml/src/ggml-impl.h | 4 +- ggml/src/gguf.cpp | 2 + gguf-py/gguf/gguf_writer.py | 34 +++++-- gguf-py/gguf/lazy.py | 20 ++-- gguf-py/gguf/utility.py | 81 ++++++++++++++++ 6 files changed, 266 insertions(+), 60 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index b991848df9aaa..51420f612a186 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -11,6 +11,7 @@ import os import re import sys +from dataclasses import dataclass from enum import IntEnum from pathlib import Path from hashlib import sha256 @@ -59,6 +60,14 @@ class ModelType(IntEnum): AnyModel = TypeVar("AnyModel", bound="type[ModelBase]") +@dataclass +class ModelTensorInfo: + load: Callable[[], Tensor] + src_type: str + src_qtype: gguf.GGMLQuantizationType | None = None + dst_qtype: gguf.GGMLQuantizationType | None = None + + class ModelBase: _model_classes: dict[ModelType, dict[str, type[ModelBase]]] = { ModelType.TEXT: {}, @@ -74,7 +83,7 @@ class ModelBase: lazy: bool dry_run: bool hparams: dict[str, Any] - model_tensors: dict[str, Callable[[], Tensor]] + model_tensors: dict[str, ModelTensorInfo] gguf_writer: gguf.GGUFWriter model_name: str | None metadata_override: Path | None @@ -97,7 +106,8 @@ def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, metadata_override: Path | None = None, model_name: str | None = None, split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False, small_first_shard: bool = False, hparams: dict[str, Any] | None = None, remote_hf_model_id: str | None = None, - disable_mistral_community_chat_template: bool = False): + disable_mistral_community_chat_template: bool = False, + use_reflinks: bool = False): if type(self) is ModelBase or \ type(self) is TextModel or \ type(self) is MmprojModel: @@ -118,22 +128,12 @@ def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, self.model_name = model_name self.dir_model_card = dir_model # overridden in convert_lora_to_gguf.py - # Apply heuristics to figure out typical tensor encoding based on first layer tensor encoding type - if self.ftype == gguf.LlamaFileType.GUESSED: - # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie. 
- _, first_tensor = next(self.get_tensors()) - if first_tensor.dtype == torch.float16: - logger.info(f"choosing --outtype f16 from first tensor type ({first_tensor.dtype})") - self.ftype = gguf.LlamaFileType.MOSTLY_F16 - else: - logger.info(f"choosing --outtype bf16 from first tensor type ({first_tensor.dtype})") - self.ftype = gguf.LlamaFileType.MOSTLY_BF16 - self.dequant_model() # Configure GGUF Writer self.gguf_writer = gguf.GGUFWriter(path=None, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file, - split_max_tensors=split_max_tensors, split_max_size=split_max_size, dry_run=dry_run, small_first_shard=small_first_shard) + split_max_tensors=split_max_tensors, split_max_size=split_max_size, dry_run=dry_run, small_first_shard=small_first_shard, + use_reflinks=use_reflinks) # Mistral specific self.disable_mistral_community_chat_template = disable_mistral_community_chat_template @@ -152,8 +152,8 @@ def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any: return None raise KeyError(f"could not find any of: {keys}") - def index_tensors(self, remote_hf_model_id: str | None = None) -> dict[str, Callable[[], Tensor]]: - tensors: dict[str, Callable[[], Tensor]] = {} + def index_tensors(self, remote_hf_model_id: str | None = None) -> dict[str, ModelTensorInfo]: + tensors: dict[str, ModelTensorInfo] = {} if remote_hf_model_id is not None: is_safetensors = True @@ -161,7 +161,14 @@ def index_tensors(self, remote_hf_model_id: str | None = None) -> dict[str, Call logger.info(f"Using remote model with HuggingFace id: {remote_hf_model_id}") remote_tensors = gguf.utility.SafetensorRemote.get_list_tensors_hf_model(remote_hf_model_id) for name, remote_tensor in remote_tensors.items(): - tensors[name] = lambda r=remote_tensor: LazyTorchTensor.from_remote_tensor(r) + dtype = LazyTorchTensor._dtype_str_map[remote_tensor.dtype] + qtype = LazyTorchTensor._qtype_map.get(dtype) + tensors[name] = ModelTensorInfo( + load=lambda r=remote_tensor: LazyTorchTensor.from_remote_tensor(r), + src_type=str(dtype), + src_qtype=qtype, + dst_qtype=qtype, + ) return tensors @@ -205,18 +212,25 @@ def index_tensors(self, remote_hf_model_id: str | None = None) -> dict[str, Call for name in model_part.keys(): if is_safetensors: data: gguf.utility.LocalTensor = model_part[name] + dtype = LazyTorchTensor._dtype_str_map[data.dtype] if self.lazy: data_gen = lambda data=data: LazyTorchTensor.from_local_tensor(data) # noqa: E731 else: - dtype = LazyTorchTensor._dtype_str_map[data.dtype] data_gen = lambda data=data, dtype=dtype: torch.from_numpy(data.mmap_bytes()).view(dtype).reshape(data.shape) # noqa: E731 else: data_torch: Tensor = model_part[name] + dtype = data_torch.dtype if self.lazy: data_gen = lambda data=data_torch: LazyTorchTensor.from_eager(data) # noqa: E731 else: data_gen = lambda data=data_torch: data # noqa: E731 - tensors[name] = data_gen + qtype = LazyTorchTensor._qtype_map.get(dtype) + tensors[name] = ModelTensorInfo( + load=data_gen, + src_type=str(dtype), + src_qtype=qtype, + dst_qtype=qtype, + ) # verify tensor name presence and identify potentially missing files if len(tensor_names_from_index) > 0: @@ -237,7 +251,7 @@ def index_tensors(self, remote_hf_model_id: str | None = None) -> dict[str, Call def dequant_model(self): tensors_to_remove: list[str] = [] - new_tensors: dict[str, Callable[[], Tensor]] = {} + new_tensors: dict[str, ModelTensorInfo] = {} if (quant_config := self.hparams.get("quantization_config")) and isinstance(quant_config, dict): 
quant_method = quant_config.get("quant_method") @@ -315,7 +329,12 @@ def dequant_gptq(g_idx: Tensor, qweight: Tensor, qzeros: Tensor, scales: Tensor) weight_name = name.removesuffix("_scale") w = self.model_tensors[weight_name] s = self.model_tensors[name] - self.model_tensors[weight_name] = lambda w=w, s=s: dequant_bitnet(w(), s()) + self.model_tensors[weight_name] = ModelTensorInfo( + load=lambda w=w, s=s: dequant_bitnet(w.load(), s.load()), + src_type="bitnet", + src_qtype=gguf.GGMLQuantizationType.F32, + dst_qtype=gguf.GGMLQuantizationType.TQ1_0, + ) tensors_to_remove.append(name) elif quant_method == "fp8": for name in self.model_tensors.keys(): @@ -323,9 +342,15 @@ def dequant_gptq(g_idx: Tensor, qweight: Tensor, qzeros: Tensor, scales: Tensor) weight_name = name.removesuffix("_scale_inv") w = self.model_tensors[weight_name] s = self.model_tensors[name] - self.model_tensors[weight_name] = lambda w=w, s=s: dequant_simple(w(), s()) + self.model_tensors[weight_name] = ModelTensorInfo( + load=lambda w=w, s=s: dequant_simple(w.load(), s.load()), + src_type=w.src_type, + src_qtype=gguf.GGMLQuantizationType.F32, + dst_qtype=gguf.GGMLQuantizationType.BF16, # TODO: change to FP8 once natively supported + ) tensors_to_remove.append(name) elif quant_method == "gptq": + bits = quant_config["bits"] for name in self.model_tensors.keys(): if name.endswith(".qweight"): base_name = name.removesuffix(".qweight") @@ -333,10 +358,13 @@ def dequant_gptq(g_idx: Tensor, qweight: Tensor, qzeros: Tensor, scales: Tensor) qweight = self.model_tensors[base_name + ".qweight"] qzeros = self.model_tensors[base_name + ".qzeros"] scales = self.model_tensors[base_name + ".scales"] - new_tensors[base_name + ".weight"] = ( - lambda g=g_idx, z=qzeros, w=qweight, s=scales: dequant_gptq( - g(), w(), z(), s() - ) + new_tensors[base_name + ".weight"] = ModelTensorInfo( + load=lambda g=g_idx, z=qzeros, w=qweight, s=scales: dequant_gptq( + g.load(), w.load(), z.load(), s.load() + ), + src_type=f"GPTQ-{bits}bit", + src_qtype=gguf.GGMLQuantizationType.F32, + dst_qtype=gguf.GGMLQuantizationType.Q8_0 if bits == 8 else gguf.GGMLQuantizationType.Q4_1, ) tensors_to_remove += [ base_name + n @@ -358,8 +386,8 @@ def dequant_gptq(g_idx: Tensor, qweight: Tensor, qzeros: Tensor, scales: Tensor) self.model_tensors[name] = value def get_tensors(self) -> Iterator[tuple[str, Tensor]]: - for name, gen in self.model_tensors.items(): - yield name, gen() + for name, t in self.model_tensors.items(): + yield name, t.load() def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str: if key not in gguf.MODEL_TENSORS[self.model_arch]: @@ -414,10 +442,12 @@ def prepare_tensors(self): if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")): continue - old_dtype = data_torch.dtype + tensor_info = self.model_tensors.get(name) + old_dtype: str = tensor_info.src_type if tensor_info is not None else str(data_torch.dtype) # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): + # TODO: handle pre-quantized tensors for repacking + if data_torch.dtype not in (torch.float16, torch.bfloat16, torch.float32): data_torch = data_torch.to(torch.float32) # use the first number-like part of the tensor name as the block id @@ -428,8 +458,16 @@ def prepare_tensors(self): break for new_name, data_torch in (self.modify_tensors(data_torch, name, bid)): - # TODO: why do we squeeze here? 
- # data = data_torch.squeeze().numpy() + old_qtype = LazyTorchTensor._qtype_map[data_torch.dtype] + + # workaround BF16 not being supported by Numpy + if data_torch.dtype == torch.bfloat16: + data_torch = data_torch.view(torch.uint8) + + # if data ends up empty, it means data_torch was a scalar tensor -> restore + if len(data_torch.shape) == 0: + data_torch = data_torch.reshape(1) + data = data_torch.numpy() n_dims = len(data.shape) @@ -500,15 +538,23 @@ def prepare_tensors(self): data_qtype = gguf.GGMLQuantizationType.TQ1_0 elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ2_0: data_qtype = gguf.GGMLQuantizationType.TQ2_0 + elif self.ftype == gguf.LlamaFileType.GUESSED: + data_qtype = old_qtype if tensor_info is None or tensor_info.dst_qtype is None else tensor_info.dst_qtype else: raise ValueError(f"Unknown file type: {self.ftype.name}") - try: - data = gguf.quants.quantize(data, data_qtype) - except gguf.QuantError as e: - logger.warning("%s, %s", e, "falling back to F16") - data_qtype = gguf.GGMLQuantizationType.F16 - data = gguf.quants.quantize(data, data_qtype) + if old_qtype != data_qtype: + if old_qtype not in ( + gguf.GGMLQuantizationType.F32, + gguf.GGMLQuantizationType.F16, + ): + data = gguf.quants.dequantize(data, old_qtype) + try: + data = gguf.quants.quantize(data, data_qtype) + except gguf.QuantError as e: + logger.warning("%s, %s", e, "falling back to F16") + data_qtype = gguf.GGMLQuantizationType.F16 + data = gguf.quants.quantize(data, data_qtype) shape = gguf.quant_shape_from_byte_shape(data.shape, data_qtype) if data.dtype == np.uint8 else data.shape @@ -656,8 +702,24 @@ def prepare_metadata(self, vocab_only: bool): super().prepare_metadata(vocab_only=vocab_only) total_params = self.gguf_writer.get_total_parameter_count()[0] + + # Apply heuristics to figure out typical tensor encoding based on first layer tensor encoding type + # TODO: get type name from `quantization_config` field when present? + if self.ftype == gguf.LlamaFileType.GUESSED: + # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie. + _, first_tensor = next(self.get_tensors()) + logger.info(f"first tensor type is {first_tensor.dtype}") + if first_tensor.dtype == torch.float16: + ftype = gguf.LlamaFileType.MOSTLY_F16 + elif first_tensor.dtype == torch.bfloat16: + ftype = gguf.LlamaFileType.MOSTLY_BF16 + else: + ftype = gguf.LlamaFileType.ALL_F32 + else: + ftype = self.ftype + # Extract the encoding scheme from the file type name. e.g. 
'gguf.LlamaFileType.MOSTLY_Q8_0' --> 'Q8_0' - output_type: str = self.ftype.name.partition("_")[2] + output_type: str = ftype.name.partition("_")[2] # Filename Output if self.fname_out.is_dir(): @@ -8840,12 +8902,20 @@ class LazyTorchTensor(gguf.LazyBase): "F8_E5M2": torch.float8_e5m2, } + _qtype_map: dict[torch.dtype, gguf.GGMLQuantizationType] = { + torch.float64: gguf.GGMLQuantizationType.F64, + torch.float32: gguf.GGMLQuantizationType.F32, + torch.float16: gguf.GGMLQuantizationType.F16, + torch.bfloat16: gguf.GGMLQuantizationType.BF16, + } + def numpy(self) -> gguf.LazyNumpyTensor: dtype = self._dtype_map[self.dtype] return gguf.LazyNumpyTensor( meta=gguf.LazyNumpyTensor.meta_with_dtype_and_shape(dtype, self.shape), args=(self,), - func=(lambda s: s.numpy()) + func=(lambda s: s.numpy()), + ranges=self._ranges ) @classmethod @@ -8866,7 +8936,7 @@ def load_tensor(tensor: gguf.utility.LocalTensor) -> Tensor: return torch.from_numpy(tensor.mmap_bytes()).view(dtype).reshape(tensor.shape) dtype = cls._dtype_str_map[t.dtype] shape = t.shape - lazy = cls(meta=cls.meta_with_dtype_and_shape(dtype, shape), args=(t,), func=lambda r: load_tensor(r)) + lazy = cls(meta=cls.meta_with_dtype_and_shape(dtype, shape), args=(t,), func=lambda r: load_tensor(r), ranges=(t.data_range,)) return cast(torch.Tensor, lazy) @classmethod @@ -8887,7 +8957,27 @@ def __torch_function__(cls, func, types, args=(), kwargs=None): if func is torch.Tensor.numpy: return args[0].numpy() - return cls._wrap_fn(func)(*args, **kwargs) + result = cls._wrap_fn(func)(*args, **kwargs) + + def get_dim(index: int, key: str = "dim", default: int = 0, args=args, kwargs=kwargs) -> int: + # TODO: handle negative dim + if len(args) > index: + return args[index] + else: + return kwargs.get(key, default) + + # Track file ranges + # TODO: handle tensor splits (with torch.split, torch.chunk, and torch.__getitem__) + if isinstance(result, LazyTorchTensor): + if isinstance(args[0], LazyTorchTensor): + if func is torch.Tensor.to and not isinstance(args[1], torch.dtype): + result._ranges = args[0]._ranges + if func is torch.stack and get_dim(1) == 0: + if all(isinstance(t, LazyTorchTensor) and len(t._ranges) > 0 for t in args[0]): + # collect ranges of all stacked tensors + result._ranges = tuple(r for t in args[0] for r in t._ranges) + + return result def parse_args() -> argparse.Namespace: @@ -8902,8 +8992,8 @@ def parse_args() -> argparse.Namespace: help="path to write to; default: based on input. {ftype} will be replaced by the outtype.", ) parser.add_argument( - "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "tq1_0", "tq2_0", "auto"], default="f16", - help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, tq1_0 or tq2_0 for ternary, and auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type", + "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "tq1_0", "tq2_0", "auto"], default="auto", + help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, tq1_0 or tq2_0 for ternary, and auto for mostly unchanged types", ) parser.add_argument( "--bigendian", action="store_true", @@ -8922,6 +9012,10 @@ def parse_args() -> argparse.Namespace: "--no-lazy", action="store_true", help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)", ) + parser.add_argument( + "--reflink", action="store_true", + help="(Experimental) Use copy-on-write reflinks when possible (e.g. 
on BTRFS, XFS, ZFS, etc.). File alignment and padding will differ compared to not using this option. Should be very fast when source model layout is compatible enough.", + ) parser.add_argument( "--model-name", type=str, default=None, help="name of the model", @@ -9106,7 +9200,8 @@ def main() -> None: split_max_tensors=args.split_max_tensors, split_max_size=split_str_to_n_bytes(args.split_max_size), dry_run=args.dry_run, small_first_shard=args.no_tensor_first_split, - remote_hf_model_id=hf_repo_id, disable_mistral_community_chat_template=disable_mistral_community_chat_template + remote_hf_model_id=hf_repo_id, disable_mistral_community_chat_template=disable_mistral_community_chat_template, + use_reflinks=args.reflink, ) if args.vocab_only: diff --git a/ggml/src/ggml-impl.h b/ggml/src/ggml-impl.h index 19a7adb2d101b..7a50675e2d61a 100644 --- a/ggml/src/ggml-impl.h +++ b/ggml/src/ggml-impl.h @@ -42,8 +42,8 @@ void ggml_print_backtrace(void); # define MAX(a, b) ((a) > (b) ? (a) : (b)) #endif -// required for mmap as gguf only guarantees 32-byte alignment -#define TENSOR_ALIGNMENT 32 +// required for mmap as gguf converted with reflinks from safetensors only guarantees 8-byte alignment +#define TENSOR_ALIGNMENT 8 // static_assert should be a #define, but if it's not, // fall back to the _Static_assert C11 keyword. diff --git a/ggml/src/gguf.cpp b/ggml/src/gguf.cpp index 8cc4ef1cf4435..9673bf78ba20f 100644 --- a/ggml/src/gguf.cpp +++ b/ggml/src/gguf.cpp @@ -624,6 +624,8 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par ctx->size = 0; for (size_t i = 0; i < ctx->info.size(); ++i) { const gguf_tensor_info & ti = ctx->info[i]; + // HACK: bypass the continuity check + ctx->size = ti.offset; if (ti.offset != ctx->size) { GGML_LOG_ERROR("%s: tensor '%s' has offset %" PRIu64 ", expected %zu\n", __func__, ti.t.name, ti.offset, ctx->size); diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index a6cc8a931eb27..03e7ba930b13f 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -30,6 +30,7 @@ ) from .quants import quant_shape_from_byte_shape +from .utility import LocalTensorRange, best_alignment_offset, copy_tensor_ranges logger = logging.getLogger(__name__) @@ -84,14 +85,16 @@ class GGUFWriter: def __init__( self, path: os.PathLike[str] | str | None, arch: str, use_temp_file: bool = False, endianess: GGUFEndian = GGUFEndian.LITTLE, - split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False, small_first_shard: bool = False + split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False, small_first_shard: bool = False, + use_reflinks = False, # opportunistically attempt to use copy-on-write ): self.fout = None self.path = Path(path) if path else None self.arch = arch self.endianess = endianess self.data_alignment = GGUF_DEFAULT_ALIGNMENT - self.use_temp_file = use_temp_file + self.use_reflinks = use_reflinks and hasattr(os, "copy_file_range") + self.use_temp_file = use_temp_file if not self.use_reflinks else False self.temp_file = None self.tensors = [{}] self.kv_data = [{}] @@ -107,6 +110,10 @@ def __init__( if self.small_first_shard: self.tensors.append({}) + if self.use_reflinks: + # common default block size for COW filesystems + self.add_custom_alignment(4096) + self.add_architecture() def get_total_parameter_count(self) -> tuple[int, int, int, int]: @@ -257,14 +264,20 @@ def write_ti_data_to_file(self) -> None: offset_tensor = 0 for name, ti in tensors.items(): + align_offset = 0 + if 
self.use_reflinks: + ranges: tuple[LocalTensorRange, ...] = getattr(ti.tensor, "_ranges", ()) + if len(ranges) > 0: + align_offset = best_alignment_offset(ranges, self.data_alignment) + ti_data += self._pack_val(name, GGUFValueType.STRING, add_vtype=False) n_dims = len(ti.shape) ti_data += self._pack("I", n_dims) for j in range(n_dims): ti_data += self._pack("Q", ti.shape[n_dims - 1 - j]) ti_data += self._pack("I", ti.dtype) - ti_data += self._pack("Q", offset_tensor) - offset_tensor += GGUFWriter.ggml_pad(ti.nbytes, self.data_alignment) + ti_data += self._pack("Q", offset_tensor + align_offset) + offset_tensor += GGUFWriter.ggml_pad(ti.nbytes + align_offset, self.data_alignment) fout.write(ti_data) fout.flush() @@ -398,6 +411,7 @@ def write_tensor_data(self, tensor: np.ndarray[Any, Any]) -> None: if self.state is not WriterState.TI_DATA and self.state is not WriterState.WEIGHTS: raise ValueError(f'Expected output file to contain tensor info or weights, got {self.state}') assert self.fout is not None + assert not self.use_reflinks # TODO: handle this here too if self.endianess == GGUFEndian.BIG: tensor.byteswap(inplace=True) @@ -450,15 +464,21 @@ def write_tensors_to_file(self, *, progress: bool = False) -> None: shard_bar.reset(total=(total if total > 0 else None)) # relying on the fact that Python dicts preserve insertion order (since 3.7) - for ti in tensors.values(): + for name, ti in tensors.items(): assert ti.tensor is not None # can only iterate once over the tensors assert ti.tensor.nbytes == ti.nbytes - ti.tensor.tofile(fout) + if self.use_reflinks and len(ranges := getattr(ti.tensor, "_ranges", ())) > 0: + logger.debug(f"using reflinks for {name}") + start_offset = fout.tell() + copy_tensor_ranges(fout, ranges, self.data_alignment) + self.write_padding(fout, fout.tell() - start_offset) + else: + ti.tensor.tofile(fout) + self.write_padding(fout, ti.nbytes) if shard_bar is not None: shard_bar.update(ti.nbytes) if bar is not None: bar.update(ti.nbytes) - self.write_padding(fout, ti.nbytes) ti.tensor = None else: self.temp_file.seek(0) diff --git a/gguf-py/gguf/lazy.py b/gguf-py/gguf/lazy.py index f9bcadae0224b..c4e5400639887 100644 --- a/gguf-py/gguf/lazy.py +++ b/gguf-py/gguf/lazy.py @@ -6,6 +6,7 @@ import numpy as np from numpy.typing import DTypeLike +from .utility import LocalTensorRange logger = logging.getLogger(__name__) @@ -20,10 +21,11 @@ def __getattr__(self, name: str) -> Any: return type(self)._wrap_fn( (lambda s, *args, **kwargs: getattr(s, name)(*args, **kwargs)), use_self=self, + data_noop=name in ("view", "reshape", "squeeze", "unsqueeze"), ) elif isinstance(meta_attr, self._tensor_type): # e.g. self.T with torch.Tensor should still be wrapped - return type(self)._wrap_fn(lambda s: getattr(s, name))(self) + return type(self)._wrap_fn(lambda s: getattr(s, name), use_self=self)() else: # no need to wrap non-tensor properties, # and they likely don't depend on the actual contents of the tensor @@ -39,8 +41,9 @@ def mk_wrap(op_name: str, *, meta_noop: bool = False): def wrapped_special_op(self, *args, **kwargs): return type(self)._wrap_fn( getattr(type(self)._tensor_type, op_name), + use_self=self, meta_noop=meta_noop, - )(self, *args, **kwargs) + )(*args, **kwargs) return wrapped_special_op # special methods bypass __getattr__, so they need to be added manually @@ -76,14 +79,16 @@ class LazyBase(ABC, metaclass=LazyMeta): _args: tuple _kwargs: dict[str, Any] _func: Callable[[Any], Any] | None + _ranges: tuple[LocalTensorRange, ...] 
- def __init__(self, *, meta: Any, data: Any | None = None, args: tuple = (), kwargs: dict[str, Any] | None = None, func: Callable[[Any], Any] | None = None): + def __init__(self, *, meta: Any, data: Any | None = None, args: tuple = (), kwargs: dict[str, Any] | None = None, func: Callable[[Any], Any] | None = None, ranges: tuple[LocalTensorRange, ...] = ()): super().__init__() self._meta = meta self._data = data self._args = args self._kwargs = kwargs if kwargs is not None else {} self._func = func + self._ranges = ranges assert self._func is not None or self._data is not None def __init_subclass__(cls) -> None: @@ -107,7 +112,7 @@ def _recurse_apply(o: Any, fn: Callable[[Any], Any]) -> Any: return o @classmethod - def _wrap_fn(cls, fn: Callable, *, use_self: LazyBase | None = None, meta_noop: bool | DTypeLike | tuple[DTypeLike, Callable[[tuple[int, ...]], tuple[int, ...]]] = False) -> Callable[[Any], Any]: + def _wrap_fn(cls, fn: Callable, *, use_self: LazyBase | None = None, meta_noop: bool | DTypeLike | tuple[DTypeLike, Callable[[tuple[int, ...]], tuple[int, ...]]] = False, data_noop: bool = False) -> Callable[[Any], Any]: def wrapped_fn(*args, **kwargs): if kwargs is None: kwargs = {} @@ -116,6 +121,8 @@ def wrapped_fn(*args, **kwargs): meta_args = LazyBase._recurse_apply(args, lambda t: t._meta) # TODO: maybe handle tensors in kwargs too + ranges = use_self._ranges if use_self is not None and data_noop else () + if isinstance(meta_noop, bool) and not meta_noop: try: res = fn(*meta_args, **kwargs) @@ -138,7 +145,7 @@ def wrapped_fn(*args, **kwargs): res = cls.meta_with_dtype_and_shape(meta_noop, res.shape) if isinstance(res, cls._tensor_type): - return cls(meta=cls.eager_to_meta(res), args=args, kwargs=kwargs, func=fn) + return cls(meta=cls.eager_to_meta(res), args=args, kwargs=kwargs, func=fn, ranges=ranges) elif isinstance(res, tuple) and all(isinstance(t, cls._tensor_type) for t in res): # share the evaluation between lazy tuple elements shared_args: list = [args, None] @@ -214,7 +221,8 @@ def meta_with_dtype_and_shape(cls, dtype: DTypeLike, shape: tuple[int, ...]) -> def astype(self, dtype, *args, **kwargs): meta = type(self).meta_with_dtype_and_shape(dtype, self._meta.shape) full_args = (self, dtype,) + args - return type(self)(meta=meta, args=full_args, kwargs=kwargs, func=(lambda a, *args, **kwargs: a.astype(*args, **kwargs))) + ranges = self._ranges if self._meta.dtype == dtype else () + return type(self)(meta=meta, args=full_args, kwargs=kwargs, func=(lambda a, *args, **kwargs: a.astype(*args, **kwargs)), ranges=ranges) def tofile(self, *args, **kwargs): eager = LazyNumpyTensor.to_eager(self) diff --git a/gguf-py/gguf/utility.py b/gguf-py/gguf/utility.py index c9401a1c0a2d3..63c7cc7caeb9e 100644 --- a/gguf-py/gguf/utility.py +++ b/gguf-py/gguf/utility.py @@ -1,13 +1,17 @@ from __future__ import annotations from dataclasses import dataclass +from io import BufferedReader, BufferedWriter from pathlib import Path from typing import Literal import os import json +import logging import numpy as np +logger = logging.getLogger(__name__) + def fill_templated_filename(filename: str, output_type: str | None) -> str: # Given a file name fill in any type templates e.g. 
'some-model-name.{ftype}.gguf' @@ -281,6 +285,83 @@ class LocalTensorRange: size: int +def best_alignment_offset(ranges: tuple[LocalTensorRange, ...], alignment: int): + hist: dict[int, int] = {} + + for r in ranges: + align_offset = r.offset % alignment + if align_offset not in hist: + hist[align_offset] = 0 + hist[align_offset] += r.size + + best_offset = 0 + best_size = 0 + for offset, size in hist.items(): + if size > best_size: + best_size = size + best_offset = offset + return best_offset + +# (assuming this is only called where os.copy_file_range is present) +# +# Copy tensor ranges using os.copy_file_range with aligned offsets and sizes +# to make it more likely that copy-on-write is used where possible. +# Block alignment is necessary for BTRFS and XFS (and likely for ZFS too). +def copy_tensor_ranges(fout: BufferedWriter, ranges: tuple[LocalTensorRange, ...], alignment: int = 4096): + assert len(ranges) > 0 + dst_offset = fout.tell() + assert dst_offset % alignment == 0, dst_offset % alignment + align_offset = best_alignment_offset(ranges, alignment) + if len(ranges) == 1: + r = ranges[0] + with open(r.filename, "rb") as src: + offset_src = r.offset - align_offset + offset_src_end = r.offset + r.size + if offset_src_end % alignment != 0: + offset_src_end += alignment - (offset_src_end % alignment) + size = offset_src_end - offset_src + os.copy_file_range(src.fileno(), fout.fileno(), size, offset_src, dst_offset) + dst_offset += r.size + align_offset + else: + # All ranges need to have the same alignment offset + # Non-consecutive ranges need a patch block in between when the alignment offset is non-zero + src_files: dict[Path, BufferedReader] = {} + for r in ranges: + if r.filename not in src_files: + src_files[r.filename] = open(r.filename, "rb") + + for i, r in enumerate(ranges): + this_align_offset = r.offset % alignment + src = src_files[r.filename] + if this_align_offset != align_offset: + logger.debug(f"copy-on-write can't be used ({i}/{len(ranges)})") + if i > 0 and dst_offset % alignment != 0: + # Write the correct data between blocks even when they are non-consecutive + extra_size = alignment - (dst_offset % alignment) + src.seek(r.offset) + buf = src.read(extra_size) + fout.seek(dst_offset) + fout.write(buf) + dst_offset += extra_size + assert dst_offset % alignment == 0, dst_offset % alignment + offset_src = r.offset + extra_size + else: + # TODO: is this always correct? 
+ offset_src = r.offset - align_offset + + offset_src_end = r.offset + r.size + if offset_src_end % alignment != 0: + offset_src_end += alignment - (offset_src_end % alignment) + size = offset_src_end - offset_src + os.copy_file_range(src.fileno(), fout.fileno(), size, offset_src, dst_offset) + dst_offset += r.size + + for f in src_files.values(): + f.close() + + fout.seek(dst_offset) + + @dataclass class LocalTensor: dtype: str From 7724bf9e4f3bec39daa93ad4ad0a4d8b3632e69f Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Tue, 2 Sep 2025 15:22:01 -0400 Subject: [PATCH 02/13] convert : fix reflinks for stacked MoE tensors --- convert_hf_to_gguf.py | 4 +++- gguf-py/gguf/gguf_writer.py | 4 ++-- gguf-py/gguf/lazy.py | 2 +- gguf-py/gguf/utility.py | 35 +++++++++++++++++++++++++---------- 4 files changed, 31 insertions(+), 14 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 51420f612a186..66ef7b591b4df 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -462,7 +462,9 @@ def prepare_tensors(self): # workaround BF16 not being supported by Numpy if data_torch.dtype == torch.bfloat16: - data_torch = data_torch.view(torch.uint8) + # Need a contiguous last dimension otherwise byte view doesn't work + # (problem can be reproduced with DeepSeek-V2-Lite-Chat) + data_torch = data_torch.contiguous().view(torch.uint8) # if data ends up empty, it means data_torch was a scalar tensor -> restore if len(data_torch.shape) == 0: diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index 03e7ba930b13f..075b381c5997c 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -30,7 +30,7 @@ ) from .quants import quant_shape_from_byte_shape -from .utility import LocalTensorRange, best_alignment_offset, copy_tensor_ranges +from .utility import LocalTensorRange, best_alignment_offset, reflink_tensor_ranges logger = logging.getLogger(__name__) @@ -470,7 +470,7 @@ def write_tensors_to_file(self, *, progress: bool = False) -> None: if self.use_reflinks and len(ranges := getattr(ti.tensor, "_ranges", ())) > 0: logger.debug(f"using reflinks for {name}") start_offset = fout.tell() - copy_tensor_ranges(fout, ranges, self.data_alignment) + reflink_tensor_ranges(fout, ranges, self.data_alignment) self.write_padding(fout, fout.tell() - start_offset) else: ti.tensor.tofile(fout) diff --git a/gguf-py/gguf/lazy.py b/gguf-py/gguf/lazy.py index c4e5400639887..70ffb8d3b7ded 100644 --- a/gguf-py/gguf/lazy.py +++ b/gguf-py/gguf/lazy.py @@ -21,7 +21,7 @@ def __getattr__(self, name: str) -> Any: return type(self)._wrap_fn( (lambda s, *args, **kwargs: getattr(s, name)(*args, **kwargs)), use_self=self, - data_noop=name in ("view", "reshape", "squeeze", "unsqueeze"), + data_noop=name in ("view", "reshape", "squeeze", "unsqueeze", "contiguous"), ) elif isinstance(meta_attr, self._tensor_type): # e.g. 
self.T with torch.Tensor should still be wrapped diff --git a/gguf-py/gguf/utility.py b/gguf-py/gguf/utility.py index 63c7cc7caeb9e..80563238f01fe 100644 --- a/gguf-py/gguf/utility.py +++ b/gguf-py/gguf/utility.py @@ -297,7 +297,8 @@ def best_alignment_offset(ranges: tuple[LocalTensorRange, ...], alignment: int): best_offset = 0 best_size = 0 for offset, size in hist.items(): - if size > best_size: + # Ensure minimal alignment is 8-bytes (common with safetensors) + if size > best_size and offset % 8 == 0: best_size = size best_offset = offset return best_offset @@ -307,7 +308,7 @@ def best_alignment_offset(ranges: tuple[LocalTensorRange, ...], alignment: int): # Copy tensor ranges using os.copy_file_range with aligned offsets and sizes # to make it more likely that copy-on-write is used where possible. # Block alignment is necessary for BTRFS and XFS (and likely for ZFS too). -def copy_tensor_ranges(fout: BufferedWriter, ranges: tuple[LocalTensorRange, ...], alignment: int = 4096): +def reflink_tensor_ranges(fout: BufferedWriter, ranges: tuple[LocalTensorRange, ...], alignment: int = 4096): assert len(ranges) > 0 dst_offset = fout.tell() assert dst_offset % alignment == 0, dst_offset % alignment @@ -335,26 +336,40 @@ def copy_tensor_ranges(fout: BufferedWriter, ranges: tuple[LocalTensorRange, ... src = src_files[r.filename] if this_align_offset != align_offset: logger.debug(f"copy-on-write can't be used ({i}/{len(ranges)})") - if i > 0 and dst_offset % alignment != 0: - # Write the correct data between blocks even when they are non-consecutive + # relying on os.copy_file_range to fallback to a non-aligned copy + + # Block 0, 1, 2, 3, 4, + # |___0000|0000000|0001111|1111111|111____| + # + # 1. blocks 0, 1 and 2 are copied from range[0] using os.copy_file_range + # 2. block 2 is partially overwritten with contents from range[1] + # 3. blocks 3 and 4 are copied from range[1] using os.copy_file_range + # + # (2 and 3 are repeated with further blocks if there are more ranges) + if i == 0: + extra_size = -align_offset + elif dst_offset % alignment == 0: + extra_size = 0 + else: extra_size = alignment - (dst_offset % alignment) + extra_size = min(extra_size, r.size) src.seek(r.offset) buf = src.read(extra_size) fout.seek(dst_offset) fout.write(buf) dst_offset += extra_size - assert dst_offset % alignment == 0, dst_offset % alignment - offset_src = r.offset + extra_size - else: - # TODO: is this always correct? 
- offset_src = r.offset - align_offset + if extra_size == r.size: + continue + + assert dst_offset % alignment == 0, dst_offset % alignment + offset_src = r.offset + extra_size offset_src_end = r.offset + r.size if offset_src_end % alignment != 0: offset_src_end += alignment - (offset_src_end % alignment) size = offset_src_end - offset_src os.copy_file_range(src.fileno(), fout.fileno(), size, offset_src, dst_offset) - dst_offset += r.size + dst_offset += r.size - extra_size for f in src_files.values(): f.close() From 34bd0242670e5d5263f6f1f2e7284bafddf65381 Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Tue, 2 Sep 2025 15:27:34 -0400 Subject: [PATCH 03/13] gguf-py : fix flake8 lint --- gguf-py/gguf/utility.py | 1 + 1 file changed, 1 insertion(+) diff --git a/gguf-py/gguf/utility.py b/gguf-py/gguf/utility.py index 80563238f01fe..90bd4d48b7f4c 100644 --- a/gguf-py/gguf/utility.py +++ b/gguf-py/gguf/utility.py @@ -303,6 +303,7 @@ def best_alignment_offset(ranges: tuple[LocalTensorRange, ...], alignment: int): best_offset = offset return best_offset + # (assuming this is only called where os.copy_file_range is present) # # Copy tensor ranges using os.copy_file_range with aligned offsets and sizes From 6792f66a9329e50c730559f616aae71b034e0087 Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Thu, 4 Sep 2025 17:40:11 -0400 Subject: [PATCH 04/13] convert : detect filesystem block size for reflinks * convert : use direct copies when possible Using os.copy_file_range where available, and falling back to shutil.copyfileobj otherwise. * gguf : handle misaligned offset more cleanly --- convert_hf_to_gguf.py | 6 +- ggml/src/gguf.cpp | 10 +-- gguf-py/gguf/gguf_writer.py | 40 ++++----- gguf-py/gguf/lazy.py | 12 ++- gguf-py/gguf/utility.py | 170 ++++++++++++++++++++---------------- 5 files changed, 133 insertions(+), 105 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 66ef7b591b4df..f14eef1452e4a 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -80,6 +80,7 @@ class ModelBase: is_big_endian: bool endianess: gguf.GGUFEndian use_temp_file: bool + use_reflinks: bool lazy: bool dry_run: bool hparams: dict[str, Any] @@ -119,6 +120,7 @@ def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, self.is_big_endian = is_big_endian self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE self.use_temp_file = use_temp_file + self.use_reflinks = use_reflinks self.lazy = not eager or (remote_hf_model_id is not None) self.dry_run = dry_run self.remote_hf_model_id = remote_hf_model_id @@ -133,7 +135,7 @@ def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, # Configure GGUF Writer self.gguf_writer = gguf.GGUFWriter(path=None, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file, split_max_tensors=split_max_tensors, split_max_size=split_max_size, dry_run=dry_run, small_first_shard=small_first_shard, - use_reflinks=use_reflinks) + use_reflinks=self.use_reflinks) # Mistral specific self.disable_mistral_community_chat_template = disable_mistral_community_chat_template @@ -202,7 +204,7 @@ def index_tensors(self, remote_hf_model_id: str | None = None) -> dict[str, Mode logger.info(f"gguf: indexing model part '{part_name}'") ctx: ContextManager[Any] if is_safetensors: - ctx = cast(ContextManager[Any], gguf.utility.SafetensorsLocal(self.dir_model / part_name)) + ctx = cast(ContextManager[Any], gguf.utility.SafetensorsLocal(self.dir_model 
/ part_name, reflink=self.use_reflinks)) else: ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", mmap=True, weights_only=True)) diff --git a/ggml/src/gguf.cpp b/ggml/src/gguf.cpp index 9673bf78ba20f..167dce3f2a3fa 100644 --- a/ggml/src/gguf.cpp +++ b/ggml/src/gguf.cpp @@ -624,16 +624,16 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par ctx->size = 0; for (size_t i = 0; i < ctx->info.size(); ++i) { const gguf_tensor_info & ti = ctx->info[i]; - // HACK: bypass the continuity check - ctx->size = ti.offset; - if (ti.offset != ctx->size) { + // alignment offset is only necessary for GGUF converted with reflinks + const size_t align_offset = ti.offset % ctx->alignment; + if (ti.offset - align_offset != ctx->size) { GGML_LOG_ERROR("%s: tensor '%s' has offset %" PRIu64 ", expected %zu\n", - __func__, ti.t.name, ti.offset, ctx->size); + __func__, ti.t.name, ti.offset, ctx->size + align_offset); GGML_LOG_ERROR("%s: failed to read tensor data\n", __func__); gguf_free(ctx); return nullptr; } - size_t padded_size = GGML_PAD(ggml_nbytes(&ti.t), ctx->alignment); + size_t padded_size = GGML_PAD(ggml_nbytes(&ti.t) + align_offset, ctx->alignment); if (SIZE_MAX - ctx->size < padded_size) { GGML_LOG_ERROR("%s: tensor '%s' size overflow, cannot accumulate size %zu + %zu\n", __func__, ti.t.name, ctx->size, padded_size); diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index 075b381c5997c..5258fa868c6d4 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -30,7 +30,7 @@ ) from .quants import quant_shape_from_byte_shape -from .utility import LocalTensorRange, best_alignment_offset, reflink_tensor_ranges +from .utility import LocalTensorRange, best_extra_offset logger = logging.getLogger(__name__) @@ -94,7 +94,7 @@ def __init__( self.endianess = endianess self.data_alignment = GGUF_DEFAULT_ALIGNMENT self.use_reflinks = use_reflinks and hasattr(os, "copy_file_range") - self.use_temp_file = use_temp_file if not self.use_reflinks else False + self.use_temp_file = False if self.use_reflinks else use_temp_file self.temp_file = None self.tensors = [{}] self.kv_data = [{}] @@ -110,10 +110,6 @@ def __init__( if self.small_first_shard: self.tensors.append({}) - if self.use_reflinks: - # common default block size for COW filesystems - self.add_custom_alignment(4096) - self.add_architecture() def get_total_parameter_count(self) -> tuple[int, int, int, int]: @@ -185,6 +181,15 @@ def open_output_file(self, path: Path | None = None) -> None: self.fout = [open(filename, "wb") for filename in filenames] self.state = WriterState.EMPTY + if self.use_reflinks: + # reflinks require alignment to the filesystem blocks + block_size = os.stat(self.path.parent).st_blksize + # necessary to get an appropriate data start offset + # when padding for reflinks; + # using the real alignment (8 bytes, from safetensors) + # would result in a unusable base data offset + self.add_custom_alignment(block_size) + def print_plan(self) -> list[Path]: logger.info("Writing the following files:") assert self.path is not None @@ -264,11 +269,11 @@ def write_ti_data_to_file(self) -> None: offset_tensor = 0 for name, ti in tensors.items(): - align_offset = 0 + extra_offset = 0 if self.use_reflinks: ranges: tuple[LocalTensorRange, ...] 
= getattr(ti.tensor, "_ranges", ()) if len(ranges) > 0: - align_offset = best_alignment_offset(ranges, self.data_alignment) + extra_offset = best_extra_offset(ranges, offset_tensor) ti_data += self._pack_val(name, GGUFValueType.STRING, add_vtype=False) n_dims = len(ti.shape) @@ -276,8 +281,8 @@ def write_ti_data_to_file(self) -> None: for j in range(n_dims): ti_data += self._pack("Q", ti.shape[n_dims - 1 - j]) ti_data += self._pack("I", ti.dtype) - ti_data += self._pack("Q", offset_tensor + align_offset) - offset_tensor += GGUFWriter.ggml_pad(ti.nbytes + align_offset, self.data_alignment) + ti_data += self._pack("Q", offset_tensor + extra_offset) + offset_tensor += GGUFWriter.ggml_pad(ti.nbytes + extra_offset, self.data_alignment) fout.write(ti_data) fout.flush() @@ -405,13 +410,12 @@ def add_tensor( def write_padding(self, fp: IO[bytes], n: int, align: int | None = None) -> None: pad = GGUFWriter.ggml_pad(n, align if align is not None else self.data_alignment) - n if pad != 0: - fp.write(bytes([0] * pad)) + fp.write(b"\x00" * pad) def write_tensor_data(self, tensor: np.ndarray[Any, Any]) -> None: if self.state is not WriterState.TI_DATA and self.state is not WriterState.WEIGHTS: raise ValueError(f'Expected output file to contain tensor info or weights, got {self.state}') assert self.fout is not None - assert not self.use_reflinks # TODO: handle this here too if self.endianess == GGUFEndian.BIG: tensor.byteswap(inplace=True) @@ -432,7 +436,7 @@ def write_tensor_data(self, tensor: np.ndarray[Any, Any]) -> None: self.write_padding(fout, fout.tell()) tensor.tofile(fout) - self.write_padding(fout, tensor.nbytes) + self.write_padding(fout, fout.tell()) self.state = WriterState.WEIGHTS @@ -467,18 +471,14 @@ def write_tensors_to_file(self, *, progress: bool = False) -> None: for name, ti in tensors.items(): assert ti.tensor is not None # can only iterate once over the tensors assert ti.tensor.nbytes == ti.nbytes - if self.use_reflinks and len(ranges := getattr(ti.tensor, "_ranges", ())) > 0: + if self.use_reflinks and len(getattr(ti.tensor, "_ranges", ())) > 0: logger.debug(f"using reflinks for {name}") - start_offset = fout.tell() - reflink_tensor_ranges(fout, ranges, self.data_alignment) - self.write_padding(fout, fout.tell() - start_offset) - else: - ti.tensor.tofile(fout) - self.write_padding(fout, ti.nbytes) + ti.tensor.tofile(fout) if shard_bar is not None: shard_bar.update(ti.nbytes) if bar is not None: bar.update(ti.nbytes) + self.write_padding(fout, fout.tell()) ti.tensor = None else: self.temp_file.seek(0) diff --git a/gguf-py/gguf/lazy.py b/gguf-py/gguf/lazy.py index 70ffb8d3b7ded..91214191a8a22 100644 --- a/gguf-py/gguf/lazy.py +++ b/gguf-py/gguf/lazy.py @@ -1,12 +1,13 @@ from __future__ import annotations from abc import ABC, ABCMeta, abstractmethod +from io import BufferedWriter import logging from typing import Any, Callable import numpy as np from numpy.typing import DTypeLike -from .utility import LocalTensorRange +from .utility import LocalTensorRange, copy_tensor_ranges logger = logging.getLogger(__name__) @@ -224,8 +225,11 @@ def astype(self, dtype, *args, **kwargs): ranges = self._ranges if self._meta.dtype == dtype else () return type(self)(meta=meta, args=full_args, kwargs=kwargs, func=(lambda a, *args, **kwargs: a.astype(*args, **kwargs)), ranges=ranges) - def tofile(self, *args, **kwargs): - eager = LazyNumpyTensor.to_eager(self) - return eager.tofile(*args, **kwargs) + def tofile(self, fid, *args, **kwargs): + if isinstance(fid, BufferedWriter) and len(self._ranges) > 0: 
+ return copy_tensor_ranges(fid, self._ranges) + else: + eager = LazyNumpyTensor.to_eager(self) + return eager.tofile(fid, *args, **kwargs) # TODO: __array_function__ diff --git a/gguf-py/gguf/utility.py b/gguf-py/gguf/utility.py index 90bd4d48b7f4c..78a621e976f0e 100644 --- a/gguf-py/gguf/utility.py +++ b/gguf-py/gguf/utility.py @@ -7,6 +7,7 @@ import os import json +import shutil import logging import numpy as np @@ -281,99 +282,116 @@ def _get_request_headers(cls) -> dict[str, str]: @dataclass class LocalTensorRange: filename: Path + block_size: int offset: int size: int -def best_alignment_offset(ranges: tuple[LocalTensorRange, ...], alignment: int): +def best_extra_offset(ranges: tuple[LocalTensorRange, ...], current_offset: int) -> int: hist: dict[int, int] = {} + max_block_size = 0 for r in ranges: - align_offset = r.offset % alignment - if align_offset not in hist: - hist[align_offset] = 0 - hist[align_offset] += r.size + # Ensure minimal alignment is 8 bytes (common with safetensors) + # and that the block size is valid + if r.offset % 8 == 0 and r.block_size > 0: + align_offset = r.offset % r.block_size + if align_offset not in hist: + hist[align_offset] = 0 + hist[align_offset] += r.size + if r.block_size > max_block_size: + max_block_size = r.block_size best_offset = 0 best_size = 0 for offset, size in hist.items(): - # Ensure minimal alignment is 8-bytes (common with safetensors) - if size > best_size and offset % 8 == 0: + if size > best_size: best_size = size best_offset = offset + + if max_block_size > 0: + # the offset needs to be aligned properly + # or else there's probably a block size mismatch + assert current_offset % max_block_size == 0, current_offset % max_block_size + return best_offset -# (assuming this is only called where os.copy_file_range is present) -# # Copy tensor ranges using os.copy_file_range with aligned offsets and sizes # to make it more likely that copy-on-write is used where possible. # Block alignment is necessary for BTRFS and XFS (and likely for ZFS too). -def reflink_tensor_ranges(fout: BufferedWriter, ranges: tuple[LocalTensorRange, ...], alignment: int = 4096): +# +# Falls back to shutil.copyfileobj when os.copy_file_range is not present. +def copy_tensor_ranges(fout: BufferedWriter, ranges: tuple[LocalTensorRange, ...]): assert len(ranges) > 0 dst_offset = fout.tell() - assert dst_offset % alignment == 0, dst_offset % alignment - align_offset = best_alignment_offset(ranges, alignment) - if len(ranges) == 1: - r = ranges[0] - with open(r.filename, "rb") as src: - offset_src = r.offset - align_offset - offset_src_end = r.offset + r.size - if offset_src_end % alignment != 0: - offset_src_end += alignment - (offset_src_end % alignment) - size = offset_src_end - offset_src - os.copy_file_range(src.fileno(), fout.fileno(), size, offset_src, dst_offset) - dst_offset += r.size + align_offset - else: - # All ranges need to have the same alignment offset - # Non-consecutive ranges need a patch block in between when the alignment offset is non-zero - src_files: dict[Path, BufferedReader] = {} - for r in ranges: - if r.filename not in src_files: - src_files[r.filename] = open(r.filename, "rb") - - for i, r in enumerate(ranges): - this_align_offset = r.offset % alignment - src = src_files[r.filename] - if this_align_offset != align_offset: - logger.debug(f"copy-on-write can't be used ({i}/{len(ranges)})") - # relying on os.copy_file_range to fallback to a non-aligned copy - - # Block 0, 1, 2, 3, 4, - # |___0000|0000000|0001111|1111111|111____| - # - # 1. 
blocks 0, 1 and 2 are copied from range[0] using os.copy_file_range - # 2. block 2 is partially overwritten with contents from range[1] - # 3. blocks 3 and 4 are copied from range[1] using os.copy_file_range - # - # (2 and 3 are repeated with further blocks if there are more ranges) - if i == 0: - extra_size = -align_offset - elif dst_offset % alignment == 0: - extra_size = 0 - else: - extra_size = alignment - (dst_offset % alignment) - extra_size = min(extra_size, r.size) - src.seek(r.offset) - buf = src.read(extra_size) - fout.seek(dst_offset) - fout.write(buf) - dst_offset += extra_size - if extra_size == r.size: - continue + extra_offset = best_extra_offset(ranges, dst_offset) - assert dst_offset % alignment == 0, dst_offset % alignment + if extra_offset > 0: + # initial padding + fout.write(b"\x00" * extra_offset) - offset_src = r.offset + extra_size - offset_src_end = r.offset + r.size - if offset_src_end % alignment != 0: - offset_src_end += alignment - (offset_src_end % alignment) - size = offset_src_end - offset_src - os.copy_file_range(src.fileno(), fout.fileno(), size, offset_src, dst_offset) - dst_offset += r.size - extra_size + dst_offset += extra_offset + start_offset = dst_offset - for f in src_files.values(): - f.close() + src_files: dict[Path, BufferedReader] = {} + for r in ranges: + if r.filename not in src_files: + src_files[r.filename] = open(r.filename, "rb") + + has_copy_file_range = hasattr(os, "copy_file_range") + + for i, r in enumerate(ranges): + src = src_files[r.filename] + if has_copy_file_range: + if r.block_size > 0 and (r.offset % r.block_size) == (start_offset % r.block_size): + # Attempting to align copies for reflinking + + # Block 0, 1, 2, 3, 4, + # |___0000|0000000|0001111|1111111|111____| + # + # 1. block 0 is partially overwritten with contents from range[0] + # 2. blocks 1 and 2 are copied from range[0] using os.copy_file_range + # 3. block 2 is partially overwritten with contents from range[1] + # 4. 
blocks 3 and 4 are copied from range[1] using os.copy_file_range + # (repeated for further ranges) + if dst_offset % r.block_size == 0: + extra_size = 0 + else: + extra_size = r.block_size - (dst_offset % r.block_size) + extra_size = min(extra_size, r.size) + src.seek(r.offset) + buf = src.read(extra_size) + fout.seek(dst_offset) + fout.write(buf) + dst_offset += extra_size + if extra_size == r.size: + continue + + assert dst_offset % r.block_size == 0, dst_offset % r.block_size + + offset_src = r.offset + extra_size + offset_src_end = r.offset + r.size + if offset_src_end % r.block_size != 0: + offset_src_end += r.block_size - (offset_src_end % r.block_size) + size = offset_src_end - offset_src + os.copy_file_range(src.fileno(), fout.fileno(), size, offset_src, dst_offset) + dst_offset += r.size - extra_size + else: + if r.block_size > 0: + logger.debug(f"misaligned for reflinking, falling back to copy ({i}/{len(ranges)})") + # not trying to use reflinks, but still using os.copy_file_range for speed + os.copy_file_range(src.fileno(), fout.fileno(), r.size, r.offset, dst_offset) + dst_offset += r.size + else: + # not using reflinks, fallback when os.copy_file_range is not supported + src.seek(r.offset) + fout.seek(dst_offset) + shutil.copyfileobj(src, fout, r.size) + dst_offset += r.size + + for f in src_files.values(): + f.close() fout.seek(dst_offset) @@ -399,10 +417,13 @@ class SafetensorsLocal: tensors: dict[str, LocalTensor] - def __init__(self, filename: Path): + def __init__(self, filename: Path, *, reflink: bool = False): + stat = os.stat(filename) + # using the preferred block size to signal whether reflinks are desired when copying + block_size = stat.st_blksize if reflink else -1 with open(filename, "rb") as f: metadata_length = int.from_bytes(f.read(8), byteorder='little') - file_size = os.stat(filename).st_size + file_size = stat.st_size if file_size < 8 + metadata_length: raise ValueError(f"Could not read complete metadata. Need {8 + metadata_length} bytes, got {file_size}") @@ -427,9 +448,10 @@ def __init__(self, filename: Path): dtype=meta["dtype"], shape=tuple(meta["shape"]), data_range=LocalTensorRange( - filename, - data_start_offset + meta["data_offsets"][0], - meta["data_offsets"][1] - meta["data_offsets"][0], + filename=filename, + block_size=block_size, + offset=data_start_offset + meta["data_offsets"][0], + size=meta["data_offsets"][1] - meta["data_offsets"][0], ), ) From fb879b40c05d204e3cb298b4ec5cbd3106155e82 Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Thu, 4 Sep 2025 18:43:10 -0400 Subject: [PATCH 05/13] convert : use F32 operations on Mamba A_log This matches the previous behavior for BF16 tensors. 
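(Illustrative sketch, not part of the patch; values are made up.) With bfloat16 tensors now
passing through unconverted, torch.exp would otherwise run in bfloat16 and round its result
to roughly 8 mantissa bits; upcasting with .float() first keeps the exponential in full
float32 precision, matching the old BF16 -> F32 promotion path:

    import torch

    a_log = torch.tensor([-4.7, 0.33, 2.1], dtype=torch.bfloat16)

    # exp computed in bfloat16, then upcast: the result was already rounded to bfloat16
    a_bf16 = (-torch.exp(a_log)).float()

    # upcast first (what this change does): exp runs in full float32
    a_f32 = -torch.exp(a_log.float())

    print((a_f32 - a_bf16).abs())  # small but nonzero differences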
--- convert_hf_to_gguf.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index f14eef1452e4a..8556c268258ef 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -4356,7 +4356,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter del bid # unused if name.endswith(".A_log"): - data_torch = -torch.exp(data_torch) + data_torch = -torch.exp(data_torch.float()) elif name.endswith(".dt_bias"): name = name.rpartition(".dt_bias")[0] + ".dt_proj.bias" elif name.endswith(".dt_norm_weight"): @@ -5829,7 +5829,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter if name.endswith(".A_log"): logger.debug("A_log --> A ==> " + new_name) - data_torch = -torch.exp(data_torch) + data_torch = -torch.exp(data_torch.float()) # [4 1 8192 1] -> [4 8192 1 1] if self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_CONV1D, bid): @@ -5934,7 +5934,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter if name.endswith(".A_log"): logger.debug("A_log --> A ==> " + new_name) - data_torch = -torch.exp(data_torch) + data_torch = -torch.exp(data_torch.float()) yield (new_name, data_torch) @@ -6042,7 +6042,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter if name.endswith(".A_log"): logger.debug("A_log --> A ==> " + new_name) - data_torch = -torch.exp(data_torch) + data_torch = -torch.exp(data_torch.float()) yield (new_name, data_torch) From cec344950733b9a04dcd1fd67aaf51562e070f22 Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Thu, 4 Sep 2025 19:08:09 -0400 Subject: [PATCH 06/13] convert : allow sharding reflinked models --- gguf-py/gguf/gguf_writer.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index 5258fa868c6d4..5fda08a6714a2 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -184,11 +184,15 @@ def open_output_file(self, path: Path | None = None) -> None: if self.use_reflinks: # reflinks require alignment to the filesystem blocks block_size = os.stat(self.path.parent).st_blksize - # necessary to get an appropriate data start offset - # when padding for reflinks; - # using the real alignment (8 bytes, from safetensors) - # would result in a unusable base data offset - self.add_custom_alignment(block_size) + # necessary to get an appropriate data start offset when padding for reflinks; + # using the real alignment (8 bytes, from safetensors) would result in a unusable base data offset + self.data_alignment = block_size + # for all shards to allow reading them on their own + for i, kv in enumerate(self.kv_data): + # insert at the start of the key-values + if Keys.General.ALIGNMENT in kv: + del kv[Keys.General.ALIGNMENT] + self.kv_data[i] = { Keys.General.ALIGNMENT: GGUFValue(block_size, GGUFValueType.UINT32), **kv } def print_plan(self) -> list[Path]: logger.info("Writing the following files:") From ec07416dcfc84ca4c9fb06f77bea29fb355c104a Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Thu, 4 Sep 2025 22:06:09 -0400 Subject: [PATCH 07/13] gguf-py : improve reflink size logging * gguf-py : move reflinking functions to lazy --- gguf-py/gguf/gguf_writer.py | 16 ++-- gguf-py/gguf/lazy.py | 147 ++++++++++++++++++++++++++++++++++-- gguf-py/gguf/utility.py | 111 --------------------------- 3 files changed, 149 insertions(+), 125 deletions(-) diff --git 
a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index 5fda08a6714a2..6429ea180dc63 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -29,8 +29,8 @@ ExpertGatingFuncType, ) +from .lazy import best_extra_offset, count_reflinkable_size from .quants import quant_shape_from_byte_shape -from .utility import LocalTensorRange, best_extra_offset logger = logging.getLogger(__name__) @@ -192,7 +192,7 @@ def open_output_file(self, path: Path | None = None) -> None: # insert at the start of the key-values if Keys.General.ALIGNMENT in kv: del kv[Keys.General.ALIGNMENT] - self.kv_data[i] = { Keys.General.ALIGNMENT: GGUFValue(block_size, GGUFValueType.UINT32), **kv } + self.kv_data[i] = {Keys.General.ALIGNMENT: GGUFValue(block_size, GGUFValueType.UINT32), **kv} def print_plan(self) -> list[Path]: logger.info("Writing the following files:") @@ -200,7 +200,9 @@ def print_plan(self) -> list[Path]: filenames = self.format_shard_names(self.path) assert len(filenames) == len(self.tensors) for name, tensors in zip(filenames, self.tensors): - logger.info(f"{name}: n_tensors = {len(tensors)}, total_size = {GGUFWriter.format_n_bytes_to_str(sum(ti.nbytes for ti in tensors.values()))}") + total_size = sum(ti.nbytes for ti in tensors.values()) + reflinkable_size = count_reflinkable_size(ti.tensor for ti in tensors.values()) if self.use_reflinks else 0 + logger.info(f"{name}: n_tensors = {len(tensors)}, total_size = {GGUFWriter.format_n_bytes_to_str(total_size)}{', reflinked = ' + GGUFWriter.format_n_bytes_to_str(total_size - reflinkable_size) if self.use_reflinks else ''}") if self.dry_run: logger.info("Dry run, not writing files") @@ -275,9 +277,7 @@ def write_ti_data_to_file(self) -> None: for name, ti in tensors.items(): extra_offset = 0 if self.use_reflinks: - ranges: tuple[LocalTensorRange, ...] = getattr(ti.tensor, "_ranges", ()) - if len(ranges) > 0: - extra_offset = best_extra_offset(ranges, offset_tensor) + extra_offset = best_extra_offset(ti.tensor, offset_tensor) ti_data += self._pack_val(name, GGUFValueType.STRING, add_vtype=False) n_dims = len(ti.shape) @@ -472,11 +472,9 @@ def write_tensors_to_file(self, *, progress: bool = False) -> None: shard_bar.reset(total=(total if total > 0 else None)) # relying on the fact that Python dicts preserve insertion order (since 3.7) - for name, ti in tensors.items(): + for ti in tensors.values(): assert ti.tensor is not None # can only iterate once over the tensors assert ti.tensor.nbytes == ti.nbytes - if self.use_reflinks and len(getattr(ti.tensor, "_ranges", ())) > 0: - logger.debug(f"using reflinks for {name}") ti.tensor.tofile(fout) if shard_bar is not None: shard_bar.update(ti.nbytes) diff --git a/gguf-py/gguf/lazy.py b/gguf-py/gguf/lazy.py index 91214191a8a22..16497319321d1 100644 --- a/gguf-py/gguf/lazy.py +++ b/gguf-py/gguf/lazy.py @@ -1,13 +1,18 @@ from __future__ import annotations from abc import ABC, ABCMeta, abstractmethod -from io import BufferedWriter -import logging -from typing import Any, Callable +from io import BufferedReader, BufferedWriter +from pathlib import Path +from typing import Any, Callable, Iterable +import logging import numpy as np +import os +import shutil + from numpy.typing import DTypeLike -from .utility import LocalTensorRange, copy_tensor_ranges + +from .utility import LocalTensorRange logger = logging.getLogger(__name__) @@ -210,6 +215,7 @@ class LazyNumpyTensor(LazyBase): _tensor_type = np.ndarray shape: tuple[int, ...] 
# Makes the type checker happy in quants.py + nbytes: int @classmethod def meta_with_dtype_and_shape(cls, dtype: DTypeLike, shape: tuple[int, ...]) -> np.ndarray[Any, Any]: @@ -227,9 +233,140 @@ def astype(self, dtype, *args, **kwargs): def tofile(self, fid, *args, **kwargs): if isinstance(fid, BufferedWriter) and len(self._ranges) > 0: - return copy_tensor_ranges(fid, self._ranges) + return copy_tensor_ranges(self, fid) else: eager = LazyNumpyTensor.to_eager(self) return eager.tofile(fid, *args, **kwargs) # TODO: __array_function__ + + +# For aligning blocks when reflinking +def best_extra_offset(t: np.ndarray | LazyNumpyTensor | None, current_offset: int) -> int: + if not isinstance(t, LazyNumpyTensor): + # no file ranges, no need for an offset + return 0 + + ranges = t._ranges + + histogram: dict[int, int] = {} + + max_block_size = 0 + for r in ranges: + # Ensure minimal alignment is 8 bytes (common with safetensors) + # and that the block size is valid + if r.offset % 8 == 0 and r.block_size > 0: + align_offset = r.offset % r.block_size + if align_offset not in histogram: + histogram[align_offset] = 0 + histogram[align_offset] += r.size + if r.block_size > max_block_size: + max_block_size = r.block_size + + best_offset = 0 + best_size = 0 + for offset, size in histogram.items(): + if size > best_size: + best_size = size + best_offset = offset + + if max_block_size > 0: + # the offset needs to be aligned properly + # or else there's probably a block size mismatch + assert current_offset % max_block_size == 0, current_offset % max_block_size + + return best_offset + + +def count_reflinkable_size(tensors: Iterable[np.ndarray | LazyNumpyTensor | None]) -> int: + if not hasattr(os, "copy_file_range"): + return 0 + + size = 0 + for t in tensors: + if isinstance(t, LazyNumpyTensor) and len(t._ranges) > 0: + align_offset = best_extra_offset(t, 0) + for range in t._ranges: + if range.block_size > 0 and range.offset % range.block_size == align_offset: + size += range.size + return size + + +# Copy tensor ranges using os.copy_file_range with aligned offsets and sizes +# to make it more likely that copy-on-write is used where possible. +# Block alignment is necessary for BTRFS and XFS (and likely for ZFS too). +# +# Falls back to shutil.copyfileobj when os.copy_file_range is not present. +def copy_tensor_ranges(t: LazyNumpyTensor, fout: BufferedWriter): + ranges = t._ranges + assert len(ranges) > 0 + dst_offset = fout.tell() + extra_offset = best_extra_offset(t, dst_offset) + + if extra_offset > 0: + # initial padding + fout.write(b"\x00" * extra_offset) + + dst_offset += extra_offset + start_offset = dst_offset + + src_files: dict[Path, BufferedReader] = {} + for r in ranges: + if r.filename not in src_files: + src_files[r.filename] = open(r.filename, "rb") + + has_copy_file_range = hasattr(os, "copy_file_range") + + for i, r in enumerate(ranges): + src = src_files[r.filename] + if has_copy_file_range: + if r.block_size > 0 and (r.offset % r.block_size) == (start_offset % r.block_size): + # Attempting to align copies for reflinking + + # Block 0, 1, 2, 3, 4, + # |___0000|0000000|0001111|1111111|111____| + # + # 1. block 0 is partially overwritten with contents from range[0] + # 2. blocks 1 and 2 are copied from range[0] using os.copy_file_range + # 3. block 2 is partially overwritten with contents from range[1] + # 4. 
blocks 3 and 4 are copied from range[1] using os.copy_file_range + # (repeated for further ranges) + if dst_offset % r.block_size == 0: + extra_size = 0 + else: + extra_size = r.block_size - (dst_offset % r.block_size) + extra_size = min(extra_size, r.size) + src.seek(r.offset) + buf = src.read(extra_size) + fout.seek(dst_offset) + fout.write(buf) + dst_offset += extra_size + if extra_size == r.size: + continue + + assert dst_offset % r.block_size == 0, dst_offset % r.block_size + + offset_src = r.offset + extra_size + offset_src_end = r.offset + r.size + if offset_src_end % r.block_size != 0: + offset_src_end += r.block_size - (offset_src_end % r.block_size) + size = offset_src_end - offset_src + os.copy_file_range(src.fileno(), fout.fileno(), size, offset_src, dst_offset) + dst_offset += r.size - extra_size + else: + if r.block_size > 0: + logger.debug(f"misaligned for reflinking, falling back to copy ({i}/{len(ranges)})") + # not trying to use reflinks, but still using os.copy_file_range for speed + os.copy_file_range(src.fileno(), fout.fileno(), r.size, r.offset, dst_offset) + dst_offset += r.size + else: + # not using reflinks, fallback when os.copy_file_range is not supported + src.seek(r.offset) + fout.seek(dst_offset) + shutil.copyfileobj(src, fout, r.size) + dst_offset += r.size + + for f in src_files.values(): + f.close() + + fout.seek(dst_offset) diff --git a/gguf-py/gguf/utility.py b/gguf-py/gguf/utility.py index 78a621e976f0e..d621ed811bc28 100644 --- a/gguf-py/gguf/utility.py +++ b/gguf-py/gguf/utility.py @@ -1,13 +1,11 @@ from __future__ import annotations from dataclasses import dataclass -from io import BufferedReader, BufferedWriter from pathlib import Path from typing import Literal import os import json -import shutil import logging import numpy as np @@ -287,115 +285,6 @@ class LocalTensorRange: size: int -def best_extra_offset(ranges: tuple[LocalTensorRange, ...], current_offset: int) -> int: - hist: dict[int, int] = {} - - max_block_size = 0 - for r in ranges: - # Ensure minimal alignment is 8 bytes (common with safetensors) - # and that the block size is valid - if r.offset % 8 == 0 and r.block_size > 0: - align_offset = r.offset % r.block_size - if align_offset not in hist: - hist[align_offset] = 0 - hist[align_offset] += r.size - if r.block_size > max_block_size: - max_block_size = r.block_size - - best_offset = 0 - best_size = 0 - for offset, size in hist.items(): - if size > best_size: - best_size = size - best_offset = offset - - if max_block_size > 0: - # the offset needs to be aligned properly - # or else there's probably a block size mismatch - assert current_offset % max_block_size == 0, current_offset % max_block_size - - return best_offset - - -# Copy tensor ranges using os.copy_file_range with aligned offsets and sizes -# to make it more likely that copy-on-write is used where possible. -# Block alignment is necessary for BTRFS and XFS (and likely for ZFS too). -# -# Falls back to shutil.copyfileobj when os.copy_file_range is not present. 
-def copy_tensor_ranges(fout: BufferedWriter, ranges: tuple[LocalTensorRange, ...]): - assert len(ranges) > 0 - dst_offset = fout.tell() - extra_offset = best_extra_offset(ranges, dst_offset) - - if extra_offset > 0: - # initial padding - fout.write(b"\x00" * extra_offset) - - dst_offset += extra_offset - start_offset = dst_offset - - src_files: dict[Path, BufferedReader] = {} - for r in ranges: - if r.filename not in src_files: - src_files[r.filename] = open(r.filename, "rb") - - has_copy_file_range = hasattr(os, "copy_file_range") - - for i, r in enumerate(ranges): - src = src_files[r.filename] - if has_copy_file_range: - if r.block_size > 0 and (r.offset % r.block_size) == (start_offset % r.block_size): - # Attempting to align copies for reflinking - - # Block 0, 1, 2, 3, 4, - # |___0000|0000000|0001111|1111111|111____| - # - # 1. block 0 is partially overwritten with contents from range[0] - # 2. blocks 1 and 2 are copied from range[0] using os.copy_file_range - # 3. block 2 is partially overwritten with contents from range[1] - # 4. blocks 3 and 4 are copied from range[1] using os.copy_file_range - # (repeated for further ranges) - if dst_offset % r.block_size == 0: - extra_size = 0 - else: - extra_size = r.block_size - (dst_offset % r.block_size) - extra_size = min(extra_size, r.size) - src.seek(r.offset) - buf = src.read(extra_size) - fout.seek(dst_offset) - fout.write(buf) - dst_offset += extra_size - if extra_size == r.size: - continue - - assert dst_offset % r.block_size == 0, dst_offset % r.block_size - - offset_src = r.offset + extra_size - offset_src_end = r.offset + r.size - if offset_src_end % r.block_size != 0: - offset_src_end += r.block_size - (offset_src_end % r.block_size) - size = offset_src_end - offset_src - os.copy_file_range(src.fileno(), fout.fileno(), size, offset_src, dst_offset) - dst_offset += r.size - extra_size - else: - if r.block_size > 0: - logger.debug(f"misaligned for reflinking, falling back to copy ({i}/{len(ranges)})") - # not trying to use reflinks, but still using os.copy_file_range for speed - os.copy_file_range(src.fileno(), fout.fileno(), r.size, r.offset, dst_offset) - dst_offset += r.size - else: - # not using reflinks, fallback when os.copy_file_range is not supported - src.seek(r.offset) - fout.seek(dst_offset) - shutil.copyfileobj(src, fout, r.size) - dst_offset += r.size - - for f in src_files.values(): - f.close() - - fout.seek(dst_offset) - - @dataclass class LocalTensor: dtype: str From be600e262244019bdaff4d7ddf7d9905c846fe2f Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Thu, 4 Sep 2025 23:10:28 -0400 Subject: [PATCH 08/13] convert : more robust default ftype detection --- convert_hf_to_gguf.py | 61 +++++++++++++++++++++++++++++-------------- 1 file changed, 42 insertions(+), 19 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 8556c268258ef..e2a8d1f56bfdb 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -63,6 +63,7 @@ class ModelType(IntEnum): @dataclass class ModelTensorInfo: load: Callable[[], Tensor] + size: int # in elements src_type: str src_qtype: gguf.GGMLQuantizationType | None = None dst_qtype: gguf.GGMLQuantizationType | None = None @@ -76,6 +77,7 @@ class ModelBase: dir_model: Path ftype: gguf.LlamaFileType + ftype_guessed: bool fname_out: Path is_big_endian: bool endianess: gguf.GGUFEndian @@ -116,6 +118,7 @@ def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, self.dir_model = dir_model self.ftype = ftype + self.ftype_guessed = ftype == 
gguf.LlamaFileType.GUESSED self.fname_out = fname_out self.is_big_endian = is_big_endian self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE @@ -132,6 +135,34 @@ def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, self.dequant_model() + if self.ftype == gguf.LlamaFileType.GUESSED: + # find out the most common type + hist: dict[gguf.GGMLQuantizationType, int] = {} + for t in self.model_tensors.values(): + if t.dst_qtype is not None: + if t.dst_qtype not in hist: + hist[t.dst_qtype] = 0 + hist[t.dst_qtype] += t.size + max_qtype = gguf.GGMLQuantizationType.F32 + max_size = 0 + for qtype, size in hist.items(): + if size > max_size: + max_qtype = qtype + max_size = size + # TODO: add more type if they're used as dst_qtypes + if max_qtype == gguf.GGMLQuantizationType.F32: + self.ftype = gguf.LlamaFileType.ALL_F32 + elif max_qtype == gguf.GGMLQuantizationType.F16: + self.ftype = gguf.LlamaFileType.MOSTLY_F16 + elif max_qtype == gguf.GGMLQuantizationType.BF16: + self.ftype = gguf.LlamaFileType.MOSTLY_BF16 + elif max_qtype == gguf.GGMLQuantizationType.Q8_0: + self.ftype = gguf.LlamaFileType.MOSTLY_Q8_0 + elif max_qtype == gguf.GGMLQuantizationType.Q4_1: + self.ftype = gguf.LlamaFileType.MOSTLY_Q4_1 + elif max_qtype == gguf.GGMLQuantizationType.TQ1_0: + self.ftype = gguf.LlamaFileType.MOSTLY_TQ1_0 + # Configure GGUF Writer self.gguf_writer = gguf.GGUFWriter(path=None, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file, split_max_tensors=split_max_tensors, split_max_size=split_max_size, dry_run=dry_run, small_first_shard=small_first_shard, @@ -167,6 +198,7 @@ def index_tensors(self, remote_hf_model_id: str | None = None) -> dict[str, Mode qtype = LazyTorchTensor._qtype_map.get(dtype) tensors[name] = ModelTensorInfo( load=lambda r=remote_tensor: LazyTorchTensor.from_remote_tensor(r), + size=math.prod(remote_tensor.shape), src_type=str(dtype), src_qtype=qtype, dst_qtype=qtype, @@ -215,12 +247,14 @@ def index_tensors(self, remote_hf_model_id: str | None = None) -> dict[str, Mode if is_safetensors: data: gguf.utility.LocalTensor = model_part[name] dtype = LazyTorchTensor._dtype_str_map[data.dtype] + size = math.prod(data.shape) if self.lazy: data_gen = lambda data=data: LazyTorchTensor.from_local_tensor(data) # noqa: E731 else: data_gen = lambda data=data, dtype=dtype: torch.from_numpy(data.mmap_bytes()).view(dtype).reshape(data.shape) # noqa: E731 else: data_torch: Tensor = model_part[name] + size = data_torch.numel() dtype = data_torch.dtype if self.lazy: data_gen = lambda data=data_torch: LazyTorchTensor.from_eager(data) # noqa: E731 @@ -229,6 +263,7 @@ def index_tensors(self, remote_hf_model_id: str | None = None) -> dict[str, Mode qtype = LazyTorchTensor._qtype_map.get(dtype) tensors[name] = ModelTensorInfo( load=data_gen, + size=size, src_type=str(dtype), src_qtype=qtype, dst_qtype=qtype, @@ -333,6 +368,7 @@ def dequant_gptq(g_idx: Tensor, qweight: Tensor, qzeros: Tensor, scales: Tensor) s = self.model_tensors[name] self.model_tensors[weight_name] = ModelTensorInfo( load=lambda w=w, s=s: dequant_bitnet(w.load(), s.load()), + size=w.size, src_type="bitnet", src_qtype=gguf.GGMLQuantizationType.F32, dst_qtype=gguf.GGMLQuantizationType.TQ1_0, @@ -346,6 +382,7 @@ def dequant_gptq(g_idx: Tensor, qweight: Tensor, qzeros: Tensor, scales: Tensor) s = self.model_tensors[name] self.model_tensors[weight_name] = ModelTensorInfo( load=lambda w=w, s=s: dequant_simple(w.load(), s.load()), + size=w.size, 
src_type=w.src_type, src_qtype=gguf.GGMLQuantizationType.F32, dst_qtype=gguf.GGMLQuantizationType.BF16, # TODO: change to FP8 once natively supported @@ -364,6 +401,7 @@ def dequant_gptq(g_idx: Tensor, qweight: Tensor, qzeros: Tensor, scales: Tensor) load=lambda g=g_idx, z=qzeros, w=qweight, s=scales: dequant_gptq( g.load(), w.load(), z.load(), s.load() ), + size=qweight.size, # TODO: use more accurate value src_type=f"GPTQ-{bits}bit", src_qtype=gguf.GGMLQuantizationType.F32, dst_qtype=gguf.GGMLQuantizationType.Q8_0 if bits == 8 else gguf.GGMLQuantizationType.Q4_1, @@ -530,7 +568,9 @@ def prepare_tensors(self): # No override (data_qtype is False), or wants to be quantized (data_qtype is True) if isinstance(data_qtype, bool): - if self.ftype == gguf.LlamaFileType.ALL_F32: + if self.ftype_guessed: + data_qtype = old_qtype if tensor_info is None or tensor_info.dst_qtype is None else tensor_info.dst_qtype + elif self.ftype == gguf.LlamaFileType.ALL_F32: data_qtype = gguf.GGMLQuantizationType.F32 elif self.ftype == gguf.LlamaFileType.MOSTLY_F16: data_qtype = gguf.GGMLQuantizationType.F16 @@ -542,8 +582,6 @@ def prepare_tensors(self): data_qtype = gguf.GGMLQuantizationType.TQ1_0 elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ2_0: data_qtype = gguf.GGMLQuantizationType.TQ2_0 - elif self.ftype == gguf.LlamaFileType.GUESSED: - data_qtype = old_qtype if tensor_info is None or tensor_info.dst_qtype is None else tensor_info.dst_qtype else: raise ValueError(f"Unknown file type: {self.ftype.name}") @@ -707,23 +745,8 @@ def prepare_metadata(self, vocab_only: bool): total_params = self.gguf_writer.get_total_parameter_count()[0] - # Apply heuristics to figure out typical tensor encoding based on first layer tensor encoding type - # TODO: get type name from `quantization_config` field when present? - if self.ftype == gguf.LlamaFileType.GUESSED: - # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie. - _, first_tensor = next(self.get_tensors()) - logger.info(f"first tensor type is {first_tensor.dtype}") - if first_tensor.dtype == torch.float16: - ftype = gguf.LlamaFileType.MOSTLY_F16 - elif first_tensor.dtype == torch.bfloat16: - ftype = gguf.LlamaFileType.MOSTLY_BF16 - else: - ftype = gguf.LlamaFileType.ALL_F32 - else: - ftype = self.ftype - # Extract the encoding scheme from the file type name. e.g. 
'gguf.LlamaFileType.MOSTLY_Q8_0' --> 'Q8_0' - output_type: str = ftype.name.partition("_")[2] + output_type: str = self.ftype.name.partition("_")[2] # Filename Output if self.fname_out.is_dir(): From 8ef4136b20e5ce89c549d35e96d87c566896fcd5 Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Thu, 4 Sep 2025 23:28:01 -0400 Subject: [PATCH 09/13] convert : remove unused field ModelTensorInfo.src_qtype --- convert_hf_to_gguf.py | 32 +++++++++++++------------------- 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index e2a8d1f56bfdb..102fc6b2368e0 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -65,8 +65,7 @@ class ModelTensorInfo: load: Callable[[], Tensor] size: int # in elements src_type: str - src_qtype: gguf.GGMLQuantizationType | None = None - dst_qtype: gguf.GGMLQuantizationType | None = None + auto_qtype: gguf.GGMLQuantizationType | None = None class ModelBase: @@ -139,17 +138,17 @@ def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, # find out the most common type hist: dict[gguf.GGMLQuantizationType, int] = {} for t in self.model_tensors.values(): - if t.dst_qtype is not None: - if t.dst_qtype not in hist: - hist[t.dst_qtype] = 0 - hist[t.dst_qtype] += t.size + if t.auto_qtype is not None: + if t.auto_qtype not in hist: + hist[t.auto_qtype] = 0 + hist[t.auto_qtype] += t.size max_qtype = gguf.GGMLQuantizationType.F32 max_size = 0 for qtype, size in hist.items(): if size > max_size: max_qtype = qtype max_size = size - # TODO: add more type if they're used as dst_qtypes + # TODO: add more type if they're used as auto_qtype if max_qtype == gguf.GGMLQuantizationType.F32: self.ftype = gguf.LlamaFileType.ALL_F32 elif max_qtype == gguf.GGMLQuantizationType.F16: @@ -200,8 +199,7 @@ def index_tensors(self, remote_hf_model_id: str | None = None) -> dict[str, Mode load=lambda r=remote_tensor: LazyTorchTensor.from_remote_tensor(r), size=math.prod(remote_tensor.shape), src_type=str(dtype), - src_qtype=qtype, - dst_qtype=qtype, + auto_qtype=qtype, ) return tensors @@ -265,8 +263,7 @@ def index_tensors(self, remote_hf_model_id: str | None = None) -> dict[str, Mode load=data_gen, size=size, src_type=str(dtype), - src_qtype=qtype, - dst_qtype=qtype, + auto_qtype=qtype, ) # verify tensor name presence and identify potentially missing files @@ -370,8 +367,7 @@ def dequant_gptq(g_idx: Tensor, qweight: Tensor, qzeros: Tensor, scales: Tensor) load=lambda w=w, s=s: dequant_bitnet(w.load(), s.load()), size=w.size, src_type="bitnet", - src_qtype=gguf.GGMLQuantizationType.F32, - dst_qtype=gguf.GGMLQuantizationType.TQ1_0, + auto_qtype=gguf.GGMLQuantizationType.TQ1_0, ) tensors_to_remove.append(name) elif quant_method == "fp8": @@ -384,8 +380,7 @@ def dequant_gptq(g_idx: Tensor, qweight: Tensor, qzeros: Tensor, scales: Tensor) load=lambda w=w, s=s: dequant_simple(w.load(), s.load()), size=w.size, src_type=w.src_type, - src_qtype=gguf.GGMLQuantizationType.F32, - dst_qtype=gguf.GGMLQuantizationType.BF16, # TODO: change to FP8 once natively supported + auto_qtype=gguf.GGMLQuantizationType.BF16, # TODO: change to FP8 once natively supported ) tensors_to_remove.append(name) elif quant_method == "gptq": @@ -403,8 +398,7 @@ def dequant_gptq(g_idx: Tensor, qweight: Tensor, qzeros: Tensor, scales: Tensor) ), size=qweight.size, # TODO: use more accurate value src_type=f"GPTQ-{bits}bit", - src_qtype=gguf.GGMLQuantizationType.F32, - dst_qtype=gguf.GGMLQuantizationType.Q8_0 if bits == 8 else 
gguf.GGMLQuantizationType.Q4_1, + auto_qtype=gguf.GGMLQuantizationType.Q8_0 if bits == 8 else gguf.GGMLQuantizationType.Q4_1, ) tensors_to_remove += [ base_name + n @@ -569,7 +563,7 @@ def prepare_tensors(self): # No override (data_qtype is False), or wants to be quantized (data_qtype is True) if isinstance(data_qtype, bool): if self.ftype_guessed: - data_qtype = old_qtype if tensor_info is None or tensor_info.dst_qtype is None else tensor_info.dst_qtype + data_qtype = old_qtype if tensor_info is None or tensor_info.auto_qtype is None else tensor_info.auto_qtype elif self.ftype == gguf.LlamaFileType.ALL_F32: data_qtype = gguf.GGMLQuantizationType.F32 elif self.ftype == gguf.LlamaFileType.MOSTLY_F16: @@ -8942,7 +8936,7 @@ def numpy(self) -> gguf.LazyNumpyTensor: meta=gguf.LazyNumpyTensor.meta_with_dtype_and_shape(dtype, self.shape), args=(self,), func=(lambda s: s.numpy()), - ranges=self._ranges + ranges=self._ranges, ) @classmethod From 2499e47cfdda0d3e25b833f161192742108fc0a3 Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Fri, 5 Sep 2025 10:45:41 -0400 Subject: [PATCH 10/13] gguf-py : allow previewing reflinked size on non-Linux platforms --- convert_hf_to_gguf.py | 1 - ggml/src/gguf.cpp | 2 +- gguf-py/gguf/gguf_writer.py | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 102fc6b2368e0..24f11c9af0e1b 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -738,7 +738,6 @@ def prepare_metadata(self, vocab_only: bool): super().prepare_metadata(vocab_only=vocab_only) total_params = self.gguf_writer.get_total_parameter_count()[0] - # Extract the encoding scheme from the file type name. e.g. 'gguf.LlamaFileType.MOSTLY_Q8_0' --> 'Q8_0' output_type: str = self.ftype.name.partition("_")[2] diff --git a/ggml/src/gguf.cpp b/ggml/src/gguf.cpp index 167dce3f2a3fa..135555a7debbd 100644 --- a/ggml/src/gguf.cpp +++ b/ggml/src/gguf.cpp @@ -624,7 +624,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par ctx->size = 0; for (size_t i = 0; i < ctx->info.size(); ++i) { const gguf_tensor_info & ti = ctx->info[i]; - // alignment offset is only necessary for GGUF converted with reflinks + // alignment offset only exists for GGUF converted with reflinks const size_t align_offset = ti.offset % ctx->alignment; if (ti.offset - align_offset != ctx->size) { GGML_LOG_ERROR("%s: tensor '%s' has offset %" PRIu64 ", expected %zu\n", diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index 6429ea180dc63..baea41cb0eef4 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -93,7 +93,7 @@ def __init__( self.arch = arch self.endianess = endianess self.data_alignment = GGUF_DEFAULT_ALIGNMENT - self.use_reflinks = use_reflinks and hasattr(os, "copy_file_range") + self.use_reflinks = use_reflinks self.use_temp_file = False if self.use_reflinks else use_temp_file self.temp_file = None self.tensors = [{}] From 34f37c283b5e2cfdf8f699ea4983a29bfeaaf54c Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Tue, 9 Sep 2025 11:00:00 -0400 Subject: [PATCH 11/13] convert : better logging of partially reflinkable tensors --- gguf-py/gguf/gguf_writer.py | 2 +- gguf-py/gguf/lazy.py | 19 +++++++++++-------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index baea41cb0eef4..db263f8d49928 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -201,7 +201,7 @@ def 
print_plan(self) -> list[Path]: assert len(filenames) == len(self.tensors) for name, tensors in zip(filenames, self.tensors): total_size = sum(ti.nbytes for ti in tensors.values()) - reflinkable_size = count_reflinkable_size(ti.tensor for ti in tensors.values()) if self.use_reflinks else 0 + reflinkable_size = count_reflinkable_size((name, ti.tensor) for name, ti in tensors.items()) if self.use_reflinks else 0 logger.info(f"{name}: n_tensors = {len(tensors)}, total_size = {GGUFWriter.format_n_bytes_to_str(total_size)}{', reflinked = ' + GGUFWriter.format_n_bytes_to_str(total_size - reflinkable_size) if self.use_reflinks else ''}") if self.dry_run: diff --git a/gguf-py/gguf/lazy.py b/gguf-py/gguf/lazy.py index 16497319321d1..7ea7d8f62cb09 100644 --- a/gguf-py/gguf/lazy.py +++ b/gguf-py/gguf/lazy.py @@ -278,17 +278,22 @@ def best_extra_offset(t: np.ndarray | LazyNumpyTensor | None, current_offset: in return best_offset -def count_reflinkable_size(tensors: Iterable[np.ndarray | LazyNumpyTensor | None]) -> int: +def count_reflinkable_size(tensors: Iterable[tuple[str, np.ndarray | LazyNumpyTensor | None]]) -> int: if not hasattr(os, "copy_file_range"): return 0 - size = 0 - for t in tensors: + for name, t in tensors: if isinstance(t, LazyNumpyTensor) and len(t._ranges) > 0: align_offset = best_extra_offset(t, 0) + misaligned = 0 for range in t._ranges: - if range.block_size > 0 and range.offset % range.block_size == align_offset: - size += range.size + if range.block_size > 0: + if range.offset % range.block_size == align_offset: + size += range.size + else: + misaligned += 1 + if misaligned > 0: + logger.debug(f"{name} misaligned for reflinking, fallback to copy for {misaligned} of {len(t._ranges)} parts") return size @@ -317,7 +322,7 @@ def copy_tensor_ranges(t: LazyNumpyTensor, fout: BufferedWriter): has_copy_file_range = hasattr(os, "copy_file_range") - for i, r in enumerate(ranges): + for r in ranges: src = src_files[r.filename] if has_copy_file_range: if r.block_size > 0 and (r.offset % r.block_size) == (start_offset % r.block_size): @@ -354,8 +359,6 @@ def copy_tensor_ranges(t: LazyNumpyTensor, fout: BufferedWriter): os.copy_file_range(src.fileno(), fout.fileno(), size, offset_src, dst_offset) dst_offset += r.size - extra_size else: - if r.block_size > 0: - logger.debug(f"misaligned for reflinking, falling back to copy ({i}/{len(ranges)})") # not trying to use reflinks, but still using os.copy_file_range for speed os.copy_file_range(src.fileno(), fout.fileno(), r.size, r.offset, dst_offset) dst_offset += r.size From 34680f07d2366df3c66b76d8c51391ab891b7762 Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Tue, 9 Sep 2025 11:04:44 -0400 Subject: [PATCH 12/13] gguf-py : handle cross-filesystem file range copies --- gguf-py/gguf/lazy.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/gguf-py/gguf/lazy.py b/gguf-py/gguf/lazy.py index 7ea7d8f62cb09..22ffb4338bdc7 100644 --- a/gguf-py/gguf/lazy.py +++ b/gguf-py/gguf/lazy.py @@ -360,7 +360,13 @@ def copy_tensor_ranges(t: LazyNumpyTensor, fout: BufferedWriter): dst_offset += r.size - extra_size else: # not trying to use reflinks, but still using os.copy_file_range for speed - os.copy_file_range(src.fileno(), fout.fileno(), r.size, r.offset, dst_offset) + try: + os.copy_file_range(src.fileno(), fout.fileno(), r.size, r.offset, dst_offset) + except OSError: + # fallback when there's a problem (e.g. 
cross-filesystem copies) + src.seek(r.offset) + fout.seek(dst_offset) + shutil.copyfileobj(src, fout, r.size) dst_offset += r.size else: # not using reflinks, fallback when os.copy_file_range is not supported From 833d03c25db0bcd7b69fe57667da63eda62c197e Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Tue, 9 Sep 2025 14:28:10 -0400 Subject: [PATCH 13/13] convert : for FP8, use scale type to decide auto type --- convert_hf_to_gguf.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 24f11c9af0e1b..788a8c54b14b6 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -376,11 +376,13 @@ def dequant_gptq(g_idx: Tensor, qweight: Tensor, qzeros: Tensor, scales: Tensor) weight_name = name.removesuffix("_scale_inv") w = self.model_tensors[weight_name] s = self.model_tensors[name] + # TODO: change to FP8 once natively supported + auto_qtype = s.auto_qtype if s.auto_qtype is not gguf.GGMLQuantizationType.F32 else gguf.GGMLQuantizationType.BF16 self.model_tensors[weight_name] = ModelTensorInfo( load=lambda w=w, s=s: dequant_simple(w.load(), s.load()), size=w.size, src_type=w.src_type, - auto_qtype=gguf.GGMLQuantizationType.BF16, # TODO: change to FP8 once natively supported + auto_qtype=auto_qtype, ) tensors_to_remove.append(name) elif quant_method == "gptq":
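
Note (not part of the patch series): the core trick the series relies on can be shown in a few lines. The sketch below is illustrative only; the function name and parameters are hypothetical, and it assumes a Linux filesystem with reflink support (Btrfs or XFS). It mirrors the padding rule used by best_extra_offset/copy_tensor_ranges: the output is padded so the destination offset lands on the same position within a filesystem block as the source offset, since os.copy_file_range can only share extents (reflink) for block-aligned regions.

import os
import shutil


def copy_with_reflink_padding(src_path: str, src_offset: int, size: int,
                              dst, block_size: int) -> None:
    # Append `size` bytes from src_path[src_offset:] to the already-open
    # destination file `dst`, padding first so the source and destination
    # offsets share the same position within a filesystem block.
    dst_offset = dst.tell()
    pad = (src_offset - dst_offset) % block_size
    dst.write(b"\x00" * pad)
    dst.flush()  # flush buffered writes before using the raw fd below
    dst_offset += pad
    with open(src_path, "rb") as src:
        if hasattr(os, "copy_file_range"):
            # copy whole blocks; the kernel clamps the length at the source
            # end-of-file, and a robust version would loop on the return value
            length = size + (-size % block_size)
            os.copy_file_range(src.fileno(), dst.fileno(), length,
                               src_offset, dst_offset)
        else:
            # plain copy fallback when os.copy_file_range is unavailable
            src.seek(src_offset)
            shutil.copyfileobj(src, dst, size)
    # later writes continue right after the logical tensor data,
    # overwriting any block-rounding overhang
    dst.seek(dst_offset + size)

The real copy_tensor_ranges additionally copies the unaligned head of each range with a plain read/write so the copy_file_range call starts exactly on a block boundary; that refinement is omitted here for brevity.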