Skip to content

Commit c2ba046

Browse files
committed
fix typos
1 parent b00126a commit c2ba046

File tree

2 files changed

+2
-2
lines changed

2 files changed

+2
-2
lines changed

src/llama-model-loader.cpp

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -898,7 +898,7 @@ void llama_model_loader::load_data_for(struct ggml_tensor * cur) const {
898898
if (use_mmap) {
899899
const auto & mapping = mappings.at(w.idx);
900900
if (tensor_data(cur) == nullptr) {
901-
tensor_data(cur) = (uint8_t *)mapping->addr() + w.offs;
901+
tensor_set_data(cur, (uint8_t *)mapping->addr() + w.offs);
902902
} else {
903903
memcpy(tensor_data(cur), (uint8_t *)mapping->addr() + w.offs, ggml_nbytes(cur));
904904
}

src/llama-quant.cpp

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -804,7 +804,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
804804
if (read_data.size() < ggml_nbytes(tensor)) {
805805
read_data.resize(ggml_nbytes(tensor));
806806
}
807-
set_tensor_data(tensor, read_data.data());
807+
tensor_set_data(tensor, read_data.data());
808808
}
809809
ml.load_data_for(tensor);
810810

0 commit comments

Comments
 (0)