
Commit df7397f

Merge branch 'ggml-org:master' into mradermacher
2 parents: ce29d25 + b3e3060

File tree

6 files changed: +23 additions, -13 deletions

- .github/workflows/release.yml
- docs/build.md
- ggml/src/gguf.cpp
- src/llama-impl.h
- src/llama-model.cpp
- src/llama-vocab.cpp

.github/workflows/release.yml

Lines changed: 6 additions & 6 deletions
```diff
@@ -869,6 +869,12 @@ jobs:
 > [!WARNING]
 > **Release Format Update**: Linux releases will soon use .tar.gz archives instead of .zip. Please make the necessary changes to your deployment scripts.

+<details open>
+
+${{ github.event.head_commit.message }}
+
+</details>
+
 **macOS/iOS:**
 - [macOS Apple Silicon (arm64)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.tar.gz)
 - [macOS Intel (x64)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-macos-x64.tar.gz)
@@ -893,12 +899,6 @@ jobs:
 - [openEuler aarch64 (310p)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-310p-openEuler-aarch64.tar.gz)
 - [openEuler aarch64 (910b)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-910b-openEuler-aarch64.tar.gz)

-<details>
-
-${{ github.event.head_commit.message }}
-
-</details>
-
 - name: Upload release
   id: upload_release
   uses: actions/github-script@v3
```

docs/build.md

Lines changed: 11 additions & 0 deletions
````diff
@@ -431,11 +431,22 @@ docker run -it --rm -v "$(pwd):/app:Z" --device /dev/dri/renderD128:/dev/dri/ren

 ### For Linux users:

+#### Using the LunarG Vulkan SDK
+
 First, follow the official LunarG instructions for the installation and setup of the Vulkan SDK in the [Getting Started with the Linux Tarball Vulkan SDK](https://vulkan.lunarg.com/doc/sdk/latest/linux/getting_started.html) guide.

 > [!IMPORTANT]
 > After completing the first step, ensure that you have used the `source` command on the `setup_env.sh` file inside of the Vulkan SDK in your current terminal session. Otherwise, the build won't work. Additionally, if you close out of your terminal, you must perform this step again if you intend to perform a build. However, there are ways to make this persistent. Refer to the Vulkan SDK guide linked in the first step for more information about any of this.

+#### Using system packages
+
+On Debian / Ubuntu, you can install the required dependencies using:
+```sh
+sudo apt-get install libvulkan-dev glslc
+```
+
+#### Common steps
+
 Second, after verifying that you have followed all of the SDK installation/setup steps, use this command to make sure before proceeding:
 ```bash
 vulkaninfo
````
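The new "Using system packages" path swaps the LunarG SDK for distro packages; either way, `vulkaninfo` is the sanity check before building. For readers who prefer a programmatic check, here is a minimal, hypothetical C++ sketch (not part of llama.cpp) that asks the loader for its instance version, roughly the first thing `vulkaninfo` reports. It assumes the headers and loader from `libvulkan-dev` and a Vulkan 1.1+ loader.

```cpp
// check_vk.cpp -- hypothetical sanity check, not part of llama.cpp.
// Build: g++ check_vk.cpp -o check_vk -lvulkan
#include <vulkan/vulkan.h>
#include <cstdio>

int main() {
    uint32_t version = 0;
    // vkEnumerateInstanceVersion requires a Vulkan 1.1+ loader.
    if (vkEnumerateInstanceVersion(&version) != VK_SUCCESS) {
        std::fprintf(stderr, "Vulkan loader found, but it reported no instance version\n");
        return 1;
    }
    std::printf("Vulkan instance version: %u.%u.%u\n",
                VK_API_VERSION_MAJOR(version),
                VK_API_VERSION_MINOR(version),
                VK_API_VERSION_PATCH(version));
    return 0;
}
```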

ggml/src/gguf.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -1169,7 +1169,7 @@ void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const vo
 struct gguf_writer_base {
     size_t written_bytes {0u};

-    ~gguf_writer_base(void) {}
+    ~gguf_writer_base(void) = default;

     // we bet on devirtualization
     virtual void write(int8_t val) = 0;
```
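This change is behavior-preserving; the general motivation for `= default` is that an empty destructor body counts as user-provided and costs a type its triviality, while a defaulted destructor does not. A minimal sketch with hypothetical types, not the llama.cpp sources:

```cpp
#include <type_traits>

struct empty_body { ~empty_body() {} };        // user-provided destructor
struct defaulted  { ~defaulted() = default; }; // compiler-generated, trivial

static_assert(!std::is_trivially_destructible_v<empty_body>);
static_assert( std::is_trivially_destructible_v<defaulted>);

int main() {}
```

For `gguf_writer_base` itself the destructor stays non-trivial either way, since the struct has virtual member functions; here `= default` mainly documents that the destructor does nothing.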

src/llama-impl.h

Lines changed: 1 addition & 1 deletion
```diff
@@ -37,7 +37,7 @@ void llama_log_callback_default(ggml_log_level level, const char * text, void *
 template <typename T>
 struct no_init {
     T value;
-    no_init() { /* do nothing */ }
+    no_init() = default;
 };

 struct time_meas {
```
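This hunk is subtler than the destructor cases: `no_init` exists to leave `value` uninitialized, and whether the constructor is user-provided changes what value-initialization does. A sketch with hypothetical names contrasting the two spellings:

```cpp
#include <type_traits>

template <typename T>
struct no_init_old {
    T value;
    no_init_old() { /* do nothing */ }  // user-provided
};

template <typename T>
struct no_init_new {
    T value;
    no_init_new() = default;            // not user-provided
};

// `= default` makes the wrapper a trivial type; the user-provided
// constructor does not.
static_assert(!std::is_trivial_v<no_init_old<int>>);
static_assert( std::is_trivial_v<no_init_new<int>>);

int main() {
    no_init_old<int> a;   // value indeterminate
    no_init_new<int> b;   // value indeterminate (default-initialization)
    no_init_new<int> c{}; // zero-initialized: value-initializing a class
                          // whose default ctor is not user-provided zeroes
                          // the object first
    (void)a; (void)b; (void)c;
}
```

Plain default-initialization (`no_init<T> x;`) stays uninitialized under both spellings; only value-initialization differs, and since `std::vector<T>::resize` value-initializes new elements with the default allocator, the distinction can matter at call sites.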

src/llama-model.cpp

Lines changed: 3 additions & 3 deletions
```diff
@@ -423,8 +423,8 @@ static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, llama_split_mode s
 }

 struct llama_model::impl {
-    impl() {}
-    ~impl() {}
+    impl() = default;
+    ~impl() = default;

     uint64_t n_elements = 0;

@@ -461,7 +461,7 @@ llama_model::llama_model(const llama_model_params & params) : params(params), pi
     pimpl->has_tensor_overrides = params.tensor_buft_overrides && params.tensor_buft_overrides[0].pattern;
 }

-llama_model::~llama_model() {}
+llama_model::~llama_model() = default;

 void llama_model::load_stats(llama_model_loader & ml) {
     pimpl->n_elements = ml.n_elements;
```
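`llama_model::~llama_model()` can be defaulted only because the defaulting happens in the .cpp file, where `impl` is a complete type: a `std::unique_ptr`'s deleter needs the complete type wherever the destructor is instantiated. A minimal pimpl sketch with hypothetical names, assuming the `pimpl` member is a `std::unique_ptr` as in llama.cpp; the same reasoning applies to the `llama_vocab` change below.

```cpp
#include <memory>

// --- header (widget.h) ---
class widget {
public:
    widget();
    ~widget(); // declaration only: defaulting it inline here would fail in
               // any TU that destroys a widget without seeing the complete
               // impl, because unique_ptr<impl>'s deleter gets instantiated
private:
    struct impl;
    std::unique_ptr<impl> pimpl;
};

// --- implementation (widget.cpp) ---
struct widget::impl {
    int state = 0;
};

widget::widget() : pimpl(new impl()) {}
widget::~widget() = default; // fine: impl is complete in this TU

int main() { widget w; }
```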

src/llama-vocab.cpp

Lines changed: 1 addition & 2 deletions
```diff
@@ -3253,8 +3253,7 @@ void llama_vocab::impl::print_info() const {
 llama_vocab::llama_vocab() : pimpl(new impl(*this)) {
 }

-llama_vocab::~llama_vocab() {
-}
+llama_vocab::~llama_vocab() = default;

 void llama_vocab::load(llama_model_loader & ml, const LLM_KV & kv) {
     pimpl->load(ml, kv);
```
