From 052778653d08639895f6901304f10128ae7879d7 Mon Sep 17 00:00:00 2001
From: Herman Semenoff
Date: Mon, 1 Dec 2025 14:14:25 +0300
Subject: [PATCH] gguf: llama: Use `= default` for trivial constructors and
 destructors
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

References:
1. **cppreference.com - Defaulted and deleted functions**: [https://en.cppreference.com/w/cpp/language/default_operator](https://en.cppreference.com/w/cpp/language/default_operator)
2. **Modernes C++ - Default and Delete Special Member Functions**: [https://www.modernescpp.com/index.php/default-and-delete-special-member-functions](https://www.modernescpp.com/index.php/default-and-delete-special-member-functions)
3. **Bartlomiej Filipek - Defaulted and Deleted Functions in C++11**: [https://www.bfilipek.com/2018/06/default-and-delete.html](https://www.bfilipek.com/2018/06/default-and-delete.html)
4. **LearnCpp.com - 15.2 — Defaulted and deleted functions**: [https://www.learncpp.com/cpp-tutorial/15-2-defaulted-and-deleted-functions/](https://www.learncpp.com/cpp-tutorial/15-2-defaulted-and-deleted-functions/)
---
 ggml/src/gguf.cpp   | 2 +-
 src/llama-impl.h    | 2 +-
 src/llama-model.cpp | 6 +++---
 src/llama-vocab.cpp | 3 +--
 4 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/ggml/src/gguf.cpp b/ggml/src/gguf.cpp
index 8cc4ef1cf44..b165d8bdc62 100644
--- a/ggml/src/gguf.cpp
+++ b/ggml/src/gguf.cpp
@@ -1169,7 +1169,7 @@ void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const vo
 struct gguf_writer_base {
     size_t written_bytes {0u};
 
-    ~gguf_writer_base(void) {}
+    ~gguf_writer_base(void) = default;
 
     // we bet on devirtualization
     virtual void write(int8_t val) = 0;
diff --git a/src/llama-impl.h b/src/llama-impl.h
index c5163e9225a..c3391e79f51 100644
--- a/src/llama-impl.h
+++ b/src/llama-impl.h
@@ -37,7 +37,7 @@ void llama_log_callback_default(ggml_log_level level, const char * text, void *
 template <typename T>
 struct no_init {
     T value;
-    no_init() { /* do nothing */ }
+    no_init() = default;
 };
 
 struct time_meas {
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index ea6f59ed482..ed2b112cc41 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -420,8 +420,8 @@ static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, llama_split_mode s
 }
 
 struct llama_model::impl {
-    impl() {}
-    ~impl() {}
+    impl() = default;
+    ~impl() = default;
 
     uint64_t n_elements = 0;
 
@@ -458,7 +458,7 @@ llama_model::llama_model(const llama_model_params & params) : params(params), pi
     pimpl->has_tensor_overrides = params.tensor_buft_overrides && params.tensor_buft_overrides[0].pattern;
 }
 
-llama_model::~llama_model() {}
+llama_model::~llama_model() = default;
 
 void llama_model::load_stats(llama_model_loader & ml) {
     pimpl->n_elements = ml.n_elements;
diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index 639fecbd317..da42cd4848d 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -3232,8 +3232,7 @@ void llama_vocab::impl::print_info() const {
 llama_vocab::llama_vocab() : pimpl(new impl(*this)) {
 }
 
-llama_vocab::~llama_vocab() {
-}
+llama_vocab::~llama_vocab() = default;
 
 void llama_vocab::load(llama_model_loader & ml, const LLM_KV & kv) {
     pimpl->load(ml, kv);