ggml/src/gguf.cpp: 1 addition, 1 deletion

@@ -1169,7 +1169,7 @@ void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const vo
 struct gguf_writer_base {
     size_t written_bytes {0u};

-    ~gguf_writer_base(void) {}
+    ~gguf_writer_base(void) = default;

     // we bet on devirtualization
     virtual void write(int8_t val) = 0;
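Why `= default` over an empty body: an empty `{}` destructor counts as user-provided, which by itself makes a type non-trivially destructible, while a defaulted one can stay trivial when the members allow it. A minimal illustration of that distinction (not code from this PR):

```cpp
#include <type_traits>

struct empty_body { ~empty_body() {} };        // user-provided destructor
struct defaulted  { ~defaulted() = default; }; // compiler-generated

static_assert(!std::is_trivially_destructible_v<empty_body>, "user-provided => non-trivial");
static_assert( std::is_trivially_destructible_v<defaulted>,  "defaulted => trivial here");
```

Since `gguf_writer_base`'s only data member is a `size_t` and its destructor is non-virtual, the defaulted version can be trivial as well, besides stating the intent more directly.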
src/llama-impl.h: 1 addition, 1 deletion

@@ -37,7 +37,7 @@ void llama_log_callback_default(ggml_log_level level, const char * text, void *
 template <typename T>
 struct no_init {
     T value;
-    no_init() { /* do nothing */ }
+    no_init() = default;
 };

 struct time_meas {
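`no_init<T>` exists precisely so that `value` is left uninitialized. Both spellings leave `value` indeterminate under default-initialization; they differ under value-initialization, where a user-provided empty constructor suppresses zero-initialization but a constructor defaulted on its first declaration does not. A sketch of the difference (illustrative names, assuming a trivial `T` such as `int`):

```cpp
#include <type_traits>

template <typename T> struct with_body { T value; with_body() {} };         // user-provided
template <typename T> struct defaulted { T value; defaulted() = default; }; // defaulted

int main() {
    with_body<int> a;   // default-initialized: value indeterminate
    defaulted<int> b;   // default-initialized: value indeterminate
    with_body<int> c{}; // value-initialized: ctor runs, does nothing -> still indeterminate
    defaulted<int> d{}; // value-initialized: zero-initialized -> value == 0

    // The defaulted version is also trivially default-constructible:
    static_assert( std::is_trivially_default_constructible_v<defaulted<int>>);
    static_assert(!std::is_trivially_default_constructible_v<with_body<int>>);
    (void)a; (void)b; (void)c; (void)d;
}
```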
src/llama-model.cpp: 3 additions, 3 deletions

@@ -420,8 +420,8 @@ static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, llama_split_mode s
 }

 struct llama_model::impl {
-    impl() {}
-    ~impl() {}
+    impl() = default;
+    ~impl() = default;

     uint64_t n_elements = 0;

@@ -458,7 +458,7 @@ llama_model::llama_model(const llama_model_params & params) : params(params), pi
     pimpl->has_tensor_overrides = params.tensor_buft_overrides && params.tensor_buft_overrides[0].pattern;
 }

-llama_model::~llama_model() {}
+llama_model::~llama_model() = default;

 void llama_model::load_stats(llama_model_loader & ml) {
     pimpl->n_elements = ml.n_elements;
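`llama_model` keeps its state behind `pimpl`, so the defaulted destructor still has to be defined in this .cpp file: the smart pointer's deleter needs `impl` to be a complete type, and it only is in this translation unit. A minimal sketch of that pattern with hypothetical names, assuming `std::unique_ptr` as the pimpl holder:

```cpp
// widget.h -- hypothetical names, for illustration only
#include <memory>

class widget {
public:
    widget();
    ~widget(); // declared only: impl is incomplete here
private:
    struct impl;
    std::unique_ptr<impl> pimpl;
};

// widget.cpp
struct widget::impl {
    int state = 0;
};

widget::widget() : pimpl(new impl()) {}

// `= default` works here because impl is now complete;
// defaulting the destructor in the header would not compile.
widget::~widget() = default;
```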
src/llama-vocab.cpp: 1 addition, 2 deletions

@@ -3232,8 +3232,7 @@ void llama_vocab::impl::print_info() const {
 llama_vocab::llama_vocab() : pimpl(new impl(*this)) {
 }

-llama_vocab::~llama_vocab() {
-}
+llama_vocab::~llama_vocab() = default;

 void llama_vocab::load(llama_model_loader & ml, const LLM_KV & kv) {
     pimpl->load(ml, kv);
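Same pattern as in src/llama-model.cpp above: `llama_vocab::impl` is complete in this translation unit, so the out-of-line `= default` is a drop-in replacement for the empty destructor body.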