4 changes: 2 additions & 2 deletions examples/batched/batched.cpp
@@ -43,7 +43,7 @@ int main(int argc, char ** argv) {
 
     llama_model * model = llama_model_load_from_file(params.model.path.c_str(), model_params);
 
-    if (model == NULL) {
+    if (model == nullptr) {
         LOG_ERR("%s: error: unable to load model\n" , __func__);
         return 1;
     }
@@ -76,7 +76,7 @@ int main(int argc, char ** argv) {
     llama_sampler_chain_add(smpl, llama_sampler_init_temp (params.sampling.temp));
     llama_sampler_chain_add(smpl, llama_sampler_init_dist (params.sampling.seed));
 
-    if (ctx == NULL) {
+    if (ctx == nullptr) {
         LOG_ERR("%s: error: failed to create the llama_context\n" , __func__);
         return 1;
     }
12 changes: 6 additions & 6 deletions examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
@@ -254,7 +254,7 @@ struct my_llama_layer {
 };
 
 struct my_llama_model {
-    struct ggml_context * ctx = NULL;
+    struct ggml_context * ctx = nullptr;
 
     std::string name;
 
@@ -333,7 +333,7 @@ static void print_params(struct my_llama_hparams * params) {
 }
 
 static void print_tensor_info(const struct ggml_context * ctx) {
-    for (auto * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
+    for (auto * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
         LOG_INF("%s: Allocating ", __func__);
         int64_t total = 1;
         int i = 0;
@@ -443,7 +443,7 @@ struct my_llama_file {
 
     my_llama_file(const char * fname, const char * mode) {
         fp = std::fopen(fname, mode);
-        if (fp == NULL) {
+        if (fp == nullptr) {
             size = 0;
         } else {
             seek(0, SEEK_END);
@@ -530,15 +530,15 @@ static std::string llama_escape_whitespaces(const std::string & text) {
 static void load_vocab(const char * filename, const Config * config, struct my_llama_vocab * vocab) {
     if (is_ggml_file(filename)) {
         LOG_INF("%s: Loading vocabulary from gguf file %s\n", __func__, filename);
-        struct ggml_context * ctx_data = NULL;
+        struct ggml_context * ctx_data = nullptr;
 
         struct gguf_init_params params = {
             /*.no_alloc = */ false,
             /*.ctx = */ &ctx_data,
         };
 
         struct gguf_context * ctx = gguf_init_from_file(filename, params);
-        GGML_ASSERT(ctx != NULL);
+        GGML_ASSERT(ctx != nullptr);
 
         const int model_idx = gguf_find_key(ctx, KV_TOKENIZER_MODEL);
         GGML_ASSERT(model_idx >= 0);
@@ -925,7 +925,7 @@ int main(int argc, char ** argv) {
 
     struct ggml_init_params lcparams;
     lcparams.mem_size = 1024ll*1024ll*1024ll*((size_t) params.mem_model_gb);
-    lcparams.mem_buffer = NULL;
+    lcparams.mem_buffer = nullptr;
     lcparams.no_alloc = false;
 
     model.ctx = ggml_init(lcparams);
6 changes: 3 additions & 3 deletions examples/embedding/embedding.cpp
@@ -57,12 +57,12 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
             // try to get token embeddings
             embd = llama_get_embeddings_ith(ctx, i);
             embd_pos = i;
-            GGML_ASSERT(embd != NULL && "failed to get token embeddings");
+            GGML_ASSERT(embd != nullptr && "failed to get token embeddings");
         } else {
             // try to get sequence embeddings - supported only when pooling_type is not NONE
             embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
             embd_pos = batch.seq_id[i][0];
-            GGML_ASSERT(embd != NULL && "failed to get sequence embeddings");
+            GGML_ASSERT(embd != nullptr && "failed to get sequence embeddings");
         }
 
         float * out = output + embd_pos * n_embd;
@@ -107,7 +107,7 @@ int main(int argc, char ** argv) {
     llama_model * model = llama_init.model.get();
     llama_context * ctx = llama_init.context.get();
 
-    if (model == NULL) {
+    if (model == nullptr) {
         LOG_ERR("%s: unable to load model\n", __func__);
         return 1;
     }
6 changes: 3 additions & 3 deletions examples/gguf-hash/gguf-hash.cpp
@@ -285,18 +285,18 @@ static void generate_uuidv5(const unsigned char sha1_digest[20], unsigned char u
 
 static hash_exit_code_t gguf_hash(const hash_params & hash_params) {
     const std::string & fname = hash_params.input;
-    struct ggml_context * ctx_data = NULL;
+    struct ggml_context * ctx_data = nullptr;
 
     struct gguf_init_params params = {
         /*.no_alloc = */ false,
         /*.ctx = */ &ctx_data,
     };
 
     // xxh64 init
-    XXH64_state_t* xxh64_model_hash_state = NULL;
+    XXH64_state_t* xxh64_model_hash_state = nullptr;
     if (hash_params.xxh64) {
         xxh64_model_hash_state = XXH64_createState();
-        if (xxh64_model_hash_state==NULL) {
+        if (xxh64_model_hash_state==nullptr) {
             abort();
         }
 
6 changes: 3 additions & 3 deletions examples/gguf/gguf.cpp
@@ -40,7 +40,7 @@ static bool gguf_ex_write(const std::string & fname) {
 
     struct ggml_init_params params = {
         /*.mem_size =*/ 128ull*1024ull*1024ull,
-        /*.mem_buffer =*/ NULL,
+        /*.mem_buffer =*/ nullptr,
         /*.no_alloc =*/ false,
     };
 
@@ -86,7 +86,7 @@ static bool gguf_ex_write(const std::string & fname) {
 static bool gguf_ex_read_0(const std::string & fname) {
     struct gguf_init_params params = {
         /*.no_alloc = */ false,
-        /*.ctx = */ NULL,
+        /*.ctx = */ nullptr,
     };
 
     struct gguf_context * ctx = gguf_init_from_file(fname.c_str(), params);
@@ -148,7 +148,7 @@ static bool gguf_ex_read_0(const std::string & fname) {
 
 // read and create ggml_context containing the tensors and their data
 static bool gguf_ex_read_1(const std::string & fname, bool check_data) {
-    struct ggml_context * ctx_data = NULL;
+    struct ggml_context * ctx_data = nullptr;
 
     struct gguf_init_params params = {
         /*.no_alloc = */ false,
@@ -152,7 +152,7 @@ Java_android_llama_cpp_LLamaAndroid_backend_1free(JNIEnv *, jobject) {
 extern "C"
 JNIEXPORT void JNICALL
 Java_android_llama_cpp_LLamaAndroid_log_1to_1android(JNIEnv *, jobject) {
-    llama_log_set(log_callback, NULL);
+    llama_log_set(log_callback, nullptr);
 }
 
 extern "C"
14 changes: 7 additions & 7 deletions examples/model-conversion/logits.cpp
@@ -78,27 +78,27 @@ int main(int argc, char ** argv) {
 
     llama_model * model = llama_model_load_from_file(model_path.c_str(), model_params);
 
-    if (model == NULL) {
+    if (model == nullptr) {
         fprintf(stderr , "%s: error: unable to load model\n" , __func__);
         return 1;
     }
 
     // Extract basename from model_path
     const char * basename = strrchr(model_path.c_str(), '/');
-    basename = (basename == NULL) ? model_path.c_str() : basename + 1;
+    basename = (basename == nullptr) ? model_path.c_str() : basename + 1;
 
     char model_name[256];
     strncpy(model_name, basename, 255);
     model_name[255] = '\0';
 
     char * dot = strrchr(model_name, '.');
-    if (dot != NULL && strcmp(dot, ".gguf") == 0) {
+    if (dot != nullptr && strcmp(dot, ".gguf") == 0) {
         *dot = '\0';
     }
     printf("Model name: %s\n", model_name);
 
     const llama_vocab * vocab = llama_model_get_vocab(model);
-    const int n_prompt = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, true, true);
+    const int n_prompt = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), nullptr, 0, true, true);
 
     std::vector<llama_token> prompt_tokens(n_prompt);
     if (llama_tokenize(vocab, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), true, true) < 0) {
@@ -117,7 +117,7 @@ int main(int argc, char ** argv) {
     }
 
     llama_context * ctx = llama_init_from_model(model, ctx_params);
-    if (ctx == NULL) {
+    if (ctx == nullptr) {
         fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__);
         return 1;
     }
@@ -167,7 +167,7 @@ int main(int argc, char ** argv) {
     printf("Saving logits to %s\n", bin_filename);
 
     FILE * f = fopen(bin_filename, "wb");
-    if (f == NULL) {
+    if (f == nullptr) {
         fprintf(stderr, "%s: error: failed to open binary output file\n", __func__);
         return 1;
     }
@@ -178,7 +178,7 @@ int main(int argc, char ** argv) {
     char txt_filename[512];
     snprintf(txt_filename, sizeof(txt_filename), "data/llamacpp-%s%s.txt", model_name, type);
     f = fopen(txt_filename, "w");
-    if (f == NULL) {
+    if (f == nullptr) {
         fprintf(stderr, "%s: error: failed to open text output file\n", __func__);
         return 1;
     }
4 changes: 2 additions & 2 deletions examples/passkey/passkey.cpp
@@ -66,7 +66,7 @@ int main(int argc, char ** argv) {
 
     llama_model * model = llama_model_load_from_file(params.model.path.c_str(), model_params);
 
-    if (model == NULL) {
+    if (model == nullptr) {
         LOG_ERR("%s: unable to load model\n" , __func__);
         return 1;
     }
@@ -82,7 +82,7 @@ int main(int argc, char ** argv) {
     GGML_ASSERT(ctx_params.n_batch % n_grp == 0 && "n_batch must be divisible by n_grp");
 
     llama_context * ctx = llama_init_from_model(model, ctx_params);
-    if (ctx == NULL) {
+    if (ctx == nullptr) {
         LOG_ERR("%s: failed to create the llama_context\n" , __func__);
         return 1;
     }
6 changes: 3 additions & 3 deletions examples/retrieval/retrieval.cpp
@@ -98,9 +98,9 @@ static void batch_process(llama_context * ctx, llama_batch & batch, float * outp
 
         // try to get sequence embeddings - supported only when pooling_type is not NONE
         const float * embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
-        if (embd == NULL) {
+        if (embd == nullptr) {
             embd = llama_get_embeddings_ith(ctx, i);
-            if (embd == NULL) {
+            if (embd == nullptr) {
                 LOG_ERR("%s: failed to get embeddings for token %d\n", __func__, i);
                 continue;
             }
@@ -154,7 +154,7 @@ int main(int argc, char ** argv) {
     llama_model * model = llama_init.model.get();
     llama_context * ctx = llama_init.context.get();
 
-    if (model == NULL) {
+    if (model == nullptr) {
         LOG_ERR("%s: unable to load model\n", __func__);
         return 1;
     }
2 changes: 1 addition & 1 deletion examples/simple-chat/simple-chat.cpp
@@ -101,7 +101,7 @@ int main(int argc, char ** argv) {
         const bool is_first = llama_memory_seq_pos_max(llama_get_memory(ctx), 0) == -1;
 
         // tokenize the prompt
-        const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, is_first, true);
+        const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), nullptr, 0, is_first, true);
         std::vector<llama_token> prompt_tokens(n_prompt_tokens);
         if (llama_tokenize(vocab, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), is_first, true) < 0) {
             GGML_ABORT("failed to tokenize the prompt\n");
6 changes: 3 additions & 3 deletions examples/simple/simple.cpp
@@ -85,7 +85,7 @@ int main(int argc, char ** argv) {
 
     llama_model * model = llama_model_load_from_file(model_path.c_str(), model_params);
 
-    if (model == NULL) {
+    if (model == nullptr) {
         fprintf(stderr , "%s: error: unable to load model\n" , __func__);
         return 1;
     }
@@ -94,7 +94,7 @@ int main(int argc, char ** argv) {
     // tokenize the prompt
 
     // find the number of tokens in the prompt
-    const int n_prompt = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, true, true);
+    const int n_prompt = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), nullptr, 0, true, true);
 
     // allocate space for the tokens and tokenize the prompt
     std::vector<llama_token> prompt_tokens(n_prompt);
@@ -115,7 +115,7 @@ int main(int argc, char ** argv) {
 
     llama_context * ctx = llama_init_from_model(model, ctx_params);
 
-    if (ctx == NULL) {
+    if (ctx == nullptr) {
         fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__);
         return 1;
     }
8 changes: 4 additions & 4 deletions examples/speculative-simple/speculative-simple.cpp
@@ -33,11 +33,11 @@ int main(int argc, char ** argv) {
     llama_backend_init();
     llama_numa_init(params.numa);
 
-    llama_model * model_tgt = NULL;
-    //llama_model * model_dft = NULL;
+    llama_model * model_tgt = nullptr;
+    //llama_model * model_dft = nullptr;
 
-    llama_context * ctx_tgt = NULL;
-    llama_context * ctx_dft = NULL;
+    llama_context * ctx_tgt = nullptr;
+    llama_context * ctx_dft = nullptr;
 
     // load the target model
     common_init_result llama_init_tgt = common_init_from_params(params);
8 changes: 4 additions & 4 deletions examples/speculative/speculative.cpp
@@ -64,11 +64,11 @@ int main(int argc, char ** argv) {
     llama_backend_init();
     llama_numa_init(params.numa);
 
-    llama_model * model_tgt = NULL;
-    llama_model * model_dft = NULL;
+    llama_model * model_tgt = nullptr;
+    llama_model * model_dft = nullptr;
 
-    llama_context * ctx_tgt = NULL;
-    llama_context * ctx_dft = NULL;
+    llama_context * ctx_tgt = nullptr;
+    llama_context * ctx_dft = nullptr;
 
     // load the target model
     common_init_result llama_init_tgt = common_init_from_params(params);
2 changes: 1 addition & 1 deletion examples/training/finetune.cpp
@@ -43,7 +43,7 @@ int main(int argc, char ** argv) {
     llama_model_ptr & model = llama_init.model;
     llama_context_ptr & ctx = llama_init.context;
 
-    if (model == NULL) {
+    if (model == nullptr) {
         LOG_ERR("%s: unable to load model\n", __func__);
         return 1;
     }
4 changes: 2 additions & 2 deletions src/llama-adapter.cpp
@@ -42,7 +42,7 @@ bool llama_adapter_cvec::init(const llama_model & model) {
         if (it == ctx_map.end()) {
             ggml_init_params params = {
                 /*.mem_size =*/ hparams.n_layer*ggml_tensor_overhead(),
-                /*.mem_buffer =*/ NULL,
+                /*.mem_buffer =*/ nullptr,
                 /*.no_alloc =*/ true,
             };
 
@@ -248,7 +248,7 @@ static void llama_adapter_lora_init_impl(llama_model & model, const char * path_
        // add a new context
        ggml_init_params params = {
            /*.mem_size =*/ n_tensors*ggml_tensor_overhead(),
-           /*.mem_buffer =*/ NULL,
+           /*.mem_buffer =*/ nullptr,
            /*.no_alloc =*/ true,
        };
        ggml_context * buft_ctx = ggml_init(params);
2 changes: 1 addition & 1 deletion src/llama-batch.cpp
@@ -80,7 +80,7 @@ bool llama_batch_allocr::init(
 
     if (!batch.seq_id) {
         seq_id.resize(batch.n_tokens + 1);
-        seq_id[batch.n_tokens] = NULL;
+        seq_id[batch.n_tokens] = nullptr;
         for (int32_t i = 0; i < batch.n_tokens; i++) {
             seq_id[i] = seq_id_0.data();
         }
4 changes: 2 additions & 2 deletions src/llama-context.cpp
@@ -1120,7 +1120,7 @@ int llama_context::decode(const llama_batch & batch_inp) {
 
     // plot the computation graph in dot format (for debugging purposes)
     //if (n_past%100 == 0) {
-    //    ggml_graph_dump_dot(gf, NULL, "llama.dot");
+    //    ggml_graph_dump_dot(gf, nullptr, "llama.dot");
     //}
 
     auto * t_logits = res->get_logits();
@@ -2287,7 +2287,7 @@ llama_context * llama_init_from_model(
         llama_model * model,
         llama_context_params params) {
     if (!model) {
-        LLAMA_LOG_ERROR("%s: model cannot be NULL\n", __func__);
+        LLAMA_LOG_ERROR("%s: model cannot be nullptr\n", __func__);
         return nullptr;
     }
 
2 changes: 1 addition & 1 deletion src/llama-graph.cpp
@@ -744,7 +744,7 @@ ggml_tensor * llm_graph_context::build_ffn(
             } else {
                 cur = ggml_gelu(ctx0, cur);
                 cb(cur, "ffn_gelu", il);
-                if (act_scales != NULL) {
+                if (act_scales != nullptr) {
                     cur = ggml_div(ctx0, cur, act_scales);
                     cb(cur, "ffn_act", il);
                 }
2 changes: 1 addition & 1 deletion src/llama-impl.cpp
@@ -84,7 +84,7 @@ std::string format(const char * fmt, ...) {
     va_list ap2;
     va_start(ap, fmt);
     va_copy(ap2, ap);
-    int size = vsnprintf(NULL, 0, fmt, ap);
+    int size = vsnprintf(nullptr, 0, fmt, ap);
     GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
     std::vector<char> buf(size + 1);
     int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
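For context (not part of this diff): NULL in C++ is an integer constant, so it can steer overload resolution toward integer overloads, while nullptr has its own type (std::nullptr_t) that only converts to pointer types. A minimal, hypothetical sketch of the difference; the describe overloads below are illustrative only and do not exist in llama.cpp:

#include <cstdio>

// Two overloads that differ only in taking an integer vs. a pointer.
static void describe(int)          { std::puts("describe(int)"); }
static void describe(const char *) { std::puts("describe(const char *)"); }

int main() {
    describe(0);       // a literal 0 is an int: picks describe(int), even though 0 is also a null pointer constant
    describe(nullptr); // std::nullptr_t only converts to pointer types: unambiguously picks describe(const char *)
    // describe(NULL); // depending on how NULL is defined (0, 0L, __null), this may pick the int overload,
    //                 // be ambiguous, or trigger a warning -- the kind of surprise nullptr avoids
    return 0;
}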