Skip to content

Commit 9cbf50c

Browse files
authored
build : fix and ignore MSVC warnings (ggml-org#1889)
1 parent 3d01122 commit 9cbf50c

File tree

16 files changed

+89
-38
lines changed

16 files changed

+89
-38
lines changed

examples/baby-llama/baby-llama.cpp

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,10 @@
44
#include <random>
55
#include <cstring>
66

7+
#if defined(_MSC_VER)
8+
#pragma warning(disable: 4244 4267) // possible loss of data
9+
#endif
10+
711
float frand() {
812
return (float)rand()/(float)RAND_MAX;
913
}
@@ -1470,7 +1474,7 @@ struct ggml_tensor * square_error_loss(struct ggml_context * ctx, struct ggml_te
14701474
}
14711475

14721476
struct ggml_tensor * cross_entropy_loss(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) {
1473-
const float eps = 1e-3;
1477+
const float eps = 1e-3f;
14741478
return
14751479
ggml_sum(ctx,
14761480
ggml_neg(ctx,

examples/benchmark/benchmark-matmult.cpp

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,10 @@
1616
#include <iterator>
1717
#include <algorithm>
1818

19+
#if defined(_MSC_VER)
20+
#pragma warning(disable: 4244 4267) // possible loss of data
21+
#endif
22+
1923
float tensor_sum_elements(const ggml_tensor * tensor) {
2024
float sum = 0;
2125
if (tensor->type==GGML_TYPE_F32) {
@@ -29,9 +33,9 @@ float tensor_sum_elements(const ggml_tensor * tensor) {
2933
}
3034

3135
void tensor_dump(const ggml_tensor * tensor, const char * name) {
32-
printf("%15s: type = %i (%5s) ne = %5d x %5d x %5d, nb = (%5li, %5li, %5li) - ", name,
36+
printf("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi) - ", name,
3337
tensor->type, ggml_type_name(tensor->type),
34-
(int) tensor->ne[0], (int) tensor->ne[1], (int) tensor->ne[2], tensor->nb[0], tensor->nb[1], tensor->nb[2]);
38+
tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->nb[0], tensor->nb[1], tensor->nb[2]);
3539
float sum = tensor_sum_elements(tensor);
3640
printf("Sum of tensor %s is %6.2f\n", name, sum);
3741
}
@@ -120,7 +124,7 @@ int main(int argc, char ** argv) {
120124
ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); // BLAS
121125
ctx_size += 1024*1024*16;
122126

123-
printf("Allocating Memory of size %li bytes, %li MB\n",ctx_size, (ctx_size/1024/1024));
127+
printf("Allocating Memory of size %zi bytes, %zi MB\n",ctx_size, (ctx_size/1024/1024));
124128

125129
struct ggml_init_params params = {
126130
/*.mem_size =*/ ctx_size,

examples/common.cpp

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,10 @@
2828
#include <wchar.h>
2929
#endif
3030

31+
#if defined(_MSC_VER)
32+
#pragma warning(disable: 4244 4267) // possible loss of data
33+
#endif
34+
3135
int32_t get_num_physical_cores() {
3236
#ifdef __linux__
3337
// enumerate the set of thread siblings, num entries is num cores
@@ -373,7 +377,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
373377
} else {
374378
throw std::exception();
375379
}
376-
} catch (const std::exception &e) {
380+
} catch (const std::exception&) {
377381
invalid_param = true;
378382
break;
379383
}

examples/embedding/embedding.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,10 @@
44

55
#include <ctime>
66

7+
#if defined(_MSC_VER)
8+
#pragma warning(disable: 4244 4267) // possible loss of data
9+
#endif
10+
711
int main(int argc, char ** argv) {
812
gpt_params params;
913

examples/main/main.cpp

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,10 @@
2828
#include <signal.h>
2929
#endif
3030

31+
#if defined(_MSC_VER)
32+
#pragma warning(disable: 4244 4267) // possible loss of data
33+
#endif
34+
3135
static console_state con_st;
3236
static llama_context ** g_ctx;
3337

@@ -348,7 +352,7 @@ int main(int argc, char ** argv) {
348352
if ((int)embd.size() > max_embd_size) {
349353
auto skipped_tokens = embd.size() - max_embd_size;
350354
console_set_color(con_st, CONSOLE_COLOR_ERROR);
351-
printf("<<input too long: skipped %ld token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
355+
printf("<<input too long: skipped %" PRIu64 " token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
352356
console_set_color(con_st, CONSOLE_COLOR_DEFAULT);
353357
fflush(stdout);
354358
embd.resize(max_embd_size);

examples/perplexity/perplexity.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,10 @@
55
#include <cmath>
66
#include <ctime>
77

8+
#if defined(_MSC_VER)
9+
#pragma warning(disable: 4244 4267) // possible loss of data
10+
#endif
11+
812
std::vector<float> softmax(const std::vector<float>& logits) {
913
std::vector<float> probs(logits.size());
1014
float max_logit = logits[0];

examples/quantize-stats/quantize-stats.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,10 @@
1919
#include <thread>
2020
#include <mutex>
2121

22+
#if defined(_MSC_VER)
23+
#pragma warning(disable: 4244 4267) // possible loss of data
24+
#endif
25+
2226
struct quantize_stats_params {
2327
std::string model = "models/7B/ggml-model-f16.bin";
2428
bool verbose = false;

examples/save-load-state/save-load-state.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ int main(int argc, char ** argv) {
3737
// init
3838
auto ctx = llama_init_from_file(params.model.c_str(), lparams);
3939
auto tokens = std::vector<llama_token>(params.n_ctx);
40-
auto n_prompt_tokens = llama_tokenize(ctx, params.prompt.c_str(), tokens.data(), tokens.size(), true);
40+
auto n_prompt_tokens = llama_tokenize(ctx, params.prompt.c_str(), tokens.data(), int(tokens.size()), true);
4141

4242
if (n_prompt_tokens < 1) {
4343
fprintf(stderr, "%s : failed to tokenize prompt\n", __func__);

examples/train-text-from-scratch/train-text-from-scratch.cpp

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,9 @@
1212
#include <algorithm>
1313
#include <string>
1414

15+
#if defined(_MSC_VER)
16+
#pragma warning(disable: 4244 4267) // possible loss of data
17+
#endif
1518

1619
struct random_normal_distribution {
1720
std::mt19937 gen;
@@ -20,7 +23,6 @@ struct random_normal_distribution {
2023
float max;
2124
};
2225

23-
2426
struct random_uniform_distribution {
2527
std::mt19937 gen;
2628
std::uniform_real_distribution<float> rd;
@@ -2366,7 +2368,7 @@ void write_tensor(struct llama_file * file, struct ggml_tensor * tensor) {
23662368
file->write_u32(0);
23672369
file->write_u32(0);
23682370
file->write_u32(GGML_TYPE_F32);
2369-
file->seek(-file->tell() & 31, SEEK_CUR);
2371+
file->seek(0-file->tell() & 31, SEEK_CUR);
23702372
return;
23712373
}
23722374
const char * name = ggml_get_name(tensor);
@@ -2381,7 +2383,7 @@ void write_tensor(struct llama_file * file, struct ggml_tensor * tensor) {
23812383
file->write_u32(tensor->type);
23822384
file->write_raw(ne, sizeof(ne[0]) * nd);
23832385
file->write_raw(name, name_len);
2384-
file->seek(-file->tell() & 31, SEEK_CUR);
2386+
file->seek(0-file->tell() & 31, SEEK_CUR);
23852387
file->write_raw(tensor->data, ggml_nbytes(tensor));
23862388
}
23872389

@@ -2402,7 +2404,7 @@ void read_tensor(struct llama_file * file, struct ggml_tensor * tensor) {
24022404
std::string name = file->read_string(name_len);
24032405
GGML_ASSERT(strncmp(ggml_get_name(tensor), name.c_str(), sizeof(tensor->name)-1) == 0);
24042406

2405-
file->seek(-file->tell() & 31, SEEK_CUR);
2407+
file->seek(0-file->tell() & 31, SEEK_CUR);
24062408
file->read_raw(tensor->data, ggml_nbytes(tensor));
24072409
}
24082410

@@ -2756,8 +2758,8 @@ struct train_params get_default_train_params() {
27562758

27572759
params.lbfgs_n_iter = 16;
27582760
params.adam_n_iter = 16;
2759-
params.adam_alpha = 1e-3;
2760-
params.adam_decay = 1e-3;
2761+
params.adam_alpha = 1e-3f;
2762+
params.adam_decay = 1e-3f;
27612763

27622764
params.mem_model_gb = 2;
27632765
params.mem_compute_gb = 24;
@@ -3331,8 +3333,8 @@ int main(int argc, char ** argv) {
33313333
int n_gen = params.n_predict;
33323334
int sample_ctx = n_tokens - n_tokens/8;
33333335

3334-
sampler.params.temp = 0.2;
3335-
sampler.params.repeat_penalty = 1.1;
3336+
sampler.params.temp = 0.2f;
3337+
sampler.params.repeat_penalty = 1.1f;
33363338
sampler.params.mirostat = 2;
33373339
init_sampler(&sampler, lctx);
33383340

ggml.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,12 @@
3535
#define static_assert(cond, msg) struct global_scope_noop_trick
3636
#endif
3737

38+
#if defined(_MSC_VER)
39+
// disable "possible loss of data" to avoid hundreds of casts
40+
// we should just be careful :)
41+
#pragma warning(disable: 4244 4267)
42+
#endif
43+
3844
#if defined(_WIN32)
3945

4046
#include <windows.h>

0 commit comments

Comments (0)