From 1cec9f1ea9502674efa071f1a011e796a5daa0e4 Mon Sep 17 00:00:00 2001
From: zhouwg
Date: Thu, 6 Mar 2025 09:07:54 +0800
Subject: [PATCH 1/3] build: build llama.cpp with cygwin on Windows, without
 complex IDE

---
 common/console.cpp | 4 +++-
 src/llama-mmap.cpp | 6 +++---
 2 files changed, 6 insertions(+), 4 deletions(-)
 mode change 100644 => 100755 common/console.cpp
 mode change 100644 => 100755 src/llama-mmap.cpp

diff --git a/common/console.cpp b/common/console.cpp
old mode 100644
new mode 100755
index 078a8d678d933..73b00aa95de9f
--- a/common/console.cpp
+++ b/common/console.cpp
@@ -241,7 +241,9 @@ namespace console {
         (void)codepoint;
         return 1;
 #else
-        return wcwidth(codepoint);
+        //return wcwidth(codepoint);
+        (void)codepoint;
+        return 1;
 #endif
     }
 
diff --git a/src/llama-mmap.cpp b/src/llama-mmap.cpp
old mode 100644
new mode 100755
index 3970b7485fe9d..d415d7cc4e91a
--- a/src/llama-mmap.cpp
+++ b/src/llama-mmap.cpp
@@ -482,9 +482,9 @@ struct llama_mlock::impl {
         suggest = false;
 #else
         struct rlimit lock_limit;
-        if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) {
-            suggest = false;
-        }
+        //if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) {
+        //    suggest = false;
+        //}
         if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) {
             suggest = false;
         }

From bcbd79698a44f15c828ce10da9ffc249418604b7 Mon Sep 17 00:00:00 2001
From: zhouwg
Date: Thu, 6 Mar 2025 09:21:43 +0800
Subject: [PATCH 2/3] build: build with cygwin on Windows --- make CI happy

---
 src/llama-mmap.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llama-mmap.cpp b/src/llama-mmap.cpp
index d415d7cc4e91a..c8b948b154945 100755
--- a/src/llama-mmap.cpp
+++ b/src/llama-mmap.cpp
@@ -481,7 +481,7 @@ struct llama_mlock::impl {
         // Skip resource limit checks on visionOS/tvOS
         suggest = false;
 #else
-        struct rlimit lock_limit;
+        struct rlimit lock_limit = {};
         //if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) {
         //    suggest = false;
         //}

From e916fa07a58f96c91fe67be50b55b86168bea399 Mon Sep 17 00:00:00 2001
From: zhouwg
Date: Mon, 10 Mar 2025 21:45:51 +0800
Subject: [PATCH 3/3] build: build llama.cpp on Windows without VS IDE -- the
 second patch

---
 examples/export-lora/export-lora.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/export-lora/export-lora.cpp b/examples/export-lora/export-lora.cpp
index 91238e4beb26c..b0ba4e9dfc3c0 100644
--- a/examples/export-lora/export-lora.cpp
+++ b/examples/export-lora/export-lora.cpp
@@ -148,7 +148,7 @@ struct lora_merge_ctx {
         ctx_out = gguf_init_empty();
 
         struct ggml_init_params params = {
-            /*.mem_size   =*/ gguf_get_n_tensors(base_model.ctx_gguf)*ggml_tensor_overhead(),
+            /*.mem_size   =*/ static_cast<size_t>(gguf_get_n_tensors(base_model.ctx_gguf)*ggml_tensor_overhead()),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };