From f81f3103c610cae10f2ec6201382a9b7dab70981 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 10 Oct 2025 13:32:19 +0300
Subject: [PATCH 1/5] presets : add --embd-gemma-default and remove old embedding presets

---
 common/arg.cpp | 243 +++++++++++++++++++++----------------------
 1 file changed, 104 insertions(+), 139 deletions(-)

diff --git a/common/arg.cpp b/common/arg.cpp
index d17645cf2f395..3fb0908b04255 100644
--- a/common/arg.cpp
+++ b/common/arg.cpp
@@ -3570,21 +3570,23 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             common_log_set_file(common_log_main(), value.c_str());
         }
     ));
-    add_opt(common_arg({ "--log-colors" }, "[on|off|auto]",
-        "Set colored logging ('on', 'off', or 'auto', default: 'auto')\n"
-        "'auto' enables colors when output is to a terminal",
-        [](common_params &, const std::string & value) {
-            if (is_truthy(value)) {
-                common_log_set_colors(common_log_main(), LOG_COLORS_ENABLED);
-            } else if (is_falsey(value)) {
-                common_log_set_colors(common_log_main(), LOG_COLORS_DISABLED);
-            } else if (is_autoy(value)) {
-                common_log_set_colors(common_log_main(), LOG_COLORS_AUTO);
-            } else {
-                throw std::invalid_argument(
-                    string_format("error: unkown value for --log-colors: '%s'\n", value.c_str()));
-            }
-        }).set_env("LLAMA_LOG_COLORS"));
+    add_opt(common_arg(
+        {"--log-colors"}, "[on|off|auto]",
+        "Set colored logging ('on', 'off', or 'auto', default: 'auto')\n"
+        "'auto' enables colors when output is to a terminal",
+        [](common_params &, const std::string & value) {
+            if (is_truthy(value)) {
+                common_log_set_colors(common_log_main(), LOG_COLORS_ENABLED);
+            } else if (is_falsey(value)) {
+                common_log_set_colors(common_log_main(), LOG_COLORS_DISABLED);
+            } else if (is_autoy(value)) {
+                common_log_set_colors(common_log_main(), LOG_COLORS_AUTO);
+            } else {
+                throw std::invalid_argument(
+                    string_format("error: unknown value for --log-colors: '%s'\n", value.c_str()));
+            }
+        }
+    ).set_env("LLAMA_LOG_COLORS"));
     add_opt(common_arg(
         {"-v", "--verbose", "--log-verbose"},
         "Set verbosity level to infinity (i.e. log all messages, useful for debugging)",
@@ -3850,7 +3852,87 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         }
     ).set_examples({LLAMA_EXAMPLE_TTS}));
 
-    // model-specific
+    add_opt(common_arg(
+        {"--diffusion-steps"}, "N",
+        string_format("number of diffusion steps (default: %d)", params.diffusion.steps),
+        [](common_params & params, int value) { params.diffusion.steps = value; }
+    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
+    add_opt(common_arg(
+        {"--diffusion-visual"},
+        string_format("enable visual diffusion mode (show progressive generation) (default: %s)", params.diffusion.visual_mode ? 
"true" : "false"), + [](common_params & params) { params.diffusion.visual_mode = true; } + ).set_examples({ LLAMA_EXAMPLE_DIFFUSION })); + add_opt(common_arg( + {"--diffusion-eps"}, "F", + string_format("epsilon for timesteps (default: %.6f)", (double) params.diffusion.eps), + [](common_params & params, const std::string & value) { params.diffusion.eps = std::stof(value); } + ).set_examples({ LLAMA_EXAMPLE_DIFFUSION })); + add_opt(common_arg( + {"--diffusion-algorithm"}, "N", + string_format("diffusion algorithm: 0=ORIGIN, 1=ENTROPY_BASED, 2=MARGIN_BASED, 3=RANDOM, 4=LOW_CONFIDENCE (default: %d)", params.diffusion.algorithm), + [](common_params & params, int value) { params.diffusion.algorithm = value; } + ).set_examples({ LLAMA_EXAMPLE_DIFFUSION })); + add_opt(common_arg( + {"--diffusion-alg-temp"}, "F", + string_format("dream algorithm temperature (default: %.3f)", (double) params.diffusion.alg_temp), + [](common_params & params, const std::string & value) { params.diffusion.alg_temp = std::stof(value); } + ).set_examples({ LLAMA_EXAMPLE_DIFFUSION })); + add_opt(common_arg( + {"--diffusion-block-length"}, "N", + string_format("llada block length for generation (default: %d)", params.diffusion.block_length), + [](common_params & params, int value) { params.diffusion.block_length = value; } + ).set_examples({ LLAMA_EXAMPLE_DIFFUSION })); + add_opt(common_arg( + {"--diffusion-cfg-scale"}, "F", + string_format("llada classifier-free guidance scale (default: %.3f)", (double) params.diffusion.cfg_scale), + [](common_params & params, const std::string & value) { params.diffusion.cfg_scale = std::stof(value); } + ).set_examples({ LLAMA_EXAMPLE_DIFFUSION })); + add_opt(common_arg( + {"--diffusion-add-gumbel-noise"}, "F", + string_format("add gumbel noise to the logits if temp > 0.0 (default: %s)", params.diffusion.add_gumbel_noise ? "true" : "false"), + [](common_params & params, const std::string & value) { params.diffusion.add_gumbel_noise = std::stof(value); } + ).set_examples({ LLAMA_EXAMPLE_DIFFUSION })); + add_opt(common_arg( + { "-lr", "--learning-rate" }, "ALPHA", + string_format("adamw or sgd optimizer alpha (default: %.2g); note: sgd alpha recommended ~10x (no momentum)", (double) params.lr.lr0), + [](common_params & params, const std::string & value) { params.lr.lr0 = std::stof(value); } + ).set_examples({ LLAMA_EXAMPLE_FINETUNE })); + add_opt(common_arg({ "-lr-min", "--learning-rate-min" }, "ALPHA", + string_format("(if >0) final learning rate after decay (if -decay-epochs is set, default=%.2g)", + (double) params.lr.lr_min), + [](common_params & params, const std::string & value) { params.lr.lr_min = std::stof(value); } + ).set_examples({ LLAMA_EXAMPLE_FINETUNE })); + add_opt(common_arg( + {"-decay-epochs", "--learning-rate-decay-epochs"}, "ALPHA", + string_format("(if >0) decay learning rate to -lr-min after this many epochs (exponential decay, default=%.2g)", (double) params.lr.decay_epochs), + [](common_params & params, const std::string & value) { params.lr.decay_epochs = std::stof(value); } + ).set_examples({ LLAMA_EXAMPLE_FINETUNE })); + add_opt(common_arg( + {"-wd", "--weight-decay"}, "WD", + string_format("adamw or sgd optimizer weight decay (0 is off; recommend very small e.g. 
1e-9) (default: %.2g).", (double) params.lr.wd), + [](common_params & params, const std::string & value) { params.lr.wd = std::stof(value); } + ).set_examples({ LLAMA_EXAMPLE_FINETUNE })); + add_opt(common_arg( + {"-val-split", "--val-split"}, "FRACTION", + string_format("fraction of data to use as validation set for training (default: %.2g).", (double) params.val_split), + [](common_params & params, const std::string & value) { params.val_split = std::stof(value); } + ).set_examples({ LLAMA_EXAMPLE_FINETUNE })); + add_opt(common_arg( + {"-epochs", "--epochs"}, "N", + string_format("optimizer max # of epochs (default: %d)", params.lr.epochs), + [](common_params & params, int epochs) { params.lr.epochs = epochs; } + ).set_examples({ LLAMA_EXAMPLE_FINETUNE })); + add_opt(common_arg( + {"-opt", "--optimizer"}, "sgd|adamw", "adamw or sgd", + [](common_params & params, const std::string & name) { + params.optimizer = common_opt_get_optimizer(name.c_str()); + if (params.optimizer == GGML_OPT_OPTIMIZER_TYPE_COUNT) { + throw std::invalid_argument("invalid --optimizer, valid options: adamw, sgd"); + } + } + ).set_examples({ LLAMA_EXAMPLE_FINETUNE })); + + // presets add_opt(common_arg( {"--tts-oute-default"}, string_format("use default OuteTTS models (note: can download weights from the internet)"), @@ -3863,39 +3945,14 @@ common_params_context common_params_parser_init(common_params & params, llama_ex ).set_examples({LLAMA_EXAMPLE_TTS})); add_opt(common_arg( - {"--embd-bge-small-en-default"}, + {"--embd-gemma-default"}, string_format("use default bge-small-en-v1.5 model (note: can download weights from the internet)"), [](common_params & params) { - params.model.hf_repo = "ggml-org/bge-small-en-v1.5-Q8_0-GGUF"; - params.model.hf_file = "bge-small-en-v1.5-q8_0.gguf"; - params.embd_normalize = 2; - params.n_ctx = 512; - params.verbose_prompt = true; - params.embedding = true; - } - ).set_examples({LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_SERVER})); - - add_opt(common_arg( - {"--embd-e5-small-en-default"}, - string_format("use default e5-small-v2 model (note: can download weights from the internet)"), - [](common_params & params) { - params.model.hf_repo = "ggml-org/e5-small-v2-Q8_0-GGUF"; - params.model.hf_file = "e5-small-v2-q8_0.gguf"; - params.embd_normalize = 2; - params.n_ctx = 512; - params.verbose_prompt = true; - params.embedding = true; - } - ).set_examples({LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_SERVER})); - - add_opt(common_arg( - {"--embd-gte-small-default"}, - string_format("use default gte-small model (note: can download weights from the internet)"), - [](common_params & params) { - params.model.hf_repo = "ggml-org/gte-small-Q8_0-GGUF"; - params.model.hf_file = "gte-small-q8_0.gguf"; - params.embd_normalize = 2; - params.n_ctx = 512; + params.model.hf_repo = "ggml-org/embeddinggemma-300M-qat-q4_0-GGUF"; + params.model.hf_file = "embeddinggemma-300M-qat-Q4_0.gguf"; + params.port = 8011; + params.n_parallel = 32; + params.n_ctx = 2048*params.n_parallel; params.verbose_prompt = true; params.embedding = true; } @@ -3989,97 +4046,5 @@ common_params_context common_params_parser_init(common_params & params, llama_ex } ).set_examples({LLAMA_EXAMPLE_SERVER})); - add_opt(common_arg( - { "--diffusion-steps" }, "N", - string_format("number of diffusion steps (default: %d)", params.diffusion.steps), - [](common_params & params, int value) { params.diffusion.steps = value; } - ).set_examples({ LLAMA_EXAMPLE_DIFFUSION })); - add_opt(common_arg( - { "--diffusion-visual" }, - string_format("enable visual 
diffusion mode (show progressive generation) (default: %s)", - params.diffusion.visual_mode ? "true" : "false"), - [](common_params & params) { params.diffusion.visual_mode = true; } - ).set_examples({ LLAMA_EXAMPLE_DIFFUSION })); - - add_opt(common_arg( - { "--diffusion-eps" }, "F", - string_format("epsilon for timesteps (default: %.6f)", (double) params.diffusion.eps), - [](common_params & params, const std::string & value) { params.diffusion.eps = std::stof(value); } - ).set_examples({ LLAMA_EXAMPLE_DIFFUSION })); - add_opt(common_arg( - { "--diffusion-algorithm" }, "N", - string_format("diffusion algorithm: 0=ORIGIN, 1=ENTROPY_BASED, 2=MARGIN_BASED, 3=RANDOM, 4=LOW_CONFIDENCE (default: %d)", - params.diffusion.algorithm), - [](common_params & params, int value) { params.diffusion.algorithm = value; } - ).set_examples({ LLAMA_EXAMPLE_DIFFUSION })); - add_opt(common_arg( - { "--diffusion-alg-temp" }, "F", - string_format("dream algorithm temperature (default: %.3f)", (double) params.diffusion.alg_temp), - [](common_params & params, const std::string & value) { params.diffusion.alg_temp = std::stof(value); } - ).set_examples({ LLAMA_EXAMPLE_DIFFUSION })); - - add_opt(common_arg( - { "--diffusion-block-length" }, "N", - string_format("llada block length for generation (default: %d)", params.diffusion.block_length), - [](common_params & params, int value) { params.diffusion.block_length = value; } - ).set_examples({ LLAMA_EXAMPLE_DIFFUSION })); - add_opt(common_arg( - { "--diffusion-cfg-scale" }, "F", - string_format("llada classifier-free guidance scale (default: %.3f)", (double) params.diffusion.cfg_scale), - [](common_params & params, const std::string & value) { params.diffusion.cfg_scale = std::stof(value); } - ).set_examples({ LLAMA_EXAMPLE_DIFFUSION })); - add_opt(common_arg( - { "--diffusion-add-gumbel-noise" }, "F", - string_format("add gumbel noise to the logits if temp > 0.0 (default: %s)", params.diffusion.add_gumbel_noise ? "true" : "false"), - [](common_params & params, const std::string & value) { params.diffusion.add_gumbel_noise = std::stof(value); } - ).set_examples({ LLAMA_EXAMPLE_DIFFUSION })); - - - add_opt( - common_arg({ "-lr", "--learning-rate" }, "ALPHA", - string_format( - "adamw or sgd optimizer alpha (default: %.2g); note: sgd alpha recommended ~10x (no momentum)", - (double) params.lr.lr0), - [](common_params & params, const std::string & value) { params.lr.lr0 = std::stof(value); }) - .set_examples({ LLAMA_EXAMPLE_FINETUNE })); - add_opt( - common_arg({ "-lr-min", "--learning-rate-min" }, "ALPHA", - string_format( - "(if >0) final learning rate after decay (if -decay-epochs is set, default=%.2g)", - (double) params.lr.lr_min), - [](common_params & params, const std::string & value) { params.lr.lr_min = std::stof(value); }) - .set_examples({ LLAMA_EXAMPLE_FINETUNE })); - add_opt( - common_arg({ "-decay-epochs", "--learning-rate-decay-epochs" }, "ALPHA", - string_format( - "(if >0) decay learning rate to -lr-min after this many epochs (exponential decay, default=%.2g)", - (double) params.lr.decay_epochs), - [](common_params & params, const std::string & value) { params.lr.decay_epochs = std::stof(value); }) - .set_examples({ LLAMA_EXAMPLE_FINETUNE })); - add_opt(common_arg( - { "-wd", "--weight-decay" }, "WD", - string_format( - "adamw or sgd optimizer weight decay (0 is off; recommend very small e.g. 
1e-9) (default: %.2g).", - (double) params.lr.wd), - [](common_params & params, const std::string & value) { params.lr.wd = std::stof(value); }) - .set_examples({ LLAMA_EXAMPLE_FINETUNE })); - add_opt(common_arg({ "-val-split", "--val-split" }, "FRACTION", - string_format("fraction of data to use as validation set for training (default: %.2g).", - (double) params.val_split), - [](common_params & params, const std::string & value) { params.val_split = std::stof(value); }) - .set_examples({ LLAMA_EXAMPLE_FINETUNE })); - add_opt(common_arg({ "-epochs", "--epochs" }, "N", - string_format("optimizer max # of epochs (default: %d)", params.lr.epochs), - [](common_params & params, int epochs) { params.lr.epochs = epochs; }) - .set_examples({ LLAMA_EXAMPLE_FINETUNE })); - add_opt(common_arg({ "-opt", "--optimizer" }, "sgd|adamw", "adamw or sgd", - [](common_params & params, const std::string & name) { - params.optimizer = common_opt_get_optimizer(name.c_str()); - if (params.optimizer == GGML_OPT_OPTIMIZER_TYPE_COUNT) { - throw std::invalid_argument("invalid --optimizer, valid options: adamw, sgd"); - } - }) - .set_examples({ LLAMA_EXAMPLE_FINETUNE })); - return ctx_arg; } From cbb0e618ee608c5e26039c68555c635e187297de Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 10 Oct 2025 13:48:13 +0300 Subject: [PATCH 2/5] presets : add gpt-oss presets --- common/arg.cpp | 43 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 41 insertions(+), 2 deletions(-) diff --git a/common/arg.cpp b/common/arg.cpp index 3fb0908b04255..8fe19f8fe5e1f 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -3358,7 +3358,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex add_opt(common_arg( {"--chat-template-kwargs"}, "STRING", string_format("sets additional params for the json template parser"), - [](common_params & params, const std::string & value) { + [](common_params & params, const std::string & value) { auto parsed = json::parse(value); for (const auto & item : parsed.items()) { params.default_template_kwargs[item.key()] = item.value().dump(); @@ -3946,7 +3946,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex add_opt(common_arg( {"--embd-gemma-default"}, - string_format("use default bge-small-en-v1.5 model (note: can download weights from the internet)"), + string_format("use default EmbeddingGemma model (note: can download weights from the internet)"), [](common_params & params) { params.model.hf_repo = "ggml-org/embeddinggemma-300M-qat-q4_0-GGUF"; params.model.hf_file = "embeddinggemma-300M-qat-Q4_0.gguf"; @@ -4046,5 +4046,44 @@ common_params_context common_params_parser_init(common_params & params, llama_ex } ).set_examples({LLAMA_EXAMPLE_SERVER})); + add_opt(common_arg( + {"--gpt-oss-20b-default"}, + string_format("use gpt-oss-20b (note: can download weights from the internet)"), + [](common_params & params) { + params.model.hf_repo = "ggml-org/gpt-oss-20b-GGUF"; + params.model.hf_file = "gpt-oss-20b-mxfp4.gguf"; + params.port = 8013; + params.n_ubatch = 2048; + params.n_batch = 32768; + params.n_parallel = 2; + params.n_ctx = 131072*params.n_parallel; + params.sampling.temp = 1.0f; + params.sampling.top_p = 1.0f; + params.sampling.top_k = 0; + params.sampling.min_p = 0.01f; + params.use_jinja = true; + params.default_template_kwargs["reasoning_effort"] = "\"high\""; + } + ).set_examples({LLAMA_EXAMPLE_SERVER})); + + add_opt(common_arg( + {"--gpt-oss-120b-default"}, + string_format("use gpt-oss-120b (note: can download weights 
from the internet)"), + [](common_params & params) { + params.model.hf_repo = "ggml-org/gpt-oss-120b-GGUF"; + params.port = 8013; + params.n_ubatch = 2048; + params.n_batch = 32768; + params.n_parallel = 2; + params.n_ctx = 131072*params.n_parallel; + params.sampling.temp = 1.0f; + params.sampling.top_p = 1.0f; + params.sampling.top_k = 0; + params.sampling.min_p = 0.01f; + params.use_jinja = true; + params.default_template_kwargs["reasoning_effort"] = "\"high\""; + } + ).set_examples({LLAMA_EXAMPLE_SERVER})); + return ctx_arg; } From 983ad2716506b5ce66af371a03b3dc057bd19030 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 10 Oct 2025 15:01:00 +0300 Subject: [PATCH 3/5] presets : add vision presets --- common/arg.cpp | 22 ++++++++++++++++++++++ common/common.h | 2 +- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/common/arg.cpp b/common/arg.cpp index 8fe19f8fe5e1f..a664065e4d1f3 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -4085,5 +4085,27 @@ common_params_context common_params_parser_init(common_params & params, llama_ex } ).set_examples({LLAMA_EXAMPLE_SERVER})); + add_opt(common_arg( + {"--vision-gemma-4b-default"}, + string_format("use Gemma 3 4B QAT (note: can download weights from the internet)"), + [](common_params & params) { + params.model.hf_repo = "ggml-org/gemma-3-4b-it-qat-GGUF"; + params.port = 8014; + params.n_ctx = 0; + params.use_jinja = true; + } + ).set_examples({LLAMA_EXAMPLE_SERVER})); + + add_opt(common_arg( + {"--vision-gemma-12b-default"}, + string_format("use Gemma 3 12B QAT (note: can download weights from the internet)"), + [](common_params & params) { + params.model.hf_repo = "ggml-org/gemma-3-12b-it-qat-GGUF"; + params.port = 8014; + params.n_ctx = 0; + params.use_jinja = true; + } + ).set_examples({LLAMA_EXAMPLE_SERVER})); + return ctx_arg; } diff --git a/common/common.h b/common/common.h index 040a44ebd89b0..a8cb630ea5805 100644 --- a/common/common.h +++ b/common/common.h @@ -426,7 +426,7 @@ struct common_params { int32_t n_threads_http = -1; // number of threads to process HTTP requests (TODO: support threadpool) int32_t n_cache_reuse = 0; // min chunk size to reuse from the cache via KV shifting int32_t n_ctx_checkpoints = 8; // max number of context checkpoints per slot - int32_t cache_ram_mib = 8192; // 0 = no limit, 1 = 1 MiB, etc. + int32_t cache_ram_mib = 8192; // -1 = no limit, 0 - disable, 1 = 1 MiB, etc. 
std::string hostname = "127.0.0.1"; std::string public_path = ""; // NOLINT From e571f14c2b9e1b36a69354f2fe4b37e1d31ec5ca Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sat, 11 Oct 2025 10:11:34 +0300 Subject: [PATCH 4/5] cont : remove reasoning overrides [no ci] --- common/arg.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common/arg.cpp b/common/arg.cpp index a664065e4d1f3..ad43b23c00976 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -4062,7 +4062,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.sampling.top_k = 0; params.sampling.min_p = 0.01f; params.use_jinja = true; - params.default_template_kwargs["reasoning_effort"] = "\"high\""; + //params.default_template_kwargs["reasoning_effort"] = "\"high\""; } ).set_examples({LLAMA_EXAMPLE_SERVER})); @@ -4081,7 +4081,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.sampling.top_k = 0; params.sampling.min_p = 0.01f; params.use_jinja = true; - params.default_template_kwargs["reasoning_effort"] = "\"high\""; + //params.default_template_kwargs["reasoning_effort"] = "\"high\""; } ).set_examples({LLAMA_EXAMPLE_SERVER})); From 6c4a02ab9ab2916352b06cdb6eae6f1778e144a5 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sat, 11 Oct 2025 10:42:59 +0300 Subject: [PATCH 5/5] cont : fix batch size for embedding gemma [no ci] --- common/arg.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/common/arg.cpp b/common/arg.cpp index ad43b23c00976..c0b718071127d 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -3951,6 +3951,8 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.model.hf_repo = "ggml-org/embeddinggemma-300M-qat-q4_0-GGUF"; params.model.hf_file = "embeddinggemma-300M-qat-Q4_0.gguf"; params.port = 8011; + params.n_ubatch = 2048; + params.n_batch = 2048; params.n_parallel = 32; params.n_ctx = 2048*params.n_parallel; params.verbose_prompt = true;
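
Usage sketch for the presets introduced in this series (assumes a llama-server binary built from this branch; the flag names and default ports below come from the patches above, everything else is illustrative):

    # embedding server on port 8011 (EmbeddingGemma 300M QAT)
    llama-server --embd-gemma-default

    # chat server on port 8013 with the recommended gpt-oss sampling settings
    llama-server --gpt-oss-20b-default

    # multimodal server on port 8014 (Gemma 3 4B QAT)
    llama-server --vision-gemma-4b-default

Since arguments are applied in the order they appear on the command line, a flag placed after the preset should override the preset's value, e.g.:

    llama-server --gpt-oss-20b-default --port 8080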