@@ -321,12 +321,6 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             invalid_param = true;
             break;
         }
-    } else if (arg == "--n-parts") {
-        if (++i >= argc) {
-            invalid_param = true;
-            break;
-        }
-        params.n_parts = std::stoi(argv[i]);
     } else if (arg == "-h" || arg == "--help") {
         gpt_print_usage(argc, argv, default_params);
         exit(0);
@@ -418,7 +412,6 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stderr, "  --no-penalize-nl      do not penalize newline token\n");
     fprintf(stderr, "  --memory-f32          use f32 instead of f16 for memory key+value\n");
     fprintf(stderr, "  --temp N              temperature (default: %.1f)\n", (double)params.temp);
-    fprintf(stderr, "  --n-parts N           number of model parts (default: -1 = determine from dimensions)\n");
     fprintf(stderr, "  -b N, --batch-size N  batch size for prompt processing (default: %d)\n", params.n_batch);
     fprintf(stderr, "  --perplexity          compute perplexity over the prompt\n");
     fprintf(stderr, "  --keep                number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep);
@@ -473,7 +466,6 @@ struct llama_context * llama_init_from_gpt_params(const gpt_params & params) {
     auto lparams = llama_context_default_params();
 
     lparams.n_ctx        = params.n_ctx;
-    lparams.n_parts      = params.n_parts;
     lparams.n_gpu_layers = params.n_gpu_layers;
     lparams.seed         = params.seed;
     lparams.f16_kv       = params.memory_f16;