We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 0c91aef · commit 75dc800 · Copy full SHA for 75dc800
examples/talk-llama/talk-llama.cpp
@@ -267,7 +267,7 @@ int main(int argc, char ** argv) {
267
268
auto lmparams = llama_model_default_params();
269
if (!params.use_gpu) {
270
- lcparams.lmparams = 0;
+ lmparams.n_gpu_layers = 0;
271
}
272
273
struct llama_model * model_llama = llama_load_model_from_file(params.model_llama.c_str(), lmparams);
0 commit comments