1 file changed: +8 insertions, -0 deletions
@@ -3346,7 +3346,11 @@ static size_t llama_get_device_count(const llama_model & model) {
33463346static ggml_backend_buffer_type_t llama_default_buffer_type_offload(const llama_model & model, int gpu) {
33473347 ggml_backend_buffer_type_t buft = nullptr;
33483348
3349+ #ifdef GGML_USE_RPC
33493350 int rpc_count = (int)model.rpc_servers.size();
3351+ #else
3352+ int rpc_count = 0;
3353+ #endif
33503354 int local_gpu = gpu - rpc_count;
33513355#if defined(GGML_USE_RPC)
33523356 if (gpu < rpc_count) {
@@ -3403,7 +3407,11 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_split(const llama_model & model, int gpu, const float * tensor_split) {
34033407}
34043408
34053409static size_t llama_get_device_memory(const llama_model & model, int device) {
3410+ #ifdef GGML_USE_RPC
34063411 int rpc_count = (int)model.rpc_servers.size();
3412+ #else
3413+ int rpc_count = 0;
3414+ #endif
34073415 int local_device = device - rpc_count;
34083416#if defined(GGML_USE_RPC)
34093417 if (device < rpc_count) {
You can’t perform that action at this time.
0 commit comments