@@ -510,11 +510,8 @@ struct llama_model::impl {
510510 llama_mlocks mlock_bufs;
511511 llama_mlocks mlock_mmaps;
512512
513- // contexts where the model tensors metadata is stored
514- std::vector<ggml_context_ptr> ctxs;
515-
516- // the model memory buffers for the tensor data
517- std::vector<ggml_backend_buffer_ptr> bufs;
513+ // contexts where the model tensors metadata is stored as well as the corresponding buffers:
514+ std::vector<std::pair<ggml_context_ptr, ggml_backend_buffer_ptr>> ctxs_bufs;
518515
519516 buft_list_t cpu_buft_list;
520517 std::map<ggml_backend_dev_t, buft_list_t> gpu_buft_list;
@@ -2294,7 +2291,14 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
22942291 max_n_tensors += n_layer*2; // duplicated rope freq tensors
22952292 const size_t ctx_size = ggml_tensor_overhead()*max_n_tensors;
22962293
2297- std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
2294+ // define a comparator for the buft -> ctx map to ensure that the order is well-defined:
2295+ struct ggml_backend_buft_comparator {
2296+ bool operator()(const ggml_backend_buffer_type_t & lhs, const ggml_backend_buffer_type_t & rhs) const {
2297+ return ggml_backend_buft_name(lhs) < ggml_backend_buft_name(rhs);
2298+ }
2299+ };
2300+ std::map<ggml_backend_buffer_type_t, ggml_context_ptr, ggml_backend_buft_comparator> ctx_map;
2301+
22982302 auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
22992303 auto it = ctx_map.find(buft);
23002304 if (it == ctx_map.end()) {
@@ -2309,12 +2313,11 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
23092313 throw std::runtime_error(format("failed to create ggml context"));
23102314 }
23112315
2312- ctx_map[buft] = ctx;
2313- pimpl->ctxs.emplace_back(ctx);
2316+ ctx_map.emplace(buft, ctx);
23142317
23152318 return ctx;
23162319 }
2317- return it->second;
2320+ return it->second.get() ;
23182321 };
23192322
23202323 const auto TENSOR_DUPLICATED = llama_model_loader::TENSOR_DUPLICATED;
@@ -6218,16 +6221,15 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
62186221 pimpl->mappings.reserve(ml.mappings.size());
62196222
62206223 // create the backend buffers
6221- std::vector<std::pair<ggml_context *, llama_buf_map>> ctx_bufs ;
6222- ctx_bufs .reserve(ctx_map.size());
6224+ std::vector<std::pair<ggml_context *, llama_buf_map>> ctx_buf_maps ;
6225+ ctx_buf_maps .reserve(ctx_map.size());
62236226
62246227 // Ensure we have enough capacity for the maximum backend buffer we will potentially create
62256228 const size_t n_max_backend_buffer = ctx_map.size() * ml.files.size();
6226- pimpl->bufs .reserve(n_max_backend_buffer);
6229+ pimpl->ctxs_bufs .reserve(n_max_backend_buffer);
62276230
6228- for (auto & it : ctx_map) {
6229- ggml_backend_buffer_type_t buft = it.first;
6230- ggml_context * ctx = it.second;
6231+ for (auto & [buft, ctx_ptr] : ctx_map) {
6232+ ggml_context * ctx = ctx_ptr.get();
62316233
62326234 // skip contexts without tensors
62336235 if (ggml_get_first_tensor(ctx) == nullptr) {
@@ -6251,6 +6253,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
62516253 bool buffer_from_host_ptr_supported = props.caps.buffer_from_host_ptr;
62526254 bool is_default_buft = buft == ggml_backend_dev_buffer_type(dev);
62536255
6256+ ggml_backend_buffer_t buf = nullptr;
62546257 if (ml.use_mmap && use_mmap_buffer && buffer_from_host_ptr_supported && is_default_buft) {
62556258 for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
62566259 // only the mmap region containing the tensors in the model is mapped to the backend buffer
@@ -6263,20 +6266,18 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
62636266 continue;
62646267 }
62656268 const size_t max_size = ggml_get_max_tensor_size(ctx);
6266- ggml_backend_buffer_t buf = ggml_backend_dev_buffer_from_host_ptr(dev, (char *) addr + first, last - first, max_size);
6269+ buf = ggml_backend_dev_buffer_from_host_ptr(dev, (char *) addr + first, last - first, max_size);
62676270 if (buf == nullptr) {
62686271 throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
62696272 }
6270- pimpl->bufs.emplace_back(buf);
62716273 buf_map.emplace(idx, buf);
62726274 }
62736275 }
62746276 else {
6275- ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
6277+ buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
62766278 if (buf == nullptr) {
62776279 throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
62786280 }
6279- pimpl->bufs.emplace_back(buf);
62806281 if (use_mlock && ggml_backend_buffer_is_host(buf)) {
62816282 pimpl->mlock_bufs.emplace_back(new llama_mlock);
62826283 auto & mlock_buf = pimpl->mlock_bufs.back();
@@ -6287,18 +6288,15 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
62876288 buf_map.emplace(idx, buf);
62886289 }
62896290 }
6290-
6291- if (pimpl->bufs.empty()) {
6292- throw std::runtime_error("failed to allocate buffer");
6293- }
6291+ pimpl->ctxs_bufs.emplace_back(std::move(ctx_ptr), buf);
62946292
62956293 for (auto & buf : buf_map) {
62966294 // indicate that this buffer contains weights
62976295 // this is used by ggml_backend_sched to improve op scheduling: ops that use a weight are preferably scheduled to the backend that contains the weight
62986296 ggml_backend_buffer_set_usage(buf.second, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
62996297 }
63006298
6301- ctx_bufs .emplace_back(ctx, buf_map);
6299+ ctx_buf_maps .emplace_back(ctx, buf_map);
63026300 }
63036301
63046302 if (llama_supports_gpu_offload()) {
@@ -6316,22 +6314,20 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
63166314 }
63176315
63186316 // print memory requirements per buffer type
6319- for (auto & buf : pimpl->bufs ) {
6317+ for (auto & [_, buf] : pimpl->ctxs_bufs ) {
63206318 LLAMA_LOG_INFO("%s: %12s model buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get()) / 1024.0 / 1024.0);
63216319 }
63226320
63236321 // populate tensors_by_name
6324- for (auto & ctx : pimpl->ctxs ) {
6322+ for (auto & [ ctx, _] : pimpl->ctxs_bufs ) {
63256323 for (auto * cur = ggml_get_first_tensor(ctx.get()); cur != NULL; cur = ggml_get_next_tensor(ctx.get(), cur)) {
63266324 tensors_by_name.emplace_back(ggml_get_name(cur), cur);
63276325 }
63286326 }
63296327
63306328 // load tensor data
6331- for (auto & it : ctx_bufs) {
6332- ggml_context * ctx = it.first;
6333- auto & bufs = it.second;
6334- if (!ml.load_all_data(ctx, bufs, use_mlock ? &pimpl->mlock_mmaps : NULL, params.progress_callback, params.progress_callback_user_data)) {
6329+ for (auto & [ctx, buf_map] : ctx_buf_maps) {
6330+ if (!ml.load_all_data(ctx, buf_map, use_mlock ? &pimpl->mlock_mmaps : NULL, params.progress_callback, params.progress_callback_user_data)) {
63356331 return false;
63366332 }
63376333 }
@@ -6371,8 +6367,8 @@ size_t llama_model::n_devices() const {
63716367
63726368std::map<ggml_backend_buffer_type_t, size_t> llama_model::memory_breakdown() const {
63736369 std::map<ggml_backend_buffer_type_t, size_t> ret;
6374- for (const ggml_backend_buffer_ptr & buf_ptr : pimpl->bufs ) {
6375- ret[ggml_backend_buffer_get_type(buf_ptr .get())] += ggml_backend_buffer_get_size(buf_ptr .get());
6370+ for (const auto & [_, buf] : pimpl->ctxs_bufs ) {
6371+ ret[ggml_backend_buffer_get_type(buf .get())] += ggml_backend_buffer_get_size(buf .get());
63766372 }
63776373 return ret;
63786374}
0 commit comments