@@ -469,7 +469,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
 			spin_unlock_irq(&ent->lock);
 
 			err = add_keys(dev, entry, 1);
-			if (err)
+			if (err && err != -EAGAIN)
 				return ERR_PTR(err);
 
 			wait_for_completion(&ent->compl);
@@ -669,8 +669,10 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
 		queue_work(cache->wq, &ent->work);
 
-		if (i > MAX_UMR_CACHE_ENTRY)
+		if (i > MAX_UMR_CACHE_ENTRY) {
+			mlx5_odp_init_mr_cache_entry(ent);
 			continue;
+		}
 
 		if (!use_umr(dev, ent->order))
 			continue;
@@ -935,6 +937,10 @@ static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
 {
 	struct mlx5_ib_dev *dev = mr->dev;
 	struct ib_umem *umem = mr->umem;
+	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
+		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
+		return npages;
+	}
 
 	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);
 
@@ -968,7 +974,9 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 	struct mlx5_umr_wr wr;
 	struct ib_sge sg;
 	int err = 0;
-	int desc_size = sizeof(struct mlx5_mtt);
+	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
+			? sizeof(struct mlx5_klm)
+			: sizeof(struct mlx5_mtt);
 	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
 	const int page_mask = page_align - 1;
 	size_t pages_mapped = 0;
@@ -1186,6 +1194,18 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
 		    start, virt_addr, length, access_flags);
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	if (!start && length == U64_MAX) {
+		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
+		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
+			return ERR_PTR(-EINVAL);
+
+		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
+		return &mr->ibmr;
+	}
+#endif
+
 	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
 			  &page_shift, &ncont, &order);
 
@@ -1471,8 +1491,11 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
 		/* Wait for all running page-fault handlers to finish. */
 		synchronize_srcu(&dev->mr_srcu);
 		/* Destroy all page mappings */
-		mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
-					 ib_umem_end(umem));
+		if (umem->odp_data->page_list)
+			mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
+						 ib_umem_end(umem));
+		else
+			mlx5_ib_free_implicit_mr(mr);
 		/*
 		 * We kill the umem before the MR for ODP,
 		 * so that there will not be any invalidations in
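
Note on the mlx5_ib_reg_user_mr() hunk above: the new `!start && length == U64_MAX` branch is what an application reaches when it asks libibverbs for an implicit ODP registration spanning its whole address space. Below is a minimal userspace sketch of such a caller; it assumes a 64-bit system with rdma-core, and the helper name register_implicit_odp() is illustrative, not part of this patch.

/* Sketch only: register an implicit ODP MR from userspace.
 * Assumes rdma-core (libibverbs) on a 64-bit host, where
 * SIZE_MAX corresponds to the kernel's U64_MAX check above. */
#include <infiniband/verbs.h>
#include <stdint.h>
#include <stdio.h>

int register_implicit_odp(struct ibv_pd *pd)
{
	/* addr == NULL and length == SIZE_MAX select the implicit-MR path. */
	struct ibv_mr *mr = ibv_reg_mr(pd, NULL, SIZE_MAX,
				       IBV_ACCESS_ON_DEMAND |
				       IBV_ACCESS_LOCAL_WRITE);
	if (!mr) {
		perror("ibv_reg_mr");
		return -1;
	}
	printf("implicit ODP MR lkey=0x%x\n", mr->lkey);
	return ibv_dereg_mr(mr);
}

No pages are pinned at registration time with such an MR; the HCA faults pages in on access through the ODP page-fault path, which is why dereg_mr above frees the implicit MR tree instead of invalidating a fixed range.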