@@ -24,16 +24,16 @@ u8 rxe_get_next_key(u32 last_key)
 
 int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
 {
-	struct rxe_map_set *set = mr->cur_map_set;
+
 
 	switch (mr->type) {
 	case IB_MR_TYPE_DMA:
 		return 0;
 
 	case IB_MR_TYPE_USER:
 	case IB_MR_TYPE_MEM_REG:
-		if (iova < set->iova || length > set->length ||
-		    iova > set->iova + set->length - length)
+		if (iova < mr->iova || length > mr->length ||
+		    iova > mr->iova + mr->length - length)
 			return -EFAULT;
 		return 0;
 
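For readers following the range math: the check now works directly on the MR fields. Below is a minimal user-space sketch of the same arithmetic with hypothetical values (illustrative only, not part of the patch):

/* Standalone sketch of the mr_check_range() arithmetic above. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static int check_range(uint64_t mr_iova, size_t mr_length,
		       uint64_t iova, size_t length)
{
	/* length > mr_length is tested first, so mr_length - length
	 * cannot wrap in the third comparison.
	 */
	if (iova < mr_iova || length > mr_length ||
	    iova > mr_iova + mr_length - length)
		return -1;	/* -EFAULT in the kernel */
	return 0;
}

int main(void)
{
	/* MR covers [0x1000, 0x1000 + 0x4000) */
	printf("%d\n", check_range(0x1000, 0x4000, 0x1000, 0x4000)); /* 0: fits */
	printf("%d\n", check_range(0x1000, 0x4000, 0x4fff, 2));      /* -1: runs past end */
	return 0;
}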
@@ -65,89 +65,41 @@ static void rxe_mr_init(int access, struct rxe_mr *mr)
 	mr->map_shift = ilog2(RXE_BUF_PER_MAP);
 }
 
-static void rxe_mr_free_map_set(int num_map, struct rxe_map_set *set)
-{
-	int i;
-
-	for (i = 0; i < num_map; i++)
-		kfree(set->map[i]);
-
-	kfree(set->map);
-	kfree(set);
-}
-
-static int rxe_mr_alloc_map_set(int num_map, struct rxe_map_set **setp)
+static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
 {
 	int i;
-	struct rxe_map_set *set;
+	int num_map;
+	struct rxe_map **map = mr->map;
 
-	set = kmalloc(sizeof(*set), GFP_KERNEL);
-	if (!set)
-		goto err_out;
+	num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
 
-	set->map = kmalloc_array(num_map, sizeof(struct rxe_map *), GFP_KERNEL);
-	if (!set->map)
-		goto err_free_set;
+	mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
+	if (!mr->map)
+		goto err1;
 
 	for (i = 0; i < num_map; i++) {
-		set->map[i] = kmalloc(sizeof(struct rxe_map), GFP_KERNEL);
-		if (!set->map[i])
-			goto err_free_map;
+		mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
+		if (!mr->map[i])
+			goto err2;
 	}
 
-	*setp = set;
-
-	return 0;
-
-err_free_map:
-	for (i--; i >= 0; i--)
-		kfree(set->map[i]);
-
-	kfree(set->map);
-err_free_set:
-	kfree(set);
-err_out:
-	return -ENOMEM;
-}
-
-/**
- * rxe_mr_alloc() - Allocate memory map array(s) for MR
- * @mr: Memory region
- * @num_buf: Number of buffer descriptors to support
- * @both: If non zero allocate both mr->map and mr->next_map
- *	  else just allocate mr->map. Used for fast MRs
- *
- * Return: 0 on success else an error
- */
-static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf, int both)
-{
-	int ret;
-	int num_map;
-
 	BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
-	num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
 
 	mr->map_shift = ilog2(RXE_BUF_PER_MAP);
 	mr->map_mask = RXE_BUF_PER_MAP - 1;
+
 	mr->num_buf = num_buf;
-	mr->max_buf = num_map * RXE_BUF_PER_MAP;
 	mr->num_map = num_map;
-
-	ret = rxe_mr_alloc_map_set(num_map, &mr->cur_map_set);
-	if (ret)
-		return -ENOMEM;
-
-	if (both) {
-		ret = rxe_mr_alloc_map_set(num_map, &mr->next_map_set);
-		if (ret)
-			goto err_free;
-	}
+	mr->max_buf = num_map * RXE_BUF_PER_MAP;
 
 	return 0;
 
-err_free:
-	rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
-	mr->cur_map_set = NULL;
+err2:
+	for (i--; i >= 0; i--)
+		kfree(mr->map[i]);
+
+	kfree(mr->map);
+err1:
 	return -ENOMEM;
 }
 
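The restored rxe_mr_alloc() sizes a two-level table: num_map chunks of RXE_BUF_PER_MAP buffer descriptors each, rounding up. A standalone sketch of the ceil-division sizing follows; BUF_PER_MAP = 256 is an assumed illustrative value (upstream derives RXE_BUF_PER_MAP from PAGE_SIZE / sizeof(struct rxe_phys_buf)):

/* Sketch of the two-level sizing used by rxe_mr_alloc() above. */
#include <stdio.h>

#define BUF_PER_MAP 256		/* assumed; must be a power of two */

int main(void)
{
	int num_buf = 1000;
	/* ceil(num_buf / BUF_PER_MAP), as computed in the patch */
	int num_map = (num_buf + BUF_PER_MAP - 1) / BUF_PER_MAP;

	printf("num_map = %d\n", num_map);		 /* 4 */
	printf("max_buf = %d\n", num_map * BUF_PER_MAP); /* 1024 */
	return 0;
}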
@@ -164,14 +116,14 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
 int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 		     int access, struct rxe_mr *mr)
 {
-	struct rxe_map_set	*set;
 	struct rxe_map		**map;
 	struct rxe_phys_buf	*buf = NULL;
 	struct ib_umem		*umem;
 	struct sg_page_iter	sg_iter;
 	int			num_buf;
 	void			*vaddr;
 	int err;
+	int i;
 
 	umem = ib_umem_get(pd->ibpd.device, start, length, access);
 	if (IS_ERR(umem)) {
@@ -185,20 +137,18 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 	rxe_mr_init(access, mr);
 
-	err = rxe_mr_alloc(mr, num_buf, 0);
+	err = rxe_mr_alloc(mr, num_buf);
 	if (err) {
 		pr_warn("%s: Unable to allocate memory for map\n",
 				__func__);
 		goto err_release_umem;
 	}
 
-	set = mr->cur_map_set;
-	set->page_shift = PAGE_SHIFT;
-	set->page_mask = PAGE_SIZE - 1;
-
-	num_buf = 0;
-	map = set->map;
+	mr->page_shift = PAGE_SHIFT;
+	mr->page_mask = PAGE_SIZE - 1;
 
+	num_buf			= 0;
+	map = mr->map;
 	if (length > 0) {
 		buf = map[0]->buf;
@@ -214,29 +164,33 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 				pr_warn("%s: Unable to get virtual address\n",
 						__func__);
 				err = -ENOMEM;
-				goto err_release_umem;
+				goto err_cleanup_map;
 			}
 
 			buf->addr = (uintptr_t)vaddr;
 			buf->size = PAGE_SIZE;
 			num_buf++;
 			buf++;
+
 		}
 	}
 
 	mr->ibmr.pd = &pd->ibpd;
 	mr->umem = umem;
 	mr->access = access;
+	mr->length = length;
+	mr->iova = iova;
+	mr->va = start;
+	mr->offset = ib_umem_offset(umem);
 	mr->state = RXE_MR_STATE_VALID;
 	mr->type = IB_MR_TYPE_USER;
 
-	set->length = length;
-	set->iova = iova;
-	set->va = start;
-	set->offset = ib_umem_offset(umem);
-
 	return 0;
 
+err_cleanup_map:
+	for (i = 0; i < mr->num_map; i++)
+		kfree(mr->map[i]);
+	kfree(mr->map);
 err_release_umem:
 	ib_umem_release(umem);
 err_out:
@@ -250,7 +204,7 @@ int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
 	/* always allow remote access for FMRs */
 	rxe_mr_init(IB_ACCESS_REMOTE, mr);
 
-	err = rxe_mr_alloc(mr, max_pages, 1);
+	err = rxe_mr_alloc(mr, max_pages);
 	if (err)
 		goto err1;
 
@@ -268,24 +222,21 @@ int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
 static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
 			size_t *offset_out)
 {
-	struct rxe_map_set *set = mr->cur_map_set;
-	size_t offset = iova - set->iova + set->offset;
+	size_t offset = iova - mr->iova + mr->offset;
 	int			map_index;
 	int			buf_index;
 	u64			length;
-	struct rxe_map *map;
 
-	if (likely(set->page_shift)) {
-		*offset_out = offset & set->page_mask;
-		offset >>= set->page_shift;
+	if (likely(mr->page_shift)) {
+		*offset_out = offset & mr->page_mask;
+		offset >>= mr->page_shift;
 		*n_out = offset & mr->map_mask;
 		*m_out = offset >> mr->map_shift;
 	} else {
 		map_index = 0;
 		buf_index = 0;
 
-		map = set->map[map_index];
-		length = map->buf[buf_index].size;
+		length = mr->map[map_index]->buf[buf_index].size;
 
 		while (offset >= length) {
 			offset -= length;
@@ -295,8 +246,7 @@ static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
 				map_index++;
 				buf_index = 0;
 			}
-			map = set->map[map_index];
-			length = map->buf[buf_index].size;
+			length = mr->map[map_index]->buf[buf_index].size;
 		}
 
 		*m_out = map_index;
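When page_shift is set, lookup_iova() splits the byte offset into (map index, buffer index, in-page offset) with shifts and masks instead of the linear walk. A small sketch of that fast path, assuming 4 KiB pages (page_shift = 12) and 256 buffers per map (map_shift = 8):

/* Sketch of the lookup_iova() fast path above; shift values assumed. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;		/* PAGE_SHIFT on 4 KiB pages */
	uint64_t page_mask = (1ull << page_shift) - 1;
	unsigned int map_shift = 8;		/* ilog2(256) */
	uint64_t map_mask = (1ull << map_shift) - 1;

	uint64_t offset = 0x123456;		/* iova - mr->iova + mr->offset */

	uint64_t in_page = offset & page_mask;	/* byte offset within the buffer */
	offset >>= page_shift;
	uint64_t n = offset & map_mask;		/* buffer index within the map */
	uint64_t m = offset >> map_shift;	/* which rxe_map chunk */

	printf("m=%llu n=%llu offset=0x%llx\n",
	       (unsigned long long)m, (unsigned long long)n,
	       (unsigned long long)in_page);	/* m=1 n=35 offset=0x456 */
	return 0;
}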
@@ -317,7 +267,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
 		goto out;
 	}
 
-	if (!mr->cur_map_set) {
+	if (!mr->map) {
 		addr = (void *)(uintptr_t)iova;
 		goto out;
 	}
@@ -330,13 +280,13 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
 
 	lookup_iova(mr, iova, &m, &n, &offset);
 
-	if (offset + length > mr->cur_map_set->map[m]->buf[n].size) {
+	if (offset + length > mr->map[m]->buf[n].size) {
 		pr_warn("crosses page boundary\n");
 		addr = NULL;
 		goto out;
 	}
 
-	addr = (void *)(uintptr_t)mr->cur_map_set->map[m]->buf[n].addr + offset;
+	addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
 
 out:
 	return addr;
@@ -372,7 +322,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
 		return 0;
 	}
 
-	WARN_ON_ONCE(!mr->cur_map_set);
+	WARN_ON_ONCE(!mr->map);
 
 	err = mr_check_range(mr, iova, length);
 	if (err) {
@@ -382,7 +332,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
 	}
 
 	lookup_iova(mr, iova, &m, &i, &offset);
 
-	map = mr->cur_map_set->map + m;
+	map = mr->map + m;
 	buf	= map[0]->buf + i;
 
 	while (length > 0) {
@@ -628,9 +578,8 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 key)
 int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 {
 	struct rxe_mr *mr = to_rmr(wqe->wr.wr.reg.mr);
-	u32 key = wqe->wr.wr.reg.key & 0xff;
+	u32 key = wqe->wr.wr.reg.key;
 	u32 access = wqe->wr.wr.reg.access;
-	struct rxe_map_set *set;
 
 	/* user can only register MR in free state */
 	if (unlikely(mr->state != RXE_MR_STATE_FREE)) {
@@ -646,36 +595,19 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 		return -EINVAL;
 	}
 
+	/* user is only allowed to change key portion of l/rkey */
+	if (unlikely((mr->lkey & ~0xff) != (key & ~0xff))) {
+		pr_warn("%s: key = 0x%x has wrong index mr->lkey = 0x%x\n",
+			__func__, key, mr->lkey);
+		return -EINVAL;
+	}
+
 	mr->access = access;
-	mr->lkey = (mr->lkey & ~0xff) | key;
-	mr->rkey = (access & IB_ACCESS_REMOTE) ? mr->lkey : 0;
+	mr->lkey = key;
+	mr->rkey = (access & IB_ACCESS_REMOTE) ? key : 0;
+	mr->iova = wqe->wr.wr.reg.mr->iova;
 	mr->state = RXE_MR_STATE_VALID;
 
-	set = mr->cur_map_set;
-	mr->cur_map_set = mr->next_map_set;
-	mr->cur_map_set->iova = wqe->wr.wr.reg.mr->iova;
-	mr->next_map_set = set;
-
-	return 0;
-}
-
-int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr)
-{
-	struct rxe_mr *mr = to_rmr(ibmr);
-	struct rxe_map_set *set = mr->next_map_set;
-	struct rxe_map *map;
-	struct rxe_phys_buf *buf;
-
-	if (unlikely(set->nbuf == mr->num_buf))
-		return -ENOMEM;
-
-	map = set->map[set->nbuf / RXE_BUF_PER_MAP];
-	buf = &map->buf[set->nbuf % RXE_BUF_PER_MAP];
-
-	buf->addr = addr;
-	buf->size = ibmr->page_size;
-	set->nbuf++;
-
 	return 0;
 }
 
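An l/rkey packs an index in the upper 24 bits and an 8-bit user-settable key in the low byte; the new check rejects a WQE whose key disagrees with the MR's index instead of silently masking it. A standalone sketch of that split, with illustrative key values:

/* Sketch of the l/rkey index-vs-key comparison checked above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lkey = 0x000012ab;	/* index 0x000012, key byte 0xab */
	uint32_t good = 0x000012ff;	/* same index, new key byte 0xff */
	uint32_t bad  = 0x000099ff;	/* different index: rejected */

	printf("%s\n", (lkey & ~0xffu) == (good & ~0xffu) ? "ok" : "reject"); /* ok */
	printf("%s\n", (lkey & ~0xffu) == (bad  & ~0xffu) ? "ok" : "reject"); /* reject */
	return 0;
}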
@@ -695,14 +627,15 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 void rxe_mr_cleanup(struct rxe_pool_elem *elem)
 {
 	struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
+	int i;
 
 	rxe_put(mr_pd(mr));
-
 	ib_umem_release(mr->umem);
 
-	if (mr->cur_map_set)
-		rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
+	if (mr->map) {
+		for (i = 0; i < mr->num_map; i++)
+			kfree(mr->map[i]);
 
-	if (mr->next_map_set)
-		rxe_mr_free_map_set(mr->num_map, mr->next_map_set);
+		kfree(mr->map);
+	}
 }
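Teardown now mirrors allocation: free each rxe_map chunk, then the pointer array. A user-space sketch of the paired pattern (malloc/free standing in for kmalloc/kfree; after a complete loop, falling through the label frees everything, and on a mid-loop failure the same loop unwinds only what was allocated, as err2 does in rxe_mr_alloc()):

/* Sketch of the paired allocate/free pattern restored above. */
#include <stdlib.h>

struct chunk { char payload[4096]; };	/* stand-in for struct rxe_map */

int main(void)
{
	int num_map = 4, i;
	struct chunk **map = calloc(num_map, sizeof(*map));

	if (!map)
		return 1;
	for (i = 0; i < num_map; i++) {
		map[i] = malloc(sizeof(**map));
		if (!map[i])
			goto out;	/* partial unwind, as in err2 */
	}

	/* ... use map ... */

out:
	for (i--; i >= 0; i--)		/* free only what was allocated */
		free(map[i]);
	free(map);
	return 0;
}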