@@ -237,8 +237,6 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 	struct rxe_mc_elem *mce;
 	struct rxe_qp *qp;
 	union ib_gid dgid;
-	struct sk_buff *per_qp_skb;
-	struct rxe_pkt_info *per_qp_pkt;
 	int err;
 
 	if (skb->protocol == htons(ETH_P_IP))
@@ -250,10 +248,15 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 	/* lookup mcast group corresponding to mgid, takes a ref */
 	mcg = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid);
 	if (!mcg)
-		goto err1;	/* mcast group not registered */
+		goto drop;	/* mcast group not registered */
 
 	spin_lock_bh(&mcg->mcg_lock);
 
+	/* this is an unreliable datagram service, so a
+	 * failure to deliver the packet to a single QP
+	 * is tolerated; just move on and try the rest
+	 * of the QPs on the list
+	 */
 	list_for_each_entry(mce, &mcg->qp_list, qp_list) {
 		qp = mce->qp;
 
@@ -266,39 +269,47 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 		if (err)
 			continue;
 
-		/* for all but the last qp create a new clone of the
-		 * skb and pass to the qp. If an error occurs in the
-		 * checks for the last qp in the list we need to
-		 * free the skb since it hasn't been passed on to
-		 * rxe_rcv_pkt() which would free it later.
+		/* for all but the last QP create a new clone of the
+		 * skb and pass it to that QP. Pass the original skb
+		 * to the last QP in the list.
 		 */
 		if (mce->qp_list.next != &mcg->qp_list) {
-			per_qp_skb = skb_clone(skb, GFP_ATOMIC);
-			if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
-				kfree_skb(per_qp_skb);
+			struct sk_buff *cskb;
+			struct rxe_pkt_info *cpkt;
+
+			cskb = skb_clone(skb, GFP_ATOMIC);
+			if (unlikely(!cskb))
 				continue;
+
+			if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
+				kfree_skb(cskb);
+				break;
 			}
+
+			cpkt = SKB_TO_PKT(cskb);
+			cpkt->qp = qp;
+			rxe_add_ref(qp);
+			rxe_rcv_pkt(cpkt, cskb);
 		} else {
-			per_qp_skb = skb;
-			/* show we have consumed the skb */
-			skb = NULL;
+			pkt->qp = qp;
+			rxe_add_ref(qp);
+			rxe_rcv_pkt(pkt, skb);
+			skb = NULL;	/* mark consumed */
 		}
-
-		if (unlikely(!per_qp_skb))
-			continue;
-
-		per_qp_pkt = SKB_TO_PKT(per_qp_skb);
-		per_qp_pkt->qp = qp;
-		rxe_add_ref(qp);
-		rxe_rcv_pkt(per_qp_pkt, per_qp_skb);
 	}
 
 	spin_unlock_bh(&mcg->mcg_lock);
 
 	rxe_drop_ref(mcg);	/* drop ref from rxe_pool_get_key. */
 
-err1:
-	/* free skb if not consumed */
+	if (likely(!skb))
+		return;
+
+	/* This only occurs if one of the checks fails on the
+	 * last QP in the list above.
+	 */
+
+drop:
 	kfree_skb(skb);
 	ib_device_put(&rxe->ib_dev);
 }
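For reference, here is the skb ownership pattern the rewritten loop relies on, reduced to a standalone user-space sketch. Everything in it (struct pkt, pkt_clone(), deliver()) is made up for illustration; only the ownership rules mirror the patch: each QP except the last receives its own clone (a failed clone just skips that QP, per the unreliable-datagram comment), the last QP consumes the original and skb is set to NULL, and the tail free only fires when the per-QP checks fail on the last entry so nothing ever took ownership.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt {
	char data[32];
};

static struct pkt *pkt_clone(const struct pkt *p)
{
	struct pkt *c = malloc(sizeof(*c));

	if (c)
		memcpy(c, p, sizeof(*c));
	return c;
}

/* takes ownership of p and frees it, as rxe_rcv_pkt() does via the skb */
static void deliver(int qpn, struct pkt *p)
{
	printf("qp %d: %s\n", qpn, p->data);
	free(p);
}

int main(void)
{
	struct pkt *skb = malloc(sizeof(*skb));
	int qpn[] = { 3, -1, 7 };	/* -1 simulates a failed check */
	int i, n = 3;

	if (!skb)
		return 1;
	strcpy(skb->data, "mcast payload");

	for (i = 0; i < n; i++) {
		if (qpn[i] < 0)		/* per-QP checks failed: skip */
			continue;

		if (i < n - 1) {	/* not the last entry: clone */
			struct pkt *c = pkt_clone(skb);

			if (!c)		/* best effort: skip this QP */
				continue;
			deliver(qpn[i], c);
		} else {		/* last entry takes the original */
			deliver(qpn[i], skb);
			skb = NULL;	/* mark consumed */
		}
	}

	free(skb);	/* only non-NULL if the last QP's check failed */
	return 0;
}

The drop label in the patch plays the role of this final free(), and the new "if (likely(!skb)) return;" short-circuits it on the common path where the original skb was handed off.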