Skip to content

Commit cabe3cb

Browse files
Hal Rosenstock authored and Linus Torvalds committed
[PATCH] IB: Fix a couple of MAD code paths
Fixed locking to handle error posting MAD send work requests. Fixed handling of canceling a MAD with an active work request.

Signed-off-by: Sean Hefty <[email protected]>
Signed-off-by: Hal Rosenstock <[email protected]>
Cc: Roland Dreier <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 29bb33d commit cabe3cb

File tree

1 file changed

+14
-14
lines changed
  • drivers/infiniband/core

1 file changed

+14
-14
lines changed

drivers/infiniband/core/mad.c

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -841,6 +841,7 @@ static int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
841841
{
842842
struct ib_mad_qp_info *qp_info;
843843
struct ib_send_wr *bad_send_wr;
844+
struct list_head *list;
844845
unsigned long flags;
845846
int ret;
846847

@@ -850,22 +851,20 @@ static int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
850851
mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
851852

852853
spin_lock_irqsave(&qp_info->send_queue.lock, flags);
853-
if (qp_info->send_queue.count++ < qp_info->send_queue.max_active) {
854-
list_add_tail(&mad_send_wr->mad_list.list,
855-
&qp_info->send_queue.list);
856-
spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
854+
if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
857855
ret = ib_post_send(mad_send_wr->mad_agent_priv->agent.qp,
858856
&mad_send_wr->send_wr, &bad_send_wr);
859-
if (ret) {
860-
printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
861-
dequeue_mad(&mad_send_wr->mad_list);
862-
}
857+
list = &qp_info->send_queue.list;
863858
} else {
864-
list_add_tail(&mad_send_wr->mad_list.list,
865-
&qp_info->overflow_list);
866-
spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
867859
ret = 0;
860+
list = &qp_info->overflow_list;
868861
}
862+
863+
if (!ret) {
864+
qp_info->send_queue.count++;
865+
list_add_tail(&mad_send_wr->mad_list.list, list);
866+
}
867+
spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
869868
return ret;
870869
}
871870

@@ -2023,8 +2022,7 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
20232022
}
20242023

20252024
static struct ib_mad_send_wr_private*
2026-
find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv,
2027-
u64 wr_id)
2025+
find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv, u64 wr_id)
20282026
{
20292027
struct ib_mad_send_wr_private *mad_send_wr;
20302028

@@ -2047,6 +2045,7 @@ int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms)
20472045
struct ib_mad_agent_private *mad_agent_priv;
20482046
struct ib_mad_send_wr_private *mad_send_wr;
20492047
unsigned long flags;
2048+
int active;
20502049

20512050
mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
20522051
agent);
@@ -2057,13 +2056,14 @@ int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms)
20572056
return -EINVAL;
20582057
}
20592058

2059+
active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
20602060
if (!timeout_ms) {
20612061
mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
20622062
mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
20632063
}
20642064

20652065
mad_send_wr->send_wr.wr.ud.timeout_ms = timeout_ms;
2066-
if (!mad_send_wr->timeout || mad_send_wr->refcount > 1)
2066+
if (active)
20672067
mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
20682068
else
20692069
ib_reset_mad_timeout(mad_send_wr, timeout_ms);

0 commit comments

Comments (0)