Skip to content

Commit dce90d4

Browse files
committed
workqueue: introduce put_pwq_unlocked()
Factor out the "lock pool, put_pwq(), unlock" sequence into put_pwq_unlocked(). The two existing call sites are converted, and there will be more with NUMA affinity support. This prepares for NUMA affinity support for unbound workqueues and doesn't introduce any functional difference.

Signed-off-by: Tejun Heo <[email protected]>
Reviewed-by: Lai Jiangshan <[email protected]>
1 parent 1befcf3 commit dce90d4

File tree

1 file changed

+23
-13
lines changed

1 file changed

+23
-13
lines changed

kernel/workqueue.c

Lines changed: 23 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -1057,6 +1057,25 @@ static void put_pwq(struct pool_workqueue *pwq)
10571057
schedule_work(&pwq->unbound_release_work);
10581058
}
10591059

1060+
/**
1061+
* put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1062+
* @pwq: pool_workqueue to put (can be %NULL)
1063+
*
1064+
* put_pwq() with locking. This function also allows %NULL @pwq.
1065+
*/
1066+
static void put_pwq_unlocked(struct pool_workqueue *pwq)
1067+
{
1068+
if (pwq) {
1069+
/*
1070+
* As both pwqs and pools are sched-RCU protected, the
1071+
* following lock operations are safe.
1072+
*/
1073+
spin_lock_irq(&pwq->pool->lock);
1074+
put_pwq(pwq);
1075+
spin_unlock_irq(&pwq->pool->lock);
1076+
}
1077+
}
1078+
10601079
static void pwq_activate_delayed_work(struct work_struct *work)
10611080
{
10621081
struct pool_workqueue *pwq = get_work_pwq(work);
@@ -3759,12 +3778,7 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
37593778

37603779
mutex_unlock(&wq->mutex);
37613780

3762-
if (last_pwq) {
3763-
spin_lock_irq(&last_pwq->pool->lock);
3764-
put_pwq(last_pwq);
3765-
spin_unlock_irq(&last_pwq->pool->lock);
3766-
}
3767-
3781+
put_pwq_unlocked(last_pwq);
37683782
ret = 0;
37693783
/* fall through */
37703784
out_free:
@@ -3979,16 +3993,12 @@ void destroy_workqueue(struct workqueue_struct *wq)
39793993
} else {
39803994
/*
39813995
* We're the sole accessor of @wq at this point. Directly
3982-
* access the first pwq and put the base ref. As both pwqs
3983-
* and pools are sched-RCU protected, the lock operations
3984-
* are safe. @wq will be freed when the last pwq is
3985-
* released.
3996+
* access the first pwq and put the base ref. @wq will be
3997+
* freed when the last pwq is released.
39863998
*/
39873999
pwq = list_first_entry(&wq->pwqs, struct pool_workqueue,
39884000
pwqs_node);
3989-
spin_lock_irq(&pwq->pool->lock);
3990-
put_pwq(pwq);
3991-
spin_unlock_irq(&pwq->pool->lock);
4001+
put_pwq_unlocked(pwq);
39924002
}
39934003
}
39944004
EXPORT_SYMBOL_GPL(destroy_workqueue);

0 commit comments

Comments
 (0)