Skip to content

Commit 75682e3

Browse files
Sebastian Andrzej Siewior authored and KAGA-KOKO (Thomas Gleixner) committed
workqueue: Use local irq lock instead of irq disable regions
Use a local_irq_lock as a replacement for irq off regions. We keep the semantic of irq-off in regard to the pool->lock and remain preemptible. Signed-off-by: Thomas Gleixner <[email protected]>
1 parent e97b27c commit 75682e3

File tree

1 file changed

+19
-14
lines changed

1 file changed

+19
-14
lines changed

kernel/workqueue.c

Lines changed: 19 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@
4848
#include <linux/nodemask.h>
4949
#include <linux/moduleparam.h>
5050
#include <linux/uaccess.h>
51+
#include <linux/locallock.h>
5152

5253
#include "workqueue_internal.h"
5354

@@ -348,6 +349,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq);
348349
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
349350
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
350351

352+
static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
353+
351354
static int worker_thread(void *__worker);
352355
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
353356

@@ -1101,9 +1104,11 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
11011104
* As both pwqs and pools are RCU protected, the
11021105
* following lock operations are safe.
11031106
*/
1104-
spin_lock_irq(&pwq->pool->lock);
1107+
rcu_read_lock();
1108+
local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
11051109
put_pwq(pwq);
1106-
spin_unlock_irq(&pwq->pool->lock);
1110+
local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
1111+
rcu_read_unlock();
11071112
}
11081113
}
11091114

@@ -1207,7 +1212,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
12071212
struct worker_pool *pool;
12081213
struct pool_workqueue *pwq;
12091214

1210-
local_irq_save(*flags);
1215+
local_lock_irqsave(pendingb_lock, *flags);
12111216

12121217
/* try to steal the timer if it exists */
12131218
if (is_dwork) {
@@ -1271,7 +1276,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
12711276
spin_unlock(&pool->lock);
12721277
fail:
12731278
rcu_read_unlock();
1274-
local_irq_restore(*flags);
1279+
local_unlock_irqrestore(pendingb_lock, *flags);
12751280
if (work_is_canceling(work))
12761281
return -ENOENT;
12771282
cpu_relax();
@@ -1376,7 +1381,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
13761381
* queued or lose PENDING. Grabbing PENDING and queueing should
13771382
* happen with IRQ disabled.
13781383
*/
1379-
WARN_ON_ONCE(!irqs_disabled());
1384+
WARN_ON_ONCE_NONRT(!irqs_disabled());
13801385

13811386
debug_work_activate(work);
13821387

@@ -1482,14 +1487,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
14821487
bool ret = false;
14831488
unsigned long flags;
14841489

1485-
local_irq_save(flags);
1490+
local_lock_irqsave(pendingb_lock,flags);
14861491

14871492
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
14881493
__queue_work(cpu, wq, work);
14891494
ret = true;
14901495
}
14911496

1492-
local_irq_restore(flags);
1497+
local_unlock_irqrestore(pendingb_lock, flags);
14931498
return ret;
14941499
}
14951500
EXPORT_SYMBOL(queue_work_on);
@@ -1556,14 +1561,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
15561561
unsigned long flags;
15571562

15581563
/* read the comment in __queue_work() */
1559-
local_irq_save(flags);
1564+
local_lock_irqsave(pendingb_lock, flags);
15601565

15611566
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
15621567
__queue_delayed_work(cpu, wq, dwork, delay);
15631568
ret = true;
15641569
}
15651570

1566-
local_irq_restore(flags);
1571+
local_unlock_irqrestore(pendingb_lock, flags);
15671572
return ret;
15681573
}
15691574
EXPORT_SYMBOL(queue_delayed_work_on);
@@ -1598,7 +1603,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
15981603

15991604
if (likely(ret >= 0)) {
16001605
__queue_delayed_work(cpu, wq, dwork, delay);
1601-
local_irq_restore(flags);
1606+
local_unlock_irqrestore(pendingb_lock, flags);
16021607
}
16031608

16041609
/* -ENOENT from try_to_grab_pending() becomes %true */
@@ -2916,7 +2921,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
29162921

29172922
/* tell other tasks trying to grab @work to back off */
29182923
mark_work_canceling(work);
2919-
local_irq_restore(flags);
2924+
local_unlock_irqrestore(pendingb_lock, flags);
29202925

29212926
flush_work(work);
29222927
clear_work_data(work);
@@ -2971,10 +2976,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
29712976
*/
29722977
bool flush_delayed_work(struct delayed_work *dwork)
29732978
{
2974-
local_irq_disable();
2979+
local_lock_irq(pendingb_lock);
29752980
if (del_timer_sync(&dwork->timer))
29762981
__queue_work(dwork->cpu, dwork->wq, &dwork->work);
2977-
local_irq_enable();
2982+
local_unlock_irq(pendingb_lock);
29782983
return flush_work(&dwork->work);
29792984
}
29802985
EXPORT_SYMBOL(flush_delayed_work);
@@ -2992,7 +2997,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
29922997
return false;
29932998

29942999
set_work_pool_and_clear_pending(work, get_work_pool_id(work));
2995-
local_irq_restore(flags);
3000+
local_unlock_irqrestore(pendingb_lock, flags);
29963001
return ret;
29973002
}
29983003

0 commit comments

Comments (0)