Skip to content

Commit 051e185

Browse files
Lai Jiangshan authored and Tejun Heo (htejun) committed
workqueue: unfold start_worker() into create_worker()
Simply unfold the code of start_worker() into create_worker() and remove the original start_worker() and create_and_start_worker(). The only trade-off is the introduced overhead that the pool->lock is released and regrabbed after the newly created worker is started. The overhead is acceptable since the manager is a slow path. And because of this new locking behavior, the newly created worker may grab the lock earlier than the manager and go to process work items. In this case, the recheck of need_to_create_worker() may be true as expected and the manager goes to restart, which is the correct behavior. tj: Minor updates to description and comments. Signed-off-by: Lai Jiangshan <[email protected]> Signed-off-by: Tejun Heo <[email protected]>
1 parent 228f1d0 commit 051e185

File tree

1 file changed

+18
-57
lines changed

1 file changed

+18
-57
lines changed

kernel/workqueue.c

Lines changed: 18 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -1540,7 +1540,7 @@ static void worker_enter_idle(struct worker *worker)
15401540
(worker->hentry.next || worker->hentry.pprev)))
15411541
return;
15421542

1543-
/* can't use worker_set_flags(), also called from start_worker() */
1543+
/* can't use worker_set_flags(), also called from create_worker() */
15441544
worker->flags |= WORKER_IDLE;
15451545
pool->nr_idle++;
15461546
worker->last_active = jiffies;
@@ -1661,8 +1661,7 @@ static void worker_detach_from_pool(struct worker *worker,
16611661
* create_worker - create a new workqueue worker
16621662
* @pool: pool the new worker will belong to
16631663
*
1664-
* Create a new worker which is attached to @pool. The new worker must be
1665-
* started by start_worker().
1664+
* Create and start a new worker which is attached to @pool.
16661665
*
16671666
* CONTEXT:
16681667
* Might sleep. Does GFP_KERNEL allocations.
@@ -1707,6 +1706,13 @@ static struct worker *create_worker(struct worker_pool *pool)
17071706
/* successful, attach the worker to the pool */
17081707
worker_attach_to_pool(worker, pool);
17091708

1709+
/* start the newly created worker */
1710+
spin_lock_irq(&pool->lock);
1711+
worker->pool->nr_workers++;
1712+
worker_enter_idle(worker);
1713+
wake_up_process(worker->task);
1714+
spin_unlock_irq(&pool->lock);
1715+
17101716
return worker;
17111717

17121718
fail:
@@ -1716,44 +1722,6 @@ static struct worker *create_worker(struct worker_pool *pool)
17161722
return NULL;
17171723
}
17181724

1719-
/**
1720-
* start_worker - start a newly created worker
1721-
* @worker: worker to start
1722-
*
1723-
* Make the pool aware of @worker and start it.
1724-
*
1725-
* CONTEXT:
1726-
* spin_lock_irq(pool->lock).
1727-
*/
1728-
static void start_worker(struct worker *worker)
1729-
{
1730-
worker->pool->nr_workers++;
1731-
worker_enter_idle(worker);
1732-
wake_up_process(worker->task);
1733-
}
1734-
1735-
/**
1736-
* create_and_start_worker - create and start a worker for a pool
1737-
* @pool: the target pool
1738-
*
1739-
* Grab the managership of @pool and create and start a new worker for it.
1740-
*
1741-
* Return: 0 on success. A negative error code otherwise.
1742-
*/
1743-
static int create_and_start_worker(struct worker_pool *pool)
1744-
{
1745-
struct worker *worker;
1746-
1747-
worker = create_worker(pool);
1748-
if (worker) {
1749-
spin_lock_irq(&pool->lock);
1750-
start_worker(worker);
1751-
spin_unlock_irq(&pool->lock);
1752-
}
1753-
1754-
return worker ? 0 : -ENOMEM;
1755-
}
1756-
17571725
/**
17581726
* destroy_worker - destroy a workqueue worker
17591727
* @worker: worker to be destroyed
@@ -1892,19 +1860,7 @@ __acquires(&pool->lock)
18921860
mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
18931861

18941862
while (true) {
1895-
struct worker *worker;
1896-
1897-
worker = create_worker(pool);
1898-
if (worker) {
1899-
del_timer_sync(&pool->mayday_timer);
1900-
spin_lock_irq(&pool->lock);
1901-
start_worker(worker);
1902-
if (WARN_ON_ONCE(need_to_create_worker(pool)))
1903-
goto restart;
1904-
return true;
1905-
}
1906-
1907-
if (!need_to_create_worker(pool))
1863+
if (create_worker(pool) || !need_to_create_worker(pool))
19081864
break;
19091865

19101866
schedule_timeout_interruptible(CREATE_COOLDOWN);
@@ -1915,6 +1871,11 @@ __acquires(&pool->lock)
19151871

19161872
del_timer_sync(&pool->mayday_timer);
19171873
spin_lock_irq(&pool->lock);
1874+
/*
1875+
* This is necessary even after a new worker was just successfully
1876+
* created as @pool->lock was dropped and the new worker might have
1877+
* already become busy.
1878+
*/
19181879
if (need_to_create_worker(pool))
19191880
goto restart;
19201881
return true;
@@ -3537,7 +3498,7 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
35373498
goto fail;
35383499

35393500
/* create and start the initial worker */
3540-
if (create_and_start_worker(pool) < 0)
3501+
if (!create_worker(pool))
35413502
goto fail;
35423503

35433504
/* install */
@@ -4611,7 +4572,7 @@ static int workqueue_cpu_up_callback(struct notifier_block *nfb,
46114572
for_each_cpu_worker_pool(pool, cpu) {
46124573
if (pool->nr_workers)
46134574
continue;
4614-
if (create_and_start_worker(pool) < 0)
4575+
if (!create_worker(pool))
46154576
return NOTIFY_BAD;
46164577
}
46174578
break;
@@ -4911,7 +4872,7 @@ static int __init init_workqueues(void)
49114872

49124873
for_each_cpu_worker_pool(pool, cpu) {
49134874
pool->flags &= ~POOL_DISASSOCIATED;
4914-
BUG_ON(create_and_start_worker(pool) < 0);
4875+
BUG_ON(!create_worker(pool));
49154876
}
49164877
}
49174878

0 commit comments

Comments (0)