@@ -3672,10 +3672,10 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 	cpumask_copy(to->cpumask, from->cpumask);
 	/*
 	 * Unlike hash and equality test, this function doesn't ignore
-	 * ->no_numa as it is used for both pool and wq attrs. Instead,
-	 * get_unbound_pool() explicitly clears ->no_numa after copying.
+	 * ->ordered as it is used for both pool and wq attrs. Instead,
+	 * get_unbound_pool() explicitly clears ->ordered after copying.
 	 */
-	to->no_numa = from->no_numa;
+	to->ordered = from->ordered;
 }
 
 /* hash value of the content of @attr */
@@ -3933,10 +3933,10 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 	pool->node = target_node;
 
 	/*
-	 * no_numa isn't a worker_pool attribute, always clear it. See
+	 * ordered isn't a worker_pool attribute, always clear it. See
 	 * 'struct workqueue_attrs' comments for detail.
 	 */
-	pool->attrs->no_numa = false;
+	pool->attrs->ordered = false;
 
 	if (worker_pool_assign_id(pool) < 0)
 		goto fail;
@@ -4141,7 +4141,7 @@ static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
 static void wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
 				 int cpu_going_down, cpumask_t *cpumask)
 {
-	if (!wq_numa_enabled || attrs->no_numa)
+	if (!wq_numa_enabled || attrs->ordered)
 		goto use_dfl;
 
 	/* does @node have any online CPUs @attrs wants? */
@@ -4253,7 +4253,7 @@ apply_wqattrs_prepare(struct workqueue_struct *wq,
 		goto out_free;
 
 	for_each_possible_cpu(cpu) {
-		if (new_attrs->no_numa) {
+		if (new_attrs->ordered) {
 			ctx->dfl_pwq->refcnt++;
 			ctx->pwq_tbl[cpu] = ctx->dfl_pwq;
 		} else {
@@ -4411,7 +4411,7 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
 	lockdep_assert_held(&wq_pool_mutex);
 
 	if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
-	    wq->unbound_attrs->no_numa)
+	    wq->unbound_attrs->ordered)
 		return;
 
 	/*
@@ -6358,11 +6358,10 @@ void __init workqueue_init_early(void)
 		/*
 		 * An ordered wq should have only one pwq as ordering is
 		 * guaranteed by max_active which is enforced by pwqs.
-		 * Turn off NUMA so that dfl_pwq is used for all nodes.
 		 */
 		BUG_ON(!(attrs = alloc_workqueue_attrs()));
 		attrs->nice = std_nice[i];
-		attrs->no_numa = true;
+		attrs->ordered = true;
 		ordered_wq_attrs[i] = attrs;
 	}
 
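
Note: for readers following the rename, a minimal sketch of where the renamed field sits. The full struct workqueue_attrs definition lives in include/linux/workqueue.h and carries more members; the excerpt below is an approximation limited to the members this diff touches, with the field semantics taken from the hunks above.

/*
 * Approximate excerpt of struct workqueue_attrs after this change --
 * not the complete kernel definition.
 */
struct workqueue_attrs {
	int		nice;		/* nice level */
	cpumask_var_t	cpumask;	/* allowed CPUs */
	bool		ordered;	/* replaces no_numa: marks an ordered wq so
					 * the default pwq is used for every CPU
					 * (see apply_wqattrs_prepare() and
					 * wq_calc_node_cpumask() above);
					 * get_unbound_pool() clears it because it
					 * is a wq attribute, not a pool one */
};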