
Commit 4fb7203

htejun authored and axboe committed
blk-throttle: remove asynchronous percpu stats allocation mechanism
Because percpu allocator couldn't do non-blocking allocations, blk-throttle was forced to implement an ad-hoc asynchronous allocation mechanism for its percpu stats for cases where blkg's (blkcg_gq's) are allocated from an IO path without sleepable context.

Now that percpu allocator can handle gfp_mask and blkg_policy_data alloc / free are handled by policy methods, the ad-hoc asynchronous allocation mechanism can be replaced with direct allocation from throtl_pd_alloc(). Rip it out.

This ensures that an active throtl_grp always has valid non-NULL ->stats_cpu. Remove checks on it.

Signed-off-by: Tejun Heo <[email protected]>
Cc: Vivek Goyal <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
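(For context, the change hinges on the pattern the commit message describes: the percpu allocator now accepts a caller-supplied gfp mask, so per-CPU stats can be allocated inline even from a non-sleepable IO path instead of being deferred to a work item. Below is a minimal, hypothetical sketch of that pattern; struct my_stats, my_stats_alloc() and my_stats_free() are illustrative names and are not part of blk-throttle. The commit's real counterpart is throtl_pd_alloc() / throtl_pd_free() in the diff that follows.)

/*
 * Minimal illustrative sketch (not from this commit): percpu allocation
 * with a caller-supplied gfp mask.  my_stats, my_stats_alloc() and
 * my_stats_free() are hypothetical names; the real counterpart is
 * throtl_pd_alloc() / throtl_pd_free() in block/blk-throttle.c.
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/percpu.h>

struct my_stats {
	u64	bytes;
	u64	ios;
};

static struct my_stats __percpu *my_stats_alloc(gfp_t gfp)
{
	struct my_stats __percpu *stats;
	int cpu;

	/* honors @gfp, so e.g. GFP_NOWAIT is usable from a non-sleepable path */
	stats = alloc_percpu_gfp(struct my_stats, gfp);
	if (!stats)
		return NULL;

	/* explicit per-CPU init, mirroring blkg_rwstat_init() in the real code */
	for_each_possible_cpu(cpu) {
		struct my_stats *s = per_cpu_ptr(stats, cpu);

		s->bytes = 0;
		s->ios = 0;
	}
	return stats;
}

static void my_stats_free(struct my_stats __percpu *stats)
{
	free_percpu(stats);
}

A caller on a sleepable path passes GFP_KERNEL, while an IO-path caller passes GFP_NOWAIT; being able to express that choice directly is what makes the delayed-work indirection removed by this commit unnecessary.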
1 parent 001bea7 commit 4fb7203

File tree

1 file changed: 25 additions, 87 deletions

block/blk-throttle.c

Lines changed: 25 additions & 87 deletions
@@ -144,9 +144,6 @@ struct throtl_grp {
 
 	/* Per cpu stats pointer */
 	struct tg_stats_cpu __percpu *stats_cpu;
-
-	/* List of tgs waiting for per cpu stats memory to be allocated */
-	struct list_head stats_alloc_node;
 };
 
 struct throtl_data
@@ -168,13 +165,6 @@ struct throtl_data
 	struct work_struct dispatch_work;
 };
 
-/* list and work item to allocate percpu group stats */
-static DEFINE_SPINLOCK(tg_stats_alloc_lock);
-static LIST_HEAD(tg_stats_alloc_list);
-
-static void tg_stats_alloc_fn(struct work_struct *);
-static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
-
 static void throtl_pending_timer_fn(unsigned long arg);
 
 static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
@@ -256,53 +246,6 @@ static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
 	} \
 } while (0)
 
-static void tg_stats_init(struct tg_stats_cpu *tg_stats)
-{
-	blkg_rwstat_init(&tg_stats->service_bytes);
-	blkg_rwstat_init(&tg_stats->serviced);
-}
-
-/*
- * Worker for allocating per cpu stat for tgs. This is scheduled on the
- * system_wq once there are some groups on the alloc_list waiting for
- * allocation.
- */
-static void tg_stats_alloc_fn(struct work_struct *work)
-{
-	static struct tg_stats_cpu *stats_cpu;	/* this fn is non-reentrant */
-	struct delayed_work *dwork = to_delayed_work(work);
-	bool empty = false;
-
-alloc_stats:
-	if (!stats_cpu) {
-		int cpu;
-
-		stats_cpu = alloc_percpu(struct tg_stats_cpu);
-		if (!stats_cpu) {
-			/* allocation failed, try again after some time */
-			schedule_delayed_work(dwork, msecs_to_jiffies(10));
-			return;
-		}
-		for_each_possible_cpu(cpu)
-			tg_stats_init(per_cpu_ptr(stats_cpu, cpu));
-	}
-
-	spin_lock_irq(&tg_stats_alloc_lock);
-
-	if (!list_empty(&tg_stats_alloc_list)) {
-		struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
-							 struct throtl_grp,
-							 stats_alloc_node);
-		swap(tg->stats_cpu, stats_cpu);
-		list_del_init(&tg->stats_alloc_node);
-	}
-
-	empty = list_empty(&tg_stats_alloc_list);
-	spin_unlock_irq(&tg_stats_alloc_lock);
-	if (!empty)
-		goto alloc_stats;
-}
-
 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
 {
 	INIT_LIST_HEAD(&qn->node);
@@ -405,15 +348,34 @@ static void throtl_service_queue_exit(struct throtl_service_queue *sq)
 
 static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
 {
-	return kzalloc_node(sizeof(struct throtl_grp), gfp, node);
+	struct throtl_grp *tg;
+	int cpu;
+
+	tg = kzalloc_node(sizeof(*tg), gfp, node);
+	if (!tg)
+		return NULL;
+
+	tg->stats_cpu = alloc_percpu_gfp(struct tg_stats_cpu, gfp);
+	if (!tg->stats_cpu) {
+		kfree(tg);
+		return NULL;
+	}
+
+	for_each_possible_cpu(cpu) {
+		struct tg_stats_cpu *stats_cpu = per_cpu_ptr(tg->stats_cpu, cpu);
+
+		blkg_rwstat_init(&stats_cpu->service_bytes);
+		blkg_rwstat_init(&stats_cpu->serviced);
+	}
+
+	return &tg->pd;
 }
 
 static void throtl_pd_init(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
 	struct throtl_data *td = blkg->q->td;
 	struct throtl_service_queue *parent_sq;
-	unsigned long flags;
 	int rw;
 
 	/*
@@ -448,16 +410,6 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	tg->bps[WRITE] = -1;
 	tg->iops[READ] = -1;
 	tg->iops[WRITE] = -1;
-
-	/*
-	 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
-	 * but percpu allocator can't be called from IO path. Queue tg on
-	 * tg_stats_alloc_list and allocate from work item.
-	 */
-	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
-	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
-	schedule_delayed_work(&tg_stats_alloc_work, 0);
-	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
 
 /*
@@ -487,30 +439,23 @@ static void throtl_pd_online(struct blkcg_gq *blkg)
 static void throtl_pd_exit(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
-	unsigned long flags;
-
-	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
-	list_del_init(&tg->stats_alloc_node);
-	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
-
-	free_percpu(tg->stats_cpu);
 
 	throtl_service_queue_exit(&tg->service_queue);
 }
 
 static void throtl_pd_free(struct blkg_policy_data *pd)
 {
-	kfree(pd);
+	struct throtl_grp *tg = pd_to_tg(pd);
+
+	free_percpu(tg->stats_cpu);
+	kfree(tg);
 }
 
 static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
 	int cpu;
 
-	if (tg->stats_cpu == NULL)
-		return;
-
 	for_each_possible_cpu(cpu) {
 		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
 
@@ -973,10 +918,6 @@ static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
 	struct tg_stats_cpu *stats_cpu;
 	unsigned long flags;
 
-	/* If per cpu stats are not allocated yet, don't do any accounting. */
-	if (tg->stats_cpu == NULL)
-		return;
-
 	/*
 	 * Disabling interrupts to provide mutual exclusion between two
 	 * writes on same cpu. It probably is not needed for 64bit. Not
@@ -1302,9 +1243,6 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
 	struct blkg_rwstat rwstat = { }, tmp;
 	int i, cpu;
 
-	if (tg->stats_cpu == NULL)
-		return 0;
-
 	for_each_possible_cpu(cpu) {
 		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
 