
Commit 0da7fea

Author: Christoph Hellwig (authored and committed)

nvme-pci: use the tagset alloc/free helpers

Use the common helpers to allocate and free the tagsets. To make this work
the generic nvme_ctrl now needs to be stored in the hctx private data
instead of the nvme_dev.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Keith Busch <[email protected]>
Reviewed-by: Sagi Grimberg <[email protected]>
Reviewed-by: Chaitanya Kulkarni <[email protected]>
1 parent 93b24f5 commit 0da7fea
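
Note: the conversions in the diff below rely on the tag_set and hctx private
data now holding the generic struct nvme_ctrl rather than the PCIe-specific
struct nvme_dev. The to_nvme_dev() accessor that recovers the nvme_dev is not
shown in this diff; as a minimal sketch, it is assumed to be the usual
container_of() helper already defined in drivers/nvme/host/pci.c:

    /*
     * Sketch only, not part of this commit: to_nvme_dev() is assumed to be
     * the existing container_of() accessor in drivers/nvme/host/pci.c, which
     * recovers the PCIe-specific nvme_dev from the embedded generic nvme_ctrl
     * that the common tagset helpers now store as driver_data.
     */
    static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
    {
            return container_of(ctrl, struct nvme_dev, ctrl);
    }

Because the private data is a void pointer, passing it straight into this
accessor works as long as the common helpers really store &dev->ctrl there,
which is the premise of this commit.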

1 file changed, 18 insertions(+), 71 deletions(-)

drivers/nvme/host/pci.c

@@ -398,7 +398,7 @@ static int nvme_pci_npages_sgl(void)
 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                 unsigned int hctx_idx)
 {
-        struct nvme_dev *dev = data;
+        struct nvme_dev *dev = to_nvme_dev(data);
         struct nvme_queue *nvmeq = &dev->queues[0];
 
         WARN_ON(hctx_idx != 0);
@@ -411,7 +411,7 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                 unsigned int hctx_idx)
 {
-        struct nvme_dev *dev = data;
+        struct nvme_dev *dev = to_nvme_dev(data);
         struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
 
         WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
@@ -423,7 +423,7 @@ static int nvme_pci_init_request(struct blk_mq_tag_set *set,
                 struct request *req, unsigned int hctx_idx,
                 unsigned int numa_node)
 {
-        struct nvme_dev *dev = set->driver_data;
+        struct nvme_dev *dev = to_nvme_dev(set->driver_data);
         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
         nvme_req(req)->ctrl = &dev->ctrl;
@@ -442,7 +442,7 @@ static int queue_irq_offset(struct nvme_dev *dev)
 
 static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
 {
-        struct nvme_dev *dev = set->driver_data;
+        struct nvme_dev *dev = to_nvme_dev(set->driver_data);
         int i, qoff, offset;
 
         offset = queue_irq_offset(dev);
@@ -1728,39 +1728,10 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev)
                  * queue to flush these to completion.
                  */
                 nvme_unquiesce_admin_queue(&dev->ctrl);
-                blk_mq_destroy_queue(dev->ctrl.admin_q);
-                blk_put_queue(dev->ctrl.admin_q);
-                blk_mq_free_tag_set(&dev->admin_tagset);
+                nvme_remove_admin_tag_set(&dev->ctrl);
         }
 }
 
-static int nvme_pci_alloc_admin_tag_set(struct nvme_dev *dev)
-{
-        struct blk_mq_tag_set *set = &dev->admin_tagset;
-
-        set->ops = &nvme_mq_admin_ops;
-        set->nr_hw_queues = 1;
-
-        set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-        set->timeout = NVME_ADMIN_TIMEOUT;
-        set->numa_node = dev->ctrl.numa_node;
-        set->cmd_size = sizeof(struct nvme_iod);
-        set->flags = BLK_MQ_F_NO_SCHED;
-        set->driver_data = dev;
-
-        if (blk_mq_alloc_tag_set(set))
-                return -ENOMEM;
-        dev->ctrl.admin_tagset = set;
-
-        dev->ctrl.admin_q = blk_mq_init_queue(set);
-        if (IS_ERR(dev->ctrl.admin_q)) {
-                blk_mq_free_tag_set(set);
-                dev->ctrl.admin_q = NULL;
-                return -ENOMEM;
-        }
-        return 0;
-}
-
 static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
 {
         return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
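
The three calls deleted at the top of this hunk are what the shared teardown
helper has to perform for the PCIe case. The sketch below is reconstructed
from those removed lines and is not the actual nvme_remove_admin_tag_set() in
drivers/nvme/host/core.c, which also covers fabrics-only resources:

    /*
     * Hedged sketch mirroring the removed pci.c teardown; the real helper is
     * nvme_remove_admin_tag_set() in drivers/nvme/host/core.c.
     */
    static void sketch_remove_admin_tag_set(struct nvme_ctrl *ctrl)
    {
            blk_mq_destroy_queue(ctrl->admin_q);      /* drain and kill the admin queue */
            blk_put_queue(ctrl->admin_q);             /* drop the driver's queue reference */
            blk_mq_free_tag_set(ctrl->admin_tagset);  /* free the admin tagset */
    }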
@@ -2515,40 +2486,13 @@ static void nvme_delete_io_queues(struct nvme_dev *dev)
                 __nvme_delete_io_queues(dev, nvme_admin_delete_cq);
 }
 
-static void nvme_pci_alloc_tag_set(struct nvme_dev *dev)
+static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev)
 {
-        struct blk_mq_tag_set * set = &dev->tagset;
-        int ret;
-
-        set->ops = &nvme_mq_ops;
-        set->nr_hw_queues = dev->online_queues - 1;
-        set->nr_maps = 1;
-        if (dev->io_queues[HCTX_TYPE_READ])
-                set->nr_maps = 2;
         if (dev->io_queues[HCTX_TYPE_POLL])
-                set->nr_maps = 3;
-        set->timeout = NVME_IO_TIMEOUT;
-        set->numa_node = dev->ctrl.numa_node;
-        set->queue_depth = min_t(unsigned, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
-        set->cmd_size = sizeof(struct nvme_iod);
-        set->flags = BLK_MQ_F_SHOULD_MERGE;
-        set->driver_data = dev;
-
-        /*
-         * Some Apple controllers requires tags to be unique
-         * across admin and IO queue, so reserve the first 32
-         * tags of the IO queue.
-         */
-        if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
-                set->reserved_tags = NVME_AQ_DEPTH;
-
-        ret = blk_mq_alloc_tag_set(set);
-        if (ret) {
-                dev_warn(dev->ctrl.device,
-                                "IO queues tagset allocation failed %d\n", ret);
-                return;
-        }
-        dev->ctrl.tagset = set;
+                return 3;
+        if (dev->io_queues[HCTX_TYPE_READ])
+                return 2;
+        return 1;
 }
 
 static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
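
For orientation, the open-coded setup deleted above now lives in the shared
nvme_alloc_io_tag_set() helper in drivers/nvme/host/core.c; nvme_pci_nr_maps()
only supplies the nr_maps value. The sketch below is reconstructed from the
fields the removed code used to set, not copied from core.c, and the values
derived from generic controller state (queue depth, hardware queue count,
quirk handling) are approximations. The point that matters for this commit is
that driver_data now holds the generic nvme_ctrl:

    /*
     * Hedged sketch of the shared IO tagset setup, reconstructed from the
     * removed pci.c code; see nvme_alloc_io_tag_set() in core.c for the
     * real version.
     */
    static int sketch_alloc_io_tag_set(struct nvme_ctrl *ctrl,
                    struct blk_mq_tag_set *set, const struct blk_mq_ops *ops,
                    unsigned int nr_maps, unsigned int cmd_size)
    {
            int ret;

            memset(set, 0, sizeof(*set));
            set->ops = ops;
            set->nr_maps = nr_maps;
            set->nr_hw_queues = ctrl->queue_count - 1;   /* IO queues only */
            set->queue_depth = ctrl->sqsize;             /* the real helper clamps this */
            set->timeout = NVME_IO_TIMEOUT;
            set->numa_node = ctrl->numa_node;
            set->cmd_size = cmd_size;
            set->flags = BLK_MQ_F_SHOULD_MERGE;
            set->driver_data = ctrl;                     /* generic ctrl, not nvme_dev */
            /* reserved tags for NVME_QUIRK_SHARED_TAGS etc. are also handled here */

            ret = blk_mq_alloc_tag_set(set);
            if (ret)
                    return ret;
            ctrl->tagset = set;
            return 0;
    }

Storing the ctrl pointer here is why the hctx and init_request callbacks
earlier in the diff switch from using the private data directly to
to_nvme_dev().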
@@ -2770,7 +2714,7 @@ static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
 static void nvme_free_tagset(struct nvme_dev *dev)
 {
         if (dev->tagset.tags)
-                blk_mq_free_tag_set(&dev->tagset);
+                nvme_remove_io_tag_set(&dev->ctrl);
         dev->ctrl.tagset = NULL;
 }
 
@@ -3101,7 +3045,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         if (result)
                 goto out_release_iod_mempool;
 
-        result = nvme_pci_alloc_admin_tag_set(dev);
+        result = nvme_alloc_admin_tag_set(&dev->ctrl, &dev->admin_tagset,
+                        &nvme_mq_admin_ops, sizeof(struct nvme_iod));
         if (result)
                 goto out_disable;
 
@@ -3131,12 +3076,14 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                 goto out_disable;
 
         if (dev->online_queues > 1) {
-                nvme_pci_alloc_tag_set(dev);
+                nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
+                                nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
                 nvme_dbbuf_set(dev);
-        } else {
-                dev_warn(dev->ctrl.device, "IO queues not created\n");
         }
 
+        if (!dev->ctrl.tagset)
+                dev_warn(dev->ctrl.device, "IO queues not created\n");
+
         if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
                 dev_warn(dev->ctrl.device,
                         "failed to mark controller live state\n");
