
Commit 1652b0b

Christoph Hellwig authored and axboe committed
block: remove unused queue limits API
Remove all APIs that are unused now that sd and sr have been converted
to the atomic queue limits API.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Bart Van Assche <[email protected]>
Reviewed-by: John Garry <[email protected]>
Reviewed-by: Nitesh Shetty <[email protected]>
Reviewed-by: Martin K. Petersen <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent 969f17e commit 1652b0b

File tree

2 files changed, 0 insertions(+), 214 deletions(-)

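For context on the replacement: with the atomic queue limits API, a driver no longer calls one exported setter per field. It snapshots the current limits with queue_limits_start_update(), edits the fields directly, and publishes them with queue_limits_commit_update(), which validates the whole set in one step. The sketch below illustrates the pattern; mydrv_revalidate() and the 4096-byte values are hypothetical placeholders, not code from this commit.

#include <linux/blkdev.h>

/*
 * Minimal sketch (hypothetical driver) of the atomic queue limits
 * pattern that replaces the per-field setters removed below.
 */
static int mydrv_revalidate(struct gendisk *disk)
{
	struct queue_limits lim;

	/* Copy the current limits; this takes the queue's limits lock. */
	lim = queue_limits_start_update(disk->queue);

	lim.logical_block_size = 4096;	/* was blk_queue_logical_block_size() */
	lim.physical_block_size = 4096;	/* was blk_queue_physical_block_size() */
	lim.io_min = 4096;		/* was blk_queue_io_min() */

	/* Validate and publish all fields atomically; drops the lock. */
	return queue_limits_commit_update(disk->queue, &lim);
}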

block/blk-settings.c

Lines changed: 0 additions & 190 deletions
@@ -293,24 +293,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
 }
 EXPORT_SYMBOL_GPL(queue_limits_set);
 
-/**
- * blk_queue_chunk_sectors - set size of the chunk for this queue
- * @q:  the request queue for the device
- * @chunk_sectors:  chunk sectors in the usual 512b unit
- *
- * Description:
- *    If a driver doesn't want IOs to cross a given chunk size, it can set
- *    this limit and prevent merging across chunks. Note that the block layer
- *    must accept a page worth of data at any offset. So if the crossing of
- *    chunks is a hard limitation in the driver, it must still be prepared
- *    to split single page bios.
- **/
-void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
-{
-	q->limits.chunk_sectors = chunk_sectors;
-}
-EXPORT_SYMBOL(blk_queue_chunk_sectors);
-
 /**
  * blk_queue_max_discard_sectors - set max sectors for a single discard
  * @q:  the request queue for the device
@@ -352,139 +334,6 @@ void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
 }
 EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
 
-/**
- * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
- * @q:  the request queue for the device
- * @max_zone_append_sectors: maximum number of sectors to write per command
- *
- * Sets the maximum number of sectors allowed for zone append commands.
- * Specifying 0 for @max_zone_append_sectors indicates that the queue does
- * not natively support zone append operations and that the block layer must
- * emulate these operations using regular writes.
- **/
-void blk_queue_max_zone_append_sectors(struct request_queue *q,
-		unsigned int max_zone_append_sectors)
-{
-	unsigned int max_sectors = 0;
-
-	if (WARN_ON(!blk_queue_is_zoned(q)))
-		return;
-
-	if (max_zone_append_sectors) {
-		max_sectors = min(q->limits.max_hw_sectors,
-				  max_zone_append_sectors);
-		max_sectors = min(q->limits.chunk_sectors, max_sectors);
-
-		/*
-		 * Signal eventual driver bugs resulting in the max_zone_append
-		 * sectors limit being 0 due to the chunk_sectors limit (zone
-		 * size) not set or the max_hw_sectors limit not set.
-		 */
-		WARN_ON_ONCE(!max_sectors);
-	}
-
-	q->limits.max_zone_append_sectors = max_sectors;
-}
-EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
-
-/**
- * blk_queue_logical_block_size - set logical block size for the queue
- * @q:  the request queue for the device
- * @size:  the logical block size, in bytes
- *
- * Description:
- *   This should be set to the lowest possible block size that the
- *   storage device can address.  The default of 512 covers most
- *   hardware.
- **/
-void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
-{
-	struct queue_limits *limits = &q->limits;
-
-	limits->logical_block_size = size;
-
-	if (limits->discard_granularity < limits->logical_block_size)
-		limits->discard_granularity = limits->logical_block_size;
-
-	if (limits->physical_block_size < size)
-		limits->physical_block_size = size;
-
-	if (limits->io_min < limits->physical_block_size)
-		limits->io_min = limits->physical_block_size;
-
-	limits->max_hw_sectors =
-		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
-	limits->max_sectors =
-		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
-}
-EXPORT_SYMBOL(blk_queue_logical_block_size);
-
-/**
- * blk_queue_physical_block_size - set physical block size for the queue
- * @q:  the request queue for the device
- * @size:  the physical block size, in bytes
- *
- * Description:
- *   This should be set to the lowest possible sector size that the
- *   hardware can operate on without reverting to read-modify-write
- *   operations.
- */
-void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
-{
-	q->limits.physical_block_size = size;
-
-	if (q->limits.physical_block_size < q->limits.logical_block_size)
-		q->limits.physical_block_size = q->limits.logical_block_size;
-
-	if (q->limits.discard_granularity < q->limits.physical_block_size)
-		q->limits.discard_granularity = q->limits.physical_block_size;
-
-	if (q->limits.io_min < q->limits.physical_block_size)
-		q->limits.io_min = q->limits.physical_block_size;
-}
-EXPORT_SYMBOL(blk_queue_physical_block_size);
-
-/**
- * blk_queue_zone_write_granularity - set zone write granularity for the queue
- * @q:  the request queue for the zoned device
- * @size:  the zone write granularity size, in bytes
- *
- * Description:
- *   This should be set to the lowest possible size allowing to write in
- *   sequential zones of a zoned block device.
- */
-void blk_queue_zone_write_granularity(struct request_queue *q,
-				      unsigned int size)
-{
-	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
-		return;
-
-	q->limits.zone_write_granularity = size;
-
-	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
-		q->limits.zone_write_granularity = q->limits.logical_block_size;
-}
-EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);
-
-/**
- * blk_queue_alignment_offset - set physical block alignment offset
- * @q:  the request queue for the device
- * @offset: alignment offset in bytes
- *
- * Description:
- *   Some devices are naturally misaligned to compensate for things like
- *   the legacy DOS partition table 63-sector offset.  Low-level drivers
- *   should call this function for devices whose first sector is not
- *   naturally aligned.
- */
-void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
-{
-	q->limits.alignment_offset =
-		offset & (q->limits.physical_block_size - 1);
-	q->limits.misaligned = 0;
-}
-EXPORT_SYMBOL(blk_queue_alignment_offset);
-
 void disk_update_readahead(struct gendisk *disk)
 {
 	blk_apply_bdi_limits(disk->bdi, &disk->queue->limits);
@@ -514,26 +363,6 @@ void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
 }
 EXPORT_SYMBOL(blk_limits_io_min);
 
-/**
- * blk_queue_io_min - set minimum request size for the queue
- * @q:  the request queue for the device
- * @min:  smallest I/O size in bytes
- *
- * Description:
- *   Storage devices may report a granularity or preferred minimum I/O
- *   size which is the smallest request the device can perform without
- *   incurring a performance penalty.  For disk drives this is often the
- *   physical block size.  For RAID arrays it is often the stripe chunk
- *   size.  A properly aligned multiple of minimum_io_size is the
- *   preferred request size for workloads where a high number of I/O
- *   operations is desired.
- */
-void blk_queue_io_min(struct request_queue *q, unsigned int min)
-{
-	blk_limits_io_min(&q->limits, min);
-}
-EXPORT_SYMBOL(blk_queue_io_min);
-
 /**
  * blk_limits_io_opt - set optimal request size for a device
  * @limits: the queue limits
@@ -841,25 +670,6 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
 }
 EXPORT_SYMBOL_GPL(blk_queue_write_cache);
 
-/**
- * disk_set_zoned - indicate a zoned device
- * @disk:	gendisk to configure
- */
-void disk_set_zoned(struct gendisk *disk)
-{
-	struct request_queue *q = disk->queue;
-
-	WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
-
-	/*
-	 * Set the zone write granularity to the device logical block
-	 * size by default. The driver can change this value if needed.
-	 */
-	q->limits.zoned = true;
-	blk_queue_zone_write_granularity(q, queue_logical_block_size(q));
-}
-EXPORT_SYMBOL_GPL(disk_set_zoned);
-
 int bdev_alignment_offset(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);

include/linux/blkdev.h

Lines changed: 0 additions & 24 deletions
@@ -332,8 +332,6 @@ struct queue_limits {
 typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
 			       void *data);
 
-void disk_set_zoned(struct gendisk *disk);
-
 #define BLK_ALL_ZONES  ((unsigned int)-1)
 int blkdev_report_zones(struct block_device *bdev, sector_t sector,
 		unsigned int nr_zones, report_zones_cb cb, void *data);
@@ -638,18 +636,6 @@ static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
 	return sector >> ilog2(disk->queue->limits.chunk_sectors);
 }
 
-static inline void disk_set_max_open_zones(struct gendisk *disk,
-		unsigned int max_open_zones)
-{
-	disk->queue->limits.max_open_zones = max_open_zones;
-}
-
-static inline void disk_set_max_active_zones(struct gendisk *disk,
-		unsigned int max_active_zones)
-{
-	disk->queue->limits.max_active_zones = max_active_zones;
-}
-
 static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
 {
 	return bdev->bd_disk->queue->limits.max_open_zones;
@@ -929,24 +915,14 @@ static inline void queue_limits_cancel_update(struct request_queue *q)
 /*
  * Access functions for manipulating queue properties
  */
-extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
 void blk_queue_max_secure_erase_sectors(struct request_queue *q,
 		unsigned int max_sectors);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors);
 extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
 		unsigned int max_write_same_sectors);
-extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
-extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
-		unsigned int max_zone_append_sectors);
-extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
-void blk_queue_zone_write_granularity(struct request_queue *q,
-		unsigned int size);
-extern void blk_queue_alignment_offset(struct request_queue *q,
-		unsigned int alignment);
 void disk_update_readahead(struct gendisk *disk);
 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
-extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
 extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
 extern void blk_set_stacking_limits(struct queue_limits *lim);