@@ -293,24 +293,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
 }
 EXPORT_SYMBOL_GPL(queue_limits_set);
 
-/**
- * blk_queue_chunk_sectors - set size of the chunk for this queue
- * @q: the request queue for the device
- * @chunk_sectors: chunk sectors in the usual 512b unit
- *
- * Description:
- *    If a driver doesn't want IOs to cross a given chunk size, it can set
- *    this limit and prevent merging across chunks. Note that the block layer
- *    must accept a page worth of data at any offset. So if the crossing of
- *    chunks is a hard limitation in the driver, it must still be prepared
- *    to split single page bios.
- **/
-void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
-{
-	q->limits.chunk_sectors = chunk_sectors;
-}
-EXPORT_SYMBOL(blk_queue_chunk_sectors);
-
 /**
  * blk_queue_max_discard_sectors - set max sectors for a single discard
  * @q: the request queue for the device
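Note: with blk_queue_chunk_sectors() removed, a driver that wants to keep I/Os from crossing a chunk boundary would presumably go through the queue_limits API instead; queue_limits_set() is still exported by this file (see the context above). A minimal sketch under that assumption, with a hypothetical helper name and a caller-supplied boundary:

/*
 * Sketch only: sets the chunk_sectors limit through an atomic limits
 * update instead of the removed blk_queue_chunk_sectors() helper.
 * "boundary" is the chunk size in 512-byte sectors, as before.
 */
static int example_set_chunk_boundary(struct request_queue *q,
				      unsigned int boundary)
{
	struct queue_limits lim = q->limits;	/* copy current limits */

	lim.chunk_sectors = boundary;		/* no merging across chunks */
	return queue_limits_set(q, &lim);	/* validate and apply */
}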
@@ -352,139 +334,6 @@ void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
 }
 EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
 
-/**
- * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
- * @q: the request queue for the device
- * @max_zone_append_sectors: maximum number of sectors to write per command
- *
- * Sets the maximum number of sectors allowed for zone append commands. If
- * Specifying 0 for @max_zone_append_sectors indicates that the queue does
- * not natively support zone append operations and that the block layer must
- * emulate these operations using regular writes.
- **/
-void blk_queue_max_zone_append_sectors(struct request_queue *q,
-		unsigned int max_zone_append_sectors)
-{
-	unsigned int max_sectors = 0;
-
-	if (WARN_ON(!blk_queue_is_zoned(q)))
-		return;
-
-	if (max_zone_append_sectors) {
-		max_sectors = min(q->limits.max_hw_sectors,
-				  max_zone_append_sectors);
-		max_sectors = min(q->limits.chunk_sectors, max_sectors);
-
-		/*
-		 * Signal eventual driver bugs resulting in the max_zone_append
-		 * sectors limit being 0 due to the chunk_sectors limit (zone
-		 * size) not set or the max_hw_sectors limit not set.
-		 */
-		WARN_ON_ONCE(!max_sectors);
-	}
-
-	q->limits.max_zone_append_sectors = max_sectors;
-}
-EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
-
-/**
- * blk_queue_logical_block_size - set logical block size for the queue
- * @q: the request queue for the device
- * @size: the logical block size, in bytes
- *
- * Description:
- *    This should be set to the lowest possible block size that the
- *    storage device can address. The default of 512 covers most
- *    hardware.
- **/
-void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
-{
-	struct queue_limits *limits = &q->limits;
-
-	limits->logical_block_size = size;
-
-	if (limits->discard_granularity < limits->logical_block_size)
-		limits->discard_granularity = limits->logical_block_size;
-
-	if (limits->physical_block_size < size)
-		limits->physical_block_size = size;
-
-	if (limits->io_min < limits->physical_block_size)
-		limits->io_min = limits->physical_block_size;
-
-	limits->max_hw_sectors =
-		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
-	limits->max_sectors =
-		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
-}
-EXPORT_SYMBOL(blk_queue_logical_block_size);
-
-/**
- * blk_queue_physical_block_size - set physical block size for the queue
- * @q: the request queue for the device
- * @size: the physical block size, in bytes
- *
- * Description:
- *    This should be set to the lowest possible sector size that the
- *    hardware can operate on without reverting to read-modify-write
- *    operations.
- */
-void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
-{
-	q->limits.physical_block_size = size;
-
-	if (q->limits.physical_block_size < q->limits.logical_block_size)
-		q->limits.physical_block_size = q->limits.logical_block_size;
-
-	if (q->limits.discard_granularity < q->limits.physical_block_size)
-		q->limits.discard_granularity = q->limits.physical_block_size;
-
-	if (q->limits.io_min < q->limits.physical_block_size)
-		q->limits.io_min = q->limits.physical_block_size;
-}
-EXPORT_SYMBOL(blk_queue_physical_block_size);
-
-/**
- * blk_queue_zone_write_granularity - set zone write granularity for the queue
- * @q: the request queue for the zoned device
- * @size: the zone write granularity size, in bytes
- *
- * Description:
- *    This should be set to the lowest possible size allowing to write in
- *    sequential zones of a zoned block device.
- */
-void blk_queue_zone_write_granularity(struct request_queue *q,
-				      unsigned int size)
-{
-	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
-		return;
-
-	q->limits.zone_write_granularity = size;
-
-	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
-		q->limits.zone_write_granularity = q->limits.logical_block_size;
-}
-EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);
-
-/**
- * blk_queue_alignment_offset - set physical block alignment offset
- * @q: the request queue for the device
- * @offset: alignment offset in bytes
- *
- * Description:
- *    Some devices are naturally misaligned to compensate for things like
- *    the legacy DOS partition table 63-sector offset. Low-level drivers
- *    should call this function for devices whose first sector is not
- *    naturally aligned.
- */
-void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
-{
-	q->limits.alignment_offset =
-		offset & (q->limits.physical_block_size - 1);
-	q->limits.misaligned = 0;
-}
-EXPORT_SYMBOL(blk_queue_alignment_offset);
-
 void disk_update_readahead(struct gendisk *disk)
 {
 	blk_apply_bdi_limits(disk->bdi, &disk->queue->limits);
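Note: each of the five helpers removed above poked one field of q->limits and re-derived the dependent fields by hand (physical_block_size at least the logical_block_size, io_min at least the physical_block_size, and so on). After this patch a driver would presumably describe everything in one queue_limits update and rely on validation when the limits are applied. A sketch, with illustrative 4 KiB values that are not taken from this patch:

/*
 * Sketch only: one limits update covering what the removed setters
 * (blk_queue_logical_block_size() and friends) did field by field.
 * The 4096-byte values are illustrative; validation on commit is
 * assumed to keep physical_block_size >= logical_block_size and
 * io_min >= physical_block_size, as the old helpers did.
 */
static int example_set_block_sizes(struct request_queue *q)
{
	struct queue_limits lim = q->limits;	/* copy current limits */

	lim.logical_block_size = 4096;		/* smallest addressable unit */
	lim.physical_block_size = 4096;		/* no RMW below this size */
	lim.alignment_offset = 0;		/* first sector naturally aligned */
	return queue_limits_set(q, &lim);
}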
@@ -514,26 +363,6 @@ void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
 }
 EXPORT_SYMBOL(blk_limits_io_min);
 
-/**
- * blk_queue_io_min - set minimum request size for the queue
- * @q: the request queue for the device
- * @min: smallest I/O size in bytes
- *
- * Description:
- *    Storage devices may report a granularity or preferred minimum I/O
- *    size which is the smallest request the device can perform without
- *    incurring a performance penalty. For disk drives this is often the
- *    physical block size. For RAID arrays it is often the stripe chunk
- *    size. A properly aligned multiple of minimum_io_size is the
- *    preferred request size for workloads where a high number of I/O
- *    operations is desired.
- */
-void blk_queue_io_min(struct request_queue *q, unsigned int min)
-{
-	blk_limits_io_min(&q->limits, min);
-}
-EXPORT_SYMBOL(blk_queue_io_min);
-
 /**
  * blk_limits_io_opt - set optimal request size for a device
  * @limits: the queue limits
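Note: blk_queue_io_min() was a one-line wrapper around blk_limits_io_min(), which this patch keeps (see the context above), so callers holding a struct queue_limits can use it directly. A sketch with an illustrative 64 KiB stripe-chunk value:

/*
 * Sketch only: the surviving blk_limits_io_min() operates on the
 * limits structure directly. 64 KiB stands in for a RAID stripe
 * chunk; the value is illustrative, not taken from this patch.
 */
static void example_set_io_min(struct queue_limits *lim)
{
	blk_limits_io_min(lim, 64 * 1024);	/* smallest penalty-free I/O */
}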
@@ -841,25 +670,6 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
 }
 EXPORT_SYMBOL_GPL(blk_queue_write_cache);
 
-/**
- * disk_set_zoned - inidicate a zoned device
- * @disk: gendisk to configure
- */
-void disk_set_zoned(struct gendisk *disk)
-{
-	struct request_queue *q = disk->queue;
-
-	WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
-
-	/*
-	 * Set the zone write granularity to the device logical block
-	 * size by default. The driver can change this value if needed.
-	 */
-	q->limits.zoned = true;
-	blk_queue_zone_write_granularity(q, queue_logical_block_size(q));
-}
-EXPORT_SYMBOL_GPL(disk_set_zoned);
-
 int bdev_alignment_offset(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
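Note: disk_set_zoned() set q->limits.zoned and defaulted the zone write granularity to the logical block size. A driver bringing up a zoned disk would presumably fold both into its limits update now; a sketch under that assumption:

/*
 * Sketch only: what the removed disk_set_zoned() did, expressed as a
 * queue_limits update. Mirrors its default of using the logical block
 * size as the zone write granularity; a driver may pick a larger one.
 * Assumes CONFIG_BLK_DEV_ZONED, as the removed WARN_ON_ONCE() did.
 */
static int example_set_zoned(struct request_queue *q)
{
	struct queue_limits lim = q->limits;	/* copy current limits */

	lim.zoned = true;
	lim.zone_write_granularity = lim.logical_block_size;
	return queue_limits_set(q, &lim);
}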