@@ -101,12 +101,13 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
 
 #define SD_MINORS	16
 
-static void sd_config_discard(struct scsi_disk *, unsigned int);
-static void sd_config_write_same(struct scsi_disk *);
+static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
+		unsigned int mode);
+static void sd_config_write_same(struct scsi_disk *sdkp,
+		struct queue_limits *lim);
 static int  sd_revalidate_disk(struct gendisk *);
 static void sd_unlock_native_capacity(struct gendisk *disk);
 static void sd_shutdown(struct device *);
-static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
 static void scsi_disk_release(struct device *cdev);
 
 static DEFINE_IDA(sd_index_ida);
@@ -456,7 +457,8 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
 {
 	struct scsi_disk *sdkp = to_scsi_disk(dev);
 	struct scsi_device *sdp = sdkp->device;
-	int mode;
+	struct queue_limits lim;
+	int mode, err;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
@@ -472,8 +474,13 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
 	if (mode < 0)
 		return -EINVAL;
 
-	sd_config_discard(sdkp, mode);
-
+	lim = queue_limits_start_update(sdkp->disk->queue);
+	sd_config_discard(sdkp, &lim, mode);
+	blk_mq_freeze_queue(sdkp->disk->queue);
+	err = queue_limits_commit_update(sdkp->disk->queue, &lim);
+	blk_mq_unfreeze_queue(sdkp->disk->queue);
+	if (err)
+		return err;
 	return count;
 }
 static DEVICE_ATTR_RW(provisioning_mode);
@@ -556,6 +563,7 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
 {
 	struct scsi_disk *sdkp = to_scsi_disk(dev);
 	struct scsi_device *sdp = sdkp->device;
+	struct queue_limits lim;
 	unsigned long max;
 	int err;
 
@@ -577,8 +585,13 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
 		sdkp->max_ws_blocks = max;
 	}
 
-	sd_config_write_same(sdkp);
-
+	lim = queue_limits_start_update(sdkp->disk->queue);
+	sd_config_write_same(sdkp, &lim);
+	blk_mq_freeze_queue(sdkp->disk->queue);
+	err = queue_limits_commit_update(sdkp->disk->queue, &lim);
+	blk_mq_unfreeze_queue(sdkp->disk->queue);
+	if (err)
+		return err;
 	return count;
 }
 static DEVICE_ATTR_RW(max_write_same_blocks);
@@ -827,17 +840,15 @@ static void sd_disable_discard(struct scsi_disk *sdkp)
 	blk_queue_max_discard_sectors(sdkp->disk->queue, 0);
 }
 
-static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
+static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
+		unsigned int mode)
 {
-	struct request_queue *q = sdkp->disk->queue;
 	unsigned int logical_block_size = sdkp->device->sector_size;
 	unsigned int max_blocks = 0;
 
-	q->limits.discard_alignment =
-		sdkp->unmap_alignment * logical_block_size;
-	q->limits.discard_granularity =
-		max(sdkp->physical_block_size,
-		    sdkp->unmap_granularity * logical_block_size);
+	lim->discard_alignment = sdkp->unmap_alignment * logical_block_size;
+	lim->discard_granularity = max(sdkp->physical_block_size,
+			sdkp->unmap_granularity * logical_block_size);
 	sdkp->provisioning_mode = mode;
 
 	switch (mode) {
@@ -875,7 +886,8 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 		break;
 	}
 
-	blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
+	lim->max_hw_discard_sectors = max_blocks *
+		(logical_block_size >> SECTOR_SHIFT);
 }
 
 static void *sd_set_special_bvec(struct request *rq, unsigned int data_len)
@@ -1010,9 +1022,9 @@ static void sd_disable_write_same(struct scsi_disk *sdkp)
 	blk_queue_max_write_zeroes_sectors(sdkp->disk->queue, 0);
 }
 
-static void sd_config_write_same(struct scsi_disk *sdkp)
+static void sd_config_write_same(struct scsi_disk *sdkp,
+		struct queue_limits *lim)
 {
-	struct request_queue *q = sdkp->disk->queue;
 	unsigned int logical_block_size = sdkp->device->sector_size;
 
 	if (sdkp->device->no_write_same) {
@@ -1066,8 +1078,8 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
 	}
 
 out:
-	blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
-					   (logical_block_size >> 9));
+	lim->max_write_zeroes_sectors =
+		sdkp->max_ws_blocks * (logical_block_size >> SECTOR_SHIFT);
 }
 
 static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
@@ -2523,7 +2535,7 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
 #define READ_CAPACITY_RETRIES_ON_RESET	10
 
 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
-						unsigned char *buffer)
+		struct queue_limits *lim, unsigned char *buffer)
 {
 	unsigned char cmd[16];
 	struct scsi_sense_hdr sshdr;
@@ -2597,7 +2609,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
 
 	/* Lowest aligned logical block */
 	alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
-	blk_queue_alignment_offset(sdp->request_queue, alignment);
+	lim->alignment_offset = alignment;
 	if (alignment && sdkp->first_scan)
 		sd_printk(KERN_NOTICE, sdkp,
 			  "physical block alignment offset: %u\n", alignment);
@@ -2608,7 +2620,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
 		if (buffer[14] & 0x40) /* LBPRZ */
 			sdkp->lbprz = 1;
 
-		sd_config_discard(sdkp, SD_LBP_WS16);
+		sd_config_discard(sdkp, lim, SD_LBP_WS16);
 	}
 
 	sdkp->capacity = lba + 1;
@@ -2711,13 +2723,14 @@ static int sd_try_rc16_first(struct scsi_device *sdp)
  * read disk capacity
  */
 static void
-sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
+sd_read_capacity(struct scsi_disk *sdkp, struct queue_limits *lim,
+		unsigned char *buffer)
 {
 	int sector_size;
 	struct scsi_device *sdp = sdkp->device;
 
 	if (sd_try_rc16_first(sdp)) {
-		sector_size = read_capacity_16(sdkp, sdp, buffer);
+		sector_size = read_capacity_16(sdkp, sdp, lim, buffer);
 		if (sector_size == -EOVERFLOW)
 			goto got_data;
 		if (sector_size == -ENODEV)
@@ -2737,7 +2750,7 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
 			int old_sector_size = sector_size;
 			sd_printk(KERN_NOTICE, sdkp, "Very big device. "
 					"Trying to use READ CAPACITY(16).\n");
-			sector_size = read_capacity_16(sdkp, sdp, buffer);
+			sector_size = read_capacity_16(sdkp, sdp, lim, buffer);
 			if (sector_size < 0) {
 				sd_printk(KERN_NOTICE, sdkp,
 					"Using 0xffffffff as device size\n");
@@ -2796,9 +2809,8 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
 		 */
 		sector_size = 512;
 	}
-	blk_queue_logical_block_size(sdp->request_queue, sector_size);
-	blk_queue_physical_block_size(sdp->request_queue,
-				      sdkp->physical_block_size);
+	lim->logical_block_size = sector_size;
+	lim->physical_block_size = sdkp->physical_block_size;
 	sdkp->device->sector_size = sector_size;
 
 	if (sdkp->capacity > 0xffffffff)
@@ -3220,11 +3232,11 @@ static unsigned int sd_discard_mode(struct scsi_disk *sdkp)
 	return SD_LBP_DISABLE;
 }
 
-/**
- * sd_read_block_limits - Query disk device for preferred I/O sizes.
- * @sdkp: disk to query
+/*
+ * Query disk device for preferred I/O sizes.
  */
-static void sd_read_block_limits(struct scsi_disk *sdkp)
+static void sd_read_block_limits(struct scsi_disk *sdkp,
+		struct queue_limits *lim)
 {
 	struct scsi_vpd *vpd;
 
@@ -3258,7 +3270,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 		sdkp->unmap_alignment =
 			get_unaligned_be32(&vpd->data[32]) & ~(1 << 31);
 
-		sd_config_discard(sdkp, sd_discard_mode(sdkp));
+		sd_config_discard(sdkp, lim, sd_discard_mode(sdkp));
 	}
 
  out:
@@ -3277,11 +3289,9 @@ static void sd_read_block_limits_ext(struct scsi_disk *sdkp)
 	rcu_read_unlock();
 }
 
-/**
- * sd_read_block_characteristics - Query block dev. characteristics
- * @sdkp: disk to query
- */
-static void sd_read_block_characteristics(struct scsi_disk *sdkp)
+/* Query block device characteristics */
+static void sd_read_block_characteristics(struct scsi_disk *sdkp,
+		struct queue_limits *lim)
 {
 	struct request_queue *q = sdkp->disk->queue;
 	struct scsi_vpd *vpd;
@@ -3307,29 +3317,26 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 
 #ifdef CONFIG_BLK_DEV_ZONED /* sd_probe rejects ZBD devices early otherwise */
 	if (sdkp->device->type == TYPE_ZBC) {
-		/*
-		 * Host-managed.
-		 */
-		disk_set_zoned(sdkp->disk);
+		lim->zoned = true;
 
 		/*
 		 * Per ZBC and ZAC specifications, writes in sequential write
 		 * required zones of host-managed devices must be aligned to
 		 * the device physical block size.
		 */
-		blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
+		lim->zone_write_granularity = sdkp->physical_block_size;
 	} else {
 		/*
 		 * Host-aware devices are treated as conventional.
		 */
-		WARN_ON_ONCE(blk_queue_is_zoned(q));
+		lim->zoned = false;
 	}
 #endif /* CONFIG_BLK_DEV_ZONED */
 
 	if (!sdkp->first_scan)
 		return;
 
-	if (blk_queue_is_zoned(q))
+	if (lim->zoned)
 		sd_printk(KERN_NOTICE, sdkp, "Host-managed zoned block device\n");
 	else if (sdkp->zoned == 1)
 		sd_printk(KERN_NOTICE, sdkp, "Host-aware SMR disk used as regular disk\n");
@@ -3605,8 +3612,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
 	struct scsi_device *sdp = sdkp->device;
 	struct request_queue *q = sdkp->disk->queue;
 	sector_t old_capacity = sdkp->capacity;
+	struct queue_limits lim;
 	unsigned char *buffer;
 	unsigned int dev_max;
+	int err;
 
 	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
 				      "sd_revalidate_disk\n"));
@@ -3627,12 +3636,14 @@ static int sd_revalidate_disk(struct gendisk *disk)
 
 	sd_spinup_disk(sdkp);
 
+	lim = queue_limits_start_update(sdkp->disk->queue);
+
 	/*
 	 * Without media there is no reason to ask; moreover, some devices
 	 * react badly if we do.
	 */
 	if (sdkp->media_present) {
-		sd_read_capacity(sdkp, buffer);
+		sd_read_capacity(sdkp, &lim, buffer);
 		/*
 		 * Some USB/UAS devices return generic values for mode pages
 		 * until the media has been accessed. Trigger a READ operation
@@ -3651,10 +3662,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
 
 	if (scsi_device_supports_vpd(sdp)) {
 		sd_read_block_provisioning(sdkp);
-		sd_read_block_limits(sdkp);
+		sd_read_block_limits(sdkp, &lim);
 		sd_read_block_limits_ext(sdkp);
-		sd_read_block_characteristics(sdkp);
-		sd_zbc_read_zones(sdkp, buffer);
+		sd_read_block_characteristics(sdkp, &lim);
+		sd_zbc_read_zones(sdkp, &lim, buffer);
 		sd_read_cpr(sdkp);
 	}
 
@@ -3680,31 +3691,36 @@ static int sd_revalidate_disk(struct gendisk *disk)
 
 	/* Some devices report a maximum block count for READ/WRITE requests. */
 	dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
-	q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
+	lim.max_dev_sectors = logical_to_sectors(sdp, dev_max);
 
 	if (sd_validate_min_xfer_size(sdkp))
-		blk_queue_io_min(sdkp->disk->queue,
-				 logical_to_bytes(sdp, sdkp->min_xfer_blocks));
+		lim.io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks);
 	else
-		blk_queue_io_min(sdkp->disk->queue, 0);
+		lim.io_min = 0;
 
 	/*
 	 * Limit default to SCSI host optimal sector limit if set. There may be
 	 * an impact on performance for when the size of a request exceeds this
 	 * host limit.
	 */
-	q->limits.io_opt = sdp->host->opt_sectors << SECTOR_SHIFT;
+	lim.io_opt = sdp->host->opt_sectors << SECTOR_SHIFT;
 	if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
-		q->limits.io_opt = min_not_zero(q->limits.io_opt,
+		lim.io_opt = min_not_zero(lim.io_opt,
 				logical_to_bytes(sdp, sdkp->opt_xfer_blocks));
 	}
 
 	sdkp->first_scan = 0;
 
 	set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
-	sd_config_write_same(sdkp);
+	sd_config_write_same(sdkp, &lim);
 	kfree(buffer);
 
+	blk_mq_freeze_queue(sdkp->disk->queue);
+	err = queue_limits_commit_update(sdkp->disk->queue, &lim);
+	blk_mq_unfreeze_queue(sdkp->disk->queue);
+	if (err)
+		return err;
+
 	/*
 	 * For a zoned drive, revalidating the zones can be done only once
 	 * the gendisk capacity is set. So if this fails, set back the gendisk
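Every call site converted above follows the same atomic queue-limits pattern: take a private copy of the limits with queue_limits_start_update(), let the sd_* helpers fill in that copy, then freeze the queue and publish everything at once with queue_limits_commit_update(). The sketch below shows only that pattern in isolation; example_update_limits() and the single field it sets are illustrative placeholders, not part of the patch, which instead routes the copy through sd_config_discard(), sd_config_write_same() and the read_* helpers.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical helper demonstrating the update pattern used in this patch. */
static int example_update_limits(struct gendisk *disk,
		unsigned int logical_block_size)
{
	struct queue_limits lim;
	int err;

	/* Start an update: returns a snapshot of the current limits. */
	lim = queue_limits_start_update(disk->queue);

	/* Modify the snapshot; nothing is visible to the block layer yet. */
	lim.logical_block_size = logical_block_size;

	/* Freeze I/O and commit all of the changes in one step. */
	blk_mq_freeze_queue(disk->queue);
	err = queue_limits_commit_update(disk->queue, &lim);
	blk_mq_unfreeze_queue(disk->queue);

	return err;
}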