@@ -94,7 +94,7 @@ static inline struct dm_target_io *clone_to_tio(struct bio *clone)
 
 void *dm_per_bio_data(struct bio *bio, size_t data_size)
 {
-	if (!clone_to_tio(bio)->inside_dm_io)
+	if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
 		return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
 	return (char *)bio - DM_IO_BIO_OFFSET - data_size;
 }
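
The dm_tio_flagged()/dm_tio_set_flag() accessors and the DM_TIO_* bits used above are not part of this hunk; presumably they sit in drivers/md/dm-core.h next to struct dm_target_io, which trades its two booleans for a small flags word. A minimal sketch of what those definitions could look like (the names come from this diff; the exact bit layout and field width are assumptions):

/* Sketch only -- assumed to live in drivers/md/dm-core.h. */
enum {
	DM_TIO_INSIDE_DM_IO,		/* tio is embedded inside a struct dm_io */
	DM_TIO_IS_DUPLICATE_BIO,	/* tio backs one of several duplicate bios */
};

static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
{
	return (tio->flags & (1U << bit)) != 0;
}

static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
{
	tio->flags |= (1U << bit);
}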
@@ -538,17 +538,18 @@ static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
 
 	/*
 	 * Ensure IO accounting is only ever started once.
-	 * Expect no possibility for race unless is_duplicate_bio.
+	 * Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO.
 	 */
-	if (!clone || likely(!clone_to_tio(clone)->is_duplicate_bio)) {
+	if (!clone ||
+	    likely(!dm_tio_flagged(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO))) {
 		if (WARN_ON_ONCE(dm_io_flagged(io, DM_IO_ACCOUNTED)))
 			return;
 		dm_io_set_flag(io, DM_IO_ACCOUNTED);
 	} else {
 		unsigned long flags;
 		if (dm_io_flagged(io, DM_IO_ACCOUNTED))
 			return;
-		/* Can afford locking given is_duplicate_bio */
+		/* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
 		spin_lock_irqsave(&io->startio_lock, flags);
 		dm_io_set_flag(io, DM_IO_ACCOUNTED);
 		spin_unlock_irqrestore(&io->startio_lock, flags);
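
The accounting path above also leans on dm_io_flagged()/dm_io_set_flag(), which already exist before this patch; presumably they mirror the tio accessors on struct dm_io's own flags field, roughly:

/* Sketch only -- assumed to mirror the dm_target_io accessors. */
static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
{
	return (io->flags & (1U << bit)) != 0;
}

static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
{
	io->flags |= (1U << bit);
}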
@@ -571,7 +572,8 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
 	clone = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, &md->io_bs);
 
 	tio = clone_to_tio(clone);
-	tio->inside_dm_io = true;
+	tio->flags = 0;
+	dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
 	tio->io = NULL;
 
 	io = container_of(tio, struct dm_io, tio);
@@ -618,14 +620,13 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
 		clone->bi_opf &= ~REQ_DM_POLL_LIST;
 
 		tio = clone_to_tio(clone);
-		tio->inside_dm_io = false;
+		tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */
 	}
 
 	tio->magic = DM_TIO_MAGIC;
 	tio->io = ci->io;
 	tio->ti = ti;
 	tio->target_bio_nr = target_bio_nr;
-	tio->is_duplicate_bio = false;
 	tio->len_ptr = len;
 	tio->old_sector = 0;
 
@@ -640,7 +641,7 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
 
 static void free_tio(struct bio *clone)
 {
-	if (clone_to_tio(clone)->inside_dm_io)
+	if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO))
 		return;
 	bio_put(clone);
 }
@@ -917,6 +918,12 @@ static void dm_io_complete(struct dm_io *io)
 	}
 }
 
+static inline bool dm_tio_is_normal(struct dm_target_io *tio)
+{
+	return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
+		!dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
+}
+
 /*
  * Decrements the number of outstanding ios that a bio has been
  * cloned into, completing the original io if necc.
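
The new dm_tio_is_normal() helper has one caller in this patch, the bio polling path in the final hunk: polling is only valid on the tio embedded in a dm_io that was not issued as a duplicate bio, so the check there reduces to roughly:

	/* A pollable dm_io must use its embedded, non-duplicate tio. */
	WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));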
@@ -1180,7 +1187,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
 	struct dm_target_io *tio = clone_to_tio(bio);
 	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
 
-	BUG_ON(tio->is_duplicate_bio);
+	BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
 	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
 	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
 	BUG_ON(bi_size > *tio->len_ptr);
@@ -1362,13 +1369,13 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
 		break;
 	case 1:
 		clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
-		clone_to_tio(clone)->is_duplicate_bio = true;
+		dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
 		__map_bio(clone);
 		break;
 	default:
 		alloc_multiple_bios(&blist, ci, ti, num_bios, len);
 		while ((clone = bio_list_pop(&blist))) {
-			clone_to_tio(clone)->is_duplicate_bio = true;
+			dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
 			__map_bio(clone);
 		}
 		break;
@@ -1648,7 +1655,7 @@ static void dm_submit_bio(struct bio *bio)
 static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
 			  unsigned int flags)
 {
-	WARN_ON_ONCE(!io->tio.inside_dm_io);
+	WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));
 
 	/* don't poll if the mapped io is done */
 	if (atomic_read(&io->io_count) > 1)