@@ -490,18 +490,13 @@ u64 dm_start_time_ns_from_clone(struct bio *bio)
490490}
491491EXPORT_SYMBOL_GPL (dm_start_time_ns_from_clone );
492492
493- static inline bool bio_is_flush_with_data (struct bio * bio )
494- {
495- return ((bio -> bi_opf & REQ_PREFLUSH ) && bio -> bi_iter .bi_size );
496- }
497-
498493static inline unsigned int dm_io_sectors (struct dm_io * io , struct bio * bio )
499494{
500495 /*
501496 * If REQ_PREFLUSH set, don't account payload, it will be
502497 * submitted (and accounted) after this flush completes.
503498 */
504- if (bio_is_flush_with_data ( bio ) )
499+ if (io -> requeue_flush_with_data )
505500 return 0 ;
506501 if (unlikely (dm_io_flagged (io , DM_IO_WAS_SPLIT )))
507502 return io -> sectors ;
@@ -590,6 +585,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t g
590585 io = container_of (tio , struct dm_io , tio );
591586 io -> magic = DM_IO_MAGIC ;
592587 io -> status = BLK_STS_OK ;
588+ io -> requeue_flush_with_data = false;
593589
594590 /* one ref is for submission, the other is for completion */
595591 atomic_set (& io -> io_count , 2 );
@@ -948,6 +944,7 @@ static void __dm_io_complete(struct dm_io *io, bool first_stage)
948944 struct mapped_device * md = io -> md ;
949945 blk_status_t io_error ;
950946 bool requeued ;
947+ bool requeue_flush_with_data ;
951948
952949 requeued = dm_handle_requeue (io , first_stage );
953950 if (requeued && first_stage )
@@ -964,6 +961,7 @@ static void __dm_io_complete(struct dm_io *io, bool first_stage)
964961 __dm_start_io_acct (io );
965962 dm_end_io_acct (io );
966963 }
964+ requeue_flush_with_data = io -> requeue_flush_with_data ;
967965 free_io (io );
968966 smp_wmb ();
969967 this_cpu_dec (* md -> pending_io );
@@ -976,7 +974,7 @@ static void __dm_io_complete(struct dm_io *io, bool first_stage)
976974 if (requeued )
977975 return ;
978976
979- if (bio_is_flush_with_data ( bio )) {
977+ if (unlikely ( requeue_flush_with_data )) {
980978 /*
981979 * Preflush done for flush with data, reissue
982980 * without REQ_PREFLUSH.
@@ -1996,12 +1994,30 @@ static void dm_split_and_process_bio(struct mapped_device *md,
19961994 }
19971995 init_clone_info (& ci , io , map , bio , is_abnormal );
19981996
1999- if (bio -> bi_opf & REQ_PREFLUSH ) {
1997+ if (unlikely ((bio -> bi_opf & REQ_PREFLUSH ) != 0 )) {
1998+ /*
1999+ * The "flush_bypasses_map" is set on targets where it is safe
2000+ * to skip the map function and submit bios directly to the
2001+ * underlying block devices - currently, it is set for dm-linear
2002+ * and dm-stripe.
2003+ *
2004+ * If we have just one underlying device (i.e. there is one
2005+ * linear target or multiple linear targets pointing to the same
2006+ * device), we can send the flush with data directly to it.
2007+ */
2008+ if (map -> flush_bypasses_map ) {
2009+ struct list_head * devices = dm_table_get_devices (map );
2010+ if (devices -> next == devices -> prev )
2011+ goto send_preflush_with_data ;
2012+ }
2013+ if (bio -> bi_iter .bi_size )
2014+ io -> requeue_flush_with_data = true;
20002015 __send_empty_flush (& ci );
20012016 /* dm_io_complete submits any data associated with flush */
20022017 goto out ;
20032018 }
20042019
2020+ send_preflush_with_data :
20052021 if (static_branch_unlikely (& zoned_enabled ) &&
20062022 (bio_op (bio ) == REQ_OP_ZONE_RESET_ALL )) {
20072023 error = __send_zone_reset_all (& ci );