@@ -302,6 +302,12 @@ static unsigned int bio_split_alignment(struct bio *bio,
         return lim->logical_block_size;
 }
 
+static inline unsigned int bvec_seg_gap(struct bio_vec *bvprv,
+                struct bio_vec *bv)
+{
+        return bv->bv_offset | (bvprv->bv_offset + bvprv->bv_len);
+}
+
 /**
  * bio_split_io_at - check if and where to split a bio
  * @bio: [in] bio to be split
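
The new bvec_seg_gap() helper ORs the end offset of the previous bvec with the start offset of the next one, so the lowest set bit of the result marks the finest power-of-two alignment at which the joint between two segments sits. A minimal userspace sketch (the struct and the example values are illustrative, not from the patch) of how ffs() turns that OR into a boundary bit:

#include <stdio.h>
#include <strings.h>    /* ffs() */

struct bio_vec { unsigned bv_offset, bv_len; };

static unsigned bvec_seg_gap(struct bio_vec *bvprv, struct bio_vec *bv)
{
    return bv->bv_offset | (bvprv->bv_offset + bvprv->bv_len);
}

int main(void)
{
    /* Two bvecs whose joint falls at offset 0x600 within their pages. */
    struct bio_vec prev = { .bv_offset = 0x000, .bv_len = 0x600 };
    struct bio_vec next = { .bv_offset = 0x600, .bv_len = 0x200 };
    unsigned gaps = bvec_seg_gap(&prev, &next);

    /* gaps = 0x600; ffs() is 1-based, so it returns 10: the joint is
     * 0x200-aligned but not 0x400-aligned, meaning any virt boundary
     * mask of 0x3ff or wider would see a gap here. */
    printf("gaps=%#x ffs=%d\n", gaps, ffs(gaps));
    return 0;
}
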
@@ -319,8 +325,8 @@ int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
                 unsigned *segs, unsigned max_bytes, unsigned len_align_mask)
 {
         struct bio_vec bv, bvprv, *bvprvp = NULL;
+        unsigned nsegs = 0, bytes = 0, gaps = 0;
         struct bvec_iter iter;
-        unsigned nsegs = 0, bytes = 0;
 
         bio_for_each_bvec(bv, bio, iter) {
                 if (bv.bv_offset & lim->dma_alignment ||
@@ -331,8 +337,11 @@ int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
                  * If the queue doesn't support SG gaps and adding this
                  * offset would create a gap, disallow it.
                  */
-                if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
-                        goto split;
+                if (bvprvp) {
+                        if (bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
+                                goto split;
+                        gaps |= bvec_seg_gap(bvprvp, &bv);
+                }
 
                 if (nsegs < lim->max_segments &&
                     bytes + bv.bv_len <= max_bytes &&
@@ -350,6 +359,7 @@ int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
         }
 
         *segs = nsegs;
+        bio->bi_bvec_gap_bit = ffs(gaps);
         return 0;
 split:
         if (bio->bi_opf & REQ_ATOMIC)
@@ -385,6 +395,7 @@ int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
          * big IO can be trivial, disable iopoll when split needed.
          */
         bio_clear_polled(bio);
+        bio->bi_bvec_gap_bit = ffs(gaps);
         return bytes >> SECTOR_SHIFT;
 }
 EXPORT_SYMBOL_GPL(bio_split_io_at);
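
Both exits of bio_split_io_at() now stash ffs(gaps) in the bio, so a later consumer can answer "does any segment joint in this bio cross a given boundary?" without re-walking the bvecs. A hypothetical kernel-style check (bio_fits_boundary() is an assumed name, not part of the patch) might look like:

/* Sketch only: bi_bvec_gap_bit == 0 means no joint was recorded, i.e. no
 * gap at all; otherwise bit (bi_bvec_gap_bit - 1) is the lowest offset bit
 * set at any joint, and the boundary is honoured iff that bit lies
 * entirely above the device's mask. */
static inline bool bio_fits_boundary(struct bio *bio,
                                     unsigned long boundary_mask)
{
        return !bio->bi_bvec_gap_bit ||
               (1UL << (bio->bi_bvec_gap_bit - 1)) > boundary_mask;
}
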
@@ -721,6 +732,21 @@ static bool blk_atomic_write_mergeable_rqs(struct request *rq,
         return (rq->cmd_flags & REQ_ATOMIC) == (next->cmd_flags & REQ_ATOMIC);
 }
 
+u8 bio_seg_gap(struct request_queue *q, struct bio *prev, struct bio *next,
+                u8 gaps_bit)
+{
+        struct bio_vec pb, nb;
+
+        gaps_bit = min_not_zero(gaps_bit, prev->bi_bvec_gap_bit);
+        gaps_bit = min_not_zero(gaps_bit, next->bi_bvec_gap_bit);
+
+        bio_get_last_bvec(prev, &pb);
+        bio_get_first_bvec(next, &nb);
+        if (!biovec_phys_mergeable(q, &pb, &nb))
+                gaps_bit = min_not_zero(gaps_bit, ffs(bvec_seg_gap(&pb, &nb)));
+        return gaps_bit;
+}
+
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
  * For mq with scheduling, the appropriate queue wide lock should be held.
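
bio_seg_gap() folds three sources together: each bio's own recorded bit, plus the joint between the two bios when they are not physically mergeable. min_not_zero() (the kernel macro from linux/minmax.h) is the right combinator because 0 means "no gap recorded" and must never win over a real bit, while among real bits the smallest, i.e. the finest misalignment, is the binding constraint. A userspace restatement with assumed example values:

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
/* Userspace stand-in for the kernel's min_not_zero(); safe here because
 * all arguments are side-effect-free literals. */
#define min_not_zero(x, y) \
        ((x) == 0 ? (y) : ((y) == 0 ? (x) : min((x), (y))))

int main(void)
{
    printf("%u\n", min_not_zero(0u, 10u));  /* 10: a gap-free bio cannot
                                               hide its peer's gap */
    printf("%u\n", min_not_zero(12u, 10u)); /* 10: the finer misalignment
                                               is the binding one */
    printf("%u\n", min_not_zero(0u, 0u));   /*  0: still no gap anywhere */
    return 0;
}
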
@@ -785,6 +811,9 @@ static struct request *attempt_merge(struct request_queue *q,
         if (next->start_time_ns < req->start_time_ns)
                 req->start_time_ns = next->start_time_ns;
 
+        req->phys_gap_bit = bio_seg_gap(req->q, req->biotail, next->bio,
+                        min_not_zero(next->phys_gap_bit,
+                                     req->phys_gap_bit));
         req->biotail->bi_next = next->bio;
         req->biotail = next->biotail;
 
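
One ordering detail worth calling out in attempt_merge(): the gap bit has to be computed while req->biotail still points at req's final bio, since bio_seg_gap() inspects the joint between the two bio lists. Restating the hunk above with comments added (same statements, no new code):

/* Joint check first, while req->biotail is still req's last bio ... */
req->phys_gap_bit = bio_seg_gap(req->q, req->biotail, next->bio,
                                min_not_zero(next->phys_gap_bit,
                                             req->phys_gap_bit));
/* ... only then splice the lists and advance the tail. */
req->biotail->bi_next = next->bio;
req->biotail = next->biotail;
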
@@ -908,6 +937,8 @@ enum bio_merge_status bio_attempt_back_merge(struct request *req,
         if (req->rq_flags & RQF_ZONE_WRITE_PLUGGING)
                 blk_zone_write_plug_bio_merged(bio);
 
+        req->phys_gap_bit = bio_seg_gap(req->q, req->biotail, bio,
+                        req->phys_gap_bit);
         req->biotail->bi_next = bio;
         req->biotail = bio;
         req->__data_len += bio->bi_iter.bi_size;
@@ -942,6 +973,8 @@ static enum bio_merge_status bio_attempt_front_merge(struct request *req,
 
         blk_update_mixed_merge(req, bio, true);
 
+        req->phys_gap_bit = bio_seg_gap(req->q, bio, req->bio,
+                        req->phys_gap_bit);
         bio->bi_next = req->bio;
         req->bio = bio;
 
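
The two single-bio merge paths mirror each other: the back merge checks the joint (req->biotail, bio), while the front merge checks (bio, req->bio), since the incoming bio becomes the new head and the old head supplies the other side of the joint. Side by side (restated from the hunks above):

/* back merge: new bio appended after the current tail */
req->phys_gap_bit = bio_seg_gap(req->q, req->biotail, bio,
                                req->phys_gap_bit);

/* front merge: new bio prepended before the current head */
req->phys_gap_bit = bio_seg_gap(req->q, bio, req->bio,
                                req->phys_gap_bit);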