@@ -10,23 +10,14 @@ static bool blk_map_iter_next(struct request *req, struct blk_map_iter *iter)
1010 unsigned int max_size ;
1111 struct bio_vec bv ;
1212
13- if (req -> rq_flags & RQF_SPECIAL_PAYLOAD ) {
14- if (!iter -> bio )
15- return false;
16- iter -> paddr = bvec_phys (& req -> special_vec );
17- iter -> len = req -> special_vec .bv_len ;
18- iter -> bio = NULL ;
19- return true;
20- }
21-
2213 if (!iter -> iter .bi_size )
2314 return false;
2415
25- bv = mp_bvec_iter_bvec (iter -> bio -> bi_io_vec , iter -> iter );
16+ bv = mp_bvec_iter_bvec (iter -> bvec , iter -> iter );
2617 iter -> paddr = bvec_phys (& bv );
2718 max_size = get_max_segment_size (& req -> q -> limits , iter -> paddr , UINT_MAX );
2819 bv .bv_len = min (bv .bv_len , max_size );
29- bio_advance_iter_single (iter -> bio , & iter -> iter , bv .bv_len );
20+ bvec_iter_advance_single (iter -> bvec , & iter -> iter , bv .bv_len );
3021
3122 /*
3223 * If we are entirely done with this bi_io_vec entry, check if the next
@@ -37,19 +28,20 @@ static bool blk_map_iter_next(struct request *req, struct blk_map_iter *iter)
3728 struct bio_vec next ;
3829
3930 if (!iter -> iter .bi_size ) {
40- if (!iter -> bio -> bi_next )
31+ if (!iter -> bio || ! iter -> bio -> bi_next )
4132 break ;
4233 iter -> bio = iter -> bio -> bi_next ;
4334 iter -> iter = iter -> bio -> bi_iter ;
35+ iter -> bvec = iter -> bio -> bi_io_vec ;
4436 }
4537
46- next = mp_bvec_iter_bvec (iter -> bio -> bi_io_vec , iter -> iter );
38+ next = mp_bvec_iter_bvec (iter -> bvec , iter -> iter );
4739 if (bv .bv_len + next .bv_len > max_size ||
4840 !biovec_phys_mergeable (req -> q , & bv , & next ))
4941 break ;
5042
5143 bv .bv_len += next .bv_len ;
52- bio_advance_iter_single (iter -> bio , & iter -> iter , next .bv_len );
44+ bvec_iter_advance_single (iter -> bvec , & iter -> iter , next .bv_len );
5345 }
5446
5547 iter -> len = bv .bv_len ;
@@ -119,6 +111,30 @@ static bool blk_rq_dma_map_iova(struct request *req, struct device *dma_dev,
119111 return true;
120112}
121113
114+ static struct blk_map_iter blk_rq_map_iter (struct request * rq )
115+ {
116+ struct bio * bio = rq -> bio ;
117+
118+ if (rq -> rq_flags & RQF_SPECIAL_PAYLOAD ) {
119+ return (struct blk_map_iter ) {
120+ .bvec = & rq -> special_vec ,
121+ .iter = {
122+ .bi_size = rq -> special_vec .bv_len ,
123+ }
124+ };
125+ }
126+
127+ /* the internal flush request may not have bio attached */
128+ if (!bio )
129+ return (struct blk_map_iter ) {};
130+
131+ return (struct blk_map_iter ) {
132+ .bio = bio ,
133+ .bvec = bio -> bi_io_vec ,
134+ .iter = bio -> bi_iter ,
135+ };
136+ }
137+
122138/**
123139 * blk_rq_dma_map_iter_start - map the first DMA segment for a request
124140 * @req: request to map
@@ -146,10 +162,9 @@ bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
146162{
147163 unsigned int total_len = blk_rq_payload_bytes (req );
148164
149- iter -> iter .bio = req -> bio ;
150- iter -> iter .iter = req -> bio -> bi_iter ;
151165 memset (& iter -> p2pdma , 0 , sizeof (iter -> p2pdma ));
152166 iter -> status = BLK_STS_OK ;
167+ iter -> iter = blk_rq_map_iter (req );
153168
154169 /*
155170 * Grab the first segment ASAP because we'll need it to check for P2P
@@ -237,16 +252,9 @@ blk_next_sg(struct scatterlist **sg, struct scatterlist *sglist)
237252int __blk_rq_map_sg (struct request * rq , struct scatterlist * sglist ,
238253 struct scatterlist * * last_sg )
239254{
240- struct bio * bio = rq -> bio ;
241- struct blk_map_iter iter = {
242- .bio = bio ,
243- };
255+ struct blk_map_iter iter = blk_rq_map_iter (rq );
244256 int nsegs = 0 ;
245257
246- /* the internal flush request may not have bio attached */
247- if (bio )
248- iter .iter = bio -> bi_iter ;
249-
250258 while (blk_map_iter_next (rq , & iter )) {
251259 * last_sg = blk_next_sg (last_sg , sglist );
252260 sg_set_page (* last_sg , phys_to_page (iter .paddr ), iter .len ,