@@ -120,44 +120,16 @@ static bool blk_rq_dma_map_iova(struct request *req, struct device *dma_dev,
120120 return true;
121121}
122122
123- /**
124- * blk_rq_dma_map_iter_start - map the first DMA segment for a request
125- * @req: request to map
126- * @dma_dev: device to map to
127- * @state: DMA IOVA state
128- * @iter: block layer DMA iterator
129- *
130- * Start DMA mapping @req to @dma_dev. @state and @iter are provided by the
131- * caller and don't need to be initialized. @state needs to be stored for use
132- * at unmap time, @iter is only needed at map time.
133- *
134- * Returns %false if there is no segment to map, including due to an error, or
135- * %true if it did map a segment.
136- *
137- * If a segment was mapped, the DMA address for it is returned in @iter.addr and
138- * the length in @iter.len. If no segment was mapped the status code is
139- * returned in @iter.status.
140- *
141- * The caller can call blk_rq_dma_map_coalesce() to check if further segments
142- * need to be mapped after this, or go straight to blk_rq_dma_map_iter_next()
143- * to try to map the following segments.
144- */
145- bool blk_rq_dma_map_iter_start (struct request * req , struct device * dma_dev ,
146- struct dma_iova_state * state , struct blk_dma_iter * iter )
123+ static bool blk_dma_map_iter_start (struct request * req , struct device * dma_dev ,
124+ struct dma_iova_state * state , struct blk_dma_iter * iter ,
125+ unsigned int total_len )
147126{
148- unsigned int total_len = blk_rq_payload_bytes (req );
149127 struct phys_vec vec ;
150128
151129 iter -> bio = req -> bio ;
152- iter -> iter = req -> bio -> bi_iter ;
153130 memset (& iter -> p2pdma , 0 , sizeof (iter -> p2pdma ));
154131 iter -> status = BLK_STS_OK ;
155132
156- if (req -> rq_flags & RQF_SPECIAL_PAYLOAD )
157- iter -> bvec = & req -> special_vec ;
158- else
159- iter -> bvec = req -> bio -> bi_io_vec ;
160-
161133 /*
162134 * Grab the first segment ASAP because we'll need it to check for P2P
163135 * transfers.
@@ -186,6 +158,41 @@ bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
186158 return blk_rq_dma_map_iova (req , dma_dev , state , iter , & vec );
187159 return blk_dma_map_direct (req , dma_dev , iter , & vec );
188160}
161+
162+ /**
163+ * blk_rq_dma_map_iter_start - map the first DMA segment for a request
164+ * @req: request to map
165+ * @dma_dev: device to map to
166+ * @state: DMA IOVA state
167+ * @iter: block layer DMA iterator
168+ *
169+ * Start DMA mapping @req to @dma_dev. @state and @iter are provided by the
170+ * caller and don't need to be initialized. @state needs to be stored for use
171+ * at unmap time, @iter is only needed at map time.
172+ *
173+ * Returns %false if there is no segment to map, including due to an error, or
174+ * %true if it did map a segment.
175+ *
176+ * If a segment was mapped, the DMA address for it is returned in @iter.addr and
177+ * the length in @iter.len. If no segment was mapped the status code is
178+ * returned in @iter.status.
179+ *
180+ * The caller can call blk_rq_dma_map_coalesce() to check if further segments
181+ * need to be mapped after this, or go straight to blk_rq_dma_map_iter_next()
182+ * to try to map the following segments.
183+ */
184+ bool blk_rq_dma_map_iter_start (struct request * req , struct device * dma_dev ,
185+ struct dma_iova_state * state , struct blk_dma_iter * iter )
186+ {
187+ iter -> iter = req -> bio -> bi_iter ;
188+ if (req -> rq_flags & RQF_SPECIAL_PAYLOAD )
189+ iter -> bvec = & req -> special_vec ;
190+ else
191+ iter -> bvec = req -> bio -> bi_io_vec ;
192+
193+ return blk_dma_map_iter_start (req , dma_dev , state , iter ,
194+ blk_rq_payload_bytes (req ));
195+ }
189196EXPORT_SYMBOL_GPL (blk_rq_dma_map_iter_start );
190197
191198/**
0 commit comments