Commit 168ad18

keithbusch authored and kawasaki committed
blk-mq-dma: introduce blk_map_iter
Create a type that fully captures the lower-level physical address iteration.

Signed-off-by: Keith Busch <[email protected]>
1 parent 36a8aec commit 168ad18

2 files changed: 44 additions & 46 deletions
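Before the diff, the shape of the change in one place: the commit folds the output of each iteration step (physical address and length, previously returned through a separate `struct phys_vec` out-parameter) into the iterator state itself. The struct below is the new type exactly as added in include/linux/blk-mq-dma.h; the comments are editorial annotations, not part of the patch:

```c
struct blk_map_iter {
	/* output of the most recent iteration step */
	phys_addr_t paddr;
	u32 len;
	/* internal cursor over the request's bios */
	struct bvec_iter iter;
	struct bio *bio;
};
```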

block/blk-mq-dma.c
Lines changed: 36 additions & 45 deletions
```diff
--- a/block/blk-mq-dma.c
+++ b/block/blk-mq-dma.c
@@ -5,22 +5,16 @@
 #include <linux/blk-mq-dma.h>
 #include "blk.h"
 
-struct phys_vec {
-	phys_addr_t paddr;
-	u32 len;
-};
-
-static bool blk_map_iter_next(struct request *req, struct req_iterator *iter,
-		struct phys_vec *vec)
+static bool blk_map_iter_next(struct request *req, struct blk_map_iter *iter)
 {
 	unsigned int max_size;
 	struct bio_vec bv;
 
 	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
 		if (!iter->bio)
 			return false;
-		vec->paddr = bvec_phys(&req->special_vec);
-		vec->len = req->special_vec.bv_len;
+		iter->paddr = bvec_phys(&req->special_vec);
+		iter->len = req->special_vec.bv_len;
 		iter->bio = NULL;
 		return true;
 	}
@@ -29,8 +23,8 @@ static bool blk_map_iter_next(struct request *req, struct req_iterator *iter,
 		return false;
 
 	bv = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
-	vec->paddr = bvec_phys(&bv);
-	max_size = get_max_segment_size(&req->q->limits, vec->paddr, UINT_MAX);
+	iter->paddr = bvec_phys(&bv);
+	max_size = get_max_segment_size(&req->q->limits, iter->paddr, UINT_MAX);
 	bv.bv_len = min(bv.bv_len, max_size);
 	bio_advance_iter_single(iter->bio, &iter->iter, bv.bv_len);
 
@@ -58,7 +52,7 @@ static bool blk_map_iter_next(struct request *req, struct req_iterator *iter,
 		bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len);
 	}
 
-	vec->len = bv.bv_len;
+	iter->len = bv.bv_len;
 	return true;
 }
 
@@ -77,29 +71,29 @@ static inline bool blk_can_dma_map_iova(struct request *req,
 			dma_get_merge_boundary(dma_dev));
 }
 
-static bool blk_dma_map_bus(struct blk_dma_iter *iter, struct phys_vec *vec)
+static bool blk_dma_map_bus(struct blk_dma_iter *iter)
 {
-	iter->addr = pci_p2pdma_bus_addr_map(&iter->p2pdma, vec->paddr);
-	iter->len = vec->len;
+	iter->addr = pci_p2pdma_bus_addr_map(&iter->p2pdma, iter->iter.paddr);
+	iter->len = iter->iter.len;
 	return true;
 }
 
 static bool blk_dma_map_direct(struct request *req, struct device *dma_dev,
-		struct blk_dma_iter *iter, struct phys_vec *vec)
+		struct blk_dma_iter *iter)
 {
-	iter->addr = dma_map_page(dma_dev, phys_to_page(vec->paddr),
-			offset_in_page(vec->paddr), vec->len, rq_dma_dir(req));
+	iter->addr = dma_map_page(dma_dev, phys_to_page(iter->iter.paddr),
+			offset_in_page(iter->iter.paddr), iter->iter.len,
+			rq_dma_dir(req));
 	if (dma_mapping_error(dma_dev, iter->addr)) {
 		iter->status = BLK_STS_RESOURCE;
 		return false;
 	}
-	iter->len = vec->len;
+	iter->len = iter->iter.len;
 	return true;
 }
 
 static bool blk_rq_dma_map_iova(struct request *req, struct device *dma_dev,
-		struct dma_iova_state *state, struct blk_dma_iter *iter,
-		struct phys_vec *vec)
+		struct dma_iova_state *state, struct blk_dma_iter *iter)
 {
 	enum dma_data_direction dir = rq_dma_dir(req);
 	unsigned int mapped = 0;
@@ -109,12 +103,12 @@ static bool blk_rq_dma_map_iova(struct request *req, struct device *dma_dev,
 	iter->len = dma_iova_size(state);
 
 	do {
-		error = dma_iova_link(dma_dev, state, vec->paddr, mapped,
-				vec->len, dir, 0);
+		error = dma_iova_link(dma_dev, state, iter->iter.paddr, mapped,
+				iter->iter.len, dir, 0);
 		if (error)
 			break;
-		mapped += vec->len;
-	} while (blk_map_iter_next(req, &iter->iter, vec));
+		mapped += iter->iter.len;
+	} while (blk_map_iter_next(req, &iter->iter));
 
 	error = dma_iova_sync(dma_dev, state, 0, mapped);
 	if (error) {
@@ -151,7 +145,6 @@ bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
 		struct dma_iova_state *state, struct blk_dma_iter *iter)
 {
 	unsigned int total_len = blk_rq_payload_bytes(req);
-	struct phys_vec vec;
 
 	iter->iter.bio = req->bio;
 	iter->iter.iter = req->bio->bi_iter;
@@ -162,14 +155,14 @@ bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
 	 * Grab the first segment ASAP because we'll need it to check for P2P
 	 * transfers.
 	 */
-	if (!blk_map_iter_next(req, &iter->iter, &vec))
+	if (!blk_map_iter_next(req, &iter->iter))
 		return false;
 
 	if (IS_ENABLED(CONFIG_PCI_P2PDMA) && (req->cmd_flags & REQ_P2PDMA)) {
 		switch (pci_p2pdma_state(&iter->p2pdma, dma_dev,
-					 phys_to_page(vec.paddr))) {
+					 phys_to_page(iter->iter.paddr))) {
 		case PCI_P2PDMA_MAP_BUS_ADDR:
-			return blk_dma_map_bus(iter, &vec);
+			return blk_dma_map_bus(iter);
 		case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
 			/*
 			 * P2P transfers through the host bridge are treated the
@@ -184,9 +177,9 @@ bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
 	}
 
 	if (blk_can_dma_map_iova(req, dma_dev) &&
-	    dma_iova_try_alloc(dma_dev, state, vec.paddr, total_len))
-		return blk_rq_dma_map_iova(req, dma_dev, state, iter, &vec);
-	return blk_dma_map_direct(req, dma_dev, iter, &vec);
+	    dma_iova_try_alloc(dma_dev, state, iter->iter.paddr, total_len))
+		return blk_rq_dma_map_iova(req, dma_dev, state, iter);
+	return blk_dma_map_direct(req, dma_dev, iter);
 }
 EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_start);
 
@@ -211,14 +204,12 @@ EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_start);
 bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
 		struct dma_iova_state *state, struct blk_dma_iter *iter)
 {
-	struct phys_vec vec;
-
-	if (!blk_map_iter_next(req, &iter->iter, &vec))
+	if (!blk_map_iter_next(req, &iter->iter))
 		return false;
 
 	if (iter->p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR)
-		return blk_dma_map_bus(iter, &vec);
-	return blk_dma_map_direct(req, dma_dev, iter, &vec);
+		return blk_dma_map_bus(iter);
+	return blk_dma_map_direct(req, dma_dev, iter);
 }
 EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_next);
 
@@ -246,20 +237,20 @@ blk_next_sg(struct scatterlist **sg, struct scatterlist *sglist)
 int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
 		struct scatterlist **last_sg)
 {
-	struct req_iterator iter = {
-		.bio = rq->bio,
+	struct bio *bio = rq->bio;
+	struct blk_map_iter iter = {
+		.bio = bio,
 	};
-	struct phys_vec vec;
 	int nsegs = 0;
 
 	/* the internal flush request may not have bio attached */
-	if (iter.bio)
-		iter.iter = iter.bio->bi_iter;
+	if (bio)
+		iter.iter = bio->bi_iter;
 
-	while (blk_map_iter_next(rq, &iter, &vec)) {
+	while (blk_map_iter_next(rq, &iter)) {
 		*last_sg = blk_next_sg(last_sg, sglist);
-		sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len,
-			    offset_in_page(vec.paddr));
+		sg_set_page(*last_sg, phys_to_page(iter.paddr), iter.len,
+			    offset_in_page(iter.paddr));
 		nsegs++;
 	}
 
```
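For context on how the refactored entry points fit together, here is a hedged sketch of the driver-side consumption pattern implied by the signatures above. `example_map_request()` and its hardware-programming step are hypothetical; only the `blk_rq_dma_map_iter_start()`/`blk_rq_dma_map_iter_next()` calls and the `iter.addr`/`iter.len`/`iter.status` fields come from the code in this commit:

```c
/*
 * Hypothetical consumer (not part of this patch): map a request and
 * walk the resulting DMA address ranges one segment at a time.
 */
static blk_status_t example_map_request(struct request *req,
		struct device *dma_dev, struct dma_iova_state *state)
{
	struct blk_dma_iter iter;

	if (!blk_rq_dma_map_iter_start(req, dma_dev, state, &iter))
		return iter.status;	/* reports any mapping failure */

	do {
		/* program iter.addr / iter.len into the device here */
	} while (blk_rq_dma_map_iter_next(req, dma_dev, state, &iter));

	return iter.status;
}
```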

include/linux/blk-mq-dma.h
Lines changed: 8 additions & 1 deletion
```diff
--- a/include/linux/blk-mq-dma.h
+++ b/include/linux/blk-mq-dma.h
@@ -5,6 +5,13 @@
 #include <linux/blk-mq.h>
 #include <linux/pci-p2pdma.h>
 
+struct blk_map_iter {
+	phys_addr_t paddr;
+	u32 len;
+	struct bvec_iter iter;
+	struct bio *bio;
+};
+
 struct blk_dma_iter {
 	/* Output address range for this iteration */
 	dma_addr_t addr;
@@ -14,7 +21,7 @@ struct blk_dma_iter {
 	blk_status_t status;
 
 	/* Internal to blk_rq_dma_map_iter_* */
-	struct req_iterator iter;
+	struct blk_map_iter iter;
 	struct pci_p2pdma_map_state p2pdma;
 };
 
```
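The net effect on call sites, condensed from the hunks above into a before/after fragment (`consume()` is a hypothetical stand-in for whatever the caller does with each segment):

```c
/* Before this commit: results come back through an out-parameter. */
struct phys_vec vec;

while (blk_map_iter_next(rq, &iter, &vec))
	consume(vec.paddr, vec.len);

/* After this commit: the iterator carries its own output fields. */
while (blk_map_iter_next(rq, &iter))
	consume(iter.paddr, iter.len);
```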
