Skip to content

Commit 071be3b

Browse files
committed
nvme-pci: handle changing device dma map requirements
The initial state of dma_need_unmap() may be false, but change to true while mapping the data iterator. Enabling swiotlb is one such case that can change the result. The nvme driver needs to save the mapped dma vectors to be unmapped later, so allocate as needed during iteration rather than assume it was always allocated at the beginning. This fixes a NULL dereference from accessing an uninitialized dma_vecs when the device dma unmapping requirements change mid-iteration.

Fixes: b8b7570 ("nvme-pci: fix dma unmapping when using PRPs and not using the IOVA mapping")
Link: https://lore.kernel.org/linux-nvme/[email protected]/
Reported-by: Pradeep P V K <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: Keith Busch <[email protected]>
1 parent 4da7c5c commit 071be3b

1 file changed

Lines changed: 30 additions & 15 deletions

File tree

drivers/nvme/host/pci.c

Lines changed: 30 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -816,6 +816,32 @@ static void nvme_unmap_data(struct request *req)
816816
nvme_free_descriptors(req);
817817
}
818818

819+
/*
 * Record the DMA address/length pair from the current step of the PRP
 * mapping iteration so the driver can unmap it later.
 *
 * Nothing needs to be saved when the IOVA path is in use or the device
 * does not require explicit unmapping.  NOTE: dma_need_unmap() may start
 * out false and become true while iterating (per the commit message,
 * enabling swiotlb is one such case), so the dma_vecs array is allocated
 * lazily on the first vector that needs saving instead of once up front.
 *
 * Returns true on success.  Returns false with iter->status set to
 * BLK_STS_RESOURCE if the vector array could not be allocated.
 */
static bool nvme_pci_prp_save_mapping(struct request *req,
		struct device *dma_dev,
		struct blk_dma_iter *iter)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	/* IOVA mappings need no per-vector bookkeeping here. */
	if (dma_use_iova(&iod->dma_state) || !dma_need_unmap(dma_dev))
		return true;

	/* First vector to save: allocate the array on demand. */
	if (!iod->nr_dma_vecs) {
		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;

		iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
				GFP_ATOMIC);
		if (!iod->dma_vecs) {
			iter->status = BLK_STS_RESOURCE;
			return false;
		}
	}

	/* Append the current iterator mapping to the saved vectors. */
	iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
	iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
	iod->nr_dma_vecs++;
	return true;
}
844+
819845
static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
820846
struct blk_dma_iter *iter)
821847
{
@@ -825,12 +851,7 @@ static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
825851
return true;
826852
if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter))
827853
return false;
828-
if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) {
829-
iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
830-
iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
831-
iod->nr_dma_vecs++;
832-
}
833-
return true;
854+
return nvme_pci_prp_save_mapping(req, dma_dev, iter);
834855
}
835856

836857
static blk_status_t nvme_pci_setup_data_prp(struct request *req,
@@ -843,15 +864,8 @@ static blk_status_t nvme_pci_setup_data_prp(struct request *req,
843864
unsigned int prp_len, i;
844865
__le64 *prp_list;
845866

846-
if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(nvmeq->dev->dev)) {
847-
iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
848-
GFP_ATOMIC);
849-
if (!iod->dma_vecs)
850-
return BLK_STS_RESOURCE;
851-
iod->dma_vecs[0].addr = iter->addr;
852-
iod->dma_vecs[0].len = iter->len;
853-
iod->nr_dma_vecs = 1;
854-
}
867+
if (!nvme_pci_prp_save_mapping(req, nvmeq->dev->dev, iter))
868+
return iter->status;
855869

856870
/*
857871
* PRP1 always points to the start of the DMA transfers.
@@ -1219,6 +1233,7 @@ static blk_status_t nvme_prep_rq(struct request *req)
12191233
iod->nr_descriptors = 0;
12201234
iod->total_len = 0;
12211235
iod->meta_total_len = 0;
1236+
iod->nr_dma_vecs = 0;
12221237

12231238
ret = nvme_setup_cmd(req->q->queuedata, req);
12241239
if (ret)

0 commit comments

Comments
 (0)