Skip to content

Commit 06bc4e2

Browse files
committed
Merge tag 'block-6.19-20260205' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux
Pull block fixes from Jens Axboe:

 - Revert of a change for loop, which caused regressions for some users
   (actually a revert of two commits, where one is just an existing fix
   for the offending commit)

 - NVMe pull via Keith:
     - Fix NULL pointer access setting up DMA mappings
     - Fix invalid memory access from a malformed TCP PDU

* tag 'block-6.19-20260205' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  loop: revert exclusive opener loop status change
  nvmet-tcp: add bounds checks in nvmet_tcp_build_pdu_iovec
  nvme-pci: handle changing device dma map requirements
2 parents 92f778a + a6abd64 commit 06bc4e2

3 files changed

Lines changed: 59 additions & 48 deletions

File tree

drivers/block/loop.c

Lines changed: 12 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -1225,28 +1225,16 @@ static int loop_clr_fd(struct loop_device *lo)
12251225
}
12261226

12271227
static int
1228-
loop_set_status(struct loop_device *lo, blk_mode_t mode,
1229-
struct block_device *bdev, const struct loop_info64 *info)
1228+
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
12301229
{
12311230
int err;
12321231
bool partscan = false;
12331232
bool size_changed = false;
12341233
unsigned int memflags;
12351234

1236-
/*
1237-
* If we don't hold exclusive handle for the device, upgrade to it
1238-
* here to avoid changing device under exclusive owner.
1239-
*/
1240-
if (!(mode & BLK_OPEN_EXCL)) {
1241-
err = bd_prepare_to_claim(bdev, loop_set_status, NULL);
1242-
if (err)
1243-
goto out_reread_partitions;
1244-
}
1245-
12461235
err = mutex_lock_killable(&lo->lo_mutex);
12471236
if (err)
1248-
goto out_abort_claiming;
1249-
1237+
return err;
12501238
if (lo->lo_state != Lo_bound) {
12511239
err = -ENXIO;
12521240
goto out_unlock;
@@ -1285,10 +1273,6 @@ loop_set_status(struct loop_device *lo, blk_mode_t mode,
12851273
}
12861274
out_unlock:
12871275
mutex_unlock(&lo->lo_mutex);
1288-
out_abort_claiming:
1289-
if (!(mode & BLK_OPEN_EXCL))
1290-
bd_abort_claiming(bdev, loop_set_status);
1291-
out_reread_partitions:
12921276
if (partscan)
12931277
loop_reread_partitions(lo);
12941278

@@ -1368,29 +1352,25 @@ loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
13681352
}
13691353

13701354
static int
1371-
loop_set_status_old(struct loop_device *lo, blk_mode_t mode,
1372-
struct block_device *bdev,
1373-
const struct loop_info __user *arg)
1355+
loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
13741356
{
13751357
struct loop_info info;
13761358
struct loop_info64 info64;
13771359

13781360
if (copy_from_user(&info, arg, sizeof (struct loop_info)))
13791361
return -EFAULT;
13801362
loop_info64_from_old(&info, &info64);
1381-
return loop_set_status(lo, mode, bdev, &info64);
1363+
return loop_set_status(lo, &info64);
13821364
}
13831365

13841366
static int
1385-
loop_set_status64(struct loop_device *lo, blk_mode_t mode,
1386-
struct block_device *bdev,
1387-
const struct loop_info64 __user *arg)
1367+
loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
13881368
{
13891369
struct loop_info64 info64;
13901370

13911371
if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
13921372
return -EFAULT;
1393-
return loop_set_status(lo, mode, bdev, &info64);
1373+
return loop_set_status(lo, &info64);
13941374
}
13951375

13961376
static int
@@ -1569,14 +1549,14 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
15691549
case LOOP_SET_STATUS:
15701550
err = -EPERM;
15711551
if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
1572-
err = loop_set_status_old(lo, mode, bdev, argp);
1552+
err = loop_set_status_old(lo, argp);
15731553
break;
15741554
case LOOP_GET_STATUS:
15751555
return loop_get_status_old(lo, argp);
15761556
case LOOP_SET_STATUS64:
15771557
err = -EPERM;
15781558
if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
1579-
err = loop_set_status64(lo, mode, bdev, argp);
1559+
err = loop_set_status64(lo, argp);
15801560
break;
15811561
case LOOP_GET_STATUS64:
15821562
return loop_get_status64(lo, argp);
@@ -1670,17 +1650,16 @@ loop_info64_to_compat(const struct loop_info64 *info64,
16701650
}
16711651

16721652
static int
1673-
loop_set_status_compat(struct loop_device *lo, blk_mode_t mode,
1674-
struct block_device *bdev,
1675-
const struct compat_loop_info __user *arg)
1653+
loop_set_status_compat(struct loop_device *lo,
1654+
const struct compat_loop_info __user *arg)
16761655
{
16771656
struct loop_info64 info64;
16781657
int ret;
16791658

16801659
ret = loop_info64_from_compat(arg, &info64);
16811660
if (ret < 0)
16821661
return ret;
1683-
return loop_set_status(lo, mode, bdev, &info64);
1662+
return loop_set_status(lo, &info64);
16841663
}
16851664

16861665
static int
@@ -1706,7 +1685,7 @@ static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
17061685

17071686
switch(cmd) {
17081687
case LOOP_SET_STATUS:
1709-
err = loop_set_status_compat(lo, mode, bdev,
1688+
err = loop_set_status_compat(lo,
17101689
(const struct compat_loop_info __user *)arg);
17111690
break;
17121691
case LOOP_GET_STATUS:

drivers/nvme/host/pci.c

Lines changed: 30 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -816,6 +816,32 @@ static void nvme_unmap_data(struct request *req)
816816
nvme_free_descriptors(req);
817817
}
818818

819+
static bool nvme_pci_prp_save_mapping(struct request *req,
820+
struct device *dma_dev,
821+
struct blk_dma_iter *iter)
822+
{
823+
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
824+
825+
if (dma_use_iova(&iod->dma_state) || !dma_need_unmap(dma_dev))
826+
return true;
827+
828+
if (!iod->nr_dma_vecs) {
829+
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
830+
831+
iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
832+
GFP_ATOMIC);
833+
if (!iod->dma_vecs) {
834+
iter->status = BLK_STS_RESOURCE;
835+
return false;
836+
}
837+
}
838+
839+
iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
840+
iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
841+
iod->nr_dma_vecs++;
842+
return true;
843+
}
844+
819845
static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
820846
struct blk_dma_iter *iter)
821847
{
@@ -825,12 +851,7 @@ static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
825851
return true;
826852
if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter))
827853
return false;
828-
if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) {
829-
iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
830-
iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
831-
iod->nr_dma_vecs++;
832-
}
833-
return true;
854+
return nvme_pci_prp_save_mapping(req, dma_dev, iter);
834855
}
835856

836857
static blk_status_t nvme_pci_setup_data_prp(struct request *req,
@@ -843,15 +864,8 @@ static blk_status_t nvme_pci_setup_data_prp(struct request *req,
843864
unsigned int prp_len, i;
844865
__le64 *prp_list;
845866

846-
if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(nvmeq->dev->dev)) {
847-
iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
848-
GFP_ATOMIC);
849-
if (!iod->dma_vecs)
850-
return BLK_STS_RESOURCE;
851-
iod->dma_vecs[0].addr = iter->addr;
852-
iod->dma_vecs[0].len = iter->len;
853-
iod->nr_dma_vecs = 1;
854-
}
867+
if (!nvme_pci_prp_save_mapping(req, nvmeq->dev->dev, iter))
868+
return iter->status;
855869

856870
/*
857871
* PRP1 always points to the start of the DMA transfers.
@@ -1219,6 +1233,7 @@ static blk_status_t nvme_prep_rq(struct request *req)
12191233
iod->nr_descriptors = 0;
12201234
iod->total_len = 0;
12211235
iod->meta_total_len = 0;
1236+
iod->nr_dma_vecs = 0;
12221237

12231238
ret = nvme_setup_cmd(req->q->queuedata, req);
12241239
if (ret)

drivers/nvme/target/tcp.c

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -349,28 +349,45 @@ static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
349349
cmd->req.sg = NULL;
350350
}
351351

352+
static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue);
353+
352354
static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
353355
{
354356
struct bio_vec *iov = cmd->iov;
355357
struct scatterlist *sg;
356358
u32 length, offset, sg_offset;
359+
unsigned int sg_remaining;
357360
int nr_pages;
358361

359362
length = cmd->pdu_len;
360363
nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
361364
offset = cmd->rbytes_done;
362365
cmd->sg_idx = offset / PAGE_SIZE;
363366
sg_offset = offset % PAGE_SIZE;
367+
if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt) {
368+
nvmet_tcp_fatal_error(cmd->queue);
369+
return;
370+
}
364371
sg = &cmd->req.sg[cmd->sg_idx];
372+
sg_remaining = cmd->req.sg_cnt - cmd->sg_idx;
365373

366374
while (length) {
375+
if (!sg_remaining) {
376+
nvmet_tcp_fatal_error(cmd->queue);
377+
return;
378+
}
379+
if (!sg->length || sg->length <= sg_offset) {
380+
nvmet_tcp_fatal_error(cmd->queue);
381+
return;
382+
}
367383
u32 iov_len = min_t(u32, length, sg->length - sg_offset);
368384

369385
bvec_set_page(iov, sg_page(sg), iov_len,
370386
sg->offset + sg_offset);
371387

372388
length -= iov_len;
373389
sg = sg_next(sg);
390+
sg_remaining--;
374391
iov++;
375392
sg_offset = 0;
376393
}

0 commit comments

Comments (0)