Skip to content

Commit 32053a2

Browse files
nj-shettykawasaki
authored and committed
null_blk: Add support for REQ_OP_COPY_*
The implementation is based on the existing read and write infrastructure. A new configfs and module parameter, max_copy_bytes, is introduced; it can be used to set the maximum copy limit supported by the hardware/driver. Only the request-based queue mode supports copy offload. Tracefs support for copy I/O tracing has been added. Suggested-by: Damien Le Moal <[email protected]> Signed-off-by: Anuj Gupta <[email protected]> Signed-off-by: Nitesh Shetty <[email protected]> Signed-off-by: Vincent Fu <[email protected]> [ bvanassche: Split nullb_do_copy() into two functions. Added a cond_resched() call inside nullb_do_copy(). ] Signed-off-by: Bart Van Assche <[email protected]>
1 parent b2b44c1 commit 32053a2

3 files changed

Lines changed: 118 additions & 0 deletions

File tree

Documentation/block/null_blk.rst

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -149,3 +149,7 @@ zone_size=[MB]: Default: 256
149149
zone_nr_conv=[nr_conv]: Default: 0
150150
The number of conventional zones to create when block device is zoned. If
151151
zone_nr_conv >= nr_zones, it will be reduced to nr_zones - 1.
152+
153+
max_copy_bytes=[size in bytes]: Default: UINT_MAX
154+
A module and configfs parameter that can be used to set the maximum
copy offload limit supported by the hardware/driver.

drivers/block/null_blk/main.c

Lines changed: 113 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
#include <linux/sched.h>
1010
#include <linux/fs.h>
1111
#include <linux/init.h>
12+
#include <linux/blk-copy.h>
1213
#include "null_blk.h"
1314

1415
#undef pr_fmt
@@ -169,6 +170,10 @@ static int g_max_sectors;
169170
module_param_named(max_sectors, g_max_sectors, int, 0444);
170171
MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)");
171172

173+
static unsigned long g_max_copy_bytes = UINT_MAX;
174+
module_param_named(max_copy_bytes, g_max_copy_bytes, ulong, 0444);
175+
MODULE_PARM_DESC(max_copy_bytes, "Maximum size of a copy command (in bytes)");
176+
172177
static unsigned int nr_devices = 1;
173178
module_param(nr_devices, uint, 0444);
174179
MODULE_PARM_DESC(nr_devices, "Number of devices to register");
@@ -450,6 +455,7 @@ NULLB_DEVICE_ATTR(home_node, uint, NULL);
450455
NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
451456
NULLB_DEVICE_ATTR(blocksize, uint, NULL);
452457
NULLB_DEVICE_ATTR(max_sectors, uint, NULL);
458+
NULLB_DEVICE_ATTR(max_copy_bytes, uint, NULL);
453459
NULLB_DEVICE_ATTR(irqmode, uint, NULL);
454460
NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
455461
NULLB_DEVICE_ATTR(index, uint, NULL);
@@ -601,6 +607,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
601607
&nullb_device_attr_blocksize,
602608
&nullb_device_attr_cache_size,
603609
&nullb_device_attr_completion_nsec,
610+
&nullb_device_attr_max_copy_bytes,
604611
&nullb_device_attr_discard,
605612
&nullb_device_attr_fua,
606613
&nullb_device_attr_home_node,
@@ -805,6 +812,7 @@ static struct nullb_device *null_alloc_dev(void)
805812
dev->queue_mode = g_queue_mode;
806813
dev->blocksize = g_bs;
807814
dev->max_sectors = g_max_sectors;
815+
dev->max_copy_bytes = g_max_copy_bytes;
808816
dev->irqmode = g_irqmode;
809817
dev->hw_queue_depth = g_hw_queue_depth;
810818
dev->blocking = g_blocking;
@@ -1275,6 +1283,96 @@ static blk_status_t null_transfer(struct nullb *nullb, struct page *page,
12751283
return err;
12761284
}
12771285

1286+
/*
 * nullb_copy_sector - copy up to one block of data between two device offsets
 * @nullb:      target null_blk device
 * @sector_in:  source sector
 * @sector_out: destination sector
 * @rem:        number of bytes still to be copied (may exceed one block)
 * @is_fua:     whether the request carried REQ_FUA
 *
 * Copies min(blocksize, @rem) bytes from @sector_in to @sector_out using the
 * device's backing radix-tree pages. Called with interrupts enabled; takes
 * nullb->lock itself via guard().
 *
 * Return: the number of bytes copied, or -1 if a backing page could not be
 * inserted (the caller maps this to BLK_STS_IOERR).
 */
static ssize_t nullb_copy_sector(struct nullb *nullb, sector_t sector_in,
		sector_t sector_out, ssize_t rem, bool is_fua)
{
	struct nullb_page *t_page_in, *t_page_out;
	loff_t offset_in, offset_out;
	void *in, *out;
	ssize_t chunk;

	/* Copy at most one block per call; the caller loops over @rem. */
	chunk = min_t(size_t, nullb->dev->blocksize, rem);
	/* Byte offsets of each sector within its backing page. */
	offset_in = (sector_in & SECTOR_MASK) << SECTOR_SHIFT;
	offset_out = (sector_out & SECTOR_MASK) << SECTOR_SHIFT;

	/* Lock is held (IRQs off) until this function returns. */
	guard(spinlock_irq)(&nullb->lock);

	/* Mirror the write path: make room in the cache unless FUA bypasses it. */
	if (null_cache_active(nullb) && !is_fua)
		null_make_cache_space(nullb, PAGE_SIZE);

	/*
	 * Look up / create backing pages. The boolean selects cache vs.
	 * persistent storage; FUA forces the destination to storage.
	 * NOTE(review): exact null_insert_page() flag semantics assumed from
	 * the existing read/write path — confirm against null_blk.h.
	 */
	t_page_in = null_insert_page(nullb, sector_in,
				     !null_cache_active(nullb));
	if (!t_page_in)
		return -1;
	t_page_out = null_insert_page(nullb, sector_out,
				      !null_cache_active(nullb) || is_fua);
	if (!t_page_out)
		return -1;

	in = kmap_local_page(t_page_in->page);
	out = kmap_local_page(t_page_out->page);
	memcpy(out + offset_out, in + offset_in, chunk);
	/* Unmap in reverse order of mapping, as kmap_local requires. */
	kunmap_local(out);
	kunmap_local(in);

	/* Mark the destination sector as valid data. */
	__set_bit(sector_out & SECTOR_MASK, t_page_out->bitmap);

	/* FUA: drop any stale cached copy of the destination sector. */
	if (is_fua)
		null_free_sector(nullb, sector_out, true);

	return chunk;
}
1325+
1326+
/*
 * nullb_do_copy - handle a copy offload request (REQ_OP_COPY_*)
 * @nullb: target null_blk device
 * @rq:    request carrying one or more COPY_SRC and COPY_DST bios
 *
 * Walks the source and destination bio chains in lockstep, copying one block
 * at a time via nullb_copy_sector() until the total length recorded in the
 * request's copy context has been transferred. Advances to the next bio of a
 * chain whenever the current bio's sector range is exhausted.
 *
 * Return: BLK_STS_OK on success, BLK_STS_IOERR on a malformed request or a
 * failed per-block copy.
 */
static blk_status_t nullb_do_copy(struct nullb *nullb, struct request *rq)
{
	sector_t sector_in, sector_in_end, sector_out, sector_out_end;
	struct bio_copy_offload_ctx *copy_ctx = rq->bio->bi_copy_ctx;
	ssize_t chunk, rem = copy_ctx->len;
	struct bio *src_bio, *dst_bio;

	src_bio = blk_first_copy_bio(rq, REQ_OP_COPY_SRC);
	dst_bio = blk_first_copy_bio(rq, REQ_OP_COPY_DST);

	/* A copy request must carry at least one source and one destination. */
	if (WARN_ON_ONCE(!src_bio || !dst_bio))
		return BLK_STS_IOERR;

	/* [sector, sector_end) range covered by the current bio of each chain. */
	sector_in = src_bio->bi_iter.bi_sector;
	sector_in_end = sector_in + (src_bio->bi_iter.bi_size >> SECTOR_SHIFT);
	sector_out = dst_bio->bi_iter.bi_sector;
	sector_out_end = sector_out + (dst_bio->bi_iter.bi_size >> SECTOR_SHIFT);

	while (rem > 0) {
		/* Copies at most one block; returns bytes copied or -1. */
		chunk = nullb_copy_sector(nullb, sector_in, sector_out, rem,
					  rq->cmd_flags & REQ_FUA);
		if (chunk < 0)
			return BLK_STS_IOERR;
		rem -= chunk;
		if (!rem)
			break;
		sector_in += chunk >> SECTOR_SHIFT;
		/* Source bio exhausted: move to the next source bio. */
		if (sector_in >= sector_in_end) {
			src_bio = blk_next_copy_bio(src_bio);
			/* rem > 0 implies more source data must exist. */
			if (WARN_ON_ONCE(!src_bio))
				return BLK_STS_IOERR;
			sector_in = src_bio->bi_iter.bi_sector;
			sector_in_end = sector_in +
				(src_bio->bi_iter.bi_size >> SECTOR_SHIFT);
		}
		sector_out += chunk >> SECTOR_SHIFT;
		/* Destination bio exhausted: move to the next destination bio. */
		if (sector_out >= sector_out_end) {
			dst_bio = blk_next_copy_bio(dst_bio);
			if (WARN_ON_ONCE(!dst_bio))
				return BLK_STS_IOERR;
			sector_out = dst_bio->bi_iter.bi_sector;
			sector_out_end = sector_out +
				(dst_bio->bi_iter.bi_size >> SECTOR_SHIFT);
		}
		/* Large copies may run long; yield between blocks. */
		cond_resched();
	}

	return BLK_STS_OK;
}
1375+
12781376
/*
12791377
* Transfer data for the given request. The transfer size is capped with the
12801378
* nr_sectors argument.
@@ -1292,6 +1390,9 @@ static blk_status_t null_handle_data_transfer(struct nullb_cmd *cmd,
12921390
struct req_iterator iter;
12931391
struct bio_vec bvec;
12941392

1393+
if (op_is_copy(req_op(rq)))
1394+
return nullb_do_copy(nullb, rq);
1395+
12951396
spin_lock_irq(&nullb->lock);
12961397
rq_for_each_segment(bvec, rq, iter) {
12971398
len = bvec.bv_len;
@@ -1806,6 +1907,13 @@ static void null_config_discard(struct nullb *nullb, struct queue_limits *lim)
18061907
lim->max_hw_discard_sectors = UINT_MAX >> 9;
18071908
}
18081909

1910+
static void null_config_copy(struct nullb *nullb, struct queue_limits *lim)
1911+
{
1912+
lim->max_copy_hw_sectors = nullb->dev->max_copy_bytes >> SECTOR_SHIFT;
1913+
lim->max_copy_src_segments = nullb->dev->max_copy_bytes ? U16_MAX : 0;
1914+
lim->max_copy_dst_segments = lim->max_copy_src_segments;
1915+
}
1916+
18091917
static const struct block_device_operations null_ops = {
18101918
.owner = THIS_MODULE,
18111919
.report_zones = null_report_zones,
@@ -1922,6 +2030,9 @@ static int null_validate_conf(struct nullb_device *dev)
19222030
return -EINVAL;
19232031
}
19242032

2033+
if (dev->queue_mode == NULL_Q_BIO)
2034+
dev->max_copy_bytes = 0;
2035+
19252036
return 0;
19262037
}
19272038

@@ -1989,6 +2100,8 @@ static int null_add_dev(struct nullb_device *dev)
19892100
if (dev->virt_boundary)
19902101
lim.virt_boundary_mask = PAGE_SIZE - 1;
19912102
null_config_discard(nullb, &lim);
2103+
null_config_copy(nullb, &lim);
2104+
19922105
if (dev->zoned) {
19932106
rv = null_init_zoned_dev(dev, &lim);
19942107
if (rv)

drivers/block/null_blk/null_blk.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,7 @@ struct nullb_device {
9393
unsigned int queue_mode; /* block interface */
9494
unsigned int blocksize; /* block size */
9595
unsigned int max_sectors; /* Max sectors per command */
96+
unsigned long max_copy_bytes; /* Max copy offload length in bytes */
9697
unsigned int irqmode; /* IRQ completion handler */
9798
unsigned int hw_queue_depth; /* queue depth */
9899
unsigned int index; /* index of the disk, only valid with a disk */

0 commit comments

Comments
 (0)