Skip to content

Commit 64dd6e0

Browse files
nj-shettykawasaki
authored and committed
block: Introduce queue limits for copy offloading
Add the following request queue limits: - max_copy_hw_sectors: the maximum number of sectors supported by the block driver for a single offloaded copy operation. - max_copy_src_segments: the maximum number of source segments supported by the block driver for a single offloaded copy operation. - max_copy_dst_segments: the maximum number of destination segments supported by the block driver for a single offloaded copy operation. - max_user_copy_sectors: the maximum number of sectors configured by the user for a single offloaded copy operation. - max_copy_sectors: the maximum number of sectors for a single offloaded copy operation. This is the minimum of the above two parameters. The default value for all these new limits is zero which means that copy offloading is not supported unless these limits are set by the block driver. Make the following two limits available in sysfs: - copy_max_bytes (RW) - copy_max_hw_bytes (RO) These limits will be used by the function that implements copy offloading to decide the bio size. Signed-off-by: Nitesh Shetty <[email protected]> Signed-off-by: Kanchan Joshi <[email protected]> Signed-off-by: Anuj Gupta <[email protected]> [ bvanassche: Added max_copy_{src,dst}_segments limits. Introduced blk_validate_copy_limits(). Introduced BLK_FEAT_STACKING_COPY_OFFL. Modified patch description. ] Signed-off-by: Bart Van Assche <[email protected]>
1 parent 857ada9 commit 64dd6e0

4 files changed

Lines changed: 112 additions & 1 deletion

File tree

Documentation/ABI/stable/sysfs-block

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -239,6 +239,30 @@ Description:
239239
last zone of the device which may be smaller.
240240

241241

242+
What: /sys/block/<disk>/queue/copy_max_bytes
243+
Date: May 2026
244+
245+
Description:
246+
[RW] This is the maximum number of bytes that the block layer
247+
will allow for a copy request. This is always smaller or
248+
equal to the maximum size allowed by the block driver.
249+
Any value higher than 'copy_max_hw_bytes' will be reduced to
250+
'copy_max_hw_bytes'. Writing '0' to this attribute will disable
251+
copy offloading for this block device. If copy offloading is
252+
disabled, copy requests will be translated into read and write
253+
requests.
254+
255+
256+
What: /sys/block/<disk>/queue/copy_max_hw_bytes
257+
Date: May 2026
258+
259+
Description:
260+
[RO] This is the maximum number of bytes that is allowed for
261+
a single data copy request. Set by the block driver. The value
262+
zero indicates that the block device does not support copy
263+
offloading.
264+
265+
242266
What: /sys/block/<disk>/queue/crypto/
243267
Date: February 2022
244268

block/blk-settings.c

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,11 @@ void blk_set_stacking_limits(struct queue_limits *lim)
5757
lim->max_hw_zone_append_sectors = UINT_MAX;
5858
lim->max_user_discard_sectors = UINT_MAX;
5959
lim->atomic_write_hw_max = UINT_MAX;
60+
61+
lim->max_user_copy_sectors = UINT_MAX;
62+
lim->max_copy_hw_sectors = UINT_MAX;
63+
lim->max_copy_src_segments = U16_MAX;
64+
lim->max_copy_dst_segments = U16_MAX;
6065
}
6166
EXPORT_SYMBOL(blk_set_stacking_limits);
6267

@@ -333,6 +338,21 @@ static void blk_validate_atomic_write_limits(struct queue_limits *lim)
333338
lim->atomic_write_unit_max = 0;
334339
}
335340

341+
/*
342+
* Check whether max_copy_hw_sectors and max_copy_{src,dst}_segments are
343+
* either all nonzero or all zero.
344+
*/
345+
static int blk_validate_copy_limits(const struct queue_limits *lim)
346+
{
347+
if (lim->max_copy_hw_sectors && lim->max_copy_src_segments &&
348+
lim->max_copy_dst_segments)
349+
return 0;
350+
if (!lim->max_copy_hw_sectors && !lim->max_copy_src_segments &&
351+
!lim->max_copy_dst_segments)
352+
return 0;
353+
return -EINVAL;
354+
}
355+
336356
/*
337357
* Check that the limits in lim are valid, initialize defaults for unset
338358
* values, and cap values based on others where needed.
@@ -510,6 +530,13 @@ int blk_validate_limits(struct queue_limits *lim)
510530
err = blk_validate_integrity_limits(lim);
511531
if (err)
512532
return err;
533+
534+
err = blk_validate_copy_limits(lim);
535+
if (err)
536+
return err;
537+
lim->max_copy_sectors =
538+
min(lim->max_copy_hw_sectors, lim->max_user_copy_sectors);
539+
513540
return blk_validate_zoned_limits(lim);
514541
}
515542
EXPORT_SYMBOL_GPL(blk_validate_limits);
@@ -528,6 +555,7 @@ int blk_set_default_limits(struct queue_limits *lim)
528555
*/
529556
lim->max_user_discard_sectors = UINT_MAX;
530557
lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
558+
lim->max_user_copy_sectors = UINT_MAX;
531559
return blk_validate_limits(lim);
532560
}
533561

@@ -829,6 +857,14 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
829857
t->max_segment_size = min_not_zero(t->max_segment_size,
830858
b->max_segment_size);
831859

860+
t->max_copy_hw_sectors =
861+
min(t->max_copy_hw_sectors, b->max_copy_hw_sectors);
862+
t->max_copy_src_segments =
863+
min(t->max_copy_src_segments, b->max_copy_src_segments);
864+
t->max_copy_dst_segments =
865+
min(t->max_copy_dst_segments, b->max_copy_dst_segments);
866+
t->max_copy_sectors = min(t->max_copy_sectors, b->max_copy_sectors);
867+
832868
alignment = queue_limit_alignment_offset(b, start);
833869

834870
/* Bottom device has different alignment. Check that it is

block/blk-sysfs.c

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -325,6 +325,36 @@ queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count,
325325
return 0;
326326
}
327327

328+
static ssize_t queue_copy_hw_max_show(struct gendisk *disk, char *page)
329+
{
330+
return queue_var_show(
331+
disk->queue->limits.max_copy_hw_sectors << SECTOR_SHIFT, page);
332+
}
333+
334+
static ssize_t queue_copy_max_show(struct gendisk *disk, char *page)
335+
{
336+
return queue_var_show(
337+
disk->queue->limits.max_copy_sectors << SECTOR_SHIFT, page);
338+
}
339+
340+
static int queue_copy_max_store(struct gendisk *disk, const char *page,
341+
size_t count, struct queue_limits *lim)
342+
{
343+
unsigned long max_copy_bytes;
344+
ssize_t ret;
345+
346+
ret = queue_var_store(&max_copy_bytes, page, count);
347+
if (ret < 0)
348+
return ret;
349+
350+
if ((max_copy_bytes >> SECTOR_SHIFT) > UINT_MAX)
351+
return -EINVAL;
352+
353+
lim->max_user_copy_sectors = max_copy_bytes >> SECTOR_SHIFT;
354+
355+
return 0;
356+
}
357+
328358
static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
329359
size_t count, struct queue_limits *lim, blk_features_t feature)
330360
{
@@ -652,6 +682,9 @@ QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
652682
QUEUE_LIM_RO_ENTRY(queue_max_open_zones, "max_open_zones");
653683
QUEUE_LIM_RO_ENTRY(queue_max_active_zones, "max_active_zones");
654684

685+
QUEUE_LIM_RO_ENTRY(queue_copy_hw_max, "copy_max_hw_bytes");
686+
QUEUE_LIM_RW_ENTRY(queue_copy_max, "copy_max_bytes");
687+
655688
QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
656689
QUEUE_LIM_RW_ENTRY(queue_iostats_passthrough, "iostats_passthrough");
657690
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
@@ -760,6 +793,8 @@ static const struct attribute *const queue_attrs[] = {
760793
&queue_max_hw_wzeroes_unmap_sectors_entry.attr,
761794
&queue_max_wzeroes_unmap_sectors_entry.attr,
762795
&queue_max_zone_append_sectors_entry.attr,
796+
&queue_copy_hw_max_entry.attr,
797+
&queue_copy_max_entry.attr,
763798
&queue_zone_write_granularity_entry.attr,
764799
&queue_rotational_entry.attr,
765800
&queue_zoned_entry.attr,

include/linux/blkdev.h

Lines changed: 17 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -353,13 +353,17 @@ typedef unsigned int __bitwise blk_features_t;
353353
#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
354354
((__force blk_features_t)(1u << 15))
355355

356+
/* block driver is a stacking block driver that supports copy offloading */
357+
#define BLK_FEAT_STACKING_COPY_OFFL ((__force blk_features_t)(1u << 16))
358+
356359
/*
357360
* Flags automatically inherited when stacking limits.
358361
*/
359362
#define BLK_FEAT_INHERIT_MASK \
360363
(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
361364
BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | \
362-
BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)
365+
BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE | \
366+
BLK_FEAT_STACKING_COPY_OFFL)
363367

364368
/* internal flags in queue_limits.flags */
365369
typedef unsigned int __bitwise blk_flags_t;
@@ -415,6 +419,13 @@ struct queue_limits {
415419
unsigned int atomic_write_hw_unit_max;
416420
unsigned int atomic_write_unit_max;
417421

422+
/* copy offloading limits */
423+
unsigned int max_copy_hw_sectors; /* set by block driver*/
424+
uint16_t max_copy_src_segments; /* set by block driver*/
425+
uint16_t max_copy_dst_segments; /* set by block driver*/
426+
unsigned int max_user_copy_sectors; /* set via sysfs */
427+
unsigned int max_copy_sectors; /* min() of the above */
428+
418429
unsigned short max_segments;
419430
unsigned short max_integrity_segments;
420431
unsigned short max_discard_segments;
@@ -1454,6 +1465,11 @@ static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
14541465
return bdev_limits(bdev)->discard_granularity;
14551466
}
14561467

1468+
static inline unsigned int bdev_max_copy_sectors(struct block_device *bdev)
1469+
{
1470+
return bdev_get_queue(bdev)->limits.max_copy_sectors;
1471+
}
1472+
14571473
static inline unsigned int
14581474
bdev_max_secure_erase_sectors(struct block_device *bdev)
14591475
{

0 commit comments

Comments
 (0)