Skip to content

Commit 9c6c4ce

Browse files
johnpgarry
authored and kawasaki committed
block: use chunk_sectors when evaluating stacked atomic write limits
The atomic write unit max value is limited by any stacked device stripe size. It is required that the atomic write unit is a power-of-2 factor of the stripe size. Currently we use io_min limit to hold the stripe size, and check for a io_min <= SECTOR_SIZE when deciding if we have a striped stacked device. Nilay reports that this causes a problem when the physical block size is greater than SECTOR_SIZE [0]. Furthermore, io_min may be mutated when stacking devices, and this makes it a poor candidate to hold the stripe size. Such an example (of when io_min may change) would be when the io_min is less than the physical block size. Use chunk_sectors to hold the stripe size, which is more appropriate. [0] https://lore.kernel.org/linux-block/[email protected]/T/#mecca17129f72811137d3c2f1e477634e77f06781 Reviewed-by: Nilay Shroff <[email protected]> Tested-by: Nilay Shroff <[email protected]> Signed-off-by: John Garry <[email protected]>
1 parent d3458d8 commit 9c6c4ce

1 file changed

Lines changed: 33 additions & 23 deletions

File tree

block/blk-settings.c

Lines changed: 33 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -597,41 +597,50 @@ static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
597597
return true;
598598
}
599599

600-
601-
/* Check stacking of first bottom device */
602-
static bool blk_stack_atomic_writes_head(struct queue_limits *t,
603-
struct queue_limits *b)
600+
static void blk_stack_atomic_writes_chunk_sectors(struct queue_limits *t)
604601
{
605-
if (b->atomic_write_hw_boundary &&
606-
!blk_stack_atomic_writes_boundary_head(t, b))
607-
return false;
602+
unsigned int chunk_bytes;
608603

609-
if (t->io_min <= SECTOR_SIZE) {
610-
/* No chunk sectors, so use bottom device values directly */
611-
t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
612-
t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
613-
t->atomic_write_hw_max = b->atomic_write_hw_max;
614-
return true;
615-
}
604+
if (!t->chunk_sectors)
605+
return;
606+
607+
/*
608+
* If chunk sectors is so large that its value in bytes overflows
609+
* UINT_MAX, then just shift it down so it definitely will fit.
610+
* We don't support atomic writes of such a large size anyway.
611+
*/
612+
if (check_shl_overflow(t->chunk_sectors, SECTOR_SHIFT, &chunk_bytes))
613+
chunk_bytes = t->chunk_sectors;
616614

617615
/*
618616
* Find values for limits which work for chunk size.
619617
* b->atomic_write_hw_unit_{min, max} may not be aligned with chunk
620-
* size (t->io_min), as chunk size is not restricted to a power-of-2.
618+
* size, as the chunk size is not restricted to a power-of-2.
621619
* So we need to find highest power-of-2 which works for the chunk
622620
* size.
623-
* As an example scenario, we could have b->unit_max = 16K and
624-
* t->io_min = 24K. For this case, reduce t->unit_max to a value
625-
* aligned with both limits, i.e. 8K in this example.
621+
* As an example scenario, we could have t->unit_max = 16K and
622+
* t->chunk_sectors = 24KB. For this case, reduce t->unit_max to a
623+
* value aligned with both limits, i.e. 8K in this example.
626624
*/
627-
t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
628-
while (t->io_min % t->atomic_write_hw_unit_max)
629-
t->atomic_write_hw_unit_max /= 2;
625+
t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
626+
max_pow_of_two_factor(chunk_bytes));
630627

631-
t->atomic_write_hw_unit_min = min(b->atomic_write_hw_unit_min,
628+
t->atomic_write_hw_unit_min = min(t->atomic_write_hw_unit_min,
632629
t->atomic_write_hw_unit_max);
633-
t->atomic_write_hw_max = min(b->atomic_write_hw_max, t->io_min);
630+
t->atomic_write_hw_max = min(t->atomic_write_hw_max, chunk_bytes);
631+
}
632+
633+
/* Check stacking of first bottom device */
634+
static bool blk_stack_atomic_writes_head(struct queue_limits *t,
635+
struct queue_limits *b)
636+
{
637+
if (b->atomic_write_hw_boundary &&
638+
!blk_stack_atomic_writes_boundary_head(t, b))
639+
return false;
634640

641+
t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
642+
t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
643+
t->atomic_write_hw_max = b->atomic_write_hw_max;
635644
return true;
636645
}
637646

@@ -659,6 +668,7 @@ static void blk_stack_atomic_writes_limits(struct queue_limits *t,
659668

660669
if (!blk_stack_atomic_writes_head(t, b))
661670
goto unsupported;
671+
blk_stack_atomic_writes_chunk_sectors(t);
662672
return;
663673

664674
unsupported:

0 commit comments

Comments
 (0)