
Commit b4d361d

bvanasschekawasaki authored and committed
blk-mq: Always insert sequential zoned writes into a software queue
One of the optimizations in the block layer is that the software queues are bypassed if it is expected that the block driver will accept a request. This can cause request reordering even for requests submitted from the same CPU core. This patch preserves the order of sequential zoned writes submitted from a given CPU core by always inserting these requests into the appropriate software queue.

Cc: Damien Le Moal <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Signed-off-by: Bart Van Assche <[email protected]>
1 parent 496bde7 commit b4d361d
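
For context, the reordering hazard this commit addresses can be sketched as a timeline. This is an illustrative scenario, not taken from the kernel source; the sector numbers are made up:

        /*
         * Two sequential writes to the same zone, submitted from one CPU:
         *
         *   W0: sectors 0..7 of a sequential-required zone
         *   W1: sectors 8..15 of the same zone
         *
         * Before this patch, W0 could be parked in a software queue while
         * the hctx was busy; once the hctx became idle, W1 could take the
         * direct-issue fast path and reach the device first. A zoned
         * device rejects such an out-of-order write because the zone's
         * write pointer is still at sector 0.
         */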

2 files changed: 36 additions & 2 deletions

block/blk-mq.c

Lines changed: 25 additions & 2 deletions
@@ -1537,6 +1537,27 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
 }
 EXPORT_SYMBOL(blk_mq_requeue_request);
 
+/*
+ * Whether the block layer should preserve the order of @rq relative to other
+ * requests submitted to the same software queue.
+ */
+static bool blk_mq_preserve_order(struct request *rq)
+{
+        return rq->q->limits.features & BLK_FEAT_ORDERED_HWQ &&
+               blk_rq_is_seq_zoned_write(rq);
+}
+
+static bool blk_mq_preserve_order_for_list(struct list_head *list)
+{
+        struct request *rq;
+
+        list_for_each_entry(rq, list, queuelist)
+                if (blk_mq_preserve_order(rq))
+                        return true;
+
+        return false;
+}
+
 static void blk_mq_requeue_work(struct work_struct *work)
 {
         struct request_queue *q =
@@ -2566,7 +2587,8 @@ static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
          * Try to issue requests directly if the hw queue isn't busy to save an
          * extra enqueue & dequeue to the sw queue.
          */
-        if (!hctx->dispatch_busy && !run_queue_async) {
+        if (!hctx->dispatch_busy && !run_queue_async &&
+            !blk_mq_preserve_order_for_list(list)) {
                 blk_mq_run_dispatch_ops(hctx->queue,
                         blk_mq_try_issue_list_directly(hctx, list));
                 if (list_empty(list))
@@ -3215,7 +3237,8 @@ void blk_mq_submit_bio(struct bio *bio)
 
         hctx = rq->mq_hctx;
         if ((rq->rq_flags & RQF_USE_SCHED) ||
-            (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) {
+            (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync)) ||
+            blk_mq_preserve_order(rq)) {
                 blk_mq_insert_request(rq, 0);
                 blk_mq_run_hw_queue(hctx, true);
         } else {
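
The direct-issue fast path above is only bypassed when the queue advertises BLK_FEAT_ORDERED_HWQ in its limits. As a rough sketch of how a zoned block driver might opt in, assuming the queue-limits allocation API of recent kernels: the driver structure and probe function below are hypothetical, and BLK_FEAT_ORDERED_HWQ itself is introduced by this patch series.

        #include <linux/blk-mq.h>
        #include <linux/blkdev.h>
        #include <linux/err.h>

        struct my_dev {                         /* hypothetical driver state */
                struct blk_mq_tag_set tag_set;
                struct gendisk *disk;
        };

        static int my_zoned_probe(struct my_dev *dev)
        {
                /*
                 * Mark the device as zoned and ask blk-mq to preserve the
                 * order of sequential zoned writes (BLK_FEAT_ORDERED_HWQ,
                 * added by this series).
                 */
                struct queue_limits lim = {
                        .features = BLK_FEAT_ZONED | BLK_FEAT_ORDERED_HWQ,
                };
                struct gendisk *disk;

                disk = blk_mq_alloc_disk(&dev->tag_set, &lim, dev);
                if (IS_ERR(disk))
                        return PTR_ERR(disk);

                dev->disk = disk;
                return 0;
        }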

include/linux/blk-mq.h

Lines changed: 11 additions & 0 deletions
@@ -1191,4 +1191,15 @@ static inline int blk_rq_map_sg(struct request *rq, struct scatterlist *sglist)
 }
 void blk_dump_rq_flags(struct request *, char *);
 
+static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
+{
+        switch (req_op(rq)) {
+        case REQ_OP_WRITE:
+        case REQ_OP_WRITE_ZEROES:
+                return bdev_zone_is_seq(rq->q->disk->part0, blk_rq_pos(rq));
+        default:
+                return false;
+        }
+}
+
 #endif /* BLK_MQ_H */
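
Note that the new helper classifies only REQ_OP_WRITE and REQ_OP_WRITE_ZEROES against the zone layout; REQ_OP_ZONE_APPEND falls through to false, presumably because zone-append writes do not depend on submission order. A minimal usage sketch follows, assuming a hypothetical driver-side ->queue_rq() implementation; only the helper call comes from this patch.

        #include <linux/blk-mq.h>
        #include <linux/printk.h>

        /* Hypothetical ->queue_rq() fragment; illustrative only. */
        static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                                        const struct blk_mq_queue_data *bd)
        {
                struct request *rq = bd->rq;

                if (blk_rq_is_seq_zoned_write(rq))
                        pr_debug("ordered zoned write at sector %llu\n",
                                 (unsigned long long)blk_rq_pos(rq));

                /* ... hand the request to the hardware ... */
                return BLK_STS_OK;
        }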
