Skip to content

Commit 99288f7

Browse files
bvanasschekawasaki
authored and committed
blk-mq: Restore the zone write order when requeuing
Zoned writes may be requeued. This happens if a block driver returns BLK_STS_RESOURCE, to handle SCSI unit attentions or by the SCSI error handler after error handling has finished. A later patch enables write pipelining and increases the number of pending writes per zone. If multiple writes are pending per zone, write requests may be requeued in another order than submitted. Restore the request order if requests are requeued. Add RQF_DONTPREP to RQF_NOMERGE_FLAGS because this patch may cause RQF_DONTPREP requests to be sent to the code that checks whether a request can be merged and RQF_DONTPREP requests must not be merged. Cc: Christoph Hellwig <[email protected]> Cc: Damien Le Moal <[email protected]> Cc: Yu Kuai <[email protected]> Signed-off-by: Bart Van Assche <[email protected]>
1 parent e28f82a commit 99288f7

6 files changed

Lines changed: 43 additions & 3 deletions

File tree

block/bfq-iosched.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6276,6 +6276,8 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
62766276

62776277
if (flags & BLK_MQ_INSERT_AT_HEAD) {
62786278
list_add(&rq->queuelist, &bfqd->dispatch);
6279+
} else if (flags & BLK_MQ_INSERT_ORDERED) {
6280+
blk_mq_insert_ordered(rq, &bfqd->dispatch);
62796281
} else if (!bfqq) {
62806282
list_add_tail(&rq->queuelist, &bfqd->dispatch);
62816283
} else {

block/blk-mq.c

Lines changed: 19 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1557,7 +1557,9 @@ static void blk_mq_requeue_work(struct work_struct *work)
15571557
* already. Insert it into the hctx dispatch list to avoid
15581558
* block layer merges for the request.
15591559
*/
1560-
if (rq->rq_flags & RQF_DONTPREP)
1560+
if (blk_rq_is_seq_zoned_write(rq))
1561+
blk_mq_insert_request(rq, BLK_MQ_INSERT_ORDERED);
1562+
else if (rq->rq_flags & RQF_DONTPREP)
15611563
blk_mq_request_bypass_insert(rq, 0);
15621564
else
15631565
blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
@@ -2590,6 +2592,20 @@ static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
25902592
blk_mq_run_hw_queue(hctx, run_queue_async);
25912593
}
25922594

2595+
void blk_mq_insert_ordered(struct request *rq, struct list_head *list)
2596+
{
2597+
struct request_queue *q = rq->q;
2598+
struct request *rq2;
2599+
2600+
list_for_each_entry(rq2, list, queuelist)
2601+
if (rq2->q == q && blk_rq_pos(rq2) > blk_rq_pos(rq))
2602+
break;
2603+
2604+
/* Insert rq before rq2. If rq2 is the list head, append at the end. */
2605+
list_add_tail(&rq->queuelist, &rq2->queuelist);
2606+
}
2607+
EXPORT_SYMBOL_GPL(blk_mq_insert_ordered);
2608+
25932609
static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
25942610
{
25952611
struct request_queue *q = rq->q;
@@ -2644,6 +2660,8 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
26442660
spin_lock(&ctx->lock);
26452661
if (flags & BLK_MQ_INSERT_AT_HEAD)
26462662
list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]);
2663+
else if (flags & BLK_MQ_INSERT_ORDERED)
2664+
blk_mq_insert_ordered(rq, &ctx->rq_lists[hctx->type]);
26472665
else
26482666
list_add_tail(&rq->queuelist,
26492667
&ctx->rq_lists[hctx->type]);

block/blk-mq.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,8 +40,10 @@ enum {
4040

4141
typedef unsigned int __bitwise blk_insert_t;
4242
#define BLK_MQ_INSERT_AT_HEAD ((__force blk_insert_t)0x01)
43+
#define BLK_MQ_INSERT_ORDERED ((__force blk_insert_t)0x02)
4344

4445
void blk_mq_submit_bio(struct bio *bio);
46+
void blk_mq_insert_ordered(struct request *rq, struct list_head *list);
4547
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
4648
unsigned int flags);
4749
void blk_mq_exit_queue(struct request_queue *q);

block/kyber-iosched.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -603,6 +603,8 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
603603
trace_block_rq_insert(rq);
604604
if (flags & BLK_MQ_INSERT_AT_HEAD)
605605
list_move(&rq->queuelist, head);
606+
else if (flags & BLK_MQ_INSERT_ORDERED)
607+
blk_mq_insert_ordered(rq, head);
606608
else
607609
list_move_tail(&rq->queuelist, head);
608610
sbitmap_set_bit(&khd->kcq_map[sched_domain],

block/mq-deadline.c

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -710,7 +710,12 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
710710
* set expire time and add to fifo list
711711
*/
712712
rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
713-
list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
713+
if (flags & BLK_MQ_INSERT_ORDERED)
714+
blk_mq_insert_ordered(rq,
715+
&per_prio->fifo_list[data_dir]);
716+
else
717+
list_add_tail(&rq->queuelist,
718+
&per_prio->fifo_list[data_dir]);
714719
}
715720
}
716721

include/linux/blk-mq.h

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@ enum rqf_flags {
8686

8787
/* flags that prevent us from merging requests: */
8888
#define RQF_NOMERGE_FLAGS \
89-
(RQF_STARTED | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
89+
(RQF_STARTED | RQF_FLUSH_SEQ | RQF_DONTPREP | RQF_SPECIAL_PAYLOAD)
9090

9191
enum mq_rq_state {
9292
MQ_RQ_IDLE = 0,
@@ -1191,4 +1191,15 @@ static inline int blk_rq_map_sg(struct request *rq, struct scatterlist *sglist)
11911191
}
11921192
void blk_dump_rq_flags(struct request *, char *);
11931193

1194+
static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
1195+
{
1196+
switch (req_op(rq)) {
1197+
case REQ_OP_WRITE:
1198+
case REQ_OP_WRITE_ZEROES:
1199+
return bdev_zone_is_seq(rq->q->disk->part0, blk_rq_pos(rq));
1200+
default:
1201+
return false;
1202+
}
1203+
}
1204+
11941205
#endif /* BLK_MQ_H */

0 commit comments

Comments
 (0)