Skip to content

Commit 62699ce

Browse files
YuKuai-huaweikawasaki
authored and committed
blk-mq-sched: refactor __blk_mq_do_dispatch_sched()
Introduce struct sched_dispatch_ctx, and split the helper into elevator_dispatch_one_request() and elevator_finish_dispatch(). Also and comments about the non-error return value. Make code cleaner, and make it easier to add a new branch to dispatch a batch of requests at a time in the next patch. Signed-off-by: Yu Kuai <[email protected]>
1 parent 3c899fa commit 62699ce

1 file changed

Lines changed: 119 additions & 77 deletions

File tree

block/blk-mq-sched.c

Lines changed: 119 additions & 77 deletions
Original file line numberDiff line numberDiff line change
@@ -74,111 +74,153 @@ static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
7474

7575
#define BLK_MQ_BUDGET_DELAY 3 /* ms units */
7676

/*
 * State shared by the elevator dispatch helpers below, filled in by
 * __blk_mq_do_dispatch_sched() and consumed by
 * elevator_dispatch_one_request() / elevator_finish_dispatch().
 */
struct sched_dispatch_ctx {
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *e;
	struct request_queue *q;

	/* requests dequeued from the elevator, not yet sent to the driver */
	struct list_head rq_list;
	/* number of requests on rq_list */
	int count;

	/* rq_list holds requests belonging to more than one hctx */
	bool multi_hctxs;
	/* a budget was released without dispatching; queues must be rerun */
	bool run_queue;
	/* hctx->dispatch was found non-empty; caller returns -EAGAIN */
	bool busy;
};
10489

/*
 * Check whether another request may be dequeued from the elevator for
 * ctx->hctx.
 *
 * Returns false if the elevator reports no pending work, or if
 * hctx->dispatch is non-empty - in the latter case ctx->busy is set so
 * that the caller bails out with -EAGAIN instead of starving the
 * dispatch list.
 */
static bool elevator_can_dispatch(struct sched_dispatch_ctx *ctx)
{
	if (ctx->e->type->ops.has_work &&
	    !ctx->e->type->ops.has_work(ctx->hctx))
		return false;

	if (!list_empty_careful(&ctx->hctx->dispatch)) {
		ctx->busy = true;
		return false;
	}

	return true;
}
116103

/*
 * Dequeue one request from the elevator and add it to ctx->rq_list.
 *
 * Returns true if a request was dequeued and a driver tag was acquired
 * for it; false when dispatching should stop: no work, hctx->dispatch
 * non-empty, no budget, the elevator returned no request, or driver tag
 * allocation failed. In the tag-allocation-failure case the request has
 * still been added to ctx->rq_list and owns its budget.
 */
static bool elevator_dispatch_one_request(struct sched_dispatch_ctx *ctx)
{
	bool sq_sched = blk_queue_sq_sched(ctx->q);
	struct request *rq;
	int budget_token;

	if (!elevator_can_dispatch(ctx))
		return false;

	budget_token = blk_mq_get_dispatch_budget(ctx->q);
	if (budget_token < 0)
		return false;

	/* single-queue schedulers serialize ->dispatch_request() on e->lock */
	if (sq_sched)
		spin_lock_irq(&ctx->e->lock);
	rq = ctx->e->type->ops.dispatch_request(ctx->hctx);
	if (sq_sched)
		spin_unlock_irq(&ctx->e->lock);

	if (!rq) {
		blk_mq_put_dispatch_budget(ctx->q, budget_token);
		/*
		 * We're releasing without dispatching. Holding the
		 * budget could have blocked any "hctx"s with the
		 * same queue and if we didn't dispatch then there's
		 * no guarantee anyone will kick the queue. Kick it
		 * ourselves.
		 */
		ctx->run_queue = true;
		return false;
	}

	blk_mq_set_rq_budget_token(rq, budget_token);

	/*
	 * Now this rq owns the budget which has to be released
	 * if this rq won't be queued to driver via .queue_rq()
	 * in blk_mq_dispatch_rq_list().
	 */
	list_add_tail(&rq->queuelist, &ctx->rq_list);
	ctx->count++;
	if (rq->mq_hctx != ctx->hctx)
		ctx->multi_hctxs = true;

	/*
	 * If we cannot get tag for the request, stop dequeueing
	 * requests from the IO scheduler. We are unlikely to be able
	 * to submit them anyway and it creates false impression for
	 * scheduling heuristics that the device can take more IO.
	 */
	return blk_mq_get_driver_tag(rq);
}
/*
 * Hand the requests collected on ctx->rq_list to the driver.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 * Return 0 if no request is dispatched.
 * Return 1 if at least one request is dispatched.
 */
static int elevator_finish_dispatch(struct sched_dispatch_ctx *ctx)
{
	bool dispatched = false;

	if (!ctx->count) {
		/* nothing was dequeued; rerun queues if a budget was dropped */
		if (ctx->run_queue)
			blk_mq_delay_run_hw_queues(ctx->q, BLK_MQ_BUDGET_DELAY);
	} else if (ctx->multi_hctxs) {
		/*
		 * Requests from different hctx may be dequeued from some
		 * schedulers, such as bfq and deadline.
		 *
		 * Sort the requests in the list according to their hctx,
		 * dispatch batching requests from same hctx at a time.
		 */
		list_sort(NULL, &ctx->rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&ctx->rq_list);
		} while (!list_empty(&ctx->rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(ctx->hctx, &ctx->rq_list,
						     false);
	}

	if (ctx->busy)
		return -EAGAIN;

	return !!dispatched;
}
181192

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() fails to get the budget.
 *
 * See elevator_finish_dispatch() for return values.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	unsigned int max_dispatch;
	struct sched_dispatch_ctx ctx = {
		.hctx = hctx,
		.q = hctx->queue,
		.e = hctx->queue->elevator,
	};

	INIT_LIST_HEAD(&ctx.rq_list);

	/* while hctx is marked dispatch_busy, dequeue one request at a time */
	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		if (!elevator_dispatch_one_request(&ctx))
			break;
	} while (ctx.count < max_dispatch);

	return elevator_finish_dispatch(&ctx);
}
223+
182224
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
183225
{
184226
unsigned long end = jiffies + HZ;

0 commit comments

Comments
 (0)