Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 3 additions & 10 deletions block/bfq-iosched.c
Original file line number Diff line number Diff line change
Expand Up @@ -7232,22 +7232,16 @@ static void bfq_init_root_group(struct bfq_group *root_group,
root_group->sched_data.bfq_class_idle_last_service = jiffies;
}

static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
static int bfq_init_queue(struct request_queue *q, struct elevator_queue *eq)
{
struct bfq_data *bfqd;
struct elevator_queue *eq;
unsigned int i;
struct blk_independent_access_ranges *ia_ranges = q->disk->ia_ranges;

eq = elevator_alloc(q, e);
if (!eq)
return -ENOMEM;

bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
if (!bfqd) {
kobject_put(&eq->kobj);
if (!bfqd)
return -ENOMEM;
}

eq->elevator_data = bfqd;

spin_lock_irq(&q->queue_lock);
Expand Down Expand Up @@ -7405,7 +7399,6 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)

out_free:
kfree(bfqd);
kobject_put(&eq->kobj);
return -ENOMEM;
}

Expand Down
223 changes: 152 additions & 71 deletions block/blk-mq-sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -374,64 +374,17 @@ bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx)
{
if (blk_mq_is_shared_tags(q->tag_set->flags)) {
hctx->sched_tags = q->sched_shared_tags;
return 0;
}

hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
q->nr_requests);

if (!hctx->sched_tags)
return -ENOMEM;
return 0;
}

static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
{
blk_mq_free_rq_map(queue->sched_shared_tags);
queue->sched_shared_tags = NULL;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
{
struct blk_mq_hw_ctx *hctx;
unsigned long i;

queue_for_each_hw_ctx(q, hctx, i) {
if (hctx->sched_tags) {
if (!blk_mq_is_shared_tags(flags))
blk_mq_free_rq_map(hctx->sched_tags);
hctx->sched_tags = NULL;
}
}
queue_for_each_hw_ctx(q, hctx, i)
hctx->sched_tags = NULL;

if (blk_mq_is_shared_tags(flags))
blk_mq_exit_sched_shared_tags(q);
}

static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
{
struct blk_mq_tag_set *set = queue->tag_set;

/*
* Set initial depth at max so that we don't need to reallocate for
* updating nr_requests.
*/
queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
BLK_MQ_NO_HCTX_IDX,
MAX_SCHED_RQ);
if (!queue->sched_shared_tags)
return -ENOMEM;

blk_mq_tag_update_sched_shared_tags(queue);

return 0;
q->sched_shared_tags = NULL;
}

void blk_mq_sched_reg_debugfs(struct request_queue *q)
Expand All @@ -458,45 +411,174 @@ void blk_mq_sched_unreg_debugfs(struct request_queue *q)
mutex_unlock(&q->debugfs_mutex);
}

/*
 * Release the scheduler tag maps carried by @et and then @et itself.
 * With shared tags only slot 0 is populated; otherwise there is one
 * tag map per hardware queue.
 */
void blk_mq_free_sched_tags(struct elevator_tags *et,
		struct blk_mq_tag_set *set)
{
	if (blk_mq_is_shared_tags(set->flags)) {
		/* Shared tags are stored at index 0 in @tags. */
		blk_mq_free_map_and_rqs(set, et->tags[0], BLK_MQ_NO_HCTX_IDX);
	} else {
		unsigned long qidx;

		for (qidx = 0; qidx < et->nr_hw_queues; qidx++)
			blk_mq_free_map_and_rqs(set, et->tags[qidx], qidx);
	}
	kfree(et);
}

/*
 * Free the pre-allocated scheduler tags stashed in @et_table for every
 * queue on @set's tag list that currently has an elevator attached.
 */
void blk_mq_free_sched_tags_batch(struct xarray *et_table,
		struct blk_mq_tag_set *set)
{
	struct elevator_tags *et;
	struct request_queue *q;

	lockdep_assert_held_write(&set->update_nr_hwq_lock);

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		/*
		 * q->elevator is stable without q->elevator_lock: we hold
		 * set->update_nr_hwq_lock for write, and the scheduler
		 * update/switch path takes the same lock for read, so it
		 * cannot run concurrently.
		 */
		if (!q->elevator)
			continue;

		et = xa_load(et_table, q->id);
		if (unlikely(!et))
			WARN_ON_ONCE(1);
		else
			blk_mq_free_sched_tags(et, set);
	}
}

struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
unsigned int nr_hw_queues)
{
unsigned int nr_tags;
int i;
struct elevator_tags *et;
gfp_t gfp = GFP_NOIO | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

if (blk_mq_is_shared_tags(set->flags))
nr_tags = 1;
else
nr_tags = nr_hw_queues;

et = kmalloc(sizeof(struct elevator_tags) +
nr_tags * sizeof(struct blk_mq_tags *), gfp);
if (!et)
return NULL;
/*
* Default to double of smaller one between hw queue_depth and
* 128, since we don't split into sync/async like the old code
* did. Additionally, this is a per-hw queue depth.
*/
et->nr_requests = 2 * min_t(unsigned int, set->queue_depth,
BLKDEV_DEFAULT_RQ);
et->nr_hw_queues = nr_hw_queues;

if (blk_mq_is_shared_tags(set->flags)) {
/* Shared tags are stored at index 0 in @tags. */
et->tags[0] = blk_mq_alloc_map_and_rqs(set, BLK_MQ_NO_HCTX_IDX,
MAX_SCHED_RQ);
if (!et->tags[0])
goto out;
} else {
for (i = 0; i < et->nr_hw_queues; i++) {
et->tags[i] = blk_mq_alloc_map_and_rqs(set, i,
et->nr_requests);
if (!et->tags[i])
goto out_unwind;
}
}

return et;
out_unwind:
while (--i >= 0)
blk_mq_free_map_and_rqs(set, et->tags[i], i);
out:
kfree(et);
return NULL;
}

/*
 * Pre-allocate scheduler tags for every queue on @set's tag list that
 * has an elevator, storing each result in @et_table keyed by queue id.
 * On any failure all allocations made so far are rolled back.
 *
 * Returns 0 on success, -ENOMEM on allocation or insertion failure.
 */
int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
		struct blk_mq_tag_set *set, unsigned int nr_hw_queues)
{
	gfp_t gfp = GFP_NOIO | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
	struct elevator_tags *et;
	struct request_queue *q;

	lockdep_assert_held_write(&set->update_nr_hwq_lock);

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		/*
		 * q->elevator is stable without q->elevator_lock: we hold
		 * set->update_nr_hwq_lock for write, which excludes the
		 * scheduler update/switch path (a reader of the same lock).
		 */
		if (!q->elevator)
			continue;

		et = blk_mq_alloc_sched_tags(set, nr_hw_queues);
		if (!et)
			goto out_unwind;
		if (xa_insert(et_table, q->id, et, gfp))
			goto out_free_tags;
	}
	return 0;

out_free_tags:
	/* @et was allocated but never made it into the table. */
	blk_mq_free_sched_tags(et, set);
out_unwind:
	/* Walk back over the queues already handled and free their tags. */
	list_for_each_entry_continue_reverse(q, &set->tag_list, tag_set_list) {
		if (!q->elevator)
			continue;

		et = xa_load(et_table, q->id);
		if (et)
			blk_mq_free_sched_tags(et, set);
	}
	return -ENOMEM;
}

/* caller must have a reference to @e, will grab another one if successful */
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
struct elevator_tags *et)
{
unsigned int flags = q->tag_set->flags;
struct blk_mq_hw_ctx *hctx;
struct elevator_queue *eq;
unsigned long i;
int ret;

/*
* Default to double of smaller one between hw queue_depth and 128,
* since we don't split into sync/async like the old code did.
* Additionally, this is a per-hw queue depth.
*/
q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
BLKDEV_DEFAULT_RQ);
eq = elevator_alloc(q, e, et);
if (!eq)
return -ENOMEM;

q->nr_requests = et->nr_requests;

if (blk_mq_is_shared_tags(flags)) {
ret = blk_mq_init_sched_shared_tags(q);
if (ret)
return ret;
/* Shared tags are stored at index 0 in @et->tags. */
q->sched_shared_tags = et->tags[0];
blk_mq_tag_update_sched_shared_tags(q);
}

queue_for_each_hw_ctx(q, hctx, i) {
ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
if (ret)
goto err_free_map_and_rqs;
if (blk_mq_is_shared_tags(flags))
hctx->sched_tags = q->sched_shared_tags;
else
hctx->sched_tags = et->tags[i];
}

ret = e->ops.init_sched(q, e);
ret = e->ops.init_sched(q, eq);
if (ret)
goto err_free_map_and_rqs;
goto out;

queue_for_each_hw_ctx(q, hctx, i) {
if (e->ops.init_hctx) {
ret = e->ops.init_hctx(hctx, i);
if (ret) {
eq = q->elevator;
blk_mq_sched_free_rqs(q);
blk_mq_exit_sched(q, eq);
kobject_put(&eq->kobj);
return ret;
Expand All @@ -505,10 +587,9 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
}
return 0;

err_free_map_and_rqs:
blk_mq_sched_free_rqs(q);
out:
blk_mq_sched_tags_teardown(q, flags);

kobject_put(&eq->kobj);
q->elevator = NULL;
return ret;
}
Expand Down
12 changes: 11 additions & 1 deletion block/blk-mq-sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,20 @@ void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
struct elevator_tags *et);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
void blk_mq_sched_free_rqs(struct request_queue *q);

struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
unsigned int nr_hw_queues);
int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
void blk_mq_free_sched_tags(struct elevator_tags *et,
struct blk_mq_tag_set *set);
void blk_mq_free_sched_tags_batch(struct xarray *et_table,
struct blk_mq_tag_set *set);

static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
Expand Down
16 changes: 11 additions & 5 deletions block/blk-mq.c
Original file line number Diff line number Diff line change
Expand Up @@ -4974,12 +4974,13 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
* Switch back to the elevator type stored in the xarray.
*/
static void blk_mq_elv_switch_back(struct request_queue *q,
struct xarray *elv_tbl)
struct xarray *elv_tbl, struct xarray *et_tbl)
{
struct elevator_type *e = xa_load(elv_tbl, q->id);
struct elevator_tags *t = xa_load(et_tbl, q->id);

/* The elv_update_nr_hw_queues unfreezes the queue. */
elv_update_nr_hw_queues(q, e);
elv_update_nr_hw_queues(q, e, t);

/* Drop the reference acquired in blk_mq_elv_switch_none. */
if (e)
Expand Down Expand Up @@ -5031,7 +5032,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
int prev_nr_hw_queues = set->nr_hw_queues;
unsigned int memflags;
int i;
struct xarray elv_tbl;
struct xarray elv_tbl, et_tbl;

lockdep_assert_held(&set->tag_list_lock);

Expand All @@ -5044,6 +5045,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,

memflags = memalloc_noio_save();

xa_init(&et_tbl);
if (blk_mq_alloc_sched_tags_batch(&et_tbl, set, nr_hw_queues) < 0)
goto out_memalloc_restore;

xa_init(&elv_tbl);

list_for_each_entry(q, &set->tag_list, tag_set_list) {
Expand Down Expand Up @@ -5087,7 +5092,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
switch_back:
/* The blk_mq_elv_switch_back unfreezes queue for us. */
list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_elv_switch_back(q, &elv_tbl);
blk_mq_elv_switch_back(q, &elv_tbl, &et_tbl);

list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_sysfs_register_hctxs(q);
Expand All @@ -5098,7 +5103,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
}

xa_destroy(&elv_tbl);

xa_destroy(&et_tbl);
out_memalloc_restore:
memalloc_noio_restore(memflags);

/* Free the excess tags when nr_hw_queues shrink. */
Expand Down
Loading
Loading