Skip to content

Commit 81f19c2

Browse files
YuKuai-huawei authored and kawasaki committed
block, bfq: switch to use elevator lock
Replace the internal spinlock bfqd->lock with the new spinlock in elevator_queue. There are no functional changes.

Signed-off-by: Yu Kuai <[email protected]>
Reviewed-by: Damien Le Moal <[email protected]>
1 parent 3d61e4e commit 81f19c2

3 files changed

Lines changed: 28 additions & 28 deletions

File tree

block/bfq-cgroup.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -878,7 +878,7 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
878878
unsigned long flags;
879879
int i;
880880

881-
spin_lock_irqsave(&bfqd->lock, flags);
881+
spin_lock_irqsave(bfqd->lock, flags);
882882

883883
if (!entity) /* root group */
884884
goto put_async_queues;
@@ -923,7 +923,7 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
923923
put_async_queues:
924924
bfq_put_async_queues(bfqd, bfqg);
925925

926-
spin_unlock_irqrestore(&bfqd->lock, flags);
926+
spin_unlock_irqrestore(bfqd->lock, flags);
927927
/*
928928
* @blkg is going offline and will be ignored by
929929
* blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so

block/bfq-iosched.c

Lines changed: 25 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -469,7 +469,7 @@ static struct bfq_io_cq *bfq_bic_lookup(struct request_queue *q)
469469
*/
470470
void bfq_schedule_dispatch(struct bfq_data *bfqd)
471471
{
472-
lockdep_assert_held(&bfqd->lock);
472+
lockdep_assert_held(bfqd->lock);
473473

474474
if (bfqd->queued != 0) {
475475
bfq_log(bfqd, "schedule dispatch");
@@ -594,7 +594,7 @@ static bool bfqq_request_over_limit(struct bfq_data *bfqd,
594594
int level;
595595

596596
retry:
597-
spin_lock_irq(&bfqd->lock);
597+
spin_lock_irq(bfqd->lock);
598598
bfqq = bic_to_bfqq(bic, op_is_sync(opf), act_idx);
599599
if (!bfqq)
600600
goto out;
@@ -606,7 +606,7 @@ static bool bfqq_request_over_limit(struct bfq_data *bfqd,
606606
/* +1 for bfqq entity, root cgroup not included */
607607
depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1;
608608
if (depth > alloc_depth) {
609-
spin_unlock_irq(&bfqd->lock);
609+
spin_unlock_irq(bfqd->lock);
610610
if (entities != inline_entities)
611611
kfree(entities);
612612
entities = kmalloc_array(depth, sizeof(*entities), GFP_NOIO);
@@ -664,7 +664,7 @@ static bool bfqq_request_over_limit(struct bfq_data *bfqd,
664664
}
665665
}
666666
out:
667-
spin_unlock_irq(&bfqd->lock);
667+
spin_unlock_irq(bfqd->lock);
668668
if (entities != inline_entities)
669669
kfree(entities);
670670
return ret;
@@ -2458,7 +2458,7 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
24582458
struct request *free = NULL;
24592459
bool ret;
24602460

2461-
spin_lock_irq(&bfqd->lock);
2461+
spin_lock_irq(bfqd->lock);
24622462

24632463
if (bic) {
24642464
/*
@@ -2476,7 +2476,7 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
24762476

24772477
ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
24782478

2479-
spin_unlock_irq(&bfqd->lock);
2479+
spin_unlock_irq(bfqd->lock);
24802480
if (free)
24812481
blk_mq_free_request(free);
24822482

@@ -2651,7 +2651,7 @@ static void bfq_end_wr(struct bfq_data *bfqd)
26512651
struct bfq_queue *bfqq;
26522652
int i;
26532653

2654-
spin_lock_irq(&bfqd->lock);
2654+
spin_lock_irq(bfqd->lock);
26552655

26562656
for (i = 0; i < bfqd->num_actuators; i++) {
26572657
list_for_each_entry(bfqq, &bfqd->active_list[i], bfqq_list)
@@ -2661,7 +2661,7 @@ static void bfq_end_wr(struct bfq_data *bfqd)
26612661
bfq_bfqq_end_wr(bfqq);
26622662
bfq_end_wr_async(bfqd);
26632663

2664-
spin_unlock_irq(&bfqd->lock);
2664+
spin_unlock_irq(bfqd->lock);
26652665
}
26662666

26672667
static sector_t bfq_io_struct_pos(void *io_struct, bool request)
@@ -5307,7 +5307,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
53075307
struct bfq_queue *in_serv_queue;
53085308
bool waiting_rq, idle_timer_disabled = false;
53095309

5310-
spin_lock_irq(&bfqd->lock);
5310+
spin_lock_irq(bfqd->lock);
53115311

53125312
in_serv_queue = bfqd->in_service_queue;
53135313
waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
@@ -5318,7 +5318,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
53185318
waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
53195319
}
53205320

5321-
spin_unlock_irq(&bfqd->lock);
5321+
spin_unlock_irq(bfqd->lock);
53225322
bfq_update_dispatch_stats(hctx->queue, rq,
53235323
idle_timer_disabled ? in_serv_queue : NULL,
53245324
idle_timer_disabled);
@@ -5496,9 +5496,9 @@ static void bfq_exit_icq(struct io_cq *icq)
54965496
* this is the last time these queues are accessed.
54975497
*/
54985498
if (bfqd) {
5499-
spin_lock_irqsave(&bfqd->lock, flags);
5499+
spin_lock_irqsave(bfqd->lock, flags);
55005500
_bfq_exit_icq(bic, bfqd->num_actuators);
5501-
spin_unlock_irqrestore(&bfqd->lock, flags);
5501+
spin_unlock_irqrestore(bfqd->lock, flags);
55025502
} else {
55035503
_bfq_exit_icq(bic, BFQ_MAX_ACTUATORS);
55045504
}
@@ -6254,10 +6254,10 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
62546254
if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio)
62556255
bfqg_stats_update_legacy_io(q, rq);
62566256
#endif
6257-
spin_lock_irq(&bfqd->lock);
6257+
spin_lock_irq(bfqd->lock);
62586258
bfqq = bfq_init_rq(rq);
62596259
if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
6260-
spin_unlock_irq(&bfqd->lock);
6260+
spin_unlock_irq(bfqd->lock);
62616261
blk_mq_free_requests(&free);
62626262
return;
62636263
}
@@ -6290,7 +6290,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
62906290
* merge).
62916291
*/
62926292
cmd_flags = rq->cmd_flags;
6293-
spin_unlock_irq(&bfqd->lock);
6293+
spin_unlock_irq(bfqd->lock);
62946294

62956295
bfq_update_insert_stats(q, bfqq, idle_timer_disabled,
62966296
cmd_flags);
@@ -6671,7 +6671,7 @@ static void bfq_finish_requeue_request(struct request *rq)
66716671
rq->io_start_time_ns,
66726672
rq->cmd_flags);
66736673

6674-
spin_lock_irqsave(&bfqd->lock, flags);
6674+
spin_lock_irqsave(bfqd->lock, flags);
66756675
if (likely(rq->rq_flags & RQF_STARTED)) {
66766676
if (rq == bfqd->waited_rq)
66776677
bfq_update_inject_limit(bfqd, bfqq);
@@ -6681,7 +6681,7 @@ static void bfq_finish_requeue_request(struct request *rq)
66816681
bfqq_request_freed(bfqq);
66826682
bfq_put_queue(bfqq);
66836683
RQ_BIC(rq)->requests--;
6684-
spin_unlock_irqrestore(&bfqd->lock, flags);
6684+
spin_unlock_irqrestore(bfqd->lock, flags);
66856685

66866686
/*
66876687
* Reset private fields. In case of a requeue, this allows
@@ -7012,7 +7012,7 @@ bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
70127012
enum bfqq_expiration reason;
70137013
unsigned long flags;
70147014

7015-
spin_lock_irqsave(&bfqd->lock, flags);
7015+
spin_lock_irqsave(bfqd->lock, flags);
70167016

70177017
/*
70187018
* Considering that bfqq may be in race, we should firstly check
@@ -7022,7 +7022,7 @@ bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
70227022
* been cleared in __bfq_bfqd_reset_in_service func.
70237023
*/
70247024
if (bfqq != bfqd->in_service_queue) {
7025-
spin_unlock_irqrestore(&bfqd->lock, flags);
7025+
spin_unlock_irqrestore(bfqd->lock, flags);
70267026
return;
70277027
}
70287028

@@ -7050,7 +7050,7 @@ bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
70507050

70517051
schedule_dispatch:
70527052
bfq_schedule_dispatch(bfqd);
7053-
spin_unlock_irqrestore(&bfqd->lock, flags);
7053+
spin_unlock_irqrestore(bfqd->lock, flags);
70547054
}
70557055

70567056
/*
@@ -7176,10 +7176,10 @@ static void bfq_exit_queue(struct elevator_queue *e)
71767176

71777177
hrtimer_cancel(&bfqd->idle_slice_timer);
71787178

7179-
spin_lock_irq(&bfqd->lock);
7179+
spin_lock_irq(bfqd->lock);
71807180
list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
71817181
bfq_deactivate_bfqq(bfqd, bfqq, false, false);
7182-
spin_unlock_irq(&bfqd->lock);
7182+
spin_unlock_irq(bfqd->lock);
71837183

71847184
for (actuator = 0; actuator < bfqd->num_actuators; actuator++)
71857185
WARN_ON_ONCE(bfqd->rq_in_driver[actuator]);
@@ -7193,10 +7193,10 @@ static void bfq_exit_queue(struct elevator_queue *e)
71937193
#ifdef CONFIG_BFQ_GROUP_IOSCHED
71947194
blkcg_deactivate_policy(bfqd->queue->disk, &blkcg_policy_bfq);
71957195
#else
7196-
spin_lock_irq(&bfqd->lock);
7196+
spin_lock_irq(bfqd->lock);
71977197
bfq_put_async_queues(bfqd, bfqd->root_group);
71987198
kfree(bfqd->root_group);
7199-
spin_unlock_irq(&bfqd->lock);
7199+
spin_unlock_irq(bfqd->lock);
72007200
#endif
72017201

72027202
blk_stat_disable_accounting(bfqd->queue);
@@ -7361,7 +7361,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
73617361
/* see comments on the definition of next field inside bfq_data */
73627362
bfqd->actuator_load_threshold = 4;
73637363

7364-
spin_lock_init(&bfqd->lock);
7364+
bfqd->lock = &eq->lock;
73657365

73667366
/*
73677367
* The invocation of the next bfq_create_group_hierarchy

block/bfq-iosched.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -795,7 +795,7 @@ struct bfq_data {
795795
/* fallback dummy bfqq for extreme OOM conditions */
796796
struct bfq_queue oom_bfqq;
797797

798-
spinlock_t lock;
798+
spinlock_t *lock;
799799

800800
/*
801801
* bic associated with the task issuing current bio for

0 commit comments

Comments
 (0)