Skip to content

Commit 669aa0a

Browse files
Yu Kuai (yukuai3@huawei)
authored and kawasaki committed
mq-deadline: switch to use high layer elevator lock
Introduce a new spinlock in elevator_queue, and switch dd->lock to use the new lock. There are no functional changes.

Signed-off-by: Yu Kuai <[email protected]>
1 parent 81f31a4 commit 669aa0a

3 files changed

Lines changed: 31 additions & 31 deletions

File tree

block/elevator.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -144,6 +144,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
144144
eq->type = e;
145145
kobject_init(&eq->kobj, &elv_ktype);
146146
mutex_init(&eq->sysfs_lock);
147+
spin_lock_init(&eq->lock);
147148
hash_init(eq->hash);
148149

149150
return eq;

block/elevator.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -110,12 +110,12 @@ struct request *elv_rqhash_find(struct request_queue *q, sector_t offset);
110110
/*
111111
* each queue has an elevator_queue associated with it
112112
*/
113-
struct elevator_queue
114-
{
113+
struct elevator_queue {
115114
struct elevator_type *type;
116115
void *elevator_data;
117116
struct kobject kobj;
118117
struct mutex sysfs_lock;
118+
spinlock_t lock;
119119
unsigned long flags;
120120
DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
121121
};

block/mq-deadline.c

Lines changed: 28 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -101,7 +101,7 @@ struct deadline_data {
101101
u32 async_depth;
102102
int prio_aging_expire;
103103

104-
spinlock_t lock;
104+
spinlock_t *lock;
105105
};
106106

107107
/* Maps an I/O priority class to a deadline scheduler priority. */
@@ -213,7 +213,7 @@ static void dd_merged_requests(struct request_queue *q, struct request *req,
213213
const u8 ioprio_class = dd_rq_ioclass(next);
214214
const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
215215

216-
lockdep_assert_held(&dd->lock);
216+
lockdep_assert_held(dd->lock);
217217

218218
dd->per_prio[prio].stats.merged++;
219219

@@ -253,7 +253,7 @@ static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
253253
{
254254
const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
255255

256-
lockdep_assert_held(&dd->lock);
256+
lockdep_assert_held(dd->lock);
257257

258258
return stats->inserted - atomic_read(&stats->completed);
259259
}
@@ -323,7 +323,7 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
323323
enum dd_prio prio;
324324
u8 ioprio_class;
325325

326-
lockdep_assert_held(&dd->lock);
326+
lockdep_assert_held(dd->lock);
327327

328328
if (!list_empty(&per_prio->dispatch)) {
329329
rq = list_first_entry(&per_prio->dispatch, struct request,
@@ -434,7 +434,7 @@ static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
434434
enum dd_prio prio;
435435
int prio_cnt;
436436

437-
lockdep_assert_held(&dd->lock);
437+
lockdep_assert_held(dd->lock);
438438

439439
prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
440440
!!dd_queued(dd, DD_IDLE_PRIO);
@@ -466,7 +466,7 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
466466
struct request *rq;
467467
enum dd_prio prio;
468468

469-
spin_lock(&dd->lock);
469+
spin_lock(dd->lock);
470470
rq = dd_dispatch_prio_aged_requests(dd, now);
471471
if (rq)
472472
goto unlock;
@@ -482,8 +482,7 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
482482
}
483483

484484
unlock:
485-
spin_unlock(&dd->lock);
486-
485+
spin_unlock(dd->lock);
487486
return rq;
488487
}
489488

@@ -552,9 +551,9 @@ static void dd_exit_sched(struct elevator_queue *e)
552551
WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
553552
WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
554553

555-
spin_lock(&dd->lock);
554+
spin_lock(dd->lock);
556555
queued = dd_queued(dd, prio);
557-
spin_unlock(&dd->lock);
556+
spin_unlock(dd->lock);
558557

559558
WARN_ONCE(queued != 0,
560559
"statistics for priority %d: i %u m %u d %u c %u\n",
@@ -601,7 +600,7 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
601600
dd->last_dir = DD_WRITE;
602601
dd->fifo_batch = fifo_batch;
603602
dd->prio_aging_expire = prio_aging_expire;
604-
spin_lock_init(&dd->lock);
603+
dd->lock = &eq->lock;
605604

606605
/* We dispatch from request queue wide instead of hw queue */
607606
blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
@@ -657,9 +656,9 @@ static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
657656
struct request *free = NULL;
658657
bool ret;
659658

660-
spin_lock(&dd->lock);
659+
spin_lock(dd->lock);
661660
ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
662-
spin_unlock(&dd->lock);
661+
spin_unlock(dd->lock);
663662

664663
if (free)
665664
blk_mq_free_request(free);
@@ -681,7 +680,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
681680
struct dd_per_prio *per_prio;
682681
enum dd_prio prio;
683682

684-
lockdep_assert_held(&dd->lock);
683+
lockdep_assert_held(dd->lock);
685684

686685
prio = ioprio_class_to_prio[ioprio_class];
687686
per_prio = &dd->per_prio[prio];
@@ -725,15 +724,15 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
725724
struct deadline_data *dd = q->elevator->elevator_data;
726725
LIST_HEAD(free);
727726

728-
spin_lock(&dd->lock);
727+
spin_lock(dd->lock);
729728
while (!list_empty(list)) {
730729
struct request *rq;
731730

732731
rq = list_first_entry(list, struct request, queuelist);
733732
list_del_init(&rq->queuelist);
734733
dd_insert_request(hctx, rq, flags, &free);
735734
}
736-
spin_unlock(&dd->lock);
735+
spin_unlock(dd->lock);
737736

738737
blk_mq_free_requests(&free);
739738
}
@@ -849,13 +848,13 @@ static const struct elv_fs_entry deadline_attrs[] = {
849848
#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name) \
850849
static void *deadline_##name##_fifo_start(struct seq_file *m, \
851850
loff_t *pos) \
852-
__acquires(&dd->lock) \
851+
__acquires(dd->lock) \
853852
{ \
854853
struct request_queue *q = m->private; \
855854
struct deadline_data *dd = q->elevator->elevator_data; \
856855
struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
857856
\
858-
spin_lock(&dd->lock); \
857+
spin_lock(dd->lock); \
859858
return seq_list_start(&per_prio->fifo_list[data_dir], *pos); \
860859
} \
861860
\
@@ -870,12 +869,12 @@ static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \
870869
} \
871870
\
872871
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v) \
873-
__releases(&dd->lock) \
872+
__releases(dd->lock) \
874873
{ \
875874
struct request_queue *q = m->private; \
876875
struct deadline_data *dd = q->elevator->elevator_data; \
877876
\
878-
spin_unlock(&dd->lock); \
877+
spin_unlock(dd->lock); \
879878
} \
880879
\
881880
static const struct seq_operations deadline_##name##_fifo_seq_ops = { \
@@ -941,11 +940,11 @@ static int dd_queued_show(void *data, struct seq_file *m)
941940
struct deadline_data *dd = q->elevator->elevator_data;
942941
u32 rt, be, idle;
943942

944-
spin_lock(&dd->lock);
943+
spin_lock(dd->lock);
945944
rt = dd_queued(dd, DD_RT_PRIO);
946945
be = dd_queued(dd, DD_BE_PRIO);
947946
idle = dd_queued(dd, DD_IDLE_PRIO);
948-
spin_unlock(&dd->lock);
947+
spin_unlock(dd->lock);
949948

950949
seq_printf(m, "%u %u %u\n", rt, be, idle);
951950

@@ -957,7 +956,7 @@ static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
957956
{
958957
const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
959958

960-
lockdep_assert_held(&dd->lock);
959+
lockdep_assert_held(dd->lock);
961960

962961
return stats->dispatched + stats->merged -
963962
atomic_read(&stats->completed);
@@ -969,11 +968,11 @@ static int dd_owned_by_driver_show(void *data, struct seq_file *m)
969968
struct deadline_data *dd = q->elevator->elevator_data;
970969
u32 rt, be, idle;
971970

972-
spin_lock(&dd->lock);
971+
spin_lock(dd->lock);
973972
rt = dd_owned_by_driver(dd, DD_RT_PRIO);
974973
be = dd_owned_by_driver(dd, DD_BE_PRIO);
975974
idle = dd_owned_by_driver(dd, DD_IDLE_PRIO);
976-
spin_unlock(&dd->lock);
975+
spin_unlock(dd->lock);
977976

978977
seq_printf(m, "%u %u %u\n", rt, be, idle);
979978

@@ -983,13 +982,13 @@ static int dd_owned_by_driver_show(void *data, struct seq_file *m)
983982
#define DEADLINE_DISPATCH_ATTR(prio) \
984983
static void *deadline_dispatch##prio##_start(struct seq_file *m, \
985984
loff_t *pos) \
986-
__acquires(&dd->lock) \
985+
__acquires(dd->lock) \
987986
{ \
988987
struct request_queue *q = m->private; \
989988
struct deadline_data *dd = q->elevator->elevator_data; \
990989
struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
991990
\
992-
spin_lock(&dd->lock); \
991+
spin_lock(dd->lock); \
993992
return seq_list_start(&per_prio->dispatch, *pos); \
994993
} \
995994
\
@@ -1004,12 +1003,12 @@ static void *deadline_dispatch##prio##_next(struct seq_file *m, \
10041003
} \
10051004
\
10061005
static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v) \
1007-
__releases(&dd->lock) \
1006+
__releases(dd->lock) \
10081007
{ \
10091008
struct request_queue *q = m->private; \
10101009
struct deadline_data *dd = q->elevator->elevator_data; \
10111010
\
1012-
spin_unlock(&dd->lock); \
1011+
spin_unlock(dd->lock); \
10131012
} \
10141013
\
10151014
static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \

0 commit comments

Comments (0)