Skip to content

Commit b12cfca

Browse files
Ming Lei authored and gregkh committed
block: always verify unfreeze lock on the owner task
commit 6a78699 upstream. commit f1be178 ("block: model freeze & enter queue as lock for supporting lockdep") tries to apply lockdep for verifying freeze & unfreeze. However, the verification is only done the outmost freeze and unfreeze. This way is actually not correct because q->mq_freeze_depth still may drop to zero on other task instead of the freeze owner task. Fix this issue by always verifying the last unfreeze lock on the owner task context, and make sure both the outmost freeze & unfreeze are verified in the current task. Fixes: f1be178 ("block: model freeze & enter queue as lock for supporting lockdep") Signed-off-by: Ming Lei <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jens Axboe <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent a14ab9f commit b12cfca

4 files changed

Lines changed: 61 additions & 10 deletions

File tree

block/blk-core.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -287,7 +287,7 @@ bool blk_queue_start_drain(struct request_queue *q)
287287
* entering queue, so we call blk_freeze_queue_start() to
288288
* prevent I/O from crossing blk_queue_enter().
289289
*/
290-
bool freeze = __blk_freeze_queue_start(q);
290+
bool freeze = __blk_freeze_queue_start(q, current);
291291
if (queue_is_mq(q))
292292
blk_mq_wake_waiters(q);
293293
/* Make blk_queue_enter() reexamine the DYING flag. */

block/blk-mq.c

Lines changed: 54 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -120,28 +120,74 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
120120
inflight[1] = mi.inflight[1];
121121
}
122122

123-
bool __blk_freeze_queue_start(struct request_queue *q)
123+
#ifdef CONFIG_LOCKDEP
124+
static bool blk_freeze_set_owner(struct request_queue *q,
125+
struct task_struct *owner)
124126
{
125-
int freeze;
127+
if (!owner)
128+
return false;
129+
130+
if (!q->mq_freeze_depth) {
131+
q->mq_freeze_owner = owner;
132+
q->mq_freeze_owner_depth = 1;
133+
return true;
134+
}
135+
136+
if (owner == q->mq_freeze_owner)
137+
q->mq_freeze_owner_depth += 1;
138+
return false;
139+
}
140+
141+
/* verify the last unfreeze in owner context */
142+
static bool blk_unfreeze_check_owner(struct request_queue *q)
143+
{
144+
if (!q->mq_freeze_owner)
145+
return false;
146+
if (q->mq_freeze_owner != current)
147+
return false;
148+
if (--q->mq_freeze_owner_depth == 0) {
149+
q->mq_freeze_owner = NULL;
150+
return true;
151+
}
152+
return false;
153+
}
154+
155+
#else
156+
157+
static bool blk_freeze_set_owner(struct request_queue *q,
158+
struct task_struct *owner)
159+
{
160+
return false;
161+
}
162+
163+
static bool blk_unfreeze_check_owner(struct request_queue *q)
164+
{
165+
return false;
166+
}
167+
#endif
168+
169+
bool __blk_freeze_queue_start(struct request_queue *q,
170+
struct task_struct *owner)
171+
{
172+
bool freeze;
126173

127174
mutex_lock(&q->mq_freeze_lock);
175+
freeze = blk_freeze_set_owner(q, owner);
128176
if (++q->mq_freeze_depth == 1) {
129177
percpu_ref_kill(&q->q_usage_counter);
130178
mutex_unlock(&q->mq_freeze_lock);
131179
if (queue_is_mq(q))
132180
blk_mq_run_hw_queues(q, false);
133-
freeze = true;
134181
} else {
135182
mutex_unlock(&q->mq_freeze_lock);
136-
freeze = false;
137183
}
138184

139185
return freeze;
140186
}
141187

142188
void blk_freeze_queue_start(struct request_queue *q)
143189
{
144-
if (__blk_freeze_queue_start(q))
190+
if (__blk_freeze_queue_start(q, current))
145191
blk_freeze_acquire_lock(q, false, false);
146192
}
147193
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
@@ -190,7 +236,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
190236

191237
bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
192238
{
193-
int unfreeze = false;
239+
bool unfreeze;
194240

195241
mutex_lock(&q->mq_freeze_lock);
196242
if (force_atomic)
@@ -200,8 +246,8 @@ bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
200246
if (!q->mq_freeze_depth) {
201247
percpu_ref_resurrect(&q->q_usage_counter);
202248
wake_up_all(&q->mq_freeze_wq);
203-
unfreeze = true;
204249
}
250+
unfreeze = blk_unfreeze_check_owner(q);
205251
mutex_unlock(&q->mq_freeze_lock);
206252

207253
return unfreeze;
@@ -223,7 +269,7 @@ EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
223269
*/
224270
void blk_freeze_queue_start_non_owner(struct request_queue *q)
225271
{
226-
__blk_freeze_queue_start(q);
272+
__blk_freeze_queue_start(q, NULL);
227273
}
228274
EXPORT_SYMBOL_GPL(blk_freeze_queue_start_non_owner);
229275

block/blk.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,8 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
3838
void blk_freeze_queue(struct request_queue *q);
3939
bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
4040
bool blk_queue_start_drain(struct request_queue *q);
41-
bool __blk_freeze_queue_start(struct request_queue *q);
41+
bool __blk_freeze_queue_start(struct request_queue *q,
42+
struct task_struct *owner);
4243
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
4344
void submit_bio_noacct_nocheck(struct bio *bio);
4445
void bio_await_chain(struct bio *bio);

include/linux/blkdev.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -572,6 +572,10 @@ struct request_queue {
572572
struct throtl_data *td;
573573
#endif
574574
struct rcu_head rcu_head;
575+
#ifdef CONFIG_LOCKDEP
576+
struct task_struct *mq_freeze_owner;
577+
int mq_freeze_owner_depth;
578+
#endif
575579
wait_queue_head_t mq_freeze_wq;
576580
/*
577581
* Protect concurrent access to q_usage_counter by

0 commit comments

Comments
 (0)