
Commit 4f92d05

Merge branch 'block-6.17' into for-next
* block-6.17:
  blk-ioc: don't hold queue_lock for ioc_lookup_icq()
  block: Enforce power-of-2 physical block size
  block: avoid possible overflow for chunk_sectors check in blk_stack_limits()
  block: Improve read ahead size for rotational devices
2 parents: 86aa721 + 5421681

3 files changed: 24 additions & 30 deletions


block/bfq-iosched.c

Lines changed: 2 additions & 16 deletions
@@ -454,17 +454,10 @@ static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
  */
 static struct bfq_io_cq *bfq_bic_lookup(struct request_queue *q)
 {
-	struct bfq_io_cq *icq;
-	unsigned long flags;
-
 	if (!current->io_context)
 		return NULL;
 
-	spin_lock_irqsave(&q->queue_lock, flags);
-	icq = icq_to_bic(ioc_lookup_icq(q));
-	spin_unlock_irqrestore(&q->queue_lock, flags);
-
-	return icq;
+	return icq_to_bic(ioc_lookup_icq(q));
 }
 
 /*
@@ -2457,15 +2450,8 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
 			  unsigned int nr_segs)
 {
 	struct bfq_data *bfqd = q->elevator->elevator_data;
-	struct request *free = NULL;
-	/*
-	 * bfq_bic_lookup grabs the queue_lock: invoke it now and
-	 * store its return value for later use, to avoid nesting
-	 * queue_lock inside the bfqd->lock. We assume that the bic
-	 * returned by bfq_bic_lookup does not go away before
-	 * bfqd->lock is taken.
-	 */
 	struct bfq_io_cq *bic = bfq_bic_lookup(q);
+	struct request *free = NULL;
 	bool ret;
 
 	spin_lock_irq(&bfqd->lock);

block/blk-ioc.c

Lines changed: 6 additions & 10 deletions
@@ -308,24 +308,23 @@ int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
 
 #ifdef CONFIG_BLK_ICQ
 /**
- * ioc_lookup_icq - lookup io_cq from ioc
+ * ioc_lookup_icq - lookup io_cq from ioc in io issue path
  * @q: the associated request_queue
  *
  * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called
- * with @q->queue_lock held.
+ * from io issue path, either return NULL if current issue io to @q for the
+ * first time, or return a valid icq.
  */
 struct io_cq *ioc_lookup_icq(struct request_queue *q)
 {
 	struct io_context *ioc = current->io_context;
 	struct io_cq *icq;
 
-	lockdep_assert_held(&q->queue_lock);
-
 	/*
 	 * icq's are indexed from @ioc using radix tree and hint pointer,
-	 * both of which are protected with RCU. All removals are done
-	 * holding both q and ioc locks, and we're holding q lock - if we
-	 * find a icq which points to us, it's guaranteed to be valid.
+	 * both of which are protected with RCU, io issue path ensures that
+	 * both request_queue and current task are valid, the found icq
+	 * is guaranteed to be valid until the io is done.
 	 */
 	rcu_read_lock();
 	icq = rcu_dereference(ioc->icq_hint);
@@ -419,10 +418,7 @@ struct io_cq *ioc_find_get_icq(struct request_queue *q)
 		task_unlock(current);
 	} else {
 		get_io_context(ioc);
-
-		spin_lock_irq(&q->queue_lock);
 		icq = ioc_lookup_icq(q);
-		spin_unlock_irq(&q->queue_lock);
 	}
 
 	if (!icq) {
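Why dropping the lock is safe: previously, icq removal took both the queue lock and the io_context lock, so a lookup under @q->queue_lock could not race with a free. Per the updated comment, that guarantee now comes from the io issue path itself, which keeps both the request_queue and the current task valid while the io is in flight, so the RCU-protected lookup alone suffices. A paraphrased sketch of how the lookup continues past the lines shown in the hunk above (drawn from the surrounding blk-ioc.c code, not the verbatim function):

	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);		/* fast path: last-used icq */
	if (!icq || icq->q != q)
		icq = radix_tree_lookup(&ioc->icq_tree, q->id);	/* slow path */
	rcu_read_unlock();
	return icq;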

block/blk-settings.c

Lines changed: 16 additions & 4 deletions
@@ -62,16 +62,24 @@ EXPORT_SYMBOL(blk_set_stacking_limits);
 void blk_apply_bdi_limits(struct backing_dev_info *bdi,
 		struct queue_limits *lim)
 {
+	u64 io_opt = lim->io_opt;
+
 	/*
 	 * For read-ahead of large files to be effective, we need to read ahead
-	 * at least twice the optimal I/O size.
+	 * at least twice the optimal I/O size. For rotational devices that do
+	 * not report an optimal I/O size (e.g. ATA HDDs), use the maximum I/O
+	 * size to avoid falling back to the (rather inefficient) small default
+	 * read-ahead size.
 	 *
 	 * There is no hardware limitation for the read-ahead size and the user
 	 * might have increased the read-ahead size through sysfs, so don't ever
 	 * decrease it.
 	 */
+	if (!io_opt && (lim->features & BLK_FEAT_ROTATIONAL))
+		io_opt = (u64)lim->max_sectors << SECTOR_SHIFT;
+
 	bdi->ra_pages = max3(bdi->ra_pages,
-			lim->io_opt * 2 / PAGE_SIZE,
+			io_opt * 2 >> PAGE_SHIFT,
 			VM_READAHEAD_PAGES);
 	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
 }
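A worked example of the new sizing, with illustrative numbers not taken from the patch: an ATA HDD that reports io_opt == 0 and max_sectors == 2560 (1280 KiB per I/O) on a 4 KiB-page system now gets io_opt = 2560 << 9 = 1310720 bytes, so ra_pages becomes 640 pages (2560 KiB) instead of bottoming out at the 32-page (128 KiB) VM_READAHEAD_PAGES default. The switch from dividing by PAGE_SIZE to shifting by PAGE_SHIFT plausibly keeps the math shift-only now that io_opt is a u64, where a plain 64-bit division would be a problem on 32-bit builds. A standalone C translation of the calculation (SECTOR_SHIFT, PAGE_SHIFT, VM_READAHEAD_PAGES, and max3 are hardcoded stand-ins for the kernel's versions):

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the kernel macros used by blk_apply_bdi_limits(). */
#define SECTOR_SHIFT		9
#define PAGE_SHIFT		12	/* 4 KiB pages */
#define VM_READAHEAD_PAGES	32	/* 128 KiB / PAGE_SIZE */

static uint64_t max3(uint64_t a, uint64_t b, uint64_t c)
{
	uint64_t m = a > b ? a : b;
	return m > c ? m : c;
}

int main(void)
{
	uint64_t io_opt = 0;		/* device reports no optimal I/O size */
	uint32_t max_sectors = 2560;	/* 1280 KiB per I/O, in 512-byte sectors */
	uint64_t ra_pages = 0;		/* fresh bdi, nothing raised via sysfs yet */
	int rotational = 1;		/* BLK_FEAT_ROTATIONAL set */

	/* The fallback added by the patch. */
	if (!io_opt && rotational)
		io_opt = (uint64_t)max_sectors << SECTOR_SHIFT;

	ra_pages = max3(ra_pages, io_opt * 2 >> PAGE_SHIFT, VM_READAHEAD_PAGES);
	printf("ra_pages = %llu (%llu KiB)\n",
	       (unsigned long long)ra_pages, (unsigned long long)ra_pages * 4);
	/* Prints 640 pages (2560 KiB); without the fallback it would be 32. */
	return 0;
}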
@@ -312,8 +320,12 @@ int blk_validate_limits(struct queue_limits *lim)
 		pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
 		return -EINVAL;
 	}
-	if (lim->physical_block_size < lim->logical_block_size)
+	if (lim->physical_block_size < lim->logical_block_size) {
 		lim->physical_block_size = lim->logical_block_size;
+	} else if (!is_power_of_2(lim->physical_block_size)) {
+		pr_warn("Invalid physical block size (%d)\n", lim->physical_block_size);
+		return -EINVAL;
+	}
 
 	/*
 	 * The minimum I/O size defaults to the physical block size unless
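For reference, the kernel's is_power_of_2() (include/linux/log2.h) is the classic bit trick n != 0 && (n & (n - 1)) == 0. The practical effect of the new branch: a physical_block_size such as 3072, which previously passed validation as long as it was at least the logical block size, is now rejected with -EINVAL. A small standalone check, reimplementing the helper for illustration:

#include <stdio.h>
#include <stdbool.h>

/* Reimplementation of the kernel's is_power_of_2() for illustration. */
static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	/* 4096 still validates; 3072 (e.g. a bogus driver-reported value)
	 * previously slid through and is now rejected with -EINVAL. */
	printf("4096: %d, 3072: %d\n", is_power_of_2(4096), is_power_of_2(3072));
	return 0;
}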
@@ -849,7 +861,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	}
 
 	/* chunk_sectors a multiple of the physical block size? */
-	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
+	if (t->chunk_sectors % (t->physical_block_size >> SECTOR_SHIFT)) {
 		t->chunk_sectors = 0;
 		t->flags |= BLK_FLAG_MISALIGNED;
 		ret = -1;
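The motivation for the modulo form: chunk_sectors is a 32-bit sector count, so the old byte-granularity shift wraps once chunk_sectors reaches 1 << 23 (a 4 GiB chunk). Because physical_block_size is now guaranteed to be a power of two (see the blk_validate_limits() hunk above), the sector-granularity modulo tests the same alignment bits without the overflowing intermediate. A standalone demonstration with illustrative values:

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9

int main(void)
{
	uint32_t chunk_sectors = 1U << 23;	/* a 4 GiB chunk, in 512-byte sectors */
	uint32_t physical_block_size = 4096;	/* bytes; power of two per the new check */

	/* Old form: the byte count needs 33 bits, so the u32 shift wraps to 0. */
	printf("chunk_sectors << 9 = %u (true value is 4 GiB)\n",
	       chunk_sectors << SECTOR_SHIFT);

	/* New form: compare at sector granularity, nothing can overflow. */
	printf("chunk_sectors %% (pbs >> 9) = %u\n",
	       chunk_sectors % (physical_block_size >> SECTOR_SHIFT));
	return 0;
}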
