Skip to content

Commit 221dbe5

Browse files
committed
Merge branch 'block-7.1' into for-next
* block-7.1:
  zloop: remove irq-safe locking
  zloop: factor out zloop_mark_{full,empty} helpers
  zloop: set RQF_QUIET when completing requests on deleted devices
  zloop: improve the unaligned write pointer warning
  zloop: use vfs_truncate
  zloop: fix write pointer calculation in zloop_forget_cache
2 parents 05f08b4 + 64b437c commit 221dbe5

1 file changed

Lines changed: 59 additions & 64 deletions

File tree

drivers/block/zloop.c

Lines changed: 59 additions & 64 deletions
Original file line numberDiff line numberDiff line change
@@ -288,12 +288,29 @@ static bool zloop_do_open_zone(struct zloop_device *zlo,
288288
}
289289
}
290290

291+
static void zloop_mark_full(struct zloop_device *zlo, struct zloop_zone *zone)
292+
{
293+
lockdep_assert_held(&zone->wp_lock);
294+
295+
zloop_lru_remove_open_zone(zlo, zone);
296+
zone->cond = BLK_ZONE_COND_FULL;
297+
zone->wp = ULLONG_MAX;
298+
}
299+
300+
static void zloop_mark_empty(struct zloop_device *zlo, struct zloop_zone *zone)
301+
{
302+
lockdep_assert_held(&zone->wp_lock);
303+
304+
zloop_lru_remove_open_zone(zlo, zone);
305+
zone->cond = BLK_ZONE_COND_EMPTY;
306+
zone->wp = zone->start;
307+
}
308+
291309
static int zloop_update_seq_zone(struct zloop_device *zlo, unsigned int zone_no)
292310
{
293311
struct zloop_zone *zone = &zlo->zones[zone_no];
294312
struct kstat stat;
295313
sector_t file_sectors;
296-
unsigned long flags;
297314
int ret;
298315

299316
lockdep_assert_held(&zone->lock);
@@ -313,28 +330,24 @@ static int zloop_update_seq_zone(struct zloop_device *zlo, unsigned int zone_no)
313330
return -EINVAL;
314331
}
315332

316-
if (file_sectors & ((zlo->block_size >> SECTOR_SHIFT) - 1)) {
317-
pr_err("Zone %u file size not aligned to block size %u\n",
318-
zone_no, zlo->block_size);
333+
if (!IS_ALIGNED(stat.size, zlo->block_size)) {
334+
pr_err("Zone %u file size (%llu) not aligned to block size %u\n",
335+
zone_no, stat.size, zlo->block_size);
319336
return -EINVAL;
320337
}
321338

322-
spin_lock_irqsave(&zone->wp_lock, flags);
339+
spin_lock(&zone->wp_lock);
323340
if (!file_sectors) {
324-
zloop_lru_remove_open_zone(zlo, zone);
325-
zone->cond = BLK_ZONE_COND_EMPTY;
326-
zone->wp = zone->start;
341+
zloop_mark_empty(zlo, zone);
327342
} else if (file_sectors == zlo->zone_capacity) {
328-
zloop_lru_remove_open_zone(zlo, zone);
329-
zone->cond = BLK_ZONE_COND_FULL;
330-
zone->wp = ULLONG_MAX;
343+
zloop_mark_full(zlo, zone);
331344
} else {
332345
if (zone->cond != BLK_ZONE_COND_IMP_OPEN &&
333346
zone->cond != BLK_ZONE_COND_EXP_OPEN)
334347
zone->cond = BLK_ZONE_COND_CLOSED;
335348
zone->wp = zone->start + file_sectors;
336349
}
337-
spin_unlock_irqrestore(&zone->wp_lock, flags);
350+
spin_unlock(&zone->wp_lock);
338351

339352
return 0;
340353
}
@@ -367,7 +380,6 @@ static int zloop_open_zone(struct zloop_device *zlo, unsigned int zone_no)
367380
static int zloop_close_zone(struct zloop_device *zlo, unsigned int zone_no)
368381
{
369382
struct zloop_zone *zone = &zlo->zones[zone_no];
370-
unsigned long flags;
371383
int ret = 0;
372384

373385
if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
@@ -386,13 +398,13 @@ static int zloop_close_zone(struct zloop_device *zlo, unsigned int zone_no)
386398
break;
387399
case BLK_ZONE_COND_IMP_OPEN:
388400
case BLK_ZONE_COND_EXP_OPEN:
389-
spin_lock_irqsave(&zone->wp_lock, flags);
401+
spin_lock(&zone->wp_lock);
390402
zloop_lru_remove_open_zone(zlo, zone);
391403
if (zone->wp == zone->start)
392404
zone->cond = BLK_ZONE_COND_EMPTY;
393405
else
394406
zone->cond = BLK_ZONE_COND_CLOSED;
395-
spin_unlock_irqrestore(&zone->wp_lock, flags);
407+
spin_unlock(&zone->wp_lock);
396408
break;
397409
case BLK_ZONE_COND_EMPTY:
398410
case BLK_ZONE_COND_FULL:
@@ -410,7 +422,6 @@ static int zloop_close_zone(struct zloop_device *zlo, unsigned int zone_no)
410422
static int zloop_reset_zone(struct zloop_device *zlo, unsigned int zone_no)
411423
{
412424
struct zloop_zone *zone = &zlo->zones[zone_no];
413-
unsigned long flags;
414425
int ret = 0;
415426

416427
if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
@@ -428,12 +439,10 @@ static int zloop_reset_zone(struct zloop_device *zlo, unsigned int zone_no)
428439
goto unlock;
429440
}
430441

431-
spin_lock_irqsave(&zone->wp_lock, flags);
432-
zloop_lru_remove_open_zone(zlo, zone);
433-
zone->cond = BLK_ZONE_COND_EMPTY;
434-
zone->wp = zone->start;
442+
spin_lock(&zone->wp_lock);
443+
zloop_mark_empty(zlo, zone);
435444
clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
436-
spin_unlock_irqrestore(&zone->wp_lock, flags);
445+
spin_unlock(&zone->wp_lock);
437446

438447
unlock:
439448
mutex_unlock(&zone->lock);
@@ -458,7 +467,6 @@ static int zloop_reset_all_zones(struct zloop_device *zlo)
458467
static int zloop_finish_zone(struct zloop_device *zlo, unsigned int zone_no)
459468
{
460469
struct zloop_zone *zone = &zlo->zones[zone_no];
461-
unsigned long flags;
462470
int ret = 0;
463471

464472
if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
@@ -476,12 +484,10 @@ static int zloop_finish_zone(struct zloop_device *zlo, unsigned int zone_no)
476484
goto unlock;
477485
}
478486

479-
spin_lock_irqsave(&zone->wp_lock, flags);
480-
zloop_lru_remove_open_zone(zlo, zone);
481-
zone->cond = BLK_ZONE_COND_FULL;
482-
zone->wp = ULLONG_MAX;
487+
spin_lock(&zone->wp_lock);
488+
zloop_mark_full(zlo, zone);
483489
clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
484-
spin_unlock_irqrestore(&zone->wp_lock, flags);
490+
spin_unlock(&zone->wp_lock);
485491

486492
unlock:
487493
mutex_unlock(&zone->lock);
@@ -571,10 +577,9 @@ static int zloop_seq_write_prep(struct zloop_cmd *cmd)
571577
bool is_append = req_op(rq) == REQ_OP_ZONE_APPEND;
572578
struct zloop_zone *zone = &zlo->zones[zone_no];
573579
sector_t zone_end = zone->start + zlo->zone_capacity;
574-
unsigned long flags;
575580
int ret = 0;
576581

577-
spin_lock_irqsave(&zone->wp_lock, flags);
582+
spin_lock(&zone->wp_lock);
578583

579584
/*
580585
* Zone append operations always go at the current write pointer, but
@@ -616,14 +621,11 @@ static int zloop_seq_write_prep(struct zloop_cmd *cmd)
616621
*/
617622
if (!is_append || !zlo->ordered_zone_append) {
618623
zone->wp += nr_sectors;
619-
if (zone->wp == zone_end) {
620-
zloop_lru_remove_open_zone(zlo, zone);
621-
zone->cond = BLK_ZONE_COND_FULL;
622-
zone->wp = ULLONG_MAX;
623-
}
624+
if (zone->wp == zone_end)
625+
zloop_mark_full(zlo, zone);
624626
}
625627
out_unlock:
626-
spin_unlock_irqrestore(&zone->wp_lock, flags);
628+
spin_unlock(&zone->wp_lock);
627629
return ret;
628630
}
629631

@@ -861,25 +863,21 @@ static bool zloop_set_zone_append_sector(struct request *rq)
861863
struct zloop_zone *zone = &zlo->zones[zone_no];
862864
sector_t zone_end = zone->start + zlo->zone_capacity;
863865
sector_t nr_sectors = blk_rq_sectors(rq);
864-
unsigned long flags;
865866

866-
spin_lock_irqsave(&zone->wp_lock, flags);
867+
spin_lock(&zone->wp_lock);
867868

868869
if (zone->cond == BLK_ZONE_COND_FULL ||
869870
zone->wp + nr_sectors > zone_end) {
870-
spin_unlock_irqrestore(&zone->wp_lock, flags);
871+
spin_unlock(&zone->wp_lock);
871872
return false;
872873
}
873874

874875
rq->__sector = zone->wp;
875876
zone->wp += blk_rq_sectors(rq);
876-
if (zone->wp >= zone_end) {
877-
zloop_lru_remove_open_zone(zlo, zone);
878-
zone->cond = BLK_ZONE_COND_FULL;
879-
zone->wp = ULLONG_MAX;
880-
}
877+
if (zone->wp >= zone_end)
878+
zloop_mark_full(zlo, zone);
881879

882-
spin_unlock_irqrestore(&zone->wp_lock, flags);
880+
spin_unlock(&zone->wp_lock);
883881

884882
return true;
885883
}
@@ -891,8 +889,10 @@ static blk_status_t zloop_queue_rq(struct blk_mq_hw_ctx *hctx,
891889
struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq);
892890
struct zloop_device *zlo = rq->q->queuedata;
893891

894-
if (data_race(READ_ONCE(zlo->state)) == Zlo_deleting)
892+
if (data_race(READ_ONCE(zlo->state)) == Zlo_deleting) {
893+
rq->rq_flags |= RQF_QUIET;
895894
return BLK_STS_IOERR;
895+
}
896896

897897
/*
898898
* If we need to strongly order zone append operations, set the request
@@ -938,7 +938,6 @@ static int zloop_report_zones(struct gendisk *disk, sector_t sector,
938938
struct zloop_device *zlo = disk->private_data;
939939
struct blk_zone blkz = {};
940940
unsigned int first, i;
941-
unsigned long flags;
942941
int ret;
943942

944943
first = disk_zone_no(disk, sector);
@@ -962,9 +961,9 @@ static int zloop_report_zones(struct gendisk *disk, sector_t sector,
962961

963962
blkz.start = zone->start;
964963
blkz.len = zlo->zone_size;
965-
spin_lock_irqsave(&zone->wp_lock, flags);
964+
spin_lock(&zone->wp_lock);
966965
blkz.wp = zone->wp;
967-
spin_unlock_irqrestore(&zone->wp_lock, flags);
966+
spin_unlock(&zone->wp_lock);
968967
blkz.cond = zone->cond;
969968
if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) {
970969
blkz.type = BLK_ZONE_TYPE_CONVENTIONAL;
@@ -1363,20 +1362,6 @@ static int zloop_ctl_add(struct zloop_options *opts)
13631362
return ret;
13641363
}
13651364

1366-
static void zloop_truncate(struct file *file, loff_t pos)
1367-
{
1368-
struct mnt_idmap *idmap = file_mnt_idmap(file);
1369-
struct dentry *dentry = file_dentry(file);
1370-
struct iattr newattrs;
1371-
1372-
newattrs.ia_size = pos;
1373-
newattrs.ia_valid = ATTR_SIZE;
1374-
1375-
inode_lock(dentry->d_inode);
1376-
notify_change(idmap, dentry, &newattrs, NULL);
1377-
inode_unlock(dentry->d_inode);
1378-
}
1379-
13801365
static void zloop_forget_cache(struct zloop_device *zlo)
13811366
{
13821367
unsigned int i;
@@ -1401,8 +1386,18 @@ static void zloop_forget_cache(struct zloop_device *zlo)
14011386
zlo->disk->part0, ret);
14021387
continue;
14031388
}
1404-
if (old_wp < zone->wp)
1405-
zloop_truncate(file, old_wp);
1389+
1390+
if (old_wp > zone->wp)
1391+
continue;
1392+
/*
1393+
* This should not happen: if we recorded a full zone, it can't
1394+
* be active.
1395+
*/
1396+
if (WARN_ON_ONCE(old_wp == ULLONG_MAX))
1397+
continue;
1398+
1399+
vfs_truncate(&file->f_path,
1400+
(old_wp - zone->start) << SECTOR_SHIFT);
14061401
}
14071402
}
14081403

0 commit comments

Comments
 (0)