Skip to content

Commit deeebee

Browse files
ricardokojokawasaki
authored and committed
nullblk: main: use lock guards
Use guard() and scoped_guard() for handling mutex and spin locks instead of manually locking and unlocking. This prevents forgotten locks due to early exits and removes the need for gotos. Signed-off-by: Ricardo H H Kojo <[email protected]> Co-developed-by: Ellian Carlos <[email protected]> Signed-off-by: Ellian Carlos <[email protected]> Co-developed-by: Gabriel B L de Oliveira <[email protected]> Signed-off-by: Gabriel B L de Oliveira <[email protected]>
1 parent 6b4d829 commit deeebee

2 files changed

Lines changed: 32 additions & 43 deletions

File tree

drivers/block/null_blk/main.c

Lines changed: 31 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -423,9 +423,8 @@ static int nullb_apply_submit_queues(struct nullb_device *dev,
423423
{
424424
int ret;
425425

426-
mutex_lock(&lock);
426+
guard(mutex)(&lock);
427427
ret = nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
428-
mutex_unlock(&lock);
429428

430429
return ret;
431430
}
@@ -435,9 +434,8 @@ static int nullb_apply_poll_queues(struct nullb_device *dev,
435434
{
436435
int ret;
437436

438-
mutex_lock(&lock);
437+
guard(mutex)(&lock);
439438
ret = nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
440-
mutex_unlock(&lock);
441439

442440
return ret;
443441
}
@@ -493,15 +491,15 @@ static ssize_t nullb_device_power_store(struct config_item *item,
493491
return ret;
494492

495493
ret = count;
496-
mutex_lock(&lock);
494+
guard(mutex)(&lock);
497495
if (!dev->power && newp) {
498496
if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
499-
goto out;
497+
return ret;
500498

501499
ret = null_add_dev(dev);
502500
if (ret) {
503501
clear_bit(NULLB_DEV_FL_UP, &dev->flags);
504-
goto out;
502+
return ret;
505503
}
506504

507505
set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
@@ -515,8 +513,6 @@ static ssize_t nullb_device_power_store(struct config_item *item,
515513
clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
516514
}
517515

518-
out:
519-
mutex_unlock(&lock);
520516
return ret;
521517
}
522518

@@ -707,10 +703,9 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
707703
struct nullb_device *dev = to_nullb_device(item);
708704

709705
if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
710-
mutex_lock(&lock);
706+
guard(mutex)(&lock);
711707
dev->power = false;
712708
null_del_dev(dev->nullb);
713-
mutex_unlock(&lock);
714709
}
715710
nullb_del_fault_config(dev);
716711
config_item_put(item);
@@ -1205,7 +1200,7 @@ blk_status_t null_handle_discard(struct nullb_device *dev,
12051200
size_t n = nr_sectors << SECTOR_SHIFT;
12061201
size_t temp;
12071202

1208-
spin_lock_irq(&nullb->lock);
1203+
guard(spinlock_irq)(&nullb->lock);
12091204
while (n > 0) {
12101205
temp = min_t(size_t, n, dev->blocksize);
12111206
null_free_sector(nullb, sector, false);
@@ -1214,7 +1209,6 @@ blk_status_t null_handle_discard(struct nullb_device *dev,
12141209
sector += temp >> SECTOR_SHIFT;
12151210
n -= temp;
12161211
}
1217-
spin_unlock_irq(&nullb->lock);
12181212

12191213
return BLK_STS_OK;
12201214
}
@@ -1226,7 +1220,7 @@ static blk_status_t null_handle_flush(struct nullb *nullb)
12261220
if (!null_cache_active(nullb))
12271221
return 0;
12281222

1229-
spin_lock_irq(&nullb->lock);
1223+
guard(spinlock_irq)(&nullb->lock);
12301224
while (true) {
12311225
err = null_make_cache_space(nullb,
12321226
nullb->dev->cache_size * 1024 * 1024);
@@ -1235,7 +1229,6 @@ static blk_status_t null_handle_flush(struct nullb *nullb)
12351229
}
12361230

12371231
WARN_ON(!radix_tree_empty(&nullb->dev->cache));
1238-
spin_unlock_irq(&nullb->lock);
12391232
return errno_to_blk_status(err);
12401233
}
12411234

@@ -1292,7 +1285,7 @@ static blk_status_t null_handle_data_transfer(struct nullb_cmd *cmd,
12921285
struct req_iterator iter;
12931286
struct bio_vec bvec;
12941287

1295-
spin_lock_irq(&nullb->lock);
1288+
guard(spinlock_irq)(&nullb->lock);
12961289
rq_for_each_segment(bvec, rq, iter) {
12971290
len = bvec.bv_len;
12981291
if (transferred_bytes + len > max_bytes)
@@ -1307,7 +1300,6 @@ static blk_status_t null_handle_data_transfer(struct nullb_cmd *cmd,
13071300
if (transferred_bytes >= max_bytes)
13081301
break;
13091302
}
1310-
spin_unlock_irq(&nullb->lock);
13111303

13121304
return err;
13131305
}
@@ -1592,11 +1584,11 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
15921584
int nr = 0;
15931585
struct request *rq;
15941586

1595-
spin_lock(&nq->poll_lock);
1596-
list_splice_init(&nq->poll_list, &list);
1597-
list_for_each_entry(rq, &list, queuelist)
1598-
blk_mq_set_request_complete(rq);
1599-
spin_unlock(&nq->poll_lock);
1587+
scoped_guard(spinlock, &nq->poll_lock) {
1588+
list_splice_init(&nq->poll_list, &list);
1589+
list_for_each_entry(rq, &list, queuelist)
1590+
blk_mq_set_request_complete(rq);
1591+
}
16001592

16011593
while (!list_empty(&list)) {
16021594
struct nullb_cmd *cmd;
@@ -1624,14 +1616,12 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
16241616
if (hctx->type == HCTX_TYPE_POLL) {
16251617
struct nullb_queue *nq = hctx->driver_data;
16261618

1627-
spin_lock(&nq->poll_lock);
1628-
/* The request may have completed meanwhile. */
1629-
if (blk_mq_request_completed(rq)) {
1630-
spin_unlock(&nq->poll_lock);
1631-
return BLK_EH_DONE;
1619+
scoped_guard(spinlock, &nq->poll_lock) {
1620+
/* The request may have completed meanwhile. */
1621+
if (blk_mq_request_completed(rq))
1622+
return BLK_EH_DONE;
1623+
list_del_init(&rq->queuelist);
16321624
}
1633-
list_del_init(&rq->queuelist);
1634-
spin_unlock(&nq->poll_lock);
16351625
}
16361626

16371627
pr_info("rq %p timed out\n", rq);
@@ -1692,9 +1682,9 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
16921682
blk_mq_start_request(rq);
16931683

16941684
if (is_poll) {
1695-
spin_lock(&nq->poll_lock);
1696-
list_add_tail(&rq->queuelist, &nq->poll_list);
1697-
spin_unlock(&nq->poll_lock);
1685+
scoped_guard(spinlock, &nq->poll_lock) {
1686+
list_add_tail(&rq->queuelist, &nq->poll_list);
1687+
}
16981688
return BLK_STS_OK;
16991689
}
17001690
if (cmd->fake_timeout)
@@ -2081,14 +2071,13 @@ static struct nullb *null_find_dev_by_name(const char *name)
20812071
{
20822072
struct nullb *nullb = NULL, *nb;
20832073

2084-
mutex_lock(&lock);
2074+
guard(mutex)(&lock);
20852075
list_for_each_entry(nb, &nullb_list, list) {
20862076
if (strcmp(nb->disk_name, name) == 0) {
20872077
nullb = nb;
20882078
break;
20892079
}
20902080
}
2091-
mutex_unlock(&lock);
20922081

20932082
return nullb;
20942083
}
@@ -2101,10 +2090,9 @@ static int null_create_dev(void)
21012090
dev = null_alloc_dev();
21022091
if (!dev)
21032092
return -ENOMEM;
2104-
2105-
mutex_lock(&lock);
2106-
ret = null_add_dev(dev);
2107-
mutex_unlock(&lock);
2093+
scoped_guard(mutex, &lock) {
2094+
ret = null_add_dev(dev);
2095+
}
21082096
if (ret) {
21092097
null_free_dev(dev);
21102098
return ret;
@@ -2202,12 +2190,12 @@ static void __exit null_exit(void)
22022190

22032191
unregister_blkdev(null_major, "nullb");
22042192

2205-
mutex_lock(&lock);
2206-
while (!list_empty(&nullb_list)) {
2207-
nullb = list_entry(nullb_list.next, struct nullb, list);
2208-
null_destroy_dev(nullb);
2193+
scoped_guard(mutex, &lock) {
2194+
while (!list_empty(&nullb_list)) {
2195+
nullb = list_entry(nullb_list.next, struct nullb, list);
2196+
null_destroy_dev(nullb);
2197+
}
22092198
}
2210-
mutex_unlock(&lock);
22112199

22122200
if (tag_set.ops)
22132201
blk_mq_free_tag_set(&tag_set);

drivers/block/null_blk/null_blk.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
#include <linux/fault-inject.h>
1515
#include <linux/spinlock.h>
1616
#include <linux/mutex.h>
17+
#include <linux/cleanup.h>
1718

1819
struct nullb_cmd {
1920
blk_status_t error;

0 commit comments

Comments (0)