@@ -421,25 +421,15 @@ static int nullb_update_nr_hw_queues(struct nullb_device *dev,
 static int nullb_apply_submit_queues(struct nullb_device *dev,
 				     unsigned int submit_queues)
 {
-	int ret;
-
-	mutex_lock(&lock);
-	ret = nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
-	mutex_unlock(&lock);
-
-	return ret;
+	guard(mutex)(&lock);
+	return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
 }
 
 static int nullb_apply_poll_queues(struct nullb_device *dev,
				    unsigned int poll_queues)
 {
-	int ret;
-
-	mutex_lock(&lock);
-	ret = nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
-	mutex_unlock(&lock);
-
-	return ret;
+	guard(mutex)(&lock);
+	return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
 }
 
 NULLB_DEVICE_ATTR(size, ulong, NULL);
@@ -493,15 +483,15 @@ static ssize_t nullb_device_power_store(struct config_item *item,
 		return ret;
 
 	ret = count;
-	mutex_lock(&lock);
+	guard(mutex)(&lock);
 	if (!dev->power && newp) {
 		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
-			goto out;
+			return ret;
 
 		ret = null_add_dev(dev);
 		if (ret) {
 			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
-			goto out;
+			return ret;
 		}
 
 		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
@@ -515,8 +505,6 @@ static ssize_t nullb_device_power_store(struct config_item *item,
 		clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
 	}
 
-out:
-	mutex_unlock(&lock);
 	return ret;
 }
 
@@ -707,10 +695,9 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
 	struct nullb_device *dev = to_nullb_device(item);
 
 	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
-		mutex_lock(&lock);
+		guard(mutex)(&lock);
 		dev->power = false;
 		null_del_dev(dev->nullb);
-		mutex_unlock(&lock);
 	}
 	nullb_del_fault_config(dev);
 	config_item_put(item);
@@ -1205,7 +1192,7 @@ blk_status_t null_handle_discard(struct nullb_device *dev,
 	size_t n = nr_sectors << SECTOR_SHIFT;
 	size_t temp;
 
-	spin_lock_irq(&nullb->lock);
+	guard(spinlock_irq)(&nullb->lock);
 	while (n > 0) {
 		temp = min_t(size_t, n, dev->blocksize);
 		null_free_sector(nullb, sector, false);
@@ -1214,7 +1201,6 @@ blk_status_t null_handle_discard(struct nullb_device *dev,
 		sector += temp >> SECTOR_SHIFT;
 		n -= temp;
 	}
-	spin_unlock_irq(&nullb->lock);
 
 	return BLK_STS_OK;
 }
@@ -1226,7 +1212,7 @@ static blk_status_t null_handle_flush(struct nullb *nullb)
 	if (!null_cache_active(nullb))
 		return 0;
 
-	spin_lock_irq(&nullb->lock);
+	guard(spinlock_irq)(&nullb->lock);
 	while (true) {
 		err = null_make_cache_space(nullb,
 			nullb->dev->cache_size * 1024 * 1024);
@@ -1235,7 +1221,6 @@ static blk_status_t null_handle_flush(struct nullb *nullb)
 	}
 
 	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
-	spin_unlock_irq(&nullb->lock);
 	return errno_to_blk_status(err);
 }
 
@@ -1292,7 +1277,7 @@ static blk_status_t null_handle_data_transfer(struct nullb_cmd *cmd,
 	struct req_iterator iter;
 	struct bio_vec bvec;
 
-	spin_lock_irq(&nullb->lock);
+	guard(spinlock_irq)(&nullb->lock);
 	rq_for_each_segment(bvec, rq, iter) {
 		len = bvec.bv_len;
 		if (transferred_bytes + len > max_bytes)
@@ -1307,7 +1292,6 @@ static blk_status_t null_handle_data_transfer(struct nullb_cmd *cmd,
 		if (transferred_bytes >= max_bytes)
 			break;
 	}
-	spin_unlock_irq(&nullb->lock);
 
 	return err;
 }
@@ -1592,11 +1576,11 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 	int nr = 0;
 	struct request *rq;
 
-	spin_lock(&nq->poll_lock);
-	list_splice_init(&nq->poll_list, &list);
-	list_for_each_entry(rq, &list, queuelist)
-		blk_mq_set_request_complete(rq);
-	spin_unlock(&nq->poll_lock);
+	scoped_guard(spinlock, &nq->poll_lock) {
+		list_splice_init(&nq->poll_list, &list);
+		list_for_each_entry(rq, &list, queuelist)
+			blk_mq_set_request_complete(rq);
+	}
 
 	while (!list_empty(&list)) {
 		struct nullb_cmd *cmd;
@@ -1624,14 +1608,12 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
 	if (hctx->type == HCTX_TYPE_POLL) {
 		struct nullb_queue *nq = hctx->driver_data;
 
-		spin_lock(&nq->poll_lock);
-		/* The request may have completed meanwhile. */
-		if (blk_mq_request_completed(rq)) {
-			spin_unlock(&nq->poll_lock);
-			return BLK_EH_DONE;
+		scoped_guard(spinlock, &nq->poll_lock) {
+			/* The request may have completed meanwhile. */
+			if (blk_mq_request_completed(rq))
+				return BLK_EH_DONE;
+			list_del_init(&rq->queuelist);
 		}
-		list_del_init(&rq->queuelist);
-		spin_unlock(&nq->poll_lock);
 	}
 
 	pr_info("rq %p timed out\n", rq);
@@ -1692,9 +1674,9 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_mq_start_request(rq);
 
 	if (is_poll) {
-		spin_lock(&nq->poll_lock);
-		list_add_tail(&rq->queuelist, &nq->poll_list);
-		spin_unlock(&nq->poll_lock);
+		scoped_guard(spinlock, &nq->poll_lock) {
+			list_add_tail(&rq->queuelist, &nq->poll_list);
+		}
 		return BLK_STS_OK;
 	}
 	if (cmd->fake_timeout)
@@ -2081,14 +2063,13 @@ static struct nullb *null_find_dev_by_name(const char *name)
 {
 	struct nullb *nullb = NULL, *nb;
 
-	mutex_lock(&lock);
+	guard(mutex)(&lock);
 	list_for_each_entry(nb, &nullb_list, list) {
 		if (strcmp(nb->disk_name, name) == 0) {
 			nullb = nb;
 			break;
 		}
 	}
-	mutex_unlock(&lock);
 
 	return nullb;
 }
@@ -2101,10 +2082,9 @@ static int null_create_dev(void)
 	dev = null_alloc_dev();
 	if (!dev)
 		return -ENOMEM;
-
-	mutex_lock(&lock);
-	ret = null_add_dev(dev);
-	mutex_unlock(&lock);
+	scoped_guard(mutex, &lock) {
+		ret = null_add_dev(dev);
+	}
 	if (ret) {
 		null_free_dev(dev);
 		return ret;
@@ -2202,12 +2182,12 @@ static void __exit null_exit(void)
 
 	unregister_blkdev(null_major, "nullb");
 
-	mutex_lock(&lock);
-	while (!list_empty(&nullb_list)) {
-		nullb = list_entry(nullb_list.next, struct nullb, list);
-		null_destroy_dev(nullb);
+	scoped_guard(mutex, &lock) {
+		while (!list_empty(&nullb_list)) {
+			nullb = list_entry(nullb_list.next, struct nullb, list);
+			null_destroy_dev(nullb);
+		}
 	}
-	mutex_unlock(&lock);
 
 	if (tag_set.ops)
 		blk_mq_free_tag_set(&tag_set);
0 commit comments