 #include "internal.h"
 #include "trace.h"
 
-static DEFINE_SPINLOCK(failed_read_lock);
-static struct bio_list failed_read_list = BIO_EMPTY_LIST;
-
 static u32 __iomap_read_end_io(struct bio *bio, int error)
 {
 	struct folio_iter fi;
@@ -27,49 +24,10 @@ static u32 __iomap_read_end_io(struct bio *bio, int error)
 	return folio_count;
 }
 
-static void
-iomap_fail_reads(
-	struct work_struct	*work)
-{
-	struct bio		*bio;
-	struct bio_list		tmp = BIO_EMPTY_LIST;
-	unsigned long		flags;
-
-	spin_lock_irqsave(&failed_read_lock, flags);
-	bio_list_merge_init(&tmp, &failed_read_list);
-	spin_unlock_irqrestore(&failed_read_lock, flags);
-
-	while ((bio = bio_list_pop(&tmp)) != NULL) {
-		__iomap_read_end_io(bio, blk_status_to_errno(bio->bi_status));
-		cond_resched();
-	}
-}
-
-static DECLARE_WORK(failed_read_work, iomap_fail_reads);
-
-static void iomap_fail_buffered_read(struct bio *bio)
-{
-	unsigned long flags;
-
-	/*
-	 * Bounce I/O errors to a workqueue to avoid nested i_lock acquisitions
-	 * in the fserror code. The caller no longer owns the bio reference
-	 * after the spinlock drops.
-	 */
-	spin_lock_irqsave(&failed_read_lock, flags);
-	if (bio_list_empty(&failed_read_list))
-		WARN_ON_ONCE(!schedule_work(&failed_read_work));
-	bio_list_add(&failed_read_list, bio);
-	spin_unlock_irqrestore(&failed_read_lock, flags);
-}
-
 static void iomap_read_end_io(struct bio *bio)
 {
-	if (bio->bi_status) {
-		iomap_fail_buffered_read(bio);
+	if (bio->bi_status && bio_complete_in_task(bio))
 		return;
-	}
-
 	__iomap_read_end_io(bio, 0);
 }
 
0 commit comments