@@ -274,6 +274,46 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
274274 * lenp = plen ;
275275}
276276
277+ static inline bool iomap_block_needs_zeroing (const struct iomap_iter * iter ,
278+ loff_t pos )
279+ {
280+ const struct iomap * srcmap = iomap_iter_srcmap (iter );
281+
282+ return srcmap -> type != IOMAP_MAPPED ||
283+ (srcmap -> flags & IOMAP_F_NEW ) ||
284+ pos >= i_size_read (iter -> inode );
285+ }
286+
/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	/* Bytes of inline data: from the extent start up to the inode size. */
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t offset = offset_in_folio(folio, iomap->offset);

	/* Nothing to copy if the folio was already brought uptodate. */
	if (folio_test_uptodate(folio))
		return 0;

	/* The inline data must not claim more bytes than the mapping covers. */
	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	/*
	 * Inline data not starting at the folio head: allocate per-block
	 * state so the range below can be marked uptodate at sub-folio
	 * granularity — presumably the leading blocks are filled elsewhere.
	 */
	if (offset > 0)
		ifs_alloc(iter->inode, folio, iter->flags);

	/* Copy @size bytes at @offset and zero the remainder of the folio. */
	folio_fill_tail(folio, offset, iomap->inline_data, size);
	iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
	return 0;
}
315+
316+ #ifdef CONFIG_BLOCK
277317static void iomap_finish_folio_read (struct folio * folio , size_t off ,
278318 size_t len , int error )
279319{
@@ -313,45 +353,6 @@ struct iomap_readpage_ctx {
313353 struct readahead_control * rac ;
314354};
315355
316- /**
317- * iomap_read_inline_data - copy inline data into the page cache
318- * @iter: iteration structure
319- * @folio: folio to copy to
320- *
321- * Copy the inline data in @iter into @folio and zero out the rest of the folio.
322- * Only a single IOMAP_INLINE extent is allowed at the end of each file.
323- * Returns zero for success to complete the read, or the usual negative errno.
324- */
325- static int iomap_read_inline_data (const struct iomap_iter * iter ,
326- struct folio * folio )
327- {
328- const struct iomap * iomap = iomap_iter_srcmap (iter );
329- size_t size = i_size_read (iter -> inode ) - iomap -> offset ;
330- size_t offset = offset_in_folio (folio , iomap -> offset );
331-
332- if (folio_test_uptodate (folio ))
333- return 0 ;
334-
335- if (WARN_ON_ONCE (size > iomap -> length ))
336- return - EIO ;
337- if (offset > 0 )
338- ifs_alloc (iter -> inode , folio , iter -> flags );
339-
340- folio_fill_tail (folio , offset , iomap -> inline_data , size );
341- iomap_set_range_uptodate (folio , offset , folio_size (folio ) - offset );
342- return 0 ;
343- }
344-
345- static inline bool iomap_block_needs_zeroing (const struct iomap_iter * iter ,
346- loff_t pos )
347- {
348- const struct iomap * srcmap = iomap_iter_srcmap (iter );
349-
350- return srcmap -> type != IOMAP_MAPPED ||
351- (srcmap -> flags & IOMAP_F_NEW ) ||
352- pos >= i_size_read (iter -> inode );
353- }
354-
355356static int iomap_readpage_iter (struct iomap_iter * iter ,
356357 struct iomap_readpage_ctx * ctx )
357358{
@@ -544,6 +545,27 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
544545}
545546EXPORT_SYMBOL_GPL (iomap_readahead );
546547
/*
 * Synchronously read @len bytes at @pos from the source mapping's block
 * device into @folio using a single on-stack bio (one bio_vec, so @len must
 * fit in one folio segment). Returns 0 or the bio's completion error.
 */
static int iomap_read_folio_range(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t len)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
	bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
	/* Blocks until the read completes; no async context needed. */
	return submit_bio_wait(&bio);
}
560+ #else
/*
 * !CONFIG_BLOCK stub: without the block layer there is no bdev to read
 * from, so reaching this path is a caller bug — warn once and fail with
 * -EIO rather than crashing.
 */
static int iomap_read_folio_range(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t len)
{
	WARN_ON_ONCE(1);
	return -EIO;
}
567+ #endif /* CONFIG_BLOCK */
568+
547569/*
548570 * iomap_is_partially_uptodate checks whether blocks within a folio are
549571 * uptodate or not.
@@ -657,19 +679,6 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
657679 pos + len - 1 );
658680}
659681
660- static int iomap_read_folio_range (const struct iomap_iter * iter ,
661- struct folio * folio , loff_t pos , size_t len )
662- {
663- const struct iomap * srcmap = iomap_iter_srcmap (iter );
664- struct bio_vec bvec ;
665- struct bio bio ;
666-
667- bio_init (& bio , srcmap -> bdev , & bvec , 1 , REQ_OP_READ );
668- bio .bi_iter .bi_sector = iomap_sector (srcmap , pos );
669- bio_add_folio_nofail (& bio , folio , len , offset_in_folio (folio , pos ));
670- return submit_bio_wait (& bio );
671- }
672-
673682static int __iomap_write_begin (const struct iomap_iter * iter ,
674683 const struct iomap_write_ops * write_ops , size_t len ,
675684 struct folio * folio )
0 commit comments