@@ -1666,14 +1666,30 @@ static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos,
16661666 * At the end of a writeback pass, there will be a cached ioend remaining on the
16671667 * writepage context that the caller will need to submit.
16681668 */
1669- static int iomap_add_to_ioend (struct iomap_writepage_ctx * wpc ,
1670- struct folio * folio , loff_t pos , loff_t end_pos , unsigned len )
1669+ ssize_t iomap_add_to_ioend (struct iomap_writepage_ctx * wpc , struct folio * folio ,
1670+ loff_t pos , loff_t end_pos , unsigned int dirty_len )
16711671{
16721672 struct iomap_folio_state * ifs = folio -> private ;
16731673 size_t poff = offset_in_folio (folio , pos );
16741674 unsigned int ioend_flags = 0 ;
1675+ unsigned int map_len = min_t (u64 , dirty_len ,
1676+ wpc -> iomap .offset + wpc -> iomap .length - pos );
16751677 int error ;
16761678
1679+ trace_iomap_add_to_ioend (wpc -> inode , pos , dirty_len , & wpc -> iomap );
1680+
1681+ WARN_ON_ONCE (!folio -> private && map_len < dirty_len );
1682+
1683+ switch (wpc -> iomap .type ) {
1684+ case IOMAP_INLINE :
1685+ WARN_ON_ONCE (1 );
1686+ return - EIO ;
1687+ case IOMAP_HOLE :
1688+ return map_len ;
1689+ default :
1690+ break ;
1691+ }
1692+
16771693 if (wpc -> iomap .type == IOMAP_UNWRITTEN )
16781694 ioend_flags |= IOMAP_IOEND_UNWRITTEN ;
16791695 if (wpc -> iomap .flags & IOMAP_F_SHARED )
@@ -1691,11 +1707,11 @@ static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
16911707 wpc -> ioend = iomap_alloc_ioend (wpc , pos , ioend_flags );
16921708 }
16931709
1694- if (!bio_add_folio (& wpc -> ioend -> io_bio , folio , len , poff ))
1710+ if (!bio_add_folio (& wpc -> ioend -> io_bio , folio , map_len , poff ))
16951711 goto new_ioend ;
16961712
16971713 if (ifs )
1698- atomic_add (len , & ifs -> write_bytes_pending );
1714+ atomic_add (map_len , & ifs -> write_bytes_pending );
16991715
17001716 /*
17011717 * Clamp io_offset and io_size to the incore EOF so that ondisk
@@ -1738,63 +1754,39 @@ static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
17381754 * Note that this defeats the ability to chain the ioends of
17391755 * appending writes.
17401756 */
1741- wpc -> ioend -> io_size += len ;
1757+ wpc -> ioend -> io_size += map_len ;
17421758 if (wpc -> ioend -> io_offset + wpc -> ioend -> io_size > end_pos )
17431759 wpc -> ioend -> io_size = end_pos - wpc -> ioend -> io_offset ;
17441760
1745- wbc_account_cgroup_owner (wpc -> wbc , folio , len );
1746- return 0 ;
1761+ wbc_account_cgroup_owner (wpc -> wbc , folio , map_len );
1762+ return map_len ;
17471763}
1764+ EXPORT_SYMBOL_GPL (iomap_add_to_ioend );
17481765
1749- static int iomap_writepage_map_blocks (struct iomap_writepage_ctx * wpc ,
1750- struct folio * folio , u64 pos , u64 end_pos , unsigned dirty_len ,
1766+ static int iomap_writeback_range (struct iomap_writepage_ctx * wpc ,
1767+ struct folio * folio , u64 pos , u32 rlen , u64 end_pos ,
17511768 bool * wb_pending )
17521769{
1753- int error ;
1754-
17551770 do {
1756- unsigned map_len ;
1757-
1758- error = wpc -> ops -> map_blocks (wpc , wpc -> inode , pos , dirty_len );
1759- if (error )
1760- break ;
1761- trace_iomap_writepage_map (wpc -> inode , pos , dirty_len ,
1762- & wpc -> iomap );
1771+ ssize_t ret ;
17631772
1764- map_len = min_t (u64 , dirty_len ,
1765- wpc -> iomap .offset + wpc -> iomap .length - pos );
1766- WARN_ON_ONCE (!folio -> private && map_len < dirty_len );
1773+ ret = wpc -> ops -> writeback_range (wpc , folio , pos , rlen , end_pos );
1774+ if (WARN_ON_ONCE (ret == 0 || ret > rlen ))
1775+ return - EIO ;
1776+ if (ret < 0 )
1777+ return ret ;
1778+ rlen -= ret ;
1779+ pos += ret ;
17671780
1768- switch (wpc -> iomap .type ) {
1769- case IOMAP_INLINE :
1770- WARN_ON_ONCE (1 );
1771- error = - EIO ;
1772- break ;
1773- case IOMAP_HOLE :
1774- break ;
1775- default :
1776- error = iomap_add_to_ioend (wpc , folio , pos , end_pos ,
1777- map_len );
1778- if (!error )
1779- * wb_pending = true;
1780- break ;
1781- }
1782- dirty_len -= map_len ;
1783- pos += map_len ;
1784- } while (dirty_len && !error );
1781+ /*
1782+ * Holes are not written back by ->writeback_range , so track
1783+ * if we did handle anything that is not a hole here.
1784+ */
1785+ if (wpc -> iomap .type != IOMAP_HOLE )
1786+ * wb_pending = true;
1787+ } while (rlen );
17851788
1786- /*
1787- * We cannot cancel the ioend directly here on error. We may have
1788- * already set other pages under writeback and hence we have to run I/O
1789- * completion to mark the error state of the pages under writeback
1790- * appropriately.
1791- *
1792- * Just let the file system know what portion of the folio failed to
1793- * map.
1794- */
1795- if (error && wpc -> ops -> discard_folio )
1796- wpc -> ops -> discard_folio (folio , pos );
1797- return error ;
1789+ return 0 ;
17981790}
17991791
18001792/*
@@ -1906,8 +1898,8 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
19061898 */
19071899 end_aligned = round_up (end_pos , i_blocksize (inode ));
19081900 while ((rlen = iomap_find_dirty_range (folio , & pos , end_aligned ))) {
1909- error = iomap_writepage_map_blocks (wpc , folio , pos , end_pos ,
1910- rlen , & wb_pending );
1901+ error = iomap_writeback_range (wpc , folio , pos , rlen , end_pos ,
1902+ & wb_pending );
19111903 if (error )
19121904 break ;
19131905 pos += rlen ;
0 commit comments