Skip to content

Commit e3abac7

Browse files
Christoph Hellwig authored and kawasaki committed
iomap: move all ioend handling to ioend.c
Now that the writeback code has the proper abstractions, all the ioend code can be self-contained in ioend.c. Signed-off-by: Christoph Hellwig <[email protected]> Reviewed-by: Brian Foster <[email protected]> Reviewed-by: Joanne Koong <[email protected]> Reviewed-by: "Darrick J. Wong" <[email protected]>
1 parent c6f18f9 commit e3abac7

3 files changed

Lines changed: 219 additions & 217 deletions

File tree

fs/iomap/buffered-io.c

Lines changed: 0 additions & 215 deletions
Original file line numberDiff line numberDiff line change
@@ -1549,221 +1549,6 @@ void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
15491549
}
15501550
EXPORT_SYMBOL_GPL(iomap_finish_folio_write);
15511551

1552-
/*
 * We're now finished for good with this ioend structure. Update the page
 * state, release holds on bios, and finally free up memory. Do not use the
 * ioend after this.
 *
 * Returns the number of folios completed, which the caller can use for
 * writeback accounting.
 */
u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend)
{
	struct inode *inode = ioend->io_inode;
	struct bio *bio = &ioend->io_bio;
	struct folio_iter fi;
	u32 folio_count = 0;

	if (ioend->io_error) {
		/* record the error on the mapping so fsync/close can see it */
		mapping_set_error(inode->i_mapping, ioend->io_error);
		if (!bio_flagged(bio, BIO_QUIET)) {
			pr_err_ratelimited(
"%s: writeback error on inode %lu, offset %lld, sector %llu",
				inode->i_sb->s_id, inode->i_ino,
				ioend->io_offset, ioend->io_sector);
		}
	}

	/* walk all folios in bio, ending page IO on them */
	bio_for_each_folio_all(fi, bio) {
		iomap_finish_folio_write(inode, fi.folio, fi.length);
		folio_count++;
	}

	/*
	 * The ioend is embedded in the bio, so dropping the last bio
	 * reference frees the ioend as well.
	 */
	bio_put(bio);		/* frees the ioend */
	return folio_count;
}
1583-
1584-
/*
 * bio completion handler for buffered writeback ioends: translate the block
 * layer status into an errno and finish the ioend (which also frees it).
 */
static void ioend_writeback_end_bio(struct bio *bio)
{
	struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);

	ioend->io_error = blk_status_to_errno(bio->bi_status);
	iomap_finish_ioend_buffered(ioend);
}
1591-
1592-
/*
 * We cannot cancel the ioend directly in case of an error, so call the bio end
 * I/O handler with the error status here to run the normal I/O completion
 * handler.
 *
 * Returns 0 on successful submission, or the (possibly overridden) error
 * that was propagated through the completion path.
 */
int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error)
{
	struct iomap_ioend *ioend = wpc->wb_ctx;

	/*
	 * Install the completion handler before any bio_endio()/submit_bio()
	 * call below can run it.  Only set it if a caller has not already
	 * provided one.
	 */
	if (!ioend->io_bio.bi_end_io)
		ioend->io_bio.bi_end_io = ioend_writeback_end_bio;

	/*
	 * Anonymous writes have no backing block mapping to submit to, so
	 * reaching this path with IOMAP_F_ANON_WRITE is a caller bug; fail
	 * the ioend instead of submitting a bogus bio.
	 */
	if (WARN_ON_ONCE(wpc->iomap.flags & IOMAP_F_ANON_WRITE))
		error = -EIO;

	if (error) {
		/* run the normal completion path with the error status */
		ioend->io_bio.bi_status = errno_to_blk_status(error);
		bio_endio(&ioend->io_bio);
		return error;
	}

	submit_bio(&ioend->io_bio);
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_ioend_writeback_submit);
1617-
1618-
static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
1619-
loff_t pos, u16 ioend_flags)
1620-
{
1621-
struct bio *bio;
1622-
1623-
bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
1624-
REQ_OP_WRITE | wbc_to_write_flags(wpc->wbc),
1625-
GFP_NOFS, &iomap_ioend_bioset);
1626-
bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);
1627-
bio->bi_write_hint = wpc->inode->i_write_hint;
1628-
wbc_init_bio(wpc->wbc, bio);
1629-
wpc->nr_folios = 0;
1630-
return iomap_init_ioend(wpc->inode, bio, pos, ioend_flags);
1631-
}
1632-
1633-
static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos,
1634-
u16 ioend_flags)
1635-
{
1636-
struct iomap_ioend *ioend = wpc->wb_ctx;
1637-
1638-
if (ioend_flags & IOMAP_IOEND_BOUNDARY)
1639-
return false;
1640-
if ((ioend_flags & IOMAP_IOEND_NOMERGE_FLAGS) !=
1641-
(ioend->io_flags & IOMAP_IOEND_NOMERGE_FLAGS))
1642-
return false;
1643-
if (pos != ioend->io_offset + ioend->io_size)
1644-
return false;
1645-
if (!(wpc->iomap.flags & IOMAP_F_ANON_WRITE) &&
1646-
iomap_sector(&wpc->iomap, pos) != bio_end_sector(&ioend->io_bio))
1647-
return false;
1648-
/*
1649-
* Limit ioend bio chain lengths to minimise IO completion latency. This
1650-
* also prevents long tight loops ending page writeback on all the
1651-
* folios in the ioend.
1652-
*/
1653-
if (wpc->nr_folios >= IOEND_BATCH_SIZE)
1654-
return false;
1655-
return true;
1656-
}
1657-
1658-
/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 *
 * If a new ioend is created and cached, the old ioend is submitted to the block
 * layer instantly. Batching optimisations are provided by higher level block
 * plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 *
 * Returns the number of bytes from @folio consumed by this call (which may
 * be less than @dirty_len when the current mapping ends first), or a
 * negative errno on failure.
 */
ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
		loff_t pos, loff_t end_pos, unsigned int dirty_len)
{
	struct iomap_ioend *ioend = wpc->wb_ctx;
	size_t poff = offset_in_folio(folio, pos);
	unsigned int ioend_flags = 0;
	/* don't map past the end of the current iomap extent */
	unsigned int map_len = min_t(u64, dirty_len,
		wpc->iomap.offset + wpc->iomap.length - pos);
	int error;

	trace_iomap_add_to_ioend(wpc->inode, pos, dirty_len, &wpc->iomap);

	WARN_ON_ONCE(!folio->private && map_len < dirty_len);

	switch (wpc->iomap.type) {
	case IOMAP_INLINE:
		/* inline data must not reach the writeback bio path */
		WARN_ON_ONCE(1);
		return -EIO;
	case IOMAP_HOLE:
		/* nothing to write for a hole, report the range consumed */
		return map_len;
	default:
		break;
	}

	/* translate the mapping state into ioend merge/completion flags */
	if (wpc->iomap.type == IOMAP_UNWRITTEN)
		ioend_flags |= IOMAP_IOEND_UNWRITTEN;
	if (wpc->iomap.flags & IOMAP_F_SHARED)
		ioend_flags |= IOMAP_IOEND_SHARED;
	if (folio_test_dropbehind(folio))
		ioend_flags |= IOMAP_IOEND_DONTCACHE;
	if (pos == wpc->iomap.offset && (wpc->iomap.flags & IOMAP_F_BOUNDARY))
		ioend_flags |= IOMAP_IOEND_BOUNDARY;

	if (!ioend || !iomap_can_add_to_ioend(wpc, pos, ioend_flags)) {
new_ioend:
		/* submit the cached ioend before starting a fresh one */
		if (ioend) {
			error = wpc->ops->writeback_submit(wpc, 0);
			if (error)
				return error;
		}
		wpc->wb_ctx = ioend = iomap_alloc_ioend(wpc, pos, ioend_flags);
	}

	/* if the bio is full, submit it and retry with a new ioend */
	if (!bio_add_folio(&ioend->io_bio, folio, map_len, poff))
		goto new_ioend;

	iomap_start_folio_write(wpc->inode, folio, map_len);

	/*
	 * Clamp io_offset and io_size to the incore EOF so that ondisk
	 * file size updates in the ioend completion are byte-accurate.
	 * This avoids recovering files with zeroed tail regions when
	 * writeback races with appending writes:
	 *
	 * Thread 1:                  Thread 2:
	 * ------------               -----------
	 * write [A, A+B]
	 * update inode size to A+B
	 * submit I/O [A, A+BS]
	 *                            write [A+B, A+B+C]
	 *                            update inode size to A+B+C
	 * <I/O completes, updates disk size to min(A+B+C, A+BS)>
	 * <power failure>
	 *
	 * After reboot:
	 *   1) with A+B+C < A+BS, the file has zero padding in range
	 *      [A+B, A+B+C]
	 *
	 *  |<     Block Size (BS)   >|
	 *  |DDDDDDDDDDDD0000000000000|
	 *  ^           ^        ^
	 *  A          A+B     A+B+C
	 *                       (EOF)
	 *
	 *   2) with A+B+C > A+BS, the file has zero padding in range
	 *      [A+B, A+BS]
	 *
	 *  |<     Block Size (BS)   >|<     Block Size (BS)    >|
	 *  |DDDDDDDDDDDD0000000000000|00000000000000000000000000|
	 *  ^           ^             ^           ^
	 *  A          A+B           A+BS       A+B+C
	 *                           (EOF)
	 *
	 *  D = Valid Data
	 *  0 = Zero Padding
	 *
	 * Note that this defeats the ability to chain the ioends of
	 * appending writes.
	 */
	ioend->io_size += map_len;
	if (ioend->io_offset + ioend->io_size > end_pos)
		ioend->io_size = end_pos - ioend->io_offset;

	/* charge the written bytes to the folio's cgroup for writeback */
	wbc_account_cgroup_owner(wpc->wbc, folio, map_len);
	return map_len;
}
EXPORT_SYMBOL_GPL(iomap_add_to_ioend);
1766-
17671552
static int iomap_writeback_range(struct iomap_writepage_ctx *wpc,
17681553
struct folio *folio, u64 pos, u32 rlen, u64 end_pos,
17691554
bool *wb_pending)

fs/iomap/internal.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@
44

55
#define IOEND_BATCH_SIZE 4096
66

7-
u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend);
87
u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend);
98

109
#endif /* _IOMAP_INTERNAL_H */

0 commit comments

Comments
 (0)