Skip to content

Commit 053703c

Browse files
Christoph Hellwig authored and kawasaki committed
iomap: move all ioend handling to ioend.c
Now that the writeback code has the proper abstractions, all the ioend code can be self-contained in ioend.c. Signed-off-by: Christoph Hellwig <[email protected]> Reviewed-by: Brian Foster <[email protected]> Reviewed-by: Joanne Koong <[email protected]> Reviewed-by: "Darrick J. Wong" <[email protected]>
1 parent 94bce81 commit 053703c

3 files changed

Lines changed: 219 additions & 218 deletions

File tree

fs/iomap/buffered-io.c

Lines changed: 0 additions & 216 deletions
Original file line number | Diff line number | Diff line change
@@ -8,7 +8,6 @@
88
#include <linux/writeback.h>
99
#include <linux/swap.h>
1010
#include <linux/migrate.h>
11-
#include "internal.h"
1211
#include "trace.h"
1312

1413
#include "../internal.h"
@@ -1554,221 +1553,6 @@ void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
15541553
}
15551554
EXPORT_SYMBOL_GPL(iomap_finish_folio_write);
15561555

1557-
/*
1558-
* We're now finished for good with this ioend structure. Update the page
1559-
* state, release holds on bios, and finally free up memory. Do not use the
1560-
* ioend after this.
1561-
*/
1562-
u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend)
1563-
{
1564-
struct inode *inode = ioend->io_inode;
1565-
struct bio *bio = &ioend->io_bio;
1566-
struct folio_iter fi;
1567-
u32 folio_count = 0;
1568-
1569-
if (ioend->io_error) {
1570-
mapping_set_error(inode->i_mapping, ioend->io_error);
1571-
if (!bio_flagged(bio, BIO_QUIET)) {
1572-
pr_err_ratelimited(
1573-
"%s: writeback error on inode %lu, offset %lld, sector %llu",
1574-
inode->i_sb->s_id, inode->i_ino,
1575-
ioend->io_offset, ioend->io_sector);
1576-
}
1577-
}
1578-
1579-
/* walk all folios in bio, ending page IO on them */
1580-
bio_for_each_folio_all(fi, bio) {
1581-
iomap_finish_folio_write(inode, fi.folio, fi.length);
1582-
folio_count++;
1583-
}
1584-
1585-
bio_put(bio); /* frees the ioend */
1586-
return folio_count;
1587-
}
1588-
1589-
static void ioend_writeback_end_bio(struct bio *bio)
1590-
{
1591-
struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);
1592-
1593-
ioend->io_error = blk_status_to_errno(bio->bi_status);
1594-
iomap_finish_ioend_buffered(ioend);
1595-
}
1596-
1597-
/*
1598-
* We cannot cancel the ioend directly in case of an error, so call the bio end
1599-
* I/O handler with the error status here to run the normal I/O completion
1600-
* handler.
1601-
*/
1602-
int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error)
1603-
{
1604-
struct iomap_ioend *ioend = wpc->wb_ctx;
1605-
1606-
if (!ioend->io_bio.bi_end_io)
1607-
ioend->io_bio.bi_end_io = ioend_writeback_end_bio;
1608-
1609-
if (WARN_ON_ONCE(wpc->iomap.flags & IOMAP_F_ANON_WRITE))
1610-
error = -EIO;
1611-
1612-
if (error) {
1613-
ioend->io_bio.bi_status = errno_to_blk_status(error);
1614-
bio_endio(&ioend->io_bio);
1615-
return error;
1616-
}
1617-
1618-
submit_bio(&ioend->io_bio);
1619-
return 0;
1620-
}
1621-
EXPORT_SYMBOL_GPL(iomap_ioend_writeback_submit);
1622-
1623-
static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
1624-
loff_t pos, u16 ioend_flags)
1625-
{
1626-
struct bio *bio;
1627-
1628-
bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
1629-
REQ_OP_WRITE | wbc_to_write_flags(wpc->wbc),
1630-
GFP_NOFS, &iomap_ioend_bioset);
1631-
bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);
1632-
bio->bi_write_hint = wpc->inode->i_write_hint;
1633-
wbc_init_bio(wpc->wbc, bio);
1634-
wpc->nr_folios = 0;
1635-
return iomap_init_ioend(wpc->inode, bio, pos, ioend_flags);
1636-
}
1637-
1638-
static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos,
1639-
u16 ioend_flags)
1640-
{
1641-
struct iomap_ioend *ioend = wpc->wb_ctx;
1642-
1643-
if (ioend_flags & IOMAP_IOEND_BOUNDARY)
1644-
return false;
1645-
if ((ioend_flags & IOMAP_IOEND_NOMERGE_FLAGS) !=
1646-
(ioend->io_flags & IOMAP_IOEND_NOMERGE_FLAGS))
1647-
return false;
1648-
if (pos != ioend->io_offset + ioend->io_size)
1649-
return false;
1650-
if (!(wpc->iomap.flags & IOMAP_F_ANON_WRITE) &&
1651-
iomap_sector(&wpc->iomap, pos) != bio_end_sector(&ioend->io_bio))
1652-
return false;
1653-
/*
1654-
* Limit ioend bio chain lengths to minimise IO completion latency. This
1655-
* also prevents long tight loops ending page writeback on all the
1656-
* folios in the ioend.
1657-
*/
1658-
if (wpc->nr_folios >= IOEND_BATCH_SIZE)
1659-
return false;
1660-
return true;
1661-
}
1662-
1663-
/*
1664-
* Test to see if we have an existing ioend structure that we could append to
1665-
* first; otherwise finish off the current ioend and start another.
1666-
*
1667-
* If a new ioend is created and cached, the old ioend is submitted to the block
1668-
* layer instantly. Batching optimisations are provided by higher level block
1669-
* plugging.
1670-
*
1671-
* At the end of a writeback pass, there will be a cached ioend remaining on the
1672-
* writepage context that the caller will need to submit.
1673-
*/
1674-
ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
1675-
loff_t pos, loff_t end_pos, unsigned int dirty_len)
1676-
{
1677-
struct iomap_ioend *ioend = wpc->wb_ctx;
1678-
size_t poff = offset_in_folio(folio, pos);
1679-
unsigned int ioend_flags = 0;
1680-
unsigned int map_len = min_t(u64, dirty_len,
1681-
wpc->iomap.offset + wpc->iomap.length - pos);
1682-
int error;
1683-
1684-
trace_iomap_add_to_ioend(wpc->inode, pos, dirty_len, &wpc->iomap);
1685-
1686-
WARN_ON_ONCE(!folio->private && map_len < dirty_len);
1687-
1688-
switch (wpc->iomap.type) {
1689-
case IOMAP_INLINE:
1690-
WARN_ON_ONCE(1);
1691-
return -EIO;
1692-
case IOMAP_HOLE:
1693-
return map_len;
1694-
default:
1695-
break;
1696-
}
1697-
1698-
if (wpc->iomap.type == IOMAP_UNWRITTEN)
1699-
ioend_flags |= IOMAP_IOEND_UNWRITTEN;
1700-
if (wpc->iomap.flags & IOMAP_F_SHARED)
1701-
ioend_flags |= IOMAP_IOEND_SHARED;
1702-
if (folio_test_dropbehind(folio))
1703-
ioend_flags |= IOMAP_IOEND_DONTCACHE;
1704-
if (pos == wpc->iomap.offset && (wpc->iomap.flags & IOMAP_F_BOUNDARY))
1705-
ioend_flags |= IOMAP_IOEND_BOUNDARY;
1706-
1707-
if (!ioend || !iomap_can_add_to_ioend(wpc, pos, ioend_flags)) {
1708-
new_ioend:
1709-
if (ioend) {
1710-
error = wpc->ops->writeback_submit(wpc, 0);
1711-
if (error)
1712-
return error;
1713-
}
1714-
wpc->wb_ctx = ioend = iomap_alloc_ioend(wpc, pos, ioend_flags);
1715-
}
1716-
1717-
if (!bio_add_folio(&ioend->io_bio, folio, map_len, poff))
1718-
goto new_ioend;
1719-
1720-
iomap_start_folio_write(wpc->inode, folio, map_len);
1721-
1722-
/*
1723-
* Clamp io_offset and io_size to the incore EOF so that ondisk
1724-
* file size updates in the ioend completion are byte-accurate.
1725-
* This avoids recovering files with zeroed tail regions when
1726-
* writeback races with appending writes:
1727-
*
1728-
* Thread 1: Thread 2:
1729-
* ------------ -----------
1730-
* write [A, A+B]
1731-
* update inode size to A+B
1732-
* submit I/O [A, A+BS]
1733-
* write [A+B, A+B+C]
1734-
* update inode size to A+B+C
1735-
* <I/O completes, updates disk size to min(A+B+C, A+BS)>
1736-
* <power failure>
1737-
*
1738-
* After reboot:
1739-
* 1) with A+B+C < A+BS, the file has zero padding in range
1740-
* [A+B, A+B+C]
1741-
*
1742-
* |< Block Size (BS) >|
1743-
* |DDDDDDDDDDDD0000000000000|
1744-
* ^ ^ ^
1745-
* A A+B A+B+C
1746-
* (EOF)
1747-
*
1748-
* 2) with A+B+C > A+BS, the file has zero padding in range
1749-
* [A+B, A+BS]
1750-
*
1751-
* |< Block Size (BS) >|< Block Size (BS) >|
1752-
* |DDDDDDDDDDDD0000000000000|00000000000000000000000000|
1753-
* ^ ^ ^ ^
1754-
* A A+B A+BS A+B+C
1755-
* (EOF)
1756-
*
1757-
* D = Valid Data
1758-
* 0 = Zero Padding
1759-
*
1760-
* Note that this defeats the ability to chain the ioends of
1761-
* appending writes.
1762-
*/
1763-
ioend->io_size += map_len;
1764-
if (ioend->io_offset + ioend->io_size > end_pos)
1765-
ioend->io_size = end_pos - ioend->io_offset;
1766-
1767-
wbc_account_cgroup_owner(wpc->wbc, folio, map_len);
1768-
return map_len;
1769-
}
1770-
EXPORT_SYMBOL_GPL(iomap_add_to_ioend);
1771-
17721556
static int iomap_writeback_range(struct iomap_writepage_ctx *wpc,
17731557
struct folio *folio, u64 pos, u32 rlen, u64 end_pos,
17741558
bool *wb_pending)

fs/iomap/internal.h

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -4,7 +4,6 @@
44

55
#define IOEND_BATCH_SIZE 4096
66

7-
u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend);
87
u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend);
98

109
#endif /* _IOMAP_INTERNAL_H */

0 commit comments

Comments (0)