Skip to content

Commit d6f9946

Browse files
Christoph Hellwig authored and kawasaki committed
iomap: move all ioend handling to ioend.c
Now that the writeback code has the proper abstractions, all the ioend code can be self-contained in ioend.c. Signed-off-by: Christoph Hellwig <[email protected]> Reviewed-by: Brian Foster <[email protected]> Reviewed-by: Joanne Koong <[email protected]> Reviewed-by: "Darrick J. Wong" <[email protected]>
1 parent 2f87fc4 commit d6f9946

3 files changed

Lines changed: 219 additions & 218 deletions

File tree

fs/iomap/buffered-io.c

Lines changed: 0 additions & 216 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@
88
#include <linux/writeback.h>
99
#include <linux/swap.h>
1010
#include <linux/migrate.h>
11-
#include "internal.h"
1211
#include "trace.h"
1312

1413
#include "../internal.h"
@@ -1551,221 +1550,6 @@ void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
15511550
}
15521551
EXPORT_SYMBOL_GPL(iomap_finish_folio_write);
15531552

1554-
/*
1555-
* We're now finished for good with this ioend structure. Update the page
1556-
* state, release holds on bios, and finally free up memory. Do not use the
1557-
* ioend after this.
1558-
*/
1559-
u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend)
1560-
{
1561-
struct inode *inode = ioend->io_inode;
1562-
struct bio *bio = &ioend->io_bio;
1563-
struct folio_iter fi;
1564-
u32 folio_count = 0;
1565-
1566-
if (ioend->io_error) {
1567-
mapping_set_error(inode->i_mapping, ioend->io_error);
1568-
if (!bio_flagged(bio, BIO_QUIET)) {
1569-
pr_err_ratelimited(
1570-
"%s: writeback error on inode %lu, offset %lld, sector %llu",
1571-
inode->i_sb->s_id, inode->i_ino,
1572-
ioend->io_offset, ioend->io_sector);
1573-
}
1574-
}
1575-
1576-
/* walk all folios in bio, ending page IO on them */
1577-
bio_for_each_folio_all(fi, bio) {
1578-
iomap_finish_folio_write(inode, fi.folio, fi.length);
1579-
folio_count++;
1580-
}
1581-
1582-
bio_put(bio); /* frees the ioend */
1583-
return folio_count;
1584-
}
1585-
1586-
static void ioend_writeback_end_bio(struct bio *bio)
1587-
{
1588-
struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);
1589-
1590-
ioend->io_error = blk_status_to_errno(bio->bi_status);
1591-
iomap_finish_ioend_buffered(ioend);
1592-
}
1593-
1594-
/*
1595-
* We cannot cancel the ioend directly in case of an error, so call the bio end
1596-
* I/O handler with the error status here to run the normal I/O completion
1597-
* handler.
1598-
*/
1599-
int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error)
1600-
{
1601-
struct iomap_ioend *ioend = wpc->wb_ctx;
1602-
1603-
if (!ioend->io_bio.bi_end_io)
1604-
ioend->io_bio.bi_end_io = ioend_writeback_end_bio;
1605-
1606-
if (WARN_ON_ONCE(wpc->iomap.flags & IOMAP_F_ANON_WRITE))
1607-
error = -EIO;
1608-
1609-
if (error) {
1610-
ioend->io_bio.bi_status = errno_to_blk_status(error);
1611-
bio_endio(&ioend->io_bio);
1612-
return error;
1613-
}
1614-
1615-
submit_bio(&ioend->io_bio);
1616-
return 0;
1617-
}
1618-
EXPORT_SYMBOL_GPL(iomap_ioend_writeback_submit);
1619-
1620-
static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
1621-
loff_t pos, u16 ioend_flags)
1622-
{
1623-
struct bio *bio;
1624-
1625-
bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
1626-
REQ_OP_WRITE | wbc_to_write_flags(wpc->wbc),
1627-
GFP_NOFS, &iomap_ioend_bioset);
1628-
bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);
1629-
bio->bi_write_hint = wpc->inode->i_write_hint;
1630-
wbc_init_bio(wpc->wbc, bio);
1631-
wpc->nr_folios = 0;
1632-
return iomap_init_ioend(wpc->inode, bio, pos, ioend_flags);
1633-
}
1634-
1635-
static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos,
1636-
u16 ioend_flags)
1637-
{
1638-
struct iomap_ioend *ioend = wpc->wb_ctx;
1639-
1640-
if (ioend_flags & IOMAP_IOEND_BOUNDARY)
1641-
return false;
1642-
if ((ioend_flags & IOMAP_IOEND_NOMERGE_FLAGS) !=
1643-
(ioend->io_flags & IOMAP_IOEND_NOMERGE_FLAGS))
1644-
return false;
1645-
if (pos != ioend->io_offset + ioend->io_size)
1646-
return false;
1647-
if (!(wpc->iomap.flags & IOMAP_F_ANON_WRITE) &&
1648-
iomap_sector(&wpc->iomap, pos) != bio_end_sector(&ioend->io_bio))
1649-
return false;
1650-
/*
1651-
* Limit ioend bio chain lengths to minimise IO completion latency. This
1652-
* also prevents long tight loops ending page writeback on all the
1653-
* folios in the ioend.
1654-
*/
1655-
if (wpc->nr_folios >= IOEND_BATCH_SIZE)
1656-
return false;
1657-
return true;
1658-
}
1659-
1660-
/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 *
 * If a new ioend is created and cached, the old ioend is submitted to the block
 * layer instantly.  Batching optimisations are provided by higher level block
 * plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 *
 * Returns the number of bytes of @dirty_len consumed (which may be less than
 * @dirty_len when the current mapping ends first), or a negative errno.
 */
ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
		loff_t pos, loff_t end_pos, unsigned int dirty_len)
{
	struct iomap_ioend *ioend = wpc->wb_ctx;
	size_t poff = offset_in_folio(folio, pos);
	unsigned int ioend_flags = 0;
	/* Clamp to the end of the current mapping. */
	unsigned int map_len = min_t(u64, dirty_len,
			wpc->iomap.offset + wpc->iomap.length - pos);
	int error;

	trace_iomap_add_to_ioend(wpc->inode, pos, dirty_len, &wpc->iomap);

	WARN_ON_ONCE(!folio->private && map_len < dirty_len);

	switch (wpc->iomap.type) {
	case IOMAP_INLINE:
		/* Inline mappings must never reach the writeback path. */
		WARN_ON_ONCE(1);
		return -EIO;
	case IOMAP_HOLE:
		/* Nothing to write for a hole; report the range consumed. */
		return map_len;
	default:
		break;
	}

	/* Translate mapping state into ioend flags for merge decisions. */
	if (wpc->iomap.type == IOMAP_UNWRITTEN)
		ioend_flags |= IOMAP_IOEND_UNWRITTEN;
	if (wpc->iomap.flags & IOMAP_F_SHARED)
		ioend_flags |= IOMAP_IOEND_SHARED;
	if (folio_test_dropbehind(folio))
		ioend_flags |= IOMAP_IOEND_DONTCACHE;
	if (pos == wpc->iomap.offset && (wpc->iomap.flags & IOMAP_F_BOUNDARY))
		ioend_flags |= IOMAP_IOEND_BOUNDARY;

	if (!ioend || !iomap_can_add_to_ioend(wpc, pos, ioend_flags)) {
new_ioend:
		/* Submit the cached ioend (if any) before replacing it. */
		if (ioend) {
			error = wpc->ops->writeback_submit(wpc, 0);
			if (error)
				return error;
		}
		wpc->wb_ctx = ioend = iomap_alloc_ioend(wpc, pos, ioend_flags);
	}

	/* A full bio means this ioend cannot grow; start a fresh one. */
	if (!bio_add_folio(&ioend->io_bio, folio, map_len, poff))
		goto new_ioend;

	iomap_start_folio_write(wpc->inode, folio, map_len);

	/*
	 * Clamp io_offset and io_size to the incore EOF so that ondisk
	 * file size updates in the ioend completion are byte-accurate.
	 * This avoids recovering files with zeroed tail regions when
	 * writeback races with appending writes:
	 *
	 * Thread 1:                  Thread 2:
	 * ------------               -----------
	 * write [A, A+B]
	 * update inode size to A+B
	 * submit I/O [A, A+BS]
	 *                            write [A+B, A+B+C]
	 *                            update inode size to A+B+C
	 * <I/O completes, updates disk size to min(A+B+C, A+BS)>
	 * <power failure>
	 *
	 * After reboot:
	 *   1) with A+B+C < A+BS, the file has zero padding in range
	 *      [A+B, A+B+C]
	 *
	 *  |<     Block Size (BS)   >|
	 *  |DDDDDDDDDDDD0000000000000|
	 *  ^           ^        ^
	 *  A          A+B     A+B+C
	 *                     (EOF)
	 *
	 *   2) with A+B+C > A+BS, the file has zero padding in range
	 *      [A+B, A+BS]
	 *
	 *  |<     Block Size (BS)   >|<     Block Size (BS)    >|
	 *  |DDDDDDDDDDDD0000000000000|00000000000000000000000000|
	 *  ^           ^             ^           ^
	 *  A          A+B           A+BS       A+B+C
	 *                           (EOF)
	 *
	 *  D = Valid Data
	 *  0 = Zero Padding
	 *
	 * Note that this defeats the ability to chain the ioends of
	 * appending writes.
	 */
	ioend->io_size += map_len;
	if (ioend->io_offset + ioend->io_size > end_pos)
		ioend->io_size = end_pos - ioend->io_offset;

	wbc_account_cgroup_owner(wpc->wbc, folio, map_len);
	return map_len;
}
EXPORT_SYMBOL_GPL(iomap_add_to_ioend);
1768-
17691553
static int iomap_writeback_range(struct iomap_writepage_ctx *wpc,
17701554
struct folio *folio, u64 pos, u32 rlen, u64 end_pos,
17711555
bool *wb_pending)

fs/iomap/internal.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@
44

55
#define IOEND_BATCH_SIZE 4096
66

7-
u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend);
87
u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend);
98

109
#endif /* _IOMAP_INTERNAL_H */

0 commit comments

Comments
 (0)