Skip to content

Commit ab13e8c

Browse files
zhangyi089 authored and gregkh committed
ext4: refactor the block allocation process of ext4_page_mkwrite()
commit 2bddafe upstream. The block allocation process and error handling in ext4_page_mkwrite() is complex now. Refactor it by introducing a new helper function, ext4_block_page_mkwrite(). It will call ext4_block_write_begin() to allocate blocks instead of directly calling block_page_mkwrite(). Preparing to implement retry logic in a subsequent patch to address situations where the reserved journal credits are insufficient. Additionally, this modification will help prevent potential deadlocks that may occur when waiting for folio writeback while holding the transaction handle. Suggested-by: Jan Kara <[email protected]> Signed-off-by: Zhang Yi <[email protected]> Reviewed-by: Jan Kara <[email protected]> Link: https://patch.msgid.link/[email protected] Signed-off-by: Theodore Ts'o <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 98571b6 commit ab13e8c

1 file changed

Lines changed: 50 additions & 45 deletions

File tree

fs/ext4/inode.c

Lines changed: 50 additions & 45 deletions
Original file line number · Diff line number · Diff line change
@@ -6622,6 +6622,53 @@ static int ext4_bh_unmapped(handle_t *handle, struct inode *inode,
66226622
return !buffer_mapped(bh);
66236623
}
66246624

6625+
/*
 * ext4_block_page_mkwrite - allocate blocks for a faulted-on folio
 * @inode:     inode backing the mapping
 * @folio:     folio that took the write fault
 * @get_block: block-mapping callback used for the allocation
 *
 * Helper for ext4_page_mkwrite(): starts a journal handle, locks the
 * folio, and allocates any unmapped blocks backing it via
 * ext4_block_write_begin().  Replaces the previous direct call to
 * block_page_mkwrite() so that retry logic can be added here later and
 * so the folio-writeback wait happens after the handle is dropped.
 *
 * Returns 0 on success with the folio still LOCKED (the caller/fault
 * path unlocks it); on failure returns a negative errno with the folio
 * unlocked and the handle stopped.
 */
static int ext4_block_page_mkwrite(struct inode *inode, struct folio *folio,
				   get_block_t get_block)
{
	handle_t *handle;
	loff_t size;
	unsigned long len;
	int ret;

	/* Reserve enough credits to map/allocate every block in the folio. */
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	folio_lock(folio);
	size = i_size_read(inode);
	/* Page got truncated from under us? */
	if (folio->mapping != inode->i_mapping || folio_pos(folio) > size) {
		ret = -EFAULT;
		goto out_error;
	}

	/* Clamp the allocation length to EOF for a partial tail folio. */
	len = folio_size(folio);
	if (folio_pos(folio) + len > size)
		len = size - folio_pos(folio);

	ret = ext4_block_write_begin(handle, folio, 0, len, get_block);
	if (ret)
		goto out_error;

	if (!ext4_should_journal_data(inode)) {
		/* Ordered/writeback mode: commit buffers and dirty the folio. */
		block_commit_write(folio, 0, len);
		folio_mark_dirty(folio);
	} else {
		/*
		 * data=journal mode: buffers must go through the journal
		 * rather than being marked dirty directly.
		 */
		ret = ext4_journal_folio_buffers(handle, folio, len);
		if (ret)
			goto out_error;
	}
	ext4_journal_stop(handle);
	/*
	 * Wait for any in-flight writeback after dropping the handle;
	 * waiting with the handle held could deadlock (see commit log).
	 */
	folio_wait_stable(folio);
	return ret;

out_error:
	folio_unlock(folio);
	ext4_journal_stop(handle);
	return ret;
}
6671+
66256672
vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
66266673
{
66276674
struct vm_area_struct *vma = vmf->vma;
@@ -6633,8 +6680,7 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
66336680
struct file *file = vma->vm_file;
66346681
struct inode *inode = file_inode(file);
66356682
struct address_space *mapping = inode->i_mapping;
6636-
handle_t *handle;
6637-
get_block_t *get_block;
6683+
get_block_t *get_block = ext4_get_block;
66386684
int retries = 0;
66396685

66406686
if (unlikely(IS_IMMUTABLE(inode)))
@@ -6702,46 +6748,9 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
67026748
/* OK, we need to fill the hole... */
67036749
if (ext4_should_dioread_nolock(inode))
67046750
get_block = ext4_get_block_unwritten;
6705-
else
6706-
get_block = ext4_get_block;
67076751
retry_alloc:
6708-
handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
6709-
ext4_writepage_trans_blocks(inode));
6710-
if (IS_ERR(handle)) {
6711-
ret = VM_FAULT_SIGBUS;
6712-
goto out;
6713-
}
6714-
/*
6715-
* Data journalling can't use block_page_mkwrite() because it
6716-
* will set_buffer_dirty() before do_journal_get_write_access()
6717-
* thus might hit warning messages for dirty metadata buffers.
6718-
*/
6719-
if (!ext4_should_journal_data(inode)) {
6720-
err = block_page_mkwrite(vma, vmf, get_block);
6721-
} else {
6722-
folio_lock(folio);
6723-
size = i_size_read(inode);
6724-
/* Page got truncated from under us? */
6725-
if (folio->mapping != mapping || folio_pos(folio) > size) {
6726-
ret = VM_FAULT_NOPAGE;
6727-
goto out_error;
6728-
}
6729-
6730-
len = folio_size(folio);
6731-
if (folio_pos(folio) + len > size)
6732-
len = size - folio_pos(folio);
6733-
6734-
err = ext4_block_write_begin(handle, folio, 0, len,
6735-
ext4_get_block);
6736-
if (!err) {
6737-
ret = VM_FAULT_SIGBUS;
6738-
if (ext4_journal_folio_buffers(handle, folio, len))
6739-
goto out_error;
6740-
} else {
6741-
folio_unlock(folio);
6742-
}
6743-
}
6744-
ext4_journal_stop(handle);
6752+
/* Start journal and allocate blocks */
6753+
err = ext4_block_page_mkwrite(inode, folio, get_block);
67456754
if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
67466755
goto retry_alloc;
67476756
out_ret:
@@ -6750,8 +6759,4 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
67506759
filemap_invalidate_unlock_shared(mapping);
67516760
sb_end_pagefault(inode->i_sb);
67526761
return ret;
6753-
out_error:
6754-
folio_unlock(folio);
6755-
ext4_journal_stop(handle);
6756-
goto out;
67576762
}

0 commit comments

Comments
 (0)