7474#include "delayed-inode.h"
7575
7676#define COW_FILE_RANGE_KEEP_LOCKED (1UL << 0)
77- #define COW_FILE_RANGE_NO_INLINE (1UL << 1)
7877
7978struct btrfs_iget_args {
8079 u64 ino ;
@@ -622,6 +621,10 @@ static bool can_cow_file_range_inline(struct btrfs_inode *inode,
622621 *
623622 * If being used directly, you must have already checked we're allowed to cow
624623 * the range by getting true from can_cow_file_range_inline().
624+ *
625+ * Return 0 if the inline extent is created successfully.
626+ * Return <0 for a critical error, which should be treated as a writeback error.
627+ * Return >0 if an inline extent cannot be created (mostly due to lack of metadata space).
625628 */
626629static noinline int __cow_file_range_inline (struct btrfs_inode * inode ,
627630 u64 size , size_t compressed_size ,
@@ -703,55 +706,6 @@ static noinline int __cow_file_range_inline(struct btrfs_inode *inode,
703706 return ret ;
704707}
705708
/*
 * Try to create an inline extent for the range [offset, end].
 *
 * Return 0 on success: the inline extent was created and the folios are
 * unlocked with writeback started and ended here (PAGE_UNLOCK |
 * PAGE_START_WRITEBACK | PAGE_END_WRITEBACK below).
 * Return >0 when an inline extent could not be created (the range is not
 * eligible, or __cow_file_range_inline() could not insert it); the caller
 * falls back to a regular extent.
 * Return <0 on critical error from __cow_file_range_inline().
 */
706- static noinline int cow_file_range_inline (struct btrfs_inode * inode ,
707- struct folio * locked_folio ,
708- u64 offset , u64 end ,
709- size_t compressed_size ,
710- int compress_type ,
711- struct folio * compressed_folio ,
712- bool update_i_size )
713- {
714- struct extent_state * cached = NULL ;
715- unsigned long clear_flags = EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
716- EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING | EXTENT_LOCKED ;
/* Inline size is capped at i_size: do not inline past the end of the file. */
717- u64 size = min_t (u64 , i_size_read (& inode -> vfs_inode ), end + 1 );
718- int ret ;
719-
720- if (!can_cow_file_range_inline (inode , offset , size , compressed_size ))
721- return 1 ;
722-
723- btrfs_lock_extent (& inode -> io_tree , offset , end , & cached );
724- ret = __cow_file_range_inline (inode , size , compressed_size ,
725- compress_type , compressed_folio ,
726- update_i_size );
727- if (ret > 0 ) {
728- btrfs_unlock_extent (& inode -> io_tree , offset , end , & cached );
729- return ret ;
730- }
731-
732- /*
733- * In the successful case (ret == 0 here), cow_file_range will return 1.
734- *
735- * Quite a bit further up the callstack in extent_writepage(), ret == 1
736- * is treated as a short circuited success and does not unlock the folio,
737- * so we must do it here.
738- *
739- * In the failure case, the locked_folio does get unlocked by
740- * btrfs_folio_end_all_writers, which asserts that it is still locked
741- * at that point, so we must *not* unlock it here.
742- *
743- * The other two callsites in compress_file_range do not have a
744- * locked_folio, so they are not relevant to this logic.
745- */
746- if (ret == 0 )
747- locked_folio = NULL ;
748-
749- extent_clear_unlock_delalloc (inode , offset , end , locked_folio , & cached ,
750- clear_flags , PAGE_UNLOCK |
751- PAGE_START_WRITEBACK | PAGE_END_WRITEBACK );
752- return ret ;
753- }
754-
755709struct async_extent {
756710 u64 start ;
757711 u64 ram_size ;
@@ -797,7 +751,7 @@ static int add_async_extent(struct async_chunk *cow, u64 start, u64 ram_size,
797751 * options, defragmentation, properties or heuristics.
798752 */
799753static inline int inode_need_compress (struct btrfs_inode * inode , u64 start ,
800- u64 end )
754+ u64 end , bool check_inline )
801755{
802756 struct btrfs_fs_info * fs_info = inode -> root -> fs_info ;
803757
@@ -812,8 +766,9 @@ static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
812766 * and will always fallback to regular write later.
813767 */
814768 if (end + 1 - start <= fs_info -> sectorsize &&
815- (start > 0 || end + 1 < inode -> disk_i_size ))
769+ (! check_inline || ( start > 0 || end + 1 < inode -> disk_i_size ) ))
816770 return 0 ;
771+
817772 /* Defrag ioctl takes precedence over mount options and properties. */
818773 if (inode -> defrag_compress == BTRFS_DEFRAG_DONT_COMPRESS )
819774 return 0 ;
@@ -928,7 +883,6 @@ static void compress_file_range(struct btrfs_work *work)
928883 container_of (work , struct async_chunk , work );
929884 struct btrfs_inode * inode = async_chunk -> inode ;
930885 struct btrfs_fs_info * fs_info = inode -> root -> fs_info ;
931- struct address_space * mapping = inode -> vfs_inode .i_mapping ;
932886 struct compressed_bio * cb = NULL ;
933887 u64 blocksize = fs_info -> sectorsize ;
934888 u64 start = async_chunk -> start ;
@@ -1000,7 +954,7 @@ static void compress_file_range(struct btrfs_work *work)
1000954 * been flagged as NOCOMPRESS. This flag can change at any time if we
1001955 * discover bad compression ratios.
1002956 */
1003- if (!inode_need_compress (inode , start , end ))
957+ if (!inode_need_compress (inode , start , end , false ))
1004958 goto cleanup_and_bail_uncompressed ;
1005959
1006960 if (0 < inode -> defrag_compress && inode -> defrag_compress < BTRFS_NR_COMPRESS_TYPES ) {
@@ -1021,35 +975,6 @@ static void compress_file_range(struct btrfs_work *work)
1021975 total_compressed = cb -> bbio .bio .bi_iter .bi_size ;
1022976 total_in = cur_len ;
1023977
1024- /*
1025- * Try to create an inline extent.
1026- *
1027- * If we didn't compress the entire range, try to create an uncompressed
1028- * inline extent, else a compressed one.
1029- *
1030- * Check cow_file_range() for why we don't even try to create inline
1031- * extent for the subpage case.
1032- */
1033- if (total_in < actual_end )
1034- ret = cow_file_range_inline (inode , NULL , start , end , 0 ,
1035- BTRFS_COMPRESS_NONE , NULL , false);
1036- else
1037- ret = cow_file_range_inline (inode , NULL , start , end , total_compressed ,
1038- compress_type ,
1039- bio_first_folio_all (& cb -> bbio .bio ), false);
1040- if (ret <= 0 ) {
1041- cleanup_compressed_bio (cb );
1042- if (ret < 0 )
1043- mapping_set_error (mapping , - EIO );
1044- return ;
1045- }
1046- /*
1047- * If a single block at file offset 0 cannot be inlined, fall back to
1048- * regular writes without marking the file incompressible.
1049- */
1050- if (start == 0 && end <= blocksize )
1051- goto cleanup_and_bail_uncompressed ;
1052-
1053978 /*
1054979 * We aren't doing an inline extent. Round the compressed size up to a
1055980 * block size boundary so the allocator does sane things.
@@ -1427,11 +1352,6 @@ static int cow_one_range(struct btrfs_inode *inode, struct folio *locked_folio,
14271352 *
14281353 * When this function fails, it unlocks all folios except @locked_folio.
14291354 *
1430- * When this function successfully creates an inline extent, it returns 1 and
1431- * unlocks all folios including locked_folio and starts I/O on them.
1432- * (In reality inline extents are limited to a single block, so locked_folio is
1433- * the only folio handled anyway).
1434- *
14351355 * When this function succeed and creates a normal extent, the folio locking
14361356 * status depends on the passed in flags:
14371357 *
@@ -1475,25 +1395,6 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
14751395 ASSERT (num_bytes <= btrfs_super_total_bytes (fs_info -> super_copy ));
14761396
14771397 inode_should_defrag (inode , start , end , num_bytes , SZ_64K );
1478-
1479- if (!(flags & COW_FILE_RANGE_NO_INLINE )) {
1480- /* lets try to make an inline extent */
1481- ret = cow_file_range_inline (inode , locked_folio , start , end , 0 ,
1482- BTRFS_COMPRESS_NONE , NULL , false);
1483- if (ret <= 0 ) {
1484- /*
1485- * We succeeded, return 1 so the caller knows we're done
1486- * with this page and already handled the IO.
1487- *
1488- * If there was an error then cow_file_range_inline() has
1489- * already done the cleanup.
1490- */
1491- if (ret == 0 )
1492- ret = 1 ;
1493- goto done ;
1494- }
1495- }
1496-
14971398 alloc_hint = btrfs_get_extent_allocation_hint (inode , start , num_bytes );
14981399
14991400 /*
@@ -1571,7 +1472,6 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
15711472 }
15721473 extent_clear_unlock_delalloc (inode , orig_start , end , locked_folio , & cached ,
15731474 EXTENT_LOCKED | EXTENT_DELALLOC , page_ops );
1574- done :
15751475 if (done_offset )
15761476 * done_offset = end ;
15771477 return ret ;
@@ -1874,7 +1774,7 @@ static int fallback_to_cow(struct btrfs_inode *inode,
18741774 * a locked folio, which can race with writeback.
18751775 */
18761776 ret = cow_file_range (inode , locked_folio , start , end , NULL ,
1877- COW_FILE_RANGE_NO_INLINE | COW_FILE_RANGE_KEEP_LOCKED );
1777+ COW_FILE_RANGE_KEEP_LOCKED );
18781778 ASSERT (ret != 1 );
18791779 return ret ;
18801780}
@@ -2425,6 +2325,91 @@ static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
24252325 return false;
24262326}
24272327
2328+ /*
2329+ * Return 0 if an inline extent was created successfully.
2330+ * Return <0 if a critical error happened.
2331+ * Return >0 if an inline extent cannot be created; the caller falls back
to the regular COW/NOCOW path.
2332+ */
2333+ static int run_delalloc_inline (struct btrfs_inode * inode , struct folio * locked_folio )
2334+ {
2335+ struct btrfs_fs_info * fs_info = inode -> root -> fs_info ;
2336+ struct compressed_bio * cb = NULL ;
2337+ struct extent_state * cached = NULL ;
2338+ const u64 i_size = i_size_read (& inode -> vfs_inode );
2339+ const u32 blocksize = fs_info -> sectorsize ;
2340+ int compress_type = fs_info -> compress_type ;
2341+ int compress_level = fs_info -> compress_level ;
2342+ u32 compressed_size = 0 ;
2343+ int ret ;
2344+
/* The caller (btrfs_run_delalloc_range) only enters here for start == 0. */
2345+ ASSERT (folio_pos (locked_folio ) == 0 );
2346+
/*
 * If compression is allowed and wanted for this range, try compressing
 * the first block first so the inline extent can be a compressed one.
 * Defrag compression settings take precedence over per-inode properties.
 */
2347+ if (btrfs_inode_can_compress (inode ) &&
2348+ inode_need_compress (inode , 0 , blocksize , true)) {
2349+ if (inode -> defrag_compress > 0 &&
2350+ inode -> defrag_compress < BTRFS_NR_COMPRESS_TYPES ) {
2351+ compress_type = inode -> defrag_compress ;
2352+ compress_level = inode -> defrag_compress_level ;
2353+ } else if (inode -> prop_compress ) {
2354+ compress_type = inode -> prop_compress ;
2355+ }
2356+ cb = btrfs_compress_bio (inode , 0 , blocksize , compress_type , compress_level , 0 );
2357+ if (IS_ERR (cb )) {
2358+ cb = NULL ;
2359+ /* Compression failed; just fall back to the non-compressed case. */
2360+ } else {
2361+ compressed_size = cb -> bbio .bio .bi_iter .bi_size ;
2362+ }
2363+ }
/* Not eligible for inlining at all: clean up and let the caller COW. */
2364+ if (!can_cow_file_range_inline (inode , 0 , i_size , compressed_size )) {
2365+ if (cb )
2366+ cleanup_compressed_bio (cb );
2367+ return 1 ;
2368+ }
2369+
2370+ btrfs_lock_extent (& inode -> io_tree , 0 , blocksize - 1 , & cached );
2371+ if (cb ) {
2372+ ret = __cow_file_range_inline (inode , i_size , compressed_size , compress_type ,
2373+ bio_first_folio_all (& cb -> bbio .bio ), false);
2374+ cleanup_compressed_bio (cb );
2375+ cb = NULL ;
2376+ } else {
2377+ ret = __cow_file_range_inline (inode , i_size , 0 , BTRFS_COMPRESS_NONE ,
2378+ NULL , false);
2379+ }
2380+ /*
2381+ * We failed to insert the inline extent due to lack of metadata space.
2382+ * Just unlock the extent io range and fall back to the regular
2383+ * COW/NOCOW path.
2384+ */
2385+ if (ret > 0 ) {
2386+ btrfs_unlock_extent (& inode -> io_tree , 0 , blocksize - 1 , & cached );
2387+ return ret ;
2388+ }
2389+
2390+ /*
2391+ * In the successful case (ret == 0 here), btrfs_run_delalloc_range()
2392+ * will return 1.
2393+ *
2394+ * Quite a bit further up the callstack in extent_writepage(), ret == 1
2395+ * is treated as a short circuited success and does not unlock the folio,
2396+ * so we must do it here.
2397+ *
2398+ * For the failure case, @locked_folio does get unlocked by
2399+ * btrfs_folio_end_lock_bitmap(), so we must *not* unlock it here.
2400+ *
2401+ * So if ret == 0, we let extent_clear_unlock_delalloc() unlock the
2402+ * folio by passing NULL as @locked_folio.
2403+ * Otherwise pass @locked_folio as usual.
2404+ */
2405+ if (ret == 0 )
2406+ locked_folio = NULL ;
2407+ extent_clear_unlock_delalloc (inode , 0 , blocksize - 1 , locked_folio , & cached ,
2408+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
2409+ EXTENT_DO_ACCOUNTING | EXTENT_LOCKED ,
2410+ PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK );
2411+ return ret ;
2412+ }
2412+
24282413/*
24292414 * Function to process delayed allocation (create CoW) for ranges which are
24302415 * being touched for the first time.
@@ -2441,11 +2426,26 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_fol
24412426 ASSERT (!(end <= folio_pos (locked_folio ) ||
24422427 start >= folio_next_pos (locked_folio )));
24432428
2429+ if (start == 0 && end + 1 <= inode -> root -> fs_info -> sectorsize &&
2430+ end + 1 >= inode -> disk_i_size ) {
2431+ int ret ;
2432+
2433+ ret = run_delalloc_inline (inode , locked_folio );
2434+ if (ret < 0 )
2435+ return ret ;
2436+ if (ret == 0 )
2437+ return 1 ;
2438+ /*
2439+ * Continue with regular handling if we cannot create an
2440+ * inline extent.
2441+ */
2442+ }
2443+
24442444 if (should_nocow (inode , start , end ))
24452445 return run_delalloc_nocow (inode , locked_folio , start , end );
24462446
24472447 if (btrfs_inode_can_compress (inode ) &&
2448- inode_need_compress (inode , start , end ) &&
2448+ inode_need_compress (inode , start , end , false ) &&
24492449 run_delalloc_compressed (inode , locked_folio , start , end , wbc ))
24502450 return 1 ;
24512451
0 commit comments