Skip to content

Commit 6194db7

Browse files
x-y-z authored and gregkh committed
mm/huge_memory: do not change split_huge_page*() target order silently
commit 77008e1b2ef73249bceb078a321a3ff6bc087afb upstream. Page cache folios from a file system that supports large block size (LBS) can have a minimal folio order greater than 0, thus a high order folio might not be able to be split down to order-0. Commit e220917 ("mm: split a folio in minimum folio order chunks") bumps the target order of split_huge_page*() to the minimum allowed order when splitting a LBS folio. This causes confusion for some split_huge_page*() callers like memory failure handling code, since they expect after-split folios to all have order-0 when the split succeeds, but in reality get min_order_for_split() order folios and give warnings. Fix it by failing a split if the folio cannot be split to the target order. Rename try_folio_split() to try_folio_split_to_order() to reflect the added new_order parameter. Remove its unused list parameter. [The test poisons LBS folios, which cannot be split to order-0 folios, and also tries to poison all memory. The non-split LBS folios take more memory than the test anticipated, leading to OOM. The patch fixed the kernel warning and the test needs some change to avoid OOM.] 
Link: https://lkml.kernel.org/r/[email protected] Fixes: e220917 ("mm: split a folio in minimum folio order chunks") Signed-off-by: Zi Yan <[email protected]> Reported-by: [email protected] Closes: https://lore.kernel.org/all/[email protected]/ Reviewed-by: Luis Chamberlain <[email protected]> Reviewed-by: Pankaj Raghav <[email protected]> Reviewed-by: Wei Yang <[email protected]> Acked-by: David Hildenbrand <[email protected]> Reviewed-by: Lorenzo Stoakes <[email protected]> Reviewed-by: Miaohe Lin <[email protected]> Cc: Baolin Wang <[email protected]> Cc: Barry Song <[email protected]> Cc: David Hildenbrand <[email protected]> Cc: Dev Jain <[email protected]> Cc: Jane Chu <[email protected]> Cc: Lance Yang <[email protected]> Cc: Liam Howlett <[email protected]> Cc: Mariano Pache <[email protected]> Cc: Matthew Wilcox (Oracle) <[email protected]> Cc: Naoya Horiguchi <[email protected]> Cc: Ryan Roberts <[email protected]> Cc: Christian Brauner <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent f4ff166 commit 6194db7

3 files changed

Lines changed: 28 additions & 42 deletions

File tree

include/linux/huge_mm.h

Lines changed: 23 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -354,45 +354,30 @@ bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
354354
int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
355355
struct list_head *list);
356356
/*
357-
* try_folio_split - try to split a @folio at @page using non uniform split.
357+
* try_folio_split_to_order - try to split a @folio at @page to @new_order using
358+
* non uniform split.
358359
* @folio: folio to be split
359-
* @page: split to order-0 at the given page
360-
* @list: store the after-split folios
360+
* @page: split to @new_order at the given page
361+
* @new_order: the target split order
361362
*
362-
* Try to split a @folio at @page using non uniform split to order-0, if
363-
* non uniform split is not supported, fall back to uniform split.
363+
* Try to split a @folio at @page using non uniform split to @new_order, if
364+
* non uniform split is not supported, fall back to uniform split. After-split
365+
* folios are put back to LRU list. Use min_order_for_split() to get the lower
366+
* bound of @new_order.
364367
*
365368
* Return: 0: split is successful, otherwise split failed.
366369
*/
367-
static inline int try_folio_split(struct folio *folio, struct page *page,
368-
struct list_head *list)
370+
static inline int try_folio_split_to_order(struct folio *folio,
371+
struct page *page, unsigned int new_order)
369372
{
370-
int ret = min_order_for_split(folio);
371-
372-
if (ret < 0)
373-
return ret;
374-
375-
if (!non_uniform_split_supported(folio, 0, false))
376-
return split_huge_page_to_list_to_order(&folio->page, list,
377-
ret);
378-
return folio_split(folio, ret, page, list);
373+
if (!non_uniform_split_supported(folio, new_order, /* warns= */ false))
374+
return split_huge_page_to_list_to_order(&folio->page, NULL,
375+
new_order);
376+
return folio_split(folio, new_order, page, NULL);
379377
}
380378
static inline int split_huge_page(struct page *page)
381379
{
382-
struct folio *folio = page_folio(page);
383-
int ret = min_order_for_split(folio);
384-
385-
if (ret < 0)
386-
return ret;
387-
388-
/*
389-
* split_huge_page() locks the page before splitting and
390-
* expects the same page that has been split to be locked when
391-
* returned. split_folio(page_folio(page)) cannot be used here
392-
* because it converts the page to folio and passes the head
393-
* page to be split.
394-
*/
395-
return split_huge_page_to_list_to_order(page, NULL, ret);
380+
return split_huge_page_to_list_to_order(page, NULL, 0);
396381
}
397382
void deferred_split_folio(struct folio *folio, bool partially_mapped);
398383

@@ -560,13 +545,19 @@ static inline int split_huge_page(struct page *page)
560545
return 0;
561546
}
562547

548+
static inline int min_order_for_split(struct folio *folio)
549+
{
550+
VM_WARN_ON_ONCE_FOLIO(1, folio);
551+
return -EINVAL;
552+
}
553+
563554
static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
564555
{
565556
return 0;
566557
}
567558

568-
static inline int try_folio_split(struct folio *folio, struct page *page,
569-
struct list_head *list)
559+
static inline int try_folio_split_to_order(struct folio *folio,
560+
struct page *page, unsigned int new_order)
570561
{
571562
return 0;
572563
}

mm/huge_memory.c

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -3680,8 +3680,6 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
36803680

36813681
min_order = mapping_min_folio_order(folio->mapping);
36823682
if (new_order < min_order) {
3683-
VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u",
3684-
min_order);
36853683
ret = -EINVAL;
36863684
goto out;
36873685
}
@@ -4016,12 +4014,7 @@ int min_order_for_split(struct folio *folio)
40164014

40174015
int split_folio_to_list(struct folio *folio, struct list_head *list)
40184016
{
4019-
int ret = min_order_for_split(folio);
4020-
4021-
if (ret < 0)
4022-
return ret;
4023-
4024-
return split_huge_page_to_list_to_order(&folio->page, list, ret);
4017+
return split_huge_page_to_list_to_order(&folio->page, list, 0);
40254018
}
40264019

40274020
/*

mm/truncate.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -194,6 +194,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
194194
size_t size = folio_size(folio);
195195
unsigned int offset, length;
196196
struct page *split_at, *split_at2;
197+
unsigned int min_order;
197198

198199
if (pos < start)
199200
offset = start - pos;
@@ -223,8 +224,9 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
223224
if (!folio_test_large(folio))
224225
return true;
225226

227+
min_order = mapping_min_folio_order(folio->mapping);
226228
split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE);
227-
if (!try_folio_split(folio, split_at, NULL)) {
229+
if (!try_folio_split_to_order(folio, split_at, min_order)) {
228230
/*
229231
* try to split at offset + length to make sure folios within
230232
* the range can be dropped, especially to avoid memory waste
@@ -254,7 +256,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
254256
*/
255257
if (folio_test_large(folio2) &&
256258
folio2->mapping == folio->mapping)
257-
try_folio_split(folio2, split_at2, NULL);
259+
try_folio_split_to_order(folio2, split_at2, min_order);
258260

259261
folio_unlock(folio2);
260262
out:

0 commit comments

Comments (0)