Skip to content

Commit a095090

Browse files
Hugh Dickins authored and gregkh committed
mm: shmem: fix ShmemHugePages at swapout
commit dad2dc9 upstream. /proc/meminfo ShmemHugePages has been showing overlarge amounts (more than Shmem) after swapping out THPs: we forgot to update NR_SHMEM_THPS. Add shmem_update_stats(), to avoid repetition, and risk of making that mistake again: the call from shmem_delete_from_page_cache() is the bugfix; the call from shmem_replace_folio() is reassuring, but not really a bugfix (replace corrects misplaced swapin readahead, but huge swapin readahead would be a mistake). Link: https://lkml.kernel.org/r/[email protected] Fixes: 809bc86 ("mm: shmem: support large folio swap out") Signed-off-by: Hugh Dickins <[email protected]> Reviewed-by: Shakeel Butt <[email protected]> Reviewed-by: Yosry Ahmed <[email protected]> Reviewed-by: Baolin Wang <[email protected]> Tested-by: Baolin Wang <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent cb12d61 commit a095090

1 file changed

Lines changed: 12 additions & 10 deletions

File tree

mm/shmem.c

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -779,6 +779,14 @@ static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
779779
}
780780
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
781781

782+
static void shmem_update_stats(struct folio *folio, int nr_pages)
783+
{
784+
if (folio_test_pmd_mappable(folio))
785+
__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
786+
__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
787+
__lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
788+
}
789+
782790
/*
783791
* Somewhat like filemap_add_folio, but error if expected item has gone.
784792
*/
@@ -813,10 +821,7 @@ static int shmem_add_to_page_cache(struct folio *folio,
813821
xas_store(&xas, folio);
814822
if (xas_error(&xas))
815823
goto unlock;
816-
if (folio_test_pmd_mappable(folio))
817-
__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
818-
__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
819-
__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
824+
shmem_update_stats(folio, nr);
820825
mapping->nrpages += nr;
821826
unlock:
822827
xas_unlock_irq(&xas);
@@ -844,8 +849,7 @@ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
844849
error = shmem_replace_entry(mapping, folio->index, folio, radswap);
845850
folio->mapping = NULL;
846851
mapping->nrpages -= nr;
847-
__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
848-
__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
852+
shmem_update_stats(folio, -nr);
849853
xa_unlock_irq(&mapping->i_pages);
850854
folio_put_refs(folio, nr);
851855
BUG_ON(error);
@@ -1944,10 +1948,8 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
19441948
}
19451949
if (!error) {
19461950
mem_cgroup_replace_folio(old, new);
1947-
__lruvec_stat_mod_folio(new, NR_FILE_PAGES, nr_pages);
1948-
__lruvec_stat_mod_folio(new, NR_SHMEM, nr_pages);
1949-
__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -nr_pages);
1950-
__lruvec_stat_mod_folio(old, NR_SHMEM, -nr_pages);
1951+
shmem_update_stats(new, nr_pages);
1952+
shmem_update_stats(old, -nr_pages);
19511953
}
19521954
xa_unlock_irq(&swap_mapping->i_pages);
19531955

0 commit comments

Comments
 (0)