
Commit e610007

Panky-codes authored and kawasaki committed
mm: add static huge zero folio
There are many places in the kernel where we need to zero out larger chunks, but the maximum segment we can zero out at a time with ZERO_PAGE is limited by PAGE_SIZE.

This is especially annoying in block devices and filesystems, where we attach multiple ZERO_PAGEs to the bio in different bvecs. With multipage bvec support in the block layer, it is much more efficient to send out larger zero pages as part of a single bvec. This concern was raised during the review of adding LBS support to XFS [1][2].

Usually huge_zero_folio is allocated on demand, and it is deallocated by the shrinker once no users of it are left. At the moment, the huge_zero_folio infrastructure's refcount is tied to the lifetime of the process that created it. This might not work for the bio layer, as completions can be asynchronous and the process that created the huge_zero_folio might no longer be alive. One of the main points that came up during the discussion was to have something bigger than the zero page as a drop-in replacement.

Add a config option STATIC_HUGE_ZERO_FOLIO that always allocates the huge_zero_folio and never drops its reference. This makes it possible to use the huge_zero_folio without passing an mm struct, and it does not tie the folio's lifetime to anything, making it a drop-in replacement for ZERO_PAGE. If STATIC_HUGE_ZERO_FOLIO is enabled, mm_get_huge_zero_folio() simply returns this folio instead of dynamically allocating a new PMD page.

This option can waste memory on small systems or systems with a 64k base page size, so make it opt-in and add a per-architecture option so that the feature is not enabled on systems with a larger base page size.

[1] https://lore.kernel.org/linux-xfs/[email protected]/
[2] https://lore.kernel.org/linux-xfs/[email protected]/

Co-developed-by: David Hildenbrand <[email protected]>
Signed-off-by: David Hildenbrand <[email protected]>
Signed-off-by: Pankaj Raghav <[email protected]>
1 parent 7e6ec53 commit e610007

4 files changed

Lines changed: 82 additions & 0 deletions
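The motivation is easiest to see from the block-layer side. The sketch below is illustrative only and is not part of this commit: it fills a bio with zeroes, preferring a single large bvec backed by the static huge zero folio and falling back to per-page ZERO_PAGE bvecs when the folio is unavailable. The helper name zero_fill_bio_bytes() is hypothetical.

#include <linux/bio.h>
#include <linux/huge_mm.h>
#include <linux/mm.h>

/* Hypothetical helper, for illustration only (not part of this commit). */
static int zero_fill_bio_bytes(struct bio *bio, size_t nr_bytes)
{
        struct folio *zero_folio = get_static_huge_zero_folio();

        while (nr_bytes) {
                size_t len;

                if (zero_folio) {
                        /* One PMD-sized bvec instead of many PAGE_SIZE ones. */
                        len = min(nr_bytes, folio_size(zero_folio));
                        if (!bio_add_folio(bio, zero_folio, len, 0))
                                return -EIO;
                } else {
                        /* Config off, early boot, or allocation failure. */
                        len = min_t(size_t, nr_bytes, PAGE_SIZE);
                        if (!bio_add_page(bio, ZERO_PAGE(0), len, 0))
                                return -EIO;
                }
                nr_bytes -= len;
        }
        return 0;
}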


arch/x86/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -151,6 +151,7 @@ config X86
         select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP if X86_64
         select ARCH_WANT_HUGETLB_VMEMMAP_PREINIT if X86_64
         select ARCH_WANTS_THP_SWAP if X86_64
+        select ARCH_WANTS_STATIC_HUGE_ZERO_FOLIO if X86_64
         select ARCH_HAS_PARANOID_L1D_FLUSH
         select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
         select BUILDTIME_TABLE_SORT

include/linux/huge_mm.h

Lines changed: 18 additions & 0 deletions
@@ -476,6 +476,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

 extern struct folio *huge_zero_folio;
 extern unsigned long huge_zero_pfn;
+extern atomic_t huge_zero_folio_is_static;

 static inline bool is_huge_zero_folio(const struct folio *folio)
 {
@@ -494,6 +495,18 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)

 struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
 void mm_put_huge_zero_folio(struct mm_struct *mm);
+struct folio *__get_static_huge_zero_folio(void);
+
+static inline struct folio *get_static_huge_zero_folio(void)
+{
+        if (!IS_ENABLED(CONFIG_STATIC_HUGE_ZERO_FOLIO))
+                return NULL;
+
+        if (likely(atomic_read(&huge_zero_folio_is_static)))
+                return huge_zero_folio;
+
+        return __get_static_huge_zero_folio();
+}

 static inline bool thp_migration_supported(void)
 {
@@ -685,6 +698,11 @@ static inline int change_huge_pud(struct mmu_gather *tlb,
 {
         return 0;
 }
+
+static inline struct folio *get_static_huge_zero_folio(void)
+{
+        return NULL;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

 static inline int split_folio_to_list_to_order(struct folio *folio,
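get_static_huge_zero_folio() can return NULL: when the config is off, in the !CONFIG_TRANSPARENT_HUGEPAGE stub, or when the slow path cannot take a reference, so callers always need a ZERO_PAGE-sized fallback; thanks to the stub, no #ifdef is needed at the call site. A minimal sketch, with the hypothetical helper max_zero_chunk() made up for illustration:

#include <linux/huge_mm.h>
#include <linux/mm.h>

/* Hypothetical: largest chunk a caller can zero from a single source folio/page. */
static inline size_t max_zero_chunk(void)
{
        struct folio *zero_folio = get_static_huge_zero_folio();

        /* Fall back to the traditional shared zeropage granularity. */
        return zero_folio ? folio_size(zero_folio) : PAGE_SIZE;
}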

mm/Kconfig

Lines changed: 21 additions & 0 deletions
@@ -823,6 +823,27 @@ config ARCH_WANT_GENERAL_HUGETLB
 config ARCH_WANTS_THP_SWAP
         def_bool n

+config ARCH_WANTS_STATIC_HUGE_ZERO_FOLIO
+        def_bool n
+
+config STATIC_HUGE_ZERO_FOLIO
+        bool "Allocate a PMD sized folio for zeroing"
+        depends on ARCH_WANTS_STATIC_HUGE_ZERO_FOLIO && TRANSPARENT_HUGEPAGE
+        help
+          Without this config enabled, the huge zero folio is allocated on
+          demand and freed under memory pressure once no longer in use.
+          To detect remaining users reliably, references to the huge zero folio
+          must be tracked precisely, so it is commonly only available for mapping
+          it into user page tables.
+
+          With this config enabled, the huge zero folio can also be used
+          for other purposes that do not implement precise reference counting:
+          it is still allocated on demand, but never freed, allowing for more
+          wide-spread use, for example, when performing I/O similar to the
+          traditional shared zeropage.
+
+          Not suitable for memory constrained systems.
+
 config MM_ID
         def_bool n
mm/huge_memory.c

Lines changed: 42 additions & 0 deletions
@@ -75,6 +75,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 static bool split_underused_thp = true;

 static atomic_t huge_zero_refcount;
+atomic_t huge_zero_folio_is_static __read_mostly;
 struct folio *huge_zero_folio __read_mostly;
 unsigned long huge_zero_pfn __read_mostly = ~0UL;
 unsigned long huge_anon_orders_always __read_mostly;
@@ -266,6 +267,47 @@ void mm_put_huge_zero_folio(struct mm_struct *mm)
         put_huge_zero_page();
 }

+#ifdef CONFIG_STATIC_HUGE_ZERO_FOLIO
+#define FAIL_COUNT_LIMIT 2
+
+struct folio *__get_static_huge_zero_folio(void)
+{
+        static unsigned long fail_count_clear_timer;
+        static atomic_t huge_zero_static_fail_count __read_mostly;
+
+        if (unlikely(!slab_is_available()))
+                return NULL;
+
+        /*
+         * If we failed to allocate a huge zero folio multiple times,
+         * just refrain from trying for one minute before retrying to get
+         * a reference again.
+         */
+        if (atomic_read(&huge_zero_static_fail_count) > FAIL_COUNT_LIMIT) {
+                if (time_before(jiffies, fail_count_clear_timer))
+                        return NULL;
+                atomic_set(&huge_zero_static_fail_count, 0);
+        }
+        /*
+         * Our raised reference will prevent the shrinker from ever having
+         * success.
+         */
+        if (!get_huge_zero_page()) {
+                int count = atomic_inc_return(&huge_zero_static_fail_count);
+
+                if (count > FAIL_COUNT_LIMIT)
+                        fail_count_clear_timer = get_jiffies_64() + 60 * HZ;
+
+                return NULL;
+        }
+
+        if (atomic_cmpxchg(&huge_zero_folio_is_static, 0, 1) != 0)
+                put_huge_zero_page();
+
+        return huge_zero_folio;
+}
+#endif /* CONFIG_STATIC_HUGE_ZERO_FOLIO */
+
 static unsigned long shrink_huge_zero_folio_count(struct shrinker *shrink,
                                                   struct shrink_control *sc)
 {
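Once __get_static_huge_zero_folio() succeeds, the raised reference keeps huge_zero_refcount elevated forever, so the shrinker can never free the folio and users need no corresponding put. A hypothetical lifetime example follows; the driver name and function are made up and not part of this commit:

#include <linux/huge_mm.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

/* Hypothetical consumer: grab the folio once, use it for the driver's lifetime. */
static struct folio *example_zero_folio;

static int __init example_zero_init(void)
{
        /* No mm struct is needed and no put on exit: the folio is never freed. */
        example_zero_folio = get_static_huge_zero_folio();
        if (!example_zero_folio)
                pr_info("example: static huge zero folio unavailable, using ZERO_PAGE\n");
        return 0;
}
module_init(example_zero_init);

MODULE_LICENSE("GPL");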
