
Commit e101840

chen210kawasaki authored and committed
mm: zsmalloc: drop class lock before freeing zspage
Currently in zs_free(), the class->lock is held until the zspage is
completely freed and the counters are updated. However, freeing pages
back to the buddy allocator requires acquiring the zone lock. Under
heavy memory pressure, zone lock contention can be severe: the CPU
holding the class->lock stalls waiting for the zone lock, which in
turn blocks every other CPU attempting to acquire the same
class->lock.

This patch shrinks the class->lock critical section to reduce lock
contention. By moving the actual page freeing outside the
class->lock, it improves the concurrency of zs_free().

Testing on the RADXA O6 platform shows that with 12 CPUs concurrently
performing zs_free() operations, execution time is reduced by 20%.

Signed-off-by: Xueyuan Chen <[email protected]>
Signed-off-by: Wenchao Hao <[email protected]>
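To see the shape of the change in isolation: the patch detaches the
zspage from the class's structures while still holding class->lock
(remove_zspage() after a successful trylock_zspage()), then drops the
lock and frees the pages afterwards. Below is a minimal user-space C
sketch of that detach-under-lock, free-outside-lock pattern. The
names (put_node(), slow_free(), list_lock) are hypothetical, and a
pthread mutex stands in for the kernel spinlock; this illustrates the
idiom, not zsmalloc code.

/* Minimal sketch of the detach-under-lock, free-outside-lock pattern.
 * Hypothetical names; a pthread mutex stands in for the kernel's
 * class->lock spinlock. Not zsmalloc code. */
#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
	void *payload;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list_head;

/* Stands in for freeing pages back to the buddy allocator: slow, and
 * in the kernel it may contend on another lock (the zone lock). */
static void slow_free(struct node *n)
{
	free(n->payload);
	free(n);
}

void put_node(struct node *n)
{
	struct node *to_free = NULL;
	struct node **pp;

	pthread_mutex_lock(&list_lock);
	/* Cheap work only: unlink the node so no one else can see it. */
	for (pp = &list_head; *pp; pp = &(*pp)->next) {
		if (*pp == n) {
			*pp = n->next;
			to_free = n;
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);

	/* The expensive free runs with the lock dropped, so other
	 * threads can enter the critical section instead of waiting. */
	if (to_free)
		slow_free(to_free);
}

The safety argument is the same in both cases: once the object is
unlinked under the lock, no other thread can reach it, so the
expensive free can proceed without the lock. The patch splits the
counter updates accordingly: class_stat_sub() still runs under
class->lock, while the pool-wide atomic_long_sub() can run after the
unlock because it is atomic.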
1 parent 857ada9 commit e101840

1 file changed

mm/zsmalloc.c

Lines changed: 22 additions & 6 deletions
@@ -801,13 +801,10 @@ static int trylock_zspage(struct zspage *zspage)
 	return 0;
 }
 
-static void __free_zspage(struct zs_pool *pool, struct size_class *class,
-			  struct zspage *zspage)
+static inline void __free_zspage_lockless(struct zs_pool *pool, struct zspage *zspage)
 {
 	struct zpdesc *zpdesc, *next;
 
-	assert_spin_locked(&class->lock);
-
 	VM_BUG_ON(get_zspage_inuse(zspage));
 	VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0);
 
@@ -823,7 +820,13 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
 	} while (zpdesc != NULL);
 
 	cache_free_zspage(zspage);
+}
 
+static void __free_zspage(struct zs_pool *pool, struct size_class *class,
+			  struct zspage *zspage)
+{
+	assert_spin_locked(&class->lock);
+	__free_zspage_lockless(pool, zspage);
 	class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
 	atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated);
 }
@@ -1388,6 +1391,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
 	unsigned long obj;
 	struct size_class *class;
 	int fullness;
+	struct zspage *zspage_to_free = NULL;
 
 	if (IS_ERR_OR_NULL((void *)handle))
 		return;
@@ -1408,10 +1412,22 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
 	obj_free(class->size, obj);
 
 	fullness = fix_fullness_group(class, zspage);
-	if (fullness == ZS_INUSE_RATIO_0)
-		free_zspage(pool, class, zspage);
+	if (fullness == ZS_INUSE_RATIO_0) {
+		if (trylock_zspage(zspage)) {
+			remove_zspage(class, zspage);
+			class_stat_sub(class, ZS_OBJS_ALLOCATED,
+				       class->objs_per_zspage);
+			zspage_to_free = zspage;
+		} else
+			kick_deferred_free(pool);
+	}
 
 	spin_unlock(&class->lock);
+
+	if (likely(zspage_to_free)) {
+		__free_zspage_lockless(pool, zspage_to_free);
+		atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated);
+	}
 	cache_free_handle(handle);
 }
 EXPORT_SYMBOL_GPL(zs_free);
