Skip to content

Commit 029543c

Browse files
wenchao-haokawasaki
authored and committed
mm/zsmalloc: introduce zs_free_deferred() for async handle freeing
zs_free() is expensive due to internal locking (pool->lock, class->lock) and potential zspage freeing. On the process exit path, the slow zs_free() blocks memory reclamation, delaying overall memory release. This has been reported to significantly impact Android low-memory killing where slot_free() accounts for over 80% of the total swap entry freeing cost. Introduce zs_free_deferred() which queues handles into a fixed-size per-pool array for later processing by a workqueue. This allows callers to defer the expensive zs_free() and return quickly, so the process exit path can release memory faster. The array capacity is derived from a 128MB uncompressed data budget (128MB >> PAGE_SHIFT entries), which scales naturally with PAGE_SIZE. When the array reaches half capacity, the workqueue is scheduled to drain pending handles. zs_free_deferred() uses spin_trylock() to access the deferred queue. If the lock is contended (e.g. drain in progress) or the queue is full, it falls back to synchronous zs_free() to guarantee correctness. Also introduce zs_free_deferred_flush() for use during pool teardown to ensure all pending handles are freed. Signed-off-by: Wenchao Hao <[email protected]>
1 parent e101840 commit 029543c

2 files changed

Lines changed: 113 additions & 0 deletions

File tree

include/linux/zsmalloc.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,8 @@ void zs_destroy_pool(struct zs_pool *pool);
3030
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags,
3131
const int nid);
3232
void zs_free(struct zs_pool *pool, unsigned long obj);
33+
void zs_free_deferred(struct zs_pool *pool, unsigned long handle);
34+
void zs_free_deferred_flush(struct zs_pool *pool);
3335

3436
size_t zs_huge_class_size(struct zs_pool *pool);
3537

mm/zsmalloc.c

Lines changed: 111 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,10 @@
5353

5454
#define ZS_HANDLE_SIZE (sizeof(unsigned long))
5555

56+
#define ZS_DEFERRED_FREE_MAX_BYTES (128 << 20)
57+
#define ZS_DEFERRED_FREE_CAPACITY (ZS_DEFERRED_FREE_MAX_BYTES >> PAGE_SHIFT)
58+
#define ZS_DEFERRED_FREE_THRESHOLD (ZS_DEFERRED_FREE_CAPACITY / 2)
59+
5660
/*
5761
* Object location (<PFN>, <obj_idx>) is encoded as
5862
* a single (unsigned long) handle value.
@@ -217,6 +221,13 @@ struct zs_pool {
217221
/* protect zspage migration/compaction */
218222
rwlock_t lock;
219223
atomic_t compaction_in_progress;
224+
225+
/* deferred free support */
226+
spinlock_t deferred_lock;
227+
unsigned long *deferred_handles;
228+
unsigned int deferred_count;
229+
unsigned int deferred_capacity;
230+
struct work_struct deferred_free_work;
220231
};
221232

222233
static inline void zpdesc_set_first(struct zpdesc *zpdesc)
@@ -579,6 +590,19 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
579590
}
580591
DEFINE_SHOW_ATTRIBUTE(zs_stats_size);
581592

593+
static int zs_stats_deferred_show(struct seq_file *s, void *v)
594+
{
595+
struct zs_pool *pool = s->private;
596+
597+
spin_lock(&pool->deferred_lock);
598+
seq_printf(s, "pending: %u\n", pool->deferred_count);
599+
seq_printf(s, "capacity: %u\n", pool->deferred_capacity);
600+
spin_unlock(&pool->deferred_lock);
601+
602+
return 0;
603+
}
604+
DEFINE_SHOW_ATTRIBUTE(zs_stats_deferred);
605+
582606
static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
583607
{
584608
if (!zs_stat_root) {
@@ -590,6 +614,9 @@ static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
590614

591615
debugfs_create_file("classes", S_IFREG | 0444, pool->stat_dentry, pool,
592616
&zs_stats_size_fops);
617+
debugfs_create_file("deferred_free", S_IFREG | 0444,
618+
pool->stat_dentry, pool,
619+
&zs_stats_deferred_fops);
593620
}
594621

595622
static void zs_pool_stat_destroy(struct zs_pool *pool)
@@ -1432,6 +1459,76 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
14321459
}
14331460
EXPORT_SYMBOL_GPL(zs_free);
14341461

1462+
static void zs_deferred_free_work(struct work_struct *work)
1463+
{
1464+
struct zs_pool *pool = container_of(work, struct zs_pool,
1465+
deferred_free_work);
1466+
unsigned long handle;
1467+
1468+
while (1) {
1469+
spin_lock(&pool->deferred_lock);
1470+
if (pool->deferred_count == 0) {
1471+
spin_unlock(&pool->deferred_lock);
1472+
break;
1473+
}
1474+
handle = pool->deferred_handles[--pool->deferred_count];
1475+
spin_unlock(&pool->deferred_lock);
1476+
1477+
zs_free(pool, handle);
1478+
cond_resched();
1479+
}
1480+
}
1481+
1482+
/**
1483+
* zs_free_deferred - queue a handle for asynchronous freeing
1484+
* @pool: pool to free from
1485+
* @handle: handle to free
1486+
*
1487+
* Place @handle into a deferred free queue for later processing by a
1488+
* workqueue. This is intended for callers that are in atomic context
1489+
* (e.g. under a spinlock) and cannot afford the cost of zs_free()
1490+
* directly. When the queue reaches a threshold the work is scheduled.
1491+
* Falls back to synchronous zs_free() if the lock is contended (drain
1492+
* in progress) or if the queue is full.
1493+
*/
1494+
void zs_free_deferred(struct zs_pool *pool, unsigned long handle)
1495+
{
1496+
if (IS_ERR_OR_NULL((void *)handle))
1497+
return;
1498+
1499+
if (!spin_trylock(&pool->deferred_lock))
1500+
goto sync_free;
1501+
1502+
if (pool->deferred_count >= pool->deferred_capacity) {
1503+
spin_unlock(&pool->deferred_lock);
1504+
goto sync_free;
1505+
}
1506+
1507+
pool->deferred_handles[pool->deferred_count++] = handle;
1508+
if (pool->deferred_count >= ZS_DEFERRED_FREE_THRESHOLD)
1509+
queue_work(system_wq, &pool->deferred_free_work);
1510+
spin_unlock(&pool->deferred_lock);
1511+
return;
1512+
1513+
sync_free:
1514+
zs_free(pool, handle);
1515+
}
1516+
EXPORT_SYMBOL_GPL(zs_free_deferred);
1517+
1518+
/**
1519+
* zs_free_deferred_flush - flush all pending deferred frees
1520+
* @pool: pool to flush
1521+
*
1522+
* Wait for any scheduled work to complete, then drain any remaining
1523+
* handles. Must be called from process context.
1524+
*/
1525+
void zs_free_deferred_flush(struct zs_pool *pool)
1526+
{
1527+
flush_work(&pool->deferred_free_work);
1528+
zs_deferred_free_work(&pool->deferred_free_work);
1529+
}
1530+
EXPORT_SYMBOL_GPL(zs_free_deferred_flush);
1531+
14351532
static void zs_object_copy(struct size_class *class, unsigned long dst,
14361533
unsigned long src)
14371534
{
@@ -2099,6 +2196,18 @@ struct zs_pool *zs_create_pool(const char *name)
20992196
rwlock_init(&pool->lock);
21002197
atomic_set(&pool->compaction_in_progress, 0);
21012198

2199+
spin_lock_init(&pool->deferred_lock);
2200+
pool->deferred_capacity = ZS_DEFERRED_FREE_CAPACITY;
2201+
pool->deferred_handles = kvmalloc_array(pool->deferred_capacity,
2202+
sizeof(unsigned long),
2203+
GFP_KERNEL);
2204+
if (!pool->deferred_handles) {
2205+
kfree(pool);
2206+
return NULL;
2207+
}
2208+
pool->deferred_count = 0;
2209+
INIT_WORK(&pool->deferred_free_work, zs_deferred_free_work);
2210+
21022211
pool->name = kstrdup(name, GFP_KERNEL);
21032212
if (!pool->name)
21042213
goto err;
@@ -2201,6 +2310,7 @@ void zs_destroy_pool(struct zs_pool *pool)
22012310
int i;
22022311

22032312
zs_unregister_shrinker(pool);
2313+
zs_free_deferred_flush(pool);
22042314
zs_flush_migration(pool);
22052315
zs_pool_stat_destroy(pool);
22062316

@@ -2224,6 +2334,7 @@ void zs_destroy_pool(struct zs_pool *pool)
22242334
kfree(class);
22252335
}
22262336

2337+
kvfree(pool->deferred_handles);
22272338
kfree(pool->name);
22282339
kfree(pool);
22292340
}

0 commit comments

Comments
 (0)