Skip to content

Commit e3c33bc

Browse files
committed
Merge tag 'mm-hotfixes-stable-2026-03-23-17-56' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM fixes from Andrew Morton: "6 hotfixes. 2 are cc:stable. All are for MM. All are singletons - please see the changelogs for details" * tag 'mm-hotfixes-stable-2026-03-23-17-56' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: mm/damon/stat: monitor all System RAM resources mm/zswap: add missing kunmap_local() mailmap: update email address for Muhammad Usama Anjum zram: do not slot_free() written-back slots mm/damon/core: avoid use of half-online-committed context mm/rmap: clear vma->anon_vma on error
2 parents 26a0198 + 84481e7 commit e3c33bc

7 files changed

Lines changed: 93 additions & 29 deletions

File tree

.mailmap

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -587,6 +587,7 @@ Morten Welinder <[email protected]>
587587
Morten Welinder <[email protected]>
588588
Morten Welinder <[email protected]>
589589
Morten Welinder <[email protected]>
590+
Muhammad Usama Anjum <[email protected]> <[email protected]>
590591
591592
592593

drivers/block/zram/zram_drv.c

Lines changed: 14 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -917,9 +917,8 @@ static void zram_account_writeback_submit(struct zram *zram)
917917

918918
static int zram_writeback_complete(struct zram *zram, struct zram_wb_req *req)
919919
{
920-
u32 size, index = req->pps->index;
921-
int err, prio;
922-
bool huge;
920+
u32 index = req->pps->index;
921+
int err;
923922

924923
err = blk_status_to_errno(req->bio.bi_status);
925924
if (err) {
@@ -946,28 +945,13 @@ static int zram_writeback_complete(struct zram *zram, struct zram_wb_req *req)
946945
goto out;
947946
}
948947

949-
if (zram->compressed_wb) {
950-
/*
951-
* ZRAM_WB slots get freed, we need to preserve data required
952-
* for read decompression.
953-
*/
954-
size = get_slot_size(zram, index);
955-
prio = get_slot_comp_priority(zram, index);
956-
huge = test_slot_flag(zram, index, ZRAM_HUGE);
957-
}
958-
959-
slot_free(zram, index);
960-
set_slot_flag(zram, index, ZRAM_WB);
948+
clear_slot_flag(zram, index, ZRAM_IDLE);
949+
if (test_slot_flag(zram, index, ZRAM_HUGE))
950+
atomic64_dec(&zram->stats.huge_pages);
951+
atomic64_sub(get_slot_size(zram, index), &zram->stats.compr_data_size);
952+
zs_free(zram->mem_pool, get_slot_handle(zram, index));
961953
set_slot_handle(zram, index, req->blk_idx);
962-
963-
if (zram->compressed_wb) {
964-
if (huge)
965-
set_slot_flag(zram, index, ZRAM_HUGE);
966-
set_slot_size(zram, index, size);
967-
set_slot_comp_priority(zram, index, prio);
968-
}
969-
970-
atomic64_inc(&zram->stats.pages_stored);
954+
set_slot_flag(zram, index, ZRAM_WB);
971955

972956
out:
973957
slot_unlock(zram, index);
@@ -2010,8 +1994,13 @@ static void slot_free(struct zram *zram, u32 index)
20101994
set_slot_comp_priority(zram, index, 0);
20111995

20121996
if (test_slot_flag(zram, index, ZRAM_HUGE)) {
1997+
/*
1998+
* Writeback completion decrements ->huge_pages but keeps
1999+
* ZRAM_HUGE flag for deferred decompression path.
2000+
*/
2001+
if (!test_slot_flag(zram, index, ZRAM_WB))
2002+
atomic64_dec(&zram->stats.huge_pages);
20132003
clear_slot_flag(zram, index, ZRAM_HUGE);
2014-
atomic64_dec(&zram->stats.huge_pages);
20152004
}
20162005

20172006
if (test_slot_flag(zram, index, ZRAM_WB)) {

include/linux/damon.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -810,6 +810,12 @@ struct damon_ctx {
810810
struct damos_walk_control *walk_control;
811811
struct mutex walk_control_lock;
812812

813+
/*
814+
* indicate if this may be corrupted. Currently this is set only for
815+
* damon_commit_ctx() failure.
816+
*/
817+
bool maybe_corrupted;
818+
813819
/* Working thread of the given DAMON context */
814820
struct task_struct *kdamond;
815821
/* Protects @kdamond field access */

mm/damon/core.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1252,6 +1252,7 @@ int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
12521252
{
12531253
int err;
12541254

1255+
dst->maybe_corrupted = true;
12551256
if (!is_power_of_2(src->min_region_sz))
12561257
return -EINVAL;
12571258

@@ -1277,6 +1278,7 @@ int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
12771278
dst->addr_unit = src->addr_unit;
12781279
dst->min_region_sz = src->min_region_sz;
12791280

1281+
dst->maybe_corrupted = false;
12801282
return 0;
12811283
}
12821284

@@ -2678,6 +2680,8 @@ static void kdamond_call(struct damon_ctx *ctx, bool cancel)
26782680
complete(&control->completion);
26792681
else if (control->canceled && control->dealloc_on_cancel)
26802682
kfree(control);
2683+
if (!cancel && ctx->maybe_corrupted)
2684+
break;
26812685
}
26822686

26832687
mutex_lock(&ctx->call_controls_lock);
@@ -2707,6 +2711,8 @@ static int kdamond_wait_activation(struct damon_ctx *ctx)
27072711
kdamond_usleep(min_wait_time);
27082712

27092713
kdamond_call(ctx, false);
2714+
if (ctx->maybe_corrupted)
2715+
return -EINVAL;
27102716
damos_walk_cancel(ctx);
27112717
}
27122718
return -EBUSY;
@@ -2790,6 +2796,8 @@ static int kdamond_fn(void *data)
27902796
* kdamond_merge_regions() if possible, to reduce overhead
27912797
*/
27922798
kdamond_call(ctx, false);
2799+
if (ctx->maybe_corrupted)
2800+
break;
27932801
if (!list_empty(&ctx->schemes))
27942802
kdamond_apply_schemes(ctx);
27952803
else

mm/damon/stat.c

Lines changed: 50 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -145,12 +145,59 @@ static int damon_stat_damon_call_fn(void *data)
145145
return 0;
146146
}
147147

148+
struct damon_stat_system_ram_range_walk_arg {
149+
bool walked;
150+
struct resource res;
151+
};
152+
153+
static int damon_stat_system_ram_walk_fn(struct resource *res, void *arg)
154+
{
155+
struct damon_stat_system_ram_range_walk_arg *a = arg;
156+
157+
if (!a->walked) {
158+
a->walked = true;
159+
a->res.start = res->start;
160+
}
161+
a->res.end = res->end;
162+
return 0;
163+
}
164+
165+
static unsigned long damon_stat_res_to_core_addr(resource_size_t ra,
166+
unsigned long addr_unit)
167+
{
168+
/*
169+
* Use div_u64() to avoid linking errors related to __udivdi3,
170+
* __aeabi_uldivmod, or similar problems. This should also help the
171+
* compiler optimize the division (see the div_u64() comment for details).
172+
*/
173+
if (sizeof(ra) == 8 && sizeof(addr_unit) == 4)
174+
return div_u64(ra, addr_unit);
175+
return ra / addr_unit;
176+
}
177+
178+
static int damon_stat_set_monitoring_region(struct damon_target *t,
179+
unsigned long addr_unit, unsigned long min_region_sz)
180+
{
181+
struct damon_addr_range addr_range;
182+
struct damon_stat_system_ram_range_walk_arg arg = {};
183+
184+
walk_system_ram_res(0, -1, &arg, damon_stat_system_ram_walk_fn);
185+
if (!arg.walked)
186+
return -EINVAL;
187+
addr_range.start = damon_stat_res_to_core_addr(
188+
arg.res.start, addr_unit);
189+
addr_range.end = damon_stat_res_to_core_addr(
190+
arg.res.end + 1, addr_unit);
191+
if (addr_range.end <= addr_range.start)
192+
return -EINVAL;
193+
return damon_set_regions(t, &addr_range, 1, min_region_sz);
194+
}
195+
148196
static struct damon_ctx *damon_stat_build_ctx(void)
149197
{
150198
struct damon_ctx *ctx;
151199
struct damon_attrs attrs;
152200
struct damon_target *target;
153-
unsigned long start = 0, end = 0;
154201

155202
ctx = damon_new_ctx();
156203
if (!ctx)
@@ -180,8 +227,8 @@ static struct damon_ctx *damon_stat_build_ctx(void)
180227
if (!target)
181228
goto free_out;
182229
damon_add_target(ctx, target);
183-
if (damon_set_region_biggest_system_ram_default(target, &start, &end,
184-
ctx->min_region_sz))
230+
if (damon_stat_set_monitoring_region(target, ctx->addr_unit,
231+
ctx->min_region_sz))
185232
goto free_out;
186233
return ctx;
187234
free_out:

mm/rmap.c

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -457,6 +457,13 @@ static void cleanup_partial_anon_vmas(struct vm_area_struct *vma)
457457
list_del(&avc->same_vma);
458458
anon_vma_chain_free(avc);
459459
}
460+
461+
/*
462+
* The anon_vma assigned to this VMA is no longer valid, as we were not
463+
* able to correctly clone AVC state. Avoid inconsistent anon_vma tree
464+
* state by resetting.
465+
*/
466+
vma->anon_vma = NULL;
460467
}
461468

462469
/**

mm/zswap.c

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -942,9 +942,15 @@ static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
942942

943943
/* zswap entries of length PAGE_SIZE are not compressed. */
944944
if (entry->length == PAGE_SIZE) {
945+
void *dst;
946+
945947
WARN_ON_ONCE(input->length != PAGE_SIZE);
946-
memcpy_from_sglist(kmap_local_folio(folio, 0), input, 0, PAGE_SIZE);
948+
949+
dst = kmap_local_folio(folio, 0);
950+
memcpy_from_sglist(dst, input, 0, PAGE_SIZE);
947951
dlen = PAGE_SIZE;
952+
kunmap_local(dst);
953+
flush_dcache_folio(folio);
948954
} else {
949955
sg_init_table(&output, 1);
950956
sg_set_folio(&output, folio, PAGE_SIZE, 0);

0 commit comments

Comments
 (0)