#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <linux/perf_event.h>
#include "../mm/slab.h"

static struct kunit_resource resource;
	kmem_cache_destroy(s);
}

#ifdef CONFIG_PERF_EVENTS
/* Outer alloc/free passes performed by test_kmalloc_kfree_nolock(). */
#define NR_ITERATIONS 1000
/* Objects allocated per pass. */
#define NR_OBJECTS 1000
/* Scratch slots holding the objects allocated in the current pass. */
static void *objects[NR_OBJECTS];

/*
 * State shared between the test body and the perf overflow handler.
 * The handler only increments the counters; the test reads them after
 * the event has been released.
 */
struct test_nolock_context {
	struct kunit *test;		/* owning KUnit test */
	int callback_count;		/* times the overflow handler fired */
	int alloc_ok;			/* successful kmalloc_nolock() calls */
	int alloc_fail;			/* failed kmalloc_nolock() calls */
	struct perf_event *event;	/* sampling event driving the handler */
};

/*
 * Pinned, frequency-based CPU-cycles event. Its overflow handler fires from
 * the PMU interrupt (potentially NMI context on some architectures — which is
 * exactly where kmalloc_nolock()/kfree_nolock() must remain safe). Created
 * disabled; the test enables it explicitly once the context is wired up.
 */
static struct perf_event_attr hw_attr = {
	.type = PERF_TYPE_HARDWARE,
	.config = PERF_COUNT_HW_CPU_CYCLES,
	.size = sizeof(struct perf_event_attr),
	.pinned = 1,
	.disabled = 1,
	.freq = 1,
	.sample_freq = 100000,
};
318+ static void overflow_handler_test_kmalloc_kfree_nolock (struct perf_event * event ,
319+ struct perf_sample_data * data ,
320+ struct pt_regs * regs )
321+ {
322+ void * objp ;
323+ gfp_t gfp ;
324+ struct test_nolock_context * ctx = event -> overflow_handler_context ;
325+
326+ /* __GFP_ACCOUNT to test kmalloc_nolock() in alloc_slab_obj_exts() */
327+ gfp = (ctx -> callback_count % 2 ) ? 0 : __GFP_ACCOUNT ;
328+ objp = kmalloc_nolock (64 , gfp , NUMA_NO_NODE );
329+
330+ if (objp )
331+ ctx -> alloc_ok ++ ;
332+ else
333+ ctx -> alloc_fail ++ ;
334+
335+ kfree_nolock (objp );
336+ ctx -> callback_count ++ ;
337+ }
339+ static void test_kmalloc_kfree_nolock (struct kunit * test )
340+ {
341+ int i , j ;
342+ struct test_nolock_context ctx = { .test = test };
343+ struct perf_event * event ;
344+ bool alloc_fail = false;
345+
346+ event = perf_event_create_kernel_counter (& hw_attr , -1 , current ,
347+ overflow_handler_test_kmalloc_kfree_nolock ,
348+ & ctx );
349+ if (IS_ERR (event ))
350+ kunit_skip (test , "Failed to create perf event" );
351+ ctx .event = event ;
352+ perf_event_enable (ctx .event );
353+ for (i = 0 ; i < NR_ITERATIONS ; i ++ ) {
354+ for (j = 0 ; j < NR_OBJECTS ; j ++ ) {
355+ gfp_t gfp = (i % 2 ) ? GFP_KERNEL : GFP_KERNEL_ACCOUNT ;
356+
357+ objects [j ] = kmalloc (64 , gfp );
358+ if (!objects [j ]) {
359+ j -- ;
360+ while (j >= 0 )
361+ kfree (objects [j -- ]);
362+ alloc_fail = true;
363+ goto cleanup ;
364+ }
365+ }
366+ for (j = 0 ; j < NR_OBJECTS ; j ++ )
367+ kfree (objects [j ]);
368+ }
369+
370+ cleanup :
371+ perf_event_disable (ctx .event );
372+ perf_event_release_kernel (ctx .event );
373+
374+ kunit_info (test , "callback_count: %d, alloc_ok: %d, alloc_fail: %d\n" ,
375+ ctx .callback_count , ctx .alloc_ok , ctx .alloc_fail );
376+
377+ if (alloc_fail )
378+ kunit_skip (test , "Allocation failed" );
379+ KUNIT_EXPECT_EQ (test , 0 , slab_errors );
380+ }
381+ #endif
static int test_init(struct kunit *test)
{
	slab_errors = 0;
	KUNIT_CASE(test_kfree_rcu_wq_destroy),
	KUNIT_CASE(test_leak_destroy),
	KUNIT_CASE(test_krealloc_redzone_zeroing),
#ifdef CONFIG_PERF_EVENTS
	KUNIT_CASE_SLOW(test_kmalloc_kfree_nolock),
#endif
	{}
};