
Commit 4516432

ttm/pool: track allocated_pages per NUMA node

This gets the memory sizes from the nodes and stores the limit as 50% of
those. I think eventually we should drop the limits once we have
memcg-aware shrinking, but this should be more NUMA friendly, and it
seems like what people would prefer to happen on NUMA-aware systems.

Cc: Christian Koenig <[email protected]>
Signed-off-by: Dave Airlie <[email protected]>
1 parent 0180d6f commit 4516432
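
In miniature, the patch replaces one global page counter with a counter and a limit per NUMA node. The sketch below is a minimal userspace illustration of that accounting model, not the kernel code: the 4-node `MAX_NUMNODES`, `pool_give()` and `pool_overshoot()` are made up for the example, and C11 atomics stand in for the kernel's `atomic_long_t`.

```c
/* Userspace sketch of per-node pool accounting (assumption: a
 * hypothetical 4-node system; names are illustrative only). */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_NUMNODES 4

static atomic_long allocated_pages[MAX_NUMNODES]; /* was: one global counter */
static unsigned long pool_node_limit[MAX_NUMNODES];

/* Pages returned to the pool are counted against their own node. */
static void pool_give(int nid, long num_pages)
{
	atomic_fetch_add(&allocated_pages[nid], num_pages);
}

/* How far a single node is over its limit (0 if under). */
static long pool_overshoot(int nid)
{
	long cur = atomic_load(&allocated_pages[nid]);
	return cur > (long)pool_node_limit[nid]
		? cur - (long)pool_node_limit[nid] : 0;
}

int main(void)
{
	pool_node_limit[0] = 100;
	pool_node_limit[1] = 100;
	pool_give(0, 128);	/* node 0 goes over its limit */
	pool_give(1, 40);	/* node 1 stays under */
	printf("node0 overshoot: %ld\n", pool_overshoot(0)); /* 28 */
	printf("node1 overshoot: %ld\n", pool_overshoot(1)); /* 0 */
	return 0;
}
```

With a single global counter, node 1's pages would count against node 0's budget; per-node counters keep one busy node from evicting every other node's pool.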

1 file changed

drivers/gpu/drm/ttm/ttm_pool.c: 47 additions & 15 deletions
@@ -116,10 +116,11 @@ struct ttm_pool_tt_restore {
 
 static unsigned long page_pool_size;
 
-MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
+MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool per NUMA node");
 module_param(page_pool_size, ulong, 0644);
 
-static atomic_long_t allocated_pages;
+static unsigned long pool_node_limit[MAX_NUMNODES];
+static atomic_long_t allocated_pages[MAX_NUMNODES];
 
 static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
 static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];
@@ -299,6 +300,7 @@ static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
 static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
 {
 	unsigned int i, num_pages = 1 << pt->order;
+	int nid = page_to_nid(p);
 
 	for (i = 0; i < num_pages; ++i) {
 		if (PageHighMem(p))
@@ -309,10 +311,10 @@ static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
 
 	INIT_LIST_HEAD(&p->lru);
 	rcu_read_lock();
-	list_lru_add(&pt->pages, &p->lru, page_to_nid(p), NULL);
+	list_lru_add(&pt->pages, &p->lru, nid, NULL);
 	rcu_read_unlock();
-	atomic_long_add(1 << pt->order, &allocated_pages);
 
+	atomic_long_add(num_pages, &allocated_pages[nid]);
 	mod_lruvec_page_state(p, NR_GPU_ACTIVE, -num_pages);
 	mod_lruvec_page_state(p, NR_GPU_RECLAIM, num_pages);
 }
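
For scale: a pool type holds pages of a single allocation order, so each give moves 1 << order pages at once. An order-9 entry, for example, accounts for 2^9 = 512 pages, i.e. 2 MiB with 4 KiB pages. Computing nid and num_pages once at the top of the function keeps the LRU insertion and the per-node counter update referring to the same node for the whole block.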
@@ -338,7 +340,7 @@ static struct page *ttm_pool_type_take(struct ttm_pool_type *pt, int nid)
 
 	ret = list_lru_walk_node(&pt->pages, nid, take_one_from_lru, (void *)&p, &nr_to_walk);
 	if (ret == 1 && p) {
-		atomic_long_sub(1 << pt->order, &allocated_pages);
+		atomic_long_sub(1 << pt->order, &allocated_pages[nid]);
 		mod_lruvec_page_state(p, NR_GPU_ACTIVE, (1 << pt->order));
 		mod_lruvec_page_state(p, NR_GPU_RECLAIM, -(1 << pt->order));
 	}
@@ -377,7 +379,7 @@ static void ttm_pool_dispose_list(struct ttm_pool_type *pt,
 		struct page *p;
 		p = list_first_entry(dispose, struct page, lru);
 		list_del_init(&p->lru);
-		atomic_long_sub(1 << pt->order, &allocated_pages);
+		atomic_long_sub(1 << pt->order, &allocated_pages[page_to_nid(p)]);
 		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
 	}
 }
@@ -940,11 +942,13 @@ int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
  */
 void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
 {
+	int nid = ttm_pool_nid(pool);
+
 	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
 
-	while (atomic_long_read(&allocated_pages) > page_pool_size) {
-		unsigned long diff = atomic_long_read(&allocated_pages) - page_pool_size;
-		ttm_pool_shrink(ttm_pool_nid(pool), diff);
+	while (atomic_long_read(&allocated_pages[nid]) > pool_node_limit[nid]) {
+		unsigned long diff = atomic_long_read(&allocated_pages[nid]) - pool_node_limit[nid];
+		ttm_pool_shrink(nid, diff);
 	}
 }
 EXPORT_SYMBOL(ttm_pool_free);
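
The free path now trims only the local node back under its own limit. It has to loop because a single shrink call may free fewer pages than the computed overshoot. Continuing the userspace sketch from after the commit message, with a hypothetical `shrink_some()` standing in for `ttm_pool_shrink()`:

```c
/* Continues the sketch above. shrink_some() is a hypothetical stand-in
 * for ttm_pool_shrink() and, like the real thing, may free fewer pages
 * than requested, which is why the caller loops. */
static long shrink_some(int nid, long want)
{
	long freed = want > 32 ? 32 : want;	/* pretend we freed a chunk */
	atomic_fetch_sub(&allocated_pages[nid], freed);
	return freed;
}

static void drain_to_limit(int nid)
{
	while (atomic_load(&allocated_pages[nid]) > (long)pool_node_limit[nid]) {
		long diff = atomic_load(&allocated_pages[nid])
			  - (long)pool_node_limit[nid];
		shrink_some(nid, diff);
	}
}
```

For example, `drain_to_limit(0)` after `pool_give(0, 128)` would take two iterations (32 pages each... then the remainder) to bring node 0 back to its 100-page limit.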
@@ -1202,7 +1206,7 @@ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
 	do
 		num_freed += ttm_pool_shrink(sc->nid, sc->nr_to_scan);
 	while (num_freed < sc->nr_to_scan &&
-	       atomic_long_read(&allocated_pages));
+	       atomic_long_read(&allocated_pages[sc->nid]));
 
 	sc->nr_scanned = num_freed;
 
@@ -1213,7 +1217,7 @@
 static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
 					     struct shrink_control *sc)
 {
-	unsigned long num_pages = atomic_long_read(&allocated_pages);
+	unsigned long num_pages = atomic_long_read(&allocated_pages[sc->nid]);
 
 	return num_pages ? num_pages : SHRINK_EMPTY;
 }
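
Assuming the TTM pool shrinker is registered NUMA-aware (SHRINKER_NUMA_AWARE), the shrinker core already invokes count and scan once per node with sc->nid set. Indexing allocated_pages by sc->nid makes both callbacks agree on the same per-node population, and a node with an empty pool now reports SHRINK_EMPTY instead of the machine-wide total.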
@@ -1250,8 +1254,12 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
 /* Dump the total amount of allocated pages */
 static void ttm_pool_debugfs_footer(struct seq_file *m)
 {
-	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
-		   atomic_long_read(&allocated_pages), page_pool_size);
+	int nid;
+
+	for_each_node(nid) {
+		seq_printf(m, "\ntotal node%d\t: %8lu of %8lu\n", nid,
+			   atomic_long_read(&allocated_pages[nid]), pool_node_limit[nid]);
+	}
 }
 
 /* Dump the information for the global pools */
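
With the footer iterating over nodes, the debugfs summary gains one total line per node. Given the format string above, output on a hypothetical two-node box would look like this (the numbers are invented for illustration):

```
total node0	:   123456 of  8388608
total node1	:     4096 of  8388608
```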
@@ -1345,6 +1353,23 @@ DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);
 
 #endif
 
+static inline u64 ttm_get_node_memory_size(int nid)
+{
+	/*
+	 * This is directly using si_meminfo_node implementation as the
+	 * function is not exported.
+	 */
+	int zone_type;
+	u64 managed_pages = 0;
+
+	pg_data_t *pgdat = NODE_DATA(nid);
+
+	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
+		managed_pages +=
+			zone_managed_pages(&pgdat->node_zones[zone_type]);
+	return managed_pages * PAGE_SIZE;
+}
+
 /**
  * ttm_pool_mgr_init - Initialize globals
  *
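
Since zone_managed_pages() already counts pages, the helper effectively reports a node's managed memory in bytes (pages * PAGE_SIZE), which ttm_pool_mgr_init() below converts straight back to pages. As a worked example with the common 4 KiB pages (PAGE_SHIFT = 12): a node with 16 GiB of managed memory yields node_size = 17179869184 bytes, node_size >> 12 = 4194304 pages, and a default pool_node_limit of 2097152 pages, i.e. half the node.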
@@ -1356,8 +1381,15 @@ int ttm_pool_mgr_init(unsigned long num_pages)
 {
 	unsigned int i;
 
-	if (!page_pool_size)
-		page_pool_size = num_pages;
+	int nid;
+	for_each_node(nid) {
+		if (!page_pool_size) {
+			u64 node_size = ttm_get_node_memory_size(nid);
+			pool_node_limit[nid] = (node_size >> PAGE_SHIFT) / 2;
+		} else {
+			pool_node_limit[nid] = page_pool_size;
+		}
+	}
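
Note the changed semantics of the existing knob, matching the updated MODULE_PARM_DESC: a non-zero page_pool_size now caps each node at that many pages rather than the whole machine. For instance, assuming the pool code is built into the ttm module, booting with ttm.page_pool_size=262144 would allow up to 262144 pooled pages on every node, while leaving it at 0 keeps the 50%-of-node-memory default.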
