Commit 0d1c207

hnaz authored and torvalds committed
mm: memcontrol: switch to native NR_FILE_PAGES and NR_SHMEM counters
Memcg maintains private MEMCG_CACHE and NR_SHMEM counters. This divergence from the generic VM accounting means unnecessary code overhead, and creates a dependency for memcg that page->mapping is set up at the time of charging, so that page types can be told apart.

Convert the generic accounting sites to mod_lruvec_page_state and friends to maintain the per-cgroup vmstat counters of NR_FILE_PAGES and NR_SHMEM. The page is already locked in these places, so page->mem_cgroup is stable; we only need minimal tweaks of two mem_cgroup_migrate() calls to ensure it's set up in time.

Then replace MEMCG_CACHE with NR_FILE_PAGES and delete the private NR_SHMEM accounting sites.

Signed-off-by: Johannes Weiner <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: Joonsoo Kim <[email protected]>
Cc: Alex Shi <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Shakeel Butt <[email protected]>
Cc: Balbir Singh <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 9da7b52 commit 0d1c207

6 files changed: +50 −43 lines changed
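The heart of the conversion is that the lruvec-aware stat helpers update the generic node counter and the per-cgroup counter in one call, where the old helpers touched the node counter only. A minimal sketch of the idea, assuming the v5.7-era page->mem_cgroup linkage and helper signatures (the _sketch name is ours, for illustration; the real helper lives in include/linux/memcontrol.h):

/*
 * Illustration only: roughly what __mod_lruvec_page_state() does on top
 * of the __mod_node_page_state() call it replaces in this commit.
 */
static inline void mod_lruvec_page_state_sketch(struct page *page,
						enum node_stat_item idx,
						int val)
{
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	/* Pages with no memcg (e.g. not yet charged): node stat only. */
	if (!page->mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	/* Otherwise update the node *and* per-cgroup counters together. */
	lruvec = mem_cgroup_lruvec(page->mem_cgroup, pgdat);
	__mod_lruvec_state(lruvec, idx, val);
}

This is why the conversion needs page->mem_cgroup to be stable at each call site: the accounting sites below all hold the page lock, and the two mem_cgroup_migrate() reorderings ensure the field is populated in time.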

include/linux/memcontrol.h

Lines changed: 1 addition & 2 deletions
@@ -29,8 +29,7 @@ struct kmem_cache;
 
 /* Cgroup-specific page state, on top of universal node page state */
 enum memcg_stat_item {
-	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
-	MEMCG_RSS,
+	MEMCG_RSS = NR_VM_NODE_STAT_ITEMS,
 	MEMCG_RSS_HUGE,
 	MEMCG_SWAP,
 	MEMCG_SOCK,
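Why this one-line enum change is safe: memcg-only items are numbered after the generic node stat items, so one per-memcg counter array can be indexed with either a node_stat_item (such as NR_FILE_PAGES) or a memcg_stat_item (such as MEMCG_RSS). A simplified sketch of the accessor under that assumption (array layout per this kernel generation; the _sketch name is illustrative, not the kernel's exact code):

/*
 * Sketch of memcg_page_state(): indices below NR_VM_NODE_STAT_ITEMS are
 * shared node counters (NR_FILE_PAGES, NR_SHMEM, ...), indices at or
 * above it are memcg-only (MEMCG_RSS, ...); both address the same array.
 */
static unsigned long memcg_page_state_sketch(struct mem_cgroup *memcg, int idx)
{
	long x = atomic_long_read(&memcg->vmstats[idx]);

	/* Per-cpu batching can make the sum transiently negative. */
	return x < 0 ? 0 : x;
}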

mm/filemap.c

Lines changed: 9 additions & 8 deletions
@@ -199,9 +199,9 @@ static void unaccount_page_cache_page(struct address_space *mapping,
 
 	nr = hpage_nr_pages(page);
 
-	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
+	__mod_lruvec_page_state(page, NR_FILE_PAGES, -nr);
 	if (PageSwapBacked(page)) {
-		__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
+		__mod_lruvec_page_state(page, NR_SHMEM, -nr);
 		if (PageTransHuge(page))
 			__dec_node_page_state(page, NR_SHMEM_THPS);
 	} else if (PageTransHuge(page)) {
@@ -802,21 +802,22 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 	new->mapping = mapping;
 	new->index = offset;
 
+	mem_cgroup_migrate(old, new);
+
 	xas_lock_irqsave(&xas, flags);
 	xas_store(&xas, new);
 
 	old->mapping = NULL;
 	/* hugetlb pages do not participate in page cache accounting. */
 	if (!PageHuge(old))
-		__dec_node_page_state(old, NR_FILE_PAGES);
+		__dec_lruvec_page_state(old, NR_FILE_PAGES);
 	if (!PageHuge(new))
-		__inc_node_page_state(new, NR_FILE_PAGES);
+		__inc_lruvec_page_state(new, NR_FILE_PAGES);
 	if (PageSwapBacked(old))
-		__dec_node_page_state(old, NR_SHMEM);
+		__dec_lruvec_page_state(old, NR_SHMEM);
 	if (PageSwapBacked(new))
-		__inc_node_page_state(new, NR_SHMEM);
+		__inc_lruvec_page_state(new, NR_SHMEM);
 	xas_unlock_irqrestore(&xas, flags);
-	mem_cgroup_migrate(old, new);
 	if (freepage)
 		freepage(old);
 	put_page(old);
@@ -867,7 +868,7 @@ static int __add_to_page_cache_locked(struct page *page,
 
 		/* hugetlb pages do not participate in page cache accounting */
 		if (!huge)
-			__inc_node_page_state(page, NR_FILE_PAGES);
+			__inc_lruvec_page_state(page, NR_FILE_PAGES);
unlock:
 		xas_unlock_irq(&xas);
 	} while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
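A note on the replace_page_cache_page() hunk above: mem_cgroup_migrate() is hoisted ahead of the counter updates because the lruvec-aware helpers resolve the cgroup through the page they are handed. A minimal sketch of the ordering constraint, assuming (per the commit message) that mem_cgroup_migrate() is what sets up the new page's memcg linkage in time:

/*
 * Illustration only, not kernel code: if the stat updates ran first,
 * the lookup on 'new' would find no memcg and fall back to node-only
 * (root) accounting, losing the per-cgroup charge.
 */
static void replace_accounting_order(struct page *old, struct page *new)
{
	mem_cgroup_migrate(old, new);	/* new's memcg linkage is valid now */

	__dec_lruvec_page_state(old, NR_FILE_PAGES);	/* uncharge old page */
	__inc_lruvec_page_state(new, NR_FILE_PAGES);	/* charge new page */
}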

mm/khugepaged.c

Lines changed: 11 additions & 5 deletions
@@ -1844,12 +1844,18 @@ static void collapse_file(struct mm_struct *mm,
 	}
 
 	if (nr_none) {
-		struct zone *zone = page_zone(new_page);
-
-		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
+		struct lruvec *lruvec;
+		/*
+		 * XXX: We have started try_charge and pinned the
+		 * memcg, but the page isn't committed yet so we
+		 * cannot use mod_lruvec_page_state(). This hackery
+		 * will be cleaned up when remove the page->mapping
+		 * dependency from memcg and fully charge above.
+		 */
+		lruvec = mem_cgroup_lruvec(memcg, page_pgdat(new_page));
+		__mod_lruvec_state(lruvec, NR_FILE_PAGES, nr_none);
 		if (is_shmem)
-			__mod_node_page_state(zone->zone_pgdat,
-					      NR_SHMEM, nr_none);
+			__mod_lruvec_state(lruvec, NR_SHMEM, nr_none);
 	}
 
 xa_locked:
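A usage note on the collapse_file() hunk: at this point the new page has been charged via try_charge and the memcg is pinned, but the page is not yet committed to the page cache, so the page-based mod_lruvec_page_state() helpers cannot be trusted to find its cgroup. The lruvec is therefore resolved by hand from the pinned memcg and the page's node, and the counters are updated with __mod_lruvec_state() directly, as the in-diff XXX comment explains.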

mm/memcontrol.c

Lines changed: 11 additions & 17 deletions
@@ -842,11 +842,6 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 	 */
 	if (PageAnon(page))
 		__mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
-	else {
-		__mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
-		if (PageSwapBacked(page))
-			__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
-	}
 
 	if (abs(nr_pages) > 1) {
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
@@ -1392,7 +1387,7 @@ static char *memory_stat_format(struct mem_cgroup *memcg)
 		       (u64)memcg_page_state(memcg, MEMCG_RSS) *
 		       PAGE_SIZE);
 	seq_buf_printf(&s, "file %llu\n",
-		       (u64)memcg_page_state(memcg, MEMCG_CACHE) *
+		       (u64)memcg_page_state(memcg, NR_FILE_PAGES) *
 		       PAGE_SIZE);
 	seq_buf_printf(&s, "kernel_stack %llu\n",
 		       (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) *
@@ -3357,7 +3352,7 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
 	unsigned long val;
 
 	if (mem_cgroup_is_root(memcg)) {
-		val = memcg_page_state(memcg, MEMCG_CACHE) +
+		val = memcg_page_state(memcg, NR_FILE_PAGES) +
 			memcg_page_state(memcg, MEMCG_RSS);
 		if (swap)
 			val += memcg_page_state(memcg, MEMCG_SWAP);
@@ -3828,7 +3823,7 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
 #endif /* CONFIG_NUMA */
 
 static const unsigned int memcg1_stats[] = {
-	MEMCG_CACHE,
+	NR_FILE_PAGES,
 	MEMCG_RSS,
 	MEMCG_RSS_HUGE,
 	NR_SHMEM,
@@ -5461,6 +5456,14 @@ static int mem_cgroup_move_account(struct page *page,
 	lock_page_memcg(page);
 
 	if (!PageAnon(page)) {
+		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
+		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
+
+		if (PageSwapBacked(page)) {
+			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
+			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
+		}
+
 		if (page_mapped(page)) {
 			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
 			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
@@ -6673,10 +6676,8 @@ struct uncharge_gather {
 	unsigned long nr_pages;
 	unsigned long pgpgout;
 	unsigned long nr_anon;
-	unsigned long nr_file;
 	unsigned long nr_kmem;
 	unsigned long nr_huge;
-	unsigned long nr_shmem;
 	struct page *dummy_page;
 };
 
@@ -6700,9 +6701,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
 
 	local_irq_save(flags);
 	__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
-	__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
 	__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
-	__mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
 	memcg_check_events(ug->memcg, ug->dummy_page);
@@ -6743,11 +6742,6 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
 			ug->nr_huge += nr_pages;
 		if (PageAnon(page))
 			ug->nr_anon += nr_pages;
-		else {
-			ug->nr_file += nr_pages;
-			if (PageSwapBacked(page))
-				ug->nr_shmem += nr_pages;
-		}
 		ug->pgpgout++;
 	} else {
 		ug->nr_kmem += nr_pages;
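Note on the mem_cgroup_move_account() hunk: now that NR_FILE_PAGES and NR_SHMEM are tracked per cgroup, moving a page between cgroups must transfer these counters explicitly from from_vec to to_vec, exactly as was already done for NR_FILE_MAPPED. Conversely, charging and uncharging no longer need the private MEMCG_CACHE/NR_SHMEM bookkeeping, which is why mem_cgroup_charge_statistics(), uncharge_gather, uncharge_batch() and uncharge_page() all shrink.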

mm/migrate.c

Lines changed: 11 additions & 4 deletions
@@ -490,11 +490,18 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 * are mapped to swap space.
 	 */
 	if (newzone != oldzone) {
-		__dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
-		__inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
+		struct lruvec *old_lruvec, *new_lruvec;
+		struct mem_cgroup *memcg;
+
+		memcg = page_memcg(page);
+		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
+		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
+
+		__dec_lruvec_state(old_lruvec, NR_FILE_PAGES);
+		__inc_lruvec_state(new_lruvec, NR_FILE_PAGES);
 		if (PageSwapBacked(page) && !PageSwapCache(page)) {
-			__dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
-			__inc_node_state(newzone->zone_pgdat, NR_SHMEM);
+			__dec_lruvec_state(old_lruvec, NR_SHMEM);
+			__inc_lruvec_state(new_lruvec, NR_SHMEM);
 		}
 		if (dirty && mapping_cap_account_dirty(mapping)) {
 			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
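Note on the migrate_page_move_mapping() hunk: migration moves a page between NUMA nodes, not between cgroups, so both lruvecs are resolved from the same page_memcg(page); the counts simply shift from the old node's lruvec to the new node's within that cgroup, leaving the cgroup-wide totals unchanged.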

mm/shmem.c

Lines changed: 7 additions & 7 deletions
@@ -653,8 +653,8 @@ static int shmem_add_to_page_cache(struct page *page,
 			__inc_node_page_state(page, NR_SHMEM_THPS);
 		}
 		mapping->nrpages += nr;
-		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
-		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
+		__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
+		__mod_lruvec_page_state(page, NR_SHMEM, nr);
unlock:
 		xas_unlock_irq(&xas);
 	} while (xas_nomem(&xas, gfp));
@@ -685,8 +685,8 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
 	error = shmem_replace_entry(mapping, page->index, page, radswap);
 	page->mapping = NULL;
 	mapping->nrpages--;
-	__dec_node_page_state(page, NR_FILE_PAGES);
-	__dec_node_page_state(page, NR_SHMEM);
+	__dec_lruvec_page_state(page, NR_FILE_PAGES);
+	__dec_lruvec_page_state(page, NR_SHMEM);
 	xa_unlock_irq(&mapping->i_pages);
 	put_page(page);
 	BUG_ON(error);
@@ -1593,8 +1593,9 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	xa_lock_irq(&swap_mapping->i_pages);
 	error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
 	if (!error) {
-		__inc_node_page_state(newpage, NR_FILE_PAGES);
-		__dec_node_page_state(oldpage, NR_FILE_PAGES);
+		mem_cgroup_migrate(oldpage, newpage);
+		__inc_lruvec_page_state(newpage, NR_FILE_PAGES);
+		__dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
 	}
 	xa_unlock_irq(&swap_mapping->i_pages);
 
@@ -1606,7 +1607,6 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	 */
 	oldpage = newpage;
 	} else {
-		mem_cgroup_migrate(oldpage, newpage);
 		lru_cache_add_anon(newpage);
 		*pagep = newpage;
 	}
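As in replace_page_cache_page() above, the mem_cgroup_migrate() call in shmem_replace_page() is moved ahead of the NR_FILE_PAGES updates so that newpage's cgroup linkage is set up by the time __inc_lruvec_page_state() charges it; this is the second of the two mem_cgroup_migrate() tweaks mentioned in the commit message.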
