Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S932769Ab1BYVjM (ORCPT ); Fri, 25 Feb 2011 16:39:12 -0500 Received: from smtp-out.google.com ([74.125.121.67]:50400 "EHLO smtp-out.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S932545Ab1BYVjK (ORCPT ); Fri, 25 Feb 2011 16:39:10 -0500 DomainKey-Signature: a=rsa-sha1; s=beta; d=google.com; c=nofws; q=dns; h=from:to:cc:subject:date:message-id:x-mailer:in-reply-to:references; b=iy0V27O7ZuyYmFEJZDL9vG2AeAWJe4FeS+Vb2jfsjusUHVIc3xuXja2WjX5bEAQEJ cN4+ZJgHfR2aaW9405Ufg== From: Greg Thelen To: Andrew Morton Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org, containers@lists.osdl.org, Andrea Righi , Balbir Singh , KAMEZAWA Hiroyuki , Daisuke Nishimura , Minchan Kim , Ciju Rajan K , David Rientjes , Wu Fengguang , Chad Talbott , Justin TerAvest , Vivek Goyal , Greg Thelen Subject: [PATCH v5 5/9] memcg: add dirty page accounting infrastructure Date: Fri, 25 Feb 2011 13:35:56 -0800 Message-Id: <1298669760-26344-6-git-send-email-gthelen@google.com> X-Mailer: git-send-email 1.7.3.1 In-Reply-To: <1298669760-26344-1-git-send-email-gthelen@google.com> References: <1298669760-26344-1-git-send-email-gthelen@google.com> Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Length: 6559 Lines: 189 Add memcg routines to track dirty, writeback, and unstable_NFS pages. These routines are not yet used by the kernel to count such pages. A later change adds kernel calls to these new routines. Signed-off-by: Greg Thelen Signed-off-by: Andrea Righi Acked-by: KAMEZAWA Hiroyuki Acked-by: Daisuke Nishimura --- Changelog since v1: - Renamed "nfs"/"total_nfs" to "nfs_unstable"/"total_nfs_unstable" in per cgroup memory.stat to match /proc/meminfo. - Rename (for clarity): - mem_cgroup_write_page_stat_item -> mem_cgroup_page_stat_item - mem_cgroup_read_page_stat_item -> mem_cgroup_nr_pages_item - Remove redundant comments. 
- Made mem_cgroup_move_account_page_stat() inline. include/linux/memcontrol.h | 5 ++- mm/memcontrol.c | 87 ++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 83 insertions(+), 9 deletions(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 3da48ae..e1f70a9 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -25,9 +25,12 @@ struct page_cgroup; struct page; struct mm_struct; -/* Stats that can be updated by kernel. */ +/* mem_cgroup page counts accessed by kernel. */ enum mem_cgroup_page_stat_item { MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */ + MEMCG_NR_FILE_DIRTY, /* # of dirty pages in page cache */ + MEMCG_NR_FILE_WRITEBACK, /* # of pages under writeback */ + MEMCG_NR_FILE_UNSTABLE_NFS, /* # of NFS unstable pages */ }; extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 1c2704a..38f786b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -92,8 +92,11 @@ enum mem_cgroup_stat_index { */ MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */ MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */ - MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */ MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */ + MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */ + MEM_CGROUP_STAT_FILE_DIRTY, /* # of dirty pages in page cache */ + MEM_CGROUP_STAT_FILE_WRITEBACK, /* # of pages under writeback */ + MEM_CGROUP_STAT_FILE_UNSTABLE_NFS, /* # of NFS unstable pages */ MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */ MEM_CGROUP_ON_MOVE, /* someone is moving account between groups */ MEM_CGROUP_STAT_NSTATS, @@ -1622,6 +1625,44 @@ void mem_cgroup_update_page_stat(struct page *page, ClearPageCgroupFileMapped(pc); idx = MEM_CGROUP_STAT_FILE_MAPPED; break; + + case MEMCG_NR_FILE_DIRTY: + /* Use Test{Set,Clear} to only un/charge the memcg once. 
*/ + if (val > 0) { + if (TestSetPageCgroupFileDirty(pc)) + val = 0; + } else { + if (!TestClearPageCgroupFileDirty(pc)) + val = 0; + } + idx = MEM_CGROUP_STAT_FILE_DIRTY; + break; + + case MEMCG_NR_FILE_WRITEBACK: + /* + * This counter is adjusted while holding the mapping's + * tree_lock. Therefore there is no race between setting and + * clearing of this flag. + */ + if (val > 0) + SetPageCgroupFileWriteback(pc); + else + ClearPageCgroupFileWriteback(pc); + idx = MEM_CGROUP_STAT_FILE_WRITEBACK; + break; + + case MEMCG_NR_FILE_UNSTABLE_NFS: + /* Use Test{Set,Clear} to only un/charge the memcg once. */ + if (val > 0) { + if (TestSetPageCgroupFileUnstableNFS(pc)) + val = 0; + } else { + if (!TestClearPageCgroupFileUnstableNFS(pc)) + val = 0; + } + idx = MEM_CGROUP_STAT_FILE_UNSTABLE_NFS; + break; + default: BUG(); } @@ -2181,6 +2222,17 @@ void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail) } #endif +static inline +void mem_cgroup_move_account_page_stat(struct mem_cgroup *from, + struct mem_cgroup *to, + enum mem_cgroup_stat_index idx) +{ + preempt_disable(); + __this_cpu_dec(from->stat->count[idx]); + __this_cpu_inc(to->stat->count[idx]); + preempt_enable(); +} + /** * mem_cgroup_move_account - move account of the page * @page: the page @@ -2229,13 +2281,18 @@ static int mem_cgroup_move_account(struct page *page, move_lock_page_cgroup(pc, &flags); - if (PageCgroupFileMapped(pc)) { - /* Update mapped_file data for mem_cgroup */ - preempt_disable(); - __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); - __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); - preempt_enable(); - } + if (PageCgroupFileMapped(pc)) + mem_cgroup_move_account_page_stat(from, to, + MEM_CGROUP_STAT_FILE_MAPPED); + if (PageCgroupFileDirty(pc)) + mem_cgroup_move_account_page_stat(from, to, + MEM_CGROUP_STAT_FILE_DIRTY); + if (PageCgroupFileWriteback(pc)) + mem_cgroup_move_account_page_stat(from, to, + MEM_CGROUP_STAT_FILE_WRITEBACK); + if 
(PageCgroupFileUnstableNFS(pc)) + mem_cgroup_move_account_page_stat(from, to, + MEM_CGROUP_STAT_FILE_UNSTABLE_NFS); mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages); if (uncharge) /* This is not "cancel", but cancel_charge does all we need. */ @@ -3681,6 +3738,9 @@ enum { MCS_PGPGIN, MCS_PGPGOUT, MCS_SWAP, + MCS_FILE_DIRTY, + MCS_WRITEBACK, + MCS_UNSTABLE_NFS, MCS_INACTIVE_ANON, MCS_ACTIVE_ANON, MCS_INACTIVE_FILE, @@ -3703,6 +3763,9 @@ struct { {"pgpgin", "total_pgpgin"}, {"pgpgout", "total_pgpgout"}, {"swap", "total_swap"}, + {"dirty", "total_dirty"}, + {"writeback", "total_writeback"}, + {"nfs_unstable", "total_nfs_unstable"}, {"inactive_anon", "total_inactive_anon"}, {"active_anon", "total_active_anon"}, {"inactive_file", "total_inactive_file"}, @@ -3732,6 +3795,14 @@ mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s) s->stat[MCS_SWAP] += val * PAGE_SIZE; } + /* dirty stat */ + val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_DIRTY); + s->stat[MCS_FILE_DIRTY] += val * PAGE_SIZE; + val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_WRITEBACK); + s->stat[MCS_WRITEBACK] += val * PAGE_SIZE; + val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_UNSTABLE_NFS); + s->stat[MCS_UNSTABLE_NFS] += val * PAGE_SIZE; + /* per zone stat */ val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON); s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE; -- 1.7.3.1 -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/