From: Minchan Kim
To: Andrew Morton
Cc: linux-mm, LKML, Minchan Kim, Johannes Weiner, KAMEZAWA Hiroyuki,
    KOSAKI Motohiro, Mel Gorman, Rik van Riel, Andrea Arcangeli, Michal Hocko
Subject: [PATCH v4 09/10] add inorder-lru tracepoints for just measurement
Date: Thu, 30 Jun 2011 23:55:19 +0900
Message-Id: <8186f779cbfa1fbd83420549b4fc25cc9cb71a69.1309444658.git.minchan.kim@gmail.com>
X-Mailer: git-send-email 1.7.4.1
X-Mailing-List: linux-kernel@vger.kernel.org

This patch adds some tracepoints so the effect of this patch series can
be observed. The tracepoints are not intended for merging; they exist
only to measure the effect.

Cc: Johannes Weiner
Cc: KAMEZAWA Hiroyuki
Cc: KOSAKI Motohiro
Cc: Mel Gorman
Cc: Rik van Riel
Cc: Andrea Arcangeli
Cc: Michal Hocko
Signed-off-by: Minchan Kim
---
 include/trace/events/inorder_putback.h |   88 ++++++++++++++++++++++++++++++++
 mm/migrate.c                           |    3 +
 mm/swap.c                              |    3 +
 mm/vmscan.c                            |    4 +-
 4 files changed, 96 insertions(+), 2 deletions(-)
 create mode 100644 include/trace/events/inorder_putback.h

diff --git a/include/trace/events/inorder_putback.h b/include/trace/events/inorder_putback.h
new file mode 100644
index 0000000..fe81742
--- /dev/null
+++ b/include/trace/events/inorder_putback.h
@@ -0,0 +1,88 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM inorder_putback
+
+#if !defined(_TRACE_INP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_INP_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(mm_inorder_inorder,
+
+	TP_PROTO(struct page *page,
+		struct page *old_page,
+		struct page *prev_page),
+
+	TP_ARGS(page, old_page, prev_page),
+
+	TP_STRUCT__entry(
+		__field(struct page *, page)
+		__field(struct page *, old_page)
+		__field(struct page *, prev_page)
+	),
+
+	TP_fast_assign(
+		__entry->page = page;
+		__entry->old_page = old_page;
+		__entry->prev_page = prev_page;
+	),
+
+	TP_printk("pfn=%lu old pfn=%lu prev_pfn=%lu active=%d",
+		page_to_pfn(__entry->page),
+		page_to_pfn(__entry->old_page),
+		page_to_pfn(__entry->prev_page),
+		PageActive(__entry->prev_page))
+);
+
+TRACE_EVENT(mm_inorder_outoforder,
+	TP_PROTO(struct page *page,
+		struct page *old_page,
+		struct page *prev_page),
+
+	TP_ARGS(page, old_page, prev_page),
+
+	TP_STRUCT__entry(
+		__field(struct page *, page)
+		__field(struct page *, old_page)
+		__field(struct page *, prev_page)
+	),
+
+	TP_fast_assign(
+		__entry->page = page;
+		__entry->old_page = old_page;
+		__entry->prev_page = prev_page;
+	),
+
+	TP_printk("pfn=%lu old pfn=%lu prev_pfn=%lu active=%d",
+		page_to_pfn(__entry->page),
+		page_to_pfn(__entry->old_page),
+		__entry->prev_page ? page_to_pfn(__entry->prev_page) : 0,
+		__entry->prev_page ? PageActive(__entry->prev_page) : 0)
+);
+
+TRACE_EVENT(mm_inorder_isolate,
+
+	TP_PROTO(struct page *prev_page,
+		struct page *page),
+
+	TP_ARGS(prev_page, page),
+
+	TP_STRUCT__entry(
+		__field(struct page *, prev_page)
+		__field(struct page *, page)
+	),
+
+	TP_fast_assign(
+		__entry->prev_page = prev_page;
+		__entry->page = page;
+	),
+
+	TP_printk("prev_pfn=%lu pfn=%lu active=%d",
+		page_to_pfn(__entry->prev_page),
+		page_to_pfn(__entry->page), PageActive(__entry->prev_page))
+);
+
+#endif /* _TRACE_INP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/mm/migrate.c b/mm/migrate.c
index cf73477..1267c45 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -39,6 +39,9 @@
 
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/inorder_putback.h>
+
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
 
 /*
diff --git a/mm/swap.c b/mm/swap.c
index 611013d..f2ccf81 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -32,6 +32,7 @@
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
+#include <trace/events/inorder_putback.h>
 
 #include "internal.h"
 
 /* How many pages do we try to swap or page in/out together? */
@@ -846,12 +847,14 @@ static void ____pagevec_ilru_add_fn(struct page *page, void *arg, int idx)
 		 */
 		adjust_ilru_list(lru, old_page, page, idx);
 		__add_page_to_lru_list(zone, page, lru, &prev_page->lru);
+		trace_mm_inorder_inorder(page, old_page, prev_page);
 	} else {
 		file = is_file_lru(lru);
 		active = is_active_lru(lru);
 		if (active)
 			SetPageActive(page);
 		add_page_to_lru_list(zone, page, lru);
+		trace_mm_inorder_outoforder(page, old_page, prev_page);
 	}
 
 	update_page_reclaim_stat(zone, page, file, active);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f0e7789..eb26f03 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -50,10 +50,9 @@
 #include <linux/swapops.h>
 
 #include "internal.h"
-
+#include <trace/events/inorder_putback.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/vmscan.h>
-
 /*
  * reclaim_mode determines how the inactive list is shrunk
  * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
@@ -1055,6 +1054,7 @@ int isolate_ilru_page(struct page *page, isolate_mode_t mode, int file,
 		}
 
 		*prev_page = lru_to_page(&page->lru);
+		trace_mm_inorder_isolate(*prev_page, page);
 	}
 
 	return ret;
-- 
1.7.4.1
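
A note on the trace header above: mm_inorder_inorder and mm_inorder_outoforder
share identical TP_STRUCT__entry and TP_fast_assign blocks, so the same pair of
events could also be written with the kernel's DECLARE_EVENT_CLASS/DEFINE_EVENT
idiom. The sketch below is not part of the patch; the class name
mm_inorder_putback_template is invented here for illustration, and the
NULL-safe TP_printk from mm_inorder_outoforder is reused for both events:

/*
 * Hypothetical alternative (not in this patch): one event class for the
 * two in-order putback events, avoiding the duplicated entry/assign blocks.
 */
DECLARE_EVENT_CLASS(mm_inorder_putback_template,

	TP_PROTO(struct page *page,
		struct page *old_page,
		struct page *prev_page),

	TP_ARGS(page, old_page, prev_page),

	TP_STRUCT__entry(
		__field(struct page *, page)
		__field(struct page *, old_page)
		__field(struct page *, prev_page)
	),

	TP_fast_assign(
		__entry->page = page;
		__entry->old_page = old_page;
		__entry->prev_page = prev_page;
	),

	/* NULL-safe print, also valid for the in-order case */
	TP_printk("pfn=%lu old pfn=%lu prev_pfn=%lu active=%d",
		page_to_pfn(__entry->page),
		page_to_pfn(__entry->old_page),
		__entry->prev_page ? page_to_pfn(__entry->prev_page) : 0,
		__entry->prev_page ? PageActive(__entry->prev_page) : 0)
);

DEFINE_EVENT(mm_inorder_putback_template, mm_inorder_inorder,

	TP_PROTO(struct page *page, struct page *old_page,
		struct page *prev_page),

	TP_ARGS(page, old_page, prev_page)
);

DEFINE_EVENT(mm_inorder_putback_template, mm_inorder_outoforder,

	TP_PROTO(struct page *page, struct page *old_page,
		struct page *prev_page),

	TP_ARGS(page, old_page, prev_page)
);

Either way, the events show up under
/sys/kernel/debug/tracing/events/inorder_putback/ and can be enabled per event
or for the whole group while the measurement workload runs.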