From: Ebru Akagunduz <ebru.akagunduz@gmail.com>
To: linux-mm@kvack.org
Cc: akpm@linux-foundation.org, kirill.shutemov@linux.intel.com,
	n-horiguchi@ah.jp.nec.com, aarcange@redhat.com, riel@redhat.com,
	iamjoonsoo.kim@lge.com, xiexiuqi@huawei.com, gorcunov@openvz.org,
	linux-kernel@vger.kernel.org, mgorman@suse.de, rientjes@google.com,
	vbabka@suse.cz, aneesh.kumar@linux.vnet.ibm.com, hughd@google.com,
	hannes@cmpxchg.org, mhocko@suse.cz, boaz@plexistor.com,
	raindel@mellanox.com, Ebru Akagunduz <ebru.akagunduz@gmail.com>
Subject: [RFC v2 1/3] mm: add tracepoint for scanning pages
Date: Sat, 20 Jun 2015 14:28:04 +0300
Message-Id: <1434799686-7929-2-git-send-email-ebru.akagunduz@gmail.com>
X-Mailer: git-send-email 1.9.1
In-Reply-To: <1434799686-7929-1-git-send-email-ebru.akagunduz@gmail.com>
References: <1434799686-7929-1-git-send-email-ebru.akagunduz@gmail.com>

Using static tracepoints, the data handled by these functions can be
recorded at runtime. This makes it easier to automate debugging without
making a lot of changes in the source code. This patch adds tracepoints
to khugepaged_scan_pmd, collapse_huge_page and
__collapse_huge_page_isolate.

Signed-off-by: Ebru Akagunduz <ebru.akagunduz@gmail.com>
Acked-by: Rik van Riel <riel@redhat.com>
---
Changes in v2:
 - Nothing changed

 include/trace/events/huge_memory.h | 96 ++++++++++++++++++++++++++++++++++++++
 mm/huge_memory.c                   | 10 +++-
 2 files changed, 105 insertions(+), 1 deletion(-)
 create mode 100644 include/trace/events/huge_memory.h

diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
new file mode 100644
index 0000000..4b9049b
--- /dev/null
+++ b/include/trace/events/huge_memory.h
@@ -0,0 +1,96 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM huge_memory
+
+#if !defined(__HUGE_MEMORY_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HUGE_MEMORY_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(mm_khugepaged_scan_pmd,
+
+	TP_PROTO(struct mm_struct *mm, unsigned long vm_start, bool writable,
+		 bool referenced, int none_or_zero, int collapse),
+
+	TP_ARGS(mm, vm_start, writable, referenced, none_or_zero, collapse),
+
+	TP_STRUCT__entry(
+		__field(struct mm_struct *, mm)
+		__field(unsigned long, vm_start)
+		__field(bool, writable)
+		__field(bool, referenced)
+		__field(int, none_or_zero)
+		__field(int, collapse)
+	),
+
+	TP_fast_assign(
+		__entry->mm = mm;
+		__entry->vm_start = vm_start;
+		__entry->writable = writable;
+		__entry->referenced = referenced;
+		__entry->none_or_zero = none_or_zero;
+		__entry->collapse = collapse;
+	),
+
+	TP_printk("mm=%p, vm_start=%04lx, writable=%d, referenced=%d, none_or_zero=%d, collapse=%d",
+		__entry->mm,
+		__entry->vm_start,
+		__entry->writable,
+		__entry->referenced,
+		__entry->none_or_zero,
+		__entry->collapse)
+);
+
+TRACE_EVENT(mm_collapse_huge_page,
+
+	TP_PROTO(struct mm_struct *mm, unsigned long vm_start, int isolated),
+
+	TP_ARGS(mm, vm_start, isolated),
+
+	TP_STRUCT__entry(
+		__field(struct mm_struct *, mm)
+		__field(unsigned long, vm_start)
+		__field(int, isolated)
+	),
+
+	TP_fast_assign(
+		__entry->mm = mm;
+		__entry->vm_start = vm_start;
+		__entry->isolated = isolated;
+	),
+
+	TP_printk("mm=%p, vm_start=%04lx, isolated=%d",
+		__entry->mm,
+		__entry->vm_start,
+		__entry->isolated)
+);
+
+TRACE_EVENT(mm_collapse_huge_page_isolate,
+
+	TP_PROTO(unsigned long vm_start, int none_or_zero,
+		 bool referenced, bool writable),
+
+	TP_ARGS(vm_start, none_or_zero, referenced, writable),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, vm_start)
+		__field(int, none_or_zero)
+		__field(bool, referenced)
+		__field(bool, writable)
+	),
+
+	TP_fast_assign(
+		__entry->vm_start = vm_start;
+		__entry->none_or_zero = none_or_zero;
+		__entry->referenced = referenced;
+		__entry->writable = writable;
+	),
+
+	TP_printk("vm_start=%04lx, none_or_zero=%d, referenced=%d, writable=%d",
+		__entry->vm_start,
+		__entry->none_or_zero,
+		__entry->referenced,
+		__entry->writable)
+);
+
+#endif /* __HUGE_MEMORY_H */
+#include <trace/define_trace.h>
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9671f51..9bb97fc 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -29,6 +29,9 @@
 #include <asm/pgalloc.h>
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/huge_memory.h>
+
 /*
  * By default transparent hugepage support is disabled in order that avoid
  * to risk increase the memory footprint of applications without a guaranteed
@@ -2266,6 +2269,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 	if (likely(referenced && writable))
 		return 1;
 out:
+	trace_mm_collapse_huge_page_isolate(vma->vm_start, none_or_zero,
+					    referenced, writable);
 	release_pte_pages(pte, _pte);
 	return 0;
 }
@@ -2501,7 +2506,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	pgtable_t pgtable;
 	struct page *new_page;
 	spinlock_t *pmd_ptl, *pte_ptl;
-	int isolated;
+	int isolated = 0;
 	unsigned long hstart, hend;
 	struct mem_cgroup *memcg;
 	unsigned long mmun_start;	/* For mmu_notifiers */
@@ -2619,6 +2624,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	khugepaged_pages_collapsed++;
 out_up_write:
 	up_write(&mm->mmap_sem);
+	trace_mm_collapse_huge_page(mm, vma->vm_start, isolated);
 	return;
 
 out:
@@ -2694,6 +2700,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 		ret = 1;
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
+	trace_mm_khugepaged_scan_pmd(mm, vma->vm_start, writable, referenced,
+				     none_or_zero, ret);
 	if (ret) {
 		node = khugepaged_find_target_node();
 		/* collapse_huge_page will return with the mmap_sem released */
-- 
1.9.1
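
A note for anyone who wants to try the series: the sketch below is not
part of the patch, just one hypothetical way to consume the new
tracepoints from userspace. It assumes debugfs is mounted at
/sys/kernel/debug and that it runs as root (both are my assumptions,
not requirements added by the patch); it enables every event in the new
huge_memory group and then streams trace_pipe.

/*
 * Not part of the patch: minimal userspace sketch that enables the new
 * huge_memory tracepoints and streams their output.  The tracefs path
 * below assumes debugfs is mounted at /sys/kernel/debug; run as root
 * on a kernel built with this series.
 */
#include <stdio.h>
#include <stdlib.h>

#define TRACING "/sys/kernel/debug/tracing"

static void write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	char line[4096];
	FILE *fp;

	/* Turn on mm_khugepaged_scan_pmd, mm_collapse_huge_page and
	 * mm_collapse_huge_page_isolate in one go. */
	write_str(TRACING "/events/huge_memory/enable", "1");

	/* Each line printed here follows the TP_printk() formats defined
	 * in include/trace/events/huge_memory.h. */
	fp = fopen(TRACING "/trace_pipe", "r");
	if (!fp) {
		perror(TRACING "/trace_pipe");
		exit(EXIT_FAILURE);
	}
	while (fgets(line, sizeof(line), fp))
		fputs(line, stdout);

	fclose(fp);
	return 0;
}

Writing "0" to the same enable file turns the events back off; each
event can also be enabled individually under its own
events/huge_memory/<event>/enable file.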