Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751862AbbBMDH4 (ORCPT ); Thu, 12 Feb 2015 22:07:56 -0500 Received: from lgeamrelo01.lge.com ([156.147.1.125]:57618 "EHLO lgeamrelo01.lge.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1750973AbbBMDHz (ORCPT ); Thu, 12 Feb 2015 22:07:55 -0500 X-Original-SENDERIP: 10.177.222.153 X-Original-MAILFROM: iamjoonsoo.kim@lge.com Date: Fri, 13 Feb 2015 12:10:12 +0900 From: Joonsoo Kim To: Stefan Strogin Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org, Andrew Morton , Marek Szyprowski , Michal Nazarewicz , aneesh.kumar@linux.vnet.ibm.com, Laurent Pinchart , Dmitry Safonov , Pintu Kumar , Weijie Yang , Laura Abbott , SeongJae Park , Hui Zhu , Minchan Kim , Dyasly Sergey , Vyacheslav Tyrtov , gregory.0xf0@gmail.com, sasha.levin@oracle.com, gioh.kim@lge.com, pavel@ucw.cz, stefan.strogin@gmail.com Subject: Re: [PATCH 1/4] mm: cma: add currently allocated CMA buffers list to debugfs Message-ID: <20150213031012.GH6592@js1304-P5Q-DELUXE> References: MIME-Version: 1.0 Content-Type: text/plain; charset=us-ascii Content-Disposition: inline In-Reply-To: User-Agent: Mutt/1.5.21 (2010-09-15) Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Length: 7254 Lines: 238 On Fri, Feb 13, 2015 at 01:15:41AM +0300, Stefan Strogin wrote: > /sys/kernel/debug/cma/cma-/buffers contains a list of currently allocated > CMA buffers for CMA region N when CONFIG_CMA_DEBUGFS is enabled. 
> > Format is: > > - ( kB), allocated by () > > > Signed-off-by: Stefan Strogin > --- > include/linux/cma.h | 9 ++++ > mm/cma.c | 9 ++++ > mm/cma.h | 16 ++++++ > mm/cma_debug.c | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++- > 4 files changed, 178 insertions(+), 1 deletion(-) > > diff --git a/include/linux/cma.h b/include/linux/cma.h > index 9384ba6..4c2c83c 100644 > --- a/include/linux/cma.h > +++ b/include/linux/cma.h > @@ -28,4 +28,13 @@ extern int cma_init_reserved_mem(phys_addr_t base, > struct cma **res_cma); > extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align); > extern bool cma_release(struct cma *cma, struct page *pages, int count); > + > +#ifdef CONFIG_CMA_DEBUGFS > +extern int cma_buffer_list_add(struct cma *cma, unsigned long pfn, int count); > +extern void cma_buffer_list_del(struct cma *cma, unsigned long pfn, int count); > +#else > +#define cma_buffer_list_add(cma, pfn, count) { } > +#define cma_buffer_list_del(cma, pfn, count) { } > +#endif > + These could be in mm/cma.h rather than include/linux/cma.h. 
> #endif > diff --git a/mm/cma.c b/mm/cma.c > index 2609e20..ed269b0 100644 > --- a/mm/cma.c > +++ b/mm/cma.c > @@ -34,6 +34,9 @@ > #include > #include > #include > +#include > +#include > +#include > > #include "cma.h" > > @@ -125,6 +128,8 @@ static int __init cma_activate_area(struct cma *cma) > #ifdef CONFIG_CMA_DEBUGFS > INIT_HLIST_HEAD(&cma->mem_head); > spin_lock_init(&cma->mem_head_lock); > + INIT_LIST_HEAD(&cma->buffers_list); > + mutex_init(&cma->list_lock); > #endif > > return 0; > @@ -408,6 +413,9 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align) > start = bitmap_no + mask + 1; > } > > + if (page) > + cma_buffer_list_add(cma, pfn, count); > + > pr_debug("%s(): returned %p\n", __func__, page); > return page; > } > @@ -440,6 +448,7 @@ bool cma_release(struct cma *cma, struct page *pages, int count) > > free_contig_range(pfn, count); > cma_clear_bitmap(cma, pfn, count); > + cma_buffer_list_del(cma, pfn, count); > > return true; > } > diff --git a/mm/cma.h b/mm/cma.h > index 1132d73..98e5f79 100644 > --- a/mm/cma.h > +++ b/mm/cma.h > @@ -1,6 +1,8 @@ > #ifndef __MM_CMA_H__ > #define __MM_CMA_H__ > > +#include > + > struct cma { > unsigned long base_pfn; > unsigned long count; > @@ -10,9 +12,23 @@ struct cma { > #ifdef CONFIG_CMA_DEBUGFS > struct hlist_head mem_head; > spinlock_t mem_head_lock; > + struct list_head buffers_list; > + struct mutex list_lock; > #endif > }; > > +#ifdef CONFIG_CMA_DEBUGFS > +struct cma_buffer { > + unsigned long pfn; > + unsigned long count; > + pid_t pid; > + char comm[TASK_COMM_LEN]; > + unsigned long trace_entries[16]; > + unsigned int nr_entries; > + struct list_head list; > +}; > +#endif > + > extern struct cma cma_areas[MAX_CMA_AREAS]; > extern unsigned cma_area_count; > > diff --git a/mm/cma_debug.c b/mm/cma_debug.c > index 7e1d325..5acd937 100644 > --- a/mm/cma_debug.c > +++ b/mm/cma_debug.c > @@ -2,6 +2,7 @@ > * CMA DebugFS Interface > * > * Copyright (c) 2015 Sasha Levin > + * Copyright (c) 2015 
Stefan Strogin > */ > > > @@ -10,6 +11,8 @@ > #include > #include > #include > +#include > +#include > > #include "cma.h" > > @@ -21,6 +24,99 @@ struct cma_mem { > > static struct dentry *cma_debugfs_root; > > +/* Must be called under cma->list_lock */ > +static int __cma_buffer_list_add(struct cma *cma, unsigned long pfn, int count) > +{ > + struct cma_buffer *cmabuf; > + struct stack_trace trace; > + > + cmabuf = kmalloc(sizeof(*cmabuf), GFP_KERNEL); > + if (!cmabuf) { > + pr_warn("%s(page %p, count %d): failed to allocate buffer list entry\n", > + __func__, pfn_to_page(pfn), count); > + return -ENOMEM; > + } > + > + trace.nr_entries = 0; > + trace.max_entries = ARRAY_SIZE(cmabuf->trace_entries); > + trace.entries = &cmabuf->trace_entries[0]; > + trace.skip = 2; > + save_stack_trace(&trace); > + > + cmabuf->pfn = pfn; > + cmabuf->count = count; > + cmabuf->pid = task_pid_nr(current); > + cmabuf->nr_entries = trace.nr_entries; > + get_task_comm(cmabuf->comm, current); > + > + list_add_tail(&cmabuf->list, &cma->buffers_list); > + > + return 0; > +} > + > +/** > + * cma_buffer_list_add() - add a new entry to a list of allocated buffers > + * @cma: Contiguous memory region for which the allocation is performed. > + * @pfn: Base PFN of the allocated buffer. > + * @count: Number of allocated pages. > + * > + * This function adds a new entry to the list of allocated contiguous memory > + * buffers in a CMA area. It uses the CMA area specificated by the device > + * if available or the default global one otherwise. > + */ > +int cma_buffer_list_add(struct cma *cma, unsigned long pfn, int count) > +{ > + int ret; > + > + mutex_lock(&cma->list_lock); > + ret = __cma_buffer_list_add(cma, pfn, count); > + mutex_unlock(&cma->list_lock); > + > + return ret; > +} > + > +/** > + * cma_buffer_list_del() - delete an entry from a list of allocated buffers > + * @cma: Contiguous memory region for which the allocation was performed. > + * @pfn: Base PFN of the released buffer. 
> + * @count: Number of pages. > + * > + * This function deletes a list entry added by cma_buffer_list_add(). > + */ > +void cma_buffer_list_del(struct cma *cma, unsigned long pfn, int count) > +{ > + struct cma_buffer *cmabuf, *tmp; > + int found = 0; > + unsigned long buf_end_pfn, free_end_pfn = pfn + count; > + > + mutex_lock(&cma->list_lock); > + list_for_each_entry_safe(cmabuf, tmp, &cma->buffers_list, list) { > + > + buf_end_pfn = cmabuf->pfn + cmabuf->count; > + if (pfn <= cmabuf->pfn && free_end_pfn >= buf_end_pfn) { > + list_del(&cmabuf->list); > + kfree(cmabuf); > + found = 1; > + } else if (pfn <= cmabuf->pfn && free_end_pfn < buf_end_pfn) { > + cmabuf->count -= free_end_pfn - cmabuf->pfn; > + cmabuf->pfn = free_end_pfn; > + found = 1; > + } else if (pfn > cmabuf->pfn && pfn < buf_end_pfn) { > + if (free_end_pfn < buf_end_pfn) > + __cma_buffer_list_add(cma, free_end_pfn, > + buf_end_pfn - free_end_pfn); > + cmabuf->count = pfn - cmabuf->pfn; > + found = 1; > + } > + } > + mutex_unlock(&cma->list_lock); This linear search makes cma_release() slow if we have many allocated cma buffers. Wouldn't it cause a problem? Thanks. -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/