Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1750932AbdCNH46 (ORCPT ); Tue, 14 Mar 2017 03:56:58 -0400 Received: from LGEAMRELO12.lge.com ([156.147.23.52]:56873 "EHLO lgeamrelo12.lge.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1750785AbdCNH44 (ORCPT ); Tue, 14 Mar 2017 03:56:56 -0400 X-Original-SENDERIP: 156.147.1.127 X-Original-MAILFROM: junil0814.lee@lge.com X-Original-SENDERIP: 165.244.249.26 X-Original-MAILFROM: junil0814.lee@lge.com X-Original-SENDERIP: 10.168.177.123 X-Original-MAILFROM: junil0814.lee@lge.com From: Junil Lee To: , , , , CC: , , Junil Lee , Bongkyu Kim Subject: [PATCH] staging: android: ion: reduce lock contention latency Date: Tue, 14 Mar 2017 16:51:26 +0900 Message-ID: <1489477886-2860-1-git-send-email-junil0814.lee@lge.com> X-Mailer: git-send-email 2.6.2 X-MIMETrack: Itemize by SMTP Server on LGEKRMHUB03/LGE/LG Group(Release 8.5.3FP6|November 21, 2013) at 2017/03/14 16:51:30, Serialize by Router on LGEKRMHUB03/LGE/LG Group(Release 8.5.3FP6|November 21, 2013) at 2017/03/14 16:51:30, Serialize complete at 2017/03/14 16:51:30 MIME-Version: 1.0 Content-Type: text/plain Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Length: 7416 Lines: 207 Replace list into lock-less list of ion page pool. Measure how mutex lock contention latency on android. 1. the test is done under android 7.0 2. startup many applications circularly 3. 
find sample in trace log as below cameraserver-625 [004] ...1 1891.952958: mutex_lock_enter: id=0 Binder:384_2-417 [005] ...1 1891.952958: mutex_lock_enter: id=0 Binder:384_2-417 [005] ...1 1891.952966: mutex_lock_enter: id=1 Binder:384_2-417 [005] ...1 1891.952970: mutex_lock_enter: id=0 Binder:384_2-417 [005] ...1 1891.952971: mutex_lock_enter: id=1 Binder:384_2-417 [005] ...1 1891.952982: mutex_lock_enter: id=0 Binder:384_2-417 [005] ...1 1891.952983: mutex_lock_enter: id=1 Binder:384_2-417 [005] ...1 1891.952989: mutex_lock_enter: id=0 Binder:384_2-417 [005] ...1 1891.952989: mutex_lock_enter: id=1 Binder:384_2-417 [005] ...1 1891.952995: mutex_lock_enter: id=0 cameraserver-625 [004] ...1 1891.952995: mutex_lock_enter: id=1 - id 0 is try to lock, id 1 is locked Figure out how many latency reduction by this patch as below. The test is startup 60 applications circularly (repeat 10cycles) - lock contention count : 3717 -> 93 Signed-off-by: Bongkyu Kim Signed-off-by: Junil Lee --- drivers/staging/android/ion/ion_page_pool.c | 52 ++++++++++++++------------- drivers/staging/android/ion/ion_priv.h | 8 ++--- drivers/staging/android/ion/ion_system_heap.c | 16 ++++----- 3 files changed, 40 insertions(+), 36 deletions(-) diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c index aea89c1..1beb2c8 100644 --- a/drivers/staging/android/ion/ion_page_pool.c +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "ion_priv.h" static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool) @@ -44,33 +45,36 @@ static void ion_page_pool_free_pages(struct ion_page_pool *pool, static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page) { - mutex_lock(&pool->mutex); if (PageHighMem(page)) { - list_add_tail(&page->lru, &pool->high_items); - pool->high_count++; + llist_add((struct llist_node *)&page->lru, &pool->high_items); + atomic_inc(&pool->high_count); } 
else { - list_add_tail(&page->lru, &pool->low_items); - pool->low_count++; + llist_add((struct llist_node *)&page->lru, &pool->low_items); + atomic_inc(&pool->low_count); } - mutex_unlock(&pool->mutex); + return 0; } static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high) { - struct page *page; + struct page *page = NULL; + struct llist_node *node; if (high) { - BUG_ON(!pool->high_count); - page = list_first_entry(&pool->high_items, struct page, lru); - pool->high_count--; + BUG_ON(!atomic_read(&pool->high_count)); + node = llist_del_first(&pool->high_items); + if (node) + page = llist_entry((struct list_head *)node, struct page, lru); + atomic_dec(&pool->high_count); } else { - BUG_ON(!pool->low_count); - page = list_first_entry(&pool->low_items, struct page, lru); - pool->low_count--; + BUG_ON(!atomic_read(&pool->low_count)); + node = llist_del_first(&pool->low_items); + if (node) + page = llist_entry((struct list_head *)node, struct page, lru); + atomic_dec(&pool->low_count); } - list_del(&page->lru); return page; } @@ -81,9 +85,9 @@ struct page *ion_page_pool_alloc(struct ion_page_pool *pool) BUG_ON(!pool); mutex_lock(&pool->mutex); - if (pool->high_count) + if (atomic_read(&pool->high_count)) page = ion_page_pool_remove(pool, true); - else if (pool->low_count) + else if (atomic_read(&pool->low_count)) page = ion_page_pool_remove(pool, false); mutex_unlock(&pool->mutex); @@ -106,10 +110,10 @@ void ion_page_pool_free(struct ion_page_pool *pool, struct page *page) static int ion_page_pool_total(struct ion_page_pool *pool, bool high) { - int count = pool->low_count; + int count = atomic_read(&pool->low_count); if (high) - count += pool->high_count; + count += atomic_read(&pool->high_count); return count << pool->order; } @@ -132,9 +136,9 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, struct page *page; mutex_lock(&pool->mutex); - if (pool->low_count) { + if (atomic_read(&pool->low_count)) { page =
ion_page_pool_remove(pool, false); - } else if (high && pool->high_count) { + } else if (high && atomic_read(&pool->high_count)) { page = ion_page_pool_remove(pool, true); } else { mutex_unlock(&pool->mutex); @@ -155,10 +159,10 @@ struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order, if (!pool) return NULL; - pool->high_count = 0; - pool->low_count = 0; - INIT_LIST_HEAD(&pool->low_items); - INIT_LIST_HEAD(&pool->high_items); + atomic_set(&pool->high_count, 0); + atomic_set(&pool->low_count, 0); + init_llist_head(&pool->low_items); + init_llist_head(&pool->high_items); pool->gfp_mask = gfp_mask | __GFP_COMP; pool->order = order; mutex_init(&pool->mutex); diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h index 5b3059c..d4d5704 100644 --- a/drivers/staging/android/ion/ion_priv.h +++ b/drivers/staging/android/ion/ion_priv.h @@ -414,11 +414,11 @@ void ion_cma_heap_destroy(struct ion_heap *heap); * on many systems */ struct ion_page_pool { - int high_count; - int low_count; + atomic_t high_count; + atomic_t low_count; bool cached; - struct list_head high_items; - struct list_head low_items; + struct llist_head high_items; + struct llist_head low_items; struct mutex mutex; gfp_t gfp_mask; unsigned int order; diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index 3ebbb75..8ee8d98 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -274,22 +274,22 @@ static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s, pool = sys_heap->uncached_pools[i]; seq_printf(s, "%d order %u highmem pages uncached %lu total\n", - pool->high_count, pool->order, - (PAGE_SIZE << pool->order) * pool->high_count); + atomic_read(&pool->high_count), pool->order, + (PAGE_SIZE << pool->order) * atomic_read(&pool->high_count)); seq_printf(s, "%d order %u lowmem pages uncached %lu total\n", - 
pool->low_count, pool->order, - (PAGE_SIZE << pool->order) * pool->low_count); + atomic_read(&pool->low_count), pool->order, + (PAGE_SIZE << pool->order) * atomic_read(&pool->low_count)); } for (i = 0; i < NUM_ORDERS; i++) { pool = sys_heap->cached_pools[i]; seq_printf(s, "%d order %u highmem pages cached %lu total\n", - pool->high_count, pool->order, - (PAGE_SIZE << pool->order) * pool->high_count); + atomic_read(&pool->high_count), pool->order, + (PAGE_SIZE << pool->order) * atomic_read(&pool->high_count)); seq_printf(s, "%d order %u lowmem pages cached %lu total\n", - pool->low_count, pool->order, - (PAGE_SIZE << pool->order) * pool->low_count); + atomic_read(&pool->low_count), pool->order, + (PAGE_SIZE << pool->order) * atomic_read(&pool->low_count)); } return 0; } -- 2.6.2