Subject: Re: [patch] staging: ion: use two separate locks for heaps and clients in ion_device
From: YiPing Xu
To: Laura Abbott
Date: Fri, 7 Oct 2016 16:27:49 +0800
Message-ID: <57F75C85.5050406@hisilicon.com>
In-Reply-To: <37a3350a-d934-8d52-e5ab-0634687aaf46@redhat.com>
References: <1475223503-65906-1-git-send-email-xuyiping@hisilicon.com>
 <37a3350a-d934-8d52-e5ab-0634687aaf46@redhat.com>
X-Mailing-List: linux-kernel@vger.kernel.org

On 2016/10/5 2:02, Laura Abbott wrote:
> On 09/30/2016 01:18 AM, Xu YiPing wrote:
>> ion_alloc may get into the slow path to get free pages; the call stack:
>>
>>   __alloc_pages_slowpath
>>   ion_page_pool_alloc_pages
>>   alloc_buffer_page
>>   ion_system_heap_allocate
>>   ion_buffer_create   <-- holds ion_device->lock
>>   ion_alloc
>>
>> After that, the kernel invokes the low-memory killer to free memory by
>> killing some apps on the Android system. However, the killing is
>> sometimes blocked; the app's call stack:
>>
>>   rwsem_down_write_failed
>>   down_write
>>   ion_client_destroy
>>   ion_release
>>   fput
>>   do_signal
>>
>> The killing is blocked because ion_device->lock is held by ion_alloc.
>>
>> ion_alloc holds the lock to access the heaps list, and
>> ion_client_destroy holds the lock to access the clients list.
>>
>> So we can use two separate locks for heaps and clients to avoid this
>> unnecessary contention.
>>
>
> I've reviewed this and it looks okay at first pass but I don't want it
> applied just yet. Ion locking is a bit of a mess and has been added one
> piece at a time. It needs a fundamental review. There will be Ion
> discussions at Plumbers at the end of October. Let's come back to this
> after Plumbers.

Yes, and "debugfs_mutex" and "ion_root_client" are now redundant, after
commits 49d200deaa680501f19a247b1fffb29301e51d2b and
9fd4dcece43a53e5a9e65a973df5693702ee6401.
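
For anyone skimming the thread, here is a minimal userspace sketch of the
contention the commit message describes, using POSIX rwlocks in place of the
kernel rwsem. Every name in it (alloc_path, destroy_path, the file name, the
timings) is invented for the illustration; only the locking shape mirrors the
driver.

    /* lock_split_demo.c - build with: cc -pthread lock_split_demo.c */
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    /* after the patch: one rwsem-like lock per independently used structure */
    static pthread_rwlock_t heap_lock   = PTHREAD_RWLOCK_INITIALIZER;
    static pthread_rwlock_t client_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* stands in for ion_alloc(): reads the heap list, possibly for a long time */
    static void *alloc_path(void *unused)
    {
            (void)unused;
            pthread_rwlock_rdlock(&heap_lock);
            sleep(2);                       /* the page allocator slow path */
            pthread_rwlock_unlock(&heap_lock);
            return NULL;
    }

    /* stands in for ion_client_destroy(): writes the client tree */
    static void *destroy_path(void *unused)
    {
            (void)unused;
            pthread_rwlock_wrlock(&client_lock);
            puts("client torn down without waiting for the slow allocation");
            pthread_rwlock_unlock(&client_lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t a, d;

            pthread_create(&a, NULL, alloc_path, NULL);
            usleep(100 * 1000);             /* let the allocator take heap_lock first */
            pthread_create(&d, NULL, destroy_path, NULL);

            pthread_join(d, NULL);          /* returns at once with the split locks */
            pthread_join(a, NULL);
            return 0;
    }

If both paths are switched back to a single shared rwlock, destroy_path() sits
in the write lock for the full two seconds, which is exactly the low-memory
killer stall described above.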
>> Signed-off-by: Xu YiPing
>> ---
>>  drivers/staging/android/ion/ion.c | 37 ++++++++++++++++++++-----------------
>>  1 file changed, 20 insertions(+), 17 deletions(-)
>>
>> diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
>> index a2cf93b..331ad0f 100644
>> --- a/drivers/staging/android/ion/ion.c
>> +++ b/drivers/staging/android/ion/ion.c
>> @@ -46,7 +46,8 @@
>>   * @dev:		the actual misc device
>>   * @buffers:		an rb tree of all the existing buffers
>>   * @buffer_lock:	lock protecting the tree of buffers
>> - * @lock:		rwsem protecting the tree of heaps and clients
>> + * @client_lock:	rwsem protecting the tree of clients
>> + * @heap_lock:		rwsem protecting the tree of heaps
>>   * @heaps:		list of all the heaps in the system
>>   * @user_clients:	list of all the clients created from userspace
>>   */
>> @@ -54,7 +55,8 @@ struct ion_device {
>>  	struct miscdevice dev;
>>  	struct rb_root buffers;
>>  	struct mutex buffer_lock;
>> -	struct rw_semaphore lock;
>> +	struct rw_semaphore client_lock;
>> +	struct rw_semaphore heap_lock;
>>  	struct plist_head heaps;
>>  	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
>>  			     unsigned long arg);
>> @@ -146,7 +148,7 @@ static inline void ion_buffer_page_clean(struct page **page)
>>  	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
>>  }
>>
>> -/* this function should only be called while dev->lock is held */
>> +/* this function should only be called while dev->heap_lock is held */
>>  static void ion_buffer_add(struct ion_device *dev,
>>  			   struct ion_buffer *buffer)
>>  {
>> @@ -172,7 +174,7 @@ static void ion_buffer_add(struct ion_device *dev,
>>  	rb_insert_color(&buffer->node, &dev->buffers);
>>  }
>>
>> -/* this function should only be called while dev->lock is held */
>> +/* this function should only be called while dev->heap_lock is held */
>>  static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
>>  					    struct ion_device *dev,
>>  					    unsigned long len,
>> @@ -511,7 +513,7 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
>>  	if (!len)
>>  		return ERR_PTR(-EINVAL);
>>
>> -	down_read(&dev->lock);
>> +	down_read(&dev->heap_lock);
>>  	plist_for_each_entry(heap, &dev->heaps, node) {
>>  		/* if the caller didn't specify this heap id */
>>  		if (!((1 << heap->id) & heap_id_mask))
>> @@ -520,7 +522,7 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
>>  		if (!IS_ERR(buffer))
>>  			break;
>>  	}
>> -	up_read(&dev->lock);
>> +	up_read(&dev->heap_lock);
>>
>>  	if (buffer == NULL)
>>  		return ERR_PTR(-ENODEV);
>> @@ -713,7 +715,7 @@ static int is_client_alive(struct ion_client *client)
>>  	node = ion_root_client->rb_node;
>>  	dev = container_of(ion_root_client, struct ion_device, clients);
>>
>> -	down_read(&dev->lock);
>> +	down_read(&dev->client_lock);
>>  	while (node) {
>>  		tmp = rb_entry(node, struct ion_client, node);
>>  		if (client < tmp) {
>> @@ -721,12 +723,12 @@ static int is_client_alive(struct ion_client *client)
>>  		} else if (client > tmp) {
>>  			node = node->rb_right;
>>  		} else {
>> -			up_read(&dev->lock);
>> +			up_read(&dev->client_lock);
>>  			return 1;
>>  		}
>>  	}
>>
>> -	up_read(&dev->lock);
>> +	up_read(&dev->client_lock);
>>  	return 0;
>>  }
>>
>> @@ -841,12 +843,12 @@ struct ion_client *ion_client_create(struct ion_device *dev,
>>  	if (!client->name)
>>  		goto err_free_client;
>>
>> -	down_write(&dev->lock);
>> +	down_write(&dev->client_lock);
>>  	client->display_serial = ion_get_client_serial(&dev->clients, name);
>>  	client->display_name = kasprintf(
>>  		GFP_KERNEL, "%s-%d", name, client->display_serial);
>>  	if (!client->display_name) {
>> -		up_write(&dev->lock);
>> +		up_write(&dev->client_lock);
>>  		goto err_free_client_name;
>>  	}
>>  	p = &dev->clients.rb_node;
>> @@ -873,7 +875,7 @@ struct ion_client *ion_client_create(struct ion_device *dev,
>>  			path, client->display_name);
>>  	}
>>
>> -	up_write(&dev->lock);
>> +	up_write(&dev->client_lock);
>>
>>  	return client;
>>
>> @@ -903,12 +905,12 @@ void ion_client_destroy(struct ion_client *client)
>>
>>  	idr_destroy(&client->idr);
>>
>> -	down_write(&dev->lock);
>> +	down_write(&dev->client_lock);
>>  	if (client->task)
>>  		put_task_struct(client->task);
>>  	rb_erase(&client->node, &dev->clients);
>>  	debugfs_remove_recursive(client->debug_root);
>> -	up_write(&dev->lock);
>> +	up_write(&dev->client_lock);
>>
>>  	kfree(client->display_name);
>>  	kfree(client->name);
>> @@ -1603,7 +1605,7 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
>>  		ion_heap_init_shrinker(heap);
>>
>>  	heap->dev = dev;
>> -	down_write(&dev->lock);
>> +	down_write(&dev->heap_lock);
>>  	/*
>>  	 * use negative heap->id to reverse the priority -- when traversing
>>  	 * the list later attempt higher id numbers first
>> @@ -1638,7 +1640,7 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
>>  		}
>>  	}
>>
>> -	up_write(&dev->lock);
>> +	up_write(&dev->heap_lock);
>>  }
>>  EXPORT_SYMBOL(ion_device_add_heap);
>>
>> @@ -1685,7 +1687,8 @@ debugfs_done:
>>  	idev->custom_ioctl = custom_ioctl;
>>  	idev->buffers = RB_ROOT;
>>  	mutex_init(&idev->buffer_lock);
>> -	init_rwsem(&idev->lock);
>> +	init_rwsem(&idev->client_lock);
>> +	init_rwsem(&idev->heap_lock);
>>  	plist_head_init(&idev->heaps);
>>  	idev->clients = RB_ROOT;
>>  	ion_root_client = &idev->clients;
>> --
>> 2.7.4
>>
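
A second, equally artificial sketch of the read/write split that is visible in
the diff: ion_alloc() only takes heap_lock for read, so concurrent allocations
can walk the heap list in parallel, and ion_device_add_heap() is the lone
writer. Again POSIX rwlocks and made-up names, not driver code.

    /* heap_readers_demo.c - build with: cc -pthread heap_readers_demo.c */
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_rwlock_t heap_lock = PTHREAD_RWLOCK_INITIALIZER;

    static void *allocator(void *id)        /* like down_read(&dev->heap_lock) */
    {
            pthread_rwlock_rdlock(&heap_lock);
            printf("allocator %ld walking the heap list\n", (long)id);
            sleep(1);                       /* both readers overlap here */
            pthread_rwlock_unlock(&heap_lock);
            return NULL;
    }

    static void *add_heap(void *unused)     /* like down_write(&dev->heap_lock) */
    {
            (void)unused;
            pthread_rwlock_wrlock(&heap_lock);
            puts("add_heap ran once every reader had finished");
            pthread_rwlock_unlock(&heap_lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t r1, r2, w;

            pthread_create(&r1, NULL, allocator, (void *)1L);
            pthread_create(&r2, NULL, allocator, (void *)2L);
            usleep(100 * 1000);
            pthread_create(&w, NULL, add_heap, NULL);

            pthread_join(r1, NULL);
            pthread_join(r2, NULL);
            pthread_join(w, NULL);
            return 0;
    }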