From: Jérôme Glisse <jglisse@redhat.com>
To: akpm@linux-foundation.org, linux-mm@kvack.org
Cc: Linus Torvalds, Mel Gorman, "H. Peter Anvin", Peter Zijlstra,
    Andrea Arcangeli, Johannes Weiner, Larry Woodman, Rik van Riel,
    Dave Airlie, Brendan Conoboy, Joe Donohue, Christophe Harle,
    Duncan Poole, Sherry Cheung, Subhash Gutti, John Hubbard,
    Mark Hairgrove, Lucien Dunning, Cameron Buschardt,
    Arvind Gopalakrishnan, Haggai Eran, Shachar Raindel, Liran Liss,
    Roland Dreier, Ben Sander, Greg Stoner, John Bridgman,
    Michael Mantor, Paul Blinzer, Leonid Shamis, Laurent Morichetti,
    Alexander Deucher, Jérôme Glisse
Subject: [PATCH v12 25/29] HMM: split DMA mapping function in two.
Date: Tue, 8 Mar 2016 15:43:18 -0500
Message-Id: <1457469802-11850-26-git-send-email-jglisse@redhat.com>
In-Reply-To: <1457469802-11850-1-git-send-email-jglisse@redhat.com>
References: <1457469802-11850-1-git-send-email-jglisse@redhat.com>

To allow the DMA mapping logic to be reused, split it into two functions.

Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
---
 mm/hmm.c | 120 ++++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 65 insertions(+), 55 deletions(-)

diff --git a/mm/hmm.c b/mm/hmm.c
index d26abe4..07f1ab6 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -910,76 +910,86 @@ static int hmm_mirror_fault_hugetlb_entry(pte_t *ptep,
 	return 0;
 }
 
+static int hmm_mirror_dma_map_range(struct hmm_mirror *mirror,
+				    dma_addr_t *hmm_pte,
+				    spinlock_t *lock,
+				    unsigned long npages)
+{
+	struct device *dev = mirror->device->dev;
+	unsigned long i;
+	int ret = 0;
+
+	for (i = 0; i < npages; i++) {
+		dma_addr_t dma_addr, pte;
+		struct page *page;
+
+again:
+		pte = ACCESS_ONCE(hmm_pte[i]);
+		if (!hmm_pte_test_valid_pfn(&pte) || !hmm_pte_test_select(&pte))
+			continue;
+
+		page = pfn_to_page(hmm_pte_pfn(pte));
+		VM_BUG_ON(!page);
+		dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
+					DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(dev, dma_addr)) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		/*
+		 * Make sure we transfer the dirty bit. Note that there
+		 * might still be a window for another thread to set
+		 * the dirty bit before we check for pte equality. This
+		 * will just lead to a useless retry so it is not the
+		 * end of the world here.
+		 */
+		if (lock)
+			spin_lock(lock);
+		if (hmm_pte_test_dirty(&hmm_pte[i]))
+			hmm_pte_set_dirty(&pte);
+		if (ACCESS_ONCE(hmm_pte[i]) != pte) {
+			if (lock)
+				spin_unlock(lock);
+			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
+				       DMA_BIDIRECTIONAL);
+			if (hmm_pte_test_valid_pfn(&hmm_pte[i]))
+				goto again;
+			continue;
+		}
+		hmm_pte[i] = hmm_pte_from_dma_addr(dma_addr);
+		if (hmm_pte_test_write(&pte))
+			hmm_pte_set_write(&hmm_pte[i]);
+		if (hmm_pte_test_dirty(&pte))
+			hmm_pte_set_dirty(&hmm_pte[i]);
+		if (lock)
+			spin_unlock(lock);
+	}
+
+	return ret;
+}
+
 static int hmm_mirror_dma_map(struct hmm_mirror *mirror,
 			      struct hmm_pt_iter *iter,
 			      unsigned long start,
 			      unsigned long end)
 {
-	struct device *dev = mirror->device->dev;
 	unsigned long addr;
 	int ret;
 
 	for (ret = 0, addr = start; !ret && addr < end;) {
-		unsigned long i = 0, next = end;
+		unsigned long next = end, npages;
 		dma_addr_t *hmm_pte;
+		spinlock_t *lock;
 
 		hmm_pte = hmm_pt_iter_populate(iter, addr, &next);
 		if (!hmm_pte)
 			return -ENOENT;
 
-		do {
-			dma_addr_t dma_addr, pte;
-			struct page *page;
-
-again:
-			pte = ACCESS_ONCE(hmm_pte[i]);
-			if (!hmm_pte_test_valid_pfn(&pte) ||
-			    !hmm_pte_test_select(&pte)) {
-				if (!hmm_pte_test_valid_dma(&pte)) {
-					ret = -ENOENT;
-					break;
-				}
-				continue;
-			}
-
-			page = pfn_to_page(hmm_pte_pfn(pte));
-			VM_BUG_ON(!page);
-			dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
-						DMA_BIDIRECTIONAL);
-			if (dma_mapping_error(dev, dma_addr)) {
-				ret = -ENOMEM;
-				break;
-			}
-
-			hmm_pt_iter_directory_lock(iter);
-			/*
-			 * Make sure we transfer the dirty bit. Note that there
-			 * might still be a window for another thread to set
-			 * the dirty bit before we check for pte equality. This
-			 * will just lead to a useless retry so it is not the
-			 * end of the world here.
-			 */
-			if (hmm_pte_test_dirty(&hmm_pte[i]))
-				hmm_pte_set_dirty(&pte);
-			if (ACCESS_ONCE(hmm_pte[i]) != pte) {
-				hmm_pt_iter_directory_unlock(iter);
-				dma_unmap_page(dev, dma_addr, PAGE_SIZE,
-					       DMA_BIDIRECTIONAL);
-				if (hmm_pte_test_valid_pfn(&pte))
-					goto again;
-				if (!hmm_pte_test_valid_dma(&pte)) {
-					ret = -ENOENT;
-					break;
-				}
-			} else {
-				hmm_pte[i] = hmm_pte_from_dma_addr(dma_addr);
-				if (hmm_pte_test_write(&pte))
-					hmm_pte_set_write(&hmm_pte[i]);
-				if (hmm_pte_test_dirty(&pte))
-					hmm_pte_set_dirty(&hmm_pte[i]);
-				hmm_pt_iter_directory_unlock(iter);
-			}
-		} while (addr += PAGE_SIZE, i++, addr != next && !ret);
+		npages = (next - addr) >> PAGE_SHIFT;
+		lock = hmm_pt_iter_directory_lock_ptr(iter);
+		ret = hmm_mirror_dma_map_range(mirror, hmm_pte, lock, npages);
+		addr = next;
 	}
 
 	return ret;
-- 
2.4.3
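
For reference, below is a minimal, self-contained sketch of the map/check/unmap
pattern that hmm_mirror_dma_map_range() builds on. The helper names
map_one_page()/unmap_one_page() and the surrounding scaffolding are illustrative
assumptions, not part of the patch; only dma_map_page(), dma_mapping_error() and
dma_unmap_page() are taken from the code above.

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>

    /* Illustrative helper: map one page for bidirectional device access. */
    static int map_one_page(struct device *dev, struct page *page,
    			dma_addr_t *out)
    {
    	dma_addr_t dma_addr;

    	dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
    	if (dma_mapping_error(dev, dma_addr))
    		return -ENOMEM;	/* same error the patch propagates */

    	*out = dma_addr;
    	return 0;
    }

    /* Illustrative helper: undo the mapping, e.g. after losing a race. */
    static void unmap_one_page(struct device *dev, dma_addr_t dma_addr)
    {
    	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
    }

The real helper additionally transfers the dirty bit and retries when the HMM
pte changes between the lockless read and the locked re-check, which is why it
takes the directory spinlock pointer as an argument.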