Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1752413AbeADJcj (ORCPT + 1 other); Thu, 4 Jan 2018 04:32:39 -0500 Received: from mail-wr0-f194.google.com ([209.85.128.194]:41304 "EHLO mail-wr0-f194.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751993AbeADJci (ORCPT ); Thu, 4 Jan 2018 04:32:38 -0500 X-Google-Smtp-Source: ACJfBosx8W9pHiSHLXCqOen6svwW0hacbzH0VFFWTtY8NFiXieTM+jblSClLrX7t3y5o1YtVejjGqA== From: "=?UTF-8?q?Christian=20K=C3=B6nig?=" X-Google-Original-From: =?UTF-8?q?Christian=20K=C3=B6nig?= To: konrad.wilk@oracle.com Cc: linux-kernel@vger.kernel.org Subject: [PATCH] swiotlb: suppress warning when __GFP_NOWARN is set v4 Date: Thu, 4 Jan 2018 10:32:33 +0100 Message-Id: <20180104093233.16311-1-christian.koenig@amd.com> X-Mailer: git-send-email 2.11.0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Return-Path: TTM tries to allocate coherent memory in chunks of 2MB first to improve TLB efficiency and falls back to allocating 4K pages if that fails. Suppress the warning when the 2MB allocation fails since there is a valid fallback path. 
v2: suppress warnings from swiotlb_tbl_map_single as well v3: coding style fixes as suggested by Konrad v4: make tbl_map_single static Signed-off-by: Christian König Reported-by: Mike Galbraith Bug: https://bugs.freedesktop.org/show_bug.cgi?id=104082 CC: stable@vger.kernel.org --- lib/swiotlb.c | 44 +++++++++++++++++++++++++++++--------------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/lib/swiotlb.c b/lib/swiotlb.c index cea19aaf303c..8ed802101071 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -490,11 +490,11 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr, } } -phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, - dma_addr_t tbl_dma_addr, - phys_addr_t orig_addr, size_t size, - enum dma_data_direction dir, - unsigned long attrs) +static phys_addr_t tbl_map_single(struct device *hwdev, + dma_addr_t tbl_dma_addr, + phys_addr_t orig_addr, size_t size, + enum dma_data_direction dir, + unsigned long attrs, bool warn) { unsigned long flags; phys_addr_t tlb_addr; @@ -586,7 +586,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, not_found: spin_unlock_irqrestore(&io_tlb_lock, flags); - if (printk_ratelimit()) + if (warn && printk_ratelimit()) dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size); return SWIOTLB_MAP_ERROR; found: @@ -605,6 +605,16 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, return tlb_addr; } + +phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, + dma_addr_t tbl_dma_addr, + phys_addr_t orig_addr, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + return tbl_map_single(hwdev, tbl_dma_addr, orig_addr, + size, dir, attrs, true); +} EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single); /* @@ -613,7 +623,7 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single); static phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size, - enum dma_data_direction dir, unsigned long attrs) + enum dma_data_direction dir, unsigned long attrs, bool warn) { 
dma_addr_t start_dma_addr; @@ -624,8 +634,8 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size, } start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start); - return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, - dir, attrs); + return tbl_map_single(hwdev, start_dma_addr, phys, size, + dir, attrs, warn); } /* @@ -713,6 +723,7 @@ void * swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags) { + bool warn = !(flags & __GFP_NOWARN); dma_addr_t dev_addr; void *ret; int order = get_order(size); @@ -739,7 +750,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, * will grab memory from the lowest available address range. */ phys_addr_t paddr = map_single(hwdev, 0, size, - DMA_FROM_DEVICE, 0); + DMA_FROM_DEVICE, 0, warn); if (paddr == SWIOTLB_MAP_ERROR) goto err_warn; @@ -769,9 +780,11 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, return ret; err_warn: - pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n", - dev_name(hwdev), size); - dump_stack(); + if (warn && printk_ratelimit()) { + pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n", + dev_name(hwdev), size); + dump_stack(); + } return NULL; } @@ -851,7 +864,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); /* Oh well, have to allocate and map a bounce buffer. 
*/ - map = map_single(dev, phys, size, dir, attrs); + map = map_single(dev, phys, size, dir, attrs, true); if (map == SWIOTLB_MAP_ERROR) { swiotlb_full(dev, size, dir, 1); return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer); @@ -989,7 +1002,8 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, if (swiotlb_force == SWIOTLB_FORCE || !dma_capable(hwdev, dev_addr, sg->length)) { phys_addr_t map = map_single(hwdev, sg_phys(sg), - sg->length, dir, attrs); + sg->length, dir, attrs, + true /*Always warn.*/); if (map == SWIOTLB_MAP_ERROR) { /* Don't panic here, we expect map_sg users to do proper error handling. */ -- 2.11.0