From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
To: linux-kernel@vger.kernel.org, nouveau@lists.freedesktop.org,
	dri-devel@lists.freedesktop.org
Cc: airlied@linux.ie, bskeggs@redhat.com, gnurou@gmail.com,
	Ard Biesheuvel <ard.biesheuvel@linaro.org>
Subject: [PATCH v5 2/3] drm/nouveau/fb/gf100: defer DMA mapping of scratch page to oneinit() hook
Date: Thu, 6 Oct 2016 16:49:29 +0100
Message-Id: <1475768970-32512-3-git-send-email-ard.biesheuvel@linaro.org>
In-Reply-To: <1475768970-32512-1-git-send-email-ard.biesheuvel@linaro.org>
References: <1475768970-32512-1-git-send-email-ard.biesheuvel@linaro.org>

The 100c10 scratch page is mapped using dma_map_page() before the TTM
layer has had a chance to set the DMA mask. This means we are still
running with the default mask of 32 bits when this code executes, which
causes problems for platforms with no memory below 4 GB (such as AMD
Seattle). So move the dma_map_page() to the .oneinit hook, which
executes after the DMA mask has been set.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c | 31 ++++++++++++--------
 1 file changed, 19 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index 76433cc66fff..c1995c0024ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -50,24 +50,39 @@ gf100_fb_intr(struct nvkm_fb *base)
 }
 
 int
-gf100_fb_oneinit(struct nvkm_fb *fb)
+gf100_fb_oneinit(struct nvkm_fb *base)
 {
-	struct nvkm_device *device = fb->subdev.device;
+	struct gf100_fb *fb = gf100_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
 	int ret, size = 0x1000;
 
 	size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
 	size = min(size, 0x1000);
 
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
-			      false, &fb->mmu_rd);
+			      false, &base->mmu_rd);
 	if (ret)
 		return ret;
 
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
-			      false, &fb->mmu_wr);
+			      false, &base->mmu_wr);
 	if (ret)
 		return ret;
 
+	fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!fb->r100c10_page) {
+		nvkm_error(&fb->base.subdev, "failed 100c10 page alloc\n");
+		return -ENOMEM;
+	}
+
+	fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0, PAGE_SIZE,
+				   DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(device->dev, fb->r100c10)) {
+		nvkm_error(&fb->base.subdev, "failed to map 100c10 page\n");
+		__free_page(fb->r100c10_page);
+		return -EFAULT;
+	}
+
 	return 0;
 }
 
@@ -123,14 +138,6 @@ gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
 	nvkm_fb_ctor(func, device, index, &fb->base);
 	*pfb = &fb->base;
 
-	fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (fb->r100c10_page) {
-		fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
-					   PAGE_SIZE, DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(device->dev, fb->r100c10))
-			return -EFAULT;
-	}
-
 	return 0;
 }
-- 
2.7.4
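
For context, a minimal sketch of the ordering problem the patch works
around. This is illustrative only, not code from the driver: the
map_scratch() helper and its dev/page parameters are hypothetical
placeholders, and nouveau relies on TTM (rather than the driver itself)
to widen the mask before .oneinit runs.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/*
 * With the default 32-bit DMA mask still in effect, mapping a page
 * that the allocator placed above 4 GB fails on platforms with no
 * memory below 4 GB.  Widening the mask before the first
 * dma_map_page() call makes the same mapping succeed.
 */
static dma_addr_t map_scratch(struct device *dev, struct page *page)
{
	dma_addr_t addr;

	/* Widen the mask first; 0 is used as the failure value here. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;

	addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, addr))
		return 0;
	return addr;
}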