From: Tvrtko Ursulin
To: Intel-gfx@lists.freedesktop.org
Cc: linux-kernel@vger.kernel.org, Tvrtko Ursulin, Chris Wilson
Subject: [PATCH 4/4] drm/i915: Use __sg_alloc_table_from_pages for userptr allocations
Date: Fri, 11 Nov 2016 08:50:20 +0000
Message-Id: <1478854220-3255-5-git-send-email-tvrtko.ursulin@linux.intel.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1478854220-3255-1-git-send-email-tvrtko.ursulin@linux.intel.com>
References: <1478854220-3255-1-git-send-email-tvrtko.ursulin@linux.intel.com>

From: Tvrtko Ursulin

With the addition of __sg_alloc_table_from_pages we can control the
maximum coalescing size and eliminate a separate path for allocating
backing store here.

Similar to 871dfbd67d4e ("drm/i915: Allow compaction upto SWIOTLB max
segment size"), this enables more compact sg lists to be created and so
has a beneficial effect on workloads with many and/or large objects of
this class.

v2:
 * Rename helper to i915_sg_segment_size and fix swiotlb override.
 * Commit message update.

Signed-off-by: Tvrtko Ursulin
Cc: Chris Wilson
---
 drivers/gpu/drm/i915/i915_drv.h         |  9 +++++++++
 drivers/gpu/drm/i915/i915_gem.c         | 15 +--------------
 drivers/gpu/drm/i915/i915_gem_userptr.c | 28 ++++++----------------------
 3 files changed, 16 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 30777dee3f9c..319f8def0f86 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -4175,4 +4175,13 @@ int remap_io_mapping(struct vm_area_struct *vma,
 	__T;								\
 })
 
+static inline unsigned int i915_sg_segment_size(void)
+{
+#if IS_ENABLED(CONFIG_SWIOTLB)
+	return swiotlb_nr_tbl() << IO_TLB_SHIFT;
+#else
+	return UINT_MAX;
+#endif
+}
+
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1c20edba7f2a..cb4c188a395c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2223,15 +2223,6 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	mutex_unlock(&obj->mm.lock);
 }
 
-static unsigned int swiotlb_max_size(void)
-{
-#if IS_ENABLED(CONFIG_SWIOTLB)
-	return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
-#else
-	return 0;
-#endif
-}
-
 static void i915_sg_trim(struct sg_table *orig_st)
 {
 	struct sg_table new_st;
@@ -2267,7 +2258,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	struct sgt_iter sgt_iter;
 	struct page *page;
 	unsigned long last_pfn = 0;	/* suppress gcc warning */
-	unsigned int max_segment;
+	unsigned int max_segment = rounddown(i915_sg_segment_size(), PAGE_SIZE);
 	int ret;
 	gfp_t gfp;
 
@@ -2278,10 +2269,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
 	GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
-	max_segment = swiotlb_max_size();
-	if (!max_segment)
-		max_segment = rounddown(UINT_MAX, PAGE_SIZE);
-
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (st == NULL)
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 64261639f547..b4461f1832a6 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -390,36 +390,20 @@ struct get_pages_work {
 	struct task_struct *task;
 };
 
-#if IS_ENABLED(CONFIG_SWIOTLB)
-#define swiotlb_active()	swiotlb_nr_tbl()
-#else
-#define swiotlb_active()	0
-#endif
-
 static int
 st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
 {
-	struct scatterlist *sg;
-	int ret, n;
+	int ret;
 
 	*st = kmalloc(sizeof(**st), GFP_KERNEL);
 	if (*st == NULL)
 		return -ENOMEM;
 
-	if (swiotlb_active()) {
-		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
-		if (ret)
-			goto err;
-
-		for_each_sg((*st)->sgl, sg, num_pages, n)
-			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
-	} else {
-		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
-						0, num_pages << PAGE_SHIFT,
-						GFP_KERNEL);
-		if (ret)
-			goto err;
-	}
+	ret = __sg_alloc_table_from_pages(*st, pvec, num_pages, 0,
+					  num_pages << PAGE_SHIFT,
+					  GFP_KERNEL, i915_sg_segment_size());
+	if (ret)
+		goto err;
 
 	return 0;
-- 
2.7.4
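
For anyone reading this outside the full diff context, the allocation
pattern both call sites now share looks roughly like the sketch below.
The example_pages_to_sgt() helper is hypothetical and exists only to
illustrate the flow; the argument order follows the
__sg_alloc_table_from_pages() call used by this series, and error
unwinding is trimmed to the minimum.

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "i915_drv.h"	/* i915_sg_segment_size() added by this patch */

/*
 * Hypothetical helper: build an sg_table from an array of pages while
 * capping segment coalescing at i915_sg_segment_size(), i.e. the
 * SWIOTLB segment limit when SWIOTLB is enabled, UINT_MAX otherwise.
 */
static struct sg_table *example_pages_to_sgt(struct page **pages,
					     unsigned int n_pages)
{
	struct sg_table *st;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	ret = __sg_alloc_table_from_pages(st, pages, n_pages, 0,
					  (unsigned long)n_pages << PAGE_SHIFT,
					  GFP_KERNEL, i915_sg_segment_size());
	if (ret) {
		kfree(st);
		return ERR_PTR(ret);
	}

	return st;
}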