From: Tvrtko Ursulin
To: Intel-gfx@lists.freedesktop.org
Cc: tursulin@ursulin.net, Tvrtko Ursulin, Chris Wilson, linux-kernel@vger.kernel.org
Subject: [PATCH 5/5] tools/testing/scatterlist: Test new __sg_alloc_table_from_pages
Date: Mon, 31 Jul 2017 19:55:12 +0100
Message-Id: <20170731185512.20010-5-tvrtko.ursulin@linux.intel.com>
In-Reply-To: <20170731185512.20010-1-tvrtko.ursulin@linux.intel.com>
References: <20170731185512.20010-1-tvrtko.ursulin@linux.intel.com>

From: Tvrtko Ursulin

Exercise the new __sg_alloc_table_from_pages API (and through it also the
old sg_alloc_table_from_pages), checking that the created table has the
expected number of segments depending on the sequence of input pages and
other conditions.

Signed-off-by: Tvrtko Ursulin
Cc: Chris Wilson
Cc: linux-kernel@vger.kernel.org
---
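For reference, a minimal sketch of the behaviour the tests below assert. It is
illustrative only and not part of the diff; it leans on the userspace mocks
added by this patch, where fake struct page pointers are just pfns scaled by
PAGE_SIZE (see the pfn_to_page() stub in linux/mm.h and set_pages() in main.c):

/*
 * Illustrative only: two pages with consecutive pfns are expected to be
 * coalesced into a single scatterlist segment, while a pfn gap, or a max
 * segment size smaller than the contiguous run, starts a new segment.
 */
#include <assert.h>

#include <linux/scatterlist.h>

static void example(void)
{
	/* Fake contiguous pages, pfn 1 and pfn 2, as set_pages() would build them. */
	struct page *pages[] = { pfn_to_page(1), pfn_to_page(2) };
	struct sg_table st;
	int ret;

	ret = __sg_alloc_table_from_pages(&st, pages, 2, 0, 2 * PAGE_SIZE,
					  SCATTERLIST_MAX_SEGMENT, GFP_KERNEL);
	assert(ret == 0);
	assert(st.nents == 1);	/* one coalesced segment expected */

	sg_free_table(&st);
}

The table of cases at the end of main.c walks through such combinations. Since
the Makefile builds a single "main" binary with AddressSanitizer enabled,
running the suite should just be a matter of "make && ./main".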
 tools/testing/scatterlist/Makefile   |  30 +++++++++
 tools/testing/scatterlist/linux/mm.h | 125 +++++++++++++++++++++++++++++++++++
 tools/testing/scatterlist/main.c     | 112 +++++++++++++++++++++++++++++++
 3 files changed, 267 insertions(+)
 create mode 100644 tools/testing/scatterlist/Makefile
 create mode 100644 tools/testing/scatterlist/linux/mm.h
 create mode 100644 tools/testing/scatterlist/main.c

diff --git a/tools/testing/scatterlist/Makefile b/tools/testing/scatterlist/Makefile
new file mode 100644
index 000000000000..0867e0ef32d6
--- /dev/null
+++ b/tools/testing/scatterlist/Makefile
@@ -0,0 +1,30 @@
+CFLAGS += -I. -I../../include -g -O2 -Wall -fsanitize=address
+LDFLAGS += -fsanitize=address
+TARGETS = main
+OFILES = main.o scatterlist.o
+
+ifeq ($(BUILD), 32)
+	CFLAGS += -m32
+	LDFLAGS += -m32
+endif
+
+targets: include $(TARGETS)
+
+main: $(OFILES)
+
+clean:
+	$(RM) $(TARGETS) $(OFILES) scatterlist.c linux/scatterlist.h linux/highmem.h linux/kmemleak.h asm/io.h
+	@rmdir asm
+
+scatterlist.c: ../../../lib/scatterlist.c
+	@sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
+
+.PHONY: include
+
+include: ../../../include/linux/scatterlist.h
+	@mkdir -p linux
+	@mkdir -p asm
+	@touch asm/io.h
+	@touch linux/highmem.h
+	@touch linux/kmemleak.h
+	@cp $< linux/scatterlist.h
diff --git a/tools/testing/scatterlist/linux/mm.h b/tools/testing/scatterlist/linux/mm.h
new file mode 100644
index 000000000000..ccbb248ebdc1
--- /dev/null
+++ b/tools/testing/scatterlist/linux/mm.h
@@ -0,0 +1,125 @@
+#ifndef _LINUX_MM_H
+#define _LINUX_MM_H
+
+#include <assert.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+
+typedef unsigned long dma_addr_t;
+
+#define unlikely
+
+#define BUG_ON(x) assert(!(x))
+
+#define WARN_ON(condition) ({					\
+	int __ret_warn_on = !!(condition);			\
+	unlikely(__ret_warn_on);				\
+})
+
+#define WARN_ON_ONCE(condition) ({				\
+	int __ret_warn_on = !!(condition);			\
+	if (unlikely(__ret_warn_on))				\
+		assert(0);					\
+	unlikely(__ret_warn_on);				\
+})
+
+#define PAGE_SIZE (4096)
+#define PAGE_SHIFT (12)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+
+#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+
+#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
+
+#define virt_to_page(x) ((void *)x)
+#define page_address(x) ((void *)x)
+
+static inline unsigned long page_to_phys(struct page *page)
+{
+	assert(0);
+
+	return 0;
+}
+
+#define page_to_pfn(page) ((unsigned long)(page) / PAGE_SIZE)
+#define pfn_to_page(pfn) (void *)((pfn) * PAGE_SIZE)
+#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
+
+#define __min(t1, t2, min1, min2, x, y) ({		\
+	t1 min1 = (x);					\
+	t2 min2 = (y);					\
+	(void) (&min1 == &min2);			\
+	min1 < min2 ? min1 : min2; })
+
+#define ___PASTE(a,b) a##b
+#define __PASTE(a,b) ___PASTE(a,b)
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#define min(x, y) \
+	__min(typeof(x), typeof(y),			\
+	      __UNIQUE_ID(min1_), __UNIQUE_ID(min2_),	\
+	      x, y)
+
+#define min_t(type, x, y) \
+	__min(type, type,				\
+	      __UNIQUE_ID(min1_), __UNIQUE_ID(min2_),	\
+	      x, y)
+
+#define preemptible() (1)
+
+static inline void *kmap(struct page *page)
+{
+	assert(0);
+
+	return NULL;
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+	assert(0);
+
+	return NULL;
+}
+
+static inline void kunmap(void *addr)
+{
+	assert(0);
+}
+
+static inline void kunmap_atomic(void *addr)
+{
+	assert(0);
+}
+
+static inline unsigned long __get_free_page(unsigned int flags)
+{
+	return (unsigned long)malloc(PAGE_SIZE);
+}
+
+static inline void free_page(unsigned long page)
+{
+	free((void *)page);
+}
+
+static inline void *kmalloc(unsigned int size, unsigned int flags)
+{
+	return malloc(size);
+}
+
+#define kfree(x) free(x)
+
+#define kmemleak_alloc(a, b, c, d)
+#define kmemleak_free(a)
+
+#define PageSlab(p) (0)
+#define flush_kernel_dcache_page(p)
+
+#endif
diff --git a/tools/testing/scatterlist/main.c b/tools/testing/scatterlist/main.c
new file mode 100644
index 000000000000..edf9b2960734
--- /dev/null
+++ b/tools/testing/scatterlist/main.c
@@ -0,0 +1,112 @@
+#include <stdio.h>
+#include <assert.h>
+
+#include <linux/scatterlist.h>
+
+#define MAX_PAGES (64)
+
+static unsigned
+_set_pages(struct page **pages, const unsigned *array, unsigned num)
+{
+	unsigned int i;
+
+	assert(num < MAX_PAGES);
+
+	for (i = 0; i < num; i++)
+		pages[i] = (struct page *)(unsigned long)
+			((1 + array[i]) * PAGE_SIZE);
+
+	return num;
+}
+
+#define set_pages(p, a) _set_pages((p), (a), sizeof(a) / sizeof(a[0]))
+
+#define check_and_free(_st, _ret, _nents) \
+{ \
+	assert((_ret) == 0); \
+	assert((_st)->nents == _nents); \
+	assert((_st)->orig_nents == _nents); \
+	sg_free_table(_st); \
+}
+
+static int
+alloc_tbl(struct sg_table *st, struct page **pages, unsigned nr_pages,
+	  unsigned offset, unsigned size, unsigned max)
+{
+	return __sg_alloc_table_from_pages(st, pages, nr_pages, offset, size,
+					   max, GFP_KERNEL);
+}
+
+int main(void)
+{
+	const unsigned int sgmax = SCATTERLIST_MAX_SEGMENT;
+	struct page *pages[MAX_PAGES];
+	struct sg_table st;
+	int ret;
+
+	ret = set_pages(pages, ((unsigned []){ 0 }));
+	ret = alloc_tbl(&st, pages, ret, 0, ret * PAGE_SIZE, PAGE_SIZE + 1);
+	assert(ret == -EINVAL);
+
+	ret = set_pages(pages, ((unsigned []){ 0 }));
+	ret = alloc_tbl(&st, pages, ret, 0, ret * PAGE_SIZE, 0);
+	assert(ret == -EINVAL);
+
+	ret = set_pages(pages, ((unsigned []){ 0 }));
+	ret = alloc_tbl(&st, pages, ret, 0, ret * PAGE_SIZE, sgmax);
+	check_and_free(&st, ret, 1);
+
+	ret = set_pages(pages, ((unsigned []){ 0 }));
+	ret = alloc_tbl(&st, pages, ret, 0, ret, sgmax);
+	check_and_free(&st, ret, 1);
+
+	ret = set_pages(pages, ((unsigned []){ 0, 1 }));
+	ret = alloc_tbl(&st, pages, ret, 0, ret * PAGE_SIZE, sgmax);
+	check_and_free(&st, ret, 1);
+
+	ret = set_pages(pages, ((unsigned []){ 0, 2 }));
+	ret = alloc_tbl(&st, pages, ret, 0, ret * PAGE_SIZE, sgmax);
+	check_and_free(&st, ret, 2);
+
+	ret = set_pages(pages, ((unsigned []){ 0, 1, 3 }));
+	ret = alloc_tbl(&st, pages, ret, 0, ret * PAGE_SIZE, sgmax);
+	check_and_free(&st, ret, 2);
+
+	ret = set_pages(pages, ((unsigned []){ 0, 1, 3, 4 }));
+	ret = alloc_tbl(&st, pages, ret, 0, ret * PAGE_SIZE, sgmax);
+	check_and_free(&st, ret, 2);
+
+	ret = set_pages(pages, ((unsigned []){ 0, 1, 3, 4, 5 }));
+	ret = alloc_tbl(&st, pages, ret, 0, ret * PAGE_SIZE, sgmax);
+	check_and_free(&st, ret, 2);
+
+	ret = set_pages(pages, ((unsigned []){ 0, 1, 3, 4, 6 }));
+	ret = alloc_tbl(&st, pages, ret, 0, ret * PAGE_SIZE, sgmax);
+	check_and_free(&st, ret, 3);
+
+	ret = set_pages(pages, ((unsigned []){ 0, 2, 4, 6, 8 }));
+	ret = alloc_tbl(&st, pages, ret, 0, ret * PAGE_SIZE, sgmax);
+	check_and_free(&st, ret, 5);
+
+	ret = set_pages(pages, ((unsigned []){ 0, 1, 2, 3, 4 }));
+	ret = alloc_tbl(&st, pages, ret, 0, ret * PAGE_SIZE, sgmax);
+	check_and_free(&st, ret, 1);
+
+	ret = set_pages(pages, ((unsigned []){ 0, 1, 2, 3, 4 }));
+	ret = alloc_tbl(&st, pages, ret, 0, ret * PAGE_SIZE, 2 * PAGE_SIZE);
+	check_and_free(&st, ret, 3);
+
+	ret = set_pages(pages, ((unsigned []){ 0, 1, 2, 3, 4, 5}));
+	ret = alloc_tbl(&st, pages, ret, 0, ret * PAGE_SIZE, 2 * PAGE_SIZE);
+	check_and_free(&st, ret, 3);
+
+	ret = set_pages(pages, ((unsigned []){ 0, 2, 3, 4, 5, 6}));
+	ret = alloc_tbl(&st, pages, ret, 0, ret * PAGE_SIZE, 2 * PAGE_SIZE);
+	check_and_free(&st, ret, 4);
+
+	ret = set_pages(pages, ((unsigned []){ 0, 1, 3, 4, 5, 6}));
+	ret = alloc_tbl(&st, pages, ret, 0, ret * PAGE_SIZE, 2 * PAGE_SIZE);
+	check_and_free(&st, ret, 3);
+
+	return 0;
+}
-- 
2.9.4