2009-11-15 12:23:20

by FUJITA Tomonori

Subject: [PATCH 0/3] x86: kill global bad_dma_address variable

This is against -tip.

This patchset kills the bad_dma_address variable, the old mechanism
that let IOMMU drivers make dma_mapping_error() work in an
IOMMU-specific way.

The variable was introduced for exactly that purpose, but it can't
handle systems that use both swiotlb and a HW IOMMU, so we introduced
dma_map_ops->mapping_error to handle that case.

Keeping those two mechanisms is pointless. Let's remove the old one.
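
For reference, the surviving mechanism boils down to the following
dispatch; this is a condensed sketch of the x86 dma_mapping_error()
as it reads after the series (the actual hunk is in patch 2/3), not
a new change:

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* IOMMU drivers that mix swiotlb and HW mappings (or that can't
	 * use one global sentinel) supply their own check here... */
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	/* ...everyone else reports failure as DMA_ERROR_CODE (zero). */
	return (dma_addr == DMA_ERROR_CODE);
}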

---
arch/x86/include/asm/dma-mapping.h | 5 +++--
arch/x86/kernel/amd_iommu.c | 21 ++++++++++-----------
arch/x86/kernel/pci-calgary_64.c | 31 +++++++++++++------------------
arch/x86/kernel/pci-dma.c | 3 ---
arch/x86/kernel/pci-gart_64.c | 18 +++++++++++++-----
arch/x86/kernel/pci-nommu.c | 2 +-
6 files changed, 40 insertions(+), 40 deletions(-)



2009-11-15 12:23:32

by FUJITA Tomonori

Subject: [PATCH 1/3] gart: add own dma_mapping_error function

GART IOMMU is the only user of the bad_dma_address variable. This
patch converts GART to the newer mechanism, mapping_error in struct
dma_map_ops, to make dma_mapping_error() work in an IOMMU-specific
way.
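
For context, callers are expected to detect failures through
dma_mapping_error() rather than by comparing against any particular
address, which is what lets GART keep its sentinel private. A
minimal, hypothetical caller sketch (my_dev, my_page and the error
path are illustrative, not part of this patch):

	dma_addr_t addr;

	addr = dma_map_page(my_dev, my_page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(my_dev, addr)) {
		/* on GART this lands in gart_mapping_error(), which
		 * compares against the now-private bad_dma_addr */
		return -ENOMEM;
	}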

Signed-off-by: FUJITA Tomonori <[email protected]>
---
arch/x86/kernel/pci-gart_64.c | 18 +++++++++++++-----
1 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 919182e..61c4d1e 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -47,6 +47,8 @@ static unsigned long iommu_pages; /* .. and in pages */

static u32 *iommu_gatt_base; /* Remapping table */

+static dma_addr_t bad_dma_addr;
+
/*
* If this is disabled the IOMMU will use an optimized flushing strategy
* of only flushing when an mapping is reused. With it true the GART is
@@ -217,7 +219,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
if (panic_on_overflow)
panic("dma_map_area overflow %lu bytes\n", size);
iommu_full(dev, size, dir);
- return bad_dma_address;
+ return bad_dma_addr;
}

for (i = 0; i < npages; i++) {
@@ -303,7 +305,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,

if (nonforced_iommu(dev, addr, s->length)) {
addr = dma_map_area(dev, addr, s->length, dir, 0);
- if (addr == bad_dma_address) {
+ if (addr == bad_dma_addr) {
if (i > 0)
gart_unmap_sg(dev, sg, i, dir, NULL);
nents = 0;
@@ -456,7 +458,7 @@ error:

iommu_full(dev, pages << PAGE_SHIFT, dir);
for_each_sg(sg, s, nents, i)
- s->dma_address = bad_dma_address;
+ s->dma_address = bad_dma_addr;
return 0;
}

@@ -480,7 +482,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
DMA_BIDIRECTIONAL, align_mask);

flush_gart();
- if (paddr != bad_dma_address) {
+ if (paddr != bad_dma_addr) {
*dma_addr = paddr;
return page_address(page);
}
@@ -500,6 +502,11 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr,
free_pages((unsigned long)vaddr, get_order(size));
}

+static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return (dma_addr == bad_dma_addr);
+}
+
static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -687,6 +694,7 @@ static struct dma_map_ops gart_dma_ops = {
.unmap_page = gart_unmap_page,
.alloc_coherent = gart_alloc_coherent,
.free_coherent = gart_free_coherent,
+ .mapping_error = gart_mapping_error,
};

static void gart_iommu_shutdown(void)
@@ -785,7 +793,7 @@ int __init gart_iommu_init(void)

iommu_start = aper_size - iommu_size;
iommu_bus_base = info.aper_base + iommu_start;
- bad_dma_address = iommu_bus_base;
+ bad_dma_addr = iommu_bus_base;
iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

/*
--
1.5.6.5

2009-11-15 12:23:04

by FUJITA Tomonori

Subject: [PATCH 2/3] x86: kill bad_dma_address variable

This kills the bad_dma_address variable, the old mechanism that let
IOMMU drivers make dma_mapping_error() work in an IOMMU-specific way.

The variable was introduced for exactly that purpose, but it can't
handle systems that use both swiotlb and a HW IOMMU, so we introduced
dma_map_ops->mapping_error to handle that case.

Intel VT-d, GART, and swiotlb already use
dma_map_ops->mapping_error. Calgary, AMD IOMMU, and nommu use zero
for an error DMA address. This adds DMA_ERROR_CODE and converts them
to use it (as SPARC and POWER do).
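
Concretely, here is what a converted driver looks like; this is the
pci-nommu.c map_page path reassembled whole from the hunk at the end
of this patch (signature as in the surrounding file), with the shared
zero sentinel returned on failure:

static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	dma_addr_t bus = page_to_phys(page) + offset;

	WARN_ON(size == 0);

	/* nommu has no remapping hardware: a mapping can only fail the
	 * device's addressing check, and failure is plain zero now. */
	if (!check_addr("map_single", dev, bus, size))
		return DMA_ERROR_CODE;

	flush_write_buffers();
	return bus;
}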

Signed-off-by: FUJITA Tomonori <[email protected]>
---
arch/x86/include/asm/dma-mapping.h | 5 +++--
arch/x86/kernel/amd_iommu.c | 21 ++++++++++-----------
arch/x86/kernel/pci-calgary_64.c | 22 ++++++++++------------
arch/x86/kernel/pci-dma.c | 3 ---
arch/x86/kernel/pci-nommu.c | 2 +-
5 files changed, 24 insertions(+), 29 deletions(-)

diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 6a25d5d..0f6c02f 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -20,7 +20,8 @@
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
#endif

-extern dma_addr_t bad_dma_address;
+#define DMA_ERROR_CODE 0
+
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
@@ -48,7 +49,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
if (ops->mapping_error)
return ops->mapping_error(dev, dma_addr);

- return (dma_addr == bad_dma_address);
+ return (dma_addr == DMA_ERROR_CODE);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 66237fd..093bd52 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -928,7 +928,7 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
}

if (unlikely(address == -1))
- address = bad_dma_address;
+ address = DMA_ERROR_CODE;

WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);

@@ -1544,7 +1544,7 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,

pte = dma_ops_get_pte(dom, address);
if (!pte)
- return bad_dma_address;
+ return DMA_ERROR_CODE;

__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;

@@ -1625,7 +1625,7 @@ static dma_addr_t __map_single(struct device *dev,
retry:
address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
dma_mask);
- if (unlikely(address == bad_dma_address)) {
+ if (unlikely(address == DMA_ERROR_CODE)) {
/*
* setting next_address here will let the address
* allocator only scan the new allocated range in the
@@ -1646,7 +1646,7 @@ retry:
start = address;
for (i = 0; i < pages; ++i) {
ret = dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
- if (ret == bad_dma_address)
+ if (ret == DMA_ERROR_CODE)
goto out_unmap;

paddr += PAGE_SIZE;
@@ -1674,7 +1674,7 @@ out_unmap:

dma_ops_free_addresses(dma_dom, address, pages);

- return bad_dma_address;
+ return DMA_ERROR_CODE;
}

/*
@@ -1690,7 +1690,7 @@ static void __unmap_single(struct amd_iommu *iommu,
dma_addr_t i, start;
unsigned int pages;

- if ((dma_addr == bad_dma_address) ||
+ if ((dma_addr == DMA_ERROR_CODE) ||
(dma_addr + size > dma_dom->aperture_size))
return;

@@ -1732,7 +1732,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
INC_STATS_COUNTER(cnt_map_single);

if (!check_device(dev))
- return bad_dma_address;
+ return DMA_ERROR_CODE;

dma_mask = *dev->dma_mask;

@@ -1743,12 +1743,12 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
return (dma_addr_t)paddr;

if (!dma_ops_domain(domain))
- return bad_dma_address;
+ return DMA_ERROR_CODE;

spin_lock_irqsave(&domain->lock, flags);
addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
dma_mask);
- if (addr == bad_dma_address)
+ if (addr == DMA_ERROR_CODE)
goto out;

iommu_completion_wait(iommu);
@@ -1957,7 +1957,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
size, DMA_BIDIRECTIONAL, true, dma_mask);

- if (*dma_addr == bad_dma_address) {
+ if (*dma_addr == DMA_ERROR_CODE) {
spin_unlock_irqrestore(&domain->lock, flags);
goto out_free;
}
@@ -2110,7 +2110,6 @@ int __init amd_iommu_init_dma_ops(void)
prealloc_protection_domains();

iommu_detected = 1;
- bad_dma_address = 0;
swiotlb = 0;
#ifdef CONFIG_GART_IOMMU
gart_iommu_aperture_disabled = 1;
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index c84ad03..af9f436 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -245,7 +245,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
if (panic_on_overflow)
panic("Calgary: fix the allocator.\n");
else
- return bad_dma_address;
+ return DMA_ERROR_CODE;
}
}

@@ -261,11 +261,11 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
void *vaddr, unsigned int npages, int direction)
{
unsigned long entry;
- dma_addr_t ret = bad_dma_address;
+ dma_addr_t ret = DMA_ERROR_CODE;

entry = iommu_range_alloc(dev, tbl, npages);

- if (unlikely(entry == bad_dma_address))
+ if (unlikely(entry == DMA_ERROR_CODE))
goto error;

/* set the return dma address */
@@ -280,7 +280,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
error:
printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
"iommu %p\n", npages, tbl);
- return bad_dma_address;
+ return DMA_ERROR_CODE;
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
@@ -291,8 +291,8 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned long flags;

/* were we called with bad_dma_address? */
- badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE);
- if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) {
+ badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
+ if (unlikely((dma_addr >= DMA_ERROR_CODE) && (dma_addr < badend))) {
WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
"address 0x%Lx\n", dma_addr);
return;
@@ -374,7 +374,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);

entry = iommu_range_alloc(dev, tbl, npages);
- if (entry == bad_dma_address) {
+ if (entry == DMA_ERROR_CODE) {
/* makes sure unmap knows to stop */
s->dma_length = 0;
goto error;
@@ -392,7 +392,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
error:
calgary_unmap_sg(dev, sg, nelems, dir, NULL);
for_each_sg(sg, s, nelems, i) {
- sg->dma_address = bad_dma_address;
+ sg->dma_address = DMA_ERROR_CODE;
sg->dma_length = 0;
}
return 0;
@@ -447,7 +447,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,

/* set up tces to cover the allocated range */
mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
- if (mapping == bad_dma_address)
+ if (mapping == DMA_ERROR_CODE)
goto free;
*dma_handle = mapping;
return ret;
@@ -728,7 +728,7 @@ static void __init calgary_reserve_regions(struct pci_dev *dev)
struct iommu_table *tbl = pci_iommu(dev->bus);

/* reserve EMERGENCY_PAGES from bad_dma_address and up */
- iommu_range_reserve(tbl, bad_dma_address, EMERGENCY_PAGES);
+ iommu_range_reserve(tbl, DMA_ERROR_CODE, EMERGENCY_PAGES);

/* avoid the BIOS/VGA first 640KB-1MB region */
/* for CalIOC2 - avoid the entire first MB */
@@ -1359,8 +1359,6 @@ static int __init calgary_iommu_init(void)
return ret;
}

- bad_dma_address = 0x0;
-
return 0;
}

diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index bf621b9..afcc58b 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -43,9 +43,6 @@ int iommu_detected __read_mostly = 0;
*/
int iommu_pass_through __read_mostly;

-dma_addr_t bad_dma_address __read_mostly = 0;
-EXPORT_SYMBOL(bad_dma_address);
-
/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
.init_name = "fallback device",
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 875e382..22be12b 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -33,7 +33,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
dma_addr_t bus = page_to_phys(page) + offset;
WARN_ON(size == 0);
if (!check_addr("map_single", dev, bus, size))
- return bad_dma_address;
+ return DMA_ERROR_CODE;
flush_write_buffers();
return bus;
}
--
1.5.6.5

2009-11-15 12:23:19

by FUJITA Tomonori

Subject: [PATCH 3/3] Calgary: remove unnecessary DMA_ERROR_CODE usage

This cleans up iommu_alloc() a bit, folding the error path into its
single user and removing the unnecessary DMA_ERROR_CODE
pre-initialization.
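
The resulting function, reassembled from the hunks below for
readability:

static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *vaddr, unsigned int npages, int direction)
{
	unsigned long entry;
	dma_addr_t ret;

	entry = iommu_range_alloc(dev, tbl, npages);
	if (unlikely(entry == DMA_ERROR_CODE)) {
		/* the lone failure site warns and returns directly now */
		printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
		       "iommu %p\n", npages, tbl);
		return DMA_ERROR_CODE;
	}

	/* set the return dma address */
	ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK);

	/* put the TCEs in the HW table */
	tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
		  direction);
	return ret;
}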

Signed-off-by: FUJITA Tomonori <[email protected]>
---
arch/x86/kernel/pci-calgary_64.c | 15 ++++++---------
1 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index af9f436..849a099 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -261,12 +261,15 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
void *vaddr, unsigned int npages, int direction)
{
unsigned long entry;
- dma_addr_t ret = DMA_ERROR_CODE;
+ dma_addr_t ret;

entry = iommu_range_alloc(dev, tbl, npages);

- if (unlikely(entry == DMA_ERROR_CODE))
- goto error;
+ if (unlikely(entry == DMA_ERROR_CODE)) {
+ printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
+ "iommu %p\n", npages, tbl);
+ return DMA_ERROR_CODE;
+ }

/* set the return dma address */
ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK);
@@ -274,13 +277,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
/* put the TCEs in the HW table */
tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
direction);
-
return ret;
-
-error:
- printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
- "iommu %p\n", npages, tbl);
- return DMA_ERROR_CODE;
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
--
1.5.6.5

2009-11-17 08:27:27

by FUJITA Tomonori

Subject: [tip:core/iommu] x86: gart: Add own dma_mapping_error function

Commit-ID: 42109197eb7c01080eea6d9cd48ca23cbc3c566c
Gitweb: http://git.kernel.org/tip/42109197eb7c01080eea6d9cd48ca23cbc3c566c
Author: FUJITA Tomonori <[email protected]>
AuthorDate: Sun, 15 Nov 2009 21:19:52 +0900
Committer: Ingo Molnar <[email protected]>
CommitDate: Tue, 17 Nov 2009 07:53:20 +0100

x86: gart: Add own dma_mapping_error function

GART IOMMU is the only user of the bad_dma_address variable.

This patch converts GART to the newer mechanism, filling in
->mapping_error() in struct dma_map_ops, to make
dma_mapping_error() work in an IOMMU-specific way.

Signed-off-by: FUJITA Tomonori <[email protected]>
Acked-by: Jesse Barnes <[email protected]>
Cc: [email protected]
Cc: [email protected]
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
arch/x86/kernel/pci-gart_64.c | 18 +++++++++++++-----
1 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 919182e..61c4d1e 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -47,6 +47,8 @@ static unsigned long iommu_pages; /* .. and in pages */

static u32 *iommu_gatt_base; /* Remapping table */

+static dma_addr_t bad_dma_addr;
+
/*
* If this is disabled the IOMMU will use an optimized flushing strategy
* of only flushing when an mapping is reused. With it true the GART is
@@ -217,7 +219,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
if (panic_on_overflow)
panic("dma_map_area overflow %lu bytes\n", size);
iommu_full(dev, size, dir);
- return bad_dma_address;
+ return bad_dma_addr;
}

for (i = 0; i < npages; i++) {
@@ -303,7 +305,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,

if (nonforced_iommu(dev, addr, s->length)) {
addr = dma_map_area(dev, addr, s->length, dir, 0);
- if (addr == bad_dma_address) {
+ if (addr == bad_dma_addr) {
if (i > 0)
gart_unmap_sg(dev, sg, i, dir, NULL);
nents = 0;
@@ -456,7 +458,7 @@ error:

iommu_full(dev, pages << PAGE_SHIFT, dir);
for_each_sg(sg, s, nents, i)
- s->dma_address = bad_dma_address;
+ s->dma_address = bad_dma_addr;
return 0;
}

@@ -480,7 +482,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
DMA_BIDIRECTIONAL, align_mask);

flush_gart();
- if (paddr != bad_dma_address) {
+ if (paddr != bad_dma_addr) {
*dma_addr = paddr;
return page_address(page);
}
@@ -500,6 +502,11 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr,
free_pages((unsigned long)vaddr, get_order(size));
}

+static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return (dma_addr == bad_dma_addr);
+}
+
static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -687,6 +694,7 @@ static struct dma_map_ops gart_dma_ops = {
.unmap_page = gart_unmap_page,
.alloc_coherent = gart_alloc_coherent,
.free_coherent = gart_free_coherent,
+ .mapping_error = gart_mapping_error,
};

static void gart_iommu_shutdown(void)
@@ -785,7 +793,7 @@ int __init gart_iommu_init(void)

iommu_start = aper_size - iommu_size;
iommu_bus_base = info.aper_base + iommu_start;
- bad_dma_address = iommu_bus_base;
+ bad_dma_addr = iommu_bus_base;
iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

/*

2009-11-17 08:26:12

by FUJITA Tomonori

Subject: [tip:core/iommu] x86: Kill bad_dma_address variable

Commit-ID: 8fd524b355daef0945692227e726fb444cebcd4f
Gitweb: http://git.kernel.org/tip/8fd524b355daef0945692227e726fb444cebcd4f
Author: FUJITA Tomonori <[email protected]>
AuthorDate: Sun, 15 Nov 2009 21:19:53 +0900
Committer: Ingo Molnar <[email protected]>
CommitDate: Tue, 17 Nov 2009 07:53:21 +0100

x86: Kill bad_dma_address variable

This kills the bad_dma_address variable, the old mechanism that
let IOMMU drivers make dma_mapping_error() work in an
IOMMU-specific way.

The variable was introduced for exactly that purpose, but it
can't handle systems that use both swiotlb and a HW IOMMU, so we
introduced dma_map_ops->mapping_error to handle that case.

Intel VT-d, GART, and swiotlb already use
dma_map_ops->mapping_error. Calgary, AMD IOMMU, and nommu use
zero for an error DMA address. This adds DMA_ERROR_CODE and
converts them to use it (as SPARC and POWER do).

Signed-off-by: FUJITA Tomonori <[email protected]>
Acked-by: Jesse Barnes <[email protected]>
Cc: [email protected]
Cc: [email protected]
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
arch/x86/include/asm/dma-mapping.h | 5 +++--
arch/x86/kernel/amd_iommu.c | 21 ++++++++++-----------
arch/x86/kernel/pci-calgary_64.c | 22 ++++++++++------------
arch/x86/kernel/pci-dma.c | 3 ---
arch/x86/kernel/pci-nommu.c | 2 +-
5 files changed, 24 insertions(+), 29 deletions(-)

diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 6a25d5d..0f6c02f 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -20,7 +20,8 @@
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
#endif

-extern dma_addr_t bad_dma_address;
+#define DMA_ERROR_CODE 0
+
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
@@ -48,7 +49,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
if (ops->mapping_error)
return ops->mapping_error(dev, dma_addr);

- return (dma_addr == bad_dma_address);
+ return (dma_addr == DMA_ERROR_CODE);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 66237fd..093bd52 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -928,7 +928,7 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
}

if (unlikely(address == -1))
- address = bad_dma_address;
+ address = DMA_ERROR_CODE;

WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);

@@ -1544,7 +1544,7 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,

pte = dma_ops_get_pte(dom, address);
if (!pte)
- return bad_dma_address;
+ return DMA_ERROR_CODE;

__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;

@@ -1625,7 +1625,7 @@ static dma_addr_t __map_single(struct device *dev,
retry:
address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
dma_mask);
- if (unlikely(address == bad_dma_address)) {
+ if (unlikely(address == DMA_ERROR_CODE)) {
/*
* setting next_address here will let the address
* allocator only scan the new allocated range in the
@@ -1646,7 +1646,7 @@ retry:
start = address;
for (i = 0; i < pages; ++i) {
ret = dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
- if (ret == bad_dma_address)
+ if (ret == DMA_ERROR_CODE)
goto out_unmap;

paddr += PAGE_SIZE;
@@ -1674,7 +1674,7 @@ out_unmap:

dma_ops_free_addresses(dma_dom, address, pages);

- return bad_dma_address;
+ return DMA_ERROR_CODE;
}

/*
@@ -1690,7 +1690,7 @@ static void __unmap_single(struct amd_iommu *iommu,
dma_addr_t i, start;
unsigned int pages;

- if ((dma_addr == bad_dma_address) ||
+ if ((dma_addr == DMA_ERROR_CODE) ||
(dma_addr + size > dma_dom->aperture_size))
return;

@@ -1732,7 +1732,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
INC_STATS_COUNTER(cnt_map_single);

if (!check_device(dev))
- return bad_dma_address;
+ return DMA_ERROR_CODE;

dma_mask = *dev->dma_mask;

@@ -1743,12 +1743,12 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
return (dma_addr_t)paddr;

if (!dma_ops_domain(domain))
- return bad_dma_address;
+ return DMA_ERROR_CODE;

spin_lock_irqsave(&domain->lock, flags);
addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
dma_mask);
- if (addr == bad_dma_address)
+ if (addr == DMA_ERROR_CODE)
goto out;

iommu_completion_wait(iommu);
@@ -1957,7 +1957,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
size, DMA_BIDIRECTIONAL, true, dma_mask);

- if (*dma_addr == bad_dma_address) {
+ if (*dma_addr == DMA_ERROR_CODE) {
spin_unlock_irqrestore(&domain->lock, flags);
goto out_free;
}
@@ -2110,7 +2110,6 @@ int __init amd_iommu_init_dma_ops(void)
prealloc_protection_domains();

iommu_detected = 1;
- bad_dma_address = 0;
swiotlb = 0;
#ifdef CONFIG_GART_IOMMU
gart_iommu_aperture_disabled = 1;
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index c84ad03..af9f436 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -245,7 +245,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
if (panic_on_overflow)
panic("Calgary: fix the allocator.\n");
else
- return bad_dma_address;
+ return DMA_ERROR_CODE;
}
}

@@ -261,11 +261,11 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
void *vaddr, unsigned int npages, int direction)
{
unsigned long entry;
- dma_addr_t ret = bad_dma_address;
+ dma_addr_t ret = DMA_ERROR_CODE;

entry = iommu_range_alloc(dev, tbl, npages);

- if (unlikely(entry == bad_dma_address))
+ if (unlikely(entry == DMA_ERROR_CODE))
goto error;

/* set the return dma address */
@@ -280,7 +280,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
error:
printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
"iommu %p\n", npages, tbl);
- return bad_dma_address;
+ return DMA_ERROR_CODE;
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
@@ -291,8 +291,8 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned long flags;

/* were we called with bad_dma_address? */
- badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE);
- if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) {
+ badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
+ if (unlikely((dma_addr >= DMA_ERROR_CODE) && (dma_addr < badend))) {
WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
"address 0x%Lx\n", dma_addr);
return;
@@ -374,7 +374,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);

entry = iommu_range_alloc(dev, tbl, npages);
- if (entry == bad_dma_address) {
+ if (entry == DMA_ERROR_CODE) {
/* makes sure unmap knows to stop */
s->dma_length = 0;
goto error;
@@ -392,7 +392,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
error:
calgary_unmap_sg(dev, sg, nelems, dir, NULL);
for_each_sg(sg, s, nelems, i) {
- sg->dma_address = bad_dma_address;
+ sg->dma_address = DMA_ERROR_CODE;
sg->dma_length = 0;
}
return 0;
@@ -447,7 +447,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,

/* set up tces to cover the allocated range */
mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
- if (mapping == bad_dma_address)
+ if (mapping == DMA_ERROR_CODE)
goto free;
*dma_handle = mapping;
return ret;
@@ -728,7 +728,7 @@ static void __init calgary_reserve_regions(struct pci_dev *dev)
struct iommu_table *tbl = pci_iommu(dev->bus);

/* reserve EMERGENCY_PAGES from bad_dma_address and up */
- iommu_range_reserve(tbl, bad_dma_address, EMERGENCY_PAGES);
+ iommu_range_reserve(tbl, DMA_ERROR_CODE, EMERGENCY_PAGES);

/* avoid the BIOS/VGA first 640KB-1MB region */
/* for CalIOC2 - avoid the entire first MB */
@@ -1359,8 +1359,6 @@ static int __init calgary_iommu_init(void)
return ret;
}

- bad_dma_address = 0x0;
-
return 0;
}

diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index bf621b9..afcc58b 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -43,9 +43,6 @@ int iommu_detected __read_mostly = 0;
*/
int iommu_pass_through __read_mostly;

-dma_addr_t bad_dma_address __read_mostly = 0;
-EXPORT_SYMBOL(bad_dma_address);
-
/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
.init_name = "fallback device",
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 875e382..22be12b 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -33,7 +33,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
dma_addr_t bus = page_to_phys(page) + offset;
WARN_ON(size == 0);
if (!check_addr("map_single", dev, bus, size))
- return bad_dma_address;
+ return DMA_ERROR_CODE;
flush_write_buffers();
return bus;
}

2009-11-17 08:26:20

by FUJITA Tomonori

Subject: [tip:core/iommu] x86: Calgary: Remove unnecessary DMA_ERROR_CODE usage

Commit-ID: 1f7564ca831a00b21bb493ef174c845b2ba9e64d
Gitweb: http://git.kernel.org/tip/1f7564ca831a00b21bb493ef174c845b2ba9e64d
Author: FUJITA Tomonori <[email protected]>
AuthorDate: Sun, 15 Nov 2009 21:19:54 +0900
Committer: Ingo Molnar <[email protected]>
CommitDate: Tue, 17 Nov 2009 07:53:21 +0100

x86: Calgary: Remove unnecessary DMA_ERROR_CODE usage

This cleans up iommu_alloc() a bit, folding the error path into
its single user and removing the unnecessary DMA_ERROR_CODE
pre-initialization.

Signed-off-by: FUJITA Tomonori <[email protected]>
Acked-by: Jesse Barnes <[email protected]>
Cc: [email protected]
Cc: [email protected]
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
arch/x86/kernel/pci-calgary_64.c | 15 ++++++---------
1 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index af9f436..849a099 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -261,12 +261,15 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
void *vaddr, unsigned int npages, int direction)
{
unsigned long entry;
- dma_addr_t ret = DMA_ERROR_CODE;
+ dma_addr_t ret;

entry = iommu_range_alloc(dev, tbl, npages);

- if (unlikely(entry == DMA_ERROR_CODE))
- goto error;
+ if (unlikely(entry == DMA_ERROR_CODE)) {
+ printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
+ "iommu %p\n", npages, tbl);
+ return DMA_ERROR_CODE;
+ }

/* set the return dma address */
ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK);
@@ -274,13 +277,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
/* put the TCEs in the HW table */
tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
direction);
-
return ret;
-
-error:
- printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
- "iommu %p\n", npages, tbl);
- return DMA_ERROR_CODE;
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,

2009-11-17 08:26:01

by Ingo Molnar

Subject: [tip:core/iommu] x86: gart: Clean up the code a bit

Commit-ID: 123bf0e2eddcda36a33bdfc87aa1fb07229f07b5
Gitweb: http://git.kernel.org/tip/123bf0e2eddcda36a33bdfc87aa1fb07229f07b5
Author: Ingo Molnar <[email protected]>
AuthorDate: Sun, 15 Nov 2009 21:19:52 +0900
Committer: Ingo Molnar <[email protected]>
CommitDate: Tue, 17 Nov 2009 07:57:00 +0100

x86: gart: Clean up the code a bit

Clean up various small stylistic details in the GART code. No
functionality changed.
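
The bulk of it is mechanical conversion of raw printk() calls to the
pr_*() helpers plus re-aligned assignments; a representative pair
from the diff below:

	/* before */
	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");

	/* after */
	pr_info("PCI-DMA: using GART IOMMU.\n");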

Cc: FUJITA Tomonori <[email protected]>
Cc: Jesse Barnes <[email protected]>
Cc: [email protected]
Cc: [email protected]
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
arch/x86/kernel/pci-gart_64.c | 116 +++++++++++++++++++++-------------------
1 files changed, 61 insertions(+), 55 deletions(-)

diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 61c4d1e..e6a0d40 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -95,7 +95,7 @@ static unsigned long alloc_iommu(struct device *dev, int size,

base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
PAGE_SIZE) >> PAGE_SHIFT;
- boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
+ boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
PAGE_SIZE) >> PAGE_SHIFT;

spin_lock_irqsave(&iommu_bitmap_lock, flags);
@@ -297,7 +297,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
int i;

#ifdef CONFIG_IOMMU_DEBUG
- printk(KERN_DEBUG "dma_map_sg overflow\n");
+ pr_debug("dma_map_sg overflow\n");
#endif

for_each_sg(sg, s, nents, i) {
@@ -392,12 +392,14 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
if (!dev)
dev = &x86_dma_fallback_dev;

- out = 0;
- start = 0;
- start_sg = sgmap = sg;
- seg_size = 0;
- max_seg_size = dma_get_max_seg_size(dev);
- ps = NULL; /* shut up gcc */
+ out = 0;
+ start = 0;
+ start_sg = sg;
+ sgmap = sg;
+ seg_size = 0;
+ max_seg_size = dma_get_max_seg_size(dev);
+ ps = NULL; /* shut up gcc */
+
for_each_sg(sg, s, nents, i) {
dma_addr_t addr = sg_phys(s);

@@ -420,11 +422,12 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
sgmap, pages, need) < 0)
goto error;
out++;
- seg_size = 0;
- sgmap = sg_next(sgmap);
- pages = 0;
- start = i;
- start_sg = s;
+
+ seg_size = 0;
+ sgmap = sg_next(sgmap);
+ pages = 0;
+ start = i;
+ start_sg = s;
}
}

@@ -523,7 +526,7 @@ static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

if (iommu_size < 64*1024*1024) {
- printk(KERN_WARNING
+ pr_warning(
"PCI-DMA: Warning: Small IOMMU %luMB."
" Consider increasing the AGP aperture in BIOS\n",
iommu_size >> 20);
@@ -578,28 +581,32 @@ void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
aperture_alloc = aper_alloc;
}

-static int gart_resume(struct sys_device *dev)
+static void gart_fixup_northbridges(struct sys_device *dev)
{
- printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");
+ int i;

- if (fix_up_north_bridges) {
- int i;
+ if (!fix_up_north_bridges)
+ return;

- printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");
+ pr_info("PCI-DMA: Restoring GART aperture settings\n");

- for (i = 0; i < num_k8_northbridges; i++) {
- struct pci_dev *dev = k8_northbridges[i];
+ for (i = 0; i < num_k8_northbridges; i++) {
+ struct pci_dev *dev = k8_northbridges[i];

- /*
- * Don't enable translations just yet. That is the next
- * step. Restore the pre-suspend aperture settings.
- */
- pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
- aperture_order << 1);
- pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
- aperture_alloc >> 25);
- }
+ /*
+ * Don't enable translations just yet. That is the next
+ * step. Restore the pre-suspend aperture settings.
+ */
+ pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, aperture_order << 1);
+ pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
}
+}
+
+static int gart_resume(struct sys_device *dev)
+{
+ pr_info("PCI-DMA: Resuming GART IOMMU\n");
+
+ gart_fixup_northbridges(dev);

enable_gart_translations();

@@ -612,15 +619,14 @@ static int gart_suspend(struct sys_device *dev, pm_message_t state)
}

static struct sysdev_class gart_sysdev_class = {
- .name = "gart",
- .suspend = gart_suspend,
- .resume = gart_resume,
+ .name = "gart",
+ .suspend = gart_suspend,
+ .resume = gart_resume,

};

static struct sys_device device_gart = {
- .id = 0,
- .cls = &gart_sysdev_class,
+ .cls = &gart_sysdev_class,
};

/*
@@ -635,7 +641,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
void *gatt;
int i, error;

- printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
+ pr_info("PCI-DMA: Disabling AGP.\n");
+
aper_size = aper_base = info->aper_size = 0;
dev = NULL;
for (i = 0; i < num_k8_northbridges; i++) {
@@ -653,6 +660,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
}
if (!aper_base)
goto nommu;
+
info->aper_base = aper_base;
info->aper_size = aper_size >> 20;

@@ -675,14 +683,14 @@ static __init int init_k8_gatt(struct agp_kern_info *info)

flush_gart();

- printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
+ pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
aper_base, aper_size>>10);

return 0;

nommu:
/* Should not happen anymore */
- printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
+ pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
"falling back to iommu=soft.\n");
return -1;
}
@@ -744,23 +752,23 @@ int __init gart_iommu_init(void)
!gart_iommu_aperture ||
(no_agp && init_k8_gatt(&info) < 0)) {
if (max_pfn > MAX_DMA32_PFN) {
- printk(KERN_WARNING "More than 4GB of memory "
- "but GART IOMMU not available.\n");
- printk(KERN_WARNING "falling back to iommu=soft.\n");
+ pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
+ pr_warning("falling back to iommu=soft.\n");
}
return 0;
}

/* need to map that range */
- aper_size = info.aper_size << 20;
- aper_base = info.aper_base;
- end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
+ aper_size = info.aper_size << 20;
+ aper_base = info.aper_base;
+ end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
+
if (end_pfn > max_low_pfn_mapped) {
start_pfn = (aper_base>>PAGE_SHIFT);
init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
}

- printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
+ pr_info("PCI-DMA: using GART IOMMU.\n");
iommu_size = check_iommu_size(info.aper_base, aper_size);
iommu_pages = iommu_size >> PAGE_SHIFT;

@@ -775,8 +783,7 @@ int __init gart_iommu_init(void)

ret = dma_debug_resize_entries(iommu_pages);
if (ret)
- printk(KERN_DEBUG
- "PCI-DMA: Cannot trace all the entries\n");
+ pr_debug("PCI-DMA: Cannot trace all the entries\n");
}
#endif

@@ -786,15 +793,14 @@ int __init gart_iommu_init(void)
*/
iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

- agp_memory_reserved = iommu_size;
- printk(KERN_INFO
- "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
+ pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
iommu_size >> 20);

- iommu_start = aper_size - iommu_size;
- iommu_bus_base = info.aper_base + iommu_start;
- bad_dma_addr = iommu_bus_base;
- iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
+ agp_memory_reserved = iommu_size;
+ iommu_start = aper_size - iommu_size;
+ iommu_bus_base = info.aper_base + iommu_start;
+ bad_dma_addr = iommu_bus_base;
+ iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

/*
* Unmap the IOMMU part of the GART. The alias of the page is
@@ -816,7 +822,7 @@ int __init gart_iommu_init(void)
* the pages as Not-Present:
*/
wbinvd();
-
+
/*
* Now all caches are flushed and we can safely enable
* GART hardware. Doing it early leaves the possibility