x86_64 defines ARCH_HAS_SG_CHAIN. So if IOMMU implementations don't
support sg chaining, we will get data corruption. This converts intel-iommu
to walk scatterlists with for_each_sg() instead of indexing them as flat
arrays.
Signed-off-by: FUJITA Tomonori <[email protected]>
---
drivers/pci/intel-iommu.c | 32 ++++++++++++++++----------------
1 files changed, 16 insertions(+), 16 deletions(-)
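A note for reviewers (illustration only, not part of the patch): the helper
names below are made up, and the exact sg helpers available depend on the sg
chaining patches in git-block, but the idea is that a chained scatterlist
stores a link to the next segment in the last slot of each segment. Plain
sg++ or sg[i] arithmetic steps onto that link entry and then off the end of
the segment, while for_each_sg() advances with sg_next(), which follows the
link.

#include <linux/scatterlist.h>

/* Hypothetical helper: the old, flat-array style of iteration.  Once
 * scatterlists can be chained this may step onto a chain-link entry. */
static size_t walk_sg_flat(struct scatterlist *sglist, int nelems)
{
        struct scatterlist *sg = sglist;
        size_t total = 0;
        int i;

        for (i = 0; i < nelems; i++, sg++)
                total += sg->length;
        return total;
}

/* Hypothetical helper: chain-aware iteration, as used in this patch.
 * for_each_sg() advances with sg_next() and so follows chain links. */
static size_t walk_sg_chained(struct scatterlist *sglist, int nelems)
{
        struct scatterlist *sg;
        size_t total = 0;
        int i;

        for_each_sg(sglist, sg, nelems, i)
                total += sg->length;
        return total;
}

/* Hypothetical helper: how a chain is built.  sg_chain() turns the last
 * slot of 'first' (entry first_nents - 1) into a link to 'second'. */
static void chain_two_segments(struct scatterlist *first,
                               unsigned int first_nents,
                               struct scatterlist *second)
{
        sg_chain(first, first_nents, second);
}

After this patch, intel_map_sg(), intel_unmap_sg() and
intel_nontranslate_map_sg() all follow the walk_sg_chained() pattern.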
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index dab329f..4668995 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1963,7 +1963,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size,
}
#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
+static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
int nelems, int dir)
{
int i;
@@ -1973,16 +1973,17 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
struct iova *iova;
size_t size = 0;
void *addr;
+ struct scatterlist *sg;
if (pdev->sysdata == DUMMY_DEVICE_DOMAIN_INFO)
return;
domain = find_domain(pdev);
- iova = find_iova(&domain->iovad, IOVA_PFN(sg[0].dma_address));
+ iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
if (!iova)
return;
- for (i = 0; i < nelems; i++, sg++) {
+ for_each_sg(sglist, sg, nelems, i) {
addr = SG_ENT_VIRT_ADDRESS(sg);
size += aligned_size((u64)addr, sg->length);
}
@@ -2003,20 +2004,20 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
}
static int intel_nontranslate_map_sg(struct device *hddev,
- struct scatterlist *sg, int nelems, int dir)
+ struct scatterlist *sglist, int nelems, int dir)
{
int i;
+ struct scatterlist *sg;
- for (i = 0; i < nelems; i++) {
- struct scatterlist *s = &sg[i];
- BUG_ON(!s->page);
- s->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(s));
- s->dma_length = s->length;
+ for_each_sg(sglist, sg, nelems, i) {
+ BUG_ON(!sg->page);
+ sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
+ sg->dma_length = sg->length;
}
return nelems;
}
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
+static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
int nelems, int dir)
{
void *addr;
@@ -2028,18 +2029,18 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
size_t offset = 0;
struct iova *iova = NULL;
int ret;
- struct scatterlist *orig_sg = sg;
+ struct scatterlist *sg;
unsigned long start_addr;
BUG_ON(dir == DMA_NONE);
if (pdev->sysdata == DUMMY_DEVICE_DOMAIN_INFO)
- return intel_nontranslate_map_sg(hwdev, sg, nelems, dir);
+ return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
domain = get_valid_domain_for_dev(pdev);
if (!domain)
return 0;
- for (i = 0; i < nelems; i++, sg++) {
+ for_each_sg(sglist, sg, nelems, i) {
addr = SG_ENT_VIRT_ADDRESS(sg);
addr = (void *)virt_to_phys(addr);
size += aligned_size((u64)addr, sg->length);
@@ -2047,7 +2048,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
iova = __intel_alloc_iova(hwdev, domain, size);
if (!iova) {
- orig_sg->dma_length = 0;
+ sglist->dma_length = 0;
return 0;
}
@@ -2063,8 +2064,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
offset = 0;
- sg = orig_sg;
- for (i = 0; i < nelems; i++, sg++) {
+ for_each_sg(sglist, sg, nelems, i) {
addr = SG_ENT_VIRT_ADDRESS(sg);
addr = (void *)virt_to_phys(addr);
size = aligned_size((u64)addr, sg->length);
--
1.5.2.4
On Sat, Sep 29, 2007 at 05:16:38AM -0700, FUJITA Tomonori wrote:
>
> x86_64 defines ARCH_HAS_SG_CHAIN. So if IOMMU implementations don't
> support sg chaining, we will get data corruption.
> Signed-off-by: FUJITA Tomonori <[email protected]>
Acked-by: Anil S Keshavamurthy <[email protected]>
On Mon, 1 Oct 2007 09:12:56 -0700
"Keshavamurthy, Anil S" <[email protected]> wrote:
> On Sat, Sep 29, 2007 at 05:16:38AM -0700, FUJITA Tomonori wrote:
> >
> > x86_64 defines ARCH_HAS_SG_CHAIN. So if IOMMU implementations don't
> > support sg chaining, we will get data corruption.
> > Signed-off-by: FUJITA Tomonori <[email protected]>
>
> Acked-by: Anil S Keshavamurthy <[email protected]>
>
Am I correct in believing that this patch is needed only when the
chaining patches which are presently in git-block are combined with the
intel-iommu work which is presently in -mm?
On Wed, Oct 03, 2007 at 02:12:03PM -0700, Andrew Morton wrote:
> On Mon, 1 Oct 2007 09:12:56 -0700
> "Keshavamurthy, Anil S" <[email protected]> wrote:
>
> > On Sat, Sep 29, 2007 at 05:16:38AM -0700, FUJITA Tomonori wrote:
> > >
> > > x86_64 defines ARCH_HAS_SG_CHAIN. So if IOMMU implementations don't
> > > support sg chaining, we will get data corruption.
> > > Signed-off-by: FUJITA Tomonori <[email protected]>
> >
> > Acked-by: Anil S Keshavamurthy <[email protected]>
> >
>
> Am I correct in believing that this patch is needed only when the
> chaining patches which are presently in git-block are combined with the
> intel-iommu work which is presently in -mm?
Yes, that is correct.
-Anil