2014-10-01 08:46:35

by Liviu Dudau

Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Tue, Sep 30, 2014 at 09:01:14PM +0100, Arnd Bergmann wrote:
> On Tuesday 30 September 2014 20:54:41 Arnd Bergmann wrote:
> > On Tuesday 30 September 2014 18:48:21 Liviu Dudau wrote:
> > > > > > > These are the functions I found that refer to pci_sys_data on arm32:
> > > > > > >
> > > > > > > pcibios_add_bus
> > > > > > > pcibios_remove_bus
> > >
> > > These are only needed if you want to do per HB processing of the bus
> > >
> > > > > > > pcibios_align_resource
> > >
> > > mvebu is the only user of this function.
> > >
> > > > > > > pci_mmap_page_range
> > >
> > > This is only needed when mapping a PCI resource to userspace. Is that your case here?
> > >
> > > > > > > pci_domain_nr
> > > > > > > pci_proc_domain
> > >
> > > We have equivalent functionality in the generic patches for those.
> > >
> >
> > We clearly don't need those functions for the new drivers, but that's not
> > the point. The problem is that when you build a kernel that has both
> > a traditional host bridge driver and a new one in it, you always get those
> > functions and they get called from the PCI core, with incorrect arguments.
>
> FWIW, the last time we discussed this, I think I had suggested that the
> functions that are currently architecture specific and have a generic
> __weak fallback could become function pointers in a per-host structure
> passed to pci_scan_root_bus, either a new structure or an extended
> struct pci_ops. Something along these lines:

I agree with the general idea. But have a look at why host drivers need the add_bus op:
to add MSI information to the bus!! If we take care of MSI in the generic
code there is little need for this function at all.
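
For reference, roughly all such a hook does today is this (sketch modelled on
the mvebu-style drivers; the names are made up, only bus->msi is real):

static void foo_pcie_add_bus(struct pci_bus *bus)
{
	struct foo_pcie *pcie = sys_to_pcie(bus->sysdata);

	/* the hook's only job: hand the host's MSI chip to the new bus */
	bus->msi = pcie->msi_chip;
}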

Best regards,
Liviu

>
> diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
> index 7fc42784becb..3da32fc631d0 100644
> --- a/arch/arm/include/asm/mach/pci.h
> +++ b/arch/arm/include/asm/mach/pci.h
> @@ -36,7 +36,6 @@ struct hw_pci {
> resource_size_t start,
> resource_size_t size,
> resource_size_t align);
> - void (*add_bus)(struct pci_bus *bus);
> void (*remove_bus)(struct pci_bus *bus);
> };
>
> @@ -65,7 +64,6 @@ struct pci_sys_data {
> resource_size_t start,
> resource_size_t size,
> resource_size_t align);
> - void (*add_bus)(struct pci_bus *bus);
> void (*remove_bus)(struct pci_bus *bus);
> void *private_data; /* platform controller private data */
> };
> diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
> index 17a26c17f7f5..3cbcf8dc41e4 100644
> --- a/arch/arm/kernel/bios32.c
> +++ b/arch/arm/kernel/bios32.c
> @@ -360,13 +360,6 @@ void pcibios_fixup_bus(struct pci_bus *bus)
> }
> EXPORT_SYMBOL(pcibios_fixup_bus);
>
> -void pcibios_add_bus(struct pci_bus *bus)
> -{
> - struct pci_sys_data *sys = bus->sysdata;
> - if (sys->add_bus)
> - sys->add_bus(bus);
> -}
> -
> void pcibios_remove_bus(struct pci_bus *bus)
> {
> struct pci_sys_data *sys = bus->sysdata;
> @@ -475,7 +468,6 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
> sys->swizzle = hw->swizzle;
> sys->map_irq = hw->map_irq;
> sys->align_resource = hw->align_resource;
> - sys->add_bus = hw->add_bus;
> sys->remove_bus = hw->remove_bus;
> INIT_LIST_HEAD(&sys->resources);
>
> diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
> index b1315e197ffb..c9a0ee0429e8 100644
> --- a/drivers/pci/host/pci-mvebu.c
> +++ b/drivers/pci/host/pci-mvebu.c
> @@ -716,6 +716,7 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
> static struct pci_ops mvebu_pcie_ops = {
> .read = mvebu_pcie_rd_conf,
> .write = mvebu_pcie_wr_conf,
> + .add_bus = mvebu_pcie_add_bus,
> };
>
> static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
> @@ -823,7 +824,6 @@ static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
> hw.map_irq = of_irq_parse_and_map_pci;
> hw.ops = &mvebu_pcie_ops;
> hw.align_resource = mvebu_pcie_align_resource;
> - hw.add_bus = mvebu_pcie_add_bus;
>
> pci_common_init(&hw);
> }
> diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
> index a63a47a70846..be6d56358320 100644
> --- a/drivers/pci/probe.c
> +++ b/drivers/pci/probe.c
> @@ -1885,6 +1885,8 @@ int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
>
> void __weak pcibios_add_bus(struct pci_bus *bus)
> {
> + if (bus->ops && bus->ops->add_bus)
> + bus->ops->add_bus(bus);
> }
>
> void __weak pcibios_remove_bus(struct pci_bus *bus)
>
> Arnd
>
>

--
====================
| I would like to |
| fix the world, |
| but they're not |
| giving me the |
\ source code! /
---------------
¯\_(ツ)_/¯


2014-10-01 09:39:44

by Arnd Bergmann

Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Wednesday 01 October 2014 09:46:26 Liviu Dudau wrote:
> On Tue, Sep 30, 2014 at 09:01:14PM +0100, Arnd Bergmann wrote:
> > On Tuesday 30 September 2014 20:54:41 Arnd Bergmann wrote:
> > > On Tuesday 30 September 2014 18:48:21 Liviu Dudau wrote:
> > > > > > > > These are the functions I found that refer to pci_sys_data on arm32:
> > > > > > > >
> > > > > > > > pcibios_add_bus
> > > > > > > > pcibios_remove_bus
> > > >
> > > > These are only needed if you want to do per HB processing of the bus
> > > >
> > > > > > > > pcibios_align_resource
> > > >
> > > > mvebu is the only user of this function.
> > > >
> > > > > > > > pci_mmap_page_range
> > > >
> > > > This is only needed when mapping a PCI resource to userspace. Is that your case here?
> > > >
> > > > > > > > pci_domain_nr
> > > > > > > > pci_proc_domain
> > > >
> > > > We have equivalent functionality in the generic patches for those.
> > > >
> > >
> > > We clearly don't need those functions for the new drivers, but that's not
> > > the point. The problem is that when you build a kernel that has both
> > > a traditional host bridge driver and a new one in it, you always get those
> > > functions and they get called from the PCI core, with incorrect arguments.
> >
> > FWIW, the last time we discussed this, I think I had suggested that the
> > functions that are currently architecture specific and have a generic
> > __weak fallback could become function pointers in a per-host structure
> > passed to pci_scan_root_bus, either a new structure or an extended
> > struct pci_ops. Something along these lines:
>
> Agree to the general idea. But have a look why host drivers need the add_bus ops:
> to add MSI information into the bus!! If we take care of the MSI in the generic
> code there is less of a need for this function at all.


Right, if we can eliminate the need for some or all of the functions above,
we don't have to abstract them any more.

pcibios_remove_bus can just go away entirely; we don't have a single driver
on ARM that implements it. pcibios_add_bus, as you say, is just used for MSI
at the moment, and we could get rid of it by moving the msi_chip
reference from pci_bus into pci_host_bridge.
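
Something along these lines, maybe (untested sketch; assumes a new msi_chip
pointer in struct pci_host_bridge and that the find_pci_host_bridge() helper
is usable here; error handling and the irq_set_chip_data() call are omitted):

int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	struct pci_host_bridge *host = find_pci_host_bridge(dev->bus);
	struct msi_chip *chip = host ? host->msi_chip : NULL;

	/* the core finds the MSI chip through the host bridge, so host
	 * drivers no longer need an add_bus hook to set bus->msi */
	if (!chip || !chip->setup_irq)
		return -EINVAL;

	return chip->setup_irq(chip, dev, desc);
}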

The arm32 implementations of pci_domain_nr/pci_proc_domain can probably be
removed if we change the arm32 pcibios_init_hw function to call the new
interfaces that set the domain number.

pci_mmap_page_range could either get generalized some more in an attempt
to have a __weak default implementation that works on ARM, or it could
be changed to lose the dependency on pci_sys_data instead. In either
case, the change would involve using the generic pci_host_bridge_window
list.
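
A rough sketch of what that lookup could look like (untested; assumes
find_pci_host_bridge() is made usable outside drivers/pci/host-bridge.c):

static resource_size_t pci_host_bridge_mem_offset(struct pci_bus *bus,
						  struct resource *res)
{
	struct pci_host_bridge *bridge = find_pci_host_bridge(bus);
	struct pci_host_bridge_window *window;

	/* window->offset is the amount added to a bus address to get the
	 * CPU address, i.e. the equivalent of the arm32 mem_offset */
	list_for_each_entry(window, &bridge->windows, list)
		if (resource_contains(window->res, res))
			return window->offset;

	return 0;
}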

pcibios_align_resource should probably be per host, and we could move
that into a pointer in pci_host_bridge, something like this:

diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index b7c3a5ea1fca..d9cb6c916d54 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -200,11 +200,15 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
int resno, resource_size_t size, resource_size_t align)
{
+ struct pci_host_bridge *host = find_pci_host_bridge(bus);
+ resource_size_t (*alignf)(void *, const struct resource *,
> + resource_size_t, resource_size_t);
struct resource *res = dev->resource + resno;
resource_size_t min;
int ret;

min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
+ alignf = host->align_resource ?: pcibios_align_resource;

/*
* First, try exact prefetching match. Even if a 64-bit
@@ -215,7 +219,7 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
*/
ret = pci_bus_alloc_resource(bus, res, size, align, min,
IORESOURCE_PREFETCH | IORESOURCE_MEM_64,
- pcibios_align_resource, dev);
+ alignf, dev);
if (ret == 0)
return 0;

@@ -227,7 +231,7 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
(IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) {
ret = pci_bus_alloc_resource(bus, res, size, align, min,
IORESOURCE_PREFETCH,
- pcibios_align_resource, dev);
+ alignf, dev);
if (ret == 0)
return 0;
}
@@ -240,7 +244,7 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
*/
if (res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64))
ret = pci_bus_alloc_resource(bus, res, size, align, min, 0,
- pcibios_align_resource, dev);
+ alignf, dev);

return ret;
}


If we decide constantly calling find_pci_host_bridge() is too expensive, we can
be more clever about it.

Arnd

2014-10-07 12:07:12

by Lorenzo Pieralisi

Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Wed, Oct 01, 2014 at 10:38:45AM +0100, Arnd Bergmann wrote:

[...]

> pci_mmap_page_range could either get generalized some more in an attempt
> to have a __weak default implementation that works on ARM, or it could
> be changed to lose the dependency on pci_sys_data instead. In either
> case, the change would involve using the generic pci_host_bridge_window
> list.

On ARM pci_mmap_page_range requires pci_sys_data to retrieve its
mem_offset parameter. I had a look, and I do not understand *why*
it is required in that function, so I am asking. That function
is basically used to map PCI resources to userspace, IIUC, through
/proc or /sysfs file mappings. As far as I understand those mappings
expect VMA pgoff to be the CPU address when files representing resources
are mmapped from /proc and 0 when mmapped from /sys (I mean from
userspace, then VMA pgoff should be updated by the kernel to map the
resource).

The question is: why should pci_mmap_page_range() apply an additional
shift to the VMA pgoff based on pci_sys_data.mem_offset, which represents
the cpu->bus address offset? I do not understand that. PowerPC
does not seem to apply that fix-up (in PowerPC __pci_mmap_make_offset there
is commented-out code which prevents the pci_mem_offset shift from being
applied). I think it all boils down to what the userspace interface is
expecting when the memory areas are mmapped; any comments on
this are appreciated.

Thanks,
Lorenzo

2014-10-07 13:53:13

by Arnd Bergmann

Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Tuesday 07 October 2014 13:06:59 Lorenzo Pieralisi wrote:
> On Wed, Oct 01, 2014 at 10:38:45AM +0100, Arnd Bergmann wrote:
>
> [...]
>
> > pci_mmap_page_range could either get generalized some more in an attempt
> > to have a __weak default implementation that works on ARM, or it could
> > be changed to lose the dependency on pci_sys_data instead. In either
> > case, the change would involve using the generic pci_host_bridge_window
> > list.
>
> On ARM pci_mmap_page_range requires pci_sys_data to retrieve its
> mem_offset parameter. I had a look, and I do not understand *why*
> it is required in that function, so I am asking. That function
> is basically used to map PCI resources to userspace, IIUC, through
> /proc or /sysfs file mappings. As far as I understand those mappings
> expect VMA pgoff to be the CPU address when files representing resources
> are mmapped from /proc and 0 when mmapped from /sys (I mean from
> userspace, then VMA pgoff should be updated by the kernel to map the
> resource).

Applying the mem_offset is certainly the more intuitive way, since
that lets you read the PCI BAR values from a device and access the
device with the appropriate offsets.

> Question is: why pci_mmap_page_range() should apply an additional
> shift to the VMA pgoff based on pci_sys_data.mem_offset, which represents
> the offset from cpu->bus offset. I do not understand that. PowerPC
> does not seem to apply that fix-up (in PowerPC __pci_mmap_make_offset there
> is commented out code which prevents the pci_mem_offset shift to be
> applied). I think it all boils down to what the userspace interface is
> expecting when the memory areas are mmapped, if anyone has comments on
> this that is appreciated.

The important part is certainly that whatever transformation is done
by pci_resource_to_user() gets undone by __pci_mmap_make_offset().

In case of PowerPC and Microblaze, the mem_offset handling is commented
out in both, to work around X11 trying to use the same values on
/dev/mem. However, they do have the respective fixup for io_offset.

sparc applies the offset in both places for both io_offset and mem_offset.
xtensa applies only io_offset in __pci_mmap_make_offset but neither
in pci_resource_to_user. This probably works because the mem_offset is
always zero there.
mips applies a different fixup (for 36-bit addressing), but not the
mem_offset.

Every other architecture applies no offset here, neither in __pci_mmap_make_offset/pci_mmap_page_range nor in pci_resource_to_user.

The only hint I could find for how the ARM version came to be is
from the historic kernel tree git log for linux-2.5.42, which added
the current code as

2002/10/13 11:05:47+01:00 rmk
[ARM] Update pcibios_enable_device, supply pci_mmap_page_range()
Update pcibios_enable_device to only enable requested resources,
mainly for IDE. Supply a pci_mmap_page_range() function to allow
user space to mmap PCI regions.

At that point, only two platforms had a nonzero mem_offset:
footbridge/dc21285 and integrator/pci_v3. Both were using VGA,
and presumably used this to make X work. (rmk might remember
details).

The code at the time matched what powerpc and sparc did, but then
both implemented pci_resource_to_user() in order for libpciaccess
to work correctly (bcea1db16b for sparc, 463ce0e103f for powerpc),
and later powerpc changed it again to not apply the offset in
pci_resource_to_user or pci_mmap_page_range in 396a1a5832ae.

Arnd

2014-10-07 14:47:57

by Lorenzo Pieralisi

Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Tue, Oct 07, 2014 at 02:52:27PM +0100, Arnd Bergmann wrote:
> On Tuesday 07 October 2014 13:06:59 Lorenzo Pieralisi wrote:
> > On Wed, Oct 01, 2014 at 10:38:45AM +0100, Arnd Bergmann wrote:
> >
> > [...]
> >
> > > pci_mmap_page_range could either get generalized some more in an attempt
> > > to have a __weak default implementation that works on ARM, or it could
> > > be changed to lose the dependency on pci_sys_data instead. In either
> > > case, the change would involve using the generic pci_host_bridge_window
> > > list.
> >
> > On ARM pci_mmap_page_range requires pci_sys_data to retrieve its
> > mem_offset parameter. I had a look, and I do not understand *why*
> > it is required in that function, so I am asking. That function
> > is basically used to map PCI resources to userspace, IIUC, through
> > /proc or /sysfs file mappings. As far as I understand those mappings
> > expect VMA pgoff to be the CPU address when files representing resources
> > are mmapped from /proc and 0 when mmapped from /sys (I mean from
> > userspace, then VMA pgoff should be updated by the kernel to map the
> > resource).
>
> Applying the mem_offset is certainly the more intuitive way, since
> that lets you read the PCI BAR values from a device and access the
> device with the appropriate offsets.

Ok, but I am referring to this snippet (drivers/pci/pci-sysfs.c):

/* pci_mmap_page_range() expects the same kind of entry as coming
* from /proc/bus/pci/ which is a "user visible" value. If this is
* different from the resource itself, arch will do necessary fixup.
*/
pci_resource_to_user(pdev, i, res, &start, &end);

--> Here start represents a CPU physical address, if pci_resource_to_user()
does not fix it up, correct ?

vma->vm_pgoff += start >> PAGE_SHIFT;

[...]

return pci_mmap_page_range(...);

pci_mmap_page_range() applies (mem_offset >> PAGE_SHIFT) to pgoff in the
ARM implementation.

Is not there a mismatch here on platforms where mem_offset != 0 ?

> > Question is: why pci_mmap_page_range() should apply an additional
> > shift to the VMA pgoff based on pci_sys_data.mem_offset, which represents
> > the offset from cpu->bus offset. I do not understand that. PowerPC
> > does not seem to apply that fix-up (in PowerPC __pci_mmap_make_offset there
> > is commented out code which prevents the pci_mem_offset shift to be
> > applied). I think it all boils down to what the userspace interface is
> > expecting when the memory areas are mmapped, if anyone has comments on
> > this that is appreciated.
>
> The important part is certainly that whatever transformation is done
> by pci_resource_to_user() gets undone by __pci_mmap_make_offset().

Exactly, it does not seem to be the case above, that's why I asked.

> In case of PowerPC and Microblaze, the mem_offset handling is commented
> out in both, to work around X11 trying to use the same values on
> /dev/mem. However, they do have the respective fixup for io_offset.
>
> sparc applies the offset in both places for both io_offset and mem_offset.
> xtensa applies only io_offset in __pci_mmap_make_offset but neither
> in pci_resource_to_user. This probably works because the mem_offset is
> always zero there.
> mips applies a different fixup (for 36-bit addressing), but not the
> mem_offset.
>
> Every other architecture applies no offset here, neither in __pci_mmap_make_offset/pci_mmap_page_range nor in pci_resource_to_user
>
> The only hint I could find for how the ARM version came to be is
> from the historic kernel tree git log for linux-2.5.42, which added
> the current code as
>
> 2002/10/13 11:05:47+01:00 rmk
> [ARM] Update pcibios_enable_device, supply pci_mmap_page_range()
> Update pcibios_enable_device to only enable requested resources,
> mainly for IDE. Supply a pci_mmap_page_range() function to allow
> user space to mmap PCI regions.
>
> At that point, only two platforms had a nonzero mem_offset:
> footbridge/dc21285 and integrator/pci_v3. Both were using VGA,
> and presumably used this to make X work. (rmk might remember
> details).

I think that, as I mentioned, it boils down to what the userspace
interfaces (/proc and /sys, and they seem to differ) are supposed to be
passed by userspace processes upon mmap.

> The code at the time matched what powerpc and sparc did, but then
> both implemented pci_resource_to_user() in order for libpciaccess
> to work correctly (bcea1db16b for sparc, 463ce0e103f for powerpc),
> and later powerpc changed it again to not apply the offset in
> pci_resource_to_user or pci_mmap_page_range in 396a1a5832ae.

I will keep investigating, thank you for your help, any further comments
appreciated.

Lorenzo

2014-10-07 21:39:59

by Arnd Bergmann

Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Tuesday 07 October 2014 15:47:50 Lorenzo Pieralisi wrote:
> On Tue, Oct 07, 2014 at 02:52:27PM +0100, Arnd Bergmann wrote:
> > On Tuesday 07 October 2014 13:06:59 Lorenzo Pieralisi wrote:
> > > On Wed, Oct 01, 2014 at 10:38:45AM +0100, Arnd Bergmann wrote:
> > >
> > > [...]
> > >
> > > > pci_mmap_page_range could either get generalized some more in an attempt
> > > > to have a __weak default implementation that works on ARM, or it could
> > > > be changed to lose the dependency on pci_sys_data instead. In either
> > > > case, the change would involve using the generic pci_host_bridge_window
> > > > list.
> > >
> > > On ARM pci_mmap_page_range requires pci_sys_data to retrieve its
> > > mem_offset parameter. I had a look, and I do not understand *why*
> > > it is required in that function, so I am asking. That function
> > > is basically used to map PCI resources to userspace, IIUC, through
> > > /proc or /sysfs file mappings. As far as I understand those mappings
> > > expect VMA pgoff to be the CPU address when files representing resources
> > > are mmapped from /proc and 0 when mmapped from /sys (I mean from
> > > userspace, then VMA pgoff should be updated by the kernel to map the
> > > resource).
> >
> > Applying the mem_offset is certainly the more intuitive way, since
> > that lets you read the PCI BAR values from a device and access the
> > device with the appropriate offsets.
>
> Ok, but I am referring to this snippet (drivers/pci/pci-sysfs.c):
>
> /* pci_mmap_page_range() expects the same kind of entry as coming
> * from /proc/bus/pci/ which is a "user visible" value. If this is
> * different from the resource itself, arch will do necessary fixup.
> */
> pci_resource_to_user(pdev, i, res, &start, &end);
>
> --> Here start represents a CPU physical address, if pci_resource_to_user()
> does not fix it up, correct ?
>
> vma->vm_pgoff += start >> PAGE_SHIFT;
>
> [...]
>
> return pci_mmap_page_range(...);
>
> pci_mmap_page_range() applies (mem_offset >> PAGE_SHIFT) to pgoff in the
> ARM implemention.
>
> Is not there a mismatch here on platforms where mem_offset != 0 ?

Yes, I think that's right: ARM never gained its own pci_resource_to_user()
implementation, presumably because nobody ran into this problem and
debugged it all the way.

Arnd

2014-10-08 10:19:52

by Lorenzo Pieralisi

Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Tue, Oct 07, 2014 at 10:39:47PM +0100, Arnd Bergmann wrote:
> On Tuesday 07 October 2014 15:47:50 Lorenzo Pieralisi wrote:
> > On Tue, Oct 07, 2014 at 02:52:27PM +0100, Arnd Bergmann wrote:
> > > On Tuesday 07 October 2014 13:06:59 Lorenzo Pieralisi wrote:
> > > > On Wed, Oct 01, 2014 at 10:38:45AM +0100, Arnd Bergmann wrote:
> > > >
> > > > [...]
> > > >
> > > > > pci_mmap_page_range could either get generalized some more in an attempt
> > > > > to have a __weak default implementation that works on ARM, or it could
> > > > > be changed to lose the dependency on pci_sys_data instead. In either
> > > > > case, the change would involve using the generic pci_host_bridge_window
> > > > > list.
> > > >
> > > > On ARM pci_mmap_page_range requires pci_sys_data to retrieve its
> > > > mem_offset parameter. I had a look, and I do not understand *why*
> > > > it is required in that function, so I am asking. That function
> > > > is basically used to map PCI resources to userspace, IIUC, through
> > > > /proc or /sysfs file mappings. As far as I understand those mappings
> > > > expect VMA pgoff to be the CPU address when files representing resources
> > > > are mmapped from /proc and 0 when mmapped from /sys (I mean from
> > > > userspace, then VMA pgoff should be updated by the kernel to map the
> > > > resource).
> > >
> > > Applying the mem_offset is certainly the more intuitive way, since
> > > that lets you read the PCI BAR values from a device and access the
> > > device with the appropriate offsets.
> >
> > Ok, but I am referring to this snippet (drivers/pci/pci-sysfs.c):
> >
> > /* pci_mmap_page_range() expects the same kind of entry as coming
> > * from /proc/bus/pci/ which is a "user visible" value. If this is
> > * different from the resource itself, arch will do necessary fixup.
> > */
> > pci_resource_to_user(pdev, i, res, &start, &end);
> >
> > --> Here start represents a CPU physical address, if pci_resource_to_user()
> > does not fix it up, correct ?
> >
> > vma->vm_pgoff += start >> PAGE_SHIFT;
> >
> > [...]
> >
> > return pci_mmap_page_range(...);
> >
> > pci_mmap_page_range() applies (mem_offset >> PAGE_SHIFT) to pgoff in the
> > ARM implemention.
> >
> > Is not there a mismatch here on platforms where mem_offset != 0 ?
>
> Yes, I think that's right: ARM never gained its own pci_resource_to_user()
> implementation, presumably because nobody ran into this problem and
> debugged it all the way.

Ok. So, unless I am missing something, on platforms with mem_offset != 0
the /proc and /sys interfaces for remapping PCI resources can't work (IIUC
the proc interface expects the user to pass in the resource addresses as
seen from /proc/bus/pci/devices - which are not BAR values. Even if the
user passed the BAR value to mmap, pci_mmap_fits() in proc_bus_pci_mmap()
would fail since it compares the pgoff to resource values, which are not
BAR values).

As things stand I think we can safely remove the mem_offset (and
pci_sys_data dependency) from pci_mmap_page_range(). I do not think
we can break userspace in any way, basically because it can't work at
the moment, again, happy to be corrected if I am wrong, please shout.

Or we can add mem_offset to the host bridge (after all, architectures like
PowerPC and Microblaze have a pci_mem_offset variable in their host
controllers), but still, this removes the pci_sys_data dependency but does
not solve the pci_mmap_page_range() issue.

Lorenzo

2014-10-08 14:47:59

by Arnd Bergmann

Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Wednesday 08 October 2014 11:19:43 Lorenzo Pieralisi wrote:
>
> Ok. So, unless I am missing something, on platform with mem_offset != 0
> /proc and /sys interfaces for remapping PCI resources can't work (IIUC
> the proc interface expects the user to pass in the resource address as
> seen from /proc/bus/pci/devices - which are not BAR values. Even if the
> user passed the BAR value to mmap, pci_mmap_fits() in proc_bus_pci_mmap()
> would fail since it compares the pgoff to resource values, which are not
> BAR values).

I think you are right for the sysfs interface; that one can't possibly
work because of the incorrect address computation.

For the /procfs interface, I think it can work as long as the offsets
used there are coming from the config space dump in /proc/bus/pci/*
rather than from the /sys/bus/pci/devices/*/resource file.

> As things stand I think we can safely remove the mem_offset (and
> pci_sys_data dependency) from pci_mmap_page_range(). I do not think
> we can break userspace in any way, basically because it can't work at
> the moment, again, happy to be corrected if I am wrong, please shout.

Please look at the procfs interface again. That one can be defined
in two ways (either like sparc and arm, or like powerpc and microblaze)
but either one should be able to work with user space that expects
that interface and break with user space that expects the other one.

> Or we can add mem_offset to the host bridge (after all architectures like
> PowerPC and Microblaze have a pci_mem_offset variable in their host
> controllers), but still, this removes pci_sys_data dependency but does
> not solve the pci_mmap_page_range() issue.

The host bridge already stores the mem_offset in terms of the resource
list, so we could readily use that, except that it might break the
powerpc hack if that is still in use.

Arnd

2014-10-09 09:04:58

by Lorenzo Pieralisi

Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Wed, Oct 08, 2014 at 03:47:43PM +0100, Arnd Bergmann wrote:
> On Wednesday 08 October 2014 11:19:43 Lorenzo Pieralisi wrote:
> >
> > Ok. So, unless I am missing something, on platform with mem_offset != 0
> > /proc and /sys interfaces for remapping PCI resources can't work (IIUC
> > the proc interface expects the user to pass in the resource address as
> > seen from /proc/bus/pci/devices - which are not BAR values. Even if the
> > user passed the BAR value to mmap, pci_mmap_fits() in proc_bus_pci_mmap()
> > would fail since it compares the pgoff to resource values, which are not
> > BAR values).
>
> I think you are right for the sysfs interface, that one can't possibly
> work because of the incorrect address computation.
>
> For the /procfs interface, I think it can work as long as the offsets
> used there are coming from the config space dump in /proc/bus/pci/*
> rather than from the /sys/bus/pci/devices/*/resource file.
>
> > As things stand I think we can safely remove the mem_offset (and
> > pci_sys_data dependency) from pci_mmap_page_range(). I do not think
> > we can break userspace in any way, basically because it can't work at
> > the moment, again, happy to be corrected if I am wrong, please shout.
>
> Please look at the procfs interface again. That one can be defined
> in two ways (either like sparc and arm, or like powerpc and microblaze)
> but either one should be able to work with user space that expects
> that interface and break with user space that expects the other one.

I agree as far as pci_mmap_page_range() is concerned, but I am
referring to the pci_mmap_fits() implementation here:

start = vma->vm_pgoff;
size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
if (start >= pci_start && start < pci_start + size &&
start + nr <= pci_start + size)
return 1;
return 0;

pci_mmap_fits(), when mapping from procfs, always checks the offset against
resources, which are fixed-up addresses. If we passed the values dumped
from the device config space (as pci_mmap_page_range() expects on arm) IMHO
the check above would fail (always referring to platforms where
mem_offset != 0).

The last changes were introduced by commit 8c05cd08a, whose commit log adds
to my confusion:

"[...] I think what we want here is for pci_start to be 0 when mmap_api ==
PCI_MMAP_PROCFS.[...]"

But that's not what the code does.

I will try to grab an integrator board to give it a go.

> > Or we can add mem_offset to the host bridge (after all architectures like
> > PowerPC and Microblaze have a pci_mem_offset variable in their host
> > controllers), but still, this removes pci_sys_data dependency but does
> > not solve the pci_mmap_page_range() issue.
>
> The host bridge already stores the mem_offset in terms of the resource
> list, so we could readily use that, except that it might break the
> powerpc hack if that is still in use.

Well, yes, I am not saying it can't be done by using the resources list,
I am just trying to understand if that's really useful.

Thank you !
Lorenzo

2014-10-09 10:52:05

by Arnd Bergmann

Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Thursday 09 October 2014 10:04:20 Lorenzo Pieralisi wrote:
> On Wed, Oct 08, 2014 at 03:47:43PM +0100, Arnd Bergmann wrote:
> > On Wednesday 08 October 2014 11:19:43 Lorenzo Pieralisi wrote:

> > Please look at the procfs interface again. That one can be defined
> > in two ways (either like sparc and arm, or like powerpc and microblaze)
> > but either one should be able to work with user space that expects
> > that interface and break with user space that expects the other one.
>
> I agree as long as pci_mmap_page_range() is concerned, but I am
> referring to the pci_mmap_fits() implementation here:
>
> start = vma->vm_pgoff;
> size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
> pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
> pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
> if (start >= pci_start && start < pci_start + size &&
> start + nr <= pci_start + size)
> return 1;
> return 0;
>
> pci_mmap_fits(), when mapping from procfs always check the offset against
> resources, which are fixed-up addresses. If we passed the values dumped
> from the device config space (as pci_mmap_page_range() expects on arm) IMHO
> the check above would fail (always referring to platforms where
> mem_offset != 0).

Ah, I see it now too.

> Last changes where introduced by commit 8c05cd08a, whose commit log adds
> to my confusion:
>
> "[...] I think what we want here is for pci_start to be 0 when mmap_api ==
> PCI_MMAP_PROCFS.[...]"
>
> But that's not what the code does.

My best guess is that this is a typo and that Darrick meant PCI_MMAP_SYSFS
in the changelog, which is the same thing that the code does. It's also
the sensible thing to do.

This probably means that the procfs interface is now also broken on
sparc.

> I will try to grab an integrator board to give it a go.

Ok, good idea.

> > > Or we can add mem_offset to the host bridge (after all architectures like
> > > PowerPC and Microblaze have a pci_mem_offset variable in their host
> > > controllers), but still, this removes pci_sys_data dependency but does
> > > not solve the pci_mmap_page_range() issue.
> >
> > The host bridge already stores the mem_offset in terms of the resource
> > list, so we could readily use that, except that it might break the
> > powerpc hack if that is still in use.
>
> Well, yes, I am not saying it can't be done by using the resources list,
> I am just trying to understand if that's really useful.

The PCI core tries to be ready for PCI host bridges that have multiple
discontiguous memory spaces with different offsets, although I don't know
of anybody who has that. However, if we decide to implement a generic
pci_mmap_page_range that tries to take the offset into account, we should
use the resource list in the host bridge because it can tell us the correct
offsets.

However, given what you found, the procfs interface being broken since
2010 on both architectures (arm32 and sparc) that try to honor the offset,
we should probably go back to your previous suggestion of removing
the offset handling, which would make it possible to use the procfs
interface and the sysfs interface on all architectures.

Would you be able to prepare a patch that does this and circulate that
with the sparc, powerpc and microblaze maintainers as well as Darrick
and Martin who were involved with the pci_mmap_fits change?

Arnd

2014-10-10 13:58:13

by Lorenzo Pieralisi

Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Thu, Oct 09, 2014 at 11:51:43AM +0100, Arnd Bergmann wrote:

[...]

> > Last changes where introduced by commit 8c05cd08a, whose commit log adds
> > to my confusion:
> >
> > "[...] I think what we want here is for pci_start to be 0 when mmap_api ==
> > PCI_MMAP_PROCFS.[...]"
> >
> > But that's not what the code does.
>
> My best guess is that this is a typo and that Darrick meant PCI_MMAP_SYSFS
> in the changelog, which is the same thing that the code does. It's also
> the sensible thing to do.
>
> This probably means that the procfs interface is now also broken on
> sparc.
>
> > I will try to grab an integrator board to give it a go.
>
> Ok, good idea.

Grabbed one and tested it; my theory was correct, I can't map PCI resources
to userspace. Actually, if I pass the resource offset as a fixed-up address,
mmap succeeds through proc, but it does not mmap the resource: it maps
resource + mem_offset, which happens to be RAM :D for the PCI slot I am
using.

I am testing on an oldish (3.16) kernel since I am having trouble with
mainline PCI and my network adapter on integrator, but I do not see why that
would be a problem here; this bug has been there forever.

By removing mem_offset from pci_mmap_page_range() everything works fine,
both proc and sys mappings are ok.

> > > > Or we can add mem_offset to the host bridge (after all architectures like
> > > > PowerPC and Microblaze have a pci_mem_offset variable in their host
> > > > controllers), but still, this removes pci_sys_data dependency but does
> > > > not solve the pci_mmap_page_range() issue.
> > >
> > > The host bridge already stores the mem_offset in terms of the resource
> > > list, so we could readily use that, except that it might break the
> > > powerpc hack if that is still in use.
> >
> > Well, yes, I am not saying it can't be done by using the resources list,
> > I am just trying to understand if that's really useful.
>
> The PCI core tries to be ready for PCI host bridges that have multiple
> discontiguous memory spaces with different offsets, although I don't know
> of anybody has that. However if we decide to implement a generic
> pci_mmap_page_range that tries to take the offset into account, we should
> use the resource list in the host bridge because it can tell us the correct
> offsets.
>
> However, given what you found, the procfs interface being broken since
> 2010 on both architectures (arm32 and sparc) that try to honor the offset,
> we should probably go back to your previous suggestion of removing
> the offset handling, which would make it possible to use the procfs
> interface and the sysfs interface on all architectures.
>
> Would you be able to prepare a patch that does this and circulate that
> with the sparc, powerpc and microblaze maintainers as well as Darrick
> and Martin who were involved with the pci_mmap_fits change?

Yes, but let's step back a second. I think that the proc interface
should expect an offset as passed to the user in /proc/bus/pci/devices,
and there the resource is exposed through pci_resource_to_user().

Hence, pci_mmap_fits() should check the offset against the
resource filtered through pci_resource_to_user(); job done, the patch
is trivial, and it does what pci_resource_to_user() was meant for IMHO.
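
Roughly something like this in pci_mmap_fits() (untested sketch of that
trivial change):

	if (mmap_api == PCI_MMAP_PROCFS) {
		resource_size_t user_start, user_end;

		/* compare against the user-visible address, i.e. what
		 * /proc/bus/pci/devices reports via pci_resource_to_user() */
		pci_resource_to_user(pdev, resno, &pdev->resource[resno],
				     &user_start, &user_end);
		pci_start = user_start >> PAGE_SHIFT;
	} else {
		pci_start = 0;
	}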

Then we have to decide what to do with arm32 code:

1) we remove mem_offset from pci_mmap_page_range() (and rely on default
pci_resource_to_user())

or

2) we provide pci_resource_to_user() for arm32 which does the CPU->bus
conversion for us (and leave mem_offset as-is in pci_mmap_page_range())
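
For 2), a rough sketch of what an arm32 implementation could look like
(untested; it would also need HAVE_ARCH_PCI_RESOURCE_TO_USER defined in
asm/pci.h):

void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_sys_data *sys = dev->bus->sysdata;
	resource_size_t offset = 0;

	/* undo the bus->CPU fixup so userspace sees BAR (bus) addresses */
	if (rsrc->flags & IORESOURCE_MEM)
		offset = sys->mem_offset;
	else if (rsrc->flags & IORESOURCE_IO)
		offset = sys->io_offset;

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}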

Thoughts ?

Thanks,
Lorenzo

2014-10-10 18:31:41

by Arnd Bergmann

Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Friday 10 October 2014 14:58:04 Lorenzo Pieralisi wrote:
> On Thu, Oct 09, 2014 at 11:51:43AM +0100, Arnd Bergmann wrote:
>
> > > Last changes where introduced by commit 8c05cd08a, whose commit log adds
> > > to my confusion:
> > >
> > > "[...] I think what we want here is for pci_start to be 0 when mmap_api ==
> > > PCI_MMAP_PROCFS.[...]"
> > >
> > > But that's not what the code does.
> >
> > My best guess is that this is a typo and that Darrick meant PCI_MMAP_SYSFS
> > in the changelog, which is the same thing that the code does. It's also
> > the sensible thing to do.
> >
> > This probably means that the procfs interface is now also broken on
> > sparc.
> >
> > > I will try to grab an integrator board to give it a go.
> >
> > Ok, good idea.
>
> Grabbed, tested it, my theory was correct, I can't map PCI resources
> to userspace. Actually, if I pass resource offset as a fixed-up address, mmap
> succeeds through proc, but it does not mmap the resource, it maps
> the resource + mem_offset that happens to be RAM :D for the PCI slot I am
> using.
>
> I am testing on an oldish (3.16) kernel since I am having trouble with
> mainline PCI and my network adapter on integrator, but I do not see why this
> is a problem, this bug has been there forever.

I would guess that almost the only users of the sysfs and procfs
interfaces are Xorg drivers; you certainly don't need them to get
a network adapter working.

> By removing mem_offset from pci_mmap_page_range() everything works fine,
> both proc and sys mappings are ok.

Ok, thanks for confirming!

> > However, given what you found, the procfs interface being broken since
> > 2010 on both architectures (arm32 and sparc) that try to honor the offset,
> > we should probably go back to your previous suggestion of removing
> > the offset handling, which would make it possible to use the procfs
> > interface and the sysfs interface on all architectures.
> >
> > Would you be able to prepare a patch that does this and circulate that
> > with the sparc, powerpc and microblaze maintainers as well as Darrick
> > and Martin who were involved with the pci_mmap_fits change?
>
> Yes, but let's step back a second. I think that the proc interface
> should expect an offset as passed to the user in /proc/bus/pci/devices,
> and there the resource is exposed through pci_resource_to_user().
>
> Hence, the pci_mmap_fits() should check the offset against the
> resource filtered through pci_resource_to_user(), job done, patch
> is trivial, and does what pci_resource_to_user() was meant for IMHO.

My point was that there is no reason why sparc and powerpc should
do this differently. At the moment they do, and sparc is broken,
as you proved. We can either fix sparc to restore the old behavior
by adding pci_resource_to_user to pci_mmap_fits, or make it
do what powerpc does, essentially removing the memory space handling
from pci_resource_to_user.

Whatever we do for sparc is probably what we need to do on ARM as well,
except that ARM has been broken for a longer time than sparc.

> Then we have to decide what to do with arm32 code:
>
> 1) we remove mem_offset from pci_mmap_page_range() (and rely on default
> pci_resource_to_user())
>
> or
>
> 2) we provide pci_resource_to_user() for arm32 which does the CPU->bus
> conversion for us (and leave mem_offset as-is in pci_mmap_range())

I'd vote for 1) to get it in line with the only working architectures
that currently use a nonzero offset, but Russell needs to have the final
word on this, and I still think we have to involve the sparc and powerpc
maintainers as well, hoping to find a common solution for everybody.

Making a user space interface behave differently based on the CPU
architecture is a bad idea.

Arnd

2014-10-13 09:36:38

by Lorenzo Pieralisi

Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Fri, Oct 10, 2014 at 07:31:26PM +0100, Arnd Bergmann wrote:
> On Friday 10 October 2014 14:58:04 Lorenzo Pieralisi wrote:
> > On Thu, Oct 09, 2014 at 11:51:43AM +0100, Arnd Bergmann wrote:
> >
> > > > Last changes where introduced by commit 8c05cd08a, whose commit log adds
> > > > to my confusion:
> > > >
> > > > "[...] I think what we want here is for pci_start to be 0 when mmap_api ==
> > > > PCI_MMAP_PROCFS.[...]"
> > > >
> > > > But that's not what the code does.
> > >
> > > My best guess is that this is a typo and that Darrick meant PCI_MMAP_SYSFS
> > > in the changelog, which is the same thing that the code does. It's also
> > > the sensible thing to do.
> > >
> > > This probably means that the procfs interface is now also broken on
> > > sparc.
> > >
> > > > I will try to grab an integrator board to give it a go.
> > >
> > > Ok, good idea.
> >
> > Grabbed, tested it, my theory was correct, I can't map PCI resources
> > to userspace. Actually, if I pass resource offset as a fixed-up address, mmap
> > succeeds through proc, but it does not mmap the resource, it maps
> > the resource + mem_offset that happens to be RAM :D for the PCI slot I am
> > using.
> >
> > I am testing on an oldish (3.16) kernel since I am having trouble with
> > mainline PCI and my network adapter on integrator, but I do not see why this
> > is a problem, this bug has been there forever.
>
> I would guess that almost the only users of the sysfs and procfs
> interfaces are Xorg drivers, you certainly don't need it to get
> a network adapter working.

The issue I am facing is not related to the PCI mmap implementation;
that's certainly broken, but it does not stop me from using the board.

[...]

> > By removing mem_offset from pci_mmap_page_range() everything works fine,
> > both proc and sys mappings are ok.
>
> Ok, thanks for confirming!
>
> > > However, given what you found, the procfs interface being broken since
> > > 2010 on both architectures (arm32 and sparc) that try to honor the offset,
> > > we should probably go back to your previous suggestion of removing
> > > the offset handling, which would make it possible to use the procfs
> > > interface and the sysfs interface on all architectures.
> > >
> > > Would you be able to prepare a patch that does this and circulate that
> > > with the sparc, powerpc and microblaze maintainers as well as Darrick
> > > and Martin who were involved with the pci_mmap_fits change?
> >
> > Yes, but let's step back a second. I think that the proc interface
> > should expect an offset as passed to the user in /proc/bus/pci/devices,
> > and there the resource is exposed through pci_resource_to_user().
> >
> > Hence, the pci_mmap_fits() should check the offset against the
> > resource filtered through pci_resource_to_user(), job done, patch
> > is trivial, and does what pci_resource_to_user() was meant for IMHO.
>
> My point was that there is no reason why sparc and powerpc should
> do this differently. At the moment they do and sparc is broken
> as you proved. We can either fix sparc to restore the old behavior
> by adding pci_resource_to_user to pci_mmap_fits, or by making it
> do what powerpc does, essentially removing the memory space handling
> from pci_resource_to_user.
>
> Whatever we do for sparc is probably what we need to do on ARM as well,
> except that ARM has been broken for a longer time than sparc.
>
> > Then we have to decide what to do with arm32 code:
> >
> > 1) we remove mem_offset from pci_mmap_page_range() (and rely on default
> > pci_resource_to_user())
> >
> > or
> >
> > 2) we provide pci_resource_to_user() for arm32 which does the CPU->bus
> > conversion for us (and leave mem_offset as-is in pci_mmap_range())
>
> I'd vote for 1) to get it in line with the only working architectures
> that currently use a nonzero offset, but Russell needs to have the final
> word on this, and I still think we have to involve the sparc and powerpc
> maintainers as well, hoping to find a common solution for everybody.
>
> Making a user space interface behave differently based on the CPU
> architecture is a bad idea.

I agree with you; I will put together a patchset and copy all the people
who should be involved.

Lorenzo

2014-10-22 15:59:28

by Lorenzo Pieralisi

Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Wed, Oct 01, 2014 at 10:38:45AM +0100, Arnd Bergmann wrote:

[...]

> The arm32 implementations of pci_domain_nr/pci_proc_domain can probably be
> removed if we change the arm32 pcibios_init_hw function to call the new
> interfaces that set the domain number.

I wished, but it is a bit more complicated than I thought, unfortunately,
mostly because some drivers, e.g. cns3xxx, set the domain numbers
statically in pci_sys_data, and this creates a chain of dependencies that is
not easy to untangle. I think cns3xxx is the only legacy driver that "uses"
the domain number (in pci_sys_data) in a way that clashes with the
generic domain_nr implementation; I need to give it more thought.

> pci_mmap_page_range could either get generalized some more in an attempt
> to have a __weak default implementation that works on ARM, or it could
> be changed to lose the dependency on pci_sys_data instead. In either
> case, the change would involve using the generic pci_host_bridge_window
> list.

I need to repost my series, but I *think* we can consider the dependency on
pci_sys_data gone in pci_mmap_page_range().

> pcibios_align_resource should probably be per host, and we could move
> that into a pointer in pci_host_bridge, something like this:

Yes, and that's likely to be true for add_bus too. I wonder what's the
best course of action. Putting together all the bits and pieces required
to remove the PCI bios dependency from this patch can take a while. I wonder
whether we should aim for merging this driver (rebased on top of my port to the
new parse ranges API) with the ARM/ARM64 ifdeffery and clean it up later,
or aim for the whole thing at once; I am just worried it can take us a while.

Lorenzo

>
> diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
> index b7c3a5ea1fca..d9cb6c916d54 100644
> --- a/drivers/pci/setup-res.c
> +++ b/drivers/pci/setup-res.c
> @@ -200,11 +200,15 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
> static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
> int resno, resource_size_t size, resource_size_t align)
> {
> + struct pci_host_bridge *host = find_pci_host_bridge(bus);
> + resource_size_t (*alignf)(void *, const struct resource *,
> > + resource_size_t, resource_size_t);
> struct resource *res = dev->resource + resno;
> resource_size_t min;
> int ret;
>
> min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
> + alignf = host->align_resource ?: pcibios_align_resource;
>
> /*
> * First, try exact prefetching match. Even if a 64-bit
> @@ -215,7 +219,7 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
> */
> ret = pci_bus_alloc_resource(bus, res, size, align, min,
> IORESOURCE_PREFETCH | IORESOURCE_MEM_64,
> - pcibios_align_resource, dev);
> + alignf, dev);
> if (ret == 0)
> return 0;
>
> @@ -227,7 +231,7 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
> (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) {
> ret = pci_bus_alloc_resource(bus, res, size, align, min,
> IORESOURCE_PREFETCH,
> - pcibios_align_resource, dev);
> + alignf, dev);
> if (ret == 0)
> return 0;
> }
> @@ -240,7 +244,7 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
> */
> if (res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64))
> ret = pci_bus_alloc_resource(bus, res, size, align, min, 0,
> - pcibios_align_resource, dev);
> + alignf, dev);
>
> return ret;
> }
>
>
> If we decide constantly calling find_pci_host_bridge() is too expensive, we can
> be more clever about it.
>
> Arnd
>
>

2014-10-22 16:49:38

by Bjorn Helgaas

Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Wed, Oct 22, 2014 at 9:59 AM, Lorenzo Pieralisi
<[email protected]> wrote:

> ... I wonder what's the
> best course of action. Putting together all the bits and pieces required
> to remove PCI bios dependency from this patch can take a while, I wonder
> whether we should aim for merging this driver (rebased on top of my port to the
> new parse ranges API) with the ARM/ARM64 ifdeffery and clean it up later
> or aim for the whole thing at once, I am just worried it can take us a while.

I haven't looked at your patches, but "the whole thing at once" is
never *my* goal. A gradual cleanup is just fine with me.

Bjorn

2014-10-22 20:53:13

by Arnd Bergmann

Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Wednesday 22 October 2014 16:59:14 Lorenzo Pieralisi wrote:
> On Wed, Oct 01, 2014 at 10:38:45AM +0100, Arnd Bergmann wrote:
>
> [...]
>
> > The arm32 implementations of pci_domain_nr/pci_proc_domain can probably be
> > removed if we change the arm32 pcibios_init_hw function to call the new
> > interfaces that set the domain number.
>
> I wished, but it is a bit more complicated than I thought unfortunately,
> mostly because some drivers, eg cns3xxx set the domain numbers
> statically in pci_sys_data and this sets a chain of dependency that is
> not easy to untangle. I think cns3xxx is the only legacy driver that "uses"
> the domain number (in pci_sys_data) in a way that clashes with the
> generic domain_nr implementation, I need to give it more thought.

Just had a look at that driver, shouldn't be too hard to change, see below.

Signed-off-by: Arnd Bergmann <[email protected]>

diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 45d6bd09e6ef..aa4b9d7c52fd 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -30,18 +30,15 @@ struct cns3xxx_pcie {
unsigned int irqs[2];
struct resource res_io;
struct resource res_mem;
- struct hw_pci hw_pci;
-
+ int port;
bool linked;
};

-static struct cns3xxx_pcie cns3xxx_pcie[]; /* forward decl. */
-
static struct cns3xxx_pcie *sysdata_to_cnspci(void *sysdata)
{
struct pci_sys_data *root = sysdata;

- return &cns3xxx_pcie[root->domain];
+ return root->private_data;
}

static struct cns3xxx_pcie *pdev_to_cnspci(const struct pci_dev *dev)
@@ -192,13 +189,7 @@ static struct cns3xxx_pcie cns3xxx_pcie[] = {
.flags = IORESOURCE_MEM,
},
.irqs = { IRQ_CNS3XXX_PCIE0_RC, IRQ_CNS3XXX_PCIE0_DEVICE, },
- .hw_pci = {
- .domain = 0,
- .nr_controllers = 1,
- .ops = &cns3xxx_pcie_ops,
- .setup = cns3xxx_pci_setup,
- .map_irq = cns3xxx_pcie_map_irq,
- },
+ .port = 0,
},
[1] = {
.host_regs = (void __iomem *)CNS3XXX_PCIE1_HOST_BASE_VIRT,
@@ -217,19 +208,13 @@ static struct cns3xxx_pcie cns3xxx_pcie[] = {
.flags = IORESOURCE_MEM,
},
.irqs = { IRQ_CNS3XXX_PCIE1_RC, IRQ_CNS3XXX_PCIE1_DEVICE, },
- .hw_pci = {
- .domain = 1,
- .nr_controllers = 1,
- .ops = &cns3xxx_pcie_ops,
- .setup = cns3xxx_pci_setup,
- .map_irq = cns3xxx_pcie_map_irq,
- },
+ .port = 1,
},
};

static void __init cns3xxx_pcie_check_link(struct cns3xxx_pcie *cnspci)
{
- int port = cnspci->hw_pci.domain;
+ int port = cnspci->port;
u32 reg;
unsigned long time;

@@ -260,9 +245,10 @@ static void __init cns3xxx_pcie_check_link(struct cns3xxx_pcie *cnspci)

static void __init cns3xxx_pcie_hw_init(struct cns3xxx_pcie *cnspci)
{
- int port = cnspci->hw_pci.domain;
+ int port = cnspci->port;
struct pci_sys_data sd = {
.domain = port,
+ .private_data = cnspci,
};
struct pci_bus bus = {
.number = 0,
@@ -323,6 +309,14 @@ static int cns3xxx_pcie_abort_handler(unsigned long addr, unsigned int fsr,
void __init cns3xxx_pcie_init_late(void)
{
int i;
+ void *private_data;
+ struct hw_pci hw_pci = {
+ .nr_controllers = 1,
+ .ops = &cns3xxx_pcie_ops,
+ .setup = cns3xxx_pci_setup,
+ .map_irq = cns3xxx_pcie_map_irq,
+ .private_data = &private_data,
+ };

pcibios_min_io = 0;
pcibios_min_mem = 0;
@@ -335,7 +329,9 @@ void __init cns3xxx_pcie_init_late(void)
cns3xxx_pwr_soft_rst(0x1 << PM_SOFT_RST_REG_OFFST_PCIE(i));
cns3xxx_pcie_check_link(&cns3xxx_pcie[i]);
cns3xxx_pcie_hw_init(&cns3xxx_pcie[i]);
- pci_common_init(&cns3xxx_pcie[i].hw_pci);
> + hw_pci.domain = i;
+ private_data = &cns3xxx_pcie[i];
+ pci_common_init(&hw_pci);
}

pci_assign_unassigned_resources();

2014-10-23 09:13:17

by Liviu Dudau

Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Wed, Oct 22, 2014 at 09:52:19PM +0100, Arnd Bergmann wrote:
> On Wednesday 22 October 2014 16:59:14 Lorenzo Pieralisi wrote:
> > On Wed, Oct 01, 2014 at 10:38:45AM +0100, Arnd Bergmann wrote:
> >
> > [...]
> >
> > > The arm32 implementations of pci_domain_nr/pci_proc_domain can probably be
> > > removed if we change the arm32 pcibios_init_hw function to call the new
> > > interfaces that set the domain number.
> >
> > I wished, but it is a bit more complicated than I thought unfortunately,
> > mostly because some drivers, eg cns3xxx set the domain numbers
> > statically in pci_sys_data and this sets a chain of dependency that is
> > not easy to untangle. I think cns3xxx is the only legacy driver that "uses"
> > the domain number (in pci_sys_data) in a way that clashes with the
> > generic domain_nr implementation, I need to give it more thought.
>
> Just had a look at that driver, shouldn't be too hard to change, see below.

I like this!

One thing though ...

>
> Signed-off-by: Arnd Bergmann <[email protected]>
>
> diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
> index 45d6bd09e6ef..aa4b9d7c52fd 100644
> --- a/arch/arm/mach-cns3xxx/pcie.c
> +++ b/arch/arm/mach-cns3xxx/pcie.c
> @@ -30,18 +30,15 @@ struct cns3xxx_pcie {
> unsigned int irqs[2];
> struct resource res_io;
> struct resource res_mem;
> - struct hw_pci hw_pci;
> -
> + int port;
> bool linked;
> };
>
> -static struct cns3xxx_pcie cns3xxx_pcie[]; /* forward decl. */
> -
> static struct cns3xxx_pcie *sysdata_to_cnspci(void *sysdata)
> {
> struct pci_sys_data *root = sysdata;
>
> - return &cns3xxx_pcie[root->domain];
> + return root->private_data;
> }
>
> static struct cns3xxx_pcie *pdev_to_cnspci(const struct pci_dev *dev)
> @@ -192,13 +189,7 @@ static struct cns3xxx_pcie cns3xxx_pcie[] = {
> .flags = IORESOURCE_MEM,
> },
> .irqs = { IRQ_CNS3XXX_PCIE0_RC, IRQ_CNS3XXX_PCIE0_DEVICE, },
> - .hw_pci = {
> - .domain = 0,
> - .nr_controllers = 1,
> - .ops = &cns3xxx_pcie_ops,
> - .setup = cns3xxx_pci_setup,
> - .map_irq = cns3xxx_pcie_map_irq,
> - },
> + .port = 0,
> },
> [1] = {
> .host_regs = (void __iomem *)CNS3XXX_PCIE1_HOST_BASE_VIRT,
> @@ -217,19 +208,13 @@ static struct cns3xxx_pcie cns3xxx_pcie[] = {
> .flags = IORESOURCE_MEM,
> },
> .irqs = { IRQ_CNS3XXX_PCIE1_RC, IRQ_CNS3XXX_PCIE1_DEVICE, },
> - .hw_pci = {
> - .domain = 1,
> - .nr_controllers = 1,
> - .ops = &cns3xxx_pcie_ops,
> - .setup = cns3xxx_pci_setup,
> - .map_irq = cns3xxx_pcie_map_irq,
> - },
> + .port = 1,
> },
> };
>
> static void __init cns3xxx_pcie_check_link(struct cns3xxx_pcie *cnspci)
> {
> - int port = cnspci->hw_pci.domain;
> + int port = cnspci->port;
> u32 reg;
> unsigned long time;
>
> @@ -260,9 +245,10 @@ static void __init cns3xxx_pcie_check_link(struct cns3xxx_pcie *cnspci)
>
> static void __init cns3xxx_pcie_hw_init(struct cns3xxx_pcie *cnspci)
> {
> - int port = cnspci->hw_pci.domain;
> + int port = cnspci->port;
> struct pci_sys_data sd = {
> .domain = port,
> + .private_data = cnspci,
> };
> struct pci_bus bus = {
> .number = 0,
> @@ -323,6 +309,14 @@ static int cns3xxx_pcie_abort_handler(unsigned long addr, unsigned int fsr,
> void __init cns3xxx_pcie_init_late(void)
> {
> int i;
> + void *private_data;
> + struct hw_pci hw_pci = {
> + .nr_controllers = 1,
> + .ops = &cns3xxx_pcie_ops,
> + .setup = cns3xxx_pci_setup,
> + .map_irq = cns3xxx_pcie_map_irq,
> + .private_data = &private_data,
> + };
>
> pcibios_min_io = 0;
> pcibios_min_mem = 0;
> @@ -335,7 +329,9 @@ void __init cns3xxx_pcie_init_late(void)
> cns3xxx_pwr_soft_rst(0x1 << PM_SOFT_RST_REG_OFFST_PCIE(i));
> cns3xxx_pcie_check_link(&cns3xxx_pcie[i]);
> cns3xxx_pcie_hw_init(&cns3xxx_pcie[i]);
> - pci_common_init(&cns3xxx_pcie[i].hw_pci);
> + hw_pci->domain = i;
> + private_data = &cns3xxx_pcie[i];

Is this dance with pointers absolutely necessary? Does gcc throw dishes at you
for doing hw_pci->private_data = &cns3xxx_pcie[i] directly?

Best regards,
Liviu

> + pci_common_init(&hw_pci);
> }
>
> pci_assign_unassigned_resources();
>
>
>
>

--
====================
| I would like to |
| fix the world, |
| but they're not |
| giving me the |
\ source code! /
---------------
¯\_(ツ)_/¯

2014-10-23 11:27:46

by Lorenzo Pieralisi

[permalink] [raw]
Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Thu, Oct 23, 2014 at 10:13:09AM +0100, Liviu Dudau wrote:
> On Wed, Oct 22, 2014 at 09:52:19PM +0100, Arnd Bergmann wrote:
> > On Wednesday 22 October 2014 16:59:14 Lorenzo Pieralisi wrote:
> > > On Wed, Oct 01, 2014 at 10:38:45AM +0100, Arnd Bergmann wrote:
> > >
> > > [...]
> > >
> > > > The arm32 implementations of pci_domain_nr/pci_proc_domain can probably be
> > > > removed if we change the arm32 pcibios_init_hw function to call the new
> > > > interfaces that set the domain number.
> > >
> > > I wished, but it is a bit more complicated than I thought unfortunately,
> > > mostly because some drivers, eg cns3xxx set the domain numbers
> > > statically in pci_sys_data and this sets a chain of dependency that is
> > > not easy to untangle. I think cns3xxx is the only legacy driver that "uses"
> > > the domain number (in pci_sys_data) in a way that clashes with the
> > > generic domain_nr implementation, I need to give it more thought.
> >
> > Just had a look at that driver, shouldn't be too hard to change, see below.
>
> I like this!
>
> One thing though ...

I like it too, it is one way of removing the artificial domain dependency
from this driver.

I think that by removing that, we could switch to CONFIG_PCI_DOMAINS_GENERIC
on ARM32. I will remove the dependency in drivers/pci/host/pci-mvebu.c
introduced by commit 2613ba48. pci_sys_data.domain is always 0 in that
driver, so its usefulness is doubtful. Comments welcome; I copied Jason in
case he has comments.
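
For background, the generic domain assignment behind CONFIG_PCI_DOMAINS_GENERIC
boils down to roughly the following. This is only a sketch: of_get_pci_domain_nr()
is the existing DT helper, while the wrapper name and the static counter are
illustrative of the generic code being discussed, not copied verbatim from any tree:

	static atomic_t __domain_nr = ATOMIC_INIT(-1);

	static int assign_pci_domain_nr(struct device *parent)
	{
		/* prefer an explicit "linux,pci-domain" property, if present */
		int domain = of_get_pci_domain_nr(parent->of_node);

		/* otherwise hand out the next free number */
		if (domain < 0)
			domain = atomic_inc_return(&__domain_nr);

		return domain;
	}

With something like that in the core, a host bridge driver no longer has to carry
the number in hw_pci.domain or pci_sys_data.domain itself.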

[...]

> > @@ -323,6 +309,14 @@ static int cns3xxx_pcie_abort_handler(unsigned long addr, unsigned int fsr,
> > void __init cns3xxx_pcie_init_late(void)
> > {
> > int i;
> > + void *private_data;
> > + struct hw_pci hw_pci = {
> > + .nr_controllers = 1,
> > + .ops = &cns3xxx_pcie_ops,
> > + .setup = cns3xxx_pci_setup,
> > + .map_irq = cns3xxx_pcie_map_irq,
> > + .private_data = &private_data,
> > + };
> >
> > pcibios_min_io = 0;
> > pcibios_min_mem = 0;
> > @@ -335,7 +329,9 @@ void __init cns3xxx_pcie_init_late(void)
> > cns3xxx_pwr_soft_rst(0x1 << PM_SOFT_RST_REG_OFFST_PCIE(i));
> > cns3xxx_pcie_check_link(&cns3xxx_pcie[i]);
> > cns3xxx_pcie_hw_init(&cns3xxx_pcie[i]);
> > - pci_common_init(&cns3xxx_pcie[i].hw_pci);
> > + hw_pci->domain = i;

+ hw_pci.domain = i;

I will remove this since if we move to generic domains it is useless to
pass the value through hw_pci.

> > + private_data = &cns3xxx_pcie[i];
>
> > Is this dance with pointers absolutely necessary? Does gcc throw dishes at you
> for doing hw_pci->private_data = &cns3xxx_pcie[i] directly?

You can't, hw_pci.private_data is void **.
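
To spell that out with the declarations involved (the field name is the one from
struct hw_pci, everything else here is just an illustration of the code inside the
init loop, not a tested snippet):

	struct hw_pci {
		/* ... other members elided ... */
		void **private_data;		/* one slot per controller */
	};

	void *slot;				/* backing storage for the single slot */
	struct hw_pci hw = {
		.nr_controllers	= 1,
		.private_data	= &slot,	/* &slot has type void ** */
	};

	slot = &cns3xxx_pcie[i];		/* effectively hw.private_data[0] = ... */

Assigning &cns3xxx_pcie[i] straight to hw.private_data would hand a
struct cns3xxx_pcie * to a void ** member, which is exactly what gcc
complains about.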

Lorenzo

2014-10-23 13:33:36

by Arnd Bergmann

[permalink] [raw]
Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Thursday 23 October 2014 10:13:09 Liviu Dudau wrote:
> > @@ -335,7 +329,9 @@ void __init cns3xxx_pcie_init_late(void)
> > cns3xxx_pwr_soft_rst(0x1 << PM_SOFT_RST_REG_OFFST_PCIE(i));
> > cns3xxx_pcie_check_link(&cns3xxx_pcie[i]);
> > cns3xxx_pcie_hw_init(&cns3xxx_pcie[i]);
> > - pci_common_init(&cns3xxx_pcie[i].hw_pci);
> > + hw_pci->domain = i;
> > + private_data = &cns3xxx_pcie[i];
>
> Is this dance with pointers absolutely necessary? Does gcc throw dishes at you
> for doing hw_pci->private_data = &cns3xxx_pcie[i] directly?

hw_pci->private_data is an array of pointers to private_data for each
host controller instance within the domain. There is only one entry
here, but you still need the correct type, so that would be

hw_pci->private_data = (void **)&&cns3xxx_pcie[i];

which is even more confusing and ugly than what I wrote. If you have
a better idea, I'm all for it. Maybe it's clearer to write like this
(taken from rcar driver)?

void *hw_private[1];
hw_pci.private_data = hw_private;

for each host {
...
hw_private[0] = &cns3xxx_pcie[i];
pci_common_init_dev(&hw_pci);
}
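
Filled in for cns3xxx that would look roughly like this (untested, just to show
the shape; the loop body keeps the existing reset/link/init calls from the patch
above):

	void __init cns3xxx_pcie_init_late(void)
	{
		int i;
		void *hw_private[1];
		struct hw_pci hw_pci = {
			.nr_controllers	= 1,
			.ops		= &cns3xxx_pcie_ops,
			.setup		= cns3xxx_pci_setup,
			.map_irq	= cns3xxx_pcie_map_irq,
			.private_data	= hw_private,
		};

		for (i = 0; i < ARRAY_SIZE(cns3xxx_pcie); i++) {
			/* pcibios_min_* setup, soft reset, link check and
			   hw init as in the patch above */
			hw_pci.domain = i;
			hw_private[0] = &cns3xxx_pcie[i];
			pci_common_init(&hw_pci);
		}
	}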

Note that all 'modern' controllers always use nr_controllers=1, so we
only need a single private_data pointer per domain, and the entire
hw_pci interface is a bit pointless.

The platforms that currently require it are iop13xx, dove, mv78xx0
and orion5x. We have plans to remove the last three platforms in
the next merge window or two, once all users are able to migrate to
mach-mvebu. Once that happens, we could probably move the entire
hw_pci logic that deals with multiple hosts per domain into the
iop13xx pci driver if we want to. A less intrusive simplification
would be to convert all 'multiplatform'-aware host controllers to
use pci_common_init_dev() and then take hw_pci out of that.

See below for a sample patch I just did. It duplicates the code from
pci_common_init_dev/pci_common_init because we know that all users
of pci_common_init_dev are modern and only pass a single host bridge.
The new pci_common_init_dev is simpler than the old one but should
do the exact same thing for all current users, with the addition
of propagating the return value.

pci_init_single() is the new internal helper and we should be able to
convert all existing users of pci_common_init_dev() to use that directly
and no longer define hw_pci at all.
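
To make that end state concrete, a probe function of a hypothetical driver would
reduce to something like the pci-host-generic conversion below; struct foo_pci and
foo_pci_ops are placeholders, and the DT parsing step is only hinted at:

	struct foo_pci {			/* hypothetical driver state */
		struct pci_sys_data sys;	/* embedded, no separate allocation */
		/* config space, windows, ... */
	};

	static int foo_pci_probe(struct platform_device *pdev)
	{
		struct foo_pci *foo;

		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
		if (!foo)
			return -ENOMEM;

		foo->sys.map_irq = of_irq_parse_and_map_pci;
		INIT_LIST_HEAD(&foo->sys.resources);
		/* parse DT ranges, fill foo->sys.resources, map config space */

		pci_add_flags(PCI_REASSIGN_ALL_RSRC);
		return pci_init_single(&pdev->dev, &foo->sys, NULL, &foo_pci_ops);
	}

No hw_pci anywhere, the sysdata lives inside the driver structure, and the
return value propagates to the driver core.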

I've converted two drivers to give an example, but the conversions
should be done in follow-up patches really, and the pci_common_init_dev
function removed after all users are moved over.

The new pci_init_single() is also rather simple, and it should just
converge with what we do for arm64 over time.

Arnd

---
arch/arm/include/asm/mach/pci.h | 20 ++++---
arch/arm/kernel/bios32.c | 103 ++++++++++++++++++++++++++++++++++--
drivers/pci/host/pci-host-generic.c | 53 ++++++++-----------
drivers/pci/host/pci-mvebu.c | 44 +++++++--------
4 files changed, 157 insertions(+), 63 deletions(-)

diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 7fc42784becb..fe7e13759ec0 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -73,16 +73,22 @@ struct pci_sys_data {
/*
* Call this with your hw_pci struct to initialise the PCI system.
*/
-void pci_common_init_dev(struct device *, struct hw_pci *);
+void pci_common_init(struct hw_pci *);

/*
- * Compatibility wrapper for older platforms that do not care about
- * passing the parent device.
+ * Used by modern platforms, only one host allowed.
*/
-static inline void pci_common_init(struct hw_pci *hw)
-{
- pci_common_init_dev(NULL, hw);
-}
+int pci_common_init_dev(struct device *, struct hw_pci *);
+
+/*
+ * Replaces pci_common_init_dev for drivers that want to do the
+ * initialization simpler and avoid defining hw_pci
+ */
+int pci_init_single(struct device *parent,
+ struct pci_sys_data *sys,
+ struct pci_bus *(*scan)(int nr, struct pci_sys_data *),
+ struct pci_ops *ops);
+

/*
* Setup early fixed I/O mapping.
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 17a26c17f7f5..bccc8703e575 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -456,8 +456,7 @@ static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
return 0;
}

-static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
- struct list_head *head)
+static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
{
struct pci_sys_data *sys = NULL;
int ret;
@@ -494,7 +493,7 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
if (hw->scan)
sys->bus = hw->scan(nr, sys);
else
- sys->bus = pci_scan_root_bus(parent, sys->busnr,
+ sys->bus = pci_scan_root_bus(NULL, sys->busnr,
hw->ops, sys, &sys->resources);

if (!sys->bus)
@@ -511,7 +510,7 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
}
}

-void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
+void pci_common_init(struct hw_pci *hw)
{
struct pci_sys_data *sys;
LIST_HEAD(head);
@@ -519,7 +518,7 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
pci_add_flags(PCI_REASSIGN_ALL_RSRC);
if (hw->preinit)
hw->preinit();
- pcibios_init_hw(parent, hw, &head);
+ pcibios_init_hw(hw, &head);
if (hw->postinit)
hw->postinit();

@@ -559,6 +558,100 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
}
}

+int pci_init_single(struct device *parent,
+ struct pci_sys_data *sys,
+ struct pci_bus *(*scan)(int nr, struct pci_sys_data *),
+ struct pci_ops *ops)
+{
+ int ret;
+ struct pci_bus *bus;
+
+ ret = pcibios_init_resources(0, sys);
+ if (ret)
+ return ret;
+
+ if (scan)
+ bus = scan(0, sys);
+ else
+ bus = pci_scan_root_bus(parent, 0, ops, sys, &sys->resources);
+
+ if (!bus) {
+ dev_err(parent, "PCI: unable to scan bus!");
+ return -ENXIO;
+ }
+ sys->bus = bus;
+
+ pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);
+
+ if (!pci_has_flag(PCI_PROBE_ONLY)) {
+ /*
+ * Size the bridge windows.
+ */
+ pci_bus_size_bridges(bus);
+
+ /*
+ * Assign resources.
+ */
+ pci_bus_assign_resources(bus);
+ }
+
+ /*
+ * Tell drivers about devices found.
+ */
+ pci_bus_add_devices(bus);
+
+ /* Configure PCI Express settings */
+ if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
+ struct pci_bus *child;
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ }
+
+ return 0;
+}
+
+int pci_common_init_dev(struct device *parent, struct hw_pci *hw)
+{
+ struct pci_sys_data *sys;
+ int ret;
+
+ if (hw->nr_controllers != 1 ||
+ hw->preinit || hw->postinit)
+ return -EINVAL;
+
+ sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
+ if (!sys)
+ return -ENOMEM;
+
+#ifdef CONFIG_PCI_DOMAINS
+ sys->domain = hw->domain;
+#endif
+ sys->swizzle = hw->swizzle;
+ sys->map_irq = hw->map_irq;
+ sys->align_resource = hw->align_resource;
+ sys->add_bus = hw->add_bus;
+ sys->remove_bus = hw->remove_bus;
+ INIT_LIST_HEAD(&sys->resources);
+
+ if (hw->private_data)
+ sys->private_data = hw->private_data[0];
+
+ pci_add_flags(PCI_REASSIGN_ALL_RSRC);
+ ret = hw->setup(0, sys);
+ if (ret == 0)
+ ret = -ENXIO;
+ if (ret < 0)
+ return ret;
+
+ ret = pcibios_init_sysdata(parent, sys, hw->scan, hw->ops);
+ if (ret)
+ /* FIXME: undo ->setup */
+ kfree(sys);
+
+ return ret;
+}
+
#ifndef CONFIG_PCI_HOST_ITE8152
void pcibios_set_master(struct pci_dev *dev)
{
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 3d2076f59911..3542a7b740e5 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -40,16 +40,20 @@ struct gen_pci_cfg_windows {

struct gen_pci {
struct pci_host_bridge host;
+ struct pci_sys_data sys;
struct gen_pci_cfg_windows cfg;
- struct list_head resources;
};

+static inline struct gen_pci *gen_pci_from_sys(struct pci_sys_data *sys)
+{
+ return container_of(sys, struct gen_pci, sys);
+}
+
static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
unsigned int devfn,
int where)
{
- struct pci_sys_data *sys = bus->sysdata;
- struct gen_pci *pci = sys->private_data;
+ struct gen_pci *pci = gen_pci_from_sys(bus->sysdata);
resource_size_t idx = bus->number - pci->cfg.bus_range.start;

return pci->cfg.win[idx] + ((devfn << 8) | where);
@@ -64,8 +68,7 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
unsigned int devfn,
int where)
{
- struct pci_sys_data *sys = bus->sysdata;
- struct gen_pci *pci = sys->private_data;
+ struct gen_pci *pci = gen_pci_from_sys(bus->sysdata);
resource_size_t idx = bus->number - pci->cfg.bus_range.start;

return pci->cfg.win[idx] + ((devfn << 12) | where);
@@ -80,8 +83,7 @@ static int gen_pci_config_read(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
void __iomem *addr;
- struct pci_sys_data *sys = bus->sysdata;
- struct gen_pci *pci = sys->private_data;
+ struct gen_pci *pci = gen_pci_from_sys(bus->sysdata);

addr = pci->cfg.ops->map_bus(bus, devfn, where);

@@ -103,8 +105,7 @@ static int gen_pci_config_write(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
void __iomem *addr;
- struct pci_sys_data *sys = bus->sysdata;
- struct gen_pci *pci = sys->private_data;
+ struct gen_pci *pci = gen_pci_from_sys(bus->sysdata);

addr = pci->cfg.ops->map_bus(bus, devfn, where);

@@ -181,10 +182,10 @@ static void gen_pci_release_of_pci_ranges(struct gen_pci *pci)
{
struct pci_host_bridge_window *win;

- list_for_each_entry(win, &pci->resources, list)
+ list_for_each_entry(win, &pci->sys.resources, list)
release_resource(win->res);

- pci_free_resource_list(&pci->resources);
+ pci_free_resource_list(&pci->sys.resources);
}

static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
@@ -237,7 +238,7 @@ static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
if (err)
goto out_release_res;

- pci_add_resource_offset(&pci->resources, res, offset);
+ pci_add_resource_offset(&pci->sys.resources, res, offset);
}

if (!res_valid) {
@@ -306,17 +307,10 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
}

/* Register bus resource */
- pci_add_resource(&pci->resources, bus_range);
+ pci_add_resource(&pci->sys.resources, bus_range);
return 0;
}

-static int gen_pci_setup(int nr, struct pci_sys_data *sys)
-{
- struct gen_pci *pci = sys->private_data;
- list_splice_init(&pci->resources, &sys->resources);
- return 1;
-}
-
static int gen_pci_probe(struct platform_device *pdev)
{
int err;
@@ -326,17 +320,12 @@ static int gen_pci_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
- struct hw_pci hw = {
- .nr_controllers = 1,
- .private_data = (void **)&pci,
- .setup = gen_pci_setup,
- .map_irq = of_irq_parse_and_map_pci,
- .ops = &gen_pci_ops,
- };

if (!pci)
return -ENOMEM;

+ pci->sys.map_irq = of_irq_parse_and_map_pci,
+
type = of_get_property(np, "device_type", NULL);
if (!type || strcmp(type, "pci")) {
dev_err(dev, "invalid \"device_type\" %s\n", type);
@@ -355,7 +344,7 @@ static int gen_pci_probe(struct platform_device *pdev)
pci->cfg.ops = of_id->data;
pci->host.dev.parent = dev;
INIT_LIST_HEAD(&pci->host.windows);
- INIT_LIST_HEAD(&pci->resources);
+ INIT_LIST_HEAD(&pci->sys.resources);

/* Parse our PCI ranges and request their resources */
err = gen_pci_parse_request_of_pci_ranges(pci);
@@ -369,8 +358,12 @@ static int gen_pci_probe(struct platform_device *pdev)
return err;
}

- pci_common_init_dev(dev, &hw);
- return 0;
+ pci_add_flags(PCI_REASSIGN_ALL_RSRC);
+ err = pci_init_single(dev, &pci->sys, NULL, &gen_pci_ops);
+ if (err)
+ gen_pci_release_of_pci_ranges(pci);
+
+ return err;
}

static struct platform_driver gen_pci_driver = {
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index b1315e197ffb..e1381c0699be 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -99,6 +99,7 @@ struct mvebu_pcie_port;
struct mvebu_pcie {
struct platform_device *pdev;
struct mvebu_pcie_port *ports;
+ struct pci_sys_data sysdata;
struct msi_chip *msi;
struct resource io;
char io_name[30];
@@ -611,7 +612,7 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,

static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
- return sys->private_data;
+ return container_of(sys, struct mvebu_pcie, sysdata);
}

static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
@@ -718,11 +719,26 @@ static struct pci_ops mvebu_pcie_ops = {
.write = mvebu_pcie_wr_conf,
};

-static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
+/* FIXME: move the code around to avoid these */
+static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys);
+static void mvebu_pcie_add_bus(struct pci_bus *bus);
+static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
+ const struct resource *res,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t align);
+
+static int mvebu_pcie_enable(struct mvebu_pcie *pcie)
{
- struct mvebu_pcie *pcie = sys_to_pcie(sys);
int i;
int domain = 0;
+ struct pci_sys_data *sys = &pcie->sysdata;
+
+ pcie->sysdata = (struct pci_sys_data) {
+ .map_irq = of_irq_parse_and_map_pci,
+ .align_resource = mvebu_pcie_align_resource,
+ .add_bus = mvebu_pcie_add_bus,
+ };

#ifdef CONFIG_PCI_DOMAINS
domain = sys->domain;
@@ -738,11 +754,13 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
if (request_resource(&iomem_resource, &pcie->mem))
return 0;

+ INIT_LIST_HEAD(&sys->resources);
if (resource_size(&pcie->realio) != 0) {
if (request_resource(&ioport_resource, &pcie->realio)) {
release_resource(&pcie->mem);
return 0;
}
+
pci_add_resource_offset(&sys->resources, &pcie->realio,
sys->io_offset);
}
@@ -756,7 +774,9 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
mvebu_pcie_setup_hw(port);
}

- return 1;
+ pci_add_flags(PCI_REASSIGN_ALL_RSRC);
+ return pci_init_single(&pcie->pdev->dev, &pcie->sysdata,
+ mvebu_pcie_scan_bus, &mvebu_pcie_ops);
}

static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys)
@@ -810,24 +830,6 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
return start;
}

-static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
-{
- struct hw_pci hw;
-
- memset(&hw, 0, sizeof(hw));
-
- hw.nr_controllers = 1;
- hw.private_data = (void **)&pcie;
- hw.setup = mvebu_pcie_setup;
- hw.scan = mvebu_pcie_scan_bus;
- hw.map_irq = of_irq_parse_and_map_pci;
- hw.ops = &mvebu_pcie_ops;
- hw.align_resource = mvebu_pcie_align_resource;
- hw.add_bus = mvebu_pcie_add_bus;
-
- pci_common_init(&hw);
-}
-
/*
* Looks up the list of register addresses encoded into the reg =
* <...> property for one that matches the given port/lane. Once
@@ -1066,9 +1068,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
pci_ioremap_io(i, pcie->io.start + i);

mvebu_pcie_msi_enable(pcie);
- mvebu_pcie_enable(pcie);
-
- return 0;
+ return mvebu_pcie_enable(pcie);
}

static const struct of_device_id mvebu_pcie_of_match_table[] = {

2014-10-23 16:52:18

by Jason Gunthorpe

[permalink] [raw]
Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Thu, Oct 23, 2014 at 12:27:31PM +0100, Lorenzo Pieralisi wrote:

> I think that by removing that, we could switch to CONFIG_PCI_DOMAINS_GENERIC
> on ARM32. I will remove the dependency in drivers/pci/host/pci-mvebu.c
> introduced by commit 2613ba48. pci_sys_data.domain is always 0 in that
> driver, so its usefulness is doubtful. Comments welcome; I copied Jason in
> case he has comments.

pcie-mvebu is like all the other new drivers, each top level DT node
that introduces the interface should have a unique domain number. It
would be very strange (and currently unsupported by the driver) to
ever have more than 1 mvebu top level node in any DT.

Jason

2014-10-24 10:04:32

by Liviu Dudau

[permalink] [raw]
Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Thu, Oct 23, 2014 at 02:33:16PM +0100, Arnd Bergmann wrote:
> On Thursday 23 October 2014 10:13:09 Liviu Dudau wrote:
> > > @@ -335,7 +329,9 @@ void __init cns3xxx_pcie_init_late(void)
> > > cns3xxx_pwr_soft_rst(0x1 << PM_SOFT_RST_REG_OFFST_PCIE(i));
> > > cns3xxx_pcie_check_link(&cns3xxx_pcie[i]);
> > > cns3xxx_pcie_hw_init(&cns3xxx_pcie[i]);
> > > - pci_common_init(&cns3xxx_pcie[i].hw_pci);
> > > + hw_pci->domain = i;
> > > + private_data = &cns3xxx_pcie[i];
> >
> > Is this dance with pointers absolutely necessary? Does gcc throw dishes at you
> > for doing hw_pci->private_data = &cns3xxx_pcie[i] directly?
>
> hw_pci->private_data is an array of pointers to private_data for each
> host controller instance within the domain. There is only one entry
> here, but you still need the correct type, so that would be
>
> hw_pci->private_data = (void **)&&cns3xxx_pcie[i];
>
> which is even more confusing and ugly than what I wrote. If you have
> a better idea, I'm all for it. Maybe it's clearer to write like this
> (taken from rcar driver)?

I was not questioning the implementation (and I do like it), I was just
wondering if it is there to work around some quirk of gcc. As I did not have
the time to test it, I was hoping that, if there is a gcc requirement to
phrase the code that way, you might provide me with an answer.

Best regards,
Liviu

>
> void *hw_private[1];
> hw_pci.private_data = hw_private;
>
> for each host {
> ...
> hw_private[0] = &cns3xxx_pcie[i];
> pci_common_init_dev(&hw_pci);
> }
>
> Note that all 'modern' controllers always use nr_controllers=1, so we
> only need a single private_data pointer per domain, and the entire
> hw_pci interface is a bit pointless.
>
> The platforms that currently require it are iop13xx, dove, mv78xx0
> and orion5x. We have plans to remove the last three platforms in
> the next merge window or two, once all users are able to migrate to
> mach-mvebu. Once that happens, we could probably move the entire
> hw_pci logic that deals with multiple hosts per domain into the
> iop13xx pci driver if we want to. A less intrusive simplification
> would be to convert all 'multiplatform'-aware host controllers to
> use pci_common_init_dev() and then take hw_pci out of that.
>
> See below for a sample patch I just did. It duplicates the code from
> pci_common_init_dev/pci_common_init because we know that all users
> of pci_common_init_dev are modern and only pass a single host bridge.
> The new pci_common_init_dev is simpler than the old one but should
> do the exact same thing for all current users, with the addition
> of propagating the return value.
>
> pci_init_single() is the new internal helper and we should be able to
> convert all existing users of pci_common_init_dev() to use that directly
> and no longer define hw_pci at all.
>
> I've converted two drivers to give an example, but the conversions
> should be done in follow-up patches really, and the pci_common_init_dev
> function removed after all users are moved over.
>
> The new pci_init_single() is also rather simple, and it should just
> converge with what we do for arm64 over time.
>
> Arnd
>
> ---
> arch/arm/include/asm/mach/pci.h | 20 ++++---
> arch/arm/kernel/bios32.c | 103 ++++++++++++++++++++++++++++++++++--
> drivers/pci/host/pci-host-generic.c | 53 ++++++++-----------
> drivers/pci/host/pci-mvebu.c | 44 +++++++--------
> 4 files changed, 157 insertions(+), 63 deletions(-)
>
> diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
> index 7fc42784becb..fe7e13759ec0 100644
> --- a/arch/arm/include/asm/mach/pci.h
> +++ b/arch/arm/include/asm/mach/pci.h
> @@ -73,16 +73,22 @@ struct pci_sys_data {
> /*
> * Call this with your hw_pci struct to initialise the PCI system.
> */
> -void pci_common_init_dev(struct device *, struct hw_pci *);
> +void pci_common_init(struct hw_pci *);
>
> /*
> - * Compatibility wrapper for older platforms that do not care about
> - * passing the parent device.
> + * Used by modern platforms, only one host allowed.
> */
> -static inline void pci_common_init(struct hw_pci *hw)
> -{
> - pci_common_init_dev(NULL, hw);
> -}
> +int pci_common_init_dev(struct device *, struct hw_pci *);
> +
> +/*
> + * Replaces pci_common_init_dev for drivers that want to do the
> + * initialization simpler and avoid defining hw_pci
> + */
> +int pci_init_single(struct device *parent,
> + struct pci_sys_data *sys,
> + struct pci_bus *(*scan)(int nr, struct pci_sys_data *),
> + struct pci_ops *ops);
> +
>
> /*
> * Setup early fixed I/O mapping.
> diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
> index 17a26c17f7f5..bccc8703e575 100644
> --- a/arch/arm/kernel/bios32.c
> +++ b/arch/arm/kernel/bios32.c
> @@ -456,8 +456,7 @@ static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
> return 0;
> }
>
> -static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
> - struct list_head *head)
> +static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
> {
> struct pci_sys_data *sys = NULL;
> int ret;
> @@ -494,7 +493,7 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
> if (hw->scan)
> sys->bus = hw->scan(nr, sys);
> else
> - sys->bus = pci_scan_root_bus(parent, sys->busnr,
> + sys->bus = pci_scan_root_bus(NULL, sys->busnr,
> hw->ops, sys, &sys->resources);
>
> if (!sys->bus)
> @@ -511,7 +510,7 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
> }
> }
>
> -void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
> +void pci_common_init(struct hw_pci *hw)
> {
> struct pci_sys_data *sys;
> LIST_HEAD(head);
> @@ -519,7 +518,7 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
> pci_add_flags(PCI_REASSIGN_ALL_RSRC);
> if (hw->preinit)
> hw->preinit();
> - pcibios_init_hw(parent, hw, &head);
> + pcibios_init_hw(hw, &head);
> if (hw->postinit)
> hw->postinit();
>
> @@ -559,6 +558,100 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
> }
> }
>
> +int pci_init_single(struct device *parent,
> + struct pci_sys_data *sys,
> + struct pci_bus *(*scan)(int nr, struct pci_sys_data *),
> + struct pci_ops *ops)
> +{
> + int ret;
> + struct pci_bus *bus;
> +
> + ret = pcibios_init_resources(0, sys);
> + if (ret)
> + return ret;
> +
> + if (scan)
> + bus = scan(0, sys);
> + else
> + bus = pci_scan_root_bus(parent, 0, ops, sys, &sys->resources);
> +
> + if (!bus) {
> + dev_err(parent, "PCI: unable to scan bus!");
> + return -ENXIO;
> + }
> + sys->bus = bus;
> +
> + pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);
> +
> + if (!pci_has_flag(PCI_PROBE_ONLY)) {
> + /*
> + * Size the bridge windows.
> + */
> + pci_bus_size_bridges(bus);
> +
> + /*
> + * Assign resources.
> + */
> + pci_bus_assign_resources(bus);
> + }
> +
> + /*
> + * Tell drivers about devices found.
> + */
> + pci_bus_add_devices(bus);
> +
> + /* Configure PCI Express settings */
> + if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
> + struct pci_bus *child;
> +
> + list_for_each_entry(child, &bus->children, node)
> + pcie_bus_configure_settings(child);
> + }
> +
> + return 0;
> +}
> +
> +int pci_common_init_dev(struct device *parent, struct hw_pci *hw)
> +{
> + struct pci_sys_data *sys;
> + int ret;
> +
> + if (hw->nr_controllers != 1 ||
> + hw->preinit || hw->postinit)
> + return -EINVAL;
> +
> + sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
> + if (!sys)
> + return -ENOMEM;
> +
> +#ifdef CONFIG_PCI_DOMAINS
> + sys->domain = hw->domain;
> +#endif
> + sys->swizzle = hw->swizzle;
> + sys->map_irq = hw->map_irq;
> + sys->align_resource = hw->align_resource;
> + sys->add_bus = hw->add_bus;
> + sys->remove_bus = hw->remove_bus;
> + INIT_LIST_HEAD(&sys->resources);
> +
> + if (hw->private_data)
> + sys->private_data = hw->private_data[0];
> +
> + pci_add_flags(PCI_REASSIGN_ALL_RSRC);
> + ret = hw->setup(0, sys);
> + if (ret == 0)
> + ret = -ENXIO;
> + if (ret < 0)
> + return ret;
> +
> + ret = pcibios_init_sysdata(parent, sys, hw->scan, hw->ops);
> + if (ret)
> + /* FIXME: undo ->setup */
> + kfree(sys);
> +
> + return ret;
> +}
> +
> #ifndef CONFIG_PCI_HOST_ITE8152
> void pcibios_set_master(struct pci_dev *dev)
> {
> diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
> index 3d2076f59911..3542a7b740e5 100644
> --- a/drivers/pci/host/pci-host-generic.c
> +++ b/drivers/pci/host/pci-host-generic.c
> @@ -40,16 +40,20 @@ struct gen_pci_cfg_windows {
>
> struct gen_pci {
> struct pci_host_bridge host;
> + struct pci_sys_data sys;
> struct gen_pci_cfg_windows cfg;
> - struct list_head resources;
> };
>
> +static inline struct gen_pci *gen_pci_from_sys(struct pci_sys_data *sys)
> +{
> + return container_of(sys, struct gen_pci, sys);
> +}
> +
> static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
> unsigned int devfn,
> int where)
> {
> - struct pci_sys_data *sys = bus->sysdata;
> - struct gen_pci *pci = sys->private_data;
> + struct gen_pci *pci = gen_pci_from_sys(bus->sysdata);
> resource_size_t idx = bus->number - pci->cfg.bus_range.start;
>
> return pci->cfg.win[idx] + ((devfn << 8) | where);
> @@ -64,8 +68,7 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
> unsigned int devfn,
> int where)
> {
> - struct pci_sys_data *sys = bus->sysdata;
> - struct gen_pci *pci = sys->private_data;
> + struct gen_pci *pci = gen_pci_from_sys(bus->sysdata);
> resource_size_t idx = bus->number - pci->cfg.bus_range.start;
>
> return pci->cfg.win[idx] + ((devfn << 12) | where);
> @@ -80,8 +83,7 @@ static int gen_pci_config_read(struct pci_bus *bus, unsigned int devfn,
> int where, int size, u32 *val)
> {
> void __iomem *addr;
> - struct pci_sys_data *sys = bus->sysdata;
> - struct gen_pci *pci = sys->private_data;
> + struct gen_pci *pci = gen_pci_from_sys(bus->sysdata);
>
> addr = pci->cfg.ops->map_bus(bus, devfn, where);
>
> @@ -103,8 +105,7 @@ static int gen_pci_config_write(struct pci_bus *bus, unsigned int devfn,
> int where, int size, u32 val)
> {
> void __iomem *addr;
> - struct pci_sys_data *sys = bus->sysdata;
> - struct gen_pci *pci = sys->private_data;
> + struct gen_pci *pci = gen_pci_from_sys(bus->sysdata);
>
> addr = pci->cfg.ops->map_bus(bus, devfn, where);
>
> @@ -181,10 +182,10 @@ static void gen_pci_release_of_pci_ranges(struct gen_pci *pci)
> {
> struct pci_host_bridge_window *win;
>
> - list_for_each_entry(win, &pci->resources, list)
> + list_for_each_entry(win, &pci->sys.resources, list)
> release_resource(win->res);
>
> - pci_free_resource_list(&pci->resources);
> + pci_free_resource_list(&pci->sys.resources);
> }
>
> static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
> @@ -237,7 +238,7 @@ static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
> if (err)
> goto out_release_res;
>
> - pci_add_resource_offset(&pci->resources, res, offset);
> + pci_add_resource_offset(&pci->sys.resources, res, offset);
> }
>
> if (!res_valid) {
> @@ -306,17 +307,10 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
> }
>
> /* Register bus resource */
> - pci_add_resource(&pci->resources, bus_range);
> + pci_add_resource(&pci->sys.resources, bus_range);
> return 0;
> }
>
> -static int gen_pci_setup(int nr, struct pci_sys_data *sys)
> -{
> - struct gen_pci *pci = sys->private_data;
> - list_splice_init(&pci->resources, &sys->resources);
> - return 1;
> -}
> -
> static int gen_pci_probe(struct platform_device *pdev)
> {
> int err;
> @@ -326,17 +320,12 @@ static int gen_pci_probe(struct platform_device *pdev)
> struct device *dev = &pdev->dev;
> struct device_node *np = dev->of_node;
> struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
> - struct hw_pci hw = {
> - .nr_controllers = 1,
> - .private_data = (void **)&pci,
> - .setup = gen_pci_setup,
> - .map_irq = of_irq_parse_and_map_pci,
> - .ops = &gen_pci_ops,
> - };
>
> if (!pci)
> return -ENOMEM;
>
> + pci->sys.map_irq = of_irq_parse_and_map_pci,
> +
> type = of_get_property(np, "device_type", NULL);
> if (!type || strcmp(type, "pci")) {
> dev_err(dev, "invalid \"device_type\" %s\n", type);
> @@ -355,7 +344,7 @@ static int gen_pci_probe(struct platform_device *pdev)
> pci->cfg.ops = of_id->data;
> pci->host.dev.parent = dev;
> INIT_LIST_HEAD(&pci->host.windows);
> - INIT_LIST_HEAD(&pci->resources);
> + INIT_LIST_HEAD(&pci->sys.resources);
>
> /* Parse our PCI ranges and request their resources */
> err = gen_pci_parse_request_of_pci_ranges(pci);
> @@ -369,8 +358,12 @@ static int gen_pci_probe(struct platform_device *pdev)
> return err;
> }
>
> - pci_common_init_dev(dev, &hw);
> - return 0;
> + pci_add_flags(PCI_REASSIGN_ALL_RSRC);
> + err = pci_init_single(dev, &pci->sys, NULL, &gen_pci_ops);
> + if (err)
> + gen_pci_release_of_pci_ranges(pci);
> +
> + return err;
> }
>
> static struct platform_driver gen_pci_driver = {
> diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
> index b1315e197ffb..e1381c0699be 100644
> --- a/drivers/pci/host/pci-mvebu.c
> +++ b/drivers/pci/host/pci-mvebu.c
> @@ -99,6 +99,7 @@ struct mvebu_pcie_port;
> struct mvebu_pcie {
> struct platform_device *pdev;
> struct mvebu_pcie_port *ports;
> + struct pci_sys_data sysdata;
> struct msi_chip *msi;
> struct resource io;
> char io_name[30];
> @@ -611,7 +612,7 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
>
> static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
> {
> - return sys->private_data;
> + return container_of(sys, struct mvebu_pcie, sysdata);
> }
>
> static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
> @@ -718,11 +719,26 @@ static struct pci_ops mvebu_pcie_ops = {
> .write = mvebu_pcie_wr_conf,
> };
>
> -static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
> +/* FIXME: move the code around to avoid these */
> +static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys);
> +static void mvebu_pcie_add_bus(struct pci_bus *bus);
> +static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
> + const struct resource *res,
> + resource_size_t start,
> + resource_size_t size,
> + resource_size_t align);
> +
> +static int mvebu_pcie_enable(struct mvebu_pcie *pcie)
> {
> - struct mvebu_pcie *pcie = sys_to_pcie(sys);
> int i;
> int domain = 0;
> + struct pci_sys_data *sys = &pcie->sysdata;
> +
> + pcie->sysdata = (struct pci_sys_data) {
> + .map_irq = of_irq_parse_and_map_pci,
> + .align_resource = mvebu_pcie_align_resource,
> + .add_bus = mvebu_pcie_add_bus,
> + };
>
> #ifdef CONFIG_PCI_DOMAINS
> domain = sys->domain;
> @@ -738,11 +754,13 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
> if (request_resource(&iomem_resource, &pcie->mem))
> return 0;
>
> + INIT_LIST_HEAD(&sys->resources);
> if (resource_size(&pcie->realio) != 0) {
> if (request_resource(&ioport_resource, &pcie->realio)) {
> release_resource(&pcie->mem);
> return 0;
> }
> +
> pci_add_resource_offset(&sys->resources, &pcie->realio,
> sys->io_offset);
> }
> @@ -756,7 +774,9 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
> mvebu_pcie_setup_hw(port);
> }
>
> - return 1;
> + pci_add_flags(PCI_REASSIGN_ALL_RSRC);
> + return pci_init_single(&pcie->pdev->dev, &pcie->sysdata,
> + mvebu_pcie_scan_bus, &mvebu_pcie_ops);
> }
>
> static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys)
> @@ -810,24 +830,6 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
> return start;
> }
>
> -static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
> -{
> - struct hw_pci hw;
> -
> - memset(&hw, 0, sizeof(hw));
> -
> - hw.nr_controllers = 1;
> - hw.private_data = (void **)&pcie;
> - hw.setup = mvebu_pcie_setup;
> - hw.scan = mvebu_pcie_scan_bus;
> - hw.map_irq = of_irq_parse_and_map_pci;
> - hw.ops = &mvebu_pcie_ops;
> - hw.align_resource = mvebu_pcie_align_resource;
> - hw.add_bus = mvebu_pcie_add_bus;
> -
> - pci_common_init(&hw);
> -}
> -
> /*
> * Looks up the list of register addresses encoded into the reg =
> * <...> property for one that matches the given port/lane. Once
> @@ -1066,9 +1068,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
> pci_ioremap_io(i, pcie->io.start + i);
>
> mvebu_pcie_msi_enable(pcie);
> - mvebu_pcie_enable(pcie);
> -
> - return 0;
> + return mvebu_pcie_enable(pcie);
> }
>
> static const struct of_device_id mvebu_pcie_of_match_table[] = {
>
>
>

--
====================
| I would like to |
| fix the world, |
| but they're not |
| giving me the |
\ source code! /
---------------
¯\_(ツ)_/¯

2014-10-27 16:10:39

by Lorenzo Pieralisi

[permalink] [raw]
Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Thu, Oct 23, 2014 at 05:52:06PM +0100, Jason Gunthorpe wrote:
> On Thu, Oct 23, 2014 at 12:27:31PM +0100, Lorenzo Pieralisi wrote:
>
> > I think that by removing that, we could switch to CONFIG_PCI_DOMAINS_GENERIC
> > on ARM32. I will remove the dependency in drivers/pci/host/pci-mvebu.c
> > introduced by commit 2613ba48. pci_sys_data.domain is always 0 in that
> > driver, so its usefulness is doubtful. Comments welcome; I copied Jason in
> > case he has comments.
>
> pcie-mvebu is like all the other new drivers, each top level DT node
> that introduces the interface should have a unique domain number. It
> would be very strange (and currently unsupported by the driver) to
> ever have more than 1 mvebu top level node in any DT.

Which, as a matter of fact, I take to mean that pci_sys_data.domain is
useless in pci-mvebu.c, since that value will always be 0 (at least it
is in the current driver):

#ifdef CONFIG_PCI_DOMAINS
domain = sys->domain;
#endif

Am I missing something? Is that domain number meant to be used for anything
else?

Thanks,
Lorenzo

2014-11-05 23:39:28

by Bjorn Helgaas

[permalink] [raw]
Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Wed, Oct 22, 2014 at 10:52:19PM +0200, Arnd Bergmann wrote:
> On Wednesday 22 October 2014 16:59:14 Lorenzo Pieralisi wrote:
> > On Wed, Oct 01, 2014 at 10:38:45AM +0100, Arnd Bergmann wrote:
> >
> > [...]
> >
> > > The arm32 implementations of pci_domain_nr/pci_proc_domain can probably be
> > > removed if we change the arm32 pcibios_init_hw function to call the new
> > > interfaces that set the domain number.
> >
> > I wished, but it is a bit more complicated than I thought unfortunately,
> > mostly because some drivers, eg cns3xxx set the domain numbers
> > statically in pci_sys_data and this sets a chain of dependency that is
> > not easy to untangle. I think cns3xxx is the only legacy driver that "uses"
> > the domain number (in pci_sys_data) in a way that clashes with the
> > generic domain_nr implementation, I need to give it more thought.
>
> Just had a look at that driver, shouldn't be too hard to change, see below.
>
> Signed-off-by: Arnd Bergmann <[email protected]>

This patch is in my patchwork, but it lacks a topic & changelog and I'm not
sure of its state, so I'm going to drop it for now. Please post it again
if you want me to do something with it. I guess it only touches arch/arm,
so it would probably be merged via your tree anyway.

Bjorn

> diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
> index 45d6bd09e6ef..aa4b9d7c52fd 100644
> --- a/arch/arm/mach-cns3xxx/pcie.c
> +++ b/arch/arm/mach-cns3xxx/pcie.c
> @@ -30,18 +30,15 @@ struct cns3xxx_pcie {
> unsigned int irqs[2];
> struct resource res_io;
> struct resource res_mem;
> - struct hw_pci hw_pci;
> -
> + int port;
> bool linked;
> };
>
> -static struct cns3xxx_pcie cns3xxx_pcie[]; /* forward decl. */
> -
> static struct cns3xxx_pcie *sysdata_to_cnspci(void *sysdata)
> {
> struct pci_sys_data *root = sysdata;
>
> - return &cns3xxx_pcie[root->domain];
> + return root->private_data;
> }
>
> static struct cns3xxx_pcie *pdev_to_cnspci(const struct pci_dev *dev)
> @@ -192,13 +189,7 @@ static struct cns3xxx_pcie cns3xxx_pcie[] = {
> .flags = IORESOURCE_MEM,
> },
> .irqs = { IRQ_CNS3XXX_PCIE0_RC, IRQ_CNS3XXX_PCIE0_DEVICE, },
> - .hw_pci = {
> - .domain = 0,
> - .nr_controllers = 1,
> - .ops = &cns3xxx_pcie_ops,
> - .setup = cns3xxx_pci_setup,
> - .map_irq = cns3xxx_pcie_map_irq,
> - },
> + .port = 0,
> },
> [1] = {
> .host_regs = (void __iomem *)CNS3XXX_PCIE1_HOST_BASE_VIRT,
> @@ -217,19 +208,13 @@ static struct cns3xxx_pcie cns3xxx_pcie[] = {
> .flags = IORESOURCE_MEM,
> },
> .irqs = { IRQ_CNS3XXX_PCIE1_RC, IRQ_CNS3XXX_PCIE1_DEVICE, },
> - .hw_pci = {
> - .domain = 1,
> - .nr_controllers = 1,
> - .ops = &cns3xxx_pcie_ops,
> - .setup = cns3xxx_pci_setup,
> - .map_irq = cns3xxx_pcie_map_irq,
> - },
> + .port = 1,
> },
> };
>
> static void __init cns3xxx_pcie_check_link(struct cns3xxx_pcie *cnspci)
> {
> - int port = cnspci->hw_pci.domain;
> + int port = cnspci->port;
> u32 reg;
> unsigned long time;
>
> @@ -260,9 +245,10 @@ static void __init cns3xxx_pcie_check_link(struct cns3xxx_pcie *cnspci)
>
> static void __init cns3xxx_pcie_hw_init(struct cns3xxx_pcie *cnspci)
> {
> - int port = cnspci->hw_pci.domain;
> + int port = cnspci->port;
> struct pci_sys_data sd = {
> .domain = port,
> + .private_data = cnspci,
> };
> struct pci_bus bus = {
> .number = 0,
> @@ -323,6 +309,14 @@ static int cns3xxx_pcie_abort_handler(unsigned long addr, unsigned int fsr,
> void __init cns3xxx_pcie_init_late(void)
> {
> int i;
> + void *private_data;
> + struct hw_pci hw_pci = {
> + .nr_controllers = 1,
> + .ops = &cns3xxx_pcie_ops,
> + .setup = cns3xxx_pci_setup,
> + .map_irq = cns3xxx_pcie_map_irq,
> + .private_data = &private_data,
> + };
>
> pcibios_min_io = 0;
> pcibios_min_mem = 0;
> @@ -335,7 +329,9 @@ void __init cns3xxx_pcie_init_late(void)
> cns3xxx_pwr_soft_rst(0x1 << PM_SOFT_RST_REG_OFFST_PCIE(i));
> cns3xxx_pcie_check_link(&cns3xxx_pcie[i]);
> cns3xxx_pcie_hw_init(&cns3xxx_pcie[i]);
> - pci_common_init(&cns3xxx_pcie[i].hw_pci);
> + hw_pci->domain = i;
> + private_data = &cns3xxx_pcie[i];
> + pci_common_init(&hw_pci);
> }
>
> pci_assign_unassigned_resources();
>
>

2014-11-05 23:41:06

by Bjorn Helgaas

[permalink] [raw]
Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Thu, Oct 23, 2014 at 03:33:16PM +0200, Arnd Bergmann wrote:
> On Thursday 23 October 2014 10:13:09 Liviu Dudau wrote:
> > > @@ -335,7 +329,9 @@ void __init cns3xxx_pcie_init_late(void)
> > > cns3xxx_pwr_soft_rst(0x1 << PM_SOFT_RST_REG_OFFST_PCIE(i));
> > > cns3xxx_pcie_check_link(&cns3xxx_pcie[i]);
> > > cns3xxx_pcie_hw_init(&cns3xxx_pcie[i]);
> > > - pci_common_init(&cns3xxx_pcie[i].hw_pci);
> > > + hw_pci->domain = i;
> > > + private_data = &cns3xxx_pcie[i];
> >
> > Is this dance with pointers absolutely necessary? Does gcc throw dishes at you
> > for doing hw_pci->private_data = &cns3xxx_pcie[i] directly?
>
> hw_pci->private_data is an array of pointers to private_data for each
> host controller instance within the domain. There is only one entry
> > here, but you still need the correct type, so that would be
>
> hw_pci->private_data = (void **)&&cns3xxx_pcie[i];
>
> which is even more confusing and ugly than what I wrote. If you have
> a better idea, I'm all for it. Maybe it's clearer to write like this
> (taken from rcar driver)?
>
> void *hw_private[1];
> hw_pci.private_data = hw_private;
>
> for each host {
> ...
> hw_private[0] = &cns3xxx_pcie[i];
> pci_common_init_dev(&hw_pci);
> }
>
> Note that all 'modern' controllers always use nr_controllers=1, so we
> only need a single private_data pointer per domain, and the entire
> hw_pci interface is a bit pointless.
>
> The platforms that currently require it are iop13xx, dove, mv78xx0
> and orion5x. We have plans to remove the last three platforms in
> the next merge window or two, once all users are able to migrate to
> mach-mvebu. Once that happens, we could probably move the entire
> hw_pci logic that deals with multiple hosts per domain into the
> iop13xx pci driver if we want to. A less intrusive simplification
> would be to convert all 'multiplatform'-aware host controllers to
> use pci_common_init_dev() and then take hw_pci out of that.
>
> See below for a sample patch I just did. It duplicates the code from
> pci_common_init_dev/pci_common_init because we know that all users
> of pci_common_init_dev are modern and only pass a single host bridge.
> The new pci_common_init_dev is simpler than the old one but should
> do the exact same thing for all current users, with the addition
> of propagating the return value.

Same with this one; I'm ignoring for now on the theory that this was just a
sample to show the idea, and a more formal patch might come later.

Bjorn

> pci_init_single() is the new internal helper and we should be able to
> convert all existing users of pci_common_init_dev() to use that directly
> and no longer define hw_pci at all.
>
> I've converted two drivers to give an example, but the conversions
> should be done in follow-up patches really, and the pci_common_init_dev
> function removed after all users are moved over.
>
> The new pci_init_single() is also rather simple, and it should just
> converge with what we do for arm64 over time.
>
> Arnd
>
> ---
> arch/arm/include/asm/mach/pci.h | 20 ++++---
> arch/arm/kernel/bios32.c | 103 ++++++++++++++++++++++++++++++++++--
> drivers/pci/host/pci-host-generic.c | 53 ++++++++-----------
> drivers/pci/host/pci-mvebu.c | 44 +++++++--------
> 4 files changed, 157 insertions(+), 63 deletions(-)
>
> diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
> index 7fc42784becb..fe7e13759ec0 100644
> --- a/arch/arm/include/asm/mach/pci.h
> +++ b/arch/arm/include/asm/mach/pci.h
> @@ -73,16 +73,22 @@ struct pci_sys_data {
> /*
> * Call this with your hw_pci struct to initialise the PCI system.
> */
> -void pci_common_init_dev(struct device *, struct hw_pci *);
> +void pci_common_init(struct hw_pci *);
>
> /*
> - * Compatibility wrapper for older platforms that do not care about
> - * passing the parent device.
> + * Used by modern platforms, only one host allowed.
> */
> -static inline void pci_common_init(struct hw_pci *hw)
> -{
> - pci_common_init_dev(NULL, hw);
> -}
> +int pci_common_init_dev(struct device *, struct hw_pci *);
> +
> +/*
> + * Replaces pci_common_init_dev for drivers that want to do the
> + * initialization simpler and avoid defining hw_pci
> + */
> +int pci_init_single(struct device *parent,
> + struct pci_sys_data *sys,
> + struct pci_bus *(*scan)(int nr, struct pci_sys_data *),
> + struct pci_ops *ops);
> +
>
> /*
> * Setup early fixed I/O mapping.
> diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
> index 17a26c17f7f5..bccc8703e575 100644
> --- a/arch/arm/kernel/bios32.c
> +++ b/arch/arm/kernel/bios32.c
> @@ -456,8 +456,7 @@ static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
> return 0;
> }
>
> -static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
> - struct list_head *head)
> +static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
> {
> struct pci_sys_data *sys = NULL;
> int ret;
> @@ -494,7 +493,7 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
> if (hw->scan)
> sys->bus = hw->scan(nr, sys);
> else
> - sys->bus = pci_scan_root_bus(parent, sys->busnr,
> + sys->bus = pci_scan_root_bus(NULL, sys->busnr,
> hw->ops, sys, &sys->resources);
>
> if (!sys->bus)
> @@ -511,7 +510,7 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
> }
> }
>
> -void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
> +void pci_common_init(struct hw_pci *hw)
> {
> struct pci_sys_data *sys;
> LIST_HEAD(head);
> @@ -519,7 +518,7 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
> pci_add_flags(PCI_REASSIGN_ALL_RSRC);
> if (hw->preinit)
> hw->preinit();
> - pcibios_init_hw(parent, hw, &head);
> + pcibios_init_hw(hw, &head);
> if (hw->postinit)
> hw->postinit();
>
> @@ -559,6 +558,100 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
> }
> }
>
> +int pci_init_single(struct device *parent,
> + struct pci_sys_data *sys,
> + struct pci_bus *(*scan)(int nr, struct pci_sys_data *),
> + struct pci_ops *ops)
> +{
> + int ret;
> + struct pci_bus *bus;
> +
> + ret = pcibios_init_resources(0, sys);
> + if (ret)
> + return ret;
> +
> + if (scan)
> + bus = scan(0, sys);
> + else
> + bus = pci_scan_root_bus(parent, 0, ops, sys, &sys->resources);
> +
> + if (!bus) {
> + dev_err(parent, "PCI: unable to scan bus!");
> + return -ENXIO;
> + }
> + sys->bus = bus;
> +
> + pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);
> +
> + if (!pci_has_flag(PCI_PROBE_ONLY)) {
> + /*
> + * Size the bridge windows.
> + */
> + pci_bus_size_bridges(bus);
> +
> + /*
> + * Assign resources.
> + */
> + pci_bus_assign_resources(bus);
> + }
> +
> + /*
> + * Tell drivers about devices found.
> + */
> + pci_bus_add_devices(bus);
> +
> + /* Configure PCI Express settings */
> + if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
> + struct pci_bus *child;
> +
> + list_for_each_entry(child, &bus->children, node)
> + pcie_bus_configure_settings(child);
> + }
> +
> + return 0;
> +}
> +
> +int pci_common_init_dev(struct device *parent, struct hw_pci *hw)
> +{
> + struct pci_sys_data *sys;
> + int ret;
> +
> + if (hw->nr_controllers != 1 ||
> + hw->preinit || hw->postinit)
> + return -EINVAL;
> +
> + sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
> + if (!sys)
> + return -ENOMEM;
> +
> +#ifdef CONFIG_PCI_DOMAINS
> + sys->domain = hw->domain;
> +#endif
> + sys->swizzle = hw->swizzle;
> + sys->map_irq = hw->map_irq;
> + sys->align_resource = hw->align_resource;
> + sys->add_bus = hw->add_bus;
> + sys->remove_bus = hw->remove_bus;
> + INIT_LIST_HEAD(&sys->resources);
> +
> + if (hw->private_data)
> + sys->private_data = hw->private_data[0];
> +
> + pci_add_flags(PCI_REASSIGN_ALL_RSRC);
> + ret = hw->setup(0, sys);
> + if (ret == 0)
> + ret = -ENXIO;
> + if (ret < 0)
> + return ret;
> +
> + ret = pcibios_init_sysdata(parent, sys, hw->scan, hw->ops);
> + if (ret)
> + /* FIXME: undo ->setup */
> + kfree(sys);
> +
> + return ret;
> +}
> +
> #ifndef CONFIG_PCI_HOST_ITE8152
> void pcibios_set_master(struct pci_dev *dev)
> {
> diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
> index 3d2076f59911..3542a7b740e5 100644
> --- a/drivers/pci/host/pci-host-generic.c
> +++ b/drivers/pci/host/pci-host-generic.c
> @@ -40,16 +40,20 @@ struct gen_pci_cfg_windows {
>
> struct gen_pci {
> struct pci_host_bridge host;
> + struct pci_sys_data sys;
> struct gen_pci_cfg_windows cfg;
> - struct list_head resources;
> };
>
> +static inline struct gen_pci *gen_pci_from_sys(struct pci_sys_data *sys)
> +{
> + return container_of(sys, struct gen_pci, sys);
> +}
> +
> static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
> unsigned int devfn,
> int where)
> {
> - struct pci_sys_data *sys = bus->sysdata;
> - struct gen_pci *pci = sys->private_data;
> + struct gen_pci *pci = gen_pci_from_sys(bus->sysdata);
> resource_size_t idx = bus->number - pci->cfg.bus_range.start;
>
> return pci->cfg.win[idx] + ((devfn << 8) | where);
> @@ -64,8 +68,7 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
> unsigned int devfn,
> int where)
> {
> - struct pci_sys_data *sys = bus->sysdata;
> - struct gen_pci *pci = sys->private_data;
> + struct gen_pci *pci = gen_pci_from_sys(bus->sysdata);
> resource_size_t idx = bus->number - pci->cfg.bus_range.start;
>
> return pci->cfg.win[idx] + ((devfn << 12) | where);
> @@ -80,8 +83,7 @@ static int gen_pci_config_read(struct pci_bus *bus, unsigned int devfn,
> int where, int size, u32 *val)
> {
> void __iomem *addr;
> - struct pci_sys_data *sys = bus->sysdata;
> - struct gen_pci *pci = sys->private_data;
> + struct gen_pci *pci = gen_pci_from_sys(bus->sysdata);
>
> addr = pci->cfg.ops->map_bus(bus, devfn, where);
>
> @@ -103,8 +105,7 @@ static int gen_pci_config_write(struct pci_bus *bus, unsigned int devfn,
> int where, int size, u32 val)
> {
> void __iomem *addr;
> - struct pci_sys_data *sys = bus->sysdata;
> - struct gen_pci *pci = sys->private_data;
> + struct gen_pci *pci = gen_pci_from_sys(bus->sysdata);
>
> addr = pci->cfg.ops->map_bus(bus, devfn, where);
>
> @@ -181,10 +182,10 @@ static void gen_pci_release_of_pci_ranges(struct gen_pci *pci)
> {
> struct pci_host_bridge_window *win;
>
> - list_for_each_entry(win, &pci->resources, list)
> + list_for_each_entry(win, &pci->sys.resources, list)
> release_resource(win->res);
>
> - pci_free_resource_list(&pci->resources);
> + pci_free_resource_list(&pci->sys.resources);
> }
>
> static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
> @@ -237,7 +238,7 @@ static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
> if (err)
> goto out_release_res;
>
> - pci_add_resource_offset(&pci->resources, res, offset);
> + pci_add_resource_offset(&pci->sys.resources, res, offset);
> }
>
> if (!res_valid) {
> @@ -306,17 +307,10 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
> }
>
> /* Register bus resource */
> - pci_add_resource(&pci->resources, bus_range);
> + pci_add_resource(&pci->sys.resources, bus_range);
> return 0;
> }
>
> -static int gen_pci_setup(int nr, struct pci_sys_data *sys)
> -{
> - struct gen_pci *pci = sys->private_data;
> - list_splice_init(&pci->resources, &sys->resources);
> - return 1;
> -}
> -
> static int gen_pci_probe(struct platform_device *pdev)
> {
> int err;
> @@ -326,17 +320,12 @@ static int gen_pci_probe(struct platform_device *pdev)
> struct device *dev = &pdev->dev;
> struct device_node *np = dev->of_node;
> struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
> - struct hw_pci hw = {
> - .nr_controllers = 1,
> - .private_data = (void **)&pci,
> - .setup = gen_pci_setup,
> - .map_irq = of_irq_parse_and_map_pci,
> - .ops = &gen_pci_ops,
> - };
>
> if (!pci)
> return -ENOMEM;
>
> + pci->sys.map_irq = of_irq_parse_and_map_pci,
> +
> type = of_get_property(np, "device_type", NULL);
> if (!type || strcmp(type, "pci")) {
> dev_err(dev, "invalid \"device_type\" %s\n", type);
> @@ -355,7 +344,7 @@ static int gen_pci_probe(struct platform_device *pdev)
> pci->cfg.ops = of_id->data;
> pci->host.dev.parent = dev;
> INIT_LIST_HEAD(&pci->host.windows);
> - INIT_LIST_HEAD(&pci->resources);
> + INIT_LIST_HEAD(&pci->sys.resources);
>
> /* Parse our PCI ranges and request their resources */
> err = gen_pci_parse_request_of_pci_ranges(pci);
> @@ -369,8 +358,12 @@ static int gen_pci_probe(struct platform_device *pdev)
> return err;
> }
>
> - pci_common_init_dev(dev, &hw);
> - return 0;
> + pci_add_flags(PCI_REASSIGN_ALL_RSRC);
> + err = pci_init_single(dev, &pci->sys, NULL, &gen_pci_ops);
> + if (err)
> + gen_pci_release_of_pci_ranges(pci);
> +
> + return err;
> }
>
> static struct platform_driver gen_pci_driver = {
> diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
> index b1315e197ffb..e1381c0699be 100644
> --- a/drivers/pci/host/pci-mvebu.c
> +++ b/drivers/pci/host/pci-mvebu.c
> @@ -99,6 +99,7 @@ struct mvebu_pcie_port;
> struct mvebu_pcie {
> struct platform_device *pdev;
> struct mvebu_pcie_port *ports;
> + struct pci_sys_data sysdata;
> struct msi_chip *msi;
> struct resource io;
> char io_name[30];
> @@ -611,7 +612,7 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
>
> static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
> {
> - return sys->private_data;
> + return container_of(sys, struct mvebu_pcie, sysdata);
> }
>
> static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
> @@ -718,11 +719,26 @@ static struct pci_ops mvebu_pcie_ops = {
> .write = mvebu_pcie_wr_conf,
> };
>
> -static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
> +/* FIXME: move the code around to avoid these */
> +static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys);
> +static void mvebu_pcie_add_bus(struct pci_bus *bus);
> +static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
> + const struct resource *res,
> + resource_size_t start,
> + resource_size_t size,
> + resource_size_t align);
> +
> +static int mvebu_pcie_enable(struct mvebu_pcie *pcie)
> {
> - struct mvebu_pcie *pcie = sys_to_pcie(sys);
> int i;
> int domain = 0;
> + struct pci_sys_data *sys = &pcie->sysdata;
> +
> + pcie->sysdata = (struct pci_sys_data) {
> + .map_irq = of_irq_parse_and_map_pci,
> + .align_resource = mvebu_pcie_align_resource,
> + .add_bus = mvebu_pcie_add_bus,
> + };
>
> #ifdef CONFIG_PCI_DOMAINS
> domain = sys->domain;
> @@ -738,11 +754,13 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
> if (request_resource(&iomem_resource, &pcie->mem))
> return 0;
>
> + INIT_LIST_HEAD(&sys->resources);
> if (resource_size(&pcie->realio) != 0) {
> if (request_resource(&ioport_resource, &pcie->realio)) {
> release_resource(&pcie->mem);
> return 0;
> }
> +
> pci_add_resource_offset(&sys->resources, &pcie->realio,
> sys->io_offset);
> }
> @@ -756,7 +774,9 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
> mvebu_pcie_setup_hw(port);
> }
>
> - return 1;
> + pci_add_flags(PCI_REASSIGN_ALL_RSRC);
> + return pci_init_single(&pcie->pdev->dev, &pcie->sysdata,
> + mvebu_pcie_scan_bus, &mvebu_pcie_ops);
> }
>
> static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys)
> @@ -810,24 +830,6 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
> return start;
> }
>
> -static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
> -{
> - struct hw_pci hw;
> -
> - memset(&hw, 0, sizeof(hw));
> -
> - hw.nr_controllers = 1;
> - hw.private_data = (void **)&pcie;
> - hw.setup = mvebu_pcie_setup;
> - hw.scan = mvebu_pcie_scan_bus;
> - hw.map_irq = of_irq_parse_and_map_pci;
> - hw.ops = &mvebu_pcie_ops;
> - hw.align_resource = mvebu_pcie_align_resource;
> - hw.add_bus = mvebu_pcie_add_bus;
> -
> - pci_common_init(&hw);
> -}
> -
> /*
> * Looks up the list of register addresses encoded into the reg =
> * <...> property for one that matches the given port/lane. Once
> @@ -1066,9 +1068,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
> pci_ioremap_io(i, pcie->io.start + i);
>
> mvebu_pcie_msi_enable(pcie);
> - mvebu_pcie_enable(pcie);
> -
> - return 0;
> + return mvebu_pcie_enable(pcie);
> }
>
> static const struct of_device_id mvebu_pcie_of_match_table[] = {
>

2014-11-06 00:05:59

by Arnd Bergmann

[permalink] [raw]
Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Wednesday 05 November 2014 16:39:21 Bjorn Helgaas wrote:
> On Wed, Oct 22, 2014 at 10:52:19PM +0200, Arnd Bergmann wrote:
> > On Wednesday 22 October 2014 16:59:14 Lorenzo Pieralisi wrote:
> > > On Wed, Oct 01, 2014 at 10:38:45AM +0100, Arnd Bergmann wrote:
> > >
> > > [...]
> > >
> > > > The arm32 implementations of pci_domain_nr/pci_proc_domain can probably be
> > > > removed if we change the arm32 pcibios_init_hw function to call the new
> > > > interfaces that set the domain number.
> > >
> > > I wished, but it is a bit more complicated than I thought unfortunately,
> > > mostly because some drivers, eg cns3xxx set the domain numbers
> > > statically in pci_sys_data and this sets a chain of dependency that is
> > > not easy to untangle. I think cns3xxx is the only legacy driver that "uses"
> > > the domain number (in pci_sys_data) in a way that clashes with the
> > > generic domain_nr implementation, I need to give it more thought.
> >
> > Just had a look at that driver, shouldn't be too hard to change, see below.
> >
> > Signed-off-by: Arnd Bergmann <[email protected]>
>
> This patch is in my patchwork, but it lacks a topic & changelog and I'm not
> sure of its state, so I'm going to drop it for now. Please post it again
> if you want me to do something with it. I guess it only touches arch/arm,
> so it would probably be merged via your tree anyway.

Lorenzo has posted an updated version as

"arm: cns3xxx: pci: remove artificial dependency on pci_sys_data domain",
and a second patch that depends on it. That is the version we should be
merging, though I'm not sure through which tree.

Arnd
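
To illustrate the clash being described: a legacy arm32 host driver can pin its
domain number by writing it straight into pci_sys_data, typically from its
hw_pci ->setup() hook, and then rely on that exact value elsewhere. The snippet
below is purely illustrative (it is not the actual cns3xxx code) and only
sketches the pattern, under the assumption that the driver looks up its
per-controller state by domain number:

#include <linux/pci.h>
#include <asm/mach/pci.h>

/*
 * Illustrative sketch only -- not the real cns3xxx driver.  The domain is
 * hard-wired to the controller index, so the driver can later recover its
 * per-controller state from sys->domain.
 */
static int legacy_pcie_setup(int nr, struct pci_sys_data *sys)
{
#ifdef CONFIG_PCI_DOMAINS
	sys->domain = nr;	/* statically tied to the controller index */
#endif
	/* ... request windows and add them to sys->resources ... */
	return 1;		/* >0 tells bios32 to scan this controller */
}

Once domain numbers are handed out by the PCI core instead, nothing guarantees
that sys->domain still equals the controller index, which is why a driver built
around that assumption has to be untangled before the arm32 pci_domain_nr()
implementation can go away.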

2014-11-06 00:06:47

by Arnd Bergmann

[permalink] [raw]
Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Wednesday 05 November 2014 16:40:58 Bjorn Helgaas wrote:
> >
> > See below for a sample patch I just did. It duplicates the code from
> > pci_common_init_dev/pci_common_init because we know that all users
> > of pci_common_init_dev are modern and only pass a single host bridge.
> > The new pci_common_init_dev is simpler than the old one but should
> > do the exact same thing for all current users, with the addition
> > of propagating the return value.
>
> Same with this one; I'm ignoring it for now on the theory that this was just a
> sample to show the idea, and a more formal patch might come later.
>

Correct. Or we might not do it at all, if some of the other patches
are done before we need this.

Arnd
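
For reference, a rough sketch of what a pci_init_single() helper along these
lines might look like. This is an assumption, not mainline code: the name and
signature are inferred from the sample patch's call sites, and the body simply
mirrors the single-controller path of pci_common_init_dev() while propagating
errors instead of swallowing them:

#include <linux/pci.h>
#include <asm/mach/pci.h>	/* struct pci_sys_data on arm32 */

/* Hypothetical helper -- signature inferred from the sample patch above. */
int pci_init_single(struct device *parent, struct pci_sys_data *sys,
		    struct pci_bus *(*scan)(int nr, struct pci_sys_data *sys),
		    struct pci_ops *ops)
{
	struct pci_bus *bus;

	/* Either use the driver's own scan hook or the generic root scan. */
	if (scan)
		bus = scan(0, sys);
	else
		bus = pci_scan_root_bus(parent, sys->busnr, ops, sys,
					&sys->resources);
	if (!bus)
		return -ENODEV;

	if (sys->map_irq)
		pci_fixup_irqs(pci_common_swizzle, sys->map_irq);

	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		pci_bus_size_bridges(bus);
		pci_bus_assign_resources(bus);
	}

	pci_bus_add_devices(bus);
	return 0;
}

With that shape, the two call sites in the patch read naturally:
pci_init_single(dev, &pci->sys, NULL, &gen_pci_ops) for the generic host, and
pci_init_single(&pcie->pdev->dev, &pcie->sysdata, mvebu_pcie_scan_bus,
&mvebu_pcie_ops) for mvebu.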

2014-11-06 09:52:41

by Lorenzo Pieralisi

[permalink] [raw]
Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

On Thu, Nov 06, 2014 at 12:05:48AM +0000, Arnd Bergmann wrote:
> On Wednesday 05 November 2014 16:39:21 Bjorn Helgaas wrote:
> > On Wed, Oct 22, 2014 at 10:52:19PM +0200, Arnd Bergmann wrote:
> > > On Wednesday 22 October 2014 16:59:14 Lorenzo Pieralisi wrote:
> > > > On Wed, Oct 01, 2014 at 10:38:45AM +0100, Arnd Bergmann wrote:
> > > >
> > > > [...]
> > > >
> > > > > The arm32 implementations of pci_domain_nr/pci_proc_domain can probably be
> > > > > removed if we change the arm32 pcibios_init_hw function to call the new
> > > > > interfaces that set the domain number.
> > > >
> > > > I wished, but it is a bit more complicated than I thought unfortunately,
> > > > mostly because some drivers, eg cns3xxx set the domain numbers
> > > > statically in pci_sys_data and this sets a chain of dependency that is
> > > > not easy to untangle. I think cns3xxx is the only legacy driver that "uses"
> > > > the domain number (in pci_sys_data) in a way that clashes with the
> > > > generic domain_nr implementation, I need to give it more thought.
> > >
> > > Just had a look at that driver, shouldn't be too hard to change, see below.
> > >
> > > Signed-off-by: Arnd Bergmann <[email protected]>
> >
> > This patch is in my patchwork, but it lacks a topic & changelog and I'm not
> > sure of its state, so I'm going to drop it for now. Please post it again
> > if you want me to do something with it. I guess it only touches arch/arm,
> > so it would probably be merged via your tree anyway.
>
> Lorenzo has posted an updated version as
>
> "arm: cns3xxx: pci: remove artificial dependency on pci_sys_data domain",
> and a second patch that depends on it. That is the version we should be
> merging, though I'm not sure through which tree.

I am posting a v2 shortly; let's discuss the best way to merge it then.

Thanks,
Lorenzo

2014-12-29 19:32:54

by Suthikulpanit, Suravee

[permalink] [raw]
Subject: Re: [RFC 2/4] PCI: generic: Add support for ARM64 and MSI(x)

Hi,

I am not sure if this thread is still alive. I'm trying to see what I
can do to help clean up/convert the PCI GHC so that it also works for arm64
with zero or minimal ifdefs.

Please let me know if someone is already working on this. I noticed that
Lorenzo's patches are already in 3.19-rc1 and in Bjorn's
pci/domain branch. Otherwise, I'll try to continue the work based on the
sample patch from Arnd here.

On 10/23/14 08:33, Arnd Bergmann wrote:
> [...]
> diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
> index 3d2076f59911..3542a7b740e5 100644
> --- a/drivers/pci/host/pci-host-generic.c
> +++ b/drivers/pci/host/pci-host-generic.c
> @@ -40,16 +40,20 @@ struct gen_pci_cfg_windows {
>
> struct gen_pci {
> struct pci_host_bridge host;
> + struct pci_sys_data sys;
> struct gen_pci_cfg_windows cfg;
> - struct list_head resources;
> };

Arnd, based on the patch here, if we want to use the
pci-host-generic driver on arm64, it means we will have to introduce
struct pci_sys_data for arm64 as well (e.g. move the struct from
include/asm/mach/pci.h to include/linux/pci.h). Is this also your
intention?
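
For illustration, one possible shape of such a shared structure, assuming only
the handful of fields the sample patch actually touches (the real arm32
pci_sys_data carries more; the names below are simply copied from it):

#include <linux/list.h>
#include <linux/pci.h>

/*
 * Illustrative sketch only: a trimmed-down, arch-neutral pci_sys_data as it
 * might look if lifted out of arch/arm/include/asm/mach/pci.h.  Only the
 * fields used by the sample patch are shown.
 */
struct pci_sys_data {
	struct list_head resources;	/* root bus windows, incl. bus range */
	int		 busnr;		/* primary bus number of this bridge */
	int		 domain;	/* PCI domain number */
	resource_size_t	 io_offset;	/* CPU -> bus offset for I/O windows */
	int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
	void (*add_bus)(struct pci_bus *bus);
	resource_size_t (*align_resource)(struct pci_dev *dev,
					  const struct resource *res,
					  resource_size_t start,
					  resource_size_t size,
					  resource_size_t align);
};

Host drivers would then embed this in their own per-controller structure and
recover it with container_of(), exactly as gen_pci_from_sys() and sys_to_pcie()
do in the patch.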

Thanks,

Suravee

> [...]