The cacheinfo for a given CPU at a given index is used at quite a few
places by fetching the base point for index 0 using the helper
per_cpu_cacheinfo(cpu) and offseting it by the required index.
Instead, add another helper to fetch the required pointer directly and
use it to simplify and improve readability.
Signed-off-by: Sudeep Holla <[email protected]>
---
drivers/base/cacheinfo.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index b0bde272e2ae..c4547d8ac6f3 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -25,6 +25,8 @@ static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list)
+#define per_cpu_cacheinfo_idx(cpu, idx) \
+ (per_cpu_cacheinfo(cpu) + (idx))
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
@@ -172,7 +174,7 @@ static int cache_setup_of_node(unsigned int cpu)
}
while (index < cache_leaves(cpu)) {
- this_leaf = this_cpu_ci->info_list + index;
+ this_leaf = per_cpu_cacheinfo_idx(cpu, index);
if (this_leaf->level != 1)
np = of_find_next_cache_node(np);
else
@@ -231,7 +233,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
for (index = 0; index < cache_leaves(cpu); index++) {
unsigned int i;
- this_leaf = this_cpu_ci->info_list + index;
+ this_leaf = per_cpu_cacheinfo_idx(cpu, index);
/* skip if shared_cpu_map is already populated */
if (!cpumask_empty(&this_leaf->shared_cpu_map))
continue;
@@ -242,7 +244,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
if (i == cpu || !sib_cpu_ci->info_list)
continue;/* skip if itself or no cacheinfo */
- sib_leaf = sib_cpu_ci->info_list + index;
+ sib_leaf = per_cpu_cacheinfo_idx(i, index);
if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
@@ -258,12 +260,11 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
- struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf, *sib_leaf;
unsigned int sibling, index;
for (index = 0; index < cache_leaves(cpu); index++) {
- this_leaf = this_cpu_ci->info_list + index;
+ this_leaf = per_cpu_cacheinfo_idx(cpu, index);
for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
struct cpu_cacheinfo *sib_cpu_ci;
@@ -609,7 +610,6 @@ static int cache_add_dev(unsigned int cpu)
int rc;
struct device *ci_dev, *parent;
struct cacheinfo *this_leaf;
- struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
const struct attribute_group **cache_groups;
rc = cpu_cache_sysfs_init(cpu);
@@ -618,7 +618,7 @@ static int cache_add_dev(unsigned int cpu)
parent = per_cpu_cache_dev(cpu);
for (i = 0; i < cache_leaves(cpu); i++) {
- this_leaf = this_cpu_ci->info_list + i;
+ this_leaf = per_cpu_cacheinfo_idx(cpu, i);
if (this_leaf->disable_sysfs)
continue;
if (this_leaf->type == CACHE_TYPE_NOCACHE)
--
2.36.1
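For context, here is a minimal sketch of the call-site pattern this patch converts to. print_cache_levels() is a hypothetical example by the editor, not code from the patch; it would have to live in drivers/base/cacheinfo.c, where the cache_leaves() and per_cpu_cacheinfo_idx() macros are defined.

/*
 * Editor's sketch, not part of the patch: iterate a CPU's cache leaves
 * using the new helper instead of open-coding the pointer arithmetic.
 */
static void print_cache_levels(unsigned int cpu)
{
	unsigned int index;
	struct cacheinfo *this_leaf;

	for (index = 0; index < cache_leaves(cpu); index++) {
		/* was: this_leaf = per_cpu_cacheinfo(cpu) + index; */
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		pr_info("CPU%u cache index %u: level %u\n",
			cpu, index, this_leaf->level);
	}
}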
cache_leaves_are_shared() is already used even with ACPI and PPTT. It checks
whether the cache leaves are shared based on the fw_token pointer. However, it
is defined only when CONFIG_OF is enabled, which is wrong.
Move cache_leaves_are_shared() out of the CONFIG_OF guard and keep it generic.
It now also handles the case where neither OF nor ACPI is enabled.
Signed-off-by: Sudeep Holla <[email protected]>
---
drivers/base/cacheinfo.c | 20 +++++++++-----------
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index c4547d8ac6f3..417e1ebf5525 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -33,13 +33,21 @@ struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
return ci_cacheinfo(cpu);
}
-#ifdef CONFIG_OF
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
struct cacheinfo *sib_leaf)
{
+ /*
+ * For non DT/ACPI systems, assume unique level 1 caches,
+ * system-wide shared caches for all other levels. This will be used
+ * only if arch specific code has not populated shared_cpu_map
+ */
+ if (!IS_ENABLED(CONFIG_OF) && !(IS_ENABLED(CONFIG_ACPI)))
+ return !(this_leaf->level == 1);
+
return sib_leaf->fw_token == this_leaf->fw_token;
}
+#ifdef CONFIG_OF
/* OF properties to query for a given cache type */
struct cache_type_info {
const char *size_prop;
@@ -193,16 +201,6 @@ static int cache_setup_of_node(unsigned int cpu)
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
-static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
- struct cacheinfo *sib_leaf)
-{
- /*
- * For non-DT/ACPI systems, assume unique level 1 caches, system-wide
- * shared caches for all other levels. This will be used only if
- * arch specific code has not populated shared_cpu_map
- */
- return !(this_leaf->level == 1);
-}
#endif
int __weak cache_setup_acpi(unsigned int cpu)
--
2.36.1
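To make the resulting semantics explicit, here is a small hypothetical wrapper (an editor's sketch, not part of the patch). cpus_share_leaf() is an invented name and would sit in drivers/base/cacheinfo.c next to the static helper; it simply shows that the now unconditional cache_leaves_are_shared() compares fw_token on DT/ACPI systems and, when neither CONFIG_OF nor CONFIG_ACPI is set, falls back to treating level 1 as private and all other levels as system-wide shared.

/* Editor's sketch, not part of the patch. */
static bool cpus_share_leaf(unsigned int cpu, unsigned int sibling,
			    unsigned int index)
{
	struct cacheinfo *this_leaf = per_cpu_cacheinfo_idx(cpu, index);
	struct cacheinfo *sib_leaf = per_cpu_cacheinfo_idx(sibling, index);

	/* fw_token comparison with DT/ACPI, level-based fallback otherwise */
	return cache_leaves_are_shared(this_leaf, sib_leaf);
}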
It is useful to have a helper to check whether two given CPUs share the last
level cache. We can do that check by comparing the fw_token or by comparing
the cache ID. Currently we check just the fw_token, as the cache ID is
optional.
This helper can be used to build llc_sibling during arch-specific topology
parsing and to feed that information to the sched_domains. This also helps to
get rid of llc_id in the CPU topology, as it is essentially duplicate
information.
Also add a helper to check whether the LLC information in cacheinfo is valid.
Signed-off-by: Sudeep Holla <[email protected]>
---
drivers/base/cacheinfo.c | 26 ++++++++++++++++++++++++++
include/linux/cacheinfo.h | 2 ++
2 files changed, 28 insertions(+)
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 417e1ebf5525..ed74db18468f 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -47,6 +47,32 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
return sib_leaf->fw_token == this_leaf->fw_token;
}
+bool last_level_cache_is_valid(unsigned int cpu)
+{
+ struct cacheinfo *llc;
+
+ if (!cache_leaves(cpu))
+ return false;
+
+ llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
+
+ return llc->fw_token;
+}
+
+bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
+{
+ struct cacheinfo *llc_x, *llc_y;
+
+ if (!last_level_cache_is_valid(cpu_x) ||
+ !last_level_cache_is_valid(cpu_y))
+ return false;
+
+ llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
+ llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);
+
+ return cache_leaves_are_shared(llc_x, llc_y);
+}
+
#ifdef CONFIG_OF
/* OF properties to query for a given cache type */
struct cache_type_info {
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 4ff37cb763ae..7e429bc5c1a4 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -82,6 +82,8 @@ struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
int init_cache_level(unsigned int cpu);
int populate_cache_leaves(unsigned int cpu);
int cache_setup_acpi(unsigned int cpu);
+bool last_level_cache_is_valid(unsigned int cpu);
+bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y);
#ifndef CONFIG_ACPI_PPTT
/*
* acpi_find_last_cache_level is only called on ACPI enabled
--
2.36.1
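As a rough illustration of the intended use described in the commit message, here is an editor's sketch of hypothetical arch-specific topology code (not the actual arch_topology change), assuming <linux/cacheinfo.h> and <linux/cpumask.h> are included:

/*
 * Editor's sketch, not part of the patch: build a CPU's llc_sibling mask
 * from the new cacheinfo interface instead of comparing a cached llc_id.
 */
static void update_llc_siblings(unsigned int cpu, struct cpumask *llc_sibling)
{
	unsigned int sibling;

	cpumask_set_cpu(cpu, llc_sibling);
	for_each_online_cpu(sibling) {
		if (sibling == cpu)
			continue;
		if (last_level_cache_is_shared(cpu, sibling))
			cpumask_set_cpu(sibling, llc_sibling);
	}
}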
Hi Sudeep,
On 5/25/22 4:14 PM, Sudeep Holla wrote:
> cache_leaves_are_shared() is already used even with ACPI and PPTT. It checks
> whether the cache leaves are shared based on the fw_token pointer. However,
> it is defined only when CONFIG_OF is enabled, which is wrong.
>
> Move cache_leaves_are_shared() out of the CONFIG_OF guard and keep it
> generic. It now also handles the case where neither OF nor ACPI is enabled.
>
> Signed-off-by: Sudeep Holla <[email protected]>
> ---
> drivers/base/cacheinfo.c | 20 +++++++++-----------
> 1 file changed, 9 insertions(+), 11 deletions(-)
>
With below nits fixed:
Reviewed-by: Gavin Shan <[email protected]>
> diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
> index c4547d8ac6f3..417e1ebf5525 100644
> --- a/drivers/base/cacheinfo.c
> +++ b/drivers/base/cacheinfo.c
> @@ -33,13 +33,21 @@ struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
> return ci_cacheinfo(cpu);
> }
>
> -#ifdef CONFIG_OF
> static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
> struct cacheinfo *sib_leaf)
> {
> + /*
> + * For non DT/ACPI systems, assume unique level 1 caches,
> + * system-wide shared caches for all other levels. This will be used
> + * only if arch specific code has not populated shared_cpu_map
> + */
> + if (!IS_ENABLED(CONFIG_OF) && !(IS_ENABLED(CONFIG_ACPI)))
> + return !(this_leaf->level == 1);
> +
> return sib_leaf->fw_token == this_leaf->fw_token;
> }
>
if (!IS_ENABLED(CONFIG_OF) && !IS_ENABLED(CONFIG_ACPI))
or
if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
> +#ifdef CONFIG_OF
> /* OF properties to query for a given cache type */
> struct cache_type_info {
> const char *size_prop;
> @@ -193,16 +201,6 @@ static int cache_setup_of_node(unsigned int cpu)
> }
> #else
> static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
> -static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
> - struct cacheinfo *sib_leaf)
> -{
> - /*
> - * For non-DT/ACPI systems, assume unique level 1 caches, system-wide
> - * shared caches for all other levels. This will be used only if
> - * arch specific code has not populated shared_cpu_map
> - */
> - return !(this_leaf->level == 1);
> -}
> #endif
>
> int __weak cache_setup_acpi(unsigned int cpu)
>
Thanks,
Gavin
On Wed, Jun 01, 2022 at 10:44:20AM +0800, Gavin Shan wrote:
> Hi Sudeep,
>
> On 5/25/22 4:14 PM, Sudeep Holla wrote:
> > The cacheinfo for a given CPU at a given index is used at quite a few
> > places by fetching the base point for index 0 using the helper
> > per_cpu_cacheinfo(cpu) and offseting it by the required index.
> >
> > Instead, add another helper to fetch the required pointer directly and
> > use it to simplify and improve readability.
> >
> > Signed-off-by: Sudeep Holla <[email protected]>
> > ---
> > drivers/base/cacheinfo.c | 14 +++++++-------
> > 1 file changed, 7 insertions(+), 7 deletions(-)
> >
>
> s/offseting/offsetting
>
> It looks good to me with below nits fixed:
>
> Reviewed-by: Gavin Shan <[email protected]>
>
>
> > diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
> > index b0bde272e2ae..c4547d8ac6f3 100644
> > --- a/drivers/base/cacheinfo.c
> > +++ b/drivers/base/cacheinfo.c
> > @@ -25,6 +25,8 @@ static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
> > #define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))
> > #define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves)
> > #define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list)
> > +#define per_cpu_cacheinfo_idx(cpu, idx) \
> > + (per_cpu_cacheinfo(cpu) + (idx))
> > struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
> > {
> > @@ -172,7 +174,7 @@ static int cache_setup_of_node(unsigned int cpu)
> > }
> > while (index < cache_leaves(cpu)) {
> > - this_leaf = this_cpu_ci->info_list + index;
> > + this_leaf = per_cpu_cacheinfo_idx(cpu, index);
> > if (this_leaf->level != 1)
> > np = of_find_next_cache_node(np);
> > else
> > @@ -231,7 +233,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
> > for (index = 0; index < cache_leaves(cpu); index++) {
> > unsigned int i;
> > - this_leaf = this_cpu_ci->info_list + index;
> > + this_leaf = per_cpu_cacheinfo_idx(cpu, index);
> > /* skip if shared_cpu_map is already populated */
> > if (!cpumask_empty(&this_leaf->shared_cpu_map))
> > continue;
> > @@ -242,7 +244,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
> > if (i == cpu || !sib_cpu_ci->info_list)
> > continue;/* skip if itself or no cacheinfo */
> > - sib_leaf = sib_cpu_ci->info_list + index;
> > + sib_leaf = per_cpu_cacheinfo_idx(i, index);
> > if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
> > cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
> > cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
> > @@ -258,12 +260,11 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
> > static void cache_shared_cpu_map_remove(unsigned int cpu)
> > {
> > - struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
> > struct cacheinfo *this_leaf, *sib_leaf;
> > unsigned int sibling, index;
> > for (index = 0; index < cache_leaves(cpu); index++) {
> > - this_leaf = this_cpu_ci->info_list + index;
> > + this_leaf = per_cpu_cacheinfo_idx(cpu, index);
> > for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
> > struct cpu_cacheinfo *sib_cpu_ci;
> >
>
> In cache_shared_cpu_map_remove(), the newly introduced macro
> can be applied when the sibling CPU's cache info is fetched.
>
> sib_leaf = sib_cpu_ci->info_list + index;
>
> to
>
> sib_leaf = per_cpu_cacheinfo_idx(sibling, index);
>
Right, I clearly missed that one. Thanks again for the review; all other
comments are now fixed locally and pushed at [1], and will be posted as part
of the next version.
--
Regards,
Sudeep
[1] https://git.kernel.org/sudeep.holla/h/arch_topology
Hi Sudeep,
On 5/25/22 4:14 PM, Sudeep Holla wrote:
> The cacheinfo for a given CPU at a given index is used at quite a few
> places by fetching the base point for index 0 using the helper
> per_cpu_cacheinfo(cpu) and offseting it by the required index.
>
> Instead, add another helper to fetch the required pointer directly and
> use it to simplify and improve readability.
>
> Signed-off-by: Sudeep Holla <[email protected]>
> ---
> drivers/base/cacheinfo.c | 14 +++++++-------
> 1 file changed, 7 insertions(+), 7 deletions(-)
>
s/offseting/offsetting
It looks good to me with below nits fixed:
Reviewed-by: Gavin Shan <[email protected]>
> diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
> index b0bde272e2ae..c4547d8ac6f3 100644
> --- a/drivers/base/cacheinfo.c
> +++ b/drivers/base/cacheinfo.c
> @@ -25,6 +25,8 @@ static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
> #define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))
> #define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves)
> #define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list)
> +#define per_cpu_cacheinfo_idx(cpu, idx) \
> + (per_cpu_cacheinfo(cpu) + (idx))
>
> struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
> {
> @@ -172,7 +174,7 @@ static int cache_setup_of_node(unsigned int cpu)
> }
>
> while (index < cache_leaves(cpu)) {
> - this_leaf = this_cpu_ci->info_list + index;
> + this_leaf = per_cpu_cacheinfo_idx(cpu, index);
> if (this_leaf->level != 1)
> np = of_find_next_cache_node(np);
> else
> @@ -231,7 +233,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
> for (index = 0; index < cache_leaves(cpu); index++) {
> unsigned int i;
>
> - this_leaf = this_cpu_ci->info_list + index;
> + this_leaf = per_cpu_cacheinfo_idx(cpu, index);
> /* skip if shared_cpu_map is already populated */
> if (!cpumask_empty(&this_leaf->shared_cpu_map))
> continue;
> @@ -242,7 +244,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
>
> if (i == cpu || !sib_cpu_ci->info_list)
> continue;/* skip if itself or no cacheinfo */
> - sib_leaf = sib_cpu_ci->info_list + index;
> + sib_leaf = per_cpu_cacheinfo_idx(i, index);
> if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
> cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
> cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
> @@ -258,12 +260,11 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
>
> static void cache_shared_cpu_map_remove(unsigned int cpu)
> {
> - struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
> struct cacheinfo *this_leaf, *sib_leaf;
> unsigned int sibling, index;
>
> for (index = 0; index < cache_leaves(cpu); index++) {
> - this_leaf = this_cpu_ci->info_list + index;
> + this_leaf = per_cpu_cacheinfo_idx(cpu, index);
> for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
> struct cpu_cacheinfo *sib_cpu_ci;
>
In cache_shared_cpu_map_remove(), the newly introduced macro
can be applied when the sibling CPU's cache info is fetched.
sib_leaf = sib_cpu_ci->info_list + index;
to
sib_leaf = per_cpu_cacheinfo_idx(sibling, index);
> @@ -609,7 +610,6 @@ static int cache_add_dev(unsigned int cpu)
> int rc;
> struct device *ci_dev, *parent;
> struct cacheinfo *this_leaf;
> - struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
> const struct attribute_group **cache_groups;
>
> rc = cpu_cache_sysfs_init(cpu);
> @@ -618,7 +618,7 @@ static int cache_add_dev(unsigned int cpu)
>
> parent = per_cpu_cache_dev(cpu);
> for (i = 0; i < cache_leaves(cpu); i++) {
> - this_leaf = this_cpu_ci->info_list + i;
> + this_leaf = per_cpu_cacheinfo_idx(cpu, i);
> if (this_leaf->disable_sysfs)
> continue;
> if (this_leaf->type == CACHE_TYPE_NOCACHE)
>
Thanks,
Gavin
Hi Sudeep,
On 5/25/22 4:14 PM, Sudeep Holla wrote:
> It is useful to have a helper to check whether two given CPUs share the last
> level cache. We can do that check by comparing the fw_token or by comparing
> the cache ID. Currently we check just the fw_token, as the cache ID is
> optional.
>
> This helper can be used to build llc_sibling during arch-specific topology
> parsing and to feed that information to the sched_domains. This also helps
> to get rid of llc_id in the CPU topology, as it is essentially duplicate
> information.
>
> Also add a helper to check whether the LLC information in cacheinfo is valid.
>
> Signed-off-by: Sudeep Holla <[email protected]>
> ---
> drivers/base/cacheinfo.c | 26 ++++++++++++++++++++++++++
> include/linux/cacheinfo.h | 2 ++
> 2 files changed, 28 insertions(+)
>
With below nits fixed:
Reviewed-by: Gavin Shan <[email protected]>
> diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
> index 417e1ebf5525..ed74db18468f 100644
> --- a/drivers/base/cacheinfo.c
> +++ b/drivers/base/cacheinfo.c
> @@ -47,6 +47,32 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
> return sib_leaf->fw_token == this_leaf->fw_token;
> }
>
> +bool last_level_cache_is_valid(unsigned int cpu)
> +{
> + struct cacheinfo *llc;
> +
> + if (!cache_leaves(cpu))
> + return false;
> +
> + llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
> +
> + return llc->fw_token;
> +}
> +
return !!llc->fw_token;
> +bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
> +{
> + struct cacheinfo *llc_x, *llc_y;
> +
> + if (!last_level_cache_is_valid(cpu_x) ||
> + !last_level_cache_is_valid(cpu_y))
> + return false;
> +
> + llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
> + llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);
> +
> + return cache_leaves_are_shared(llc_x, llc_y);
> +}
> +
> #ifdef CONFIG_OF
> /* OF properties to query for a given cache type */
> struct cache_type_info {
> diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
> index 4ff37cb763ae..7e429bc5c1a4 100644
> --- a/include/linux/cacheinfo.h
> +++ b/include/linux/cacheinfo.h
> @@ -82,6 +82,8 @@ struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
> int init_cache_level(unsigned int cpu);
> int populate_cache_leaves(unsigned int cpu);
> int cache_setup_acpi(unsigned int cpu);
> +bool last_level_cache_is_valid(unsigned int cpu);
> +bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y);
> #ifndef CONFIG_ACPI_PPTT
> /*
> * acpi_find_last_cache_level is only called on ACPI enabled
>
Thanks,
Gavin