2007-06-04 22:31:19

by Keshavamurthy, Anil S

Subject: [Intel-IOMMU 02/10] Library routines for handling pre-allocated pool of objects

This patch provides a common interface for pre-allocating and
managing a pool of objects.
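
For illustration, a minimal usage sketch follows; the pool_alloc/pool_free
wrappers, the iova_pool name and the sizes are hypothetical and not part
of this patch:

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/respool.h>

/* callbacks matching rpool_alloc_t and rpool_free_t */
static void *pool_alloc(unsigned int size, gfp_t flags)
{
	return kmalloc(size, flags);
}

static void pool_free(void *vaddr, unsigned int size)
{
	kfree(vaddr);
}

static struct resource_pool iova_pool;

static int __init iova_pool_setup(void)
{
	/*
	 * Keep at least 256 objects of 64 bytes, grow 128 at a time.
	 * init_resource_pool() returns non-zero if nothing could be
	 * pre-allocated.
	 */
	if (init_resource_pool(&iova_pool, 256, 64, 128,
			       pool_alloc, pool_free))
		return -ENOMEM;
	return 0;
}

static void iova_pool_example(void)
{
	void *obj;

	/*
	 * Safe in atomic context; falls back to a GFP_ATOMIC
	 * allocation when the pool is empty.
	 */
	obj = get_resource_pool_obj(&iova_pool);
	if (obj)
		put_resource_pool_obj(obj, &iova_pool);
}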

Signed-off-by: Anil S Keshavamurthy <[email protected]>
---
include/linux/respool.h | 43 +++++++++++
lib/Makefile | 1
lib/respool.c | 176 ++++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 220 insertions(+)

Index: linux-2.6.22-rc3/include/linux/respool.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.22-rc3/include/linux/respool.h 2007-06-04 12:36:17.000000000 -0700
@@ -0,0 +1,43 @@
+/*
+ * respool.h - library routines for handling generic pre-allocated pool of objects
+ *
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This file is released under the GPLv2.
+ *
+ * Copyright (C) 2006 Anil S Keshavamurthy <[email protected]>
+ *
+ */
+
+#ifndef _RESPOOL_H_
+#define _RESPOOL_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+typedef void *(*rpool_alloc_t)(unsigned int, gfp_t);
+typedef void (*rpool_free_t)(void *, unsigned int);
+
+struct resource_pool {
+ struct work_struct work;
+ spinlock_t pool_lock; /* pool lock to walk the pool_head */
+ struct list_head pool_head; /* pool objects list head */
+ unsigned int min_count; /* min count to maintain */
+ unsigned int grow_count; /* grow by count when time to grow */
+ unsigned int curr_count; /* count of current free objects */
+ unsigned int alloc_size; /* objects size */
+ rpool_alloc_t alloc_mem; /* pool mem alloc function pointer */
+ rpool_free_t free_mem; /* pool mem free function pointer */
+};
+
+void *get_resource_pool_obj(struct resource_pool *ppool);
+void put_resource_pool_obj(void * vaddr, struct resource_pool *ppool);
+void destroy_resource_pool(struct resource_pool *ppool);
+int init_resource_pool(struct resource_pool *res,
+ unsigned int min_count, unsigned int alloc_size,
+ unsigned int grow_count, rpool_alloc_t alloc_fn,
+ rpool_free_t free_fn);
+
+#endif
Index: linux-2.6.22-rc3/lib/Makefile
===================================================================
--- linux-2.6.22-rc3.orig/lib/Makefile 2007-06-04 12:28:10.000000000 -0700
+++ linux-2.6.22-rc3/lib/Makefile 2007-06-04 12:36:17.000000000 -0700
@@ -58,6 +58,7 @@
obj-$(CONFIG_AUDIT_GENERIC) += audit.o

obj-$(CONFIG_SWIOTLB) += swiotlb.o
+obj-$(CONFIG_DMAR) += respool.o
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o

lib-$(CONFIG_GENERIC_BUG) += bug.o
Index: linux-2.6.22-rc3/lib/respool.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.22-rc3/lib/respool.c 2007-06-04 12:36:17.000000000 -0700
@@ -0,0 +1,176 @@
+/*
+ * respool.c - library routines for handling generic pre-allocated pool of objects
+ *
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This file is released under the GPLv2.
+ *
+ * Copyright (C) 2006 Anil S Keshavamurthy <[email protected]>
+ */
+
+#include <linux/respool.h>
+
+/**
+ * get_resource_pool_obj - gets an object from the pool
+ * @ppool - resource pool in question
+ * This function gets an object from the pool and,
+ * if the free object count drops to min_count or
+ * below, schedules work to grow the pool. If no
+ * elements are found in the pool then this function
+ * tries to get memory from the kernel (GFP_ATOMIC).
+ */
+void * get_resource_pool_obj(struct resource_pool *ppool)
+{
+ unsigned long flags;
+ struct list_head *plist;
+ bool queue_work = 0;
+
+ spin_lock_irqsave(&ppool->pool_lock, flags);
+ if (!list_empty(&ppool->pool_head)) {
+ plist = ppool->pool_head.next;
+ list_del(plist);
+ ppool->curr_count--;
+ } else {
+ /* curr_count must be 0 when the list is empty */
+ plist = NULL;
+ BUG_ON(ppool->curr_count != 0);
+ }
+
+ /* Check if pool needs to grow */
+ if (ppool->curr_count <= ppool->min_count)
+ queue_work = 1;
+ spin_unlock_irqrestore(&ppool->pool_lock, flags);
+
+ if (queue_work)
+ schedule_work(&ppool->work); /* queue work to grow the pool */
+
+
+ if (plist) {
+ memset(plist, 0, ppool->alloc_size); /* Zero out memory */
+ return plist;
+ }
+
+ /* Out of luck, try to get memory from kernel */
+ plist = (struct list_head *)ppool->alloc_mem(ppool->alloc_size,
+ GFP_ATOMIC);
+
+ return plist;
+}
+
+/**
+ * put_resource_pool_obj - puts an object back to the pool
+ * @vaddr - object's address
+ * @ppool - resource pool in question.
+ * This function puts an object back to the pool.
+ */
+void put_resource_pool_obj(void * vaddr, struct resource_pool *ppool)
+{
+ unsigned long flags;
+ struct list_head *plist = (struct list_head *)vaddr;
+
+ BUG_ON(!vaddr);
+ BUG_ON(!ppool);
+
+ spin_lock_irqsave(&ppool->pool_lock, flags);
+ list_add(plist, &ppool->pool_head);
+ ppool->curr_count++;
+ spin_unlock_irqrestore(&ppool->pool_lock, flags);
+}
+
+/**
+ * grow_resource_pool - grows the given resource pool
+ * @work - work struct
+ * This function gets the resource pool pointer from the
+ * work struct and grows the resource pool by grow_count.
+ */
+static void
+grow_resource_pool(struct work_struct * work)
+{
+ struct resource_pool *ppool;
+ struct list_head *plist;
+ unsigned int min_count, grow_count = 0;
+ unsigned long flags;
+
+ ppool = container_of(work, struct resource_pool, work);
+
+ /* compute the minimum count to grow */
+ spin_lock_irqsave(&ppool->pool_lock, flags);
+ min_count = ppool->min_count + ppool->grow_count;
+ if (ppool->curr_count < min_count)
+ grow_count = min_count - ppool->curr_count;
+ spin_unlock_irqrestore(&ppool->pool_lock, flags);
+
+ while(grow_count) {
+ plist = (struct list_head *)ppool->alloc_mem(ppool->alloc_size,
+ GFP_KERNEL);
+
+ if (!plist)
+ break;
+
+ /* Add the element to the list */
+ spin_lock_irqsave(&ppool->pool_lock, flags);
+ list_add(plist, &ppool->pool_head);
+ ppool->curr_count++;
+ spin_unlock_irqrestore(&ppool->pool_lock, flags);
+ grow_count--;
+ }
+}
+
+/**
+ * destroy_resource_pool - destroys the given resource pool
+ * @ppool - resource pool in question.
+ * This function walks through its list and frees up the
+ * preallocated objects.
+ */
+void
+destroy_resource_pool(struct resource_pool *ppool)
+{
+ unsigned long flags;
+ struct list_head *plist;
+
+ spin_lock_irqsave(&ppool->pool_lock, flags);
+ while (!list_empty(&ppool->pool_head)) {
+ plist = ppool->pool_head.next; /* first object, not the list head itself */
+ list_del(plist);
+
+ ppool->free_mem(plist, ppool->alloc_size);
+
+ }
+ ppool->curr_count = 0;
+ spin_unlock_irqrestore(&ppool->pool_lock, flags);
+}
+
+/**
+ * init_resource_pool - initializes the resource pool
+ * @res: resource pool in question.
+ * @min_count: count of objects to pre-allocate
+ * @alloc_size: size of each object
+ * @grow_count: count of objects to grow when required
+ * @alloc_fn: function which allocates memory
+ * @free_fn: function which frees memory
+ *
+ * This function initializes the given resource pool and
+ * populates the min_count of objects to begin with.
+ */
+int
+init_resource_pool(struct resource_pool *res,
+ unsigned int min_count, unsigned int alloc_size,
+ unsigned int grow_count, rpool_alloc_t alloc_fn,
+ rpool_free_t free_fn)
+{
+ res->min_count = min_count;
+ res->alloc_size = alloc_size;
+ res->grow_count = grow_count;
+ res->curr_count = 0;
+ res->alloc_mem = alloc_fn;
+ res->free_mem = free_fn;
+ spin_lock_init(&res->pool_lock);
+ INIT_LIST_HEAD(&res->pool_head);
+ INIT_WORK(&res->work, grow_resource_pool);
+
+ /* grow the pool */
+ grow_resource_pool(&res->work);
+
+ return (res->curr_count == 0);
+}
+

--


2007-06-04 22:57:24

by Jeff Garzik

Subject: Re: [Intel-IOMMU 02/10] Library routines for handling pre-allocated pool of objects

On Mon, Jun 04, 2007 at 02:02:44PM -0700, [email protected] wrote:
> This patch provides a common interface for pre allocating and
> managing pool of objects.
>
> Signed-off-by: Anil S Keshavamurthy <[email protected]>
> ---
> include/linux/respool.h | 43 +++++++++++
> lib/Makefile | 1
> lib/respool.c | 176 ++++++++++++++++++++++++++++++++++++++++++++++++
> 3 files changed, 220 insertions(+)
>
> Index: linux-2.6.22-rc3/include/linux/respool.h
> ===================================================================
> --- /dev/null 1970-01-01 00:00:00.000000000 +0000
> +++ linux-2.6.22-rc3/include/linux/respool.h 2007-06-04 12:36:17.000000000 -0700
> @@ -0,0 +1,43 @@
> +/*
> + * respool.c - library routines for handling generic pre-allocated pool of objects
> + *
> + * Copyright (c) 2006, Intel Corporation.
> + *
> + * This file is released under the GPLv2.
> + *
> + * Copyright (C) 2006 Anil S Keshavamurthy <[email protected]>
> + *
> + */
> +
> +#ifndef _RESPOOL_H_
> +#define _RESPOOL_H_
> +
> +#include <linux/types.h>
> +#include <linux/kernel.h>
> +#include <linux/slab.h>
> +#include <linux/workqueue.h>
> +
> +typedef void *(*rpool_alloc_t)(unsigned int, gfp_t);
> +typedef void (*rpool_free_t)(void *, unsigned int);
> +
> +struct resource_pool {
> + struct work_struct work;
> + spinlock_t pool_lock; /* pool lock to walk the pool_head */
> + struct list_head pool_head; /* pool objects list head */
> + unsigned int min_count; /* min count to maintain */
> + unsigned int grow_count; /* grow by count when time to grow */
> + unsigned int curr_count; /* count of current free objects */
> + unsigned int alloc_size; /* objects size */
> + rpool_alloc_t alloc_mem; /* pool mem alloc function pointer */
> + rpool_free_t free_mem; /* pool mem free function pointer */
> +};
> +
> +void *get_resource_pool_obj(struct resource_pool *ppool);
> +void put_resource_pool_obj(void * vaddr, struct resource_pool *ppool);
> +void destroy_resource_pool(struct resource_pool *ppool);
> +int init_resource_pool(struct resource_pool *res,
> + unsigned int min_count, unsigned int alloc_size,
> + unsigned int grow_count, rpool_alloc_t alloc_fn,
> + rpool_free_t free_fn);
> +
> +#endif
> Index: linux-2.6.22-rc3/lib/Makefile
> ===================================================================
> --- linux-2.6.22-rc3.orig/lib/Makefile 2007-06-04 12:28:10.000000000 -0700
> +++ linux-2.6.22-rc3/lib/Makefile 2007-06-04 12:36:17.000000000 -0700
> @@ -58,6 +58,7 @@
> obj-$(CONFIG_AUDIT_GENERIC) += audit.o
>
> obj-$(CONFIG_SWIOTLB) += swiotlb.o
> +obj-$(CONFIG_DMAR) += respool.o
> obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
>
> lib-$(CONFIG_GENERIC_BUG) += bug.o
> Index: linux-2.6.22-rc3/lib/respool.c
> ===================================================================
> --- /dev/null 1970-01-01 00:00:00.000000000 +0000
> +++ linux-2.6.22-rc3/lib/respool.c 2007-06-04 12:36:17.000000000 -0700
> @@ -0,0 +1,176 @@
> +/*
> + * respool.c - library routines for handling generic pre-allocated pool of objects
> + *
> + * Copyright (c) 2006, Intel Corporation.
> + *
> + * This file is released under the GPLv2.
> + *
> + * Copyright (C) 2006 Anil S Keshavamurthy <[email protected]>
> + */
> +
> +#include <linux/respool.h>
> +
> +/**
> + * get_resource_pool_obj - gets an object from the pool
> + * @ppool - resource pool in question
> + * This function gets an object from the pool and
> + * if the pool count drops below min_count, this
> + * function schedules work to grow the pool. If
> + * no elements are fount in the pool then this function
> + * tries to get memory from kernel.
> + */
> +void * get_resource_pool_obj(struct resource_pool *ppool)
> +{
> + unsigned long flags;
> + struct list_head *plist;
> + bool queue_work = 0;
> +
> + spin_lock_irqsave(&ppool->pool_lock, flags);
> + if (!list_empty(&ppool->pool_head)) {
> + plist = ppool->pool_head.next;
> + list_del(plist);
> + ppool->curr_count--;
> + } else {
> + /*Making sure that curr_count is 0 when list is empty */
> + plist = NULL;
> + BUG_ON(ppool->curr_count != 0);
> + }
> +
> + /* Check if pool needs to grow */
> + if (ppool->curr_count <= ppool->min_count)
> + queue_work = 1;
> + spin_unlock_irqrestore(&ppool->pool_lock, flags);
> +
> + if (queue_work)
> + schedule_work(&ppool->work); /* queue work to grow the pool */
> +
> +
> + if (plist) {
> + memset(plist, 0, ppool->alloc_size); /* Zero out memory */
> + return plist;
> + }
> +
> + /* Out of luck, try to get memory from kernel */
> + plist = (struct list_head *)ppool->alloc_mem(ppool->alloc_size,
> + GFP_ATOMIC);

Since you are outside the lock, you should pass in gfp_flags, to enhance
the chance of being able to use GFP_KERNEL in certain contexts.
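
For instance, a variant along these lines (an illustrative sketch only;
the _flags name here is made up and this is not part of the posted patch):

/*
 * Same logic as the posted get_resource_pool_obj(), except that the
 * out-of-pool fallback uses the caller-supplied gfp flags, so sleeping
 * callers can pass GFP_KERNEL and interrupt-context callers keep
 * passing GFP_ATOMIC.
 */
void *get_resource_pool_obj_flags(struct resource_pool *ppool,
				  gfp_t gfp_flags)
{
	unsigned long flags;
	struct list_head *plist = NULL;
	int need_grow = 0;

	spin_lock_irqsave(&ppool->pool_lock, flags);
	if (!list_empty(&ppool->pool_head)) {
		plist = ppool->pool_head.next;
		list_del(plist);
		ppool->curr_count--;
	}
	if (ppool->curr_count <= ppool->min_count)
		need_grow = 1;
	spin_unlock_irqrestore(&ppool->pool_lock, flags);

	if (need_grow)
		schedule_work(&ppool->work);

	if (plist) {
		memset(plist, 0, ppool->alloc_size);
		return plist;
	}

	/* out of luck, fall back to the kernel with the caller's flags */
	return ppool->alloc_mem(ppool->alloc_size, gfp_flags);
}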


> + return plist;
> +}
> +
> +/**
> + * put_resource_pool_obj - puts an object back to the pool
> + * @vaddr - object's address
> + * @ppool - resource pool in question.
> + * This function puts an object back to the pool.
> + */
> +void put_resource_pool_obj(void * vaddr, struct resource_pool *ppool)
> +{
> + unsigned long flags;
> + struct list_head *plist = (struct list_head *)vaddr;
> +
> + BUG_ON(!vaddr);
> + BUG_ON(!ppool);
> +
> + spin_lock_irqsave(&ppool->pool_lock, flags);
> + list_add(plist, &ppool->pool_head);
> + ppool->curr_count++;
> + spin_unlock_irqrestore(&ppool->pool_lock, flags);

you should add logic to free resources here (or queue_work to free the
resources), if the pool grows beyond a certain size.



> +/**
> + * grow_resource_pool - grows the given resource pool
> + * @work - work struct
> + * This functions gets the resource pool pointer from the
> + * work struct and grows the resource pool by grow_count.
> + */
> +static void
> +grow_resource_pool(struct work_struct * work)
> +{
> + struct resource_pool *ppool;
> + struct list_head *plist;
> + unsigned int min_count, grow_count = 0;
> + unsigned long flags;
> +
> + ppool = container_of(work, struct resource_pool, work);
> +
> + /* compute the minimum count to grow */
> + spin_lock_irqsave(&ppool->pool_lock, flags);
> + min_count = ppool->min_count + ppool->grow_count;
> + if (ppool->curr_count < min_count)
> + grow_count = min_count - ppool->curr_count;
> + spin_unlock_irqrestore(&ppool->pool_lock, flags);
> +
> + while(grow_count) {
> + plist = (struct list_head *)ppool->alloc_mem(ppool->alloc_size,
> + GFP_KERNEL);
> +
> + if (!plist)
> + break;
> +
> + /* Add the element to the list */
> + spin_lock_irqsave(&ppool->pool_lock, flags);
> + list_add(plist, &ppool->pool_head);
> + ppool->curr_count++;
> + spin_unlock_irqrestore(&ppool->pool_lock, flags);
> + grow_count--;
> + }
> +}
> +
> +/**
> + * destroy_resource_pool - destroys the given resource pool
> + * @ppool - resource pool in question.
> + * This function walks throuhg its list and frees up the
> + * preallocated objects.
> + */
> +void
> +destroy_resource_pool(struct resource_pool *ppool)
> +{
> + unsigned long flags;
> + struct list_head *plist;
> +
> + spin_lock_irqsave(&ppool->pool_lock, flags);
> + while (!list_empty(&ppool->pool_head)) {
> + plist = &ppool->pool_head;
> + list_del(plist);
> +
> + ppool->free_mem(plist, ppool->alloc_size);
> +
> + }
> + ppool->curr_count = 0;
> + spin_unlock_irqrestore(&ppool->pool_lock, flags);
> +}
> +
> +/**
> + * init_resource_pool - initializes the resource pool
> + * @res: resource pool in question.
> + * @min_count: count of objectes to pre-allocate
> + * @alloc_size: size of each objects
> + * @grow_count: count of objects to grow when required
> + * @alloc_fn: function which allocates memory
> + * @free_fn: function which frees memory
> + *
> + * This function initializes the given resource pool and
> + * populates the min_count of objects to begin with.
> + */
> +int
> +init_resource_pool(struct resource_pool *res,
> + unsigned int min_count, unsigned int alloc_size,
> + unsigned int grow_count, rpool_alloc_t alloc_fn,
> + rpool_free_t free_fn)
> +{
> + res->min_count = min_count;
> + res->alloc_size = alloc_size;
> + res->grow_count = grow_count;
> + res->curr_count = 0;
> + res->alloc_mem = alloc_fn;
> + res->free_mem = free_fn;
> + spin_lock_init(&res->pool_lock);
> + INIT_LIST_HEAD(&res->pool_head);
> + INIT_WORK(&res->work, grow_resource_pool);
> +
> + /* grow the pool */
> + grow_resource_pool(&res->work);
> +
> + return (res->curr_count == 0);
> +}
> +
>
> --

2007-06-04 23:10:36

by Keshavamurthy, Anil S

Subject: Re: [Intel-IOMMU 02/10] Library routines for handling pre-allocated pool of objects

On Mon, Jun 04, 2007 at 06:57:14PM -0400, Jeff Garzik wrote:
> On Mon, Jun 04, 2007 at 02:02:44PM -0700, [email protected] wrote:
> > This patch provides a common interface for pre allocating and
> > managing pool of objects.
> >
> > Signed-off-by: Anil S Keshavamurthy <[email protected]>
> > ---
> > include/linux/respool.h | 43 +++++++++++
> > lib/Makefile | 1
> > lib/respool.c | 176 ++++++++++++++++++++++++++++++++++++++++++++++++
> > 3 files changed, 220 insertions(+)
> >
> > Index: linux-2.6.22-rc3/include/linux/respool.h
> > ===================================================================
> > --- /dev/null 1970-01-01 00:00:00.000000000 +0000
> > +++ linux-2.6.22-rc3/include/linux/respool.h 2007-06-04 12:36:17.000000000 -0700
> > @@ -0,0 +1,43 @@
> > +/*
> > + * respool.c - library routines for handling generic pre-allocated pool of objects
> > + *
> > + * Copyright (c) 2006, Intel Corporation.
> > + *
> > + * This file is released under the GPLv2.
> > + *
> > + * Copyright (C) 2006 Anil S Keshavamurthy <[email protected]>
> > + *
> > + */
> > +
> > +#ifndef _RESPOOL_H_
> > +#define _RESPOOL_H_
> > +
> > +#include <linux/types.h>
> > +#include <linux/kernel.h>
> > +#include <linux/slab.h>
> > +#include <linux/workqueue.h>
> > +
> > +typedef void *(*rpool_alloc_t)(unsigned int, gfp_t);
> > +typedef void (*rpool_free_t)(void *, unsigned int);
> > +
> > +struct resource_pool {
> > + struct work_struct work;
> > + spinlock_t pool_lock; /* pool lock to walk the pool_head */
> > + struct list_head pool_head; /* pool objects list head */
> > + unsigned int min_count; /* min count to maintain */
> > + unsigned int grow_count; /* grow by count when time to grow */
> > + unsigned int curr_count; /* count of current free objects */
> > + unsigned int alloc_size; /* objects size */
> > + rpool_alloc_t alloc_mem; /* pool mem alloc function pointer */
> > + rpool_free_t free_mem; /* pool mem free function pointer */
> > +};
> > +
> > +void *get_resource_pool_obj(struct resource_pool *ppool);
> > +void put_resource_pool_obj(void * vaddr, struct resource_pool *ppool);
> > +void destroy_resource_pool(struct resource_pool *ppool);
> > +int init_resource_pool(struct resource_pool *res,
> > + unsigned int min_count, unsigned int alloc_size,
> > + unsigned int grow_count, rpool_alloc_t alloc_fn,
> > + rpool_free_t free_fn);
> > +
> > +#endif
> > Index: linux-2.6.22-rc3/lib/Makefile
> > ===================================================================
> > --- linux-2.6.22-rc3.orig/lib/Makefile 2007-06-04 12:28:10.000000000 -0700
> > +++ linux-2.6.22-rc3/lib/Makefile 2007-06-04 12:36:17.000000000 -0700
> > @@ -58,6 +58,7 @@
> > obj-$(CONFIG_AUDIT_GENERIC) += audit.o
> >
> > obj-$(CONFIG_SWIOTLB) += swiotlb.o
> > +obj-$(CONFIG_DMAR) += respool.o
> > obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
> >
> > lib-$(CONFIG_GENERIC_BUG) += bug.o
> > Index: linux-2.6.22-rc3/lib/respool.c
> > ===================================================================
> > --- /dev/null 1970-01-01 00:00:00.000000000 +0000
> > +++ linux-2.6.22-rc3/lib/respool.c 2007-06-04 12:36:17.000000000 -0700
> > @@ -0,0 +1,176 @@
> > +/*
> > + * respool.c - library routines for handling generic pre-allocated pool of objects
> > + *
> > + * Copyright (c) 2006, Intel Corporation.
> > + *
> > + * This file is released under the GPLv2.
> > + *
> > + * Copyright (C) 2006 Anil S Keshavamurthy <[email protected]>
> > + */
> > +
> > +#include <linux/respool.h>
> > +
> > +/**
> > + * get_resource_pool_obj - gets an object from the pool
> > + * @ppool - resource pool in question
> > + * This function gets an object from the pool and
> > + * if the pool count drops below min_count, this
> > + * function schedules work to grow the pool. If
> > + * no elements are fount in the pool then this function
> > + * tries to get memory from kernel.
> > + */
> > +void * get_resource_pool_obj(struct resource_pool *ppool)
> > +{
> > + unsigned long flags;
> > + struct list_head *plist;
> > + bool queue_work = 0;
> > +
> > + spin_lock_irqsave(&ppool->pool_lock, flags);
> > + if (!list_empty(&ppool->pool_head)) {
> > + plist = ppool->pool_head.next;
> > + list_del(plist);
> > + ppool->curr_count--;
> > + } else {
> > + /*Making sure that curr_count is 0 when list is empty */
> > + plist = NULL;
> > + BUG_ON(ppool->curr_count != 0);
> > + }
> > +
> > + /* Check if pool needs to grow */
> > + if (ppool->curr_count <= ppool->min_count)
> > + queue_work = 1;
> > + spin_unlock_irqrestore(&ppool->pool_lock, flags);
> > +
> > + if (queue_work)
> > + schedule_work(&ppool->work); /* queue work to grow the pool */
> > +
> > +
> > + if (plist) {
> > + memset(plist, 0, ppool->alloc_size); /* Zero out memory */
> > + return plist;
> > + }
> > +
> > + /* Out of luck, try to get memory from kernel */
> > + plist = (struct list_head *)ppool->alloc_mem(ppool->alloc_size,
> > + GFP_ATOMIC);
>
> Since you are outside the lock, you should pass in gfp_flags, to enhance
> the chance of being able to use GFP_KERNEL in certain contexts.

No, you got it wrong. The above function get_resource_pool_obj() itself
can be called at interrupt time, so the logic is to get an object
from the pre-allocated pool if one is available, else get it from the
kernel without blocking, which is why the GFP_ATOMIC flag is passed.

>
>
> > + return plist;
> > +}
> > +
> > +/**
> > + * put_resource_pool_obj - puts an object back to the pool
> > + * @vaddr - object's address
> > + * @ppool - resource pool in question.
> > + * This function puts an object back to the pool.
> > + */
> > +void put_resource_pool_obj(void * vaddr, struct resource_pool *ppool)
> > +{
> > + unsigned long flags;
> > + struct list_head *plist = (struct list_head *)vaddr;
> > +
> > + BUG_ON(!vaddr);
> > + BUG_ON(!ppool);
> > +
> > + spin_lock_irqsave(&ppool->pool_lock, flags);
> > + list_add(plist, &ppool->pool_head);
> > + ppool->curr_count++;
> > + spin_unlock_irqrestore(&ppool->pool_lock, flags);
>
> you should add logic to free resources here (or queue_work to free the
> resources), if the pool grows beyond a certain size.

That can be added as an add-on; testing showed that the pool
grows to a certain size and will not grow beyond that,
as we tend to reuse the elements.

2007-06-04 23:44:05

by Jeff Garzik

Subject: Re: [Intel-IOMMU 02/10] Library routines for handling pre-allocated pool of objects

On Mon, Jun 04, 2007 at 04:06:49PM -0700, Keshavamurthy, Anil S wrote:
> On Mon, Jun 04, 2007 at 06:57:14PM -0400, Jeff Garzik wrote:
> > you should add logic to free resources here (or queue_work to free the
> > resources), if the pool grows beyond a certain size.

> Can be added as an add on, testing showed that pool
> grows to a certain size and will not grow beyond that
> as we tend to reuse the elements.

Yes, but is unbounded growth possible? If not, what part of the code
guarantees the pool is limited?

We should not merge code that allows the pool to grow without bound.
In-house testing certainly never covers all the cases seen in the
field, so I wouldn't make too many assumptions based on that. Some
vendor will inevitably build a $BigNum system where the IOMMU is very
heavily used.

Jeff



2007-06-04 23:54:55

by Keshavamurthy, Anil S

Subject: Re: [Intel-IOMMU 02/10] Library routines for handling pre-allocated pool of objects

On Mon, Jun 04, 2007 at 07:43:54PM -0400, Jeff Garzik wrote:
> On Mon, Jun 04, 2007 at 04:06:49PM -0700, Keshavamurthy, Anil S wrote:
> > On Mon, Jun 04, 2007 at 06:57:14PM -0400, Jeff Garzik wrote:
> > > you should add logic to free resources here (or queue_work to free the
> > > resources), if the pool grows beyond a certain size.
>
> > Can be added as an add on, testing showed that pool
> > grows to a certain size and will not grow beyond that
> > as we tend to reuse the elements.
>
> Yes, but is it possible? If no, what part of the code guarantees the
> pool is limited?
>
> We should not merge code that allows the pool to grow without bound.
> In-house testing certainly never covers all the cases seen in the
> field, so I wouldn't make too many assumptions based on that. Some
> vendor will inevitably build a $BigNum system where the IOMMU is very
> heavily used.

No problem, I can add code to free pool elements: if
curr_count ever goes above (min_count + 2 * grow_count),
bring curr_count back down to (min_count + grow_count) by freeing
some pool objects.
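
(For example, with min_count = 20 and grow_count = 10 the pool would be
left alone until curr_count exceeds 40, and the queued work would then
free objects until curr_count is back down to 30.)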

A patch that applies on top of this current patch will follow soon.

Thanks Jeff for making your case stronger :)

-Anil


>
> Jeff
>

2007-06-05 20:28:26

by Keshavamurthy, Anil S

Subject: Re: [Intel-IOMMU 02/10] Library routines for handling pre-allocated pool of objects

On Mon, Jun 04, 2007 at 04:51:05PM -0700, Keshavamurthy, Anil S wrote:
> On Mon, Jun 04, 2007 at 07:43:54PM -0400, Jeff Garzik wrote:
> > On Mon, Jun 04, 2007 at 04:06:49PM -0700, Keshavamurthy, Anil S wrote:
> > > On Mon, Jun 04, 2007 at 06:57:14PM -0400, Jeff Garzik wrote:
> > > > you should add logic to free resources here (or queue_work to free the
> > > > resources), if the pool grows beyond a certain size.
> >
> > > Can be added as an add on, testing showed that pool
> > > grows to a certain size and will not grow beyond that
> > > as we tend to reuse the elements.
> >
> > Yes, but is it possible? If no, what part of the code guarantees the
> > pool is limited?
> >
> > We should not merge code that allows the pool to grow without bound.
> > In-house testing certainly never covers all the cases seen in the
> > field, so I wouldn't make too many assumptions based on that. Some
> > vendor will inevitably build a $BigNum system where the IOMMU is very
> > heavily used.
>
> No problem, I can add the code to free the pool element if
> the curr_count ever goes greater than (min_count + 2 * grow_count)
> then bring the curr_count to min_count + grow_count by freeing
> some pool objects.
>
> A patch which will apply to this current patch will follow soon.

Here goes the patch as promised...
------------------------------------

Now adding the logic to shrink the resource pool
and give the excess pool objects' memory space
back to the OS.

This patch applies on top of my previous posting.

Signed-off-by: Anil S Keshavamurthy <[email protected]>

---
lib/respool.c | 84 ++++++++++++++++++++++++++++++++++++++++++++--------------
1 file changed, 65 insertions(+), 19 deletions(-)

Index: linux-2.6.22-rc3/lib/respool.c
===================================================================
--- linux-2.6.22-rc3.orig/lib/respool.c 2007-06-05 12:22:26.000000000 -0700
+++ linux-2.6.22-rc3/lib/respool.c 2007-06-05 12:58:01.000000000 -0700
@@ -67,6 +67,7 @@
{
unsigned long flags;
struct list_head *plist = (struct list_head *)vaddr;
+ bool queue_work = 0;

BUG_ON(!vaddr);
BUG_ON(!ppool);
@@ -74,21 +75,74 @@
spin_lock_irqsave(&ppool->pool_lock, flags);
list_add(plist, &ppool->pool_head);
ppool->curr_count++;
+ if (ppool->curr_count > (ppool->min_count +
+ ppool->grow_count * 2))
+ queue_work = 1;
spin_unlock_irqrestore(&ppool->pool_lock, flags);
+
+ if (queue_work)
+ schedule_work(&ppool->work); /* queue work to shrink the pool */
+}
+
+static void
+__grow_resource_pool(struct resource_pool *ppool,
+ unsigned int grow_count)
+{
+ unsigned long flags;
+ struct list_head *plist;
+
+ while (grow_count) {
+ plist = (struct list_head *)ppool->alloc_mem(ppool->alloc_size,
+ GFP_KERNEL);
+
+ if (!plist)
+ break;
+
+ /* Add the element to the list */
+ spin_lock_irqsave(&ppool->pool_lock, flags);
+ list_add(plist, &ppool->pool_head);
+ ppool->curr_count++;
+ spin_unlock_irqrestore(&ppool->pool_lock, flags);
+ grow_count--;
+ }
}

+static void
+__shrink_resource_pool(struct resource_pool *ppool,
+ unsigned int shrink_count)
+{
+ unsigned long flags;
+ struct list_head *plist;
+
+ while (shrink_count) {
+ /* remove an object from the pool */
+ spin_lock_irqsave(&ppool->pool_lock, flags);
+ if (list_empty(&ppool->pool_head)) {
+ spin_unlock_irqrestore(&ppool->pool_lock, flags);
+ break;
+ }
+ plist = ppool->pool_head.next;
+ list_del(plist);
+ ppool->curr_count--;
+ spin_unlock_irqrestore(&ppool->pool_lock, flags);
+ ppool->free_mem(plist, ppool->alloc_size);
+ shrink_count--;
+ }
+}
+
+
/**
- * grow_resource_pool - grows the given resource pool
+ * resize_resource_pool - resize the given resource pool
* @work - work struct
* This function gets the resource pool pointer from the
* work struct and grows the resource pool by grow_count.
*/
static void
-grow_resource_pool(struct work_struct * work)
+resize_resource_pool(struct work_struct * work)
{
struct resource_pool *ppool;
- struct list_head *plist;
unsigned int min_count, grow_count = 0;
+ unsigned int shrink_count = 0;
unsigned long flags;

ppool = container_of(work, struct resource_pool, work);
@@ -98,22 +152,14 @@
min_count = ppool->min_count + ppool->grow_count;
if (ppool->curr_count < min_count)
grow_count = min_count - ppool->curr_count;
+ else if (ppool->curr_count > min_count + ppool->grow_count)
+ shrink_count = ppool->curr_count - min_count;
spin_unlock_irqrestore(&ppool->pool_lock, flags);

- while(grow_count) {
- plist = (struct list_head *)ppool->alloc_mem(ppool->alloc_size,
- GFP_KERNEL);
-
- if (!plist)
- break;
-
- /* Add the element to the list */
- spin_lock_irqsave(&ppool->pool_lock, flags);
- list_add(plist, &ppool->pool_head);
- ppool->curr_count++;
- spin_unlock_irqrestore(&ppool->pool_lock, flags);
- grow_count--;
- }
+ if (grow_count)
+ __grow_resource_pool(ppool, grow_count);
+ else if (shrink_count)
+ __shrink_resource_pool(ppool, shrink_count);
}

/**
@@ -166,10 +212,10 @@
res->free_mem = free_fn;
spin_lock_init(&res->pool_lock);
INIT_LIST_HEAD(&res->pool_head);
- INIT_WORK(&res->work, grow_resource_pool);
+ INIT_WORK(&res->work, resize_resource_pool);

/* grow the pool */
- grow_resource_pool(&res->work);
+ resize_resource_pool(&res->work);

return (res->curr_count == 0);
}

2007-06-05 20:31:02

by Jeff Garzik

Subject: Re: [Intel-IOMMU 02/10] Library routines for handling pre-allocated pool of objects

On Tue, Jun 05, 2007 at 01:24:33PM -0700, Keshavamurthy, Anil S wrote:
> 1 file changed, 65 insertions(+), 19 deletions(-)
>
> Index: linux-2.6.22-rc3/lib/respool.c
> ===================================================================
> --- linux-2.6.22-rc3.orig/lib/respool.c 2007-06-05 12:22:26.000000000 -0700
> +++ linux-2.6.22-rc3/lib/respool.c 2007-06-05 12:58:01.000000000 -0700
> @@ -67,6 +67,7 @@
> {
> unsigned long flags;
> struct list_head *plist = (struct list_head *)vaddr;
> + bool queue_work = 0;

Seems sane to me.

I only have a very minor nit to pick: naming a variable (queue_work)
the same as a public function may cause confusion.

Also, if it's a bool you should initialize it to 'true' or 'false'.

Jeff



2007-06-05 21:00:28

by Keshavamurthy, Anil S

Subject: Re: [Intel-IOMMU 02/10] Library routines for handling pre-allocated pool of objects

On Tue, Jun 05, 2007 at 04:30:48PM -0400, Jeff Garzik wrote:
> On Tue, Jun 05, 2007 at 01:24:33PM -0700, Keshavamurthy, Anil S wrote:
> > 1 file changed, 65 insertions(+), 19 deletions(-)
> >
> > Index: linux-2.6.22-rc3/lib/respool.c
> > ===================================================================
> > --- linux-2.6.22-rc3.orig/lib/respool.c 2007-06-05 12:22:26.000000000 -0700
> > +++ linux-2.6.22-rc3/lib/respool.c 2007-06-05 12:58:01.000000000 -0700
> > @@ -67,6 +67,7 @@
> > {
> > unsigned long flags;
> > struct list_head *plist = (struct list_head *)vaddr;
> > + bool queue_work = 0;
>
> Seems sane to me.
Thanks so much.

>
> I only have a very minor nit to pick: naming a variable (queue_work)
> the same as a public function may cause confusion.
>
> Also, if its a bool you should initialize it to 'true' or 'false'.
I will address these points when the patch gets into Andrew's mm tree.

Thanks once again,
Anil