Hi Andrew,
Below are five patches converting the Lustre client to the new shrinker
API and also adapting to the num_physpages removal in the
linux-next/akpm tree. Since neither change is in Greg's staging tree,
I'm sending them to you so that Lustre can build properly in linux-next.
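For reference, the shape of the new API is sketched below. This is a
minimal illustration only, not code from the patches; the demo_* names
are placeholders. The single ->shrink() callback is split into
->count_objects(), which reports how many objects could be freed, and
->scan_objects(), which does the actual freeing:

static unsigned long demo_count(struct shrinker *s,
				struct shrink_control *sc)
{
	/* report the number of freeable objects, 0 if none */
	return demo_nr_cached;
}

static unsigned long demo_scan(struct shrinker *s,
			       struct shrink_control *sc)
{
	/* refuse to reclaim when fs recursion is not allowed */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;
	/* free up to sc->nr_to_scan objects, return the number freed */
	return demo_free_some(sc->nr_to_scan);
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};

The shrinker struct is now registered directly with
register_shrinker(&demo_shrinker) and torn down with
unregister_shrinker(&demo_shrinker), so the set_shrinker()/
remove_shrinker() wrappers in libcfs can go away (patch 4).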
Thanks,
Tao
Peng Tao (5):
staging/lustre/ldlm: convert to shrinkers to count/scan API
staging/lustre/obdclass: convert lu_object shrinker to count/scan API
staging/lustre/ptlrpc: convert to new shrinker API
staging/lustre/libcfs: cleanup linux-mem.h
staging/lustre: replace num_physpages with totalram_pages
.../lustre/include/linux/libcfs/linux/linux-mem.h | 42 +-----
drivers/staging/lustre/lustre/ldlm/ldlm_lib.c | 10 +-
drivers/staging/lustre/lustre/ldlm/ldlm_pool.c | 139 +++++++++++---------
.../lustre/lustre/libcfs/linux/linux-tracefile.c | 2 +-
drivers/staging/lustre/lustre/llite/lproc_llite.c | 8 +-
drivers/staging/lustre/lustre/obdclass/class_obd.c | 6 +-
.../lustre/lustre/obdclass/linux/linux-sysctl.c | 6 +-
drivers/staging/lustre/lustre/obdclass/lu_object.c | 100 +++++++-------
drivers/staging/lustre/lustre/osc/lproc_osc.c | 2 +-
.../staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c | 2 +-
drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c | 80 ++++++-----
11 files changed, 192 insertions(+), 205 deletions(-)
--
1.7.9.5
Convert the ldlm shrinker to the new count/scan API.
Cc: Dave Chinner <[email protected]>
Signed-off-by: Peng Tao <[email protected]>
Signed-off-by: Andreas Dilger <[email protected]>
---
drivers/staging/lustre/lustre/ldlm/ldlm_pool.c | 139 +++++++++++++-----------
1 file changed, 75 insertions(+), 64 deletions(-)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index b3b6028..4c41e02 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -524,7 +524,7 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
int nr, unsigned int gfp_mask)
{
struct ldlm_namespace *ns;
- int canceled = 0, unused;
+ int unused;
ns = ldlm_pl2ns(pl);
@@ -543,14 +543,10 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
unused = ns->ns_nr_unused;
spin_unlock(&ns->ns_lock);
- if (nr) {
- canceled = ldlm_cancel_lru(ns, nr, LCF_ASYNC,
- LDLM_CANCEL_SHRINK);
- }
- /*
- * Return the number of potentially reclaimable locks.
- */
- return ((unused - canceled) / 100) * sysctl_vfs_cache_pressure;
+ if (nr == 0)
+ return (unused / 100) * sysctl_vfs_cache_pressure;
+ else
+ return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
}
struct ldlm_pool_ops ldlm_srv_pool_ops = {
@@ -605,9 +601,10 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
}
EXPORT_SYMBOL(ldlm_pool_recalc);
-/**
+/*
* Pool shrink wrapper. Will call either client or server pool recalc callback
- * depending what pool \a pl is used.
+ * depending on which pool pl is used. When nr == 0, just return the number of
+ * freeable locks. Otherwise, return the number of canceled locks.
*/
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
unsigned int gfp_mask)
@@ -1025,28 +1022,23 @@ static int ldlm_pool_granted(struct ldlm_pool *pl)
}
static struct ptlrpc_thread *ldlm_pools_thread;
-static struct shrinker *ldlm_pools_srv_shrinker;
-static struct shrinker *ldlm_pools_cli_shrinker;
static struct completion ldlm_pools_comp;
/*
- * Cancel \a nr locks from all namespaces (if possible). Returns number of
- * cached locks after shrink is finished. All namespaces are asked to
- * cancel approximately equal amount of locks to keep balancing.
+ * Count locks from all namespaces (if possible). Returns the number of
+ * cached locks.
*/
-static int ldlm_pools_shrink(ldlm_side_t client, int nr,
- unsigned int gfp_mask)
+static unsigned long ldlm_pools_count(ldlm_side_t client, unsigned int gfp_mask)
{
- int total = 0, cached = 0, nr_ns;
+ unsigned long total = 0, nr_ns;
struct ldlm_namespace *ns;
void *cookie;
- if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
- !(gfp_mask & __GFP_FS))
- return -1;
+ if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
+ return 0;
- CDEBUG(D_DLMTRACE, "Request to shrink %d %s locks from all pools\n",
- nr, client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
+ CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
+ client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
cookie = cl_env_reenter();
@@ -1070,16 +1062,26 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
ldlm_namespace_put(ns);
}
- if (nr == 0 || total == 0) {
- cl_env_reexit(cookie);
- return total;
- }
+ cl_env_reexit(cookie);
+ return total;
+}
+
+static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, unsigned int gfp_mask)
+{
+ unsigned long freed = 0;
+ int tmp, nr_ns;
+ struct ldlm_namespace *ns;
+ void *cookie;
+ if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
+ return SHRINK_STOP;
+
+ cookie = cl_env_reenter();
/*
* Shrink at least ldlm_namespace_nr(client) namespaces.
*/
- for (nr_ns = atomic_read(ldlm_namespace_nr(client));
- nr_ns > 0; nr_ns--)
+ for (tmp = nr_ns = atomic_read(ldlm_namespace_nr(client));
+ tmp > 0; tmp--)
{
int cancel, nr_locks;
@@ -1089,12 +1091,6 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
mutex_lock(ldlm_namespace_lock(client));
if (list_empty(ldlm_namespace_list(client))) {
mutex_unlock(ldlm_namespace_lock(client));
- /*
- * If list is empty, we can't return any @cached > 0,
- * that probably would cause needless shrinker
- * call.
- */
- cached = 0;
break;
}
ns = ldlm_namespace_first_locked(client);
@@ -1103,29 +1099,42 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
mutex_unlock(ldlm_namespace_lock(client));
nr_locks = ldlm_pool_granted(&ns->ns_pool);
- cancel = 1 + nr_locks * nr / total;
- ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
- cached += ldlm_pool_granted(&ns->ns_pool);
+ /*
+ * We used to shrink proportionally, but with the new shrinker API
+ * we no longer know the total number of freeable locks.
+ */
+ cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
+ freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
ldlm_namespace_put(ns);
}
cl_env_reexit(cookie);
- /* we only decrease the SLV in server pools shrinker, return -1 to
- * kernel to avoid needless loop. LU-1128 */
- return (client == LDLM_NAMESPACE_SERVER) ? -1 : cached;
+ /*
+ * We only decrease the SLV in the server pools shrinker; return
+ * SHRINK_STOP to the kernel to avoid a needless loop. LU-1128
+ */
+ return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
+}
+
+static unsigned long ldlm_pools_srv_count(struct shrinker *s, struct shrink_control *sc)
+{
+ return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
}
-static int ldlm_pools_srv_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static unsigned long ldlm_pools_srv_scan(struct shrinker *s, struct shrink_control *sc)
{
- return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER,
- shrink_param(sc, nr_to_scan),
- shrink_param(sc, gfp_mask));
+ return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
+ sc->gfp_mask);
}
-static int ldlm_pools_cli_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static unsigned long ldlm_pools_cli_count(struct shrinker *s, struct shrink_control *sc)
{
- return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT,
- shrink_param(sc, nr_to_scan),
- shrink_param(sc, gfp_mask));
+ return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
+}
+
+static unsigned long ldlm_pools_cli_scan(struct shrinker *s, struct shrink_control *sc)
+{
+ return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
+ sc->gfp_mask);
}
void ldlm_pools_recalc(ldlm_side_t client)
@@ -1351,6 +1360,18 @@ static void ldlm_pools_thread_stop(void)
EXIT;
}
+static struct shrinker ldlm_pools_srv_shrinker = {
+ .count_objects = ldlm_pools_srv_count,
+ .scan_objects = ldlm_pools_srv_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+static struct shrinker ldlm_pools_cli_shrinker = {
+ .count_objects = ldlm_pools_cli_count,
+ .scan_objects = ldlm_pools_cli_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
int ldlm_pools_init(void)
{
int rc;
@@ -1358,12 +1379,8 @@ int ldlm_pools_init(void)
rc = ldlm_pools_thread_start();
if (rc == 0) {
- ldlm_pools_srv_shrinker =
- set_shrinker(DEFAULT_SEEKS,
- ldlm_pools_srv_shrink);
- ldlm_pools_cli_shrinker =
- set_shrinker(DEFAULT_SEEKS,
- ldlm_pools_cli_shrink);
+ register_shrinker(&ldlm_pools_srv_shrinker);
+ register_shrinker(&ldlm_pools_cli_shrinker);
}
RETURN(rc);
}
@@ -1371,14 +1388,8 @@ EXPORT_SYMBOL(ldlm_pools_init);
void ldlm_pools_fini(void)
{
- if (ldlm_pools_srv_shrinker != NULL) {
- remove_shrinker(ldlm_pools_srv_shrinker);
- ldlm_pools_srv_shrinker = NULL;
- }
- if (ldlm_pools_cli_shrinker != NULL) {
- remove_shrinker(ldlm_pools_cli_shrinker);
- ldlm_pools_cli_shrinker = NULL;
- }
+ unregister_shrinker(&ldlm_pools_srv_shrinker);
+ unregister_shrinker(&ldlm_pools_cli_shrinker);
ldlm_pools_thread_stop();
}
EXPORT_SYMBOL(ldlm_pools_fini);
--
1.7.9.5
Convert the sptlrpc encrypt page pool shrinker to the new count/scan API.
Cc: Dave Chinner <[email protected]>
Signed-off-by: Peng Tao <[email protected]>
Signed-off-by: Andreas Dilger <[email protected]>
---
drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c | 76 +++++++++++++----------
1 file changed, 42 insertions(+), 34 deletions(-)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index bf53f1b..87eea53 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -121,13 +121,6 @@ static struct ptlrpc_enc_page_pool {
} page_pools;
/*
- * memory shrinker
- */
-const int pools_shrinker_seeks = DEFAULT_SEEKS;
-static struct shrinker *pools_shrinker = NULL;
-
-
-/*
* /proc/fs/lustre/sptlrpc/encrypt_page_pools
*/
int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
@@ -226,30 +219,46 @@ static void enc_pools_release_free_pages(long npages)
}
/*
- * could be called frequently for query (@nr_to_scan == 0).
* we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
*/
-static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static unsigned long enc_pools_shrink_count(struct shrinker *s,
+ struct shrink_control *sc)
{
- if (unlikely(shrink_param(sc, nr_to_scan) != 0)) {
+ /*
+ * If there has been no pool access for a long time, we consider
+ * the pool fully idle. A little race here is fine.
+ */
+ if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
+ CACHE_QUIESCENT_PERIOD)) {
spin_lock(&page_pools.epp_lock);
- shrink_param(sc, nr_to_scan) = min_t(unsigned long,
- shrink_param(sc, nr_to_scan),
- page_pools.epp_free_pages -
- PTLRPC_MAX_BRW_PAGES);
- if (shrink_param(sc, nr_to_scan) > 0) {
- enc_pools_release_free_pages(shrink_param(sc,
- nr_to_scan));
- CDEBUG(D_SEC, "released %ld pages, %ld left\n",
- (long)shrink_param(sc, nr_to_scan),
- page_pools.epp_free_pages);
-
- page_pools.epp_st_shrinks++;
- page_pools.epp_last_shrink = cfs_time_current_sec();
- }
+ page_pools.epp_idle_idx = IDLE_IDX_MAX;
spin_unlock(&page_pools.epp_lock);
}
+ LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
+ return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
+ (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
+}
+
+/*
+ * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
+ */
+static unsigned long enc_pools_shrink_scan(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ spin_lock(&page_pools.epp_lock);
+ sc->nr_to_scan = min_t(unsigned long, sc->nr_to_scan,
+ page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES);
+ if (sc->nr_to_scan > 0) {
+ enc_pools_release_free_pages(sc->nr_to_scan);
+ CDEBUG(D_SEC, "released %ld pages, %ld left\n",
+ (long)sc->nr_to_scan, page_pools.epp_free_pages);
+
+ page_pools.epp_st_shrinks++;
+ page_pools.epp_last_shrink = cfs_time_current_sec();
+ }
+ spin_unlock(&page_pools.epp_lock);
+
/*
* if no pool access for a long time, we consider it's fully idle.
* a little race here is fine.
@@ -262,8 +271,7 @@ static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
}
LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
- return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
- (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
+ return sc->nr_to_scan;
}
static inline
@@ -699,6 +707,12 @@ static inline void enc_pools_free(void)
sizeof(*page_pools.epp_pools));
}
+static struct shrinker pools_shrinker = {
+ .count_objects = enc_pools_shrink_count,
+ .scan_objects = enc_pools_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
int sptlrpc_enc_pool_init(void)
{
/*
@@ -736,12 +750,7 @@ int sptlrpc_enc_pool_init(void)
if (page_pools.epp_pools == NULL)
return -ENOMEM;
- pools_shrinker = set_shrinker(pools_shrinker_seeks,
- enc_pools_shrink);
- if (pools_shrinker == NULL) {
- enc_pools_free();
- return -ENOMEM;
- }
+ register_shrinker(&pools_shrinker);
return 0;
}
@@ -750,11 +759,10 @@ void sptlrpc_enc_pool_fini(void)
{
unsigned long cleaned, npools;
- LASSERT(pools_shrinker);
LASSERT(page_pools.epp_pools);
LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);
- remove_shrinker(pools_shrinker);
+ unregister_shrinker(&pools_shrinker);
npools = npages_to_npools(page_pools.epp_total_pages);
cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
--
1.7.9.5
Convert the lu_object shrinker to the new count/scan API.
Cc: Dave Chinner <[email protected]>
Signed-off-by: Peng Tao <[email protected]>
Signed-off-by: Andreas Dilger <[email protected]>
---
drivers/staging/lustre/lustre/obdclass/lu_object.c | 98 +++++++++++---------
1 file changed, 52 insertions(+), 46 deletions(-)
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index fdf0ed3..1b88a74 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -1783,7 +1783,6 @@ int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
}
EXPORT_SYMBOL(lu_env_refill_by_tags);
-static struct shrinker *lu_site_shrinker = NULL;
typedef struct lu_site_stats{
unsigned lss_populated;
@@ -1839,61 +1838,68 @@ static void lu_site_stats_get(cfs_hash_t *hs,
* objects without taking the lu_sites_guard lock, but this is not
* possible in the current implementation.
*/
-static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static unsigned long lu_cache_shrink_count(struct shrinker *sk,
+ struct shrink_control *sc)
{
lu_site_stats_t stats;
struct lu_site *s;
struct lu_site *tmp;
- int cached = 0;
- int remain = shrink_param(sc, nr_to_scan);
- LIST_HEAD(splice);
-
- if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) {
- if (remain != 0)
- return -1;
- else
- /* We must not take the lu_sites_guard lock when
- * __GFP_FS is *not* set because of the deadlock
- * possibility detailed above. Additionally,
- * since we cannot determine the number of
- * objects in the cache without taking this
- * lock, we're in a particularly tough spot. As
- * a result, we'll just lie and say our cache is
- * empty. This _should_ be ok, as we can't
- * reclaim objects when __GFP_FS is *not* set
- * anyways.
- */
- return 0;
- }
+ unsigned long cached = 0;
- CDEBUG(D_INODE, "Shrink %d objects\n", remain);
+ if (!(sc->gfp_mask & __GFP_FS))
+ return 0;
mutex_lock(&lu_sites_guard);
list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
- if (shrink_param(sc, nr_to_scan) != 0) {
- remain = lu_site_purge(&lu_shrink_env, s, remain);
- /*
- * Move just shrunk site to the tail of site list to
- * assure shrinking fairness.
- */
- list_move_tail(&s->ls_linkage, &splice);
- }
-
memset(&stats, 0, sizeof(stats));
lu_site_stats_get(s->ls_obj_hash, &stats, 0);
cached += stats.lss_total - stats.lss_busy;
- if (shrink_param(sc, nr_to_scan) && remain <= 0)
- break;
}
- list_splice(&splice, lu_sites.prev);
mutex_unlock(&lu_sites_guard);
cached = (cached / 100) * sysctl_vfs_cache_pressure;
- if (shrink_param(sc, nr_to_scan) == 0)
- CDEBUG(D_INODE, "%d objects cached\n", cached);
+ CDEBUG(D_INODE, "%lu objects cached\n", cached);
return cached;
}
+static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
+ struct shrink_control *sc)
+{
+ struct lu_site *s;
+ struct lu_site *tmp;
+ unsigned long remain = sc->nr_to_scan;
+ LIST_HEAD(splice);
+
+ if (!(sc->gfp_mask & __GFP_FS))
+ /* We must not take the lu_sites_guard lock when
+ * __GFP_FS is *not* set because of the deadlock
+ * possibility detailed above. Additionally,
+ * since we cannot determine the number of
+ * objects in the cache without taking this
+ * lock, we're in a particularly tough spot. As
+ * a result, we'll just lie and say our cache is
+ * empty. This _should_ be ok, as we can't
+ * reclaim objects when __GFP_FS is *not* set
+ * anyways.
+ */
+ return SHRINK_STOP;
+
+ mutex_lock(&lu_sites_guard);
+ list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
+ remain = lu_site_purge(&lu_shrink_env, s, remain);
+ /*
+ * Move just shrunk site to the tail of site list to
+ * assure shrinking fairness.
+ */
+ list_move_tail(&s->ls_linkage, &splice);
+ }
+ list_splice(&splice, lu_sites.prev);
+ mutex_unlock(&lu_sites_guard);
+
+ return sc->nr_to_scan - remain;
+}
+
/*
* Debugging stuff.
*/
@@ -1917,6 +1923,12 @@ int lu_printk_printer(const struct lu_env *env,
return 0;
}
+static struct shrinker lu_site_shrinker = {
+ .count_objects = lu_cache_shrink_count,
+ .scan_objects = lu_cache_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
/**
* Initialization of global lu_* data.
*/
@@ -1951,9 +1963,7 @@ int lu_global_init(void)
* inode, one for ea. Unfortunately setting this high value results in
* lu_object/inode cache consuming all the memory.
*/
- lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
- if (lu_site_shrinker == NULL)
- return -ENOMEM;
+ register_shrinker(&lu_site_shrinker);
return result;
}
@@ -1963,11 +1973,7 @@ int lu_global_init(void)
*/
void lu_global_fini(void)
{
- if (lu_site_shrinker != NULL) {
- remove_shrinker(lu_site_shrinker);
- lu_site_shrinker = NULL;
- }
-
+ unregister_shrinker(&lu_site_shrinker);
lu_context_key_degister(&lu_global_key);
/*
--
1.7.9.5
Remove the shrinker-related wrappers.
Signed-off-by: Peng Tao <[email protected]>
Signed-off-by: Andreas Dilger <[email protected]>
---
.../lustre/include/linux/libcfs/linux/linux-mem.h | 38 --------------------
1 file changed, 38 deletions(-)
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
index 042a2bc..c572611 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
@@ -79,42 +79,4 @@
do { __oldfs = get_fs(); set_fs(get_ds());} while(0)
#define MMSPACE_CLOSE set_fs(__oldfs)
-/*
- * Shrinker
- */
-
-# define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask) \
- struct shrinker *shrinker, \
- struct shrink_control *sc
-# define shrink_param(sc, var) ((sc)->var)
-
-typedef int (*shrinker_t)(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask));
-
-static inline
-struct shrinker *set_shrinker(int seek, shrinker_t func)
-{
- struct shrinker *s;
-
- s = kmalloc(sizeof(*s), GFP_KERNEL);
- if (s == NULL)
- return (NULL);
-
- s->shrink = func;
- s->seeks = seek;
-
- register_shrinker(s);
-
- return s;
-}
-
-static inline
-void remove_shrinker(struct shrinker *shrinker)
-{
- if (shrinker == NULL)
- return;
-
- unregister_shrinker(shrinker);
- kfree(shrinker);
-}
-
#endif /* __LINUX_CFS_MEM_H__ */
--
1.7.9.5
From: Peng Tao <[email protected]>
The global variable num_physpages is going away. Replace it
with totalram_pages.
Cc: Jiang Liu <[email protected]>
Signed-off-by: Peng Tao <[email protected]>
---
.../lustre/include/linux/libcfs/linux/linux-mem.h | 4 ++--
drivers/staging/lustre/lustre/ldlm/ldlm_lib.c | 10 +++++-----
.../lustre/lustre/libcfs/linux/linux-tracefile.c | 2 +-
drivers/staging/lustre/lustre/llite/lproc_llite.c | 8 ++++----
drivers/staging/lustre/lustre/obdclass/class_obd.c | 6 +++---
.../lustre/lustre/obdclass/linux/linux-sysctl.c | 6 +++---
drivers/staging/lustre/lustre/obdclass/lu_object.c | 2 +-
drivers/staging/lustre/lustre/osc/lproc_osc.c | 2 +-
.../staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c | 2 +-
drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c | 4 ++--
10 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
index c572611..2af15d4 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
@@ -63,9 +63,9 @@
#if BITS_PER_LONG == 32
/* limit to lowmem on 32-bit systems */
#define NUM_CACHEPAGES \
- min(num_physpages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
+ min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
#else
-#define NUM_CACHEPAGES num_physpages
+#define NUM_CACHEPAGES totalram_pages
#endif
/*
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 42df530..aace534 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -339,8 +339,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
cli->cl_avail_grant = 0;
/* FIXME: Should limit this for the sum of all cl_dirty_max. */
cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
- if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > num_physpages / 8)
- cli->cl_dirty_max = num_physpages << (PAGE_CACHE_SHIFT - 3);
+ if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8)
+ cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3);
INIT_LIST_HEAD(&cli->cl_cache_waiters);
INIT_LIST_HEAD(&cli->cl_loi_ready_list);
INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
@@ -388,11 +388,11 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
if (!strcmp(name, LUSTRE_MDC_NAME)) {
cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
- } else if (num_physpages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
+ } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
cli->cl_max_rpcs_in_flight = 2;
- } else if (num_physpages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
+ } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
cli->cl_max_rpcs_in_flight = 3;
- } else if (num_physpages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
+ } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
cli->cl_max_rpcs_in_flight = 4;
} else {
if (osc_on_mdt(obddev->obd_name))
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c
index 6f56343..a500a0b 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c
@@ -269,7 +269,7 @@ void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
int cfs_trace_max_debug_mb(void)
{
- int total_mb = (num_physpages >> (20 - PAGE_SHIFT));
+ int total_mb = (totalram_pages >> (20 - PAGE_SHIFT));
return MAX(512, (total_mb * 80)/100);
}
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 6a82505..a30c411 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -243,9 +243,9 @@ static ssize_t ll_max_readahead_mb_seq_write(struct file *file, const char *buff
if (rc)
return rc;
- if (pages_number < 0 || pages_number > num_physpages / 2) {
+ if (pages_number < 0 || pages_number > totalram_pages / 2) {
CERROR("can't set file readahead more than %lu MB\n",
- num_physpages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/
+ totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/
return -ERANGE;
}
@@ -388,10 +388,10 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file, const char *buffer,
if (rc)
RETURN(rc);
- if (pages_number < 0 || pages_number > num_physpages) {
+ if (pages_number < 0 || pages_number > totalram_pages) {
CERROR("%s: can't set max cache more than %lu MB\n",
ll_get_fsname(sb, NULL, 0),
- num_physpages >> (20 - PAGE_CACHE_SHIFT));
+ totalram_pages >> (20 - PAGE_CACHE_SHIFT));
RETURN(-ERANGE);
}
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index af1c2d0..0715cf2 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -558,10 +558,10 @@ static int __init init_obdclass(void)
/* Default the dirty page cache cap to 1/2 of system memory.
* For clients with less memory, a larger fraction is needed
* for other purposes (mostly for BGL). */
- if (num_physpages <= 512 << (20 - PAGE_CACHE_SHIFT))
- obd_max_dirty_pages = num_physpages / 4;
+ if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT))
+ obd_max_dirty_pages = totalram_pages / 4;
else
- obd_max_dirty_pages = num_physpages / 2;
+ obd_max_dirty_pages = totalram_pages / 2;
err = obd_init_caches();
if (err)
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index 46aad68..7b94cb7 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -202,12 +202,12 @@ int LL_PROC_PROTO(proc_max_dirty_pages_in_mb)
1 << (20 - PAGE_CACHE_SHIFT));
/* Don't allow them to let dirty pages exceed 90% of system
* memory and set a hard minimum of 4MB. */
- if (obd_max_dirty_pages > ((num_physpages / 10) * 9)) {
+ if (obd_max_dirty_pages > ((totalram_pages / 10) * 9)) {
CERROR("Refusing to set max dirty pages to %u, which "
"is more than 90%% of available RAM; setting "
"to %lu\n", obd_max_dirty_pages,
- ((num_physpages / 10) * 9));
- obd_max_dirty_pages = ((num_physpages / 10) * 9);
+ ((totalram_pages / 10) * 9));
+ obd_max_dirty_pages = ((totalram_pages / 10) * 9);
} else if (obd_max_dirty_pages < 4 << (20 - PAGE_CACHE_SHIFT)) {
obd_max_dirty_pages = 4 << (20 - PAGE_CACHE_SHIFT);
}
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 1b88a74..1a37c2c 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -849,7 +849,7 @@ static int lu_htable_order(void)
*
* Size of lu_object is (arbitrary) taken as 1K (together with inode).
*/
- cache_size = num_physpages;
+ cache_size = totalram_pages;
#if BITS_PER_LONG == 32
/* limit hashtable size for lowmem systems to low RAM */
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index 198cf3b..d2f2198 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -146,7 +146,7 @@ static ssize_t osc_max_dirty_mb_seq_write(struct file *file, const char *buffer,
if (pages_number <= 0 ||
pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) ||
- pages_number > num_physpages / 4) /* 1/4 of RAM */
+ pages_number > totalram_pages / 4) /* 1/4 of RAM */
return -ERANGE;
client_obd_list_lock(&cli->cl_loi_list_lock);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index 3e73254..2bd0d98 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -302,7 +302,7 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file, const char *buffer,
* hose a kernel by allowing the request history to grow too
* far. */
bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (val > num_physpages/(2 * bufpages))
+ if (val > totalram_pages / (2 * bufpages))
return -ERANGE;
spin_lock(&svc->srv_lock);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 87eea53..e90c8fb 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -149,7 +149,7 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
"max waitqueue depth: %u\n"
"max wait time: "CFS_TIME_T"/%u\n"
,
- num_physpages,
+ totalram_pages,
PAGES_PER_POOL,
page_pools.epp_max_pages,
page_pools.epp_max_pools,
@@ -719,7 +719,7 @@ int sptlrpc_enc_pool_init(void)
* maximum capacity is 1/8 of total physical memory.
* is the 1/8 a good number?
*/
- page_pools.epp_max_pages = num_physpages / 8;
+ page_pools.epp_max_pages = totalram_pages / 8;
page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
init_waitqueue_head(&page_pools.epp_waitq);
--
1.7.9.5