This patchset makes a number of *_is_*-style functions return bool, since
these functions only use true or false as their return values.
No functional change.
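For reference, every patch in the series follows the same pattern; a minimal,
hypothetical sketch (foo_is_ready() is only an illustration, not a real kernel
function):

#include <stdbool.h>

/* Hypothetical predicate used only to illustrate the conversion pattern. */
struct foo {
	int state;
};

#define FOO_READY	1

/* Before: typed as int even though it can only ever yield 0 or 1. */
static inline int foo_is_ready_old(const struct foo *f)
{
	return f->state == FOO_READY;
}

/* After: bool makes the contract explicit; the function body is unchanged. */
static inline bool foo_is_ready(const struct foo *f)
{
	return f->state == FOO_READY;
}

Because C converts bool to int (true as 1, false as 0), callers that test or
compare the result behave exactly as before, which is why no functional change
is expected.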
Yaowei Bai (7):
mm/memblock: memblock_is_map/region_memory can be boolean
lib/lockref: __lockref_is_dead can be boolean
kernel/cpuset: current_cpuset_is_being_rebound can be boolean
kernel/resource: iomem_is_exclusive can be boolean
kernel/module: module_is_live can be boolean
kernel/mutex: mutex_is_locked can be boolean
crash_dump: is_kdump_kernel can be boolean
include/linux/cpuset.h | 6 +++---
include/linux/crash_dump.h | 10 +++++-----
include/linux/ioport.h | 2 +-
include/linux/lockref.h | 2 +-
include/linux/memblock.h | 4 ++--
include/linux/module.h | 2 +-
include/linux/mutex.h | 4 ++--
kernel/cgroup/cpuset.c | 4 ++--
kernel/resource.c | 10 +++++-----
mm/memblock.c | 6 +++---
10 files changed, 25 insertions(+), 25 deletions(-)
--
1.8.3.1
This patch makes memblock_is_map_memory() and memblock_is_region_memory()
return bool, since these two functions only use true or false as their
return values.
No functional change.
Signed-off-by: Yaowei Bai <[email protected]>
---
include/linux/memblock.h | 4 ++--
mm/memblock.c | 6 +++---
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 7ed0f77..8be5077 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -332,8 +332,8 @@ phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
-int memblock_is_map_memory(phys_addr_t addr);
-int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
+bool memblock_is_map_memory(phys_addr_t addr);
+bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
diff --git a/mm/memblock.c b/mm/memblock.c
index 46aacdf..5a9ca2a 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1654,7 +1654,7 @@ bool __init_memblock memblock_is_memory(phys_addr_t addr)
return memblock_search(&memblock.memory, addr) != -1;
}
-int __init_memblock memblock_is_map_memory(phys_addr_t addr)
+bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
int i = memblock_search(&memblock.memory, addr);
@@ -1690,13 +1690,13 @@ int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
* RETURNS:
* 0 if false, non-zero if true
*/
-int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
+bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
int idx = memblock_search(&memblock.memory, base);
phys_addr_t end = base + memblock_cap_size(base, &size);
if (idx == -1)
- return 0;
+ return false;
return (memblock.memory.regions[idx].base +
memblock.memory.regions[idx].size) >= end;
}
--
1.8.3.1
This patch makes module_is_live() return bool, since this function only
uses one or zero as its return value.
No functional change.
Signed-off-by: Yaowei Bai <[email protected]>
---
include/linux/module.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/module.h b/include/linux/module.h
index c69b49a..fa5d53e 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -485,7 +485,7 @@ struct module {
/* FIXME: It'd be nice to isolate modules during init, too, so they
aren't used before they (may) fail. But presently too much code
(IDE & SCSI) require entry into the module during init.*/
-static inline int module_is_live(struct module *mod)
+static inline bool module_is_live(struct module *mod)
{
return mod->state != MODULE_STATE_GOING;
}
--
1.8.3.1
This patch makes __lockref_is_dead() return bool, since this function only
uses true or false as its return value.
No functional change.
Signed-off-by: Yaowei Bai <[email protected]>
---
include/linux/lockref.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index ef3c934..2eac320 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -44,7 +44,7 @@ struct lockref {
extern int lockref_get_not_dead(struct lockref *);
/* Must be called under spinlock for reliable results */
-static inline int __lockref_is_dead(const struct lockref *l)
+static inline bool __lockref_is_dead(const struct lockref *l)
{
return ((int)l->count < 0);
}
--
1.8.3.1
This patch makes mutex_is_locked() return bool, since this function only
uses one or zero as its return value.
No functional change.
Signed-off-by: Yaowei Bai <[email protected]>
---
include/linux/mutex.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 153274f..f25c134 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -138,9 +138,9 @@ extern void __mutex_init(struct mutex *lock, const char *name,
* mutex_is_locked - is the mutex locked
* @lock: the mutex to be queried
*
- * Returns 1 if the mutex is locked, 0 if unlocked.
+ * Returns true if the mutex is locked, false if unlocked.
*/
-static inline int mutex_is_locked(struct mutex *lock)
+static inline bool mutex_is_locked(struct mutex *lock)
{
/*
* XXX think about spin_is_locked
--
1.8.3.1
This patch makes is_kdump_kernel() return bool, since this function only
uses one or zero as its return value.
No functional change.
Signed-off-by: Yaowei Bai <[email protected]>
---
include/linux/crash_dump.h | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index a992e6c..8b0c711 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -52,13 +52,13 @@ extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
* has passed the elf core header address on command line.
*
* This is not just a test if CONFIG_CRASH_DUMP is enabled or not. It will
- * return 1 if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic of
- * previous kernel.
+ * return true if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic
+ * of previous kernel.
*/
-static inline int is_kdump_kernel(void)
+static inline bool is_kdump_kernel(void)
{
- return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0;
+ return elfcorehdr_addr != ELFCORE_ADDR_MAX;
}
/* is_vmcore_usable() checks if the kernel is booting after a panic and
@@ -89,7 +89,7 @@ static inline void vmcore_unusable(void)
extern void unregister_oldmem_pfn_is_ram(void);
#else /* !CONFIG_CRASH_DUMP */
-static inline int is_kdump_kernel(void) { return 0; }
+static inline bool is_kdump_kernel(void) { return false; }
#endif /* CONFIG_CRASH_DUMP */
extern unsigned long saved_max_pfn;
--
1.8.3.1
This patch makes current_cpuset_is_being_rebound() return bool, since this
function only uses one or zero as its return value.
No functional change.
Signed-off-by: Yaowei Bai <[email protected]>
---
include/linux/cpuset.h | 6 +++---
kernel/cgroup/cpuset.c | 4 ++--
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 1b8e415..934633a 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -112,7 +112,7 @@ static inline int cpuset_do_slab_mem_spread(void)
return task_spread_slab(current);
}
-extern int current_cpuset_is_being_rebound(void);
+extern bool current_cpuset_is_being_rebound(void);
extern void rebuild_sched_domains(void);
@@ -247,9 +247,9 @@ static inline int cpuset_do_slab_mem_spread(void)
return 0;
}
-static inline int current_cpuset_is_being_rebound(void)
+static inline bool current_cpuset_is_being_rebound(void)
{
- return 0;
+ return false;
}
static inline void rebuild_sched_domains(void)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index f7efa7b..b42037e 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1254,9 +1254,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
return retval;
}
-int current_cpuset_is_being_rebound(void)
+bool current_cpuset_is_being_rebound(void)
{
- int ret;
+ bool ret;
rcu_read_lock();
ret = task_cs(current) == cpuset_being_rebound;
--
1.8.3.1
This patch makes iomem_is_exclusive() return bool, since this function only
uses one or zero as its return value.
No functional change.
Signed-off-by: Yaowei Bai <[email protected]>
---
include/linux/ioport.h | 2 +-
kernel/resource.c | 10 +++++-----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 93b4183..da0ebae 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -265,7 +265,7 @@ extern struct resource * __devm_request_region(struct device *dev,
extern void __devm_release_region(struct device *dev, struct resource *parent,
resource_size_t start, resource_size_t n);
extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
-extern int iomem_is_exclusive(u64 addr);
+extern bool iomem_is_exclusive(u64 addr);
extern int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
diff --git a/kernel/resource.c b/kernel/resource.c
index 54ba6de..a269b9a1 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1563,17 +1563,17 @@ int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
/*
* check if an address is reserved in the iomem resource tree
- * returns 1 if reserved, 0 if not reserved.
+ * returns true if reserved, false if not reserved.
*/
-int iomem_is_exclusive(u64 addr)
+bool iomem_is_exclusive(u64 addr)
{
struct resource *p = &iomem_resource;
- int err = 0;
+ bool err = false;
loff_t l;
int size = PAGE_SIZE;
if (!strict_iomem_checks)
- return 0;
+ return false;
addr = addr & PAGE_MASK;
@@ -1596,7 +1596,7 @@ int iomem_is_exclusive(u64 addr)
continue;
if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
|| p->flags & IORESOURCE_EXCLUSIVE) {
- err = 1;
+ err = true;
break;
}
}
--
1.8.3.1