This patch set cleans up and refines scheduler-related code and saves a few lines.
Roughly tested (random benchmarks) with the latest tip/master on a desktop.
Michael Wang (11):
[PATCH 01/11] sched/cleanup: remove the extra line in init_sched_fair_class()
[PATCH 02/11] sched/cleanup: remove the extra parm of alloc_thread_info_node()
[PATCH 03/11] sched/cleanup: remove the extra parm of copy_flags()
[PATCH 04/11] sched/cleanup: refine rt_policy()
[PATCH 05/11] sched/cleanup: remove the extra parm of wakeup_gran()
[PATCH 06/11] sched/cleanup: refine __setup_irq()
[PATCH 07/11] sched/cleanup: remove the extra parm of irq_do_set_affinity()
[PATCH 08/11] sched/cleanup: remove the extra parm of sched_clock_idle_wakeup_event()
[PATCH 09/11] sched/cleanup: remove the extra parm of tick_nohz_start_idle()
[PATCH 10/11] sched/cleanup: remove the extra parm of retrigger_next_event()
[PATCH 11/11] sched/cleanup: remove the extra parm of find_new_ilb()
---
b/arch/x86/kernel/tsc.c | 2 +-
b/drivers/acpi/processor_idle.c | 4 ++--
b/include/linux/sched.h | 7 ++-----
b/kernel/fork.c | 8 +++-----
b/kernel/hrtimer.c | 8 ++++----
b/kernel/irq/internals.h | 2 +-
b/kernel/irq/manage.c | 6 ++----
b/kernel/irq/migration.c | 2 +-
b/kernel/sched/clock.c | 2 +-
b/kernel/sched/fair.c | 1 -
b/kernel/sched/sched.h | 4 +---
b/kernel/time/tick-sched.c | 2 +-
kernel/fork.c | 4 ++--
kernel/irq/manage.c | 7 +++----
kernel/sched/fair.c | 8 ++++----
kernel/time/tick-sched.c | 4 ++--
16 files changed, 30 insertions(+), 41 deletions(-)
Remove the extra blank line at the end of init_sched_fair_class().
CC: Ingo Molnar <[email protected]>
CC: Peter Zijlstra <[email protected]>
Signed-off-by: Michael Wang <[email protected]>
---
kernel/sched/fair.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index aaaed57..a925e17 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6277,5 +6277,4 @@ __init void init_sched_fair_class(void)
cpu_notifier(sched_ilb_notifier, 0);
#endif
#endif /* SMP */
-
}
--
1.7.9.5
The 'tsk' parameter is unused; remove it to clean up the code.
CC: Andrew Morton <[email protected]>
CC: Al Viro <[email protected]>
CC: "Eric W. Biederman" <[email protected]>
CC: Oleg Nesterov <[email protected]>
CC: Srikar Dronamraju <[email protected]>
CC: Ingo Molnar <[email protected]>
CC: Peter Zijlstra <[email protected]>
Signed-off-by: Michael Wang <[email protected]>
---
kernel/fork.c | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index e23bb19..457945f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -144,8 +144,7 @@ void __weak arch_release_thread_info(struct thread_info *ti)
* kmemcache based allocator.
*/
# if THREAD_SIZE >= PAGE_SIZE
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
- int node)
+static struct thread_info *alloc_thread_info_node(int node)
{
struct page *page = alloc_pages_node(node, THREADINFO_GFP_ACCOUNTED,
THREAD_SIZE_ORDER);
@@ -160,8 +159,7 @@ static inline void free_thread_info(struct thread_info *ti)
# else
static struct kmem_cache *thread_info_cache;
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
- int node)
+static struct thread_info *alloc_thread_info_node(int node)
{
return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
}
@@ -302,7 +300,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
if (!tsk)
return NULL;
- ti = alloc_thread_info_node(tsk, node);
+ ti = alloc_thread_info_node(node);
if (!ti)
goto free_tsk;
--
1.7.9.5
The 'clone_flags' parameter is unused; remove it to clean up the code.
CC: Andrew Morton <[email protected]>
CC: Al Viro <[email protected]>
CC: "Eric W. Biederman" <[email protected]>
CC: Oleg Nesterov <[email protected]>
CC: Srikar Dronamraju <[email protected]>
CC: Ingo Molnar <[email protected]>
CC: Peter Zijlstra <[email protected]>
Signed-off-by: Michael Wang <[email protected]>
---
kernel/fork.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 457945f..3c2d8fd 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1070,7 +1070,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
return 0;
}
-static void copy_flags(unsigned long clone_flags, struct task_struct *p)
+static void copy_flags(struct task_struct *p)
{
unsigned long new_flags = p->flags;
@@ -1223,7 +1223,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->did_exec = 0;
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
- copy_flags(clone_flags, p);
+ copy_flags(p);
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
rcu_copy_process(p);
--
1.7.9.5
Refine rt_policy() to save a few lines.
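The two forms are equivalent: in C, '==' and '||' always evaluate to 0 or 1, so the boolean expression can be returned as an int directly. A minimal stand-alone user-space sketch of that equivalence (illustration only, not part of the patch; rt_policy_old()/rt_policy_new() are just illustrative names, SCHED_FIFO/SCHED_RR come from <sched.h>):

	#include <assert.h>
	#include <sched.h>	/* SCHED_FIFO, SCHED_RR */

	/* illustration only -- not part of the kernel patch */

	/* old form: explicit if/return */
	static int rt_policy_old(int policy)
	{
		if (policy == SCHED_FIFO || policy == SCHED_RR)
			return 1;
		return 0;
	}

	/* new form: return the boolean expression directly */
	static int rt_policy_new(int policy)
	{
		return policy == SCHED_FIFO || policy == SCHED_RR;
	}

	int main(void)
	{
		int policy;

		/* both forms agree for every policy value checked */
		for (policy = 0; policy < 16; policy++)
			assert(rt_policy_old(policy) == rt_policy_new(policy));
		return 0;
	}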
CC: Ingo Molnar <[email protected]>
CC: Peter Zijlstra <[email protected]>
Signed-off-by: Michael Wang <[email protected]>
---
kernel/sched/sched.h | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4c1cb80..b557f39 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -82,9 +82,7 @@ extern void update_cpu_load_active(struct rq *this_rq);
static inline int rt_policy(int policy)
{
- if (policy == SCHED_FIFO || policy == SCHED_RR)
- return 1;
- return 0;
+ return policy == SCHED_FIFO || policy == SCHED_RR;
}
static inline int task_has_rt_policy(struct task_struct *p)
--
1.7.9.5
The 'curr' parameter is unused; remove it to clean up the code.
CC: Ingo Molnar <[email protected]>
CC: Peter Zijlstra <[email protected]>
Signed-off-by: Michael Wang <[email protected]>
---
kernel/sched/fair.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a925e17..05643d6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3536,7 +3536,7 @@ migrate_task_rq_fair(struct task_struct *p, int next_cpu)
#endif /* CONFIG_SMP */
static unsigned long
-wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
+wakeup_gran(struct sched_entity *se)
{
unsigned long gran = sysctl_sched_wakeup_granularity;
@@ -3578,7 +3578,7 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
if (vdiff <= 0)
return -1;
- gran = wakeup_gran(curr, se);
+ gran = wakeup_gran(se);
if (vdiff > gran)
return 1;
--
1.7.9.5
Refine __setup_irq() to save a few lines by folding the nested if into an else-if.
CC: Thomas Gleixner <[email protected]>
CC: Ingo Molnar <[email protected]>
CC: Peter Zijlstra <[email protected]>
Signed-off-by: Michael Wang <[email protected]>
---
kernel/irq/manage.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 514bcfd..46941e7 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -933,10 +933,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
* dummy function which warns when called.
*/
new->handler = irq_nested_primary_handler;
- } else {
- if (irq_settings_can_thread(desc))
- irq_setup_forced_threading(new);
- }
+ } else if (irq_settings_can_thread(desc))
+ irq_setup_forced_threading(new);
/*
* Create a handler thread when a thread function is supplied
--
1.7.9.5
The 'force' parameter is ignored, and no current caller needs it to be true; remove it to clean up the code.
CC: Thomas Gleixner <[email protected]>
CC: Ingo Molnar <[email protected]>
CC: Peter Zijlstra <[email protected]>
Signed-off-by: Michael Wang <[email protected]>
---
kernel/irq/internals.h | 2 +-
kernel/irq/manage.c | 7 +++----
kernel/irq/migration.c | 2 +-
3 files changed, 5 insertions(+), 6 deletions(-)
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 001fa5b..c5b4085 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -102,7 +102,7 @@ extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
extern void irq_set_thread_affinity(struct irq_desc *desc);
extern int irq_do_set_affinity(struct irq_data *data,
- const struct cpumask *dest, bool force);
+ const struct cpumask *dest);
/* Inline functions for support of irq chips on slow busses */
static inline void chip_bus_lock(struct irq_desc *desc)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 46941e7..9a71ba5 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -143,8 +143,7 @@ static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif
-int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
- bool force)
+int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask)
{
struct irq_desc *desc = irq_data_to_desc(data);
struct irq_chip *chip = irq_data_get_irq_chip(data);
@@ -172,7 +171,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
return -EINVAL;
if (irq_can_move_pcntxt(data)) {
- ret = irq_do_set_affinity(data, mask, false);
+ ret = irq_do_set_affinity(data, mask);
} else {
irqd_set_move_pending(data);
irq_copy_pending(desc, mask);
@@ -323,7 +322,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
if (cpumask_intersects(mask, nodemask))
cpumask_and(mask, mask, nodemask);
}
- irq_do_set_affinity(&desc->irq_data, mask, false);
+ irq_do_set_affinity(&desc->irq_data, mask);
return 0;
}
#else
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index ca3f4aa..4790949 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -43,7 +43,7 @@ void irq_move_masked_irq(struct irq_data *idata)
* masking the irqs.
*/
if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
- irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
+ irq_do_set_affinity(&desc->irq_data, desc->pending_mask);
cpumask_clear(desc->pending_mask);
}
--
1.7.9.5
The 'delta_ns' parameter is unused; remove it to clean up the code.
CC: Thomas Gleixner <[email protected]>
CC: "H. Peter Anvin" <[email protected]>
CC: Len Brown <[email protected]>
CC: "Rafael J. Wysocki" <[email protected]>
CC: John Stultz <[email protected]>
CC: Paul Gortmaker <[email protected]>
CC: Feng Tang <[email protected]>
CC: Ingo Molnar <[email protected]>
CC: Peter Zijlstra <[email protected]>
Signed-off-by: Michael Wang <[email protected]>
---
arch/x86/kernel/tsc.c | 2 +-
drivers/acpi/processor_idle.c | 4 ++--
include/linux/sched.h | 7 ++-----
kernel/sched/clock.c | 2 +-
kernel/time/tick-sched.c | 2 +-
5 files changed, 7 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 930e5d4..4a40402 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -635,7 +635,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
(1UL << CYC2NS_SCALE_FACTOR));
}
- sched_clock_idle_wakeup_event(0);
+ sched_clock_idle_wakeup_event();
local_irq_restore(flags);
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f98dd00..2bb49f8 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -817,7 +817,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
sched_clock_idle_sleep_event();
acpi_idle_do_entry(cx);
- sched_clock_idle_wakeup_event(0);
+ sched_clock_idle_wakeup_event();
if (cx->entry_method != ACPI_CSTATE_FFH)
current_thread_info()->status |= TS_POLLING;
@@ -913,7 +913,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
raw_spin_unlock(&c3_lock);
}
- sched_clock_idle_wakeup_event(0);
+ sched_clock_idle_wakeup_event();
if (cx->entry_method != ACPI_CSTATE_FFH)
current_thread_info()->status |= TS_POLLING;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 45a1a88..800c57f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1809,7 +1809,7 @@ static inline void sched_clock_idle_sleep_event(void)
{
}
-static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
+static inline void sched_clock_idle_wakeup_event(void)
{
}
#else
@@ -1823,7 +1823,7 @@ extern int sched_clock_stable;
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
-extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+extern void sched_clock_idle_wakeup_event(void);
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -1849,9 +1849,6 @@ extern void sched_exec(void);
#define sched_exec() {}
#endif
-extern void sched_clock_idle_sleep_event(void);
-extern void sched_clock_idle_wakeup_event(u64 delta_ns);
-
#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index c3ae144..d111c58 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -294,7 +294,7 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
/*
* We just idled delta nanoseconds (called with irqs disabled):
*/
-void sched_clock_idle_wakeup_event(u64 delta_ns)
+void sched_clock_idle_wakeup_event(void)
{
if (timekeeping_suspended)
return;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3612fc7..4b24d9e 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -433,7 +433,7 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now)
update_ts_time_stats(cpu, ts, now, NULL);
ts->idle_active = 0;
- sched_clock_idle_wakeup_event(0);
+ sched_clock_idle_wakeup_event();
}
static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
--
1.7.9.5
The 'cpu' parameter is unused; remove it to clean up the code.
CC: Thomas Gleixner <[email protected]>
CC: Ingo Molnar <[email protected]>
CC: Peter Zijlstra <[email protected]>
Signed-off-by: Michael Wang <[email protected]>
---
kernel/time/tick-sched.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 4b24d9e..6d24c5f 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -436,7 +436,7 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now)
sched_clock_idle_wakeup_event();
}
-static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
{
ktime_t now = ktime_get();
@@ -752,7 +752,7 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
ktime_t now, expires;
int cpu = smp_processor_id();
- now = tick_nohz_start_idle(cpu, ts);
+ now = tick_nohz_start_idle(ts);
if (can_stop_idle_tick(cpu, ts)) {
int was_stopped = ts->tick_stopped;
--
1.7.9.5
The 'arg' parameter is unused; remove it to clean up the code.
CC: Thomas Gleixner <[email protected]>
CC: Ingo Molnar <[email protected]>
CC: Peter Zijlstra <[email protected]>
Signed-off-by: Michael Wang <[email protected]>
---
kernel/hrtimer.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 383319b..1abca37 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -678,7 +678,7 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
*
* Called with interrupts disabled via on_each_cpu()
*/
-static void retrigger_next_event(void *arg)
+static void retrigger_next_event(void)
{
struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
@@ -717,7 +717,7 @@ static int hrtimer_switch_to_hres(void)
tick_setup_sched_timer();
/* "Retrigger" the interrupt to get things going */
- retrigger_next_event(NULL);
+ retrigger_next_event();
local_irq_restore(flags);
return 1;
}
@@ -751,7 +751,7 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
-static inline void retrigger_next_event(void *arg) { }
+static inline void retrigger_next_event(void) { }
#endif /* CONFIG_HIGH_RES_TIMERS */
@@ -787,7 +787,7 @@ void hrtimers_resume(void)
KERN_INFO "hrtimers_resume() called with IRQs enabled!");
/* Retrigger on the local CPU */
- retrigger_next_event(NULL);
+ retrigger_next_event();
/* And schedule a retrigger for all others */
clock_was_set_delayed();
}
--
1.7.9.5
The 'call_cpu' parameter is unused (find_new_ilb() only consults nohz.idle_cpus_mask); remove it to clean up the code.
CC: Ingo Molnar <[email protected]>
CC: Peter Zijlstra <[email protected]>
Signed-off-by: Michael Wang <[email protected]>
---
kernel/sched/fair.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 05643d6..29ac8dd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5454,7 +5454,7 @@ static struct {
unsigned long next_balance; /* in jiffy units */
} nohz ____cacheline_aligned;
-static inline int find_new_ilb(int call_cpu)
+static inline int find_new_ilb(void)
{
int ilb = cpumask_first(nohz.idle_cpus_mask);
@@ -5475,7 +5475,7 @@ static void nohz_balancer_kick(int cpu)
nohz.next_balance++;
- ilb_cpu = find_new_ilb(cpu);
+ ilb_cpu = find_new_ilb();
if (ilb_cpu >= nr_cpu_ids)
return;
--
1.7.9.5
On 08/22/2013 03:57 PM, Michael wang wrote:
> The 'delta_ns' parameter is unused; remove it to clean up the code.
[snip]
>
> -extern void sched_clock_idle_sleep_event(void);
> -extern void sched_clock_idle_wakeup_event(u64 delta_ns);
Also remove the extra (duplicate) declarations here.
Regards,
Michael Wang
> -
> #ifdef CONFIG_HOTPLUG_CPU
> extern void idle_task_exit(void);
> #else
> diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
> index c3ae144..d111c58 100644
> --- a/kernel/sched/clock.c
> +++ b/kernel/sched/clock.c
> @@ -294,7 +294,7 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
> /*
> * We just idled delta nanoseconds (called with irqs disabled):
> */
> -void sched_clock_idle_wakeup_event(u64 delta_ns)
> +void sched_clock_idle_wakeup_event(void)
> {
> if (timekeeping_suspended)
> return;
> diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
> index 3612fc7..4b24d9e 100644
> --- a/kernel/time/tick-sched.c
> +++ b/kernel/time/tick-sched.c
> @@ -433,7 +433,7 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now)
> update_ts_time_stats(cpu, ts, now, NULL);
> ts->idle_active = 0;
>
> - sched_clock_idle_wakeup_event(0);
> + sched_clock_idle_wakeup_event();
> }
>
> static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
>