Commit a9957449b08ab561a33e1e038df06843b8d8dd9f
Committed by: Ingo Molnar
1 parent: 155bb293ae
sched: uninline scheduler
 * save ~300 bytes
 * activate_idle_task() was moved to avoid a warning

bloat-o-meter output:

add/remove: 6/0 grow/shrink: 0/16 up/down: 438/-733 (-295)    <===
function                                     old     new   delta
__enqueue_entity                               -     165    +165
finish_task_switch                             -     110    +110
update_curr_rt                                 -      79     +79
__load_balance_iterator                        -      32     +32
__task_rq_unlock                               -      28     +28
find_process_by_pid                            -      24     +24
do_sched_setscheduler                        133     123     -10
sys_sched_rr_get_interval                    176     165     -11
sys_sched_getparam                           156     145     -11
normalize_rt_tasks                           482     470     -12
sched_getaffinity                            112      99     -13
sys_sched_getscheduler                        86      72     -14
sched_setaffinity                            226     212     -14
sched_setscheduler                           666     642     -24
load_balance_start_fair                       33       9     -24
load_balance_next_fair                        33       9     -24
dequeue_task_rt                              133      67     -66
put_prev_task_rt                              97      28     -69
schedule_tail                                133      50     -83
schedule                                     682     594     -88
enqueue_entity                               499     366    -133
task_new_fair                                317     180    -137

Signed-off-by: Alexey Dobriyan <adobriyan@sw.ru>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
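The patch itself is mechanical: dropping `inline` lets the compiler emit one shared out-of-line copy of each function instead of duplicating its body into every caller, which is where the ~300 bytes come from. Below is a minimal userspace sketch of that trade-off; it is not part of this commit, the file name, helper() and MAYBE_INLINE are hypothetical, and since plain `static` at -O2 may still be auto-inlined, the sketch uses `noinline` to model the uninlined build. The bloat-o-meter invocation assumes you run it from a kernel source tree.

/*
 * uninline.c - hypothetical sketch of the inline-vs-out-of-line trade-off.
 *
 * Build one binary with the helper force-inlined and one without, then
 * compare symbol sizes with the kernel's script (run from a kernel tree):
 *
 *   gcc -O2 -DUSE_INLINE -o old uninline.c
 *   gcc -O2 -o new uninline.c
 *   ./scripts/bloat-o-meter old new
 */
#include <stdio.h>

#ifdef USE_INLINE
#define MAYBE_INLINE inline __attribute__((always_inline))
#else
#define MAYBE_INLINE __attribute__((noinline))
#endif

/* Stand-in for a helper such as finish_task_switch(). */
static MAYBE_INLINE long helper(long x)
{
	long sum = 0;
	long i;

	for (i = 1; i <= x; i++)
		sum += i * i;
	return sum;
}

int main(void)
{
	/*
	 * Three call sites: force-inlined, the loop body is emitted
	 * three times; uninlined, it is emitted once and called.
	 */
	printf("%ld\n", helper(10));
	printf("%ld\n", helper(100));
	printf("%ld\n", helper(1000));
	return 0;
}

The per-copy cost only pays off when a function has several callers or a non-trivial body, which is why the table above shows small helpers like find_process_by_pid() gaining one 24-byte out-of-line copy while each of their callers shrinks.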
Showing 3 changed files with 24 additions and 24 deletions
kernel/sched.c
@@ -608,7 +608,7 @@
 	return rq;
 }
 
-static inline void __task_rq_unlock(struct rq *rq)
+static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
 	spin_unlock(&rq->lock);
@@ -623,7 +623,7 @@
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
-static inline struct rq *this_rq_lock(void)
+static struct rq *this_rq_lock(void)
 	__acquires(rq->lock)
 {
 	struct rq *rq;
@@ -986,20 +986,6 @@
 }
 
 /*
- * activate_idle_task - move idle task to the _front_ of runqueue.
- */
-static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
-{
-	update_rq_clock(rq);
-
-	if (p->state == TASK_UNINTERRUPTIBLE)
-		rq->nr_uninterruptible--;
-
-	enqueue_task(rq, p, 0);
-	inc_nr_running(p, rq);
-}
-
-/*
  * deactivate_task - remove a task from the runqueue.
  */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
@@ -1206,7 +1192,7 @@
  * We want to under-estimate the load of migration sources, to
  * balance conservatively.
  */
-static inline unsigned long source_load(int cpu, int type)
+static unsigned long source_load(int cpu, int type)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long total = weighted_cpuload(cpu);
@@ -1221,7 +1207,7 @@
  * Return a high guess at the load of a migration-target cpu weighted
  * according to the scheduling class and "nice" value.
  */
-static inline unsigned long target_load(int cpu, int type)
+static unsigned long target_load(int cpu, int type)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long total = weighted_cpuload(cpu);
@@ -1813,7 +1799,7 @@
  * with the lock held can cause deadlocks; see schedule() for
  * details.)
  */
-static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
+static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct mm_struct *mm = rq->prev_mm;
@@ -3020,7 +3006,7 @@
  *
  * Balancing parameters are set up in arch_init_sched_domains.
  */
-static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
+static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 {
 	int balance = 1;
 	struct rq *rq = cpu_rq(cpu);
@@ -4140,7 +4126,7 @@
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
  */
-static inline struct task_struct *find_process_by_pid(pid_t pid)
+static struct task_struct *find_process_by_pid(pid_t pid)
 {
 	return pid ? find_task_by_pid(pid) : current;
 }
@@ -5157,6 +5143,20 @@
 }
 
 /*
+ * activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static void activate_idle_task(struct task_struct *p, struct rq *rq)
+{
+	update_rq_clock(rq);
+
+	if (p->state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible--;
+
+	enqueue_task(rq, p, 0);
+	inc_nr_running(p, rq);
+}
+
+/*
  * Schedules idle task to be the next runnable task on current CPU.
  * It does so by boosting its priority to highest possible and adding it to
  * the _front_ of the runqueue. Used by CPU offline code.
@@ -6494,7 +6494,7 @@
 	   && addr < (unsigned long)__sched_text_end);
 }
 
-static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
+static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 {
 	cfs_rq->tasks_timeline = RB_ROOT;
 #ifdef CONFIG_FAIR_GROUP_SCHED
kernel/sched_fair.c
@@ -892,7 +892,7 @@
  * achieve that by always pre-iterating before returning
  * the current task:
  */
-static inline struct task_struct *
+static struct task_struct *
 __load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
 {
 	struct task_struct *p;
kernel/sched_rt.c
@@ -7,7 +7,7 @@
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
  */
-static inline void update_curr_rt(struct rq *rq)
+static void update_curr_rt(struct rq *rq)
 {
 	struct task_struct *curr = rq->curr;
 	u64 delta_exec;