Blame view
kernel/sched/pelt.h
1.66 KB
c07962986 sched/pelt: Move ... |
1 2 3 4 5 |
#ifdef CONFIG_SMP

/* Core PELT accrual helpers for a blocked se, a runnable se and a cfs_rq. */
int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se);
int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
371bf4273 sched/rt: Add rt_... |
6 |
/* Fold @running time at @now into the rq's RT-class PELT signal (SMP). */
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
3727e0e16 sched/dl: Add dl_... |
7 |
/* Fold @running time at @now into the rq's deadline-class PELT signal (SMP). */
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
c07962986 sched/pelt: Move ... |
8 |
|
dc5350715 sched/pelt: Fix w... |
9 |
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ |
91c27493e sched/irq: Add IR... |
10 11 12 13 14 15 16 17 |
int update_irq_load_avg(struct rq *rq, u64 running); #else static inline int update_irq_load_avg(struct rq *rq, u64 running) { return 0; } #endif |
c07962986 sched/pelt: Move ... |
18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 |
/* * When a task is dequeued, its estimated utilization should not be update if * its util_avg has not been updated at least once. * This flag is used to synchronize util_avg updates with util_est updates. * We map this information into the LSB bit of the utilization saved at * dequeue time (i.e. util_est.dequeued). */ #define UTIL_AVG_UNCHANGED 0x1 static inline void cfs_se_util_change(struct sched_avg *avg) { unsigned int enqueued; if (!sched_feat(UTIL_EST)) return; /* Avoid store if the flag has been already set */ enqueued = avg->util_est.enqueued; if (!(enqueued & UTIL_AVG_UNCHANGED)) return; /* Reset flag to report util_avg has been updated */ enqueued &= ~UTIL_AVG_UNCHANGED; WRITE_ONCE(avg->util_est.enqueued, enqueued); } #else static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) { return 0; } |
371bf4273 sched/rt: Add rt_... |
51 52 53 54 55 |
/* !CONFIG_SMP stub: RT PELT tracking is compiled out; report "no change". */
static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}
3727e0e16 sched/dl: Add dl_... |
56 57 58 59 60 |
/* !CONFIG_SMP stub: deadline PELT tracking is compiled out; report "no change". */
static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}
91c27493e sched/irq: Add IR... |
61 62 63 64 65 66 |
/* !CONFIG_SMP stub: IRQ PELT tracking is compiled out; report "no change". */
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}
c07962986 sched/pelt: Move ... |
67 |
#endif /* CONFIG_SMP */