Commit d4a6f3c32c39132318454e77d59ab14b06f6eb02
Committed by: Ingo Molnar
Parent: b2b5ce022a
Exists in: master and 4 other branches
sched_stat: Update sched_info_queue/dequeue() code comments
Remove some sched_info_queued()/sched_info_dequeued() code comments: we no longer belong to the O(1) scheduler era, and the active and expired arrays are no longer used.

Signed-off-by: Rakib Mullick <rakib.mullick@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <AANLkTi=REu0WzOp5N=nVT1=ZJ=ZA+MZFV+4CHSJ3Q-Yv@mail.gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Showing 1 changed file with 1 addition and 19 deletions (inline diff).
kernel/sched_stats.h
#ifdef CONFIG_SCHEDSTATS
/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 15

static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;
	int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
	char *mask_str = kmalloc(mask_len, GFP_KERNEL);

	if (mask_str == NULL)
		return -ENOMEM;

	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
	seq_printf(seq, "timestamp %lu\n", jiffies);
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u %u %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_count,
		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		preempt_disable();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			cpumask_scnprintf(mask_str, mask_len,
					  sched_domain_span(sd));
			seq_printf(seq, "domain%d %s", dcount++, mask_str);
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		preempt_enable();
#endif
	}
	kfree(mask_str);
	return 0;
}

static int schedstat_open(struct inode *inode, struct file *file)
{
	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
	char *buf = kmalloc(size, GFP_KERNEL);
	struct seq_file *m;
	int res;

	if (!buf)
		return -ENOMEM;
	res = single_open(file, show_schedstat, NULL);
	if (!res) {
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);
	return res;
}

static const struct file_operations proc_schedstat_operations = {
	.open    = schedstat_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init proc_schedstat_init(void)
{
	proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
	return 0;
}
module_init(proc_schedstat_init);

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
- * Called when a process is dequeued from the active array and given
- * the cpu. We should note that with the exception of interactive
- * tasks, the expired queue will become the active queue after the active
- * queue is empty, without explicitly dequeuing and requeuing tasks in the
- * expired queue. (Interactive tasks may be requeued directly to the
- * active queue, thus delaying tasks in the expired queue from running;
- * see scheduler_tick()).
- *
- * Though we are interested in knowing how long it was from the *first* time a
+ * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu, we call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus. The
 * delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(task_rq(t), delta);
}

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(task_rq(t), delta);
}

/*
- * Called when a process is queued into either the active or expired
- * array. The time is noted and later used to determine how long we
- * had to wait for us to reach the cpu. Since the expired queue will
- * become the active queue after active queue is empty, without dequeuing
- * and requeuing any tasks, we are interested in queuing to either. It
- * is unusual but not impossible for tasks to be dequeued and immediately
- * requeued in the same or another array: this can happen in sched_yield(),
- * set_user_nice(), and even load_balance() as it moves tasks from runqueue
- * to runqueue.
- *
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is already not set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = task_rq(t)->clock;
}

/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily. Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long long delta = task_rq(t)->clock -
					t->sched_info.last_arrival;

	rq_sched_info_depart(task_rq(t), delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	struct rq *rq = task_rq(prev);

	/*
	 * prev now departs the cpu. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(prev);

	if (next != rq->idle)
		sched_info_arrive(next);
}
static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(prev, next);
}
#else
#define sched_info_queued(t)		do { } while (0)
#define sched_info_reset_dequeued(t)	do { } while (0)
#define sched_info_dequeued(t)		do { } while (0)
#define sched_info_switch(t, next)	do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	spin_lock(&cputimer->lock);
	cputimer->cputime.utime =
		cputime_add(cputimer->cputime.utime, cputime);
	spin_unlock(&cputimer->lock);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	spin_lock(&cputimer->lock);
	cputimer->cputime.stime =
		cputime_add(cputimer->cputime.stime, cputime);
	spin_unlock(&cputimer->lock);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	spin_lock(&cputimer->lock);
	cputimer->cputime.sum_exec_runtime += ns;
	spin_unlock(&cputimer->lock);
}
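
For readers skimming the diff, the wait-time accounting that the surviving comments describe reduces to one idea: stamp the time a task is *first* queued, and add the delta to run_delay once it finally reaches a CPU. Below is a minimal stand-alone sketch of that bookkeeping, not kernel code; the field names mirror struct sched_info as used above, while the clock values and helper names (info_queued/info_arrive) are invented for the illustration.

/* User-space sketch of the last_queued/run_delay accounting described above. */
#include <stdio.h>

struct sched_info {
	unsigned long long last_queued;	/* when the task was first queued */
	unsigned long long run_delay;	/* total time spent waiting for a CPU */
	unsigned long long pcount;	/* number of times it reached a CPU */
};

/* Mirrors sched_info_queued(): only the first queueing sets the stamp. */
static void info_queued(struct sched_info *si, unsigned long long clock)
{
	if (!si->last_queued)
		si->last_queued = clock;
}

/* Mirrors sched_info_arrive(): account the wait and clear the stamp. */
static void info_arrive(struct sched_info *si, unsigned long long clock)
{
	if (si->last_queued) {
		si->run_delay += clock - si->last_queued;
		si->last_queued = 0;
	}
	si->pcount++;
}

int main(void)
{
	struct sched_info si = { 0, 0, 0 };

	info_queued(&si, 100);	/* first queued at t=100 */
	info_queued(&si, 150);	/* re-queued; stamp stays at 100 */
	info_arrive(&si, 400);	/* hits the CPU at t=400 */

	printf("run_delay=%llu pcount=%llu\n", si.run_delay, si.pcount);
	return 0;
}

Running it prints run_delay=300 pcount=1: the delay is measured from the first queueing, which is why the retained comment stresses the *first* time a task was queued.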