kernel/time/posix-cpu-timers.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */
#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>

#include "posix-timers.h"

static void posix_cpu_timer_rearm(struct k_itimer *timer);

/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update expiration cache as
 * well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	u64 nsecs = rlim_new * NSEC_PER_SEC;

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}
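
/*
 * Validate a CPU-clock clockid: the clock type must be known and, if a
 * PID is encoded, it must name a task the caller is permitted to time
 * (same thread group for per-thread clocks, a group leader otherwise).
 */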
static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		   same_thread_group(p, current) : has_group_leader_pid(p))) {
		error = -EINVAL;
	}
	rcu_read_unlock();

	return error;
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer, u64 now)
{
	int i;
	u64 delta, incr;

	if (timer->it.cpu.incr == 0)
		return;

	if (now < timer->it.cpu.expires)
		return;

	incr = timer->it.cpu.incr;
	delta = now + incr - timer->it.cpu.expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.expires += incr;
		timer->it_overrun += 1LL << i;
		delta -= incr;
	}
}

/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
		return 1;
	return 0;
}
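
/*
 * Per-thread clock samplers: PROF counts user + system time, VIRT
 * counts user time only.
 */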
static inline u64 prof_ticks(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);

	return utime + stime;
}

static inline u64 virt_ticks(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);

	return utime;
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec64 *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}

/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
			    u64 *sample)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		*sample = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		*sample = task_sched_runtime(p);
		break;
	}
	return 0;
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
{
	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}

/* Sample task_cputime_atomic values in "atomic_times", store results in "times". */
static inline void sample_cputime_atomic(struct task_cputime *times,
					 struct task_cputime_atomic *atomic_times)
{
	times->utime = atomic64_read(&atomic_times->utime);
	times->stime = atomic64_read(&atomic_times->stime);
	times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running)) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting cputimer->running without a lock. Ensure
		 * this only gets written to in one operation. We set
		 * running after update_gt_cputime() as a small optimization,
		 * but barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(cputimer->running, true);
	}
	sample_cputime_atomic(times, &cputimer->cputime_atomic);
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  u64 *sample)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		*sample = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		*sample = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		thread_group_cputime(p, &cputime);
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}

static int posix_cpu_clock_get_task(struct task_struct *tsk,
				    const clockid_t which_clock,
				    struct timespec64 *tp)
{
	int err = -EINVAL;
	u64 rtn;

	if (CPUCLOCK_PERTHREAD(which_clock)) {
		if (same_thread_group(tsk, current))
			err = cpu_clock_sample(which_clock, tsk, &rtn);
	} else {
		if (tsk == current || thread_group_leader(tsk))
			err = cpu_clock_sample_group(which_clock, tsk, &rtn);
	}

	if (!err)
		*tp = ns_to_timespec64(rtn);

	return err;
}

static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec64 *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int err = -EINVAL;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		err = posix_cpu_clock_get_task(current, which_clock, tp);
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p)
			err = posix_cpu_clock_get_task(p, which_clock, tp);
		rcu_read_unlock();
	}

	return err;
}

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	new_timer->kclock = &clock_posix_cpu;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);

	rcu_read_lock();
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !has_group_leader_pid(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	int ret = 0;
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Protect against sighand release/switch in exit/exec and process/
	 * thread timer list entry concurrent read/writes.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * We raced with the reaping of the task.
		 * The deletion should have cleared us off the list.
		 */
		WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			list_del(&timer->it.cpu.entry);

		unlock_task_sighand(p, &flags);
	}

	if (!ret)
		put_task_struct(p);

	return ret;
}
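
/*
 * Unlink every timer on @head; list_del_init() leaves each entry
 * self-linked, so a later posix_cpu_timer_del() sees an empty entry.
 */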
static void cleanup_timers_list(struct list_head *head)
{
	struct cpu_timer_list *timer, *next;

	list_for_each_entry_safe(timer, next, head, entry)
		list_del_init(&timer->entry);
}

/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head)
{
	cleanup_timers_list(head);
	cleanup_timers_list(++head);
	cleanup_timers_list(++head);
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(tsk->cpu_timers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(tsk->signal->cpu_timers);
}
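
/* True if the expiration cache slot is unset or later than @new_exp. */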
static inline int expires_gt(u64 expires, u64 new_exp)
{
	return expires == 0 || expires > new_exp;
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct task_cputime *cputime_expires;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		head = p->cpu_timers;
		cputime_expires = &p->cputime_expires;
	} else {
		head = p->signal->cpu_timers;
		cputime_expires = &p->signal->cputime_expires;
	}
	head += CPUCLOCK_WHICH(timer->it_clock);

	listpos = head;
	list_for_each_entry(next, head, entry) {
		if (nt->expires < next->expires)
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		u64 exp = nt->expires;

		/*
		 * We are the new earliest-expiring POSIX 1.b timer, hence
		 * need to update expiration cache. Take into account that
		 * for process timers we share expiration cache with itimers
		 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
		 */
		switch (CPUCLOCK_WHICH(timer->it_clock)) {
		case CPUCLOCK_PROF:
			if (expires_gt(cputime_expires->prof_exp, exp))
				cputime_expires->prof_exp = exp;
			break;
		case CPUCLOCK_VIRT:
			if (expires_gt(cputime_expires->virt_exp, exp))
				cputime_expires->virt_exp = exp;
			break;
		case CPUCLOCK_SCHED:
			if (expires_gt(cputime_expires->sched_exp, exp))
				cputime_expires->sched_exp = exp;
			break;
		}
		if (CPUCLOCK_PERTHREAD(timer->it_clock))
			tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
		else
			tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
	}
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		timer->it.cpu.expires = 0;
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires = 0;
	} else if (timer->it.cpu.incr == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_rearm(timer);
		++timer->it_requeue_pending;
	}
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p, u64 *sample)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		*sample = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec64 *new, struct itimerspec64 *old)
{
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;
	u64 old_expires, new_expires, old_incr, val;
	int ret;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Use the to_ktime conversion because that clamps the maximum
	 * value to KTIME_MAX and avoids multiplication overflows.
	 */
	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));

	/*
	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
	 * and p->signal->cpu_timers read/write in arm_timer()
	 */
	sighand = lock_task_sighand(p, &flags);
	/*
	 * If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(sighand == NULL)) {
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	ret = 0;
	old_incr = timer->it.cpu.incr;
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_timer_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (val < timer->it.cpu.expires) {
				old_expires = timer->it.cpu.expires - val;
				old->it_value = ns_to_timespec64(old_expires);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		unlock_task_sighand(p, &flags);
		goto out;
	}

	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer);
	}

	unlock_task_sighand(p, &flags);
	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);
	timer->it_interval = ns_to_ktime(timer->it.cpu.incr);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
out:
	if (old)
		old->it_interval = ns_to_timespec64(old_incr);

	return ret;
}
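
/*
 * Guts of sys_timer_gettime for CPU timers: report the reload interval
 * and the time remaining until expiry.
 */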
static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
	u64 now;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Easy part: convert the reload time.
	 */
	itp->it_interval = ns_to_timespec64(timer->it.cpu.incr);

	if (!timer->it.cpu.expires)
		return;

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
	} else {
		struct sighand_struct *sighand;
		unsigned long flags;

		/*
		 * Protect against sighand release/switch in exit/exec and
		 * also make timer sampling safe if it ends up calling
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			timer->it.cpu.expires = 0;
			return;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			unlock_task_sighand(p, &flags);
		}
	}

	if (now < timer->it.cpu.expires) {
		itp->it_value = ns_to_timespec64(timer->it.cpu.expires - now);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}
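
/*
 * Scan a timer list and move every timer that has expired against
 * @curr onto @firing with its firing flag set, at most 20 per call.
 * Returns the expiry of the first timer left on the list, or 0 if
 * the list was emptied.
 */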
static unsigned long long check_timers_list(struct list_head *timers,
					    struct list_head *firing,
					    unsigned long long curr)
{
	int maxfire = 20;

	while (!list_empty(timers)) {
		struct cpu_timer_list *t;

		t = list_first_entry(timers, struct cpu_timer_list, entry);

		if (!--maxfire || curr < t->expires)
			return t->expires;

		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	return 0;
}

static inline void check_dl_overrun(struct task_struct *tsk)
{
	if (tsk->dl.dl_overrun) {
		tsk->dl.dl_overrun = 0;
		__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
	}
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	struct list_head *timers = tsk->cpu_timers;
	struct task_cputime *tsk_expires = &tsk->cputime_expires;
	u64 expires;
	unsigned long soft;

	if (dl_task(tsk))
		check_dl_overrun(tsk);

	/*
	 * If cputime_expires is zero, then there are no active
	 * per thread CPU timers.
	 */
	if (task_cputime_zero(&tsk->cputime_expires))
		return;

	expires = check_timers_list(timers, firing, prof_ticks(tsk));
	tsk_expires->prof_exp = expires;

	expires = check_timers_list(++timers, firing, virt_ticks(tsk));
	tsk_expires->virt_exp = expires;

	tsk_expires->sched_exp = check_timers_list(++timers, firing,
						   tsk->se.sum_exec_runtime);

	/*
	 * Check for the special case thread timers.
	 */
	soft = task_rlimit(tsk, RLIMIT_RTTIME);
	if (soft != RLIM_INFINITY) {
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			if (print_fatal_signals) {
				pr_info("RT Watchdog Timeout (hard): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (soft < hard) {
				soft += USEC_PER_SEC;
				tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur =
					soft;
			}
			if (print_fatal_signals) {
				pr_info("RT Watchdog Timeout (soft): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
	if (task_cputime_zero(tsk_expires))
		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}

static inline void stop_process_timers(struct signal_struct *sig)
{
	struct thread_group_cputimer *cputimer = &sig->cputimer;

	/* Turn off cputimer->running. This is done without locking. */
	WRITE_ONCE(cputimer->running, false);
	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}
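
/*
 * Expire a classic cpu itimer (ITIMER_PROF/ITIMER_VIRTUAL) against the
 * given clock sample: send @signo and re-arm by the interval when it
 * has fired, and fold its expiry into the caller's expiration cache
 * candidate in *@expires.
 */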
static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     u64 *expires, u64 cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr)
			it->expires += it->incr;
		else
			it->expires = 0;

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    task_tgid(tsk), cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && (!*expires || it->expires < *expires))
		*expires = it->expires;
}

/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers[N] list onto the firing list.  The
 * per-thread timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	struct signal_struct *const sig = tsk->signal;
	u64 utime, ptime, virt_expires, prof_expires;
	u64 sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;
	unsigned long soft;

	if (dl_task(tsk))
		check_dl_overrun(tsk);

	/*
	 * If cputimer is not running, then there are no active
	 * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
	 */
	if (!READ_ONCE(tsk->signal->cputimer.running))
		return;

	/*
	 * Signify that a thread is checking for process timers.
	 * Write access to this field is protected by the sighand lock.
	 */
	sig->cputimer.checking_timer = true;

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime.utime;
	ptime = utime + cputime.stime;
	sum_sched_runtime = cputime.sum_exec_runtime;

	prof_expires = check_timers_list(timers, firing, ptime);
	virt_expires = check_timers_list(++timers, firing, utime);
	sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
			 SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
			 SIGVTALRM);

	soft = task_rlimit(tsk, RLIMIT_CPU);
	if (soft != RLIM_INFINITY) {
		unsigned long psecs = div_u64(ptime, NSEC_PER_SEC);
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
		u64 x;

		if (psecs >= hard) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			if (print_fatal_signals) {
				pr_info("CPU Watchdog Timeout (hard): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= soft) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (print_fatal_signals) {
				pr_info("CPU Watchdog Timeout (soft): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (soft < hard) {
				soft++;
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
		}
		x = soft * NSEC_PER_SEC;
		if (!prof_expires || x < prof_expires)
			prof_expires = x;
	}

	sig->cputime_expires.prof_exp = prof_expires;
	sig->cputime_expires.virt_exp = virt_expires;
	sig->cputime_expires.sched_exp = sched_expires;
	if (task_cputime_zero(&sig->cputime_expires))
		stop_process_timers(sig);

	sig->cputimer.checking_timer = false;
}

/*
 * This is called from the signal code (via posixtimer_rearm)
 * when the last timer signal was delivered and we have to reload the timer.
 */
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
	struct sighand_struct *sighand;
	unsigned long flags;
	struct task_struct *p = timer->it.cpu.task;
	u64 now;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state))
			return;

		/* Protect timer list r/w in arm_timer() */
		sighand = lock_task_sighand(p, &flags);
		if (!sighand)
			return;
	} else {
		/*
		 * Protect arm_timer() and timer sampling in case of call to
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			timer->it.cpu.expires = 0;
			return;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/* If the process is dying, no need to rearm */
			goto unlock;
		}
		cpu_timer_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the sighand locked for the call below. */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer);
unlock:
	unlock_task_sighand(p, &flags);
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
				       const struct task_cputime *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return 1;
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample;

		task_cputime(tsk, &task_sample.utime, &task_sample.stime);
		task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	/*
	 * Check if thread group timers expired when the cputimer is
	 * running and no other thread in the group is already checking
	 * for thread group cputimers.  These fields are read without the
	 * sighand lock. However, this is fine because this is meant to
	 * be a fastpath heuristic to determine whether we should try to
	 * acquire the sighand lock to check/handle timers.
	 *
	 * In the worst case scenario, if 'running' or 'checking_timer' gets
	 * set but the current thread doesn't see the change yet, we'll wait
	 * until the next thread in the group gets a scheduler interrupt to
	 * handle the timer. This isn't an issue in practice because these
	 * types of delays with signals actually getting sent are expected.
	 */
	if (READ_ONCE(sig->cputimer.running) &&
	    !READ_ONCE(sig->cputimer.checking_timer)) {
		struct task_cputime group_sample;

		sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);

		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	if (dl_task(tsk) && tsk->dl.dl_overrun)
		return 1;

	return 0;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;
	unsigned long flags;

	lockdep_assert_irqs_disabled();

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	if (!lock_task_sighand(tsk, &flags))
		return;
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);

	check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   u64 *newval, u64 *oldval)
{
	u64 now;
	int ret;

	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
	ret = cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval && ret != -EINVAL) {
		/*
		 * We are setting itimer.  The *oldval is absolute and we update
		 * it to be relative, *newval argument is relative and we update
		 * it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now) {
				/* Just about to fire. */
				*oldval = TICK_NSEC;
			} else {
				*oldval -= now;
			}
		}

		if (!*newval)
			return;
		*newval += now;
	}

	/*
	 * Update the expiration cache if we are the earliest timer, or if
	 * the RLIMIT_CPU limit expires earlier than the prof_exp cpu timer.
	 */
	switch (clock_idx) {
	case CPUCLOCK_PROF:
		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
			tsk->signal->cputime_expires.prof_exp = *newval;
		break;
	case CPUCLOCK_VIRT:
		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
			tsk->signal->cputime_expires.virt_exp = *newval;
		break;
	}

	tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}
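
/*
 * Sleep on a CPU-time clock by arming a temporary one-shot timer on it
 * and blocking until the timer fires or a signal arrives.
 */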
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct itimerspec64 it;
	struct k_itimer timer;
	u64 expires;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;

	if (!error) {
		static struct itimerspec64 zero_it;
		struct restart_block *restart;

		memset(&it, 0, sizeof(it));
		it.it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires == 0) {
				/*
				 * Our timer fired and was reset, below
				 * deletion can not fail.
				 */
				posix_cpu_timer_del(&timer);
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		expires = timer.it.cpu.expires;
		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
		if (!error) {
			/*
			 * Timer is now unarmed, deletion can not fail.
			 */
			posix_cpu_timer_del(&timer);
		}
		spin_unlock_irq(&timer.it_lock);

		while (error == TIMER_RETRY) {
			/*
			 * We need to handle case when timer was or is in the
			 * middle of firing. In other cases we already freed
			 * resources.
			 */
			spin_lock_irq(&timer.it_lock);
			error = posix_cpu_timer_del(&timer);
			spin_unlock_irq(&timer.it_lock);
		}

		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
		/*
		 * Report back to the user the time still remaining.
		 */
		restart = &current->restart_block;
		restart->nanosleep.expires = expires;
		if (restart->nanosleep.type != TT_NONE)
			error = nanosleep_copyout(restart, &it.it_value);
	}

	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct restart_block *restart_block = &current->restart_block;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp);

	if (error == -ERESTART_RESTARTBLOCK) {
		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->nanosleep.clockid = which_clock;
	}
	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec64 t;

	t = ns_to_timespec64(restart_block->nanosleep.expires);

	return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}
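
/*
 * The CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID clocks are
 * thin wrappers around the generic CPU-clock operations with the
 * clockid hard-coded to the caller's process or thread.
 */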
#define PROCESS_CLOCK	make_process_cpuclock(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	make_thread_cpuclock(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec64 *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec64 *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      const struct timespec64 *rqtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec64 *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec64 *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}
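
/*
 * Clock operation tables.  clock_posix_cpu serves the dynamic per-task
 * CPU clocks; clock_process and clock_thread serve the static
 * CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID clocks.  Timers
 * created on the latter two are redirected to clock_posix_cpu via the
 * kclock pointer set in posix_cpu_timer_create().
 */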
const struct k_clock clock_posix_cpu = {
	.clock_getres	= posix_cpu_clock_getres,
	.clock_set	= posix_cpu_clock_set,
	.clock_get	= posix_cpu_clock_get,
	.timer_create	= posix_cpu_timer_create,
	.nsleep		= posix_cpu_nsleep,
	.timer_set	= posix_cpu_timer_set,
	.timer_del	= posix_cpu_timer_del,
	.timer_get	= posix_cpu_timer_get,
	.timer_rearm	= posix_cpu_timer_rearm,
};

const struct k_clock clock_process = {
	.clock_getres	= process_cpu_clock_getres,
	.clock_get	= process_cpu_clock_get,
	.timer_create	= process_cpu_timer_create,
	.nsleep		= process_cpu_nsleep,
};

const struct k_clock clock_thread = {
	.clock_getres	= thread_cpu_clock_getres,
	.clock_get	= thread_cpu_clock_get,
	.timer_create	= thread_cpu_timer_create,
};