/*
 * linux/kernel/posix-timers.c
 *
 *
 * 2002-10-15  Posix Clocks & timers
 *                           by George Anzinger george@mvista.com
 *
 *                           Copyright (C) 2002 2003 by MontaVista Software.
 *
 * 2004-06-01  Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
 *                           Copyright (C) 2004 Boris Hu
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
 */

/* These are all the functions necessary to implement
 * POSIX clocks & timers
 */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/posix-clock.h>
#include <linux/posix-timers.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/hashtable.h>
/*
 * Management arrays for POSIX timers. Timers are now kept in a static
 * hash table with 512 entries.
 * Timer ids are allocated by a local routine, which selects the proper
 * hash head by a key constructed from the current->signal address and a
 * per-signal-struct counter. This keeps timer ids unique per process,
 * but they can now intersect between processes.
 */

/*
 * Let's keep our timers in a slab cache :-)
 */
static struct kmem_cache *posix_timers_cache;

static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
static DEFINE_SPINLOCK(hash_lock);

/*
 * we assume that the new SIGEV_THREAD_ID shares no bits with the other
 * SIGEV values.  Here we put out an error if this assumption fails.
 */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
                       ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif

/*
 * parisc wants ENOTSUP instead of EOPNOTSUPP
 */
#ifndef ENOTSUP
# define ENANOSLEEP_NOTSUP EOPNOTSUPP
#else
# define ENANOSLEEP_NOTSUP ENOTSUP
#endif
/*
 * The timer ID is turned into a timer address by idr_find().
 * Verifying a valid ID consists of:
 *
 * a) checking that idr_find() returns other than -1.
 * b) checking that the timer id matches the one in the timer itself.
 * c) that the timer owner is in the caller's thread group.
 */

/*
 * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
 *	    to implement others.  This structure defines the various
 *	    clocks.
 *
 * RESOLUTION: Clock resolution is used to round up timer and interval
 *	    times, NOT to report clock times, which are reported with as
 *	    much resolution as the system can muster.  In some cases this
 *	    resolution may depend on the underlying clock hardware and
 *	    may not be quantifiable until run time, and only then is the
 *	    necessary code written.  The standard says we should say
 *	    something about this issue in the documentation...
 *
 * FUNCTIONS: The CLOCKs structure defines possible functions to
 *	    handle various clock functions.
 *
 *	    The standard POSIX timer management code assumes the
 *	    following: 1.) The k_itimer struct (sched.h) is used for
 *	    the timer.  2.) The list, it_lock, it_clock, it_id and
 *	    it_pid fields are not modified by timer code.
 *
 * Permissions: It is assumed that the clock_settime() function defined
 *	    for each clock will take care of permission checks.  Some
 *	    clocks may be settable by any user (i.e. local process
 *	    clocks), others not.  Currently the only settable clock we
 *	    have is CLOCK_REALTIME and its high-res counterpart, both of
 *	    which we beg off on and pass to do_sys_settimeofday().
 */

static struct k_clock posix_clocks[MAX_CLOCKS];
/*
 * These ones are defined below.
 */
static int common_nsleep(const clockid_t, int flags, struct timespec *t,
			 struct timespec __user *rmtp);
static int common_timer_create(struct k_itimer *new_timer);
static void common_timer_get(struct k_itimer *, struct itimerspec *);
static int common_timer_set(struct k_itimer *, int,
			    struct itimerspec *, struct itimerspec *);
static int common_timer_del(struct k_itimer *timer);

static enum hrtimer_restart posix_timer_fn(struct hrtimer *data);

static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);

#define lock_timer(tid, flags)						   \
({	struct k_itimer *__timr;					   \
	__cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
	__timr;								   \
})
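/*
 * Typical usage of the pair above, as seen in the syscalls further down
 * in this file: look the timer up and lock it in one step, bail out on
 * failure, and drop the lock with unlock_timer() when done.
 *
 *	struct k_itimer *timr;
 *	unsigned long flags;
 *
 *	timr = lock_timer(timer_id, &flags);
 *	if (!timr)
 *		return -EINVAL;
 *	...
 *	unlock_timer(timr, flags);
 */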
static int hash(struct signal_struct *sig, unsigned int nr)
{
	return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
}

static struct k_itimer *__posix_timers_find(struct hlist_head *head,
					    struct signal_struct *sig,
					    timer_t id)
{
	struct k_itimer *timer;

	hlist_for_each_entry_rcu(timer, head, t_hash) {
		if ((timer->it_signal == sig) && (timer->it_id == id))
			return timer;
	}
	return NULL;
}

static struct k_itimer *posix_timer_by_id(timer_t id)
{
	struct signal_struct *sig = current->signal;
	struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];

	return __posix_timers_find(head, sig, id);
}

static int posix_timer_add(struct k_itimer *timer)
{
	struct signal_struct *sig = current->signal;
	int first_free_id = sig->posix_timer_id;
	struct hlist_head *head;
	int ret = -ENOENT;

	do {
		spin_lock(&hash_lock);
		head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
		if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
			hlist_add_head_rcu(&timer->t_hash, head);
			ret = sig->posix_timer_id;
		}
		if (++sig->posix_timer_id < 0)
			sig->posix_timer_id = 0;
		if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
			/* Loop over all possible ids completed */
			ret = -EAGAIN;
		spin_unlock(&hash_lock);
	} while (ret == -ENOENT);
	return ret;
}

static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
	spin_unlock_irqrestore(&timr->it_lock, flags);
}
/* Get clock_realtime */
static int posix_clock_realtime_get(clockid_t which_clock, struct timespec *tp)
{
	ktime_get_real_ts(tp);
	return 0;
}

/* Set clock_realtime */
static int posix_clock_realtime_set(const clockid_t which_clock,
				    const struct timespec *tp)
{
	return do_sys_settimeofday(tp, NULL);
}

static int posix_clock_realtime_adj(const clockid_t which_clock,
				    struct timex *t)
{
	return do_adjtimex(t);
}

/*
 * Get monotonic time for posix timers
 */
static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
{
	ktime_get_ts(tp);
	return 0;
}

/*
 * Get monotonic-raw time for posix timers
 */
static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
{
	getrawmonotonic(tp);
	return 0;
}

static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec *tp)
{
	*tp = current_kernel_time();
	return 0;
}

static int posix_get_monotonic_coarse(clockid_t which_clock,
						struct timespec *tp)
{
	*tp = get_monotonic_coarse();
	return 0;
}

static int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
{
	*tp = ktime_to_timespec(KTIME_LOW_RES);
	return 0;
}

static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
{
	get_monotonic_boottime(tp);
	return 0;
}

static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
{
	timekeeping_clocktai(tp);
	return 0;
}
/*
 * Initialize everything, well, just everything in Posix clocks/timers ;)
 */
static __init int init_posix_timers(void)
{
	struct k_clock clock_realtime = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_clock_realtime_get,
		.clock_set	= posix_clock_realtime_set,
		.clock_adj	= posix_clock_realtime_adj,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};
	struct k_clock clock_monotonic = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_ktime_get_ts,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};
	struct k_clock clock_monotonic_raw = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_get_monotonic_raw,
	};
	struct k_clock clock_realtime_coarse = {
		.clock_getres	= posix_get_coarse_res,
		.clock_get	= posix_get_realtime_coarse,
	};
	struct k_clock clock_monotonic_coarse = {
		.clock_getres	= posix_get_coarse_res,
		.clock_get	= posix_get_monotonic_coarse,
	};
	struct k_clock clock_tai = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_get_tai,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};
	struct k_clock clock_boottime = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_get_boottime,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};

	posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
	posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
	posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
	posix_timers_register_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
	posix_timers_register_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
	posix_timers_register_clock(CLOCK_BOOTTIME, &clock_boottime);
	posix_timers_register_clock(CLOCK_TAI, &clock_tai);

	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					sizeof (struct k_itimer), 0, SLAB_PANIC,
					NULL);
	return 0;
}
__initcall(init_posix_timers);
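/*
 * Illustrative sketch (not part of this file): another subsystem that
 * provides its own clock would fill in a k_clock and register it the
 * same way, e.g. for a hypothetical CLOCK_EXAMPLE id below MAX_CLOCKS.
 * posix_timers_register_clock() insists on ->clock_get and
 * ->clock_getres being set; everything else is optional.
 *
 *	static struct k_clock clock_example = {
 *		.clock_getres	= example_clock_getres,
 *		.clock_get	= example_clock_get,
 *	};
 *
 *	posix_timers_register_clock(CLOCK_EXAMPLE, &clock_example);
 */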
static void schedule_next_timer(struct k_itimer *timr)
{
	struct hrtimer *timer = &timr->it.real.timer;

	if (timr->it.real.interval.tv64 == 0)
		return;

	timr->it_overrun += (unsigned int) hrtimer_forward(timer,
						timer->base->get_time(),
						timr->it.real.interval);

	timr->it_overrun_last = timr->it_overrun;
	timr->it_overrun = -1;
	++timr->it_requeue_pending;
	hrtimer_restart(timer);
}

/*
 * This function is exported for use by the signal deliver code.  It is
 * called just prior to the info block being released and passes that
 * block to us.  Its function is to update the overrun entry AND to
 * restart the timer.  It should only be called if the timer is to be
 * restarted (i.e. we have flagged this in the sys_private entry of the
 * info block).
 *
 * To protect against the timer going away while the interrupt is queued,
 * we require that the it_requeue_pending flag be set.
 */
void do_schedule_next_timer(struct siginfo *info)
{
	struct k_itimer *timr;
	unsigned long flags;

	timr = lock_timer(info->si_tid, &flags);

	if (timr && timr->it_requeue_pending == info->si_sys_private) {
		if (timr->it_clock < 0)
			posix_cpu_timer_schedule(timr);
		else
			schedule_next_timer(timr);

		info->si_overrun += timr->it_overrun_last;
	}

	if (timr)
		unlock_timer(timr, flags);
}
int posix_timer_event(struct k_itimer *timr, int si_private)
{
	struct task_struct *task;
	int shared, ret = -1;
	/*
	 * FIXME: if ->sigq is queued we can race with
	 * dequeue_signal()->do_schedule_next_timer().
	 *
	 * If dequeue_signal() sees the "right" value of
	 * si_sys_private it calls do_schedule_next_timer().
	 * We re-queue ->sigq and drop ->it_lock().
	 * do_schedule_next_timer() locks the timer
	 * and re-schedules it while ->sigq is pending.
	 * Not really bad, but not what we want.
	 */
	timr->sigq->info.si_sys_private = si_private;

	rcu_read_lock();
	task = pid_task(timr->it_pid, PIDTYPE_PID);
	if (task) {
		shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
		ret = send_sigqueue(timr->sigq, task, shared);
	}
	rcu_read_unlock();
	/* If we failed to send the signal the timer stops. */
	return ret > 0;
}
EXPORT_SYMBOL_GPL(posix_timer_event);

/*
 * This function gets called when a POSIX.1b interval timer expires.  It
 * is used as a callback from the kernel internal timer.  The
 * run_timer_list code ALWAYS calls with interrupts on.
 *
 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
 */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	struct k_itimer *timr;
	unsigned long flags;
	int si_private = 0;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	timr = container_of(timer, struct k_itimer, it.real.timer);
	spin_lock_irqsave(&timr->it_lock, flags);

	if (timr->it.real.interval.tv64 != 0)
		si_private = ++timr->it_requeue_pending;

	if (posix_timer_event(timr, si_private)) {
		/*
		 * signal was not sent because the signal is ignored,
		 * we will not get a call back to restart it AND
		 * it should be restarted.
		 */
		if (timr->it.real.interval.tv64 != 0) {
			ktime_t now = hrtimer_cb_get_time(timer);

			/*
			 * FIXME: What we really want, is to stop this
			 * timer completely and restart it in case the
			 * SIG_IGN is removed. This is a non trivial
			 * change which involves sighand locking
			 * (sigh !), which we don't want to do late in
			 * the release cycle.
			 *
			 * For now we just let timers with an interval
			 * less than a jiffie expire every jiffie to
			 * avoid softirq starvation in case of SIG_IGN
			 * and a very small interval, which would put
			 * the timer right back on the softirq pending
			 * list. By moving now ahead of time we trick
			 * hrtimer_forward() to expire the timer
			 * later, while we still maintain the overrun
			 * accuracy, but have some inconsistency in
			 * the timer_gettime() case. This is at least
			 * better than a starved softirq. A more
			 * complex fix which solves also another related
			 * inconsistency is already in the pipeline.
			 */
#ifdef CONFIG_HIGH_RES_TIMERS
			{
				ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);

				if (timr->it.real.interval.tv64 < kj.tv64)
					now = ktime_add(now, kj);
			}
#endif
			timr->it_overrun += (unsigned int)
				hrtimer_forward(timer, now,
						timr->it.real.interval);
			ret = HRTIMER_RESTART;
			++timr->it_requeue_pending;
		}
	}

	unlock_timer(timr, flags);
	return ret;
}
static struct pid *good_sigevent(sigevent_t * event)
{
	struct task_struct *rtn = current->group_leader;

	if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
		(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
		 !same_thread_group(rtn, current) ||
		 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
		return NULL;

	if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
	    ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
		return NULL;

	return task_pid(rtn);
}
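/*
 * For reference, a sigevent that passes the checks above when the signal
 * is to be delivered to one specific thread in the caller's thread group
 * (illustrative userspace sketch, not kernel code; the thread-id field
 * name matches the kernel's sigevent_t used above):
 *
 *	struct sigevent sev;
 *
 *	memset(&sev, 0, sizeof(sev));
 *	sev.sigev_notify           = SIGEV_SIGNAL | SIGEV_THREAD_ID;
 *	sev.sigev_signo            = SIGRTMIN;
 *	sev.sigev_value.sival_ptr  = &cookie;
 *	sev.sigev_notify_thread_id = my_tid;	(a tid in this thread group)
 */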
void posix_timers_register_clock(const clockid_t clock_id,
				 struct k_clock *new_clock)
{
	if ((unsigned) clock_id >= MAX_CLOCKS) {
		printk(KERN_WARNING "POSIX clock register failed for clock_id %d\n",
		       clock_id);
		return;
	}

	if (!new_clock->clock_get) {
		printk(KERN_WARNING "POSIX clock id %d lacks clock_get()\n",
		       clock_id);
		return;
	}
	if (!new_clock->clock_getres) {
		printk(KERN_WARNING "POSIX clock id %d lacks clock_getres()\n",
		       clock_id);
		return;
	}

	posix_clocks[clock_id] = *new_clock;
}
EXPORT_SYMBOL_GPL(posix_timers_register_clock);
static struct k_itimer * alloc_posix_timer(void)
{
	struct k_itimer *tmr;
	tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
	if (!tmr)
		return tmr;
	if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
		kmem_cache_free(posix_timers_cache, tmr);
		return NULL;
	}
	memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
	return tmr;
}

static void k_itimer_rcu_free(struct rcu_head *head)
{
	struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);

	kmem_cache_free(posix_timers_cache, tmr);
}

#define IT_ID_SET	1
#define IT_ID_NOT_SET	0
static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
	if (it_id_set) {
		unsigned long flags;
		spin_lock_irqsave(&hash_lock, flags);
		hlist_del_rcu(&tmr->t_hash);
		spin_unlock_irqrestore(&hash_lock, flags);
	}
	put_pid(tmr->it_pid);
	sigqueue_free(tmr->sigq);
	call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
}
static struct k_clock *clockid_to_kclock(const clockid_t id)
{
	if (id < 0)
		return (id & CLOCKFD_MASK) == CLOCKFD ?
			&clock_posix_dynamic : &clock_posix_cpu;

	if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
		return NULL;
	return &posix_clocks[id];
}

static int common_timer_create(struct k_itimer *new_timer)
{
	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
	return 0;
}
/* Create a POSIX.1b interval timer. */
SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct k_itimer *new_timer;
	int error, new_timer_id;
	sigevent_t event;
	int it_id_set = IT_ID_NOT_SET;

	if (!kc)
		return -EINVAL;
	if (!kc->timer_create)
		return -EOPNOTSUPP;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
	new_timer_id = posix_timer_add(new_timer);
	if (new_timer_id < 0) {
		error = new_timer_id;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->it_overrun = -1;

	if (timer_event_spec) {
		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
			error = -EFAULT;
			goto out;
		}
		rcu_read_lock();
		new_timer->it_pid = get_pid(good_sigevent(&event));
		rcu_read_unlock();
		if (!new_timer->it_pid) {
			error = -EINVAL;
			goto out;
		}
	} else {
		event.sigev_notify = SIGEV_SIGNAL;
		event.sigev_signo = SIGALRM;
		event.sigev_value.sival_int = new_timer->it_id;
		new_timer->it_pid = get_pid(task_tgid(current));
	}

	new_timer->it_sigev_notify     = event.sigev_notify;
	new_timer->sigq->info.si_signo = event.sigev_signo;
	new_timer->sigq->info.si_value = event.sigev_value;
	new_timer->sigq->info.si_tid   = new_timer->it_id;
	new_timer->sigq->info.si_code  = SI_TIMER;

	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}

	error = kc->timer_create(new_timer);
	if (error)
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	new_timer->it_signal = current->signal;
	list_add(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);

	return 0;
	/*
	 * In the case of the timer belonging to another task, after
	 * the task is unlocked, the timer is owned by the other task
	 * and may cease to exist at any time.  Don't use or modify
	 * new_timer after the unlock call.
	 */
out:
	release_posix_timer(new_timer, it_id_set);
	return error;
}
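/*
 * Userspace view of the syscall above (illustrative sketch):
 *
 *	timer_t timerid;
 *	struct sigevent sev;
 *
 *	memset(&sev, 0, sizeof(sev));
 *	sev.sigev_notify          = SIGEV_SIGNAL;
 *	sev.sigev_signo           = SIGRTMIN;
 *	sev.sigev_value.sival_int = 42;
 *
 *	if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) == -1)
 *		perror("timer_create");
 *
 * Passing a NULL sigevent instead gives the SIGALRM default that is set
 * up in the else-branch above.
 */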
/*
 * Locking issues: We need to protect the result of the id look up until
 * we get the timer locked down so it is not deleted under us.  The
 * removal is done under the idr spinlock so we use that here to bridge
 * the find to the timer lock.  To avoid a deadlock, the timer id MUST
 * be released without holding the timer lock.
 */
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
{
	struct k_itimer *timr;

	/*
	 * timer_t could be any type >= int and we want to make sure any
	 * @timer_id outside positive int range fails lookup.
	 */
	if ((unsigned long long)timer_id > INT_MAX)
		return NULL;

	rcu_read_lock();
	timr = posix_timer_by_id(timer_id);
	if (timr) {
		spin_lock_irqsave(&timr->it_lock, *flags);
		if (timr->it_signal == current->signal) {
			rcu_read_unlock();
			return timr;
		}
		spin_unlock_irqrestore(&timr->it_lock, *flags);
	}
	rcu_read_unlock();

	return NULL;
}

/*
 * Get the time remaining on a POSIX.1b interval timer.  This function
 * is ALWAYS called with spin_lock_irq on the timer, thus it must not
 * mess with irq.
 *
 * We have a couple of messes to clean up here.  First there is the case
 * of a timer that has a requeue pending.  These timers should appear to
 * be in the timer list with an expiry as if we were to requeue them
 * now.
 *
 * The second issue is the SIGEV_NONE timer which may be active but is
 * not really ever put in the timer list (to save system resources).
 * This timer may be expired, and if so, we will do it here.  Otherwise
 * it is the same as a requeue pending timer WRT what we should
 * report.
 */
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
	ktime_t now, remaining, iv;
	struct hrtimer *timer = &timr->it.real.timer;

	memset(cur_setting, 0, sizeof(struct itimerspec));

	iv = timr->it.real.interval;

	/* interval timer ? */
	if (iv.tv64)
		cur_setting->it_interval = ktime_to_timespec(iv);
	else if (!hrtimer_active(timer) &&
		 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
		return;

	now = timer->base->get_time();

	/*
	 * When a requeue is pending or this is a SIGEV_NONE
	 * timer move the expiry time forward by intervals, so
	 * expiry is > now.
	 */
	if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
	    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
		timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);

	remaining = ktime_sub(hrtimer_get_expires(timer), now);
	/* Return 0 only, when the timer is expired and not pending */
	if (remaining.tv64 <= 0) {
		/*
		 * A single shot SIGEV_NONE timer must return 0, when
		 * it is expired !
		 */
		if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
			cur_setting->it_value.tv_nsec = 1;
	} else
		cur_setting->it_value = ktime_to_timespec(remaining);
}
/* Get the time remaining on a POSIX.1b interval timer. */
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
		struct itimerspec __user *, setting)
{
	struct itimerspec cur_setting;
	struct k_itimer *timr;
	struct k_clock *kc;
	unsigned long flags;
	int ret = 0;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	kc = clockid_to_kclock(timr->it_clock);
	if (WARN_ON_ONCE(!kc || !kc->timer_get))
		ret = -EINVAL;
	else
		kc->timer_get(timr, &cur_setting);

	unlock_timer(timr, flags);

	if (!ret && copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
		return -EFAULT;

	return ret;
}
/*
 * Get the number of overruns of a POSIX.1b interval timer.  This is to
 * be the overrun of the timer last delivered.  At the same time we are
 * accumulating overruns on the next timer.  The overrun is frozen when
 * the signal is delivered, either at the notify time (if the info block
 * is not queued) or at the actual delivery time (as we are informed by
 * the call back to do_schedule_next_timer()).  So all we need to do is
 * to pick up the frozen overrun.
 */
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{
	struct k_itimer *timr;
	int overrun;
	unsigned long flags;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	overrun = timr->it_overrun_last;
	unlock_timer(timr, flags);

	return overrun;
}
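/*
 * Illustrative userspace sketch: the overrun count is normally consumed
 * from the signal handler, either via the siginfo filled in by
 * do_schedule_next_timer() above or via this syscall:
 *
 *	static void handler(int sig, siginfo_t *si, void *uc)
 *	{
 *		int missed = timer_getoverrun(timerid);
 *		(or, for timer signals, read si->si_overrun directly)
 *	}
 */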
/* Set a POSIX.1b interval timer. */
/* timr->it_lock is taken. */
static int
common_timer_set(struct k_itimer *timr, int flags,
		 struct itimerspec *new_setting, struct itimerspec *old_setting)
{
	struct hrtimer *timer = &timr->it.real.timer;
	enum hrtimer_mode mode;

	if (old_setting)
		common_timer_get(timr, old_setting);

	/* disable the timer */
	timr->it.real.interval.tv64 = 0;
	/*
	 * careful here.  If smp we could be in the "fire" routine which will
	 * be spinning as we hold the lock.  But this is ONLY an SMP issue.
	 */
	if (hrtimer_try_to_cancel(timer) < 0)
		return TIMER_RETRY;

	timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timr->it_overrun_last = 0;

	/* switch off the timer when it_value is zero */
	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
		return 0;

	mode = flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
	timr->it.real.timer.function = posix_timer_fn;

	hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));

	/* Convert interval */
	timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);

	/* SIGEV_NONE timers are not queued ! See common_timer_get */
	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
		/* Setup correct expiry time for relative timers */
		if (mode == HRTIMER_MODE_REL) {
			hrtimer_add_expires(timer, timer->base->get_time());
		}
		return 0;
	}

	hrtimer_start_expires(timer, mode);
	return 0;
}

/* Set a POSIX.1b interval timer */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		const struct itimerspec __user *, new_setting,
		struct itimerspec __user *, old_setting)
{
	struct k_itimer *timr;
	struct itimerspec new_spec, old_spec;
	int error = 0;
	unsigned long flag;
	struct itimerspec *rtn = old_setting ? &old_spec : NULL;
	struct k_clock *kc;

	if (!new_setting)
		return -EINVAL;

	if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
		return -EFAULT;

	if (!timespec_valid(&new_spec.it_interval) ||
	    !timespec_valid(&new_spec.it_value))
		return -EINVAL;
retry:
	timr = lock_timer(timer_id, &flag);
	if (!timr)
		return -EINVAL;

	kc = clockid_to_kclock(timr->it_clock);
	if (WARN_ON_ONCE(!kc || !kc->timer_set))
		error = -EINVAL;
	else
		error = kc->timer_set(timr, flags, &new_spec, rtn);

	unlock_timer(timr, flag);
	if (error == TIMER_RETRY) {
		rtn = NULL;	// We already got the old time...
		goto retry;
	}

	if (old_setting && !error &&
	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
		error = -EFAULT;

	return error;
}
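/*
 * Illustrative userspace sketch: arm a previously created timer to fire
 * after 500 ms and then every 500 ms (relative mode; pass TIMER_ABSTIME
 * in flags for an absolute first expiry):
 *
 *	struct itimerspec its = {
 *		.it_value    = { .tv_sec = 0, .tv_nsec = 500000000 },
 *		.it_interval = { .tv_sec = 0, .tv_nsec = 500000000 },
 *	};
 *
 *	timer_settime(timerid, 0, &its, NULL);
 *
 * A zero it_value disarms the timer, as handled in common_timer_set().
 */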
static int common_timer_del(struct k_itimer *timer)
{
	timer->it.real.interval.tv64 = 0;

	if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
		return TIMER_RETRY;
	return 0;
}

static inline int timer_delete_hook(struct k_itimer *timer)
{
	struct k_clock *kc = clockid_to_kclock(timer->it_clock);

	if (WARN_ON_ONCE(!kc || !kc->timer_del))
		return -EINVAL;
	return kc->timer_del(timer);
}

/* Delete a POSIX.1b interval timer. */
SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
{
	struct k_itimer *timer;
	unsigned long flags;

retry_delete:
	timer = lock_timer(timer_id, &flags);
	if (!timer)
		return -EINVAL;

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}

	spin_lock(&current->sighand->siglock);
	list_del(&timer->list);
	spin_unlock(&current->sighand->siglock);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
	return 0;
}
/*
 * return timer owned by the process, used by exit_itimers
 */
static void itimer_delete(struct k_itimer *timer)
{
	unsigned long flags;

retry_delete:
	spin_lock_irqsave(&timer->it_lock, flags);

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}
	list_del(&timer->list);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
}

/*
 * This is called by do_exit or de_thread, only when there are no more
 * references to the shared signal_struct.
 */
void exit_itimers(struct signal_struct *sig)
{
	struct k_itimer *tmr;

	while (!list_empty(&sig->posix_timers)) {
		tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
		itimer_delete(tmr);
	}
}
SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
		const struct timespec __user *, tp)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec new_tp;

	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (copy_from_user(&new_tp, tp, sizeof (*tp)))
		return -EFAULT;

	return kc->clock_set(which_clock, &new_tp);
}

SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
		struct timespec __user *,tp)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec kernel_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_get(which_clock, &kernel_tp);

	if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
		error = -EFAULT;

	return error;
}
SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
		struct timex __user *, utx)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timex ktx;
	int err;

	if (!kc)
		return -EINVAL;
	if (!kc->clock_adj)
		return -EOPNOTSUPP;

	if (copy_from_user(&ktx, utx, sizeof(ktx)))
		return -EFAULT;

	err = kc->clock_adj(which_clock, &ktx);

	if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
		return -EFAULT;

	return err;
}

SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
		struct timespec __user *, tp)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec rtn_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_getres(which_clock, &rtn_tp);

	if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp)))
		error = -EFAULT;

	return error;
}
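/*
 * Illustrative userspace sketch for the clock syscalls above:
 *
 *	struct timespec ts, res;
 *
 *	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
 *	clock_getres(CLOCK_MONOTONIC_COARSE, &res);
 *
 * The coarse clocks report the tick-based KTIME_LOW_RES resolution set
 * up in posix_get_coarse_res(), while the hrtimer-backed clocks report
 * hrtimer_get_res().
 */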
/*
 * nanosleep for monotonic and realtime clocks
 */
static int common_nsleep(const clockid_t which_clock, int flags,
			 struct timespec *tsave, struct timespec __user *rmtp)
{
	return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
				 which_clock);
}

SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
		const struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -ENANOSLEEP_NOTSUP;

	if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
		return -EFAULT;

	if (!timespec_valid(&t))
		return -EINVAL;

	return kc->nsleep(which_clock, flags, &t, rmtp);
}
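/*
 * Illustrative userspace sketch: sleep until an absolute deadline on
 * CLOCK_MONOTONIC, which avoids drift when the sleep has to be retried:
 *
 *	struct timespec deadline;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &deadline);
 *	deadline.tv_sec += 1;
 *	clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline, NULL);
 *
 * Clocks that do not populate ->nsleep (e.g. the coarse clocks set up in
 * init_posix_timers()) make this fail with ENANOSLEEP_NOTSUP.
 */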
/*
 * This will restart clock_nanosleep. This is required only by
 * compat_clock_nanosleep_restart for now.
 */
long clock_nanosleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct k_clock *kc = clockid_to_kclock(which_clock);

	if (WARN_ON_ONCE(!kc || !kc->nsleep_restart))
		return -EINVAL;

	return kc->nsleep_restart(restart_block);
}