Blame view
ipc/sem.c
55 KB
1da177e4c
|
1 2 3 4 5 |
/* * linux/ipc/sem.c * Copyright (C) 1992 Krishna Balasubramanian * Copyright (C) 1995 Eric Schenk, Bruno Haible * |
1da177e4c
|
6 7 8 |
* /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com> * * SMP-threaded, sysctl's added |
624dffcbc
|
9 |
* (c) 1999 Manfred Spraul <manfred@colorfullife.com> |
1da177e4c
|
10 |
* Enforced range limit on SEM_UNDO |
046c68842
|
11 |
* (c) 2001 Red Hat Inc |
1da177e4c
|
12 13 |
* Lockless wakeup * (c) 2003 Manfred Spraul <manfred@colorfullife.com> |
c5cf6359a
|
14 15 |
* Further wakeup optimizations, documentation * (c) 2010 Manfred Spraul <manfred@colorfullife.com> |
073115d6b
|
16 17 18 |
* * support for audit of ipc object properties and permission changes * Dustin Kirkland <dustin.kirkland@us.ibm.com> |
e38935341
|
19 20 21 22 |
* * namespaces support * OpenVZ, SWsoft Inc. * Pavel Emelianov <xemul@openvz.org> |
c5cf6359a
|
23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 |
* * Implementation notes: (May 2010) * This file implements System V semaphores. * * User space visible behavior: * - FIFO ordering for semop() operations (just FIFO, not starvation * protection) * - multiple semaphore operations that alter the same semaphore in * one semop() are handled. * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and * SETALL calls. * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO. * - undo adjustments at process exit are limited to 0..SEMVMX. * - namespaces are supported. * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing * to /proc/sys/kernel/sem. * - statistics about the usage are reported in /proc/sysvipc/sem. * * Internals: * - scalability: * - all global variables are read-mostly. * - semop() calls and semctl(RMID) are synchronized by RCU. * - most operations do write operations (actually: spin_lock calls) to * the per-semaphore array structure. * Thus: Perfect SMP scaling between independent semaphore arrays. * If multiple semaphores in one array are used, then cache line * thrashing on the semaphore array spinlock will limit the scaling. |
2f2ed41dc
|
50 |
* - semncnt and semzcnt are calculated on demand in count_semcnt() |
c5cf6359a
|
51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 |
* - the task that performs a successful semop() scans the list of all * sleeping tasks and completes any pending operations that can be fulfilled. * Semaphores are actively given to waiting tasks (necessary for FIFO). * (see update_queue()) * - To improve the scalability, the actual wake-up calls are performed after * dropping all locks. (see wake_up_sem_queue_prepare(), * wake_up_sem_queue_do()) * - All work is done by the waker, the woken up task does not have to do * anything - not even acquiring a lock or dropping a refcount. * - A woken up task may not even touch the semaphore array anymore, it may * have been destroyed already by a semctl(RMID). * - The synchronizations between wake-ups due to a timeout/signal and a * wake-up due to a completed semaphore operation is achieved by using an * intermediate state (IN_WAKEUP). * - UNDO values are stored in an array (one per process and per * semaphore array, lazily allocated). For backwards compatibility, multiple * modes for the UNDO variables are supported (per process, per thread) * (see copy_semundo, CLONE_SYSVSEM) * - There are two lists of the pending operations: a per-array list * and per-semaphore list (stored in the array). This allows to achieve FIFO * ordering without always scanning all pending operations. * The worst-case behavior is nevertheless O(N^2) for N wakeups. |
1da177e4c
|
73 |
*/ |
1da177e4c
|
74 75 76 77 78 |
#include <linux/slab.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/time.h> |
1da177e4c
|
79 80 81 |
#include <linux/security.h> #include <linux/syscalls.h> #include <linux/audit.h> |
c59ede7b7
|
82 |
#include <linux/capability.h> |
19b4946ca
|
83 |
#include <linux/seq_file.h> |
3e148c799
|
84 |
#include <linux/rwsem.h> |
e38935341
|
85 |
#include <linux/nsproxy.h> |
ae5e1b22f
|
86 |
#include <linux/ipc_namespace.h> |
5f921ae96
|
87 |
|
7153e4027
|
88 |
#include <linux/uaccess.h> |
1da177e4c
|
89 |
#include "util.h" |
e57940d71
|
90 91 92 93 |
/* One semaphore structure for each semaphore in the system. */
struct sem {
	int	semval;		/* current value */
	int	sempid;		/* pid of last operation */
	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
	struct list_head pending_alter; /* pending single-sop operations */
					/* that alter the semaphore */
	struct list_head pending_const; /* pending single-sop operations */
					/* that do not alter the semaphore*/
	time_t	sem_otime;	/* candidate for sem_otime */
	/* Cacheline-aligned so that per-semaphore locking on different
	 * semaphores of one array does not cause false sharing. */
} ____cacheline_aligned_in_smp;
e57940d71
|
101 102 103 |
/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head	list;	 /* queue of pending operations */
	struct task_struct	*sleeper; /* this process */
	struct sem_undo		*undo;	 /* undo structure */
	int			pid;	 /* process id of requesting process */
	int			status;	 /* completion status of operation */
	struct sembuf		*sops;	 /* array of pending operations */
	struct sembuf		*blocking; /* the operation that blocked */
	int			nsops;	 /* number of operations */
	int			alter;	 /* does *sops alter the array? */
};

/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list: *
						 * all undos from one process
						 * rcu protected */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
	struct list_head	list_id;	/* per semaphore array list:
						 * all undos for one array */
	int			semid;		/* semaphore set identifier */
	short			*semadj;	/* array of adjustments */
						/* one per semaphore */
};

/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	atomic_t		refcnt;		/* shared by all CLONE_SYSVSEM tasks */
	spinlock_t		lock;		/* protects list_proc */
	struct list_head	list_proc;	/* list of all sem_undo structs */
};
ed2ddbf88
|
139 |
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS]) |
e38935341
|
140 |
|
1b531f213
|
141 |
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid) |
1da177e4c
|
142 |
|
7748dbfaa
|
143 |
static int newary(struct ipc_namespace *, struct ipc_params *); |
01b8b07a5
|
144 |
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *); |
1da177e4c
|
145 |
#ifdef CONFIG_PROC_FS |
19b4946ca
|
146 |
static int sysvipc_sem_proc_show(struct seq_file *s, void *it); |
1da177e4c
|
147 148 149 150 151 152 |
#endif #define SEMMSL_FAST 256 /* 512 bytes on stack */ #define SEMOPM_FAST 64 /* ~ 372 bytes on stack */ /* |
758a6ba39
|
153 |
* Locking: |
1da177e4c
|
154 |
* sem_undo.id_next, |
758a6ba39
|
155 |
* sem_array.complex_count, |
1a82e9e1d
|
156 |
* sem_array.pending{_alter,_const}, |
758a6ba39
|
157 |
* sem_array.sem_undo: global sem_lock() for read/write |
1da177e4c
|
158 |
* sem_undo.proc_next: only "current" is allowed to read/write that field. |
46c0a8ca3
|
159 |
* |
758a6ba39
|
160 161 |
* sem_array.sem_base[i].pending_{const,alter}: * global or semaphore sem_lock() for read/write |
1da177e4c
|
162 |
*/ |
e38935341
|
163 164 165 166 |
#define sc_semmsl sem_ctls[0] #define sc_semmns sem_ctls[1] #define sc_semopm sem_ctls[2] #define sc_semmni sem_ctls[3] |
ed2ddbf88
|
167 |
/*
 * sem_init_ns - initialize the semaphore bits of an ipc namespace:
 * set the tunable limits to their compile-time defaults and set up
 * the id allocator for semaphore arrays.
 */
void sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}
ae5e1b22f
|
176 |
#ifdef CONFIG_IPC_NS |
e38935341
|
177 178 |
void sem_exit_ns(struct ipc_namespace *ns) { |
01b8b07a5
|
179 |
free_ipcs(ns, &sem_ids(ns), freeary); |
7d6feeb28
|
180 |
idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr); |
e38935341
|
181 |
} |
ae5e1b22f
|
182 |
#endif |
1da177e4c
|
183 |
|
239521f31
|
184 |
/*
 * sem_init - boot-time initialization: set up the initial ipc namespace
 * and register the /proc/sysvipc/sem interface.
 */
void __init sem_init(void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}
f269f40ad
|
192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 |
/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* complex operations still around? */
	if (sma->complex_count)
		return;
	/*
	 * We will switch back to simple mode.
	 * Move all pending operation back into the per-semaphore
	 * queues.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;
		/* single-sop operation: sops[0] names the only semaphore */
		curr = &sma->sem_base[q->sops[0].sem_num];

		list_add_tail(&q->list, &curr->pending_alter);
	}
	INIT_LIST_HEAD(&sma->pending_alter);
}

/**
 * merge_queues - merge single semop queues into global queue
 * @sma: semaphore array
 *
 * This function merges all per-semaphore queues into the global queue.
 * It is necessary to achieve FIFO ordering for the pending single-sop
 * operations when a multi-semop operation must sleep.
 * Only the alter operations must be moved, the const operations can stay.
 */
static void merge_queues(struct sem_array *sma)
{
	int i;

	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = sma->sem_base + i;

		list_splice_init(&sem->pending_alter, &sma->pending_alter);
	}
}
53dad6d3a
|
238 239 240 241 242 243 244 245 |
/*
 * RCU callback: release the security blob and the array itself after
 * the grace period has elapsed.
 */
static void sem_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct sem_array *sma = ipc_rcu_to_struct(p);

	security_sem_free(sma);
	ipc_rcu_free(head);
}
3e148c799
|
246 |
/*
 * Wait until all currently ongoing simple ops have completed.
 * Caller must own sem_perm.lock.
 * New simple ops cannot start, because simple ops first check
 * that a) sem_perm.lock is free and b) complex_count is 0.
 */
static void sem_wait_array(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->complex_count) {
		/* The thread that increased sma->complex_count waited on
		 * all sem->lock locks. Thus we don't need to wait again.
		 */
		return;
	}

	for (i = 0; i < sma->sem_nsems; i++) {
		sem = sma->sem_base + i;
		spin_unlock_wait(&sem->lock);
	}
}

/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 *
 * Returns the index of the locked per-semaphore lock, or -1 if the
 * global lock was taken.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			      int nsops)
{
	struct sem *sem;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* And wait until all simple ops that are processed
		 * right now have dropped their locks.
		 */
		sem_wait_array(sma);
		return -1;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 * The rules are:
	 * - optimized locking is possible if no complex operation
	 *   is either enqueued or processed right now.
	 * - The test for enqueued complex ops is simple:
	 *      sma->complex_count != 0
	 * - Testing for complex ops that are processed right now is
	 *   a bit more difficult. Complex ops acquire the full lock
	 *   and first wait that the running simple ops have completed.
	 *   (see above)
	 *   Thus: If we own a simple lock and the global lock is free
	 *	and complex_count is now 0, then it will stay 0 and
	 *	thus just locking sem->lock is sufficient.
	 */
	sem = sma->sem_base + sops->sem_num;

	if (sma->complex_count == 0) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/* Then check that the global lock is free */
		if (!spin_is_locked(&sma->sem_perm.lock)) {
			/* spin_is_locked() is not a memory barrier */
			smp_mb();

			/* Now repeat the test of complex_count:
			 * It can't change anymore until we drop sem->lock.
			 * Thus: if is now 0, then it will stay 0.
			 */
			if (sma->complex_count == 0) {
				/* fast path successful! */
				return sops->sem_num;
			}
		}
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->complex_count == 0) {
		/* False alarm:
		 * There is no complex operation, thus we can switch
		 * back to the fast path.
		 */
		spin_lock(&sem->lock);
		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/* Not a false alarm, thus complete the sequence for a
		 * full lock.
		 */
		sem_wait_array(sma);
		return -1;
	}
}

/*
 * Release the lock taken by sem_lock(): locknum == -1 means the global
 * array lock was held (try to unmerge the queues before dropping it),
 * otherwise drop the per-semaphore lock with index locknum.
 */
static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == -1) {
		unmerge_queues(sma);
		ipc_unlock_object(&sma->sem_perm);
	} else {
		struct sem *sem = sma->sem_base + locknum;
		spin_unlock(&sem->lock);
	}
}

/*
 * sem_lock_(check_) routines are called in the paths where the rwsem
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
			int id, struct sembuf *sops, int nsops, int *locknum)
{
	struct kern_ipc_perm *ipcp;
	struct sem_array *sma;

	ipcp = ipc_obtain_object(&sem_ids(ns), id);
	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	sma = container_of(ipcp, struct sem_array, sem_perm);
	*locknum = sem_lock(sma, sops, nsops);

	/* ipc_rmid() may have already freed the ID while sem_lock
	 * was spinning: verify that the structure is still valid
	 */
	if (ipc_valid_object(ipcp))
		return container_of(ipcp, struct sem_array, sem_perm);

	sem_unlock(sma, *locknum);
	return ERR_PTR(-EINVAL);
}
16df3674e
|
391 392 393 394 395 396 397 398 399 |
/*
 * Look up a semaphore array by id under RCU; no id-validity check
 * (see sem_obtain_object_check for the checking variant).
 */
static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}
16df3674e
|
400 401 402 403 404 405 406 |
/*
 * Like sem_obtain_object(), but also verifies that the id's sequence
 * number matches (protects against stale ids).
 */
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}
6ff379721
|
410 411 |
/*
 * Re-acquire the global array lock and drop the reference that the
 * caller took before sleeping (NULL/-1 forces the full-lock path).
 */
static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(sma, ipc_rcu_free);
}
7ca7e564e
|
415 416 417 418 |
/* Remove the array's id from the namespace's id table. */
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
1da177e4c
|
419 420 421 422 423 |
/* * Lockless wakeup algorithm: * Without the check/retry algorithm a lockless wakeup is possible: * - queue.status is initialized to -EINTR before blocking. * - wakeup is performed by |
1a82e9e1d
|
424 |
* * unlinking the queue entry from the pending list |
1da177e4c
|
425 426 427 428 429 430 |
* * setting queue.status to IN_WAKEUP * This is the notification for the blocked thread that a * result value is imminent. * * call wake_up_process * * set queue.status to the final value. * - the previously blocked thread checks queue.status: |
239521f31
|
431 432 433 434 435 |
* * if it's IN_WAKEUP, then it must wait until the value changes * * if it's not -EINTR, then the operation was completed by * update_queue. semtimedop can return queue.status without * performing any operation on the sem array. * * otherwise it must acquire the spinlock and check what's up. |
1da177e4c
|
436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 |
* * The two-stage algorithm is necessary to protect against the following * races: * - if queue.status is set after wake_up_process, then the woken up idle * thread could race forward and try (and fail) to acquire sma->lock * before update_queue had a chance to set queue.status * - if queue.status is written before wake_up_process and if the * blocked process is woken up by a signal between writing * queue.status and the wake_up_process, then the woken up * process could return from semtimedop and die by calling * sys_exit before wake_up_process is called. Then wake_up_process * will oops, because the task structure is already invalid. * (yes, this happened on s390 with sysv msg). * */ #define IN_WAKEUP 1 |
f4566f048
|
452 453 454 455 456 |
/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 *
 * Returns the new array's ipc id, or a negative error code.
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	/* the struct sem entries live directly behind the sem_array */
	size = sizeof(*sma) + nsems * sizeof(struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma)
		return -ENOMEM;

	memset(sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		/* security blob not allocated: plain ipc_rcu_free suffices */
		ipc_rcu_putref(sma, ipc_rcu_free);
		return retval;
	}

	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (id < 0) {
		/* security blob exists: free it via sem_rcu_free */
		ipc_rcu_putref(sma, sem_rcu_free);
		return id;
	}
	ns->used_sems += nsems;

	sma->sem_base = (struct sem *) &sma[1];

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
		INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
		spin_lock_init(&sma->sem_base[i].lock);
	}

	sma->complex_count = 0;
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}
7748dbfaa
|
516 |
|
f4566f048
|
517 |
/*
 * ipc_ops "associate" callback: LSM permission check when attaching
 * to an existing array via semget().
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	return security_sem_associate(sma, semflg);
}
f4566f048
|
527 |
/*
 * ipc_ops "more_checks" callback: the caller may not request more
 * semaphores than the existing array contains.
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}
d5460c997
|
541 |
/*
 * semget(2): obtain (and possibly create) a semaphore set.
 * All real work is delegated to the generic ipcget() with the
 * semaphore-specific callbacks.
 */
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops sem_ops = {
		.getnew = newary,
		.associate = sem_security,
		.more_checks = sem_more_checks,
	};
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
78f5009cc
|
562 563 |
/**
 * perform_atomic_semop - Perform (if possible) a semaphore operation
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Either applies all operations in q->sops atomically, or none of them
 * (partial updates are rolled back before returning).
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Negative values are error codes.
 */
static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops, pid;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		/* sem_op == 0 is "wait for zero": blocks while semval != 0 */
		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
			un->semadj[sop->sem_num] = undo;
		}

		curr->semval = result;
	}

	/* all ops succeeded: record the caller's pid on each semaphore */
	sop--;
	pid = q->pid;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	q->blocking = sop;

	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	/* roll back every operation that was already applied */
	sop--;
	while (sop >= sops) {
		sem_op = sop->sem_op;
		sma->sem_base[sop->sem_num].semval -= sem_op;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sem_op;
		sop--;
	}

	return result;
}
0a2b9d4c7
|
640 641 642 643 644 |
/**
 * wake_up_sem_queue_prepare - prepare wake-up
 * @pt: list of tasks that will be woken up later by wake_up_sem_queue_do()
 * @q: queue entry that must be signaled
 * @error: Error value for the signal
 *
 * Prepare the wake-up of the queue entry q.
 */
static void wake_up_sem_queue_prepare(struct list_head *pt,
				struct sem_queue *q, int error)
{
	if (list_empty(pt)) {
		/*
		 * Hold preempt off so that we don't get preempted and have the
		 * wakee busy-wait until we're scheduled back on.
		 */
		preempt_disable();
	}
	q->status = IN_WAKEUP;
	/* stash the return code in q->pid until the actual wake-up */
	q->pid = error;

	list_add_tail(&q->list, pt);
}

/**
 * wake_up_sem_queue_do - do the actual wake-up
 * @pt: list of tasks to be woken up
 *
 * Do the actual wake-up.
 * The function is called without any locks held, thus the semaphore array
 * could be destroyed already and the tasks can disappear as soon as the
 * status is set to the actual return code.
 */
static void wake_up_sem_queue_do(struct list_head *pt)
{
	struct sem_queue *q, *t;
	int did_something;

	did_something = !list_empty(pt);
	list_for_each_entry_safe(q, t, pt, list) {
		wake_up_process(q->sleeper);
		/* q can disappear immediately after writing q->status. */
		smp_wmb();
		q->status = q->pid;
	}
	if (did_something)
		preempt_enable();
}
b97e820ff
|
685 686 687 |
/*
 * Remove a queue entry from its pending list; multi-sop (complex)
 * operations are also accounted for in sma->complex_count.
 */
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops > 1)
		sma->complex_count--;
}
fd5db4225
|
691 692 693 694 695 696 697 |
/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * modified the array.
 * Note that wait-for-zero operations are handled without restart.
 */
static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	/* pending complex alter operations are too difficult to analyse */
	if (!list_empty(&sma->pending_alter))
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	/* It is impossible that someone waits for the new value:
	 * - complex operations always restart.
	 * - wait-for-zero are handled separately.
	 * - q is a previously sleeping simple operation that
	 *   altered the array. It must be a decrement, because
	 *   simple increments never sleep.
	 * - If there are older (higher priority) decrements
	 *   in the queue, then they have observed the original
	 *   semval value and couldn't proceed. The operation
	 *   decremented to value - thus they won't proceed either.
	 */
	return 0;
}
fd5db4225
|
723 |
|
1a82e9e1d
|
724 |
/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @pt: list head for the tasks that must be woken up.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
				struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sem_base[semnum].pending_const;

	/* open-coded walk: the entry may be unlinked while we iterate */
	walk = pending_list->next;
	while (walk != pending_list) {
		int error;

		q = container_of(walk, struct sem_queue, list);
		walk = walk->next;

		error = perform_atomic_semop(sma, q);

		if (error <= 0) {
			/* operation completed, remove from queue & wakeup */
			unlink_queue(sma, q);

			wake_up_sem_queue_prepare(pt, q, error);
			if (error == 0)
				semop_completed = 1;
		}
	}
	return semop_completed;
}

/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @pt: list head of the tasks that must be woken up.
 *
 * Checks all required queue for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
					int nsops, struct list_head *pt)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;
			if (sma->sem_base[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, pt);
			}
		}
	} else {
		/*
		 * No sops means modified semaphores not known.
		 * Assume all were changed.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sem_base[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, pt);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, pt);

	return semop_completed;
}
636c6be82
|
821 822 |
/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @pt: list head for the tasks that must be woken up.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function internally checks if const operations can now succeed.
 *
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sem_base[semnum].pending_alter;

again:
	/* open-coded walk: entries may be unlinked while we iterate */
	walk = pending_list->next;
	while (walk != pending_list) {
		int error, restart;

		q = container_of(walk, struct sem_queue, list);
		walk = walk->next;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan further: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sem_base[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(pt, q, error);
		if (restart)
			goto again;
	}
	return semop_completed;
}
0a2b9d4c7
|
888 |
/**
 * set_semotime - set sem_otime
 * @sma: semaphore array
 * @sops: operations that modified the array, may be NULL
 *
 * sem_otime is replicated to avoid cache line trashing.
 * This function sets one instance to the current time.
 * With @sops == NULL the first semaphore's copy is used; otherwise the
 * copy of the first operated-on semaphore is updated.
 */
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
	if (sops == NULL) {
		sma->sem_base[0].sem_otime = get_seconds();
	} else {
		sma->sem_base[sops[0].sem_num].sem_otime =
							get_seconds();
	}
}

/**
8001c8581
|
907 |
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @pt: list head of the tasks that must be woken up.
 *
 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 * based on the actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_sem_queue_do(@pt).
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			    int otime, struct list_head *pt)
{
	int i;

	/* wait-for-zero waiters first; they may complete regardless of
	 * whether any alter operation can proceed */
	otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it. */
		otime |= update_queue(sma, -1, pt);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, pt);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   decrease.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops wont be able to run: If the
			 *   previous value was too small, then the new
			 *   value will be too small, too.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							      sops[i].sem_num, pt);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}
2f2ed41dc
|
957 |
/*
 * check_qop: Test if a queued operation sleeps on the semaphore semnum
 * Returns 1 if @q's blocking sembuf waits on @semnum for the requested
 * kind of event (zero vs. nonzero), 0 otherwise.
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
			bool count_zero)
{
	struct sembuf *sop = q->blocking;

	/*
	 * Linux always (since 0.99.10) reported a task as sleeping on all
	 * semaphores. This violates SUS, therefore it was changed to the
	 * standard compliant behavior.
	 * Give the administrators a chance to notice that an application
	 * might misbehave because it relies on the Linux behavior.
	 */
	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
			current->comm, task_pid_nr(current));

	if (sop->sem_num != semnum)
		return 0;

	/* sem_op == 0: wait-for-zero; sem_op < 0: waiting for nonzero */
	if (count_zero && sop->sem_op == 0)
		return 1;
	if (!count_zero && sop->sem_op < 0)
		return 1;

	return 0;
}
1da177e4c
|
987 988 989 |
/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 *
 * Per definition, a task waits only on the semaphore of the first semop
 * that cannot proceed, even if additional operation would block, too.
 */
static int count_semcnt(struct sem_array *sma, ushort semnum,
			bool count_zero)
{
	struct list_head *l;
	struct sem_queue *q;
	int semcnt;

	semcnt = 0;
	/* First: check the simple operations. They are easy to evaluate */
	if (count_zero)
		l = &sma->sem_base[semnum].pending_const;
	else
		l = &sma->sem_base[semnum].pending_alter;

	list_for_each_entry(q, l, list) {
		/* all task on a per-semaphore list sleep on exactly
		 * that semaphore
		 */
		semcnt++;
	}

	/* Then: check the complex operations. */
	list_for_each_entry(q, &sma->pending_alter, list) {
		semcnt += check_qop(sma, semnum, q, count_zero);
	}
	/* complex wait-for-zero ops live on the array-wide pending_const
	 * list and are only relevant for GETZCNT */
	if (count_zero) {
		list_for_each_entry(q, &sma->pending_const, list) {
			semcnt += check_qop(sma, semnum, q, count_zero);
		}
	}
	return semcnt;
}
d9a605e40
|
1025 1026 |
/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set hold. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	struct list_head tasks;
	int i;

	/* Free the existing undo structures for this semaphore set.  */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		/* semid = -1 marks the undo entry as orphaned for its owner */
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	INIT_LIST_HEAD(&tasks);
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}

	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}
	/* same for the per-semaphore queues */
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = sma->sem_base + i;
		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
		}
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	/* actual wakeups happen only after all locks are dropped */
	wake_up_sem_queue_do(&tasks);
	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(sma, sem_rcu_free);
}

/* Copy a semid64_ds to user space, converting to the old ABI layout
 * when the caller used the IPC_OLD interface version.
 */
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in,
					int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
d12e1e50e
|
1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 |
/* Return the newest of the per-semaphore sem_otime copies (sem_otime is
 * replicated across semaphores to avoid cache line trashing; see
 * set_semotime()).
 */
static time_t get_semotime(struct sem_array *sma)
{
	int i;
	time_t res;

	res = sma->sem_base[0].sem_otime;
	for (i = 1; i < sma->sem_nsems; i++) {
		time_t to = sma->sem_base[i].sem_otime;

		if (to > res)
			res = to;
	}
	return res;
}
4b9fcb0ec
|
1116 |
/* Handle the semctl commands that do not need the per-array spinlock:
 * IPC_INFO/SEM_INFO (namespace-wide limits and usage) and
 * IPC_STAT/SEM_STAT (per-array status, read under RCU only).
 */
static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, void __user *p)
{
	int err;
	struct sem_array *sma;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo, 0, sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rwsem);
		if (cmd == SEM_INFO) {
			/* SEM_INFO reports live usage instead of limits */
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rwsem);
		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case IPC_STAT:
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id = 0;

		memset(&tbuf, 0, sizeof(tbuf));

		rcu_read_lock();
		if (cmd == SEM_STAT) {
			/* SEM_STAT takes an index, not an id; the ipc id is
			 * returned to the caller */
			sma = sem_obtain_object(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
			id = sma->sem_perm.id;
		} else {
			sma = sem_obtain_object_check(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
		}

		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime = get_semotime(sma);
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		rcu_read_unlock();
		if (copy_semid_to_user(p, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
out_unlock:
	rcu_read_unlock();
	return err;
}
e1fd1f490
|
1202 1203 1204 1205 1206 |
/* semctl(SETVAL): set one semaphore's value, reset matching undo
 * adjustments, and wake any operations that the new value satisfies.
 */
static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
		unsigned long arg)
{
	struct sem_undo *un;
	struct sem_array *sma;
	struct sem *curr;
	int err;
	struct list_head tasks;
	int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
	/* big-endian 64bit */
	val = arg >> 32;
#else
	/* 32bit or little-endian 64bit */
	val = arg;
#endif

	if (val > SEMVMX || val < 0)
		return -ERANGE;

	INIT_LIST_HEAD(&tasks);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}


	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(sma, SETVAL);
	if (err) {
		rcu_read_unlock();
		/* NOTE(review): LSM error is mapped to -EACCES here rather
		 * than propagated -- confirm this is intentional */
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	/* the array may have been removed while we only held RCU */
	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		return -EIDRM;
	}

	curr = &sma->sem_base[semnum];

	ipc_assert_locked_object(&sma->sem_perm);
	/* SETVAL clears any recorded undo adjustment for this semaphore */
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	curr->sempid = task_tgid_vnr(current);
	sma->sem_ctime = get_seconds();
	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &tasks);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
	return 0;
}
e38935341
|
1269 |
/* Handle the semctl commands that operate on the semaphore values:
 * GETALL/SETALL (whole array) and GETVAL/GETPID/GETNCNT/GETZCNT
 * (single semaphore). Large arrays are copied via a temporary buffer
 * allocated with the array lock dropped, holding a reference instead.
 */
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, void __user *p)
{
	struct sem_array *sma;
	struct sem *curr;
	int err, nsems;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	struct list_head tasks;

	INIT_LIST_HEAD(&tasks);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_rcu_wakeup;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = p;
		int i;

		sem_lock(sma, NULL, -1);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		if (nsems > SEMMSL_FAST) {
			/* drop the lock for the allocation; keep the array
			 * alive with a reference meanwhile */
			if (!ipc_rcu_getref(sma)) {
				err = -EIDRM;
				goto out_unlock;
			}
			sem_unlock(sma, -1);
			rcu_read_unlock();
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				ipc_rcu_putref(sma, ipc_rcu_free);
				return -ENOMEM;
			}

			rcu_read_lock();
			sem_lock_and_putref(sma);
			/* recheck: the set may have been deleted while
			 * the lock was dropped */
			if (!ipc_valid_object(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma, -1);
		rcu_read_unlock();
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		if (!ipc_rcu_getref(sma)) {
			err = -EIDRM;
			goto out_rcu_wakeup;
		}
		rcu_read_unlock();

		if (nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				ipc_rcu_putref(sma, ipc_rcu_free);
				return -ENOMEM;
			}
		}

		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
			ipc_rcu_putref(sma, ipc_rcu_free);
			err = -EFAULT;
			goto out_free;
		}

		/* validate all values before touching the array */
		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(sma, ipc_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		rcu_read_lock();
		sem_lock_and_putref(sma);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];

		ipc_assert_locked_object(&sma->sem_perm);
		/* SETALL clears all recorded undo adjustments */
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &tasks);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCTN, GETZCNT: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_rcu_wakeup;

	sem_lock(sma, NULL, -1);
	if (!ipc_valid_object(&sma->sem_perm)) {
		err = -EIDRM;
		goto out_unlock;
	}
	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semcnt(sma, semnum, 0);
		goto out_unlock;
	case GETZCNT:
		err = count_semcnt(sma, semnum, 1);
		goto out_unlock;
	}

out_unlock:
	sem_unlock(sma, -1);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
out_free:
	if (sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}
016d7132f
|
1425 1426 |
/* Copy a semid64_ds from user space, converting from the old ABI layout
 * when the caller used the IPC_OLD interface version. Only the fields
 * IPC_SET consumes (uid/gid/mode) are converted for IPC_OLD.
 */
static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
522bb2a2b
|
1448 |
/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, void __user *p)
{
	struct sem_array *sma;
	int err;
	struct semid64_ds semid64;
	struct kern_ipc_perm *ipcp;

	if (cmd == IPC_SET) {
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
	}

	down_write(&sem_ids(ns).rwsem);
	rcu_read_lock();

	/* permission/audit checks happen without the object lock */
	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
				      &semid64.sem_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64.sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	sem_unlock(sma, -1);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rwsem);
	return err;
}
e1fd1f490
|
1506 |
/* semctl(2) entry point: dispatch to the helper that matches the
 * locking requirements of each command class.
 */
SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	int version;
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;

	if (semid < 0)
		return -EINVAL;

	/* strips the IPC_64 flag from cmd and reports the ABI version */
	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case IPC_STAT:
	case SEM_STAT:
		return semctl_nolock(ns, semid, cmd, version, p);
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_RMID:
	case IPC_SET:
		return semctl_down(ns, semid, cmd, version, p);
	default:
		return -EINVAL;
	}
}
1da177e4c
|
1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 |
/* If the task doesn't already have a undo_list, then allocate one
 * here.  We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}
bf17bb717
|
1568 |
/* Find the undo structure for @semid on @ulp's per-process list, or NULL.
 * Walks the list with the RCU list primitive; callers provide the needed
 * protection (see lookup_undo()/find_alloc_undo()).
 */
static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}
bf17bb717
|
1578 1579 1580 |
/* Look up the undo structure for @semid and, if found, move it to the
 * front of the list (simple MRU heuristic for repeated lookups).
 * Caller must hold ulp->lock.
 */
static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}
4daa28f6d
|
1590 |
/**
 * find_alloc_undo - lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
 * performs a rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems, error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un != NULL))
		goto out;

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return ERR_CAST(sma);
	}

	nsems = sma->sem_nsems;
	if (!ipc_rcu_getref(sma)) {
		rcu_read_unlock();
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	rcu_read_unlock();

	/* step 2: allocate new undo structure */
	/* semadj array is carved out of the same allocation, right
	 * after the sem_undo struct (see step 5) */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_rcu_putref(sma, ipc_rcu_free);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on semaphore array */
	rcu_read_lock();
	sem_lock_and_putref(sma);
	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo struct?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	ipc_assert_locked_object(&sma->sem_perm);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	sem_unlock(sma, -1);
out:
	return un;
}
c61284e99
|
1677 1678 |
/**
 * get_queue_result - retrieve the result code from sem_queue
 * @q: Pointer to queue structure
 *
 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
 * q->status, then we must loop until the value is replaced with the final
 * value: This may happen if a task is woken up by an unrelated event (e.g.
 * signal) and in parallel the task is woken up by another task because it got
 * the requested semaphores.
 *
 * The function can be called with or without holding the semaphore spinlock.
 */
static int get_queue_result(struct sem_queue *q)
{
	int error;

	error = q->status;
	/* busy-wait: the waker sets IN_WAKEUP only transiently while it
	 * finishes publishing the final status */
	while (unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = q->status;
	}

	return error;
}
d5460c997
|
1702 1703 |
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, unsigned, nsops, const struct timespec __user *, timeout) |
1da177e4c
|
1704 1705 1706 1707 |
{ int error = -EINVAL; struct sem_array *sma; struct sembuf fast_sops[SEMOPM_FAST]; |
239521f31
|
1708 |
struct sembuf *sops = fast_sops, *sop; |
1da177e4c
|
1709 |
struct sem_undo *un; |
6062a8dc0
|
1710 |
int undos = 0, alter = 0, max, locknum; |
1da177e4c
|
1711 1712 |
struct sem_queue queue; unsigned long jiffies_left = 0; |
e38935341
|
1713 |
struct ipc_namespace *ns; |
0a2b9d4c7
|
1714 |
struct list_head tasks; |
e38935341
|
1715 1716 |
ns = current->nsproxy->ipc_ns; |
1da177e4c
|
1717 1718 1719 |
if (nsops < 1 || semid < 0) return -EINVAL; |
e38935341
|
1720 |
if (nsops > ns->sc_semopm) |
1da177e4c
|
1721 |
return -E2BIG; |
239521f31
|
1722 1723 1724 |
if (nsops > SEMOPM_FAST) { sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL); if (sops == NULL) |
1da177e4c
|
1725 1726 |
return -ENOMEM; } |
239521f31
|
1727 1728 |
if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) { error = -EFAULT; |
1da177e4c
|
1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 |
goto out_free; } if (timeout) { struct timespec _timeout; if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) { error = -EFAULT; goto out_free; } if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 || _timeout.tv_nsec >= 1000000000L) { error = -EINVAL; goto out_free; } jiffies_left = timespec_to_jiffies(&_timeout); } max = 0; for (sop = sops; sop < sops + nsops; sop++) { if (sop->sem_num >= max) max = sop->sem_num; if (sop->sem_flg & SEM_UNDO) |
b78755abc
|
1749 1750 |
undos = 1; if (sop->sem_op != 0) |
1da177e4c
|
1751 1752 |
alter = 1; } |
1da177e4c
|
1753 |
|
6062a8dc0
|
1754 |
INIT_LIST_HEAD(&tasks); |
1da177e4c
|
1755 |
if (undos) { |
6062a8dc0
|
1756 |
/* On success, find_alloc_undo takes the rcu_read_lock */ |
4daa28f6d
|
1757 |
un = find_alloc_undo(ns, semid); |
1da177e4c
|
1758 1759 1760 1761 |
if (IS_ERR(un)) { error = PTR_ERR(un); goto out_free; } |
6062a8dc0
|
1762 |
} else { |
1da177e4c
|
1763 |
un = NULL; |
6062a8dc0
|
1764 1765 |
rcu_read_lock(); } |
1da177e4c
|
1766 |
|
16df3674e
|
1767 |
sma = sem_obtain_object_check(ns, semid); |
023a53557
|
1768 |
if (IS_ERR(sma)) { |
6062a8dc0
|
1769 |
rcu_read_unlock(); |
023a53557
|
1770 |
error = PTR_ERR(sma); |
1da177e4c
|
1771 |
goto out_free; |
023a53557
|
1772 |
} |
16df3674e
|
1773 |
error = -EFBIG; |
c728b9c87
|
1774 1775 |
if (max >= sma->sem_nsems) goto out_rcu_wakeup; |
16df3674e
|
1776 1777 |
error = -EACCES; |
c728b9c87
|
1778 1779 |
if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) goto out_rcu_wakeup; |
16df3674e
|
1780 1781 |
error = security_sem_semop(sma, sops, nsops, alter); |
c728b9c87
|
1782 1783 |
if (error) goto out_rcu_wakeup; |
16df3674e
|
1784 |
|
6e224f945
|
1785 1786 |
error = -EIDRM; locknum = sem_lock(sma, sops, nsops); |
0f3d2b013
|
1787 1788 1789 1790 1791 1792 1793 1794 1795 |
/* * We eventually might perform the following check in a lockless * fashion, considering ipc_valid_object() locking constraints. * If nsops == 1 and there is no contention for sem_perm.lock, then * only a per-semaphore lock is held and it's OK to proceed with the * check below. More details on the fine grained locking scheme * entangled here and why it's RMID race safe on comments at sem_lock() */ if (!ipc_valid_object(&sma->sem_perm)) |
6e224f945
|
1796 |
goto out_unlock_free; |
1da177e4c
|
1797 |
/* |
4daa28f6d
|
1798 |
* semid identifiers are not unique - find_alloc_undo may have |
1da177e4c
|
1799 |
* allocated an undo structure, it was invalidated by an RMID |
4daa28f6d
|
1800 |
* and now a new array with received the same id. Check and fail. |
25985edce
|
1801 |
* This case can be detected checking un->semid. The existence of |
380af1b33
|
1802 |
* "un" itself is guaranteed by rcu. |
1da177e4c
|
1803 |
*/ |
6062a8dc0
|
1804 1805 |
if (un && un->semid == -1) goto out_unlock_free; |
4daa28f6d
|
1806 |
|
d198cd6d6
|
1807 1808 1809 1810 1811 1812 1813 |
queue.sops = sops; queue.nsops = nsops; queue.undo = un; queue.pid = task_tgid_vnr(current); queue.alter = alter; error = perform_atomic_semop(sma, &queue); |
0e8c66569
|
1814 1815 1816 1817 1818 |
if (error == 0) { /* If the operation was successful, then do * the required updates. */ if (alter) |
0a2b9d4c7
|
1819 |
do_smart_update(sma, sops, nsops, 1, &tasks); |
0e8c66569
|
1820 1821 |
else set_semotime(sma, sops); |
1da177e4c
|
1822 |
} |
0e8c66569
|
1823 1824 |
if (error <= 0) goto out_unlock_free; |
1da177e4c
|
1825 1826 1827 1828 |
/* We need to sleep on this operation, so we put the current * task into the pending queue and go to sleep. */ |
46c0a8ca3
|
1829 |
|
b97e820ff
|
1830 1831 1832 |
if (nsops == 1) { struct sem *curr; curr = &sma->sem_base[sops->sem_num]; |
f269f40ad
|
1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 |
if (alter) { if (sma->complex_count) { list_add_tail(&queue.list, &sma->pending_alter); } else { list_add_tail(&queue.list, &curr->pending_alter); } } else { |
1a82e9e1d
|
1843 |
list_add_tail(&queue.list, &curr->pending_const); |
f269f40ad
|
1844 |
} |
b97e820ff
|
1845 |
} else { |
f269f40ad
|
1846 1847 |
if (!sma->complex_count) merge_queues(sma); |
9f1bc2c90
|
1848 |
if (alter) |
1a82e9e1d
|
1849 |
list_add_tail(&queue.list, &sma->pending_alter); |
9f1bc2c90
|
1850 |
else |
1a82e9e1d
|
1851 |
list_add_tail(&queue.list, &sma->pending_const); |
b97e820ff
|
1852 1853 |
sma->complex_count++; } |
1da177e4c
|
1854 1855 |
queue.status = -EINTR; queue.sleeper = current; |
0b0577f60
|
1856 1857 |
sleep_again: |
1da177e4c
|
1858 |
current->state = TASK_INTERRUPTIBLE; |
6062a8dc0
|
1859 |
sem_unlock(sma, locknum); |
6d49dab8a
|
1860 |
rcu_read_unlock(); |
1da177e4c
|
1861 1862 1863 1864 1865 |
if (timeout) jiffies_left = schedule_timeout(jiffies_left); else schedule(); |
c61284e99
|
1866 |
error = get_queue_result(&queue); |
1da177e4c
|
1867 1868 1869 |
if (error != -EINTR) { /* fast path: update_queue already obtained all requested |
c61284e99
|
1870 1871 1872 1873 1874 1875 1876 |
* resources. * Perform a smp_mb(): User space could assume that semop() * is a memory barrier: Without the mb(), the cpu could * speculatively read in user space stale data that was * overwritten by the previous owner of the semaphore. */ smp_mb(); |
1da177e4c
|
1877 1878 |
goto out_free; } |
321310ced
|
1879 |
rcu_read_lock(); |
6062a8dc0
|
1880 |
sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum); |
d694ad62b
|
1881 1882 1883 1884 1885 1886 1887 1888 1889 |
/* * Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing. */ error = get_queue_result(&queue); /* * Array removed? If yes, leave without sem_unlock(). */ |
023a53557
|
1890 |
if (IS_ERR(sma)) { |
321310ced
|
1891 |
rcu_read_unlock(); |
1da177e4c
|
1892 1893 |
goto out_free; } |
c61284e99
|
1894 |
|
1da177e4c
|
1895 |
/* |
d694ad62b
|
1896 1897 |
* If queue.status != -EINTR we are woken up by another process. * Leave without unlink_queue(), but with sem_unlock(). |
1da177e4c
|
1898 |
*/ |
3ab08fe20
|
1899 |
if (error != -EINTR) |
1da177e4c
|
1900 |
goto out_unlock_free; |
1da177e4c
|
1901 1902 1903 1904 1905 1906 |
/* * If an interrupt occurred we have to clean up the queue */ if (timeout && jiffies_left == 0) error = -EAGAIN; |
0b0577f60
|
1907 1908 1909 1910 1911 1912 |
/* * If the wakeup was spurious, just retry */ if (error == -EINTR && !signal_pending(current)) goto sleep_again; |
b97e820ff
|
1913 |
unlink_queue(sma, &queue); |
1da177e4c
|
1914 1915 |
out_unlock_free: |
6062a8dc0
|
1916 |
sem_unlock(sma, locknum); |
c728b9c87
|
1917 |
out_rcu_wakeup: |
6d49dab8a
|
1918 |
rcu_read_unlock(); |
0a2b9d4c7
|
1919 |
wake_up_sem_queue_do(&tasks); |
1da177e4c
|
1920 |
out_free: |
239521f31
|
1921 |
if (sops != fast_sops) |
1da177e4c
|
1922 1923 1924 |
kfree(sops); return error; } |
d5460c997
|
1925 1926 |
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops, unsigned, nsops) |
1da177e4c
|
1927 1928 1929 1930 1931 1932 |
{ return sys_semtimedop(semid, tsops, nsops, NULL); } /* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between * parent and child tasks. |
1da177e4c
|
1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 |
*/ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) { struct sem_undo_list *undo_list; int error; if (clone_flags & CLONE_SYSVSEM) { error = get_undo_list(&undo_list); if (error) return error; |
1da177e4c
|
1944 1945 |
atomic_inc(&undo_list->refcnt); tsk->sysvsem.undo_list = undo_list; |
46c0a8ca3
|
1946 |
} else |
1da177e4c
|
1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 |
tsk->sysvsem.undo_list = NULL; return 0; } /* * add semadj values to semaphores, free undo structures. * undo structures are not freed when semaphore arrays are destroyed * so some of them may be out of date. * IMPLEMENTATION NOTE: There is some confusion over whether the * set of adjustments that needs to be done should be done in an atomic * manner or not. That is, if we are attempting to decrement the semval * should we queue up and wait until we can do so legally? * The original implementation attempted to do this (queue and wait). * The current implementation does not do so. The POSIX standard * and SVID should be consulted to determine what behavior is mandated. */ void exit_sem(struct task_struct *tsk) { |
4daa28f6d
|
1966 |
struct sem_undo_list *ulp; |
1da177e4c
|
1967 |
|
4daa28f6d
|
1968 1969 |
ulp = tsk->sysvsem.undo_list; if (!ulp) |
1da177e4c
|
1970 |
return; |
9edff4ab1
|
1971 |
tsk->sysvsem.undo_list = NULL; |
1da177e4c
|
1972 |
|
4daa28f6d
|
1973 |
if (!atomic_dec_and_test(&ulp->refcnt)) |
1da177e4c
|
1974 |
return; |
380af1b33
|
1975 |
for (;;) { |
1da177e4c
|
1976 |
struct sem_array *sma; |
380af1b33
|
1977 |
struct sem_undo *un; |
0a2b9d4c7
|
1978 |
struct list_head tasks; |
6062a8dc0
|
1979 |
int semid, i; |
4daa28f6d
|
1980 |
|
380af1b33
|
1981 |
rcu_read_lock(); |
05725f7eb
|
1982 1983 |
un = list_entry_rcu(ulp->list_proc.next, struct sem_undo, list_proc); |
380af1b33
|
1984 1985 1986 1987 |
if (&un->list_proc == &ulp->list_proc) semid = -1; else semid = un->semid; |
4daa28f6d
|
1988 |
|
6062a8dc0
|
1989 1990 |
if (semid == -1) { rcu_read_unlock(); |
380af1b33
|
1991 |
break; |
6062a8dc0
|
1992 |
} |
1da177e4c
|
1993 |
|
6062a8dc0
|
1994 |
sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid); |
380af1b33
|
1995 |
/* exit_sem raced with IPC_RMID, nothing to do */ |
6062a8dc0
|
1996 1997 |
if (IS_ERR(sma)) { rcu_read_unlock(); |
380af1b33
|
1998 |
continue; |
6062a8dc0
|
1999 |
} |
1da177e4c
|
2000 |
|
6062a8dc0
|
2001 |
sem_lock(sma, NULL, -1); |
6e224f945
|
2002 |
/* exit_sem raced with IPC_RMID, nothing to do */ |
0f3d2b013
|
2003 |
if (!ipc_valid_object(&sma->sem_perm)) { |
6e224f945
|
2004 2005 2006 2007 |
sem_unlock(sma, -1); rcu_read_unlock(); continue; } |
bf17bb717
|
2008 |
un = __lookup_undo(ulp, semid); |
380af1b33
|
2009 2010 2011 2012 |
if (un == NULL) { /* exit_sem raced with IPC_RMID+semget() that created * exactly the same semid. Nothing to do. */ |
6062a8dc0
|
2013 |
sem_unlock(sma, -1); |
6d49dab8a
|
2014 |
rcu_read_unlock(); |
380af1b33
|
2015 2016 2017 2018 |
continue; } /* remove un from the linked lists */ |
cf9d5d78d
|
2019 |
ipc_assert_locked_object(&sma->sem_perm); |
4daa28f6d
|
2020 |
list_del(&un->list_id); |
380af1b33
|
2021 2022 2023 |
spin_lock(&ulp->lock); list_del_rcu(&un->list_proc); spin_unlock(&ulp->lock); |
4daa28f6d
|
2024 2025 |
/* perform adjustments registered in un */ for (i = 0; i < sma->sem_nsems; i++) { |
239521f31
|
2026 |
struct sem *semaphore = &sma->sem_base[i]; |
4daa28f6d
|
2027 2028 |
if (un->semadj[i]) { semaphore->semval += un->semadj[i]; |
1da177e4c
|
2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 |
/* * Range checks of the new semaphore value, * not defined by sus: * - Some unices ignore the undo entirely * (e.g. HP UX 11i 11.22, Tru64 V5.1) * - some cap the value (e.g. FreeBSD caps * at 0, but doesn't enforce SEMVMX) * * Linux caps the semaphore value, both at 0 * and at SEMVMX. * |
239521f31
|
2040 |
* Manfred <manfred@colorfullife.com> |
1da177e4c
|
2041 |
*/ |
5f921ae96
|
2042 2043 2044 2045 |
if (semaphore->semval < 0) semaphore->semval = 0; if (semaphore->semval > SEMVMX) semaphore->semval = SEMVMX; |
b488893a3
|
2046 |
semaphore->sempid = task_tgid_vnr(current); |
1da177e4c
|
2047 2048 |
} } |
1da177e4c
|
2049 |
/* maybe some queued-up processes were waiting for this */ |
0a2b9d4c7
|
2050 2051 |
INIT_LIST_HEAD(&tasks); do_smart_update(sma, NULL, 0, 1, &tasks); |
6062a8dc0
|
2052 |
sem_unlock(sma, -1); |
6d49dab8a
|
2053 |
rcu_read_unlock(); |
0a2b9d4c7
|
2054 |
wake_up_sem_queue_do(&tasks); |
380af1b33
|
2055 |
|
693a8b6ee
|
2056 |
kfree_rcu(un, rcu); |
1da177e4c
|
2057 |
} |
4daa28f6d
|
2058 |
kfree(ulp); |
1da177e4c
|
2059 2060 2061 |
} #ifdef CONFIG_PROC_FS |
19b4946ca
|
2062 |
static int sysvipc_sem_proc_show(struct seq_file *s, void *it) |
1da177e4c
|
2063 |
{ |
1efdb69b0
|
2064 |
struct user_namespace *user_ns = seq_user_ns(s); |
19b4946ca
|
2065 |
struct sem_array *sma = it; |
d12e1e50e
|
2066 |
time_t sem_otime; |
d8c633766
|
2067 2068 2069 2070 2071 2072 2073 |
/* * The proc interface isn't aware of sem_lock(), it calls * ipc_lock_object() directly (in sysvipc_find_ipc). * In order to stay compatible with sem_lock(), we must wait until * all simple semop() calls have left their critical regions. */ sem_wait_array(sma); |
d12e1e50e
|
2074 |
sem_otime = get_semotime(sma); |
19b4946ca
|
2075 2076 |
return seq_printf(s, |
b97e820ff
|
2077 2078 |
"%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu ", |
19b4946ca
|
2079 |
sma->sem_perm.key, |
7ca7e564e
|
2080 |
sma->sem_perm.id, |
19b4946ca
|
2081 2082 |
sma->sem_perm.mode, sma->sem_nsems, |
1efdb69b0
|
2083 2084 2085 2086 |
from_kuid_munged(user_ns, sma->sem_perm.uid), from_kgid_munged(user_ns, sma->sem_perm.gid), from_kuid_munged(user_ns, sma->sem_perm.cuid), from_kgid_munged(user_ns, sma->sem_perm.cgid), |
d12e1e50e
|
2087 |
sem_otime, |
19b4946ca
|
2088 |
sma->sem_ctime); |
1da177e4c
|
2089 2090 |
} #endif |