Blame view
ipc/sem.c
55.2 KB
1da177e4c
|
1 2 3 4 5 |
/* * linux/ipc/sem.c * Copyright (C) 1992 Krishna Balasubramanian * Copyright (C) 1995 Eric Schenk, Bruno Haible * |
1da177e4c
|
6 7 8 |
* /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com> * * SMP-threaded, sysctl's added |
624dffcbc
|
9 |
* (c) 1999 Manfred Spraul <manfred@colorfullife.com> |
1da177e4c
|
10 |
* Enforced range limit on SEM_UNDO |
046c68842
|
11 |
* (c) 2001 Red Hat Inc |
1da177e4c
|
12 13 |
* Lockless wakeup * (c) 2003 Manfred Spraul <manfred@colorfullife.com> |
c5cf6359a
|
14 15 |
* Further wakeup optimizations, documentation * (c) 2010 Manfred Spraul <manfred@colorfullife.com> |
073115d6b
|
16 17 18 |
* * support for audit of ipc object properties and permission changes * Dustin Kirkland <dustin.kirkland@us.ibm.com> |
e38935341
|
19 20 21 22 |
* * namespaces support * OpenVZ, SWsoft Inc. * Pavel Emelianov <xemul@openvz.org> |
c5cf6359a
|
23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 |
* * Implementation notes: (May 2010) * This file implements System V semaphores. * * User space visible behavior: * - FIFO ordering for semop() operations (just FIFO, not starvation * protection) * - multiple semaphore operations that alter the same semaphore in * one semop() are handled. * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and * SETALL calls. * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO. * - undo adjustments at process exit are limited to 0..SEMVMX. * - namespaces are supported. * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing * to /proc/sys/kernel/sem. * - statistics about the usage are reported in /proc/sysvipc/sem. * * Internals: * - scalability: * - all global variables are read-mostly. * - semop() calls and semctl(RMID) are synchronized by RCU. * - most operations do write operations (actually: spin_lock calls) to * the per-semaphore array structure. * Thus: Perfect SMP scaling between independent semaphore arrays. * If multiple semaphores in one array are used, then cache line * thrashing on the semaphore array spinlock will limit the scaling.
2f2ed41dc
|
50 |
* - semncnt and semzcnt are calculated on demand in count_semcnt() |
c5cf6359a
|
51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 |
* - the task that performs a successful semop() scans the list of all * sleeping tasks and completes any pending operations that can be fulfilled. * Semaphores are actively given to waiting tasks (necessary for FIFO). * (see update_queue()) * - To improve the scalability, the actual wake-up calls are performed after * dropping all locks. (see wake_up_sem_queue_prepare(), * wake_up_sem_queue_do()) * - All work is done by the waker, the woken up task does not have to do * anything - not even acquiring a lock or dropping a refcount. * - A woken up task may not even touch the semaphore array anymore, it may * have been destroyed already by a semctl(RMID). * - The synchronizations between wake-ups due to a timeout/signal and a * wake-up due to a completed semaphore operation is achieved by using an * intermediate state (IN_WAKEUP). * - UNDO values are stored in an array (one per process and per * semaphore array, lazily allocated). For backwards compatibility, multiple * modes for the UNDO variables are supported (per process, per thread) * (see copy_semundo, CLONE_SYSVSEM) * - There are two lists of the pending operations: a per-array list * and per-semaphore list (stored in the array). This allows to achieve FIFO * ordering without always scanning all pending operations. * The worst-case behavior is nevertheless O(N^2) for N wakeups. |
1da177e4c
|
73 |
*/ |
1da177e4c
|
74 75 76 77 78 |
#include <linux/slab.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/time.h> |
1da177e4c
|
79 80 81 |
#include <linux/security.h> #include <linux/syscalls.h> #include <linux/audit.h> |
c59ede7b7
|
82 |
#include <linux/capability.h> |
19b4946ca
|
83 |
#include <linux/seq_file.h> |
3e148c799
|
84 |
#include <linux/rwsem.h> |
e38935341
|
85 |
#include <linux/nsproxy.h> |
ae5e1b22f
|
86 |
#include <linux/ipc_namespace.h> |
5f921ae96
|
87 |
|
7153e4027
|
88 |
#include <linux/uaccess.h> |
1da177e4c
|
89 |
#include "util.h" |
e57940d71
|
90 91 92 93 |
/* One semaphore structure for each semaphore in the system. */
struct sem {
	int	semval;		/* current value */
	int	sempid;		/* pid of last operation */
	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
	struct list_head pending_alter; /* pending single-sop operations */
					/* that alter the semaphore */
	struct list_head pending_const; /* pending single-sop operations */
					/* that do not alter the semaphore*/
	time_t	sem_otime;	/* candidate for sem_otime */
} ____cacheline_aligned_in_smp;

/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head	list;	 /* queue of pending operations */
	struct task_struct	*sleeper; /* this process */
	struct sem_undo		*undo;	 /* undo structure */
	int			pid;	 /* process id of requesting process */
	int			status;	 /* completion status of operation */
	struct sembuf		*sops;	 /* array of pending operations */
	struct sembuf		*blocking; /* the operation that blocked */
	int			nsops;	 /* number of operations */
	int			alter;	 /* does *sops alter the array? */
};

/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list: *
						 * all undos from one process
						 * rcu protected */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
	struct list_head	list_id;	/* per semaphore array list:
						 * all undos for one array */
	int			semid;		/* semaphore set identifier */
	short			*semadj;	/* array of adjustments */
						/* one per semaphore */
};

/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	atomic_t		refcnt;		/* shared by all tasks in the group */
	spinlock_t		lock;		/* protects list_proc */
	struct list_head	list_proc;	/* all sem_undo structs of the group */
};
ed2ddbf88
|
139 |
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS]) |
e38935341
|
140 |
|
1b531f213
|
141 |
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid) |
1da177e4c
|
142 |
|
7748dbfaa
|
143 |
static int newary(struct ipc_namespace *, struct ipc_params *); |
01b8b07a5
|
144 |
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *); |
1da177e4c
|
145 |
#ifdef CONFIG_PROC_FS |
19b4946ca
|
146 |
static int sysvipc_sem_proc_show(struct seq_file *s, void *it); |
1da177e4c
|
147 148 149 150 151 152 |
#endif #define SEMMSL_FAST 256 /* 512 bytes on stack */ #define SEMOPM_FAST 64 /* ~ 372 bytes on stack */ /* |
758a6ba39
|
153 |
* Locking: |
1da177e4c
|
154 |
* sem_undo.id_next, |
758a6ba39
|
155 |
* sem_array.complex_count, |
1a82e9e1d
|
156 |
* sem_array.pending{_alter,_cont}, |
758a6ba39
|
157 |
* sem_array.sem_undo: global sem_lock() for read/write |
1da177e4c
|
158 |
* sem_undo.proc_next: only "current" is allowed to read/write that field. |
46c0a8ca3
|
159 |
* |
758a6ba39
|
160 161 |
* sem_array.sem_base[i].pending_{const,alter}: * global or semaphore sem_lock() for read/write |
1da177e4c
|
162 |
*/ |
e38935341
|
163 164 165 166 |
#define sc_semmsl sem_ctls[0] #define sc_semmns sem_ctls[1] #define sc_semopm sem_ctls[2] #define sc_semmni sem_ctls[3] |
ed2ddbf88
|
167 |
/*
 * sem_init_ns - initialize the semaphore subsystem for one IPC namespace.
 * @ns: the namespace being set up
 *
 * Resets the tunable limits to their compile-time defaults and
 * initializes the IPC id table for semaphore sets.
 */
void sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}
ae5e1b22f
|
176 |
#ifdef CONFIG_IPC_NS |
e38935341
|
177 178 |
void sem_exit_ns(struct ipc_namespace *ns) { |
01b8b07a5
|
179 |
free_ipcs(ns, &sem_ids(ns), freeary); |
7d6feeb28
|
180 |
idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr); |
e38935341
|
181 |
} |
ae5e1b22f
|
182 |
#endif |
1da177e4c
|
183 |
|
239521f31
|
184 |
/*
 * sem_init - boot-time initialization of the semaphore subsystem.
 *
 * Sets up the initial IPC namespace and registers the
 * /proc/sysvipc/sem interface.
 */
void __init sem_init(void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}
f269f40ad
|
192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 |
/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* complex operations still around? */
	if (sma->complex_count)
		return;
	/*
	 * We will switch back to simple mode.
	 * Move all pending operation back into the per-semaphore
	 * queues.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;
		/* single-sop operation: sops[0] names its only semaphore */
		curr = &sma->sem_base[q->sops[0].sem_num];

		list_add_tail(&q->list, &curr->pending_alter);
	}
	INIT_LIST_HEAD(&sma->pending_alter);
}

/**
 * merge_queues - merge single semop queues into global queue
 * @sma: semaphore array
 *
 * This function merges all per-semaphore queues into the global queue.
 * It is necessary to achieve FIFO ordering for the pending single-sop
 * operations when a multi-semop operation must sleep.
 * Only the alter operations must be moved, the const operations can stay.
 */
static void merge_queues(struct sem_array *sma)
{
	int i;

	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = sma->sem_base + i;

		list_splice_init(&sem->pending_alter, &sma->pending_alter);
	}
}
53dad6d3a
|
238 239 240 241 242 243 244 245 |
/*
 * RCU callback: release the LSM security blob and then the sem_array
 * memory itself, once all pre-existing RCU readers are done.
 */
static void sem_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct sem_array *sma = ipc_rcu_to_struct(p);

	security_sem_free(sma);
	ipc_rcu_free(head);
}
3e148c799
|
246 |
/*
 * Wait until all currently ongoing simple ops have completed.
 * Caller must own sem_perm.lock.
 * New simple ops cannot start, because simple ops first check
 * that a) sem_perm.lock is free and b) complex_count is 0.
 */
static void sem_wait_array(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->complex_count) {
		/* The thread that increased sma->complex_count waited on
		 * all sem->lock locks. Thus we don't need to wait again.
		 */
		return;
	}

	for (i = 0; i < sma->sem_nsems; i++) {
		sem = sma->sem_base + i;
		spin_unlock_wait(&sem->lock);
	}
}

/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 *
 * Returns the index of the locked per-semaphore lock, or -1 if the
 * global (full array) lock was taken.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			      int nsops)
{
	struct sem *sem;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* And wait until all simple ops that are processed
		 * right now have dropped their locks.
		 */
		sem_wait_array(sma);
		return -1;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 * The rules are:
	 * - optimized locking is possible if no complex operation
	 *   is either enqueued or processed right now.
	 * - The test for enqueued complex ops is simple:
	 *      sma->complex_count != 0
	 * - Testing for complex ops that are processed right now is
	 *   a bit more difficult. Complex ops acquire the full lock
	 *   and first wait that the running simple ops have completed.
	 *   (see above)
	 *   Thus: If we own a simple lock and the global lock is free
	 *	and complex_count is now 0, then it will stay 0 and
	 *	thus just locking sem->lock is sufficient.
	 */
	sem = sma->sem_base + sops->sem_num;

	if (sma->complex_count == 0) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/* Then check that the global lock is free */
		if (!spin_is_locked(&sma->sem_perm.lock)) {
			/*
			 * The ipc object lock check must be visible on all
			 * cores before rechecking the complex count.  Otherwise
			 * we can race with another thread that does:
			 *	complex_count++;
			 *	spin_unlock(sem_perm.lock);
			 */
			smp_rmb();

			/*
			 * Now repeat the test of complex_count:
			 * It can't change anymore until we drop sem->lock.
			 * Thus: if is now 0, then it will stay 0.
			 */
			if (sma->complex_count == 0) {
				/* fast path successful! */
				return sops->sem_num;
			}
		}
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->complex_count == 0) {
		/* False alarm:
		 * There is no complex operation, thus we can switch
		 * back to the fast path.
		 */
		spin_lock(&sem->lock);
		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/* Not a false alarm, thus complete the sequence for a
		 * full lock.
		 */
		sem_wait_array(sma);
		return -1;
	}
}

/*
 * Release whatever sem_lock() took: the global lock (locknum == -1,
 * after moving single-sop waiters back to their per-semaphore queues)
 * or the per-semaphore lock at index locknum.
 */
static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == -1) {
		unmerge_queues(sma);
		ipc_unlock_object(&sma->sem_perm);
	} else {
		struct sem *sem = sma->sem_base + locknum;
		spin_unlock(&sem->lock);
	}
}

/*
 * sem_lock_(check_) routines are called in the paths where the rwsem
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
			int id, struct sembuf *sops, int nsops, int *locknum)
{
	struct kern_ipc_perm *ipcp;
	struct sem_array *sma;

	ipcp = ipc_obtain_object(&sem_ids(ns), id);
	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	sma = container_of(ipcp, struct sem_array, sem_perm);
	*locknum = sem_lock(sma, sops, nsops);

	/* ipc_rmid() may have already freed the ID while sem_lock
	 * was spinning: verify that the structure is still valid
	 */
	if (ipc_valid_object(ipcp))
		return container_of(ipcp, struct sem_array, sem_perm);

	sem_unlock(sma, *locknum);
	return ERR_PTR(-EINVAL);
}
16df3674e
|
398 399 400 401 402 403 404 405 406 |
/*
 * Look up a semaphore array by id under RCU, without taking any lock.
 * Returns an ERR_PTR on lookup failure.
 */
static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *perm;

	perm = ipc_obtain_object(&sem_ids(ns), id);
	if (IS_ERR(perm))
		return ERR_CAST(perm);

	return container_of(perm, struct sem_array, sem_perm);
}
16df3674e
|
407 408 409 410 411 412 413 |
/*
 * Like sem_obtain_object(), but also verifies that the id's sequence
 * number matches (guards against stale ids). Caller holds the RCU
 * read lock; no lock on the array is taken.
 */
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}
6ff379721
|
417 418 |
/*
 * Re-acquire the full array lock and drop the reference that was
 * taken before the lock was released (ipc_rcu_free is only called
 * when this was the last reference).
 */
static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(sma, ipc_rcu_free);
}
7ca7e564e
|
422 423 424 425 |
/* Remove the semaphore set from the namespace's IPC id table. */
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
1da177e4c
|
426 427 428 429 430 |
/* * Lockless wakeup algorithm: * Without the check/retry algorithm a lockless wakeup is possible: * - queue.status is initialized to -EINTR before blocking. * - wakeup is performed by |
1a82e9e1d
|
431 |
* * unlinking the queue entry from the pending list |
1da177e4c
|
432 433 434 435 436 437 |
* * setting queue.status to IN_WAKEUP * This is the notification for the blocked thread that a * result value is imminent. * * call wake_up_process * * set queue.status to the final value. * - the previously blocked thread checks queue.status: |
239521f31
|
438 439 440 441 442 |
* * if it's IN_WAKEUP, then it must wait until the value changes * * if it's not -EINTR, then the operation was completed by * update_queue. semtimedop can return queue.status without * performing any operation on the sem array. * * otherwise it must acquire the spinlock and check what's up. |
1da177e4c
|
443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 |
* * The two-stage algorithm is necessary to protect against the following * races: * - if queue.status is set after wake_up_process, then the woken up idle * thread could race forward and try (and fail) to acquire sma->lock * before update_queue had a chance to set queue.status * - if queue.status is written before wake_up_process and if the * blocked process is woken up by a signal between writing * queue.status and the wake_up_process, then the woken up * process could return from semtimedop and die by calling * sys_exit before wake_up_process is called. Then wake_up_process * will oops, because the task structure is already invalid. * (yes, this happened on s390 with sysv msg). * */ #define IN_WAKEUP 1 |
f4566f048
|
459 460 461 462 463 |
/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 *
 * Returns the new set's id on success, or a negative error code
 * (-EINVAL, -ENOSPC, -ENOMEM, or a security/ipc_addid failure).
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	/* The struct sem entries live directly behind the sem_array. */
	size = sizeof(*sma) + nsems * sizeof(struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma)
		return -ENOMEM;

	memset(sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		/*
		 * Security blob was not allocated: free only the array
		 * itself (plain ipc_rcu_free, not sem_rcu_free).
		 */
		ipc_rcu_putref(sma, ipc_rcu_free);
		return retval;
	}

	sma->sem_base = (struct sem *) &sma[1];

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
		INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
		spin_lock_init(&sma->sem_base[i].lock);
	}

	sma->complex_count = 0;
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();

	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (id < 0) {
		/*
		 * Security blob exists now, so the RCU callback must also
		 * run security_sem_free(): use sem_rcu_free here.
		 */
		ipc_rcu_putref(sma, sem_rcu_free);
		return id;
	}
	ns->used_sems += nsems;

	/* ipc_addid() returned with the array locked and RCU held. */
	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}
7748dbfaa
|
523 |
|
f4566f048
|
524 |
/*
 * Called with sem_ids.rwsem and ipcp locked.
 *
 * ipcget() "associate" callback: let the LSM decide whether the caller
 * may attach to an existing semaphore set with the given flags.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	return security_sem_associate(sma, semflg);
}
f4566f048
|
534 |
/* |
d9a605e40
|
535 |
* Called with sem_ids.rwsem and ipcp locked. |
f4566f048
|
536 |
*/ |
03f02c765
|
537 538 |
static inline int sem_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params) |
7748dbfaa
|
539 |
{ |
03f02c765
|
540 541 542 543 |
struct sem_array *sma; sma = container_of(ipcp, struct sem_array, sem_perm); if (params->u.nsems > sma->sem_nsems) |
7748dbfaa
|
544 545 546 547 |
return -EINVAL; return 0; } |
d5460c997
|
548 |
/*
 * semget(2): find or create a semaphore set in the caller's IPC
 * namespace. All the real work (key lookup, permission checks,
 * creation via newary) is delegated to the common ipcget() helper.
 */
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops sem_ops = {
		.getnew = newary,
		.associate = sem_security,
		.more_checks = sem_more_checks,
	};
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
78f5009cc
|
569 570 |
/**
 * perform_atomic_semop - Perform (if possible) a semaphore operation
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Negative values are error codes.
 *
 * The operation set is applied atomically: either every sop in the
 * array takes effect, or (on a would-block or range error) all
 * already-applied sops are rolled back before returning.
 */
static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops, pid;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		/* sem_op == 0 is "wait for zero": blocks while semval != 0 */
		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
			un->semadj[sop->sem_num] = undo;
		}

		curr->semval = result;
	}

	/* All sops succeeded: record the caller's pid on each semaphore. */
	sop--;
	pid = q->pid;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	/* remember which sop blocked, for semncnt/semzcnt accounting */
	q->blocking = sop;

	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	/* Roll back every sop before the failing one, in reverse order. */
	sop--;
	while (sop >= sops) {
		sem_op = sop->sem_op;
		sma->sem_base[sop->sem_num].semval -= sem_op;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sem_op;
		sop--;
	}

	return result;
}
0a2b9d4c7
|
647 648 649 650 651 |
/**
 * wake_up_sem_queue_prepare - prepare the wake-up of a queue entry
 * @pt: list head collecting the tasks that must be woken up
 * @q: queue entry that must be signaled
 * @error: Error value for the signal
 *
 * Prepare the wake-up of the queue entry q: mark it IN_WAKEUP, stash
 * the return code in q->pid, and append it to @pt. The actual wake-up
 * happens later, in wake_up_sem_queue_do(), after all locks are dropped.
 */
static void wake_up_sem_queue_prepare(struct list_head *pt,
				struct sem_queue *q, int error)
{
	if (list_empty(pt)) {
		/*
		 * Hold preempt off so that we don't get preempted and have the
		 * wakee busy-wait until we're scheduled back on.
		 */
		preempt_disable();
	}
	q->status = IN_WAKEUP;
	q->pid = error;

	list_add_tail(&q->list, pt);
}

/**
 * wake_up_sem_queue_do - do the actual wake-up
 * @pt: list of tasks to be woken up
 *
 * Do the actual wake-up.
 * The function is called without any locks held, thus the semaphore array
 * could be destroyed already and the tasks can disappear as soon as the
 * status is set to the actual return code.
 */
static void wake_up_sem_queue_do(struct list_head *pt)
{
	struct sem_queue *q, *t;
	int did_something;

	did_something = !list_empty(pt);
	list_for_each_entry_safe(q, t, pt, list) {
		wake_up_process(q->sleeper);
		/* q can disappear immediately after writing q->status. */
		smp_wmb();
		q->status = q->pid;
	}
	if (did_something)
		preempt_enable();
}
b97e820ff
|
692 693 694 |
static void unlink_queue(struct sem_array *sma, struct sem_queue *q) { list_del(&q->list); |
9f1bc2c90
|
695 |
if (q->nsops > 1) |
b97e820ff
|
696 697 |
sma->complex_count--; } |
fd5db4225
|
698 699 700 701 702 703 704 |
/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * modified the array.
 * Note that wait-for-zero operations are handled without restart.
 *
 * Returns 1 if the caller must restart the queue scan, 0 otherwise.
 */
static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	/* pending complex alter operations are too difficult to analyse */
	if (!list_empty(&sma->pending_alter))
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	/* It is impossible that someone waits for the new value:
	 * - complex operations always restart.
	 * - wait-for-zero are handled separately.
	 * - q is a previously sleeping simple operation that
	 *   altered the array. It must be a decrement, because
	 *   simple increments never sleep.
	 * - If there are older (higher priority) decrements
	 *   in the queue, then they have observed the original
	 *   semval value and couldn't proceed. The operation
	 *   decremented to value - thus they won't proceed either.
	 */
	return 0;
}
fd5db4225
|
730 |
|
1a82e9e1d
|
731 |
/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @pt: list head for the tasks that must be woken up.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
				struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sem_base[semnum].pending_const;

	/*
	 * Open-coded walk (not list_for_each_entry_safe): the next
	 * pointer is read before the entry may be unlinked below.
	 */
	walk = pending_list->next;
	while (walk != pending_list) {
		int error;

		q = container_of(walk, struct sem_queue, list);
		walk = walk->next;

		error = perform_atomic_semop(sma, q);

		if (error <= 0) {
			/* operation completed, remove from queue & wakeup */
			unlink_queue(sma, q);

			wake_up_sem_queue_prepare(pt, q, error);
			if (error == 0)
				semop_completed = 1;
		}
	}

	return semop_completed;
}

/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @pt: list head of the tasks that must be woken up.
 *
 * Checks all required queue for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
					int nsops, struct list_head *pt)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;

			if (sma->sem_base[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, pt);
			}
		}
	} else {
		/*
		 * No sops means modified semaphores not known.
		 * Assume all were changed.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sem_base[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, pt);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, pt);

	return semop_completed;
}
636c6be82
|
828 829 |
/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified, or -1 for the global queue.
 * @pt: list head for the tasks that must be woken up.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function internally checks if const operations can now succeed.
 *
 * The function returns 1 if at least one semop was completed successfully.
 *
 * The wake-up itself is deferred: the caller drains @pt via
 * wake_up_sem_queue_do() after dropping the locks.
 */
static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int semop_completed = 0;

	/* pick the per-semaphore or the array-wide pending-alter queue */
	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sem_base[semnum].pending_alter;

again:
	/* open-coded FIFO walk; entries may be unlinked while scanning */
	walk = pending_list->next;
	while (walk != pending_list) {
		int error, restart;

		q = container_of(walk, struct sem_queue, list);
		walk = walk->next;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan further: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the  per semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sem_base[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q);

		/* Does q->sleeper still need to sleep? (>0 means "would block") */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			/* a completed alter op may have released zero-waiters */
			do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(pt, q, error);
		if (restart)
			goto again;
	}
	return semop_completed;
}
0a2b9d4c7
|
895 |
/**
 * set_semotime - set sem_otime
 * @sma: semaphore array
 * @sops: operations that modified the array, may be NULL
 *
 * sem_otime is replicated to avoid cache line trashing.
 * This function sets one instance to the current time.
 */
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
	if (sops == NULL) {
		sma->sem_base[0].sem_otime = get_seconds();
	} else {
		sma->sem_base[sops[0].sem_num].sem_otime =
							get_seconds();
	}
}

/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @pt: list head of the tasks that must be woken up.
 *
 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 * based on the actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_sem_queue_do(@pt).
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			    int otime, struct list_head *pt)
{
	int i;

	/* completed const ops count as activity, so they set otime too */
	otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it. */
		otime |= update_queue(sma, -1, pt);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, pt);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   decrease.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops wont be able to run: If the
			 *   previous value was too small, then the new
			 *   value will be too small, too.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							      sops[i].sem_num, pt);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}
2f2ed41dc
|
964 |
/*
 * check_qop: Test if a queued operation sleeps on the semaphore semnum
 *
 * @q->blocking is the first sembuf of the queued operation that could
 * not proceed; per SUS a task counts as sleeping only on that semaphore.
 * Returns 1 if the task should be counted for semnum, 0 otherwise.
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
			bool count_zero)
{
	struct sembuf *sop = q->blocking;

	/*
	 * Linux always (since 0.99.10) reported a task as sleeping on all
	 * semaphores. This violates SUS, therefore it was changed to the
	 * standard compliant behavior.
	 * Give the administrators a chance to notice that an application
	 * might misbehave because it relies on the Linux behavior.
	 */
	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
			current->comm, task_pid_nr(current));

	if (sop->sem_num != semnum)
		return 0;

	/* zero-waiters (sem_op == 0) vs. decrementers (sem_op < 0) */
	if (count_zero && sop->sem_op == 0)
		return 1;
	if (!count_zero && sop->sem_op < 0)
		return 1;

	return 0;
}
1da177e4c
|
994 995 996 |
/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 *
 * Per definition, a task waits only on the semaphore of the first semop
 * that cannot proceed, even if additional operation would block, too.
 *
 * Implements semctl(GETNCNT) (@count_zero == false) and
 * semctl(GETZCNT) (@count_zero == true) for semaphore @semnum.
 */
static int count_semcnt(struct sem_array *sma, ushort semnum,
			bool count_zero)
{
	struct list_head *l;
	struct sem_queue *q;
	int semcnt;

	semcnt = 0;
	/* First: check the simple operations. They are easy to evaluate */
	if (count_zero)
		l = &sma->sem_base[semnum].pending_const;
	else
		l = &sma->sem_base[semnum].pending_alter;

	list_for_each_entry(q, l, list) {
		/* all task on a per-semaphore list sleep on exactly
		 * that semaphore
		 */
		semcnt++;
	}

	/* Then: check the complex operations. */
	list_for_each_entry(q, &sma->pending_alter, list) {
		semcnt += check_qop(sma, semnum, q, count_zero);
	}
	if (count_zero) {
		/* complex zero-wait ops live on the global pending_const list */
		list_for_each_entry(q, &sma->pending_const, list) {
			semcnt += check_qop(sma, semnum, q, count_zero);
		}
	}
	return semcnt;
}
d9a605e40
|
1032 1033 |
/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set hold. sem_ids.rwsem
 * remains locked on exit.
 *
 * All sleeping operations are failed with -EIDRM; the wake-ups themselves
 * are deferred until after the locks are dropped.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	struct list_head tasks;
	int i;

	/* Free the existing undo structures for this semaphore set.  */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;	/* mark stale for racing lookup_undo() users */
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	INIT_LIST_HEAD(&tasks);
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}

	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}
	/* and the same for every per-semaphore queue */
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = sma->sem_base + i;
		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
		}
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	wake_up_sem_queue_do(&tasks);
	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(sma, sem_rcu_free);
}

/*
 * copy_semid_to_user - copy a semid64_ds to user space in the ABI
 * layout selected by @version (IPC_64 native, IPC_OLD legacy semid_ds).
 * Returns nonzero bytes-not-copied on fault, -EINVAL for bad @version.
 */
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
d12e1e50e
|
1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 |
/*
 * get_semotime - return the time of the last semop().
 * sem_otime is replicated per semaphore (see set_semotime()), so the
 * array-wide value is the newest of all per-semaphore copies.
 */
static time_t get_semotime(struct sem_array *sma)
{
	time_t newest;
	int idx;

	newest = sma->sem_base[0].sem_otime;
	for (idx = sma->sem_nsems - 1; idx >= 1; idx--) {
		if (sma->sem_base[idx].sem_otime > newest)
			newest = sma->sem_base[idx].sem_otime;
	}
	return newest;
}
4b9fcb0ec
|
1123 |
/*
 * semctl_nolock - handle the semctl commands that do not need the
 * per-array spinlock: IPC_INFO/SEM_INFO (namespace-wide limits and
 * usage) and IPC_STAT/SEM_STAT (per-array status, under RCU only).
 */
static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, void __user *p)
{
	int err;
	struct sem_array *sma;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo, 0, sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rwsem);
		if (cmd == SEM_INFO) {
			/* SEM_INFO reports actual usage, IPC_INFO the limits */
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rwsem);
		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case IPC_STAT:
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id = 0;

		memset(&tbuf, 0, sizeof(tbuf));

		rcu_read_lock();
		if (cmd == SEM_STAT) {
			/* SEM_STAT takes an index and returns the ipc id */
			sma = sem_obtain_object(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
			id = sma->sem_perm.id;
		} else {
			sma = sem_obtain_object_check(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
		}

		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime = get_semotime(sma);
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		rcu_read_unlock();
		if (copy_semid_to_user(p, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
out_unlock:
	rcu_read_unlock();
	return err;
}
e1fd1f490
|
1209 1210 1211 1212 1213 |
/*
 * semctl_setval - implement semctl(SETVAL): set semaphore @semnum of
 * array @semid to the value packed into @arg, clear all undo
 * adjustments for that semaphore and retry sleeping operations.
 */
static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
		unsigned long arg)
{
	struct sem_undo *un;
	struct sem_array *sma;
	struct sem *curr;
	int err;
	struct list_head tasks;
	int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
	/* big-endian 64bit */
	val = arg >> 32;
#else
	/* 32bit or little-endian 64bit */
	val = arg;
#endif

	if (val > SEMVMX || val < 0)
		return -ERANGE;

	INIT_LIST_HEAD(&tasks);

	/* permission and validity checks are done under RCU only */
	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}


	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(sma, SETVAL);
	if (err) {
		rcu_read_unlock();
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	/* the array may have been removed between the checks and the lock */
	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		return -EIDRM;
	}

	curr = &sma->sem_base[semnum];

	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	curr->sempid = task_tgid_vnr(current);
	sma->sem_ctime = get_seconds();
	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &tasks);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
	return 0;
}
e38935341
|
1276 |
/*
 * semctl_main - implement the semctl commands that operate on semaphore
 * values: GETALL/SETALL and the single-semaphore queries
 * GETVAL/GETPID/GETNCNT/GETZCNT.
 *
 * GETALL and SETALL may need a buffer larger than the on-stack
 * fast_sem_io; allocating it requires dropping the array lock, so the
 * array is pinned with ipc_rcu_getref() across the unlocked window and
 * re-validated afterwards.
 */
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, void __user *p)
{
	struct sem_array *sma;
	struct sem *curr;
	int err, nsems;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	struct list_head tasks;

	INIT_LIST_HEAD(&tasks);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_rcu_wakeup;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = p;
		int i;

		sem_lock(sma, NULL, -1);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		if (nsems > SEMMSL_FAST) {
			/* pin the array, drop the lock, allocate, re-lock */
			if (!ipc_rcu_getref(sma)) {
				err = -EIDRM;
				goto out_unlock;
			}
			sem_unlock(sma, -1);
			rcu_read_unlock();
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				ipc_rcu_putref(sma, ipc_rcu_free);
				return -ENOMEM;
			}

			rcu_read_lock();
			sem_lock_and_putref(sma);
			if (!ipc_valid_object(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma, -1);
		rcu_read_unlock();
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		if (!ipc_rcu_getref(sma)) {
			err = -EIDRM;
			goto out_rcu_wakeup;
		}
		rcu_read_unlock();

		if (nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				ipc_rcu_putref(sma, ipc_rcu_free);
				return -ENOMEM;
			}
		}

		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
			ipc_rcu_putref(sma, ipc_rcu_free);
			err = -EFAULT;
			goto out_free;
		}

		/* validate all new values before touching the array */
		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(sma, ipc_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		rcu_read_lock();
		sem_lock_and_putref(sma);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];

		/* SETALL clears every undo adjustment for the array */
		ipc_assert_locked_object(&sma->sem_perm);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &tasks);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCTN, GETZCNT: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_rcu_wakeup;

	sem_lock(sma, NULL, -1);
	if (!ipc_valid_object(&sma->sem_perm)) {
		err = -EIDRM;
		goto out_unlock;
	}
	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semcnt(sma, semnum, 0);
		goto out_unlock;
	case GETZCNT:
		err = count_semcnt(sma, semnum, 1);
		goto out_unlock;
	}

out_unlock:
	sem_unlock(sma, -1);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
out_free:
	if (sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}
016d7132f
|
1432 1433 |
/*
 * copy_semid_from_user - fetch semaphore settings from user space.
 * @out: kernel buffer that receives the converted settings
 * @buf: user pointer; a semid64_ds for IPC_64, a legacy semid_ds for IPC_OLD
 * @version: IPC ABI version selector
 *
 * Returns 0 on success, -EFAULT if the user copy faults, -EINVAL for
 * an unknown @version.
 */
static inline unsigned long
		copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	if (version == IPC_64) {
		return copy_from_user(out, buf, sizeof(*out)) ? -EFAULT : 0;
	} else if (version == IPC_OLD) {
		struct semid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		/* only the permission fields are settable via IPC_SET */
		out->sem_perm.uid = tbuf_old.sem_perm.uid;
		out->sem_perm.gid = tbuf_old.sem_perm.gid;
		out->sem_perm.mode = tbuf_old.sem_perm.mode;

		return 0;
	}

	return -EINVAL;
}
522bb2a2b
|
1455 |
/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 *
 * Handles IPC_RMID (array removal via freeary()) and IPC_SET
 * (permission/ctime update).
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, void __user *p)
{
	struct sem_array *sma;
	int err;
	struct semid64_ds semid64;
	struct kern_ipc_perm *ipcp;

	if (cmd == IPC_SET) {
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
	}

	down_write(&sem_ids(ns).rwsem);
	rcu_read_lock();

	/* looks up the object and checks permissions, without the spinlock */
	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
				      &semid64.sem_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64.sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	sem_unlock(sma, -1);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rwsem);
	return err;
}
e1fd1f490
|
1513 |
/*
 * semctl(2) entry point: decode the ABI version from @cmd and dispatch
 * to the handler with the appropriate locking strategy.
 */
SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	int version;
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case IPC_STAT:
	case SEM_STAT:
		/* read-only commands, no per-array spinlock needed */
		return semctl_nolock(ns, semid, cmd, version, p);
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		/* SETVAL passes the raw value in arg, not a pointer */
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_RMID:
	case IPC_SET:
		/* these need sem_ids.rwsem held for writing */
		return semctl_down(ns, semid, cmd, version, p);
	default:
		return -EINVAL;
	}
}
1da177e4c
|
1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 |
/* If the task doesn't already have a undo_list, then allocate one
 * here.  We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		/* refcnt starts at 1 for the owning task */
		atomic_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}
bf17bb717
|
1575 |
/*
 * __lookup_undo - find the sem_undo for @semid on the per-task list.
 * RCU list walk; returns NULL if no undo structure exists yet.
 */
static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}
bf17bb717
|
1585 1586 1587 |
/*
 * lookup_undo - like __lookup_undo, but additionally moves a hit to the
 * front of the per-task list (MRU order), so repeated semops on the same
 * array find their undo structure quickly.  Caller must hold ulp->lock.
 */
static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}
4daa28f6d
|
1597 |
/**
 * find_alloc_undo - lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
 * performs a rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems, error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un != NULL))
		goto out;

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return ERR_CAST(sma);
	}

	nsems = sma->sem_nsems;
	/* pin the array: the allocation below may block */
	if (!ipc_rcu_getref(sma)) {
		rcu_read_unlock();
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	rcu_read_unlock();

	/* step 2: allocate new undo structure (semadj stored in-line) */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_rcu_putref(sma, ipc_rcu_free);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on semaphore array */
	rcu_read_lock();
	sem_lock_and_putref(sma);
	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo struct?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	ipc_assert_locked_object(&sma->sem_perm);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	sem_unlock(sma, -1);
out:
	return un;
}
c61284e99
|
1684 1685 |
/**
 * get_queue_result - retrieve the result code from sem_queue
 * @q: Pointer to queue structure
 *
 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
 * q->status, then we must loop until the value is replaced with the final
 * value: This may happen if a task is woken up by an unrelated event (e.g.
 * signal) and in parallel the task is woken up by another task because it got
 * the requested semaphores.
 *
 * The function can be called with or without holding the semaphore spinlock.
 */
static int get_queue_result(struct sem_queue *q)
{
	int error;

	error = q->status;
	while (unlikely(error == IN_WAKEUP)) {
		/* busy-wait: the waker replaces IN_WAKEUP very soon */
		cpu_relax();
		error = q->status;
	}

	return error;
}
d5460c997
|
1709 1710 |
/*
 * semtimedop(2): perform an array of semaphore operations atomically,
 * optionally bounded by a relative timeout.
 *
 * Fast path: the operations are copied in, permissions are checked under
 * RCU, the array is locked (per-semaphore lock when possible, see
 * sem_lock()), and perform_atomic_semop() is attempted.  If the operations
 * cannot proceed, the task enqueues itself on the appropriate pending list
 * and sleeps; it may be woken by a granting task, a signal, or timeout
 * expiry, and retries on spurious wakeups.
 *
 * Returns 0 on success or a negative errno (-EINVAL, -E2BIG, -ENOMEM,
 * -EFAULT, -EFBIG, -EACCES, -EIDRM, -EAGAIN, -EINTR).
 */
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops, const struct timespec __user *, timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];	/* avoids kmalloc for small nsops */
	struct sembuf *sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, alter = 0, max, locknum;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;
	struct ipc_namespace *ns;
	struct list_head tasks;		/* tasks to wake after dropping locks */

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if (nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
		if (sops == NULL)
			return -ENOMEM;
	}
	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	/*
	 * One pass over the operations: find the highest semaphore index
	 * (for the bounds check below), and whether any op requests undo
	 * tracking or modifies a semaphore value.
	 */
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = 1;
		if (sop->sem_op != 0)
			alter = 1;
	}

	INIT_LIST_HEAD(&tasks);

	if (undos) {
		/* On success, find_alloc_undo takes the rcu_read_lock */
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else {
		un = NULL;
		rcu_read_lock();
	}

	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	/* Bounds and permission checks are done under RCU, before locking. */
	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_rcu_wakeup;

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_rcu_wakeup;

	error = -EIDRM;
	locknum = sem_lock(sma, sops, nsops);
	/*
	 * We eventually might perform the following check in a lockless
	 * fashion, considering ipc_valid_object() locking constraints.
	 * If nsops == 1 and there is no contention for sem_perm.lock, then
	 * only a per-semaphore lock is held and it's OK to proceed with the
	 * check below. More details on the fine grained locking scheme
	 * entangled here and why it's RMID race safe on comments at sem_lock()
	 */
	if (!ipc_valid_object(&sma->sem_perm))
		goto out_unlock_free;
	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and now a new array with received the same id. Check and fail.
	 * This case can be detected checking un->semid. The existence of
	 * "un" itself is guaranteed by rcu.
	 */
	if (un && un->semid == -1)
		goto out_unlock_free;

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid_vnr(current);
	queue.alter = alter;

	error = perform_atomic_semop(sma, &queue);
	if (error == 0) {
		/* If the operation was successful, then do
		 * the required updates.
		 */
		if (alter)
			do_smart_update(sma, sops, nsops, 1, &tasks);
		else
			set_semotime(sma, sops);
	}
	/* error < 0: hard failure; error > 0: must block (would-sleep). */
	if (error <= 0)
		goto out_unlock_free;

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */

	if (nsops == 1) {
		struct sem *curr;
		curr = &sma->sem_base[sops->sem_num];

		if (alter) {
			if (sma->complex_count) {
				/* a complex op is pending: queue globally to
				 * preserve FIFO ordering with it */
				list_add_tail(&queue.list,
						&sma->pending_alter);
			} else {

				list_add_tail(&queue.list,
						&curr->pending_alter);
			}
		} else {
			list_add_tail(&queue.list, &curr->pending_const);
		}
	} else {
		/* complex op: pull per-semaphore queues into the global
		 * lists so ordering stays consistent */
		if (!sma->complex_count)
			merge_queues(sma);

		if (alter)
			list_add_tail(&queue.list, &sma->pending_alter);
		else
			list_add_tail(&queue.list, &sma->pending_const);

		sma->complex_count++;
	}

	queue.status = -EINTR;
	queue.sleeper = current;

sleep_again:
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma, locknum);
	rcu_read_unlock();

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = get_queue_result(&queue);

	if (error != -EINTR) {
		/* fast path: update_queue already obtained all requested
		 * resources.
		 * Perform a smp_mb(): User space could assume that semop()
		 * is a memory barrier: Without the mb(), the cpu could
		 * speculatively read in user space stale data that was
		 * overwritten by the previous owner of the semaphore.
		 */
		smp_mb();

		goto out_free;
	}

	rcu_read_lock();
	sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);

	/*
	 * Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
	 */
	error = get_queue_result(&queue);

	/*
	 * Array removed? If yes, leave without sem_unlock().
	 */
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		goto out_free;
	}


	/*
	 * If queue.status != -EINTR we are woken up by another process.
	 * Leave without unlink_queue(), but with sem_unlock().
	 */
	if (error != -EINTR)
		goto out_unlock_free;

	/*
	 * If an interrupt occurred we have to clean up the queue
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;

	/*
	 * If the wakeup was spurious, just retry
	 */
	if (error == -EINTR && !signal_pending(current))
		goto sleep_again;

	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma, locknum);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
out_free:
	if (sops != fast_sops)
		kfree(sops);
	return error;
}
d5460c997
|
1932 1933 |
/*
 * semop(2): semtimedop(2) without a timeout - a NULL timeout means the
 * caller may block indefinitely until the operations complete or a signal
 * is delivered.
 */
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}

/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
1da177e4c
|
1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 |
 */
int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		/* Share the parent's undo list; allocate it first if the
		 * parent does not have one yet. */
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		/* No sharing: the child starts without an undo list; one is
		 * allocated lazily on its first SEM_UNDO operation. */
		tsk->sysvsem.undo_list = NULL;

	return 0;
}

/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	/* The list may be shared (CLONE_SYSVSEM); only the last user
	 * performs the adjustments and frees it. */
	if (!atomic_dec_and_test(&ulp->refcnt))
		return;

	/* Process one undo entry per iteration until the list is empty. */
	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		struct list_head tasks;
		int semid, i;

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc)
			semid = -1;	/* list exhausted */
		 else
			semid = un->semid;

		if (semid == -1) {
			rcu_read_unlock();
			break;
		}

		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (IS_ERR(sma)) {
			rcu_read_unlock();
			continue;
		}

		sem_lock(sma, NULL, -1);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (!ipc_valid_object(&sma->sem_perm)) {
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}
		/* Re-lookup under the array lock: the entry peeked above was
		 * read under RCU only and may have gone stale. */
		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/* exit_sem raced with IPC_RMID+semget() that created
			 * exactly the same semid. Nothing to do.
			 */
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}

		/* remove un from the linked lists */
		ipc_assert_locked_object(&sma->sem_perm);
		list_del(&un->list_id);

		spin_lock(&ulp->lock);
		list_del_rcu(&un->list_proc);
		spin_unlock(&ulp->lock);

		/* perform adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *semaphore = &sma->sem_base[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by sus:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				semaphore->sempid = task_tgid_vnr(current);
			}
		}
		/* maybe some queued-up processes were waiting for this */
		INIT_LIST_HEAD(&tasks);
		do_smart_update(sma, NULL, 0, 1, &tasks);
		sem_unlock(sma, -1);
		rcu_read_unlock();
		wake_up_sem_queue_do(&tasks);

		/* Waiters may still dereference un under RCU; defer the free. */
		kfree_rcu(un, rcu);
	}
	kfree(ulp);
}

#ifdef CONFIG_PROC_FS
19b4946ca
|
2069 |
/*
 * Emit one /proc/sysvipc/sem row describing semaphore array @it:
 * key, id, mode, nsems, the owner/creator ids translated into the
 * reader's user namespace, and the otime/ctime stamps.
 */
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct sem_array *sma = it;
	time_t sem_otime;

	/*
	 * The proc interface isn't aware of sem_lock(), it calls
	 * ipc_lock_object() directly (in sysvipc_find_ipc).
	 * In order to stay compatible with sem_lock(), we must wait until
	 * all simple semop() calls have left their critical regions.
	 */
	sem_wait_array(sma);

	sem_otime = get_semotime(sma);

	return seq_printf(s,
			  "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
			  sma->sem_perm.key,
			  sma->sem_perm.id,
			  sma->sem_perm.mode,
			  sma->sem_nsems,
			  from_kuid_munged(user_ns, sma->sem_perm.uid),
			  from_kgid_munged(user_ns, sma->sem_perm.gid),
			  from_kuid_munged(user_ns, sma->sem_perm.cuid),
			  from_kgid_munged(user_ns, sma->sem_perm.cgid),
			  sem_otime,
			  sma->sem_ctime);
}
#endif