Blame view
ipc/shm.c
43 KB
b24413180
|
1 |
// SPDX-License-Identifier: GPL-2.0 |
1da177e4c
|
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 |
/* * linux/ipc/shm.c * Copyright (C) 1992, 1993 Krishna Balasubramanian * Many improvements/fixes by Bruno Haible. * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994. * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli. * * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com> * BIGMEM support, Andrea Arcangeli <andrea@suse.de> * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr> * HIGHMEM support, Ingo Molnar <mingo@redhat.com> * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com> * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com> * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com> * |
073115d6b
|
17 18 |
* support for audit of ipc object properties and permission changes * Dustin Kirkland <dustin.kirkland@us.ibm.com> |
4e9823111
|
19 20 21 22 |
* * namespaces support * OpenVZ, SWsoft Inc. * Pavel Emelianov <xemul@openvz.org> |
c2c737a04
|
23 24 25 |
* * Better ipc lock (kern_ipc_perm.lock) handling * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013. |
1da177e4c
|
26 |
*/ |
1da177e4c
|
27 28 29 30 31 32 33 |
#include <linux/slab.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/shm.h> #include <linux/init.h> #include <linux/file.h> #include <linux/mman.h> |
1da177e4c
|
34 35 36 37 |
#include <linux/shmem_fs.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/audit.h> |
c59ede7b7
|
38 |
#include <linux/capability.h> |
7d87e14c2
|
39 |
#include <linux/ptrace.h> |
19b4946ca
|
40 |
#include <linux/seq_file.h> |
3e148c799
|
41 |
#include <linux/rwsem.h> |
4e9823111
|
42 |
#include <linux/nsproxy.h> |
bc56bba8f
|
43 |
#include <linux/mount.h> |
ae5e1b22f
|
44 |
#include <linux/ipc_namespace.h> |
0eb71a9da
|
45 |
#include <linux/rhashtable.h> |
7d87e14c2
|
46 |
|
7153e4027
|
47 |
#include <linux/uaccess.h> |
1da177e4c
|
48 49 |
#include "util.h" |
/*
 * Kernel-private bookkeeping for one SysV shared memory segment.  The
 * user-visible shmid_ds/shmid64_ds views are generated from these fields.
 */
struct shmid_kernel /* private to the kernel */
{
	struct kern_ipc_perm	shm_perm;	/* permissions, key, id */
	struct file		*shm_file;	/* backing shmem/hugetlb file */
	unsigned long		shm_nattch;	/* number of live attaches */
	unsigned long		shm_segsz;	/* segment size in bytes */
	time64_t		shm_atim;	/* last attach time */
	time64_t		shm_dtim;	/* last detach time */
	time64_t		shm_ctim;	/* last change time */
	struct pid		*shm_cprid;	/* tgid of creating task */
	struct pid		*shm_lprid;	/* tgid of last attach/detach */
	struct user_struct	*mlock_user;	/* user charged for locked pages */

	/* The task created the shm object.  NULL if the task is dead. */
	struct task_struct	*shm_creator;
	struct list_head	shm_clist;	/* list by creator */
} __randomize_layout;

/* shm_mode upper byte flags */
#define SHM_DEST	01000	/* segment will be destroyed on last detach */
#define SHM_LOCKED	02000	/* segment will not be swapped */

/*
 * Per-attach state hung off a mapped shm file's ->private_data; links the
 * attach back to its IPC namespace, segment id and backing file.
 */
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
9a32144e9
|
79 |
static const struct file_operations shm_file_operations; |
f0f37e2f7
|
80 |
static const struct vm_operations_struct shm_vm_ops; |
1da177e4c
|
81 |
|
ed2ddbf88
|
82 |
#define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS]) |
1da177e4c
|
83 |
|
4e9823111
|
84 85 |
#define shm_unlock(shp) \ ipc_unlock(&(shp)->shm_perm) |
1da177e4c
|
86 |
|
7748dbfaa
|
87 |
static int newseg(struct ipc_namespace *, struct ipc_params *); |
bc56bba8f
|
88 89 |
static void shm_open(struct vm_area_struct *vma); static void shm_close(struct vm_area_struct *vma); |
239521f31
|
90 |
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp); |
1da177e4c
|
91 |
#ifdef CONFIG_PROC_FS |
19b4946ca
|
92 |
static int sysvipc_shm_proc_show(struct seq_file *s, void *it); |
1da177e4c
|
93 |
#endif |
/* Initialise the per-namespace SysV shm limits and the id allocator. */
void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		/* Still attached somewhere: only mark for destruction on
		 * last detach, and hide the key from further lookups. */
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	/* Remove every remaining segment, then tear down the id structures. */
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
}
#endif
static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

/* Register the /proc/sysvipc/shm interface. */
void __init shm_init(void)
{
	/*
	 * NOTE(review): the column-alignment whitespace inside the two header
	 * strings below appears to have been collapsed by transcription (the
	 * 32- and 64-bit variants should differ in field widths) -- verify
	 * the exact spacing against the upstream source before relying on it.
	 */
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap ",
#else
				" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap ",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}
/* RCU-side lookup of a segment by id; no lock taken, no validity re-check. */
static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/* As above, but also verifies that @id's sequence number still matches. */
static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 *
 * On success the segment is returned locked, with the RCU read lock
 * still held; on error the RCU read lock has been dropped.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp;

	rcu_read_lock();
	ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
	if (IS_ERR(ipcp))
		goto err;

	ipc_lock_object(ipcp);
	/*
	 * ipc_rmid() may have already freed the ID while ipc_lock_object()
	 * was spinning: here verify that the structure is still valid.
	 * Upon races with RMID, return -EIDRM, thus indicating that
	 * the ID points to a removed identifier.
	 */
	if (ipc_valid_object(ipcp)) {
		/* return a locked ipc object upon success */
		return container_of(ipcp, struct shmid_kernel, shm_perm);
	}

	ipc_unlock_object(ipcp);
	ipcp = ERR_PTR(-EIDRM);
err:
	rcu_read_unlock();
	/*
	 * Callers of shm_lock() must validate the status of the returned ipc
	 * object pointer and error out as appropriate.
	 */
	return ERR_CAST(ipcp);
}

/* Lock a segment the caller already holds a pointer to (takes RCU too). */
static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

/* RCU callback: release security state and free the shmid_kernel itself. */
static void shm_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
							rcu);
	struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
							shm_perm);

	security_shm_free(&shp->shm_perm);
	kvfree(shp);
}

/* Unlink the segment from its creator's list and drop its IPC id. */
static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	list_del(&s->shm_clist);
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}
1da177e4c
|
221 |
|
/*
 * Attach-side bookkeeping shared by shm_open() and shm_mmap(): re-look up
 * the segment by id, update attach time/pid and bump shm_nattch.
 * Returns 0 or a negative errno if the id is gone or was reused.
 */
static int __shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);

	if (IS_ERR(shp))
		return PTR_ERR(shp);

	if (shp->shm_file != sfd->file) {
		/* ID was reused */
		shm_unlock(shp);
		return -EINVAL;
	}

	shp->shm_atim = ktime_get_real_seconds();
	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
	shp->shm_nattch++;
	shm_unlock(shp);
	return 0;
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	int err = __shm_open(vma);

	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	WARN_ON_ONCE(err);
}
/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct file *shm_file;

	shm_file = shp->shm_file;
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	/* Undo any SHM_LOCK charge before dropping the backing file. */
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(i_size_read(file_inode(shm_file)),
				shp->mlock_user);
	fput(shm_file);
	ipc_update_pid(&shp->shm_cprid, NULL);
	ipc_update_pid(&shp->shm_lprid, NULL);
	/* Actual kvfree happens after a grace period, via shm_rcu_free(). */
	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);

	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	if (WARN_ON_ONCE(IS_ERR(shp)))
		goto done; /* no-op */

	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
	shp->shm_dtim = ktime_get_real_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
done:
	up_write(&shm_ids(ns).rwsem);
}
/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

/* Walk all segments in @ns and reap the orphaned, unattached ones. */
void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}

/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
	struct shmid_kernel *shp, *n;

	if (list_empty(&task->sysvshm.shm_clist))
		return;

	/*
	 * If kernel.shm_rmid_forced is not set then only keep track of
	 * which shmids are orphaned, so that a later set of the sysctl
	 * can clean them up.
	 */
	if (!ns->shm_rmid_forced) {
		down_read(&shm_ids(ns).rwsem);
		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
			shp->shm_creator = NULL;
		/*
		 * Only under read lock but we are only called on current
		 * so no entry on the list will be shared.
		 */
		list_del(&task->sysvshm.shm_clist);
		up_read(&shm_ids(ns).rwsem);
		return;
	}

	/*
	 * Destroy all already created segments, that were not yet mapped,
	 * and mark any mapped as orphan to cover the sysctl toggling.
	 * Destroy is skipped if shm_may_destroy() returns false.
	 */
	down_write(&shm_ids(ns).rwsem);
	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
		shp->shm_creator = NULL;

		if (shm_may_destroy(ns, shp)) {
			shm_lock_by_ptr(shp);
			shm_destroy(ns, shp);
		}
	}

	/* Remove the list head from any segments still attached. */
	list_del(&task->sysvshm.shm_clist);
	up_write(&shm_ids(ns).rwsem);
}
14f28f577
|
408 |
static vm_fault_t shm_fault(struct vm_fault *vmf) |
bc56bba8f
|
409 |
{ |
11bac8000
|
410 |
struct file *file = vmf->vma->vm_file; |
bc56bba8f
|
411 |
struct shm_file_data *sfd = shm_file_data(file); |
11bac8000
|
412 |
return sfd->vm_ops->fault(vmf); |
bc56bba8f
|
413 |
} |
3d942ee07
|
414 415 416 417 |
static int shm_split(struct vm_area_struct *vma, unsigned long addr) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); |
a61fc2cbd
|
418 |
if (sfd->vm_ops->split) |
3d942ee07
|
419 420 421 422 |
return sfd->vm_ops->split(vma, addr); return 0; } |
eec3636ad
|
423 424 425 426 427 428 429 430 431 432 |
static unsigned long shm_pagesize(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); if (sfd->vm_ops->pagesize) return sfd->vm_ops->pagesize(vma); return PAGE_SIZE; } |
#ifdef CONFIG_NUMA
/* Delegate NUMA memory-policy updates to the backing file's vm_ops. */
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

/*
 * NUMA policy lookup: prefer the backing file's op, fall back to the
 * VMA's own policy, and finally NULL (i.e. default policy).
 */
static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif
/*
 * mmap of an shm attach file: register the attach first, then hand the
 * actual mapping to the backing file, finally interpose shm_vm_ops.
 */
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	/*
	 * In case of remap_file_pages() emulation, the file can represent an
	 * IPC ID that was removed, and possibly even reused by another shm
	 * segment already. Propagate this case as an error to caller.
	 */
	ret = __shm_open(vma);
	if (ret)
		return ret;

	ret = call_mmap(sfd->file, vma);
	if (ret) {
		/* Undo the attach accounting done by __shm_open(). */
		shm_close(vma);
		return ret;
	}
	/* Remember the backing file's vm_ops so our ops can delegate. */
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	WARN_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	return 0;
}
/*
 * Last reference to an attach file: drop the namespace and backing-file
 * references, then free the per-attach state (sfd used until the kfree).
 */
static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	fput(sfd->file);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}
02c24a821
|
493 |
static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync) |
516dffdcd
|
494 |
{ |
516dffdcd
|
495 |
struct shm_file_data *sfd = shm_file_data(file); |
516dffdcd
|
496 |
|
7ea808591
|
497 498 |
if (!sfd->file->f_op->fsync) return -EINVAL; |
0f41074a6
|
499 |
return sfd->file->f_op->fsync(sfd->file, start, end, datasync); |
516dffdcd
|
500 |
} |
/* Forward fallocate() to the backing file, if it supports it. */
static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	/*
	 * NOTE(review): the outer attach file (not sfd->file) is passed down
	 * here -- presumably both share the same inode/mapping; verify.
	 */
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}
bc56bba8f
|
510 511 512 513 514 |
static unsigned long shm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct shm_file_data *sfd = shm_file_data(file); |
63980c80e
|
515 |
|
c4caa7781
|
516 517 |
return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len, pgoff, flags); |
bc56bba8f
|
518 |
} |
/* File operations installed on attach files for regular (shmem) segments. */
static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

/*
 * shm_file_operations_huge is now identical to shm_file_operations,
 * but we keep it distinct for the sake of is_file_shm_hugepages().
 */
static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

/* True iff @file uses the hugepage variant of the shm file operations. */
bool is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

/* VM operations interposed on every shm mapping (see shm_mmap()). */
static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
	.split	= shm_split,
	.pagesize = shm_pagesize,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};
/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 *
 * Returns the new segment's id on success, or a negative errno.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	/* Reject sizes whose page rounding overflowed numpages. */
	if (numpages << PAGE_SHIFT < size)
		return -ENOSPC;

	/* Both overflow of the namespace total and the SHMALL limit. */
	if (ns->shm_tot + numpages < ns->shm_tot ||
			ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
	if (unlikely(!shp))
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(&shp->shm_perm);
	if (error) {
		kvfree(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				&shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_kernel_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	shp->shm_cprid = get_pid(task_tgid(current));
	shp->shm_lprid = NULL;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = ktime_get_real_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	/* ipc_addid() locks shp upon success. */
	error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (error < 0)
		goto no_id;

	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	ipc_update_pid(&shp->shm_cprid, NULL);
	ipc_update_pid(&shp->shm_lprid, NULL);
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
	return error;
no_file:
	/* Security state was allocated; free shp via the RCU callback. */
	call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
	return error;
}
/*
 * Called with shm_ids.rwsem and ipcp locked.
 *
 * Extra check for shmget() on an existing key: the existing segment
 * must be at least as large as the requested size.
 */
static int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

/* Kernel entry point for shmget(2): create or look up a segment by key. */
long ksys_shmget(key_t key, size_t size, int shmflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops shm_ops = {
		.getnew = newseg,
		.associate = security_shm_associate,
		.more_checks = shm_more_checks,
	};
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	return ksys_shmget(key, size, shmflg);
}
/* Copy a shmid64_ds to userspace, translating to the old ABI if asked. */
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		/* Zero first: the old layout has padding we must not leak. */
		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Copy IPC_SET input from userspace into a shmid64_ds; only the
 * permission fields are meaningful in the old-ABI case.
 */
static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

/* Copy IPC_INFO limits to userspace, clamping shmmax for the old ABI. */
static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);

		/* nrpages counts huge pages here; convert to base pages. */
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);

		spin_lock_irq(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock_irq(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rwsem held as a reader
 *
 * Sum RSS and swap usage over every segment in the namespace.
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	/* The idr is sparse: probe ids until all in-use entries are seen. */
	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}
/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 *
 * Handles IPC_RMID and IPC_SET; any other @cmd yields -EINVAL.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid64_ds *shmid64)
{
	struct kern_ipc_perm *ipcp;
	struct shmid_kernel *shp;
	int err;

	down_write(&shm_ids(ns).rwsem);
	rcu_read_lock();

	/* Also performs the permission/ownership checks for @cmd. */
	ipcp = ipcctl_obtain_check(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64->shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(&shp->shm_perm, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64->shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = ktime_get_real_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rwsem);
	return err;
}
9ba720c18
|
881 882 |
static int shmctl_ipc_info(struct ipc_namespace *ns, struct shminfo64 *shminfo) |
8d4cc8b5c
|
883 |
{ |
9ba720c18
|
884 885 886 887 888 889 890 |
int err = security_shm_shmctl(NULL, IPC_INFO); if (!err) { memset(shminfo, 0, sizeof(*shminfo)); shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni; shminfo->shmmax = ns->shm_ctlmax; shminfo->shmall = ns->shm_ctlall; shminfo->shmmin = SHMMIN; |
d9a605e40
|
891 |
down_read(&shm_ids(ns).rwsem); |
27c331a17
|
892 |
err = ipc_get_maxidx(&shm_ids(ns)); |
d9a605e40
|
893 |
up_read(&shm_ids(ns).rwsem); |
239521f31
|
894 |
if (err < 0) |
1da177e4c
|
895 |
err = 0; |
1da177e4c
|
896 |
} |
9ba720c18
|
897 898 |
return err; } |
1da177e4c
|
899 |
|
9ba720c18
|
900 901 902 903 904 905 |
static int shmctl_shm_info(struct ipc_namespace *ns, struct shm_info *shm_info) { int err = security_shm_shmctl(NULL, SHM_INFO); if (!err) { memset(shm_info, 0, sizeof(*shm_info)); |
d9a605e40
|
906 |
down_read(&shm_ids(ns).rwsem); |
9ba720c18
|
907 908 909 910 911 |
shm_info->used_ids = shm_ids(ns).in_use; shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp); shm_info->shm_tot = ns->shm_tot; shm_info->swap_attempts = 0; shm_info->swap_successes = 0; |
27c331a17
|
912 |
err = ipc_get_maxidx(&shm_ids(ns)); |
d9a605e40
|
913 |
up_read(&shm_ids(ns).rwsem); |
9ba720c18
|
914 915 |
if (err < 0) err = 0; |
1da177e4c
|
916 |
} |
9ba720c18
|
917 918 |
return err; } |
/*
 * IPC_STAT / SHM_STAT / SHM_STAT_ANY: fill *tbuf with the segment's
 * permissions, size, times, pids and attach count.
 *
 * Returns 0 for IPC_STAT, the full ipc id (including sequence number)
 * for SHM_STAT/SHM_STAT_ANY, or a negative errno.
 */
static int shmctl_stat(struct ipc_namespace *ns, int shmid,
			 int cmd, struct shmid64_ds *tbuf)
{
	struct shmid_kernel *shp;
	int err;

	memset(tbuf, 0, sizeof(*tbuf));

	rcu_read_lock();
	if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) {
		/* SHM_STAT* take an index, not an id: no seq check */
		shp = shm_obtain_object(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock;
		}
	} else { /* IPC_STAT */
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock;
		}
	}

	/*
	 * Semantically SHM_STAT_ANY ought to be identical to
	 * that functionality provided by the /proc/sysvipc/
	 * interface. As such, only audit these calls and
	 * do not do traditional S_IRUGO permission checks on
	 * the ipc object.
	 */
	if (cmd == SHM_STAT_ANY)
		audit_ipc_obj(&shp->shm_perm);
	else {
		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;
	}

	err = security_shm_shmctl(&shp->shm_perm, cmd);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);

	/* the object may have been removed between lookup and lock */
	if (!ipc_valid_object(&shp->shm_perm)) {
		ipc_unlock_object(&shp->shm_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
	tbuf->shm_segsz	= shp->shm_segsz;
	tbuf->shm_atime	= shp->shm_atim;
	tbuf->shm_dtime	= shp->shm_dtim;
	tbuf->shm_ctime	= shp->shm_ctim;
#ifndef CONFIG_64BIT
	/* 32-bit ABI splits the 64-bit timestamps into low/high halves */
	tbuf->shm_atime_high = shp->shm_atim >> 32;
	tbuf->shm_dtime_high = shp->shm_dtim >> 32;
	tbuf->shm_ctime_high = shp->shm_ctim >> 32;
#endif
	tbuf->shm_cpid	= pid_vnr(shp->shm_cprid);
	tbuf->shm_lpid	= pid_vnr(shp->shm_lprid);
	tbuf->shm_nattch = shp->shm_nattch;

	if (cmd == IPC_STAT) {
		/*
		 * As defined in SUS:
		 * Return 0 on success
		 */
		err = 0;
	} else {
		/*
		 * SHM_STAT and SHM_STAT_ANY (both Linux specific)
		 * Return the full id, including the sequence number
		 */
		err = shp->shm_perm.id;
	}

	ipc_unlock_object(&shp->shm_perm);
out_unlock:
	rcu_read_unlock();
	return err;
}
/*
 * SHM_LOCK / SHM_UNLOCK: pin the segment's pages in memory, or release
 * a previous pin and move the pages back to the evictable LRU.
 *
 * Without CAP_IPC_LOCK the caller must own the segment and (for SHM_LOCK)
 * have a non-zero RLIMIT_MEMLOCK.  Hugetlb-backed segments are always
 * resident, so locking is a no-op for them.
 */
static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
{
	struct shmid_kernel *shp;
	struct file *shm_file;
	int err;

	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock1;
	}

	audit_ipc_obj(&(shp->shm_perm));
	err = security_shm_shmctl(&shp->shm_perm, cmd);
	if (err)
		goto out_unlock1;

	ipc_lock_object(&shp->shm_perm);

	/* check if shm_destroy() is tearing down shp */
	if (!ipc_valid_object(&shp->shm_perm)) {
		err = -EIDRM;
		goto out_unlock0;
	}

	if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
		kuid_t euid = current_euid();

		/* unprivileged callers must own the segment */
		if (!uid_eq(euid, shp->shm_perm.uid) &&
		    !uid_eq(euid, shp->shm_perm.cuid)) {
			err = -EPERM;
			goto out_unlock0;
		}
		if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
			err = -EPERM;
			goto out_unlock0;
		}
	}

	shm_file = shp->shm_file;
	/* hugetlb pages are never swapped; nothing to lock */
	if (is_file_hugepages(shm_file))
		goto out_unlock0;

	if (cmd == SHM_LOCK) {
		struct user_struct *user = current_user();

		err = shmem_lock(shm_file, 1, user);
		if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
			shp->shm_perm.mode |= SHM_LOCKED;
			/* remember who to charge the unlock against */
			shp->mlock_user = user;
		}
		goto out_unlock0;
	}

	/* SHM_UNLOCK */
	if (!(shp->shm_perm.mode & SHM_LOCKED))
		goto out_unlock0;
	shmem_lock(shm_file, 0, shp->mlock_user);
	shp->shm_perm.mode &= ~SHM_LOCKED;
	shp->mlock_user = NULL;
	/*
	 * Take a file reference so the mapping survives after we drop
	 * the ipc lock and RCU; shmem_unlock_mapping() can sleep.
	 */
	get_file(shm_file);
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	shmem_unlock_mapping(shm_file->f_mapping);

	fput(shm_file);
	return err;

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}
275f22148
|
1072 |
static long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf, int version) |
68eccc1dc
|
1073 |
{ |
275f22148
|
1074 |
int err; |
68eccc1dc
|
1075 |
struct ipc_namespace *ns; |
553f770ef
|
1076 |
struct shmid64_ds sem64; |
68eccc1dc
|
1077 |
|
2caacaa82
|
1078 1079 |
if (cmd < 0 || shmid < 0) return -EINVAL; |
68eccc1dc
|
1080 |
|
68eccc1dc
|
1081 1082 1083 |
ns = current->nsproxy->ipc_ns; switch (cmd) { |
9ba720c18
|
1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 |
case IPC_INFO: { struct shminfo64 shminfo; err = shmctl_ipc_info(ns, &shminfo); if (err < 0) return err; if (copy_shminfo_to_user(buf, &shminfo, version)) err = -EFAULT; return err; } case SHM_INFO: { struct shm_info shm_info; err = shmctl_shm_info(ns, &shm_info); if (err < 0) return err; if (copy_to_user(buf, &shm_info, sizeof(shm_info))) err = -EFAULT; return err; } |
68eccc1dc
|
1102 |
case SHM_STAT: |
c21a6970a
|
1103 |
case SHM_STAT_ANY: |
9ba720c18
|
1104 |
case IPC_STAT: { |
553f770ef
|
1105 |
err = shmctl_stat(ns, shmid, cmd, &sem64); |
9ba720c18
|
1106 1107 |
if (err < 0) return err; |
553f770ef
|
1108 |
if (copy_shmid_to_user(buf, &sem64, version)) |
9ba720c18
|
1109 1110 1111 |
err = -EFAULT; return err; } |
2caacaa82
|
1112 |
case IPC_SET: |
553f770ef
|
1113 |
if (copy_shmid_from_user(&sem64, buf, version)) |
9ba720c18
|
1114 |
return -EFAULT; |
df561f668
|
1115 |
fallthrough; |
9ba720c18
|
1116 |
case IPC_RMID: |
553f770ef
|
1117 |
return shmctl_down(ns, shmid, cmd, &sem64); |
1da177e4c
|
1118 1119 |
case SHM_LOCK: case SHM_UNLOCK: |
9ba720c18
|
1120 1121 1122 1123 1124 |
return shmctl_do_lock(ns, shmid, cmd); default: return -EINVAL; } } |
/* Native shmctl(2): always uses the IPC_64 structure layout. */
SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	return ksys_shmctl(shmid, cmd, buf, IPC_64);
}

#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
/*
 * Legacy entry point for architectures whose old shmctl encoded the
 * structure version in the command word; strip it out before dispatch.
 */
long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
{
	int version = ipc_parse_version(&cmd);

	return ksys_shmctl(shmid, cmd, buf, version);
}

SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	return ksys_old_shmctl(shmid, cmd, buf);
}
#endif
#ifdef CONFIG_COMPAT

/*
 * 32-bit user space layouts for shmctl(2) under a 64-bit kernel.
 * These mirror the 32-bit ABI exactly; field order and widths are ABI
 * and must not change.
 */

/* old (non-IPC_64) shmid_ds as seen by 32-bit user space */
struct compat_shmid_ds {
	struct compat_ipc_perm shm_perm;
	int shm_segsz;
	old_time32_t shm_atime;		/* 32-bit time_t fields */
	old_time32_t shm_dtime;
	old_time32_t shm_ctime;
	compat_ipc_pid_t shm_cpid;
	compat_ipc_pid_t shm_lpid;
	unsigned short shm_nattch;
	unsigned short shm_unused;	/* historical padding */
	compat_uptr_t shm_unused2;
	compat_uptr_t shm_unused3;
};

/* IPC_INFO result for 32-bit callers using the IPC_64 layout */
struct compat_shminfo64 {
	compat_ulong_t shmmax;
	compat_ulong_t shmmin;
	compat_ulong_t shmmni;
	compat_ulong_t shmseg;
	compat_ulong_t shmall;
	compat_ulong_t __unused1;
	compat_ulong_t __unused2;
	compat_ulong_t __unused3;
	compat_ulong_t __unused4;
};

/* SHM_INFO result for 32-bit callers */
struct compat_shm_info {
	compat_int_t used_ids;
	compat_ulong_t shm_tot, shm_rss, shm_swp;
	compat_ulong_t swap_attempts, swap_successes;
};
0f3d2b013
|
1178 |
|
553f770ef
|
1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 |
static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version) { if (in->shmmax > INT_MAX) in->shmmax = INT_MAX; if (version == IPC_64) { struct compat_shminfo64 info; memset(&info, 0, sizeof(info)); info.shmmax = in->shmmax; info.shmmin = in->shmmin; info.shmmni = in->shmmni; info.shmseg = in->shmseg; info.shmall = in->shmall; return copy_to_user(buf, &info, sizeof(info)); } else { struct shminfo info; memset(&info, 0, sizeof(info)); info.shmmax = in->shmmax; info.shmmin = in->shmmin; info.shmmni = in->shmmni; info.shmseg = in->shmseg; info.shmall = in->shmall; return copy_to_user(buf, &info, sizeof(info)); } } |
0f3d2b013
|
1204 |
|
553f770ef
|
1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 |
static int put_compat_shm_info(struct shm_info *ip, struct compat_shm_info __user *uip) { struct compat_shm_info info; memset(&info, 0, sizeof(info)); info.used_ids = ip->used_ids; info.shm_tot = ip->shm_tot; info.shm_rss = ip->shm_rss; info.shm_swp = ip->shm_swp; info.swap_attempts = ip->swap_attempts; info.swap_successes = ip->swap_successes; |
b776e4b1a
|
1217 |
return copy_to_user(uip, &info, sizeof(info)); |
553f770ef
|
1218 |
} |
1da177e4c
|
1219 |
|
553f770ef
|
1220 1221 1222 1223 1224 1225 |
static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version) { if (version == IPC_64) { struct compat_shmid64_ds v; memset(&v, 0, sizeof(v)); |
28327fae6
|
1226 |
to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm); |
c2ab975c3
|
1227 1228 1229 1230 1231 1232 |
v.shm_atime = lower_32_bits(in->shm_atime); v.shm_atime_high = upper_32_bits(in->shm_atime); v.shm_dtime = lower_32_bits(in->shm_dtime); v.shm_dtime_high = upper_32_bits(in->shm_dtime); v.shm_ctime = lower_32_bits(in->shm_ctime); v.shm_ctime_high = upper_32_bits(in->shm_ctime); |
553f770ef
|
1233 1234 1235 1236 1237 1238 1239 1240 |
v.shm_segsz = in->shm_segsz; v.shm_nattch = in->shm_nattch; v.shm_cpid = in->shm_cpid; v.shm_lpid = in->shm_lpid; return copy_to_user(buf, &v, sizeof(v)); } else { struct compat_shmid_ds v; memset(&v, 0, sizeof(v)); |
28327fae6
|
1241 |
to_compat_ipc_perm(&v.shm_perm, &in->shm_perm); |
553f770ef
|
1242 |
v.shm_perm.key = in->shm_perm.key; |
553f770ef
|
1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 |
v.shm_atime = in->shm_atime; v.shm_dtime = in->shm_dtime; v.shm_ctime = in->shm_ctime; v.shm_segsz = in->shm_segsz; v.shm_nattch = in->shm_nattch; v.shm_cpid = in->shm_cpid; v.shm_lpid = in->shm_lpid; return copy_to_user(buf, &v, sizeof(v)); } } |
85046579b
|
1253 |
|
553f770ef
|
1254 1255 1256 1257 1258 |
static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version) { memset(out, 0, sizeof(*out)); if (version == IPC_64) { |
6aa211e8c
|
1259 |
struct compat_shmid64_ds __user *p = buf; |
28327fae6
|
1260 |
return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm); |
553f770ef
|
1261 |
} else { |
6aa211e8c
|
1262 |
struct compat_shmid_ds __user *p = buf; |
28327fae6
|
1263 |
return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm); |
553f770ef
|
1264 |
} |
553f770ef
|
1265 |
} |
63980c80e
|
1266 |
|
1cd377baa
|
1267 |
static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int version) |
553f770ef
|
1268 1269 1270 |
{ struct ipc_namespace *ns; struct shmid64_ds sem64; |
553f770ef
|
1271 |
int err; |
85046579b
|
1272 |
|
553f770ef
|
1273 1274 1275 1276 |
ns = current->nsproxy->ipc_ns; if (cmd < 0 || shmid < 0) return -EINVAL; |
2caacaa82
|
1277 |
|
553f770ef
|
1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 |
switch (cmd) { case IPC_INFO: { struct shminfo64 shminfo; err = shmctl_ipc_info(ns, &shminfo); if (err < 0) return err; if (copy_compat_shminfo_to_user(uptr, &shminfo, version)) err = -EFAULT; return err; } case SHM_INFO: { struct shm_info shm_info; err = shmctl_shm_info(ns, &shm_info); if (err < 0) return err; if (put_compat_shm_info(&shm_info, uptr)) err = -EFAULT; |
8d4cc8b5c
|
1295 |
return err; |
2caacaa82
|
1296 |
} |
553f770ef
|
1297 |
case IPC_STAT: |
c21a6970a
|
1298 |
case SHM_STAT_ANY: |
553f770ef
|
1299 1300 1301 1302 |
case SHM_STAT: err = shmctl_stat(ns, shmid, cmd, &sem64); if (err < 0) return err; |
58aff0af7
|
1303 |
if (copy_compat_shmid_to_user(uptr, &sem64, version)) |
553f770ef
|
1304 1305 1306 1307 1308 1309 |
err = -EFAULT; return err; case IPC_SET: if (copy_compat_shmid_from_user(&sem64, uptr, version)) return -EFAULT; |
df561f668
|
1310 |
fallthrough; |
553f770ef
|
1311 1312 1313 1314 1315 |
case IPC_RMID: return shmctl_down(ns, shmid, cmd, &sem64); case SHM_LOCK: case SHM_UNLOCK: return shmctl_do_lock(ns, shmid, cmd); |
1da177e4c
|
1316 |
default: |
8d4cc8b5c
|
1317 |
return -EINVAL; |
1da177e4c
|
1318 |
} |
1da177e4c
|
1319 1320 |
return err; } |
/* 32-bit shmctl(2): always uses the compat IPC_64 layout. */
COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
{
	return compat_ksys_shmctl(shmid, cmd, uptr, IPC_64);
}

#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
/*
 * Legacy compat entry point for architectures whose old shmctl encoded
 * the structure version in the command word; strip it before dispatch.
 */
long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr)
{
	int version = compat_ipc_parse_version(&cmd);

	return compat_ksys_shmctl(shmid, cmd, uptr, version);
}

COMPAT_SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, void __user *, uptr)
{
	return compat_ksys_old_shmctl(shmid, cmd, uptr);
}
#endif
#endif
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg,
	      ulong *raddr, unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr = (unsigned long)shmaddr;
	unsigned long size;
	struct file *file, *base;
	int    err;
	unsigned long flags = MAP_SHARED;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	int f_flags;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;

	if (addr) {
		if (addr & (shmlba - 1)) {
			if (shmflg & SHM_RND) {
				addr &= ~(shmlba - 1);  /* round down */

				/*
				 * Ensure that the round-down is non-nil
				 * when remapping. This can happen for
				 * cases when addr < shmlba.
				 */
				if (!addr && (shmflg & SHM_REMAP))
					goto out;
			} else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}

		flags |= MAP_FIXED;
	} else if ((shmflg & SHM_REMAP))
		/* SHM_REMAP without an address makes no sense */
		goto out;

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_flags = O_RDONLY;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_flags = O_RDWR;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);

	/* check if shm_destroy() is tearing down shp */
	if (!ipc_valid_object(&shp->shm_perm)) {
		ipc_unlock_object(&shp->shm_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	/*
	 * We need to take a reference to the real shm file to prevent the
	 * pointer from becoming stale in cases where the lifetime of the outer
	 * file extends beyond that of the shm segment. It's not usually
	 * possible, but it can happen during remap_file_pages() emulation as
	 * that unmaps the memory, then does ->mmap() via file reference only.
	 * We'll deny the ->mmap() if the shm segment was since removed, but to
	 * detect shm ID reuse we need to compare the file pointers.
	 */
	base = get_file(shp->shm_file);
	/* bump the attach count while still locked so rmid can't free us */
	shp->shm_nattch++;
	size = i_size_read(file_inode(base));
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd) {
		fput(base);
		goto out_nattch;
	}

	file = alloc_file_clone(base, f_flags,
			  is_file_hugepages(base) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file)) {
		kfree(sfd);
		fput(base);
		goto out_nattch;
	}

	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = base;
	sfd->vm_ops = NULL;
	file->private_data = sfd;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	if (mmap_write_lock_killable(current->mm)) {
		err = -EINTR;
		goto out_fput;
	}

	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		/* reject address ranges that wrap around */
		if (addr + size < addr)
			goto invalid;

		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
	}

	addr = do_mmap(file, addr, size, prot, flags, 0, &populate, NULL);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	mmap_write_unlock(current->mm);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	/* drop the attach count taken above; destroy if we were the last */
	down_write(&shm_ids(ns).rwsem);
	shp = shm_lock(ns, shmid);
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
	return err;

out_unlock:
	rcu_read_unlock();
out:
	return err;
}
/*
 * shmat(2) wrapper: do_shmat() returns the mapped address through a
 * kernel pointer; propagate it as the (possibly "negative"-looking)
 * syscall return value.
 */
SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	/* addresses above TASK_SIZE/2 would otherwise look like errors */
	force_successful_syscall_return();
	return (long)ret;
}
#ifdef CONFIG_COMPAT

#ifndef COMPAT_SHMLBA
#define COMPAT_SHMLBA	SHMLBA
#endif

/* 32-bit shmat(2): same as the native wrapper but with the compat SHMLBA */
COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
#endif
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
long ksys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct file *file;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records it's size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size and that are from the
	 *   same shm segment from which we determined the size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise it starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			/*
			 * Record the file of the shm segment being
			 * unmapped. With mremap(), someone could place
			 * page from another segment but with equal offsets
			 * in the range we are unmapping.
			 */
			file = vma->vm_file;
			size = i_size_read(file_inode(vma->vm_file));
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
		    (vma->vm_file == file))
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
		vma = next;
	}

#else	/* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given
	 */
	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
		retval = 0;
	}

#endif

	mmap_write_unlock(mm);
	return retval;
}
/* shmdt(2): thin wrapper around the common detach implementation. */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	return ksys_shmdt(shmaddr);
}
#ifdef CONFIG_PROC_FS
/*
 * Emit one /proc/sysvipc/shm row for the given segment: key, id, mode,
 * size, pids, attach count, credentials, timestamps and rss/swap usage.
 * Pids and uids/gids are translated into the reader's namespaces.
 */
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
	struct user_namespace *user_ns = seq_user_ns(s);
	struct kern_ipc_perm *ipcp = it;
	struct shmid_kernel *shp;
	unsigned long rss = 0, swp = 0;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	shm_add_rss_swap(shp, &rss, &swp);

	/* segment sizes need a wider column on 64-bit kernels */
#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	/*
	 * NOTE(review): the blame rendering appears to have eaten the
	 * trailing newline escape in the format string; "\n" restored here
	 * to terminate the row — confirm against the original source.
	 */
	seq_printf(s,
		   "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
		   "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
		   SIZE_SPEC " " SIZE_SPEC "\n",
		   shp->shm_perm.key,
		   shp->shm_perm.id,
		   shp->shm_perm.mode,
		   shp->shm_segsz,
		   pid_nr_ns(shp->shm_cprid, pid_ns),
		   pid_nr_ns(shp->shm_lprid, pid_ns),
		   shp->shm_nattch,
		   from_kuid_munged(user_ns, shp->shm_perm.uid),
		   from_kgid_munged(user_ns, shp->shm_perm.gid),
		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
		   shp->shm_atim,
		   shp->shm_dtim,
		   shp->shm_ctim,
		   rss * PAGE_SIZE,
		   swp * PAGE_SIZE);

	return 0;
}
#endif