kernel/user.c

/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
#include "cred-internals.h"

struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(2),
	},
	.creator = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
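
/*
 * Note: __uidhashfn() folds the bits above UIDHASH_BITS back onto the low
 * bits before masking, so uids that differ only in their high bits still
 * land in different buckets while consecutive uids stay spread out.
 */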

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that.
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(2),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.user_ns	= &init_user_ns,
#ifdef CONFIG_USER_SCHED
	.tg		= &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
	put_user_ns(up->user_ns);
}
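
/*
 * With CONFIG_USER_SCHED, each user gets a task group of its own so the
 * CPU scheduler can divide time fairly between users rather than between
 * individual tasks; sched_create_user()/sched_destroy_user() tie that
 * group's lifetime to the user_struct.
 */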
#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group(&root_task_group);
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	set_tg_uid(up);

	return rc;
}

#else	/* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }

#endif	/* CONFIG_USER_SCHED */

#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

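/*
 * When the last reference goes away, free_user() only *schedules* the
 * teardown (see cleanup_user_struct() below); the entry stays in the hash
 * until that delayed work runs.  A lookup in this window can therefore
 * "resurrect" the user_struct: atomic_inc_return() returning 1 means the
 * count had already dropped to zero, so the pending teardown must be
 * cancelled before the object is handed out again.
 */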
static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			/* possibly resurrect an "almost deleted" object */
			if (atomic_inc_return(&user->__count) == 1)
				cancel_delayed_work(&user->work);
			return user;
		}
	}

	return NULL;
}

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	sscanf(buf, "%lu", &shares);

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
#endif

#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_runtime;
	int rc;

	sscanf(buf, "%ld", &rt_runtime);

	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);

static ssize_t cpu_rt_period_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_period;
	int rc;

	sscanf(buf, "%lu", &rt_period);

	rc = sched_group_set_rt_period(up->tg, rt_period);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
	__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	&cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	&cpu_rt_runtime_attr.attr,
	&cpu_rt_period_attr.attr,
#endif
	NULL
};

/* the lifetime of user_struct is not managed by the core (now) */
static void uids_release(struct kobject *kobj)
{
	return;
}

static struct kobj_type uids_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_attrs	= uids_attributes,
	.release	= uids_release,
};

/*
 * Create /sys/kernel/uids/<uid>/cpu_share file for this user.
 * We do not create this file for users in a user namespace (until
 * sysfs tagging is implemented).
 *
 * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
 */
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	if (up->user_ns != &init_user_ns)
		return 0;
	kobj->kset = uids_kset;
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
	if (error) {
		kobject_put(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);
done:
	return error;
}

/* create these entries in sysfs:
 *	"/sys/kernel/uids" directory
 *	"/sys/kernel/uids/0" directory (for root user)
 *	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}

/* delayed work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void cleanup_user_struct(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work.work);
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irqsave(&uidhash_lock, flags);
	if (atomic_read(&up->__count) == 0) {
		uid_hash_remove(up);
		remove_user = 1;
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	if (!remove_user)
		goto done;

	if (up->user_ns == &init_user_ns) {
		kobject_uevent(&up->kobj, KOBJ_REMOVE);
		kobject_del(&up->kobj);
		kobject_put(&up->kobj);
	}

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
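	/*
	 * Actual teardown cannot happen here: the caller holds uidhash_lock
	 * with IRQs disabled, while kobject_del()/kobject_put() may sleep.
	 * Defer the work to a workqueue instead.  The one second delay also
	 * leaves uid_hash_find() a window to resurrect the entry if the same
	 * uid reappears quickly; the exact grace period is a heuristic, not
	 * a hard requirement.
	 */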
	INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
	schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
	spin_unlock_irqrestore(&uidhash_lock, flags);
}

#else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif

#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
/*
 * We need to check if a setuid can take place. This function should be called
 * before successfully completing the setuid.
 */
int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
{
	return sched_rt_can_attach(up->tg, tsk);
}
#else
int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
{
	return 1;
}
#endif

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current_user_ns();

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;
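
	/*
	 * atomic_dec_and_lock() drops the count and takes uidhash_lock only
	 * when the count reaches zero.  IRQs are disabled by hand around it
	 * so that free_user() is entered with the lock held and IRQs off,
	 * matching the softirq-safety rules described at the top of this
	 * file.
	 */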
	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);
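
	/*
	 * Slow path: the uid is not hashed yet.  Allocate and set up a
	 * candidate outside the spinlock (the allocation may sleep), then
	 * re-check the hash under the lock in case another task raced in
	 * with the same uid first.
	 */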
	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		if (sched_create_user(new) < 0)
			goto out_free_user;

		new->user_ns = get_user_ns(ns);

		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already.
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;

out_destroy_sched:
	sched_destroy_user(new);
	put_user_ns(new->user_ns);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);