kernel/user.c
/* * The "user cache". * * (C) Copyright 1991-2000 Linus Torvalds * * We have a per-user structure to keep track of how many * processes, files etc the user has claimed, in order to be * able to have per-user limits for system resources. */ #include <linux/init.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/bitops.h> #include <linux/key.h> |
4021cb279
|
16 |
#include <linux/interrupt.h> |
acce292c8
|
17 18 |
#include <linux/module.h> #include <linux/user_namespace.h> |
d84f4f992
|
19 |
#include "cred-internals.h" |
1da177e4c
|
20 |
|
aee16ce73
|
21 22 |
struct user_namespace init_user_ns = { .kref = { |
18b6e0414
|
23 |
.refcount = ATOMIC_INIT(1), |
aee16ce73
|
24 |
}, |
18b6e0414
|
25 |
.creator = &root_user, |
aee16ce73
|
26 27 |
}; EXPORT_SYMBOL_GPL(init_user_ns); |
1da177e4c
|
28 29 30 31 |
/* * UID task count cache, to get fast user lookup in "alloc_uid" * when changing user ID's (ie setuid() and friends). */ |
1da177e4c
|
32 33 |
#define UIDHASH_MASK (UIDHASH_SZ - 1) #define __uidhashfn(uid) (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK) |
acce292c8
|
34 |
#define uidhashentry(ns, uid) ((ns)->uidhash_table + __uidhashfn((uid))) |
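
/*
 * Editor's sketch (not part of the original file): with the usual values
 * UIDHASH_BITS == 7 and UIDHASH_SZ == 128 (CONFIG_BASE_SMALL unset), the
 * hash folds the high bits of the uid onto the low bits before masking:
 *
 *	uid 1000:  (1000 >> 7) + 1000 = 7 + 1000 = 1007
 *	           1007 & 127 = 111   ->  bucket 111
 *
 * so uids that differ only above bit 6 still spread across buckets.
 */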

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
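
/*
 * Editor's sketch of the pattern the comment above mandates (illustrative,
 * not part of the original file): because the lock can be taken with IRQs
 * already disabled, callers use the irqsave variants rather than _bh:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&uidhash_lock, flags);
 *	... walk or modify the uid hash ...
 *	spin_unlock_irqrestore(&uidhash_lock, flags);
 *
 * find_user() and free_uid() below follow exactly this shape.
 */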
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(2),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.user_ns	= &init_user_ns,
#ifdef CONFIG_USER_SCHED
	.tg		= &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group(&root_task_group);
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	set_tg_uid(up);

	return rc;
}

#else	/* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }

#endif	/* CONFIG_USER_SCHED */

#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	sscanf(buf, "%lu", &shares);

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
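
/*
 * Editor's note (illustrative, not part of the original file): with
 * CONFIG_USER_SCHED and CONFIG_SYSFS enabled, the attribute above is a
 * regular sysfs file, so a uid's CPU share can be inspected and tuned
 * from userspace, e.g.:
 *
 *	cat /sys/kernel/uids/1000/cpu_share
 *	echo 2048 > /sys/kernel/uids/1000/cpu_share
 *
 * A write that sched_group_set_shares() rejects propagates the error code
 * back to the writer; otherwise the whole buffer counts as consumed.
 */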
#endif

#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_runtime;
	int rc;

	sscanf(buf, "%ld", &rt_runtime);

	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);

static ssize_t cpu_rt_period_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_period;
	int rc;

	sscanf(buf, "%lu", &rt_period);

	rc = sched_group_set_rt_period(up->tg, rt_period);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
	__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	&cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	&cpu_rt_runtime_attr.attr,
	&cpu_rt_period_attr.attr,
#endif
	NULL
};

/* the lifetime of user_struct is not managed by the core (now) */
static void uids_release(struct kobject *kobj)
{
	return;
}

static struct kobj_type uids_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = uids_attributes,
	.release = uids_release,
};

/*
 * Create /sys/kernel/uids/<uid>/cpu_share file for this user.
 * We do not create this file for users in a user namespace (until
 * sysfs tagging is implemented).
 *
 * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
 */
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	if (up->user_ns != &init_user_ns)
		return 0;
	kobj->kset = uids_kset;
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
	if (error) {
		kobject_put(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);
done:
	return error;
}

/* create these entries in sysfs:
 *	"/sys/kernel/uids" directory
 *	"/sys/kernel/uids/0" directory (for root user)
 *	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}

/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	unsigned long flags;
	int remove_user = 0;

	if (up->user_ns != &init_user_ns)
		return;
	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	kobject_uevent(&up->kobj, KOBJ_REMOVE);
	kobject_del(&up->kobj);
	kobject_put(&up->kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore back the count */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	put_user_ns(up->user_ns);
	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}
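
/*
 * Editor's note on the design (not part of the original file): free_user()
 * is entered with uidhash_lock held and IRQs off, but kobject_del() and the
 * rest of the sysfs teardown can sleep. So the refcount is bumped back up
 * to keep the user_struct alive, the lock is dropped, and the real teardown
 * is punted to a workqueue (remove_user_sysfs_dir), which redoes the
 * atomic_dec_and_lock() in a context where sleeping is allowed.
 */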
#else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	put_user_ns(up->user_ns);
	kmem_cache_free(uid_cachep, up);
}

#endif

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current_user_ns();

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
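
/*
 * Editor's sketch (illustrative, not part of the original file): find_user()
 * and free_uid() pair like get/put. A hypothetical caller inspecting a
 * user's process count would look like:
 *
 *	struct user_struct *user = find_user(uid);
 *
 *	if (user) {
 *		int nproc = atomic_read(&user->processes);
 *		free_uid(user);
 *	}
 *
 * free_uid() drops the reference find_user() took; passing NULL is safe,
 * since free_uid() returns immediately in that case.
 */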

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		if (sched_create_user(new) < 0)
			goto out_free_user;

		new->user_ns = get_user_ns(ns);

		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;

out_destroy_sched:
	sched_destroy_user(new);
	put_user_ns(new->user_ns);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}
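
/*
 * Editor's sketch (illustrative, not part of the original file): the
 * setuid() family is the main caller of alloc_uid(). Roughly, switching
 * a task to a new uid looks like:
 *
 *	struct user_struct *new_user = alloc_uid(current_user_ns(), uid);
 *
 *	if (!new_user)
 *		return -EAGAIN;
 *	(commit new_user into the task's credentials, then free_uid()
 *	 the old user_struct)
 *
 * On the race path above, the loser's half-built user_struct is freed and
 * the winner's entry is returned, so callers always see a single canonical
 * user_struct per (namespace, uid) pair.
 */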

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for(n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);