kernel/user.c

/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.gid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.projid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.count = ATOMIC_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.proc_inum = PROC_USER_INIT_INO,
#ifdef CONFIG_PERSISTENT_KEYRINGS
	.persistent_keyring_register_sem =
	__RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);
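
/*
 * Each map above is a single identity extent: with .first = 0,
 * .lower_first = 0 and .count = 4294967295U (UINT_MAX), every id in
 * the initial namespace maps to itself, e.g. uid 1000 stays uid 1000.
 * A child namespace would instead carry extents such as (illustrative
 * values only)
 *
 *	.first = 0, .lower_first = 100000, .count = 65536,
 *
 * mapping its uid 0 onto uid 100000 in the parent namespace.
 */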

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
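
/*
 * Worked example (illustrative): with UIDHASH_BITS == 7, uid 1000
 * hashes to ((1000 >> 7) + 1000) & 127 == 1007 & 127 == 111, so it
 * lands in bucket 111 of uidhash_table.  Folding the high bits into
 * the low ones before masking spreads out contiguously allocated UIDs.
 */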

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.uid		= GLOBAL_ROOT_UID,
};
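
/*
 * Note that root_user is statically allocated, not carved from
 * uid_cachep; uid_cache_init() below hashes it in at boot, and the
 * reference held by the init task's credentials keeps its __count
 * from ever dropping to zero and reaching free_user().
 */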

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.  The __releases() annotation lets sparse check
 * that lock balance, since the lock was taken by our caller.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
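
/*
 * Typical lookup pattern (sketch): find_user() hands back the
 * user_struct with its refcount already raised, and the caller is
 * responsible for dropping that reference again:
 *
 *	struct user_struct *u = find_user(uid);
 *	if (u) {
 *		...inspect or update the per-user counters...
 *		free_uid(u);
 *	}
 *
 * atomic_dec_and_lock() in free_uid() takes uidhash_lock only when the
 * count actually hits zero, so the common case stays lock-free.
 */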

struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/*
			 * We lost the race: drop our never-published
			 * copy.  Its keyrings are still NULL from
			 * kmem_cache_zalloc(), and key_put(NULL) is a
			 * no-op, so this is safe.
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out_unlock:
	return NULL;
}
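
/*
 * Callers that can sleep use alloc_uid() to pin a user_struct before
 * charging resources against it; for example (sketch, from memory of
 * how setuid() is wired up, so treat the details as illustrative):
 *
 *	struct user_struct *new_user = alloc_uid(kuid);
 *	if (!new_user)
 *		return -EAGAIN;
 *
 * A NULL return means the kmem_cache allocation failed.
 */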

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
subsys_initcall(uid_cache_init);