Commit b072e9bc2fe9aeff4e104e80e479160349f474a9
Committed by James Morris
1 parent 8bc16deabc
Exists in master and in 6 other branches
KEYS: Make the key reaper non-reentrant
Make the key reaper non-reentrant by sticking it on the appropriate system work queue when we queue it. This will allow it to have global state and drop locks. It should probably be non-reentrant already as it may spend a long time holding the key serial spinlock, and so multiple entrants can spend long periods of time just sitting there spinning, waiting to get the lock.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: James Morris <jmorris@namei.org>
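As a rough sketch (not part of the commit itself, and assuming the reaper work item key_gc_unused_work declared in security/keys/gc.c), the change amounts to which workqueue the reaper is queued on when the last reference to a key is dropped; the workqueue behaviour described reflects kernels of this era, where a work item re-queued while already running could execute concurrently on another CPU:

	/* Illustration only: the relevant hunk of key_put(). */
	if (atomic_dec_and_test(&key->usage))
		/* Before: schedule_work() places the reaper on the default
		 * system_wq, where re-queuing it while an earlier run is
		 * still executing can start a second, concurrent instance. */
		schedule_work(&key_gc_unused_work);

	if (atomic_dec_and_test(&key->usage))
		/* After: system_nrt_wq is the system-wide non-reentrant
		 * workqueue, so at most one instance of the reaper runs at
		 * any time, letting it keep global state and drop locks. */
		queue_work(system_nrt_wq, &key_gc_unused_work);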
Showing 1 changed file with 1 addition and 1 deletion (inline diff)
security/keys/key.c
1 | /* Basic authentication token and access key management | 1 | /* Basic authentication token and access key management |
2 | * | 2 | * |
3 | * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/poison.h> | 14 | #include <linux/poison.h> |
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/security.h> | 17 | #include <linux/security.h> |
18 | #include <linux/workqueue.h> | 18 | #include <linux/workqueue.h> |
19 | #include <linux/random.h> | 19 | #include <linux/random.h> |
20 | #include <linux/err.h> | 20 | #include <linux/err.h> |
21 | #include <linux/user_namespace.h> | 21 | #include <linux/user_namespace.h> |
22 | #include "internal.h" | 22 | #include "internal.h" |
23 | 23 | ||
24 | struct kmem_cache *key_jar; | 24 | struct kmem_cache *key_jar; |
25 | struct rb_root key_serial_tree; /* tree of keys indexed by serial */ | 25 | struct rb_root key_serial_tree; /* tree of keys indexed by serial */ |
26 | DEFINE_SPINLOCK(key_serial_lock); | 26 | DEFINE_SPINLOCK(key_serial_lock); |
27 | 27 | ||
28 | struct rb_root key_user_tree; /* tree of quota records indexed by UID */ | 28 | struct rb_root key_user_tree; /* tree of quota records indexed by UID */ |
29 | DEFINE_SPINLOCK(key_user_lock); | 29 | DEFINE_SPINLOCK(key_user_lock); |
30 | 30 | ||
31 | unsigned int key_quota_root_maxkeys = 200; /* root's key count quota */ | 31 | unsigned int key_quota_root_maxkeys = 200; /* root's key count quota */ |
32 | unsigned int key_quota_root_maxbytes = 20000; /* root's key space quota */ | 32 | unsigned int key_quota_root_maxbytes = 20000; /* root's key space quota */ |
33 | unsigned int key_quota_maxkeys = 200; /* general key count quota */ | 33 | unsigned int key_quota_maxkeys = 200; /* general key count quota */ |
34 | unsigned int key_quota_maxbytes = 20000; /* general key space quota */ | 34 | unsigned int key_quota_maxbytes = 20000; /* general key space quota */ |
35 | 35 | ||
36 | static LIST_HEAD(key_types_list); | 36 | static LIST_HEAD(key_types_list); |
37 | static DECLARE_RWSEM(key_types_sem); | 37 | static DECLARE_RWSEM(key_types_sem); |
38 | 38 | ||
39 | /* We serialise key instantiation and link */ | 39 | /* We serialise key instantiation and link */ |
40 | DEFINE_MUTEX(key_construction_mutex); | 40 | DEFINE_MUTEX(key_construction_mutex); |
41 | 41 | ||
42 | /* Any key whose type gets unregistered will be re-typed to this */ | 42 | /* Any key whose type gets unregistered will be re-typed to this */ |
43 | static struct key_type key_type_dead = { | 43 | static struct key_type key_type_dead = { |
44 | .name = "dead", | 44 | .name = "dead", |
45 | }; | 45 | }; |
46 | 46 | ||
47 | #ifdef KEY_DEBUGGING | 47 | #ifdef KEY_DEBUGGING |
48 | void __key_check(const struct key *key) | 48 | void __key_check(const struct key *key) |
49 | { | 49 | { |
50 | printk("__key_check: key %p {%08x} should be {%08x}\n", | 50 | printk("__key_check: key %p {%08x} should be {%08x}\n", |
51 | key, key->magic, KEY_DEBUG_MAGIC); | 51 | key, key->magic, KEY_DEBUG_MAGIC); |
52 | BUG(); | 52 | BUG(); |
53 | } | 53 | } |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | /* | 56 | /* |
57 | * Get the key quota record for a user, allocating a new record if one doesn't | 57 | * Get the key quota record for a user, allocating a new record if one doesn't |
58 | * already exist. | 58 | * already exist. |
59 | */ | 59 | */ |
60 | struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns) | 60 | struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns) |
61 | { | 61 | { |
62 | struct key_user *candidate = NULL, *user; | 62 | struct key_user *candidate = NULL, *user; |
63 | struct rb_node *parent = NULL; | 63 | struct rb_node *parent = NULL; |
64 | struct rb_node **p; | 64 | struct rb_node **p; |
65 | 65 | ||
66 | try_again: | 66 | try_again: |
67 | p = &key_user_tree.rb_node; | 67 | p = &key_user_tree.rb_node; |
68 | spin_lock(&key_user_lock); | 68 | spin_lock(&key_user_lock); |
69 | 69 | ||
70 | /* search the tree for a user record with a matching UID */ | 70 | /* search the tree for a user record with a matching UID */ |
71 | while (*p) { | 71 | while (*p) { |
72 | parent = *p; | 72 | parent = *p; |
73 | user = rb_entry(parent, struct key_user, node); | 73 | user = rb_entry(parent, struct key_user, node); |
74 | 74 | ||
75 | if (uid < user->uid) | 75 | if (uid < user->uid) |
76 | p = &(*p)->rb_left; | 76 | p = &(*p)->rb_left; |
77 | else if (uid > user->uid) | 77 | else if (uid > user->uid) |
78 | p = &(*p)->rb_right; | 78 | p = &(*p)->rb_right; |
79 | else if (user_ns < user->user_ns) | 79 | else if (user_ns < user->user_ns) |
80 | p = &(*p)->rb_left; | 80 | p = &(*p)->rb_left; |
81 | else if (user_ns > user->user_ns) | 81 | else if (user_ns > user->user_ns) |
82 | p = &(*p)->rb_right; | 82 | p = &(*p)->rb_right; |
83 | else | 83 | else |
84 | goto found; | 84 | goto found; |
85 | } | 85 | } |
86 | 86 | ||
87 | /* if we get here, we failed to find a match in the tree */ | 87 | /* if we get here, we failed to find a match in the tree */ |
88 | if (!candidate) { | 88 | if (!candidate) { |
89 | /* allocate a candidate user record if we don't already have | 89 | /* allocate a candidate user record if we don't already have |
90 | * one */ | 90 | * one */ |
91 | spin_unlock(&key_user_lock); | 91 | spin_unlock(&key_user_lock); |
92 | 92 | ||
93 | user = NULL; | 93 | user = NULL; |
94 | candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL); | 94 | candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL); |
95 | if (unlikely(!candidate)) | 95 | if (unlikely(!candidate)) |
96 | goto out; | 96 | goto out; |
97 | 97 | ||
98 | /* the allocation may have scheduled, so we need to repeat the | 98 | /* the allocation may have scheduled, so we need to repeat the |
99 | * search lest someone else added the record whilst we were | 99 | * search lest someone else added the record whilst we were |
100 | * asleep */ | 100 | * asleep */ |
101 | goto try_again; | 101 | goto try_again; |
102 | } | 102 | } |
103 | 103 | ||
104 | /* if we get here, then the user record still hadn't appeared on the | 104 | /* if we get here, then the user record still hadn't appeared on the |
105 | * second pass - so we use the candidate record */ | 105 | * second pass - so we use the candidate record */ |
106 | atomic_set(&candidate->usage, 1); | 106 | atomic_set(&candidate->usage, 1); |
107 | atomic_set(&candidate->nkeys, 0); | 107 | atomic_set(&candidate->nkeys, 0); |
108 | atomic_set(&candidate->nikeys, 0); | 108 | atomic_set(&candidate->nikeys, 0); |
109 | candidate->uid = uid; | 109 | candidate->uid = uid; |
110 | candidate->user_ns = get_user_ns(user_ns); | 110 | candidate->user_ns = get_user_ns(user_ns); |
111 | candidate->qnkeys = 0; | 111 | candidate->qnkeys = 0; |
112 | candidate->qnbytes = 0; | 112 | candidate->qnbytes = 0; |
113 | spin_lock_init(&candidate->lock); | 113 | spin_lock_init(&candidate->lock); |
114 | mutex_init(&candidate->cons_lock); | 114 | mutex_init(&candidate->cons_lock); |
115 | 115 | ||
116 | rb_link_node(&candidate->node, parent, p); | 116 | rb_link_node(&candidate->node, parent, p); |
117 | rb_insert_color(&candidate->node, &key_user_tree); | 117 | rb_insert_color(&candidate->node, &key_user_tree); |
118 | spin_unlock(&key_user_lock); | 118 | spin_unlock(&key_user_lock); |
119 | user = candidate; | 119 | user = candidate; |
120 | goto out; | 120 | goto out; |
121 | 121 | ||
122 | /* okay - we found a user record for this UID */ | 122 | /* okay - we found a user record for this UID */ |
123 | found: | 123 | found: |
124 | atomic_inc(&user->usage); | 124 | atomic_inc(&user->usage); |
125 | spin_unlock(&key_user_lock); | 125 | spin_unlock(&key_user_lock); |
126 | kfree(candidate); | 126 | kfree(candidate); |
127 | out: | 127 | out: |
128 | return user; | 128 | return user; |
129 | } | 129 | } |
130 | 130 | ||
131 | /* | 131 | /* |
132 | * Dispose of a user structure | 132 | * Dispose of a user structure |
133 | */ | 133 | */ |
134 | void key_user_put(struct key_user *user) | 134 | void key_user_put(struct key_user *user) |
135 | { | 135 | { |
136 | if (atomic_dec_and_lock(&user->usage, &key_user_lock)) { | 136 | if (atomic_dec_and_lock(&user->usage, &key_user_lock)) { |
137 | rb_erase(&user->node, &key_user_tree); | 137 | rb_erase(&user->node, &key_user_tree); |
138 | spin_unlock(&key_user_lock); | 138 | spin_unlock(&key_user_lock); |
139 | put_user_ns(user->user_ns); | 139 | put_user_ns(user->user_ns); |
140 | 140 | ||
141 | kfree(user); | 141 | kfree(user); |
142 | } | 142 | } |
143 | } | 143 | } |
144 | 144 | ||
145 | /* | 145 | /* |
146 | * Allocate a serial number for a key. These are assigned randomly to avoid | 146 | * Allocate a serial number for a key. These are assigned randomly to avoid |
147 | * security issues through covert channel problems. | 147 | * security issues through covert channel problems. |
148 | */ | 148 | */ |
149 | static inline void key_alloc_serial(struct key *key) | 149 | static inline void key_alloc_serial(struct key *key) |
150 | { | 150 | { |
151 | struct rb_node *parent, **p; | 151 | struct rb_node *parent, **p; |
152 | struct key *xkey; | 152 | struct key *xkey; |
153 | 153 | ||
154 | /* propose a random serial number and look for a hole for it in the | 154 | /* propose a random serial number and look for a hole for it in the |
155 | * serial number tree */ | 155 | * serial number tree */ |
156 | do { | 156 | do { |
157 | get_random_bytes(&key->serial, sizeof(key->serial)); | 157 | get_random_bytes(&key->serial, sizeof(key->serial)); |
158 | 158 | ||
159 | key->serial >>= 1; /* negative numbers are not permitted */ | 159 | key->serial >>= 1; /* negative numbers are not permitted */ |
160 | } while (key->serial < 3); | 160 | } while (key->serial < 3); |
161 | 161 | ||
162 | spin_lock(&key_serial_lock); | 162 | spin_lock(&key_serial_lock); |
163 | 163 | ||
164 | attempt_insertion: | 164 | attempt_insertion: |
165 | parent = NULL; | 165 | parent = NULL; |
166 | p = &key_serial_tree.rb_node; | 166 | p = &key_serial_tree.rb_node; |
167 | 167 | ||
168 | while (*p) { | 168 | while (*p) { |
169 | parent = *p; | 169 | parent = *p; |
170 | xkey = rb_entry(parent, struct key, serial_node); | 170 | xkey = rb_entry(parent, struct key, serial_node); |
171 | 171 | ||
172 | if (key->serial < xkey->serial) | 172 | if (key->serial < xkey->serial) |
173 | p = &(*p)->rb_left; | 173 | p = &(*p)->rb_left; |
174 | else if (key->serial > xkey->serial) | 174 | else if (key->serial > xkey->serial) |
175 | p = &(*p)->rb_right; | 175 | p = &(*p)->rb_right; |
176 | else | 176 | else |
177 | goto serial_exists; | 177 | goto serial_exists; |
178 | } | 178 | } |
179 | 179 | ||
180 | /* we've found a suitable hole - arrange for this key to occupy it */ | 180 | /* we've found a suitable hole - arrange for this key to occupy it */ |
181 | rb_link_node(&key->serial_node, parent, p); | 181 | rb_link_node(&key->serial_node, parent, p); |
182 | rb_insert_color(&key->serial_node, &key_serial_tree); | 182 | rb_insert_color(&key->serial_node, &key_serial_tree); |
183 | 183 | ||
184 | spin_unlock(&key_serial_lock); | 184 | spin_unlock(&key_serial_lock); |
185 | return; | 185 | return; |
186 | 186 | ||
187 | /* we found a key with the proposed serial number - walk the tree from | 187 | /* we found a key with the proposed serial number - walk the tree from |
188 | * that point looking for the next unused serial number */ | 188 | * that point looking for the next unused serial number */ |
189 | serial_exists: | 189 | serial_exists: |
190 | for (;;) { | 190 | for (;;) { |
191 | key->serial++; | 191 | key->serial++; |
192 | if (key->serial < 3) { | 192 | if (key->serial < 3) { |
193 | key->serial = 3; | 193 | key->serial = 3; |
194 | goto attempt_insertion; | 194 | goto attempt_insertion; |
195 | } | 195 | } |
196 | 196 | ||
197 | parent = rb_next(parent); | 197 | parent = rb_next(parent); |
198 | if (!parent) | 198 | if (!parent) |
199 | goto attempt_insertion; | 199 | goto attempt_insertion; |
200 | 200 | ||
201 | xkey = rb_entry(parent, struct key, serial_node); | 201 | xkey = rb_entry(parent, struct key, serial_node); |
202 | if (key->serial < xkey->serial) | 202 | if (key->serial < xkey->serial) |
203 | goto attempt_insertion; | 203 | goto attempt_insertion; |
204 | } | 204 | } |
205 | } | 205 | } |
206 | 206 | ||
207 | /** | 207 | /** |
208 | * key_alloc - Allocate a key of the specified type. | 208 | * key_alloc - Allocate a key of the specified type. |
209 | * @type: The type of key to allocate. | 209 | * @type: The type of key to allocate. |
210 | * @desc: The key description to allow the key to be searched out. | 210 | * @desc: The key description to allow the key to be searched out. |
211 | * @uid: The owner of the new key. | 211 | * @uid: The owner of the new key. |
212 | * @gid: The group ID for the new key's group permissions. | 212 | * @gid: The group ID for the new key's group permissions. |
213 | * @cred: The credentials specifying UID namespace. | 213 | * @cred: The credentials specifying UID namespace. |
214 | * @perm: The permissions mask of the new key. | 214 | * @perm: The permissions mask of the new key. |
215 | * @flags: Flags specifying quota properties. | 215 | * @flags: Flags specifying quota properties. |
216 | * | 216 | * |
217 | * Allocate a key of the specified type with the attributes given. The key is | 217 | * Allocate a key of the specified type with the attributes given. The key is |
218 | * returned in an uninstantiated state and the caller needs to instantiate the | 218 | * returned in an uninstantiated state and the caller needs to instantiate the |
219 | * key before returning. | 219 | * key before returning. |
220 | * | 220 | * |
221 | * The user's key count quota is updated to reflect the creation of the key and | 221 | * The user's key count quota is updated to reflect the creation of the key and |
222 | * the user's key data quota has the default for the key type reserved. The | 222 | * the user's key data quota has the default for the key type reserved. The |
223 | * instantiation function should amend this as necessary. If insufficient | 223 | * instantiation function should amend this as necessary. If insufficient |
224 | * quota is available, -EDQUOT will be returned. | 224 | * quota is available, -EDQUOT will be returned. |
225 | * | 225 | * |
226 | * The LSM security modules can prevent a key being created, in which case | 226 | * The LSM security modules can prevent a key being created, in which case |
227 | * -EACCES will be returned. | 227 | * -EACCES will be returned. |
228 | * | 228 | * |
229 | * Returns a pointer to the new key if successful and an error code otherwise. | 229 | * Returns a pointer to the new key if successful and an error code otherwise. |
230 | * | 230 | * |
231 | * Note that the caller needs to ensure the key type isn't uninstantiated. | 231 | * Note that the caller needs to ensure the key type isn't uninstantiated. |
232 | * Internally this can be done by locking key_types_sem. Externally, this can | 232 | * Internally this can be done by locking key_types_sem. Externally, this can |
233 | * be done by either never unregistering the key type, or making sure | 233 | * be done by either never unregistering the key type, or making sure |
234 | * key_alloc() calls don't race with module unloading. | 234 | * key_alloc() calls don't race with module unloading. |
235 | */ | 235 | */ |
236 | struct key *key_alloc(struct key_type *type, const char *desc, | 236 | struct key *key_alloc(struct key_type *type, const char *desc, |
237 | uid_t uid, gid_t gid, const struct cred *cred, | 237 | uid_t uid, gid_t gid, const struct cred *cred, |
238 | key_perm_t perm, unsigned long flags) | 238 | key_perm_t perm, unsigned long flags) |
239 | { | 239 | { |
240 | struct key_user *user = NULL; | 240 | struct key_user *user = NULL; |
241 | struct key *key; | 241 | struct key *key; |
242 | size_t desclen, quotalen; | 242 | size_t desclen, quotalen; |
243 | int ret; | 243 | int ret; |
244 | 244 | ||
245 | key = ERR_PTR(-EINVAL); | 245 | key = ERR_PTR(-EINVAL); |
246 | if (!desc || !*desc) | 246 | if (!desc || !*desc) |
247 | goto error; | 247 | goto error; |
248 | 248 | ||
249 | if (type->vet_description) { | 249 | if (type->vet_description) { |
250 | ret = type->vet_description(desc); | 250 | ret = type->vet_description(desc); |
251 | if (ret < 0) { | 251 | if (ret < 0) { |
252 | key = ERR_PTR(ret); | 252 | key = ERR_PTR(ret); |
253 | goto error; | 253 | goto error; |
254 | } | 254 | } |
255 | } | 255 | } |
256 | 256 | ||
257 | desclen = strlen(desc) + 1; | 257 | desclen = strlen(desc) + 1; |
258 | quotalen = desclen + type->def_datalen; | 258 | quotalen = desclen + type->def_datalen; |
259 | 259 | ||
260 | /* get hold of the key tracking for this user */ | 260 | /* get hold of the key tracking for this user */ |
261 | user = key_user_lookup(uid, cred->user->user_ns); | 261 | user = key_user_lookup(uid, cred->user->user_ns); |
262 | if (!user) | 262 | if (!user) |
263 | goto no_memory_1; | 263 | goto no_memory_1; |
264 | 264 | ||
265 | /* check that the user's quota permits allocation of another key and | 265 | /* check that the user's quota permits allocation of another key and |
266 | * its description */ | 266 | * its description */ |
267 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { | 267 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { |
268 | unsigned maxkeys = (uid == 0) ? | 268 | unsigned maxkeys = (uid == 0) ? |
269 | key_quota_root_maxkeys : key_quota_maxkeys; | 269 | key_quota_root_maxkeys : key_quota_maxkeys; |
270 | unsigned maxbytes = (uid == 0) ? | 270 | unsigned maxbytes = (uid == 0) ? |
271 | key_quota_root_maxbytes : key_quota_maxbytes; | 271 | key_quota_root_maxbytes : key_quota_maxbytes; |
272 | 272 | ||
273 | spin_lock(&user->lock); | 273 | spin_lock(&user->lock); |
274 | if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { | 274 | if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { |
275 | if (user->qnkeys + 1 >= maxkeys || | 275 | if (user->qnkeys + 1 >= maxkeys || |
276 | user->qnbytes + quotalen >= maxbytes || | 276 | user->qnbytes + quotalen >= maxbytes || |
277 | user->qnbytes + quotalen < user->qnbytes) | 277 | user->qnbytes + quotalen < user->qnbytes) |
278 | goto no_quota; | 278 | goto no_quota; |
279 | } | 279 | } |
280 | 280 | ||
281 | user->qnkeys++; | 281 | user->qnkeys++; |
282 | user->qnbytes += quotalen; | 282 | user->qnbytes += quotalen; |
283 | spin_unlock(&user->lock); | 283 | spin_unlock(&user->lock); |
284 | } | 284 | } |
285 | 285 | ||
286 | /* allocate and initialise the key and its description */ | 286 | /* allocate and initialise the key and its description */ |
287 | key = kmem_cache_alloc(key_jar, GFP_KERNEL); | 287 | key = kmem_cache_alloc(key_jar, GFP_KERNEL); |
288 | if (!key) | 288 | if (!key) |
289 | goto no_memory_2; | 289 | goto no_memory_2; |
290 | 290 | ||
291 | if (desc) { | 291 | if (desc) { |
292 | key->description = kmemdup(desc, desclen, GFP_KERNEL); | 292 | key->description = kmemdup(desc, desclen, GFP_KERNEL); |
293 | if (!key->description) | 293 | if (!key->description) |
294 | goto no_memory_3; | 294 | goto no_memory_3; |
295 | } | 295 | } |
296 | 296 | ||
297 | atomic_set(&key->usage, 1); | 297 | atomic_set(&key->usage, 1); |
298 | init_rwsem(&key->sem); | 298 | init_rwsem(&key->sem); |
299 | key->type = type; | 299 | key->type = type; |
300 | key->user = user; | 300 | key->user = user; |
301 | key->quotalen = quotalen; | 301 | key->quotalen = quotalen; |
302 | key->datalen = type->def_datalen; | 302 | key->datalen = type->def_datalen; |
303 | key->uid = uid; | 303 | key->uid = uid; |
304 | key->gid = gid; | 304 | key->gid = gid; |
305 | key->perm = perm; | 305 | key->perm = perm; |
306 | key->flags = 0; | 306 | key->flags = 0; |
307 | key->expiry = 0; | 307 | key->expiry = 0; |
308 | key->payload.data = NULL; | 308 | key->payload.data = NULL; |
309 | key->security = NULL; | 309 | key->security = NULL; |
310 | 310 | ||
311 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) | 311 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) |
312 | key->flags |= 1 << KEY_FLAG_IN_QUOTA; | 312 | key->flags |= 1 << KEY_FLAG_IN_QUOTA; |
313 | 313 | ||
314 | memset(&key->type_data, 0, sizeof(key->type_data)); | 314 | memset(&key->type_data, 0, sizeof(key->type_data)); |
315 | 315 | ||
316 | #ifdef KEY_DEBUGGING | 316 | #ifdef KEY_DEBUGGING |
317 | key->magic = KEY_DEBUG_MAGIC; | 317 | key->magic = KEY_DEBUG_MAGIC; |
318 | #endif | 318 | #endif |
319 | 319 | ||
320 | /* let the security module know about the key */ | 320 | /* let the security module know about the key */ |
321 | ret = security_key_alloc(key, cred, flags); | 321 | ret = security_key_alloc(key, cred, flags); |
322 | if (ret < 0) | 322 | if (ret < 0) |
323 | goto security_error; | 323 | goto security_error; |
324 | 324 | ||
325 | /* publish the key by giving it a serial number */ | 325 | /* publish the key by giving it a serial number */ |
326 | atomic_inc(&user->nkeys); | 326 | atomic_inc(&user->nkeys); |
327 | key_alloc_serial(key); | 327 | key_alloc_serial(key); |
328 | 328 | ||
329 | error: | 329 | error: |
330 | return key; | 330 | return key; |
331 | 331 | ||
332 | security_error: | 332 | security_error: |
333 | kfree(key->description); | 333 | kfree(key->description); |
334 | kmem_cache_free(key_jar, key); | 334 | kmem_cache_free(key_jar, key); |
335 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { | 335 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { |
336 | spin_lock(&user->lock); | 336 | spin_lock(&user->lock); |
337 | user->qnkeys--; | 337 | user->qnkeys--; |
338 | user->qnbytes -= quotalen; | 338 | user->qnbytes -= quotalen; |
339 | spin_unlock(&user->lock); | 339 | spin_unlock(&user->lock); |
340 | } | 340 | } |
341 | key_user_put(user); | 341 | key_user_put(user); |
342 | key = ERR_PTR(ret); | 342 | key = ERR_PTR(ret); |
343 | goto error; | 343 | goto error; |
344 | 344 | ||
345 | no_memory_3: | 345 | no_memory_3: |
346 | kmem_cache_free(key_jar, key); | 346 | kmem_cache_free(key_jar, key); |
347 | no_memory_2: | 347 | no_memory_2: |
348 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { | 348 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { |
349 | spin_lock(&user->lock); | 349 | spin_lock(&user->lock); |
350 | user->qnkeys--; | 350 | user->qnkeys--; |
351 | user->qnbytes -= quotalen; | 351 | user->qnbytes -= quotalen; |
352 | spin_unlock(&user->lock); | 352 | spin_unlock(&user->lock); |
353 | } | 353 | } |
354 | key_user_put(user); | 354 | key_user_put(user); |
355 | no_memory_1: | 355 | no_memory_1: |
356 | key = ERR_PTR(-ENOMEM); | 356 | key = ERR_PTR(-ENOMEM); |
357 | goto error; | 357 | goto error; |
358 | 358 | ||
359 | no_quota: | 359 | no_quota: |
360 | spin_unlock(&user->lock); | 360 | spin_unlock(&user->lock); |
361 | key_user_put(user); | 361 | key_user_put(user); |
362 | key = ERR_PTR(-EDQUOT); | 362 | key = ERR_PTR(-EDQUOT); |
363 | goto error; | 363 | goto error; |
364 | } | 364 | } |
365 | EXPORT_SYMBOL(key_alloc); | 365 | EXPORT_SYMBOL(key_alloc); |
366 | 366 | ||
367 | /** | 367 | /** |
368 | * key_payload_reserve - Adjust data quota reservation for the key's payload | 368 | * key_payload_reserve - Adjust data quota reservation for the key's payload |
369 | * @key: The key to make the reservation for. | 369 | * @key: The key to make the reservation for. |
370 | * @datalen: The amount of data payload the caller now wants. | 370 | * @datalen: The amount of data payload the caller now wants. |
371 | * | 371 | * |
372 | * Adjust the amount of the owning user's key data quota that a key reserves. | 372 | * Adjust the amount of the owning user's key data quota that a key reserves. |
373 | * If the amount is increased, then -EDQUOT may be returned if there isn't | 373 | * If the amount is increased, then -EDQUOT may be returned if there isn't |
374 | * enough free quota available. | 374 | * enough free quota available. |
375 | * | 375 | * |
376 | * If successful, 0 is returned. | 376 | * If successful, 0 is returned. |
377 | */ | 377 | */ |
378 | int key_payload_reserve(struct key *key, size_t datalen) | 378 | int key_payload_reserve(struct key *key, size_t datalen) |
379 | { | 379 | { |
380 | int delta = (int)datalen - key->datalen; | 380 | int delta = (int)datalen - key->datalen; |
381 | int ret = 0; | 381 | int ret = 0; |
382 | 382 | ||
383 | key_check(key); | 383 | key_check(key); |
384 | 384 | ||
385 | /* contemplate the quota adjustment */ | 385 | /* contemplate the quota adjustment */ |
386 | if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { | 386 | if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { |
387 | unsigned maxbytes = (key->user->uid == 0) ? | 387 | unsigned maxbytes = (key->user->uid == 0) ? |
388 | key_quota_root_maxbytes : key_quota_maxbytes; | 388 | key_quota_root_maxbytes : key_quota_maxbytes; |
389 | 389 | ||
390 | spin_lock(&key->user->lock); | 390 | spin_lock(&key->user->lock); |
391 | 391 | ||
392 | if (delta > 0 && | 392 | if (delta > 0 && |
393 | (key->user->qnbytes + delta >= maxbytes || | 393 | (key->user->qnbytes + delta >= maxbytes || |
394 | key->user->qnbytes + delta < key->user->qnbytes)) { | 394 | key->user->qnbytes + delta < key->user->qnbytes)) { |
395 | ret = -EDQUOT; | 395 | ret = -EDQUOT; |
396 | } | 396 | } |
397 | else { | 397 | else { |
398 | key->user->qnbytes += delta; | 398 | key->user->qnbytes += delta; |
399 | key->quotalen += delta; | 399 | key->quotalen += delta; |
400 | } | 400 | } |
401 | spin_unlock(&key->user->lock); | 401 | spin_unlock(&key->user->lock); |
402 | } | 402 | } |
403 | 403 | ||
404 | /* change the recorded data length if that didn't generate an error */ | 404 | /* change the recorded data length if that didn't generate an error */ |
405 | if (ret == 0) | 405 | if (ret == 0) |
406 | key->datalen = datalen; | 406 | key->datalen = datalen; |
407 | 407 | ||
408 | return ret; | 408 | return ret; |
409 | } | 409 | } |
410 | EXPORT_SYMBOL(key_payload_reserve); | 410 | EXPORT_SYMBOL(key_payload_reserve); |
411 | 411 | ||
412 | /* | 412 | /* |
413 | * Instantiate a key and link it into the target keyring atomically. Must be | 413 | * Instantiate a key and link it into the target keyring atomically. Must be |
414 | * called with the target keyring's semaphore writelocked. The target key's | 414 | * called with the target keyring's semaphore writelocked. The target key's |
415 | * semaphore need not be locked as instantiation is serialised by | 415 | * semaphore need not be locked as instantiation is serialised by |
416 | * key_construction_mutex. | 416 | * key_construction_mutex. |
417 | */ | 417 | */ |
418 | static int __key_instantiate_and_link(struct key *key, | 418 | static int __key_instantiate_and_link(struct key *key, |
419 | const void *data, | 419 | const void *data, |
420 | size_t datalen, | 420 | size_t datalen, |
421 | struct key *keyring, | 421 | struct key *keyring, |
422 | struct key *authkey, | 422 | struct key *authkey, |
423 | unsigned long *_prealloc) | 423 | unsigned long *_prealloc) |
424 | { | 424 | { |
425 | int ret, awaken; | 425 | int ret, awaken; |
426 | 426 | ||
427 | key_check(key); | 427 | key_check(key); |
428 | key_check(keyring); | 428 | key_check(keyring); |
429 | 429 | ||
430 | awaken = 0; | 430 | awaken = 0; |
431 | ret = -EBUSY; | 431 | ret = -EBUSY; |
432 | 432 | ||
433 | mutex_lock(&key_construction_mutex); | 433 | mutex_lock(&key_construction_mutex); |
434 | 434 | ||
435 | /* can't instantiate twice */ | 435 | /* can't instantiate twice */ |
436 | if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { | 436 | if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { |
437 | /* instantiate the key */ | 437 | /* instantiate the key */ |
438 | ret = key->type->instantiate(key, data, datalen); | 438 | ret = key->type->instantiate(key, data, datalen); |
439 | 439 | ||
440 | if (ret == 0) { | 440 | if (ret == 0) { |
441 | /* mark the key as being instantiated */ | 441 | /* mark the key as being instantiated */ |
442 | atomic_inc(&key->user->nikeys); | 442 | atomic_inc(&key->user->nikeys); |
443 | set_bit(KEY_FLAG_INSTANTIATED, &key->flags); | 443 | set_bit(KEY_FLAG_INSTANTIATED, &key->flags); |
444 | 444 | ||
445 | if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) | 445 | if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) |
446 | awaken = 1; | 446 | awaken = 1; |
447 | 447 | ||
448 | /* and link it into the destination keyring */ | 448 | /* and link it into the destination keyring */ |
449 | if (keyring) | 449 | if (keyring) |
450 | __key_link(keyring, key, _prealloc); | 450 | __key_link(keyring, key, _prealloc); |
451 | 451 | ||
452 | /* disable the authorisation key */ | 452 | /* disable the authorisation key */ |
453 | if (authkey) | 453 | if (authkey) |
454 | key_revoke(authkey); | 454 | key_revoke(authkey); |
455 | } | 455 | } |
456 | } | 456 | } |
457 | 457 | ||
458 | mutex_unlock(&key_construction_mutex); | 458 | mutex_unlock(&key_construction_mutex); |
459 | 459 | ||
460 | /* wake up anyone waiting for a key to be constructed */ | 460 | /* wake up anyone waiting for a key to be constructed */ |
461 | if (awaken) | 461 | if (awaken) |
462 | wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); | 462 | wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); |
463 | 463 | ||
464 | return ret; | 464 | return ret; |
465 | } | 465 | } |
466 | 466 | ||
467 | /** | 467 | /** |
468 | * key_instantiate_and_link - Instantiate a key and link it into the keyring. | 468 | * key_instantiate_and_link - Instantiate a key and link it into the keyring. |
469 | * @key: The key to instantiate. | 469 | * @key: The key to instantiate. |
470 | * @data: The data to use to instantiate the keyring. | 470 | * @data: The data to use to instantiate the keyring. |
471 | * @datalen: The length of @data. | 471 | * @datalen: The length of @data. |
472 | * @keyring: Keyring to create a link in on success (or NULL). | 472 | * @keyring: Keyring to create a link in on success (or NULL). |
473 | * @authkey: The authorisation token permitting instantiation. | 473 | * @authkey: The authorisation token permitting instantiation. |
474 | * | 474 | * |
475 | * Instantiate a key that's in the uninstantiated state using the provided data | 475 | * Instantiate a key that's in the uninstantiated state using the provided data |
476 | * and, if successful, link it in to the destination keyring if one is | 476 | * and, if successful, link it in to the destination keyring if one is |
477 | * supplied. | 477 | * supplied. |
478 | * | 478 | * |
479 | * If successful, 0 is returned, the authorisation token is revoked and anyone | 479 | * If successful, 0 is returned, the authorisation token is revoked and anyone |
480 | * waiting for the key is woken up. If the key was already instantiated, | 480 | * waiting for the key is woken up. If the key was already instantiated, |
481 | * -EBUSY will be returned. | 481 | * -EBUSY will be returned. |
482 | */ | 482 | */ |
483 | int key_instantiate_and_link(struct key *key, | 483 | int key_instantiate_and_link(struct key *key, |
484 | const void *data, | 484 | const void *data, |
485 | size_t datalen, | 485 | size_t datalen, |
486 | struct key *keyring, | 486 | struct key *keyring, |
487 | struct key *authkey) | 487 | struct key *authkey) |
488 | { | 488 | { |
489 | unsigned long prealloc; | 489 | unsigned long prealloc; |
490 | int ret; | 490 | int ret; |
491 | 491 | ||
492 | if (keyring) { | 492 | if (keyring) { |
493 | ret = __key_link_begin(keyring, key->type, key->description, | 493 | ret = __key_link_begin(keyring, key->type, key->description, |
494 | &prealloc); | 494 | &prealloc); |
495 | if (ret < 0) | 495 | if (ret < 0) |
496 | return ret; | 496 | return ret; |
497 | } | 497 | } |
498 | 498 | ||
499 | ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey, | 499 | ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey, |
500 | &prealloc); | 500 | &prealloc); |
501 | 501 | ||
502 | if (keyring) | 502 | if (keyring) |
503 | __key_link_end(keyring, key->type, prealloc); | 503 | __key_link_end(keyring, key->type, prealloc); |
504 | 504 | ||
505 | return ret; | 505 | return ret; |
506 | } | 506 | } |
507 | 507 | ||
508 | EXPORT_SYMBOL(key_instantiate_and_link); | 508 | EXPORT_SYMBOL(key_instantiate_and_link); |
509 | 509 | ||
510 | /** | 510 | /** |
511 | * key_reject_and_link - Negatively instantiate a key and link it into the keyring. | 511 | * key_reject_and_link - Negatively instantiate a key and link it into the keyring. |
512 | * @key: The key to instantiate. | 512 | * @key: The key to instantiate. |
513 | * @timeout: The timeout on the negative key. | 513 | * @timeout: The timeout on the negative key. |
514 | * @error: The error to return when the key is hit. | 514 | * @error: The error to return when the key is hit. |
515 | * @keyring: Keyring to create a link in on success (or NULL). | 515 | * @keyring: Keyring to create a link in on success (or NULL). |
516 | * @authkey: The authorisation token permitting instantiation. | 516 | * @authkey: The authorisation token permitting instantiation. |
517 | * | 517 | * |
518 | * Negatively instantiate a key that's in the uninstantiated state and, if | 518 | * Negatively instantiate a key that's in the uninstantiated state and, if |
519 | * successful, set its timeout and stored error and link it in to the | 519 | * successful, set its timeout and stored error and link it in to the |
520 | * destination keyring if one is supplied. The key and any links to the key | 520 | * destination keyring if one is supplied. The key and any links to the key |
521 | * will be automatically garbage collected after the timeout expires. | 521 | * will be automatically garbage collected after the timeout expires. |
522 | * | 522 | * |
523 | * Negative keys are used to rate limit repeated request_key() calls by causing | 523 | * Negative keys are used to rate limit repeated request_key() calls by causing |
524 | * them to return the stored error code (typically ENOKEY) until the negative | 524 | * them to return the stored error code (typically ENOKEY) until the negative |
525 | * key expires. | 525 | * key expires. |
526 | * | 526 | * |
527 | * If successful, 0 is returned, the authorisation token is revoked and anyone | 527 | * If successful, 0 is returned, the authorisation token is revoked and anyone |
528 | * waiting for the key is woken up. If the key was already instantiated, | 528 | * waiting for the key is woken up. If the key was already instantiated, |
529 | * -EBUSY will be returned. | 529 | * -EBUSY will be returned. |
530 | */ | 530 | */ |
531 | int key_reject_and_link(struct key *key, | 531 | int key_reject_and_link(struct key *key, |
532 | unsigned timeout, | 532 | unsigned timeout, |
533 | unsigned error, | 533 | unsigned error, |
534 | struct key *keyring, | 534 | struct key *keyring, |
535 | struct key *authkey) | 535 | struct key *authkey) |
536 | { | 536 | { |
537 | unsigned long prealloc; | 537 | unsigned long prealloc; |
538 | struct timespec now; | 538 | struct timespec now; |
539 | int ret, awaken, link_ret = 0; | 539 | int ret, awaken, link_ret = 0; |
540 | 540 | ||
541 | key_check(key); | 541 | key_check(key); |
542 | key_check(keyring); | 542 | key_check(keyring); |
543 | 543 | ||
544 | awaken = 0; | 544 | awaken = 0; |
545 | ret = -EBUSY; | 545 | ret = -EBUSY; |
546 | 546 | ||
547 | if (keyring) | 547 | if (keyring) |
548 | link_ret = __key_link_begin(keyring, key->type, | 548 | link_ret = __key_link_begin(keyring, key->type, |
549 | key->description, &prealloc); | 549 | key->description, &prealloc); |
550 | 550 | ||
551 | mutex_lock(&key_construction_mutex); | 551 | mutex_lock(&key_construction_mutex); |
552 | 552 | ||
553 | /* can't instantiate twice */ | 553 | /* can't instantiate twice */ |
554 | if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { | 554 | if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { |
555 | /* mark the key as being negatively instantiated */ | 555 | /* mark the key as being negatively instantiated */ |
556 | atomic_inc(&key->user->nikeys); | 556 | atomic_inc(&key->user->nikeys); |
557 | set_bit(KEY_FLAG_NEGATIVE, &key->flags); | 557 | set_bit(KEY_FLAG_NEGATIVE, &key->flags); |
558 | set_bit(KEY_FLAG_INSTANTIATED, &key->flags); | 558 | set_bit(KEY_FLAG_INSTANTIATED, &key->flags); |
559 | key->type_data.reject_error = -error; | 559 | key->type_data.reject_error = -error; |
560 | now = current_kernel_time(); | 560 | now = current_kernel_time(); |
561 | key->expiry = now.tv_sec + timeout; | 561 | key->expiry = now.tv_sec + timeout; |
562 | key_schedule_gc(key->expiry + key_gc_delay); | 562 | key_schedule_gc(key->expiry + key_gc_delay); |
563 | 563 | ||
564 | if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) | 564 | if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) |
565 | awaken = 1; | 565 | awaken = 1; |
566 | 566 | ||
567 | ret = 0; | 567 | ret = 0; |
568 | 568 | ||
569 | /* and link it into the destination keyring */ | 569 | /* and link it into the destination keyring */ |
570 | if (keyring && link_ret == 0) | 570 | if (keyring && link_ret == 0) |
571 | __key_link(keyring, key, &prealloc); | 571 | __key_link(keyring, key, &prealloc); |
572 | 572 | ||
573 | /* disable the authorisation key */ | 573 | /* disable the authorisation key */ |
574 | if (authkey) | 574 | if (authkey) |
575 | key_revoke(authkey); | 575 | key_revoke(authkey); |
576 | } | 576 | } |
577 | 577 | ||
578 | mutex_unlock(&key_construction_mutex); | 578 | mutex_unlock(&key_construction_mutex); |
579 | 579 | ||
580 | if (keyring) | 580 | if (keyring) |
581 | __key_link_end(keyring, key->type, prealloc); | 581 | __key_link_end(keyring, key->type, prealloc); |
582 | 582 | ||
583 | /* wake up anyone waiting for a key to be constructed */ | 583 | /* wake up anyone waiting for a key to be constructed */ |
584 | if (awaken) | 584 | if (awaken) |
585 | wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); | 585 | wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); |
586 | 586 | ||
587 | return ret == 0 ? link_ret : ret; | 587 | return ret == 0 ? link_ret : ret; |
588 | } | 588 | } |
589 | EXPORT_SYMBOL(key_reject_and_link); | 589 | EXPORT_SYMBOL(key_reject_and_link); |
590 | 590 | ||
591 | /** | 591 | /** |
592 | * key_put - Discard a reference to a key. | 592 | * key_put - Discard a reference to a key. |
593 | * @key: The key to discard a reference from. | 593 | * @key: The key to discard a reference from. |
594 | * | 594 | * |
595 | * Discard a reference to a key, and when all the references are gone, we | 595 | * Discard a reference to a key, and when all the references are gone, we |
596 | * schedule the cleanup task to come and pull it out of the tree in process | 596 | * schedule the cleanup task to come and pull it out of the tree in process |
597 | * context at some later time. | 597 | * context at some later time. |
598 | */ | 598 | */ |
599 | void key_put(struct key *key) | 599 | void key_put(struct key *key) |
600 | { | 600 | { |
601 | if (key) { | 601 | if (key) { |
602 | key_check(key); | 602 | key_check(key); |
603 | 603 | ||
604 | if (atomic_dec_and_test(&key->usage)) | 604 | if (atomic_dec_and_test(&key->usage)) |
605 | schedule_work(&key_gc_unused_work); | 605 | queue_work(system_nrt_wq, &key_gc_unused_work); |
606 | } | 606 | } |
607 | } | 607 | } |
608 | EXPORT_SYMBOL(key_put); | 608 | EXPORT_SYMBOL(key_put); |
609 | 609 | ||
610 | /* | 610 | /* |
611 | * Find a key by its serial number. | 611 | * Find a key by its serial number. |
612 | */ | 612 | */ |
613 | struct key *key_lookup(key_serial_t id) | 613 | struct key *key_lookup(key_serial_t id) |
614 | { | 614 | { |
615 | struct rb_node *n; | 615 | struct rb_node *n; |
616 | struct key *key; | 616 | struct key *key; |
617 | 617 | ||
618 | spin_lock(&key_serial_lock); | 618 | spin_lock(&key_serial_lock); |
619 | 619 | ||
620 | /* search the tree for the specified key */ | 620 | /* search the tree for the specified key */ |
621 | n = key_serial_tree.rb_node; | 621 | n = key_serial_tree.rb_node; |
622 | while (n) { | 622 | while (n) { |
623 | key = rb_entry(n, struct key, serial_node); | 623 | key = rb_entry(n, struct key, serial_node); |
624 | 624 | ||
625 | if (id < key->serial) | 625 | if (id < key->serial) |
626 | n = n->rb_left; | 626 | n = n->rb_left; |
627 | else if (id > key->serial) | 627 | else if (id > key->serial) |
628 | n = n->rb_right; | 628 | n = n->rb_right; |
629 | else | 629 | else |
630 | goto found; | 630 | goto found; |
631 | } | 631 | } |
632 | 632 | ||
633 | not_found: | 633 | not_found: |
634 | key = ERR_PTR(-ENOKEY); | 634 | key = ERR_PTR(-ENOKEY); |
635 | goto error; | 635 | goto error; |
636 | 636 | ||
637 | found: | 637 | found: |
638 | /* pretend it doesn't exist if it is awaiting deletion */ | 638 | /* pretend it doesn't exist if it is awaiting deletion */ |
639 | if (atomic_read(&key->usage) == 0) | 639 | if (atomic_read(&key->usage) == 0) |
640 | goto not_found; | 640 | goto not_found; |
641 | 641 | ||
642 | /* this races with key_put(), but that doesn't matter since key_put() | 642 | /* this races with key_put(), but that doesn't matter since key_put() |
643 | * doesn't actually change the key | 643 | * doesn't actually change the key |
644 | */ | 644 | */ |
645 | atomic_inc(&key->usage); | 645 | atomic_inc(&key->usage); |
646 | 646 | ||
647 | error: | 647 | error: |
648 | spin_unlock(&key_serial_lock); | 648 | spin_unlock(&key_serial_lock); |
649 | return key; | 649 | return key; |
650 | } | 650 | } |
651 | 651 | ||
652 | /* | 652 | /* |
653 | * Find and lock the specified key type against removal. | 653 | * Find and lock the specified key type against removal. |
654 | * | 654 | * |
655 | * We return with the sem read-locked if successful. If the type wasn't | 655 | * We return with the sem read-locked if successful. If the type wasn't |
656 | * available -ENOKEY is returned instead. | 656 | * available -ENOKEY is returned instead. |
657 | */ | 657 | */ |
658 | struct key_type *key_type_lookup(const char *type) | 658 | struct key_type *key_type_lookup(const char *type) |
659 | { | 659 | { |
660 | struct key_type *ktype; | 660 | struct key_type *ktype; |
661 | 661 | ||
662 | down_read(&key_types_sem); | 662 | down_read(&key_types_sem); |
663 | 663 | ||
664 | /* look up the key type to see if it's one of the registered kernel | 664 | /* look up the key type to see if it's one of the registered kernel |
665 | * types */ | 665 | * types */ |
666 | list_for_each_entry(ktype, &key_types_list, link) { | 666 | list_for_each_entry(ktype, &key_types_list, link) { |
667 | if (strcmp(ktype->name, type) == 0) | 667 | if (strcmp(ktype->name, type) == 0) |
668 | goto found_kernel_type; | 668 | goto found_kernel_type; |
669 | } | 669 | } |
670 | 670 | ||
671 | up_read(&key_types_sem); | 671 | up_read(&key_types_sem); |
672 | ktype = ERR_PTR(-ENOKEY); | 672 | ktype = ERR_PTR(-ENOKEY); |
673 | 673 | ||
674 | found_kernel_type: | 674 | found_kernel_type: |
675 | return ktype; | 675 | return ktype; |
676 | } | 676 | } |
677 | 677 | ||
678 | /* | 678 | /* |
679 | * Unlock a key type locked by key_type_lookup(). | 679 | * Unlock a key type locked by key_type_lookup(). |
680 | */ | 680 | */ |
681 | void key_type_put(struct key_type *ktype) | 681 | void key_type_put(struct key_type *ktype) |
682 | { | 682 | { |
683 | up_read(&key_types_sem); | 683 | up_read(&key_types_sem); |
684 | } | 684 | } |
685 | 685 | ||
686 | /* | 686 | /* |
687 | * Attempt to update an existing key. | 687 | * Attempt to update an existing key. |
688 | * | 688 | * |
689 | * The key is given to us with an incremented refcount that we need to discard | 689 | * The key is given to us with an incremented refcount that we need to discard |
690 | * if we get an error. | 690 | * if we get an error. |
691 | */ | 691 | */ |
692 | static inline key_ref_t __key_update(key_ref_t key_ref, | 692 | static inline key_ref_t __key_update(key_ref_t key_ref, |
693 | const void *payload, size_t plen) | 693 | const void *payload, size_t plen) |
694 | { | 694 | { |
695 | struct key *key = key_ref_to_ptr(key_ref); | 695 | struct key *key = key_ref_to_ptr(key_ref); |
696 | int ret; | 696 | int ret; |
697 | 697 | ||
698 | /* need write permission on the key to update it */ | 698 | /* need write permission on the key to update it */ |
699 | ret = key_permission(key_ref, KEY_WRITE); | 699 | ret = key_permission(key_ref, KEY_WRITE); |
700 | if (ret < 0) | 700 | if (ret < 0) |
701 | goto error; | 701 | goto error; |
702 | 702 | ||
703 | ret = -EEXIST; | 703 | ret = -EEXIST; |
704 | if (!key->type->update) | 704 | if (!key->type->update) |
705 | goto error; | 705 | goto error; |
706 | 706 | ||
707 | down_write(&key->sem); | 707 | down_write(&key->sem); |
708 | 708 | ||
709 | ret = key->type->update(key, payload, plen); | 709 | ret = key->type->update(key, payload, plen); |
710 | if (ret == 0) | 710 | if (ret == 0) |
711 | /* updating a negative key instantiates it */ | 711 | /* updating a negative key instantiates it */ |
712 | clear_bit(KEY_FLAG_NEGATIVE, &key->flags); | 712 | clear_bit(KEY_FLAG_NEGATIVE, &key->flags); |
713 | 713 | ||
714 | up_write(&key->sem); | 714 | up_write(&key->sem); |
715 | 715 | ||
716 | if (ret < 0) | 716 | if (ret < 0) |
717 | goto error; | 717 | goto error; |
718 | out: | 718 | out: |
719 | return key_ref; | 719 | return key_ref; |
720 | 720 | ||
721 | error: | 721 | error: |
722 | key_put(key); | 722 | key_put(key); |
723 | key_ref = ERR_PTR(ret); | 723 | key_ref = ERR_PTR(ret); |
724 | goto out; | 724 | goto out; |
725 | } | 725 | } |
726 | 726 | ||
727 | /** | 727 | /** |
728 | * key_create_or_update - Update or create and instantiate a key. | 728 | * key_create_or_update - Update or create and instantiate a key. |
729 | * @keyring_ref: A pointer to the destination keyring with possession flag. | 729 | * @keyring_ref: A pointer to the destination keyring with possession flag. |
730 | * @type: The type of key. | 730 | * @type: The type of key. |
731 | * @description: The searchable description for the key. | 731 | * @description: The searchable description for the key. |
732 | * @payload: The data to use to instantiate or update the key. | 732 | * @payload: The data to use to instantiate or update the key. |
733 | * @plen: The length of @payload. | 733 | * @plen: The length of @payload. |
734 | * @perm: The permissions mask for a new key. | 734 | * @perm: The permissions mask for a new key. |
735 | * @flags: The quota flags for a new key. | 735 | * @flags: The quota flags for a new key. |
736 | * | 736 | * |
737 | * Search the destination keyring for a key of the same description and if one | 737 | * Search the destination keyring for a key of the same description and if one |
738 | * is found, update it, otherwise create and instantiate a new one and create a | 738 | * is found, update it, otherwise create and instantiate a new one and create a |
739 | * link to it from that keyring. | 739 | * link to it from that keyring. |
740 | * | 740 | * |
741 | * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be | 741 | * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be |
742 | * concocted. | 742 | * concocted. |
743 | * | 743 | * |
744 | * Returns a pointer to the new key if successful, -ENODEV if the key type | 744 | * Returns a pointer to the new key if successful, -ENODEV if the key type |
745 | * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the | 745 | * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the |
746 | * caller isn't permitted to modify the keyring or the LSM did not permit | 746 | * caller isn't permitted to modify the keyring or the LSM did not permit |
747 | * creation of the key. | 747 | * creation of the key. |
748 | * | 748 | * |
749 | * On success, the possession flag from the keyring ref will be tacked on to | 749 | * On success, the possession flag from the keyring ref will be tacked on to |
750 | * the key ref before it is returned. | 750 | * the key ref before it is returned. |
751 | */ | 751 | */ |
752 | key_ref_t key_create_or_update(key_ref_t keyring_ref, | 752 | key_ref_t key_create_or_update(key_ref_t keyring_ref, |
753 | const char *type, | 753 | const char *type, |
754 | const char *description, | 754 | const char *description, |
755 | const void *payload, | 755 | const void *payload, |
756 | size_t plen, | 756 | size_t plen, |
757 | key_perm_t perm, | 757 | key_perm_t perm, |
758 | unsigned long flags) | 758 | unsigned long flags) |
759 | { | 759 | { |
760 | unsigned long prealloc; | 760 | unsigned long prealloc; |
761 | const struct cred *cred = current_cred(); | 761 | const struct cred *cred = current_cred(); |
762 | struct key_type *ktype; | 762 | struct key_type *ktype; |
763 | struct key *keyring, *key = NULL; | 763 | struct key *keyring, *key = NULL; |
764 | key_ref_t key_ref; | 764 | key_ref_t key_ref; |
765 | int ret; | 765 | int ret; |
766 | 766 | ||
767 | /* look up the key type to see if it's one of the registered kernel | 767 | /* look up the key type to see if it's one of the registered kernel |
768 | * types */ | 768 | * types */ |
769 | ktype = key_type_lookup(type); | 769 | ktype = key_type_lookup(type); |
770 | if (IS_ERR(ktype)) { | 770 | if (IS_ERR(ktype)) { |
771 | key_ref = ERR_PTR(-ENODEV); | 771 | key_ref = ERR_PTR(-ENODEV); |
772 | goto error; | 772 | goto error; |
773 | } | 773 | } |
774 | 774 | ||
775 | key_ref = ERR_PTR(-EINVAL); | 775 | key_ref = ERR_PTR(-EINVAL); |
776 | if (!ktype->match || !ktype->instantiate) | 776 | if (!ktype->match || !ktype->instantiate) |
777 | goto error_2; | 777 | goto error_2; |
778 | 778 | ||
779 | keyring = key_ref_to_ptr(keyring_ref); | 779 | keyring = key_ref_to_ptr(keyring_ref); |
780 | 780 | ||
781 | key_check(keyring); | 781 | key_check(keyring); |
782 | 782 | ||
783 | key_ref = ERR_PTR(-ENOTDIR); | 783 | key_ref = ERR_PTR(-ENOTDIR); |
784 | if (keyring->type != &key_type_keyring) | 784 | if (keyring->type != &key_type_keyring) |
785 | goto error_2; | 785 | goto error_2; |
786 | 786 | ||
787 | ret = __key_link_begin(keyring, ktype, description, &prealloc); | 787 | ret = __key_link_begin(keyring, ktype, description, &prealloc); |
788 | if (ret < 0) | 788 | if (ret < 0) |
789 | goto error_2; | 789 | goto error_2; |
790 | 790 | ||
791 | /* if we're going to allocate a new key, we're going to have | 791 | /* if we're going to allocate a new key, we're going to have |
792 | * to modify the keyring */ | 792 | * to modify the keyring */ |
793 | ret = key_permission(keyring_ref, KEY_WRITE); | 793 | ret = key_permission(keyring_ref, KEY_WRITE); |
794 | if (ret < 0) { | 794 | if (ret < 0) { |
795 | key_ref = ERR_PTR(ret); | 795 | key_ref = ERR_PTR(ret); |
796 | goto error_3; | 796 | goto error_3; |
797 | } | 797 | } |
798 | 798 | ||
799 | /* if it's possible to update this type of key, search for an existing | 799 | /* if it's possible to update this type of key, search for an existing |
800 | * key of the same type and description in the destination keyring and | 800 | * key of the same type and description in the destination keyring and |
801 | * update that instead if possible | 801 | * update that instead if possible |
802 | */ | 802 | */ |
803 | if (ktype->update) { | 803 | if (ktype->update) { |
804 | key_ref = __keyring_search_one(keyring_ref, ktype, description, | 804 | key_ref = __keyring_search_one(keyring_ref, ktype, description, |
805 | 0); | 805 | 0); |
806 | if (!IS_ERR(key_ref)) | 806 | if (!IS_ERR(key_ref)) |
807 | goto found_matching_key; | 807 | goto found_matching_key; |
808 | } | 808 | } |
809 | 809 | ||
810 | /* if the client doesn't provide, decide on the permissions we want */ | 810 | /* if the client doesn't provide, decide on the permissions we want */ |
811 | if (perm == KEY_PERM_UNDEF) { | 811 | if (perm == KEY_PERM_UNDEF) { |
812 | perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; | 812 | perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; |
813 | perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR; | 813 | perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR; |
814 | 814 | ||
815 | if (ktype->read) | 815 | if (ktype->read) |
816 | perm |= KEY_POS_READ | KEY_USR_READ; | 816 | perm |= KEY_POS_READ | KEY_USR_READ; |
817 | 817 | ||
818 | if (ktype == &key_type_keyring || ktype->update) | 818 | if (ktype == &key_type_keyring || ktype->update) |
819 | perm |= KEY_USR_WRITE; | 819 | perm |= KEY_USR_WRITE; |
820 | } | 820 | } |
821 | 821 | ||
822 | /* allocate a new key */ | 822 | /* allocate a new key */ |
823 | key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred, | 823 | key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred, |
824 | perm, flags); | 824 | perm, flags); |
825 | if (IS_ERR(key)) { | 825 | if (IS_ERR(key)) { |
826 | key_ref = ERR_CAST(key); | 826 | key_ref = ERR_CAST(key); |
827 | goto error_3; | 827 | goto error_3; |
828 | } | 828 | } |
829 | 829 | ||
830 | /* instantiate it and link it into the target keyring */ | 830 | /* instantiate it and link it into the target keyring */ |
831 | ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL, | 831 | ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL, |
832 | &prealloc); | 832 | &prealloc); |
833 | if (ret < 0) { | 833 | if (ret < 0) { |
834 | key_put(key); | 834 | key_put(key); |
835 | key_ref = ERR_PTR(ret); | 835 | key_ref = ERR_PTR(ret); |
836 | goto error_3; | 836 | goto error_3; |
837 | } | 837 | } |
838 | 838 | ||
839 | key_ref = make_key_ref(key, is_key_possessed(keyring_ref)); | 839 | key_ref = make_key_ref(key, is_key_possessed(keyring_ref)); |
840 | 840 | ||
841 | error_3: | 841 | error_3: |
842 | __key_link_end(keyring, ktype, prealloc); | 842 | __key_link_end(keyring, ktype, prealloc); |
843 | error_2: | 843 | error_2: |
844 | key_type_put(ktype); | 844 | key_type_put(ktype); |
845 | error: | 845 | error: |
846 | return key_ref; | 846 | return key_ref; |
847 | 847 | ||
848 | found_matching_key: | 848 | found_matching_key: |
849 | /* we found a matching key, so we're going to try to update it | 849 | /* we found a matching key, so we're going to try to update it |
850 | * - we can drop the locks first as we have the key pinned | 850 | * - we can drop the locks first as we have the key pinned |
851 | */ | 851 | */ |
852 | __key_link_end(keyring, ktype, prealloc); | 852 | __key_link_end(keyring, ktype, prealloc); |
853 | key_type_put(ktype); | 853 | key_type_put(ktype); |
854 | 854 | ||
855 | key_ref = __key_update(key_ref, payload, plen); | 855 | key_ref = __key_update(key_ref, payload, plen); |
856 | goto error; | 856 | goto error; |
857 | } | 857 | } |
858 | EXPORT_SYMBOL(key_create_or_update); | 858 | EXPORT_SYMBOL(key_create_or_update); |
 859 |
 860 | /**
 861 |  * key_update - Update a key's contents.
 862 |  * @key_ref: The pointer (plus possession flag) to the key.
 863 |  * @payload: The data to be used to update the key.
 864 |  * @plen: The length of @payload.
 865 |  *
 866 |  * Attempt to update the contents of a key with the given payload data. The
 867 |  * caller must be granted Write permission on the key. Negative keys can be
 868 |  * instantiated by this method.
 869 |  *
 870 |  * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
 871 |  * type does not support updating. The key type may return other errors.
 872 |  */
 873 | int key_update(key_ref_t key_ref, const void *payload, size_t plen)
 874 | {
 875 |         struct key *key = key_ref_to_ptr(key_ref);
 876 |         int ret;
 877 |
 878 |         key_check(key);
 879 |
 880 |         /* the key must be writable */
 881 |         ret = key_permission(key_ref, KEY_WRITE);
 882 |         if (ret < 0)
 883 |                 goto error;
 884 |
 885 |         /* attempt to update it if supported */
 886 |         ret = -EOPNOTSUPP;
 887 |         if (key->type->update) {
 888 |                 down_write(&key->sem);
 889 |
 890 |                 ret = key->type->update(key, payload, plen);
 891 |                 if (ret == 0)
 892 |                         /* updating a negative key instantiates it */
 893 |                         clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
 894 |
 895 |                 up_write(&key->sem);
 896 |         }
 897 |
 898 | error:
 899 |         return ret;
 900 | }
 901 | EXPORT_SYMBOL(key_update);
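As a hedged illustration (not from this file), a caller that holds a reference to
a key with Write permission might wrap key_update() like this; the helper name
refresh_key_payload and its error handling are invented for the sketch:

static int refresh_key_payload(struct key *key, const void *data, size_t len)
{
        int ret;

        /* A successful update also instantiates a negative key, per the
         * KEY_FLAG_NEGATIVE handling above. */
        ret = key_update(make_key_ref(key, 1), data, len);
        if (ret == -EOPNOTSUPP)
                pr_debug("key type %s has no ->update\n", key->type->name);
        return ret;
}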
 902 |
 903 | /**
 904 |  * key_revoke - Revoke a key.
 905 |  * @key: The key to be revoked.
 906 |  *
 907 |  * Mark a key as being revoked and ask the type to free up its resources. The
 908 |  * revocation timeout is set and the key and all its links will be
 909 |  * automatically garbage collected after key_gc_delay amount of time if they
 910 |  * are not manually dealt with first.
 911 |  */
 912 | void key_revoke(struct key *key)
 913 | {
 914 |         struct timespec now;
 915 |         time_t time;
 916 |
 917 |         key_check(key);
 918 |
 919 |         /* make sure no one's trying to change or use the key when we mark it
 920 |          * - we tell lockdep that we might nest because we might be revoking an
 921 |          *   authorisation key whilst holding the sem on a key we've just
 922 |          *   instantiated
 923 |          */
 924 |         down_write_nested(&key->sem, 1);
 925 |         if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
 926 |             key->type->revoke)
 927 |                 key->type->revoke(key);
 928 |
 929 |         /* set the death time to no more than the expiry time */
 930 |         now = current_kernel_time();
 931 |         time = now.tv_sec;
 932 |         if (key->revoked_at == 0 || key->revoked_at > time) {
 933 |                 key->revoked_at = time;
 934 |                 key_schedule_gc(key->revoked_at + key_gc_delay);
 935 |         }
 936 |
 937 |         up_write(&key->sem);
 938 | }
 939 | EXPORT_SYMBOL(key_revoke);
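For illustration only (not part of key.c), a driver that no longer trusts a key it
holds might revoke it and drop its reference; the helper drop_session_key is
hypothetical:

static void drop_session_key(struct key *key)
{
        /* ->revoke (if the type has one) runs now; the key and its links
         * are reaped after key_gc_delay seconds unless removed sooner. */
        key_revoke(key);
        key_put(key);
}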
 940 |
 941 | /**
 942 |  * register_key_type - Register a type of key.
 943 |  * @ktype: The new key type.
 944 |  *
 945 |  * Register a new key type.
 946 |  *
 947 |  * Returns 0 on success or -EEXIST if a type of this name already exists.
 948 |  */
 949 | int register_key_type(struct key_type *ktype)
 950 | {
 951 |         struct key_type *p;
 952 |         int ret;
 953 |
 954 |         ret = -EEXIST;
 955 |         down_write(&key_types_sem);
 956 |
 957 |         /* disallow key types with the same name */
 958 |         list_for_each_entry(p, &key_types_list, link) {
 959 |                 if (strcmp(p->name, ktype->name) == 0)
 960 |                         goto out;
 961 |         }
 962 |
 963 |         /* store the type */
 964 |         list_add(&ktype->link, &key_types_list);
 965 |         ret = 0;
 966 |
 967 | out:
 968 |         up_write(&key_types_sem);
 969 |         return ret;
 970 | }
 971 | EXPORT_SYMBOL(register_key_type);
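A sketch of registering a new type, assuming the key_type operation signatures as
they look in this era of the code; the "example" type and all helpers are
hypothetical and not part of the tree:

#include <linux/key-type.h>
#include <linux/module.h>
#include <linux/string.h>

static int example_instantiate(struct key *key, const void *data, size_t datalen)
{
        /* A real type would validate and stash the payload here. */
        return datalen <= 64 ? 0 : -EINVAL;
}

static int example_match(const struct key *key, const void *description)
{
        return strcmp(key->description, description) == 0;
}

static struct key_type key_type_example = {
        .name           = "example",
        .instantiate    = example_instantiate,
        .match          = example_match,
};

static int __init example_init(void)
{
        /* Fails with -EEXIST if another type already uses the name. */
        return register_key_type(&key_type_example);
}
module_init(example_init);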
 972 |
 973 | /**
 974 |  * unregister_key_type - Unregister a type of key.
 975 |  * @ktype: The key type.
 976 |  *
 977 |  * Unregister a key type and mark all the extant keys of this type as dead.
 978 |  * Those keys of this type are then destroyed to get rid of their payloads and
 979 |  * they and their links will be garbage collected as soon as possible.
 980 |  */
 981 | void unregister_key_type(struct key_type *ktype)
 982 | {
 983 |         struct rb_node *_n;
 984 |         struct key *key;
 985 |
 986 |         down_write(&key_types_sem);
 987 |
 988 |         /* withdraw the key type */
 989 |         list_del_init(&ktype->link);
 990 |
 991 |         /* mark all the keys of this type dead */
 992 |         spin_lock(&key_serial_lock);
 993 |
 994 |         for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
 995 |                 key = rb_entry(_n, struct key, serial_node);
 996 |
 997 |                 if (key->type == ktype) {
 998 |                         key->type = &key_type_dead;
 999 |                         set_bit(KEY_FLAG_DEAD, &key->flags);
1000 |                 }
1001 |         }
1002 |
1003 |         spin_unlock(&key_serial_lock);
1004 |
1005 |         /* make sure everyone revalidates their keys */
1006 |         synchronize_rcu();
1007 |
1008 |         /* we should now be able to destroy the payloads of all the keys of
1009 |          * this type with impunity */
1010 |         spin_lock(&key_serial_lock);
1011 |
1012 |         for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
1013 |                 key = rb_entry(_n, struct key, serial_node);
1014 |
1015 |                 if (key->type == ktype) {
1016 |                         if (ktype->destroy)
1017 |                                 ktype->destroy(key);
1018 |                         memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
1019 |                 }
1020 |         }
1021 |
1022 |         spin_unlock(&key_serial_lock);
1023 |         up_write(&key_types_sem);
1024 |
1025 |         key_schedule_gc(0);
1026 | }
1027 | EXPORT_SYMBOL(unregister_key_type);
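Pairing with the hypothetical registration sketch above, a module exit hook would
typically unregister the type; again illustrative only:

static void __exit example_exit(void)
{
        /* Extant "example" keys are switched to key_type_dead, destroyed,
         * and left for the garbage collector to unlink. */
        unregister_key_type(&key_type_example);
}
module_exit(example_exit);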
1028 |
1029 | /*
1030 |  * Initialise the key management state.
1031 |  */
1032 | void __init key_init(void)
1033 | {
1034 |         /* allocate a slab in which we can store keys */
1035 |         key_jar = kmem_cache_create("key_jar", sizeof(struct key),
1036 |                         0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1037 |
1038 |         /* add the special key types */
1039 |         list_add_tail(&key_type_keyring.link, &key_types_list);
1040 |         list_add_tail(&key_type_dead.link, &key_types_list);
1041 |         list_add_tail(&key_type_user.link, &key_types_list);
1042 |
1043 |         /* record the root user tracking */
1044 |         rb_link_node(&root_key_user.node,
1045 |                      NULL,
1046 |                      &key_user_tree.rb_node);
1047 |
1048 |         rb_insert_color(&root_key_user.node,
1049 |                         &key_user_tree);
1050 | }
1051 |