Blame view
kernel/user.c
4.4 KB
1da177e4c Linux-2.6.12-rc2
/* * The "user cache". * * (C) Copyright 1991-2000 Linus Torvalds * * We have a per-user structure to keep track of how many * processes, files etc the user has claimed, in order to be * able to have per-user limits for system resources. */ #include <linux/init.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/bitops.h> #include <linux/key.h> /* * UID task count cache, to get fast user lookup in "alloc_uid" * when changing user ID's (ie setuid() and friends). */ #define UIDHASH_BITS (CONFIG_BASE_SMALL ? 3 : 8) #define UIDHASH_SZ (1 << UIDHASH_BITS) #define UIDHASH_MASK (UIDHASH_SZ - 1) #define __uidhashfn(uid) (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK) #define uidhashentry(uid) (uidhash_table + __uidhashfn((uid))) static kmem_cache_t *uid_cachep; static struct list_head uidhash_table[UIDHASH_SZ]; static DEFINE_SPINLOCK(uidhash_lock); struct user_struct root_user = { .__count = ATOMIC_INIT(1), .processes = ATOMIC_INIT(1), .files = ATOMIC_INIT(0), .sigpending = ATOMIC_INIT(0), .mq_bytes = 0, .locked_shm = 0, #ifdef CONFIG_KEYS .uid_keyring = &root_user_keyring, .session_keyring = &root_session_keyring, #endif }; /* * These routines must be called with the uidhash spinlock held! */ static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent) { list_add(&up->uidhash_list, hashent); } static inline void uid_hash_remove(struct user_struct *up) { list_del(&up->uidhash_list); } static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *hashent) { struct list_head *up; list_for_each(up, hashent) { struct user_struct *user; user = list_entry(up, struct user_struct, uidhash_list); if(user->uid == uid) { atomic_inc(&user->__count); return user; } } return NULL; } /* * Locate the user_struct for the passed UID. If found, take a ref on it. The * caller must undo that ref with free_uid(). * * If the user_struct could not be found, return NULL. */ struct user_struct *find_user(uid_t uid) { struct user_struct *ret; spin_lock(&uidhash_lock); ret = uid_hash_find(uid, uidhashentry(uid)); spin_unlock(&uidhash_lock); return ret; } void free_uid(struct user_struct *up) { if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) { uid_hash_remove(up); key_put(up->uid_keyring); key_put(up->session_keyring); kmem_cache_free(uid_cachep, up); spin_unlock(&uidhash_lock); } } struct user_struct * alloc_uid(uid_t uid) { struct list_head *hashent = uidhashentry(uid); struct user_struct *up; spin_lock(&uidhash_lock); up = uid_hash_find(uid, hashent); spin_unlock(&uidhash_lock); if (!up) { struct user_struct *new; new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL); if (!new) return NULL; new->uid = uid; atomic_set(&new->__count, 1); atomic_set(&new->processes, 0); atomic_set(&new->files, 0); atomic_set(&new->sigpending, 0); |
0eeca2830 [PATCH] inotify
#ifdef CONFIG_INOTIFY
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif
1da177e4c Linux-2.6.12-rc2
		new->mq_bytes = 0;
		new->locked_shm = 0;

		if (alloc_uid_keyring(new) < 0) {
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock(&uidhash_lock);

	}
	return up;
}

void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	free_uid(old_user);
	suid_keys(current);
}

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	for(n = 0; n < UIDHASH_SZ; ++n)
		INIT_LIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(0));
	spin_unlock(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);
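
alloc_uid() above combines three ideas: optimistic allocation outside the lock, a re-check under the lock to handle a racing inserter, and a refcount whose final put unhashes the object. Below is a minimal, self-contained userspace sketch of the same pattern, with a pthread mutex standing in for the uidhash spinlock. All names in it (struct obj, obj_get, obj_put, table_lock, and so on) are invented for illustration; note also that the kernel's free_uid() is cheaper than the obj_put() here, since atomic_dec_and_lock() takes the lock only when the count is about to reach zero.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define HASH_SZ 8
/* Same fold-and-mask idea as __uidhashfn() above */
#define hashfn(k) ((((k) >> 3) + (k)) % HASH_SZ)

struct obj {
	unsigned int key;
	int refcount;			/* protected by table_lock in this sketch */
	struct obj *next;
};

static struct obj *table[HASH_SZ];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Call with table_lock held, like uid_hash_find() */
static struct obj *hash_find(unsigned int key)
{
	struct obj *o;

	for (o = table[hashfn(key)]; o; o = o->next) {
		if (o->key == key) {
			o->refcount++;
			return o;
		}
	}
	return NULL;
}

/* Analogue of alloc_uid(): find-or-create with a race re-check */
struct obj *obj_get(unsigned int key)
{
	struct obj *o, *new;

	pthread_mutex_lock(&table_lock);
	o = hash_find(key);
	pthread_mutex_unlock(&table_lock);
	if (o)
		return o;

	new = calloc(1, sizeof(*new));	/* allocate with no lock held */
	if (!new)
		return NULL;
	new->key = key;
	new->refcount = 1;

	/* Re-check: another thread may have inserted this key meanwhile */
	pthread_mutex_lock(&table_lock);
	o = hash_find(key);
	if (o) {
		free(new);		/* lost the race; discard our copy */
	} else {
		new->next = table[hashfn(key)];
		table[hashfn(key)] = new;
		o = new;
	}
	pthread_mutex_unlock(&table_lock);
	return o;
}

/* Analogue of free_uid(): the last put unhashes and frees the object */
void obj_put(struct obj *o)
{
	pthread_mutex_lock(&table_lock);
	if (--o->refcount == 0) {
		struct obj **p = &table[hashfn(o->key)];

		while (*p != o)
			p = &(*p)->next;
		*p = o->next;
		free(o);
	}
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	struct obj *a = obj_get(1000), *b = obj_get(1000);

	printf("same object: %s, refcount: %d\n",
	       a == b ? "yes" : "no", a->refcount);
	obj_put(b);
	obj_put(a);
	return 0;
}

Built with `cc -pthread sketch.c`, this prints "same object: yes, refcount: 2": the second obj_get() finds the entry inserted by the first, just as a second alloc_uid() for the same UID returns the cached user_struct instead of allocating another.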