Commit 353ab6e97b8f209dbecc9f650f1f84e3da2a7bb1

Authored by Ingo Molnar
Committed by Linus Torvalds
1 parent e655a250d5

[PATCH] sem2mutex: fs/

Semaphore to mutex conversion.

The conversion was generated via scripts, and the result was validated
automatically via a script as well.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Eric Van Hensbergen <ericvh@ericvh.myip.org>
Cc: Robert Love <rml@tech9.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Neil Brown <neilb@cse.unsw.edu.au>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Cc: Dave Kleikamp <shaggy@austin.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 11 changed files with 99 additions and 89 deletions
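Before the per-file hunks, the general shape of the conversion: every semaphore below is used purely for mutual exclusion, so DECLARE_MUTEX becomes DEFINE_MUTEX and each down()/up() pair becomes mutex_lock()/mutex_unlock(). A minimal sketch of the pattern, with a placeholder lock name and critical section (illustrative only, not code lifted from any one of the changed files):

    #include <linux/mutex.h>

    /* Before:
     *	static DECLARE_MUTEX(example_mutex);
     *	down(&example_mutex);  ...critical section...  up(&example_mutex);
     */

    /* After: */
    static DEFINE_MUTEX(example_mutex);

    static void critical_section_sketch(void)
    {
    	mutex_lock(&example_mutex);
    	/* ... touch the state the lock protects ... */
    	mutex_unlock(&example_mutex);
    }

Semaphores used for signalling rather than mutual exclusion are not turned into mutexes; the lockd_start case in fs/lockd/svc.c below becomes a completion instead.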

fs/cramfs/inode.c
... ... @@ -22,6 +22,7 @@
22 22 #include <linux/cramfs_fs_sb.h>
23 23 #include <linux/buffer_head.h>
24 24 #include <linux/vfs.h>
  25 +#include <linux/mutex.h>
25 26 #include <asm/semaphore.h>
26 27  
27 28 #include <asm/uaccess.h>
... ... @@ -31,7 +32,7 @@
31 32 static struct file_operations cramfs_directory_operations;
32 33 static struct address_space_operations cramfs_aops;
33 34  
34   -static DECLARE_MUTEX(read_mutex);
  35 +static DEFINE_MUTEX(read_mutex);
35 36  
36 37  
37 38 /* These two macros may change in future, to provide better st_ino
38 39  
39 40  
40 41  
... ... @@ -250,20 +251,20 @@
250 251 memset(sbi, 0, sizeof(struct cramfs_sb_info));
251 252  
252 253 /* Invalidate the read buffers on mount: think disk change.. */
253   - down(&read_mutex);
  254 + mutex_lock(&read_mutex);
254 255 for (i = 0; i < READ_BUFFERS; i++)
255 256 buffer_blocknr[i] = -1;
256 257  
257 258 /* Read the first block and get the superblock from it */
258 259 memcpy(&super, cramfs_read(sb, 0, sizeof(super)), sizeof(super));
259   - up(&read_mutex);
  260 + mutex_unlock(&read_mutex);
260 261  
261 262 /* Do sanity checks on the superblock */
262 263 if (super.magic != CRAMFS_MAGIC) {
263 264 /* check at 512 byte offset */
264   - down(&read_mutex);
  265 + mutex_lock(&read_mutex);
265 266 memcpy(&super, cramfs_read(sb, 512, sizeof(super)), sizeof(super));
266   - up(&read_mutex);
  267 + mutex_unlock(&read_mutex);
267 268 if (super.magic != CRAMFS_MAGIC) {
268 269 if (!silent)
269 270 printk(KERN_ERR "cramfs: wrong magic\n");
... ... @@ -366,7 +367,7 @@
366 367 mode_t mode;
367 368 int namelen, error;
368 369  
369   - down(&read_mutex);
  370 + mutex_lock(&read_mutex);
370 371 de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+256);
371 372 name = (char *)(de+1);
372 373  
... ... @@ -379,7 +380,7 @@
379 380 memcpy(buf, name, namelen);
380 381 ino = CRAMINO(de);
381 382 mode = de->mode;
382   - up(&read_mutex);
  383 + mutex_unlock(&read_mutex);
383 384 nextoffset = offset + sizeof(*de) + namelen;
384 385 for (;;) {
385 386 if (!namelen) {
... ... @@ -410,7 +411,7 @@
410 411 unsigned int offset = 0;
411 412 int sorted;
412 413  
413   - down(&read_mutex);
  414 + mutex_lock(&read_mutex);
414 415 sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
415 416 while (offset < dir->i_size) {
416 417 struct cramfs_inode *de;
... ... @@ -433,7 +434,7 @@
433 434  
434 435 for (;;) {
435 436 if (!namelen) {
436   - up(&read_mutex);
  437 + mutex_unlock(&read_mutex);
437 438 return ERR_PTR(-EIO);
438 439 }
439 440 if (name[namelen-1])
... ... @@ -447,7 +448,7 @@
447 448 continue;
448 449 if (!retval) {
449 450 struct cramfs_inode entry = *de;
450   - up(&read_mutex);
  451 + mutex_unlock(&read_mutex);
451 452 d_add(dentry, get_cramfs_inode(dir->i_sb, &entry));
452 453 return NULL;
453 454 }
... ... @@ -455,7 +456,7 @@
455 456 if (sorted)
456 457 break;
457 458 }
458   - up(&read_mutex);
  459 + mutex_unlock(&read_mutex);
459 460 d_add(dentry, NULL);
460 461 return NULL;
461 462 }
462 463  
463 464  
464 465  
... ... @@ -474,21 +475,21 @@
474 475 u32 start_offset, compr_len;
475 476  
476 477 start_offset = OFFSET(inode) + maxblock*4;
477   - down(&read_mutex);
  478 + mutex_lock(&read_mutex);
478 479 if (page->index)
479 480 start_offset = *(u32 *) cramfs_read(sb, blkptr_offset-4, 4);
480 481 compr_len = (*(u32 *) cramfs_read(sb, blkptr_offset, 4) - start_offset);
481   - up(&read_mutex);
  482 + mutex_unlock(&read_mutex);
482 483 pgdata = kmap(page);
483 484 if (compr_len == 0)
484 485 ; /* hole */
485 486 else {
486   - down(&read_mutex);
  487 + mutex_lock(&read_mutex);
487 488 bytes_filled = cramfs_uncompress_block(pgdata,
488 489 PAGE_CACHE_SIZE,
489 490 cramfs_read(sb, start_offset, compr_len),
490 491 compr_len);
491   - up(&read_mutex);
  492 + mutex_unlock(&read_mutex);
492 493 }
493 494 } else
494 495 pgdata = kmap(page);
fs/dcookies.c
... ... @@ -23,6 +23,7 @@
23 23 #include <linux/mm.h>
24 24 #include <linux/errno.h>
25 25 #include <linux/dcookies.h>
  26 +#include <linux/mutex.h>
26 27 #include <asm/uaccess.h>
27 28  
28 29 /* The dcookies are allocated from a kmem_cache and
... ... @@ -36,7 +37,7 @@
36 37 };
37 38  
38 39 static LIST_HEAD(dcookie_users);
39   -static DECLARE_MUTEX(dcookie_sem);
  40 +static DEFINE_MUTEX(dcookie_mutex);
40 41 static kmem_cache_t * dcookie_cache;
41 42 static struct list_head * dcookie_hashtable;
42 43 static size_t hash_size;
... ... @@ -114,7 +115,7 @@
114 115 int err = 0;
115 116 struct dcookie_struct * dcs;
116 117  
117   - down(&dcookie_sem);
  118 + mutex_lock(&dcookie_mutex);
118 119  
119 120 if (!is_live()) {
120 121 err = -EINVAL;
... ... @@ -134,7 +135,7 @@
134 135 *cookie = dcookie_value(dcs);
135 136  
136 137 out:
137   - up(&dcookie_sem);
  138 + mutex_unlock(&dcookie_mutex);
138 139 return err;
139 140 }
140 141  
... ... @@ -157,7 +158,7 @@
157 158 if (!capable(CAP_SYS_ADMIN))
158 159 return -EPERM;
159 160  
160   - down(&dcookie_sem);
  161 + mutex_lock(&dcookie_mutex);
161 162  
162 163 if (!is_live()) {
163 164 err = -EINVAL;
... ... @@ -192,7 +193,7 @@
192 193 out_free:
193 194 kfree(kbuf);
194 195 out:
195   - up(&dcookie_sem);
  196 + mutex_unlock(&dcookie_mutex);
196 197 return err;
197 198 }
198 199  
... ... @@ -290,7 +291,7 @@
290 291 {
291 292 struct dcookie_user * user;
292 293  
293   - down(&dcookie_sem);
  294 + mutex_lock(&dcookie_mutex);
294 295  
295 296 user = kmalloc(sizeof(struct dcookie_user), GFP_KERNEL);
296 297 if (!user)
... ... @@ -302,7 +303,7 @@
302 303 list_add(&user->next, &dcookie_users);
303 304  
304 305 out:
305   - up(&dcookie_sem);
  306 + mutex_unlock(&dcookie_mutex);
306 307 return user;
307 308 out_free:
308 309 kfree(user);
... ... @@ -313,7 +314,7 @@
313 314  
314 315 void dcookie_unregister(struct dcookie_user * user)
315 316 {
316   - down(&dcookie_sem);
  317 + mutex_lock(&dcookie_mutex);
317 318  
318 319 list_del(&user->next);
319 320 kfree(user);
... ... @@ -321,7 +322,7 @@
321 322 if (!is_live())
322 323 dcookie_exit();
323 324  
324   - up(&dcookie_sem);
  325 + mutex_unlock(&dcookie_mutex);
325 326 }
326 327  
327 328 EXPORT_SYMBOL_GPL(dcookie_register);
fs/jffs2/compr_zlib.c
... ... @@ -33,13 +33,14 @@
33 33 */
34 34 #define STREAM_END_SPACE 12
35 35  
36   -static DECLARE_MUTEX(deflate_sem);
37   -static DECLARE_MUTEX(inflate_sem);
  36 +static DEFINE_MUTEX(deflate_mutex);
  37 +static DEFINE_MUTEX(inflate_mutex);
38 38 static z_stream inf_strm, def_strm;
39 39  
40 40 #ifdef __KERNEL__ /* Linux-only */
41 41 #include <linux/vmalloc.h>
42 42 #include <linux/init.h>
  43 +#include <linux/mutex.h>
43 44  
44 45 static int __init alloc_workspaces(void)
45 46 {
46 47  
... ... @@ -79,11 +80,11 @@
79 80 if (*dstlen <= STREAM_END_SPACE)
80 81 return -1;
81 82  
82   - down(&deflate_sem);
  83 + mutex_lock(&deflate_mutex);
83 84  
84 85 if (Z_OK != zlib_deflateInit(&def_strm, 3)) {
85 86 printk(KERN_WARNING "deflateInit failed\n");
86   - up(&deflate_sem);
  87 + mutex_unlock(&deflate_mutex);
87 88 return -1;
88 89 }
89 90  
... ... @@ -104,7 +105,7 @@
104 105 if (ret != Z_OK) {
105 106 D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret));
106 107 zlib_deflateEnd(&def_strm);
107   - up(&deflate_sem);
  108 + mutex_unlock(&deflate_mutex);
108 109 return -1;
109 110 }
110 111 }
... ... @@ -133,7 +134,7 @@
133 134 *sourcelen = def_strm.total_in;
134 135 ret = 0;
135 136 out:
136   - up(&deflate_sem);
  137 + mutex_unlock(&deflate_mutex);
137 138 return ret;
138 139 }
139 140  
... ... @@ -145,7 +146,7 @@
145 146 int ret;
146 147 int wbits = MAX_WBITS;
147 148  
148   - down(&inflate_sem);
  149 + mutex_lock(&inflate_mutex);
149 150  
150 151 inf_strm.next_in = data_in;
151 152 inf_strm.avail_in = srclen;
... ... @@ -173,7 +174,7 @@
173 174  
174 175 if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) {
175 176 printk(KERN_WARNING "inflateInit failed\n");
176   - up(&inflate_sem);
  177 + mutex_unlock(&inflate_mutex);
177 178 return 1;
178 179 }
179 180  
... ... @@ -183,7 +184,7 @@
183 184 printk(KERN_NOTICE "inflate returned %d\n", ret);
184 185 }
185 186 zlib_inflateEnd(&inf_strm);
186   - up(&inflate_sem);
  187 + mutex_unlock(&inflate_mutex);
187 188 return 0;
188 189 }
189 190  
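The two jffs2 locks above stay separate, one per direction, because the zlib glue keeps a single statically allocated z_stream workspace for deflate and another for inflate; every caller must serialize on the matching workspace, including on the early-return error paths. A sketch of that layout (the wrapper function is illustrative, not part of the patch):

    #include <linux/mutex.h>
    #include <linux/zlib.h>

    /* One shared workspace per direction, hence one mutex per direction. */
    static DEFINE_MUTEX(deflate_mutex);
    static z_stream def_strm;

    /* Any use of def_strm happens under deflate_mutex, and the lock is
     * dropped before every return, as in the converted code above. */
    static int with_deflate_workspace(int (*fn)(z_stream *strm))
    {
    	int ret;

    	mutex_lock(&deflate_mutex);
    	ret = fn(&def_strm);
    	mutex_unlock(&deflate_mutex);
    	return ret;
    }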
fs/jfs/jfs_logmgr.c
... ... @@ -69,6 +69,7 @@
69 69 #include <linux/bio.h>
70 70 #include <linux/suspend.h>
71 71 #include <linux/delay.h>
  72 +#include <linux/mutex.h>
72 73 #include "jfs_incore.h"
73 74 #include "jfs_filsys.h"
74 75 #include "jfs_metapage.h"
... ... @@ -165,7 +166,7 @@
165 166 */
166 167 static LIST_HEAD(jfs_external_logs);
167 168 static struct jfs_log *dummy_log = NULL;
168   -static DECLARE_MUTEX(jfs_log_sem);
  169 +static DEFINE_MUTEX(jfs_log_mutex);
169 170  
170 171 /*
171 172 * forward references
172 173  
173 174  
... ... @@ -1085,20 +1086,20 @@
1085 1086 if (sbi->mntflag & JFS_INLINELOG)
1086 1087 return open_inline_log(sb);
1087 1088  
1088   - down(&jfs_log_sem);
  1089 + mutex_lock(&jfs_log_mutex);
1089 1090 list_for_each_entry(log, &jfs_external_logs, journal_list) {
1090 1091 if (log->bdev->bd_dev == sbi->logdev) {
1091 1092 if (memcmp(log->uuid, sbi->loguuid,
1092 1093 sizeof(log->uuid))) {
1093 1094 jfs_warn("wrong uuid on JFS journal\n");
1094   - up(&jfs_log_sem);
  1095 + mutex_unlock(&jfs_log_mutex);
1095 1096 return -EINVAL;
1096 1097 }
1097 1098 /*
1098 1099 * add file system to log active file system list
1099 1100 */
1100 1101 if ((rc = lmLogFileSystem(log, sbi, 1))) {
1101   - up(&jfs_log_sem);
  1102 + mutex_unlock(&jfs_log_mutex);
1102 1103 return rc;
1103 1104 }
1104 1105 goto journal_found;
... ... @@ -1106,7 +1107,7 @@
1106 1107 }
1107 1108  
1108 1109 if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) {
1109   - up(&jfs_log_sem);
  1110 + mutex_unlock(&jfs_log_mutex);
1110 1111 return -ENOMEM;
1111 1112 }
1112 1113 INIT_LIST_HEAD(&log->sb_list);
... ... @@ -1151,7 +1152,7 @@
1151 1152 sbi->log = log;
1152 1153 LOG_UNLOCK(log);
1153 1154  
1154   - up(&jfs_log_sem);
  1155 + mutex_unlock(&jfs_log_mutex);
1155 1156 return 0;
1156 1157  
1157 1158 /*
... ... @@ -1168,7 +1169,7 @@
1168 1169 blkdev_put(bdev);
1169 1170  
1170 1171 free: /* free log descriptor */
1171   - up(&jfs_log_sem);
  1172 + mutex_unlock(&jfs_log_mutex);
1172 1173 kfree(log);
1173 1174  
1174 1175 jfs_warn("lmLogOpen: exit(%d)", rc);
1175 1176  
... ... @@ -1212,11 +1213,11 @@
1212 1213 {
1213 1214 int rc;
1214 1215  
1215   - down(&jfs_log_sem);
  1216 + mutex_lock(&jfs_log_mutex);
1216 1217 if (!dummy_log) {
1217 1218 dummy_log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL);
1218 1219 if (!dummy_log) {
1219   - up(&jfs_log_sem);
  1220 + mutex_unlock(&jfs_log_mutex);
1220 1221 return -ENOMEM;
1221 1222 }
1222 1223 INIT_LIST_HEAD(&dummy_log->sb_list);
... ... @@ -1229,7 +1230,7 @@
1229 1230 if (rc) {
1230 1231 kfree(dummy_log);
1231 1232 dummy_log = NULL;
1232   - up(&jfs_log_sem);
  1233 + mutex_unlock(&jfs_log_mutex);
1233 1234 return rc;
1234 1235 }
1235 1236 }
... ... @@ -1238,7 +1239,7 @@
1238 1239 list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list);
1239 1240 JFS_SBI(sb)->log = dummy_log;
1240 1241 LOG_UNLOCK(dummy_log);
1241   - up(&jfs_log_sem);
  1242 + mutex_unlock(&jfs_log_mutex);
1242 1243  
1243 1244 return 0;
1244 1245 }
... ... @@ -1466,7 +1467,7 @@
1466 1467  
1467 1468 jfs_info("lmLogClose: log:0x%p", log);
1468 1469  
1469   - down(&jfs_log_sem);
  1470 + mutex_lock(&jfs_log_mutex);
1470 1471 LOG_LOCK(log);
1471 1472 list_del(&sbi->log_list);
1472 1473 LOG_UNLOCK(log);
... ... @@ -1516,7 +1517,7 @@
1516 1517 kfree(log);
1517 1518  
1518 1519 out:
1519   - up(&jfs_log_sem);
  1520 + mutex_unlock(&jfs_log_mutex);
1520 1521 jfs_info("lmLogClose: exit(%d)", rc);
1521 1522 return rc;
1522 1523 }
fs/lockd/host.c
... ... @@ -16,6 +16,7 @@
16 16 #include <linux/sunrpc/svc.h>
17 17 #include <linux/lockd/lockd.h>
18 18 #include <linux/lockd/sm_inter.h>
  19 +#include <linux/mutex.h>
19 20  
20 21  
21 22 #define NLMDBG_FACILITY NLMDBG_HOSTCACHE
... ... @@ -30,7 +31,7 @@
30 31 static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH];
31 32 static unsigned long next_gc;
32 33 static int nrhosts;
33   -static DECLARE_MUTEX(nlm_host_sema);
  34 +static DEFINE_MUTEX(nlm_host_mutex);
34 35  
35 36  
36 37 static void nlm_gc_hosts(void);
... ... @@ -71,7 +72,7 @@
71 72 hash = NLM_ADDRHASH(sin->sin_addr.s_addr);
72 73  
73 74 /* Lock hash table */
74   - down(&nlm_host_sema);
  75 + mutex_lock(&nlm_host_mutex);
75 76  
76 77 if (time_after_eq(jiffies, next_gc))
77 78 nlm_gc_hosts();
... ... @@ -91,7 +92,7 @@
91 92 nlm_hosts[hash] = host;
92 93 }
93 94 nlm_get_host(host);
94   - up(&nlm_host_sema);
  95 + mutex_unlock(&nlm_host_mutex);
95 96 return host;
96 97 }
97 98 }
... ... @@ -130,7 +131,7 @@
130 131 next_gc = 0;
131 132  
132 133 nohost:
133   - up(&nlm_host_sema);
  134 + mutex_unlock(&nlm_host_mutex);
134 135 return host;
135 136 }
136 137  
137 138  
138 139  
... ... @@ -141,19 +142,19 @@
141 142 * and return it
142 143 */
143 144 int hash;
144   - down(&nlm_host_sema);
  145 + mutex_lock(&nlm_host_mutex);
145 146 for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) {
146 147 struct nlm_host *host, **hp;
147 148 for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
148 149 if (host->h_server &&
149 150 host->h_killed == 0) {
150 151 nlm_get_host(host);
151   - up(&nlm_host_sema);
  152 + mutex_unlock(&nlm_host_mutex);
152 153 return host;
153 154 }
154 155 }
155 156 }
156   - up(&nlm_host_sema);
  157 + mutex_unlock(&nlm_host_mutex);
157 158 return NULL;
158 159 }
159 160  
... ... @@ -265,7 +266,7 @@
265 266 int i;
266 267  
267 268 dprintk("lockd: shutting down host module\n");
268   - down(&nlm_host_sema);
  269 + mutex_lock(&nlm_host_mutex);
269 270  
270 271 /* First, make all hosts eligible for gc */
271 272 dprintk("lockd: nuking all hosts...\n");
... ... @@ -276,7 +277,7 @@
276 277  
277 278 /* Then, perform a garbage collection pass */
278 279 nlm_gc_hosts();
279   - up(&nlm_host_sema);
  280 + mutex_unlock(&nlm_host_mutex);
280 281  
281 282 /* complain if any hosts are left */
282 283 if (nrhosts) {
fs/lockd/svc.c
... ... @@ -25,6 +25,7 @@
25 25 #include <linux/slab.h>
26 26 #include <linux/smp.h>
27 27 #include <linux/smp_lock.h>
  28 +#include <linux/mutex.h>
28 29  
29 30 #include <linux/sunrpc/types.h>
30 31 #include <linux/sunrpc/stats.h>
31 32  
... ... @@ -43,13 +44,13 @@
43 44 struct nlmsvc_binding * nlmsvc_ops;
44 45 EXPORT_SYMBOL(nlmsvc_ops);
45 46  
46   -static DECLARE_MUTEX(nlmsvc_sema);
  47 +static DEFINE_MUTEX(nlmsvc_mutex);
47 48 static unsigned int nlmsvc_users;
48 49 static pid_t nlmsvc_pid;
49 50 int nlmsvc_grace_period;
50 51 unsigned long nlmsvc_timeout;
51 52  
52   -static DECLARE_MUTEX_LOCKED(lockd_start);
  53 +static DECLARE_COMPLETION(lockd_start_done);
53 54 static DECLARE_WAIT_QUEUE_HEAD(lockd_exit);
54 55  
55 56 /*
... ... @@ -112,7 +113,7 @@
112 113 * Let our maker know we're running.
113 114 */
114 115 nlmsvc_pid = current->pid;
115   - up(&lockd_start);
  116 + complete(&lockd_start_done);
116 117  
117 118 daemonize("lockd");
118 119  
... ... @@ -215,7 +216,7 @@
215 216 struct svc_serv * serv;
216 217 int error = 0;
217 218  
218   - down(&nlmsvc_sema);
  219 + mutex_lock(&nlmsvc_mutex);
219 220 /*
220 221 * Unconditionally increment the user count ... this is
221 222 * the number of clients who _want_ a lockd process.
... ... @@ -263,7 +264,7 @@
263 264 "lockd_up: create thread failed, error=%d\n", error);
264 265 goto destroy_and_out;
265 266 }
266   - down(&lockd_start);
  267 + wait_for_completion(&lockd_start_done);
267 268  
268 269 /*
269 270 * Note: svc_serv structures have an initial use count of 1,
... ... @@ -272,7 +273,7 @@
272 273 destroy_and_out:
273 274 svc_destroy(serv);
274 275 out:
275   - up(&nlmsvc_sema);
  276 + mutex_unlock(&nlmsvc_mutex);
276 277 return error;
277 278 }
278 279 EXPORT_SYMBOL(lockd_up);
... ... @@ -285,7 +286,7 @@
285 286 {
286 287 static int warned;
287 288  
288   - down(&nlmsvc_sema);
  289 + mutex_lock(&nlmsvc_mutex);
289 290 if (nlmsvc_users) {
290 291 if (--nlmsvc_users)
291 292 goto out;
... ... @@ -315,7 +316,7 @@
315 316 recalc_sigpending();
316 317 spin_unlock_irq(&current->sighand->siglock);
317 318 out:
318   - up(&nlmsvc_sema);
  319 + mutex_unlock(&nlmsvc_mutex);
319 320 }
320 321 EXPORT_SYMBOL(lockd_down);
321 322  
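One semaphore in fs/lockd/svc.c above is not a mutex at all: lockd_start was declared locked and used as a one-shot "the thread has started" signal, so the conversion turns it into a completion rather than a mutex. A minimal sketch of that pattern (the two wrapper functions are illustrative placeholders):

    #include <linux/completion.h>

    static DECLARE_COMPLETION(lockd_start_done);

    /* Thread side, once it has recorded its pid: */
    static void thread_side_sketch(void)
    {
    	complete(&lockd_start_done);		/* was: up(&lockd_start) */
    }

    /* Caller side, after the kernel thread was created successfully: */
    static void starter_side_sketch(void)
    {
    	wait_for_completion(&lockd_start_done);	/* was: down(&lockd_start) */
    }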
fs/lockd/svcsubs.c
... ... @@ -11,6 +11,7 @@
11 11 #include <linux/string.h>
12 12 #include <linux/time.h>
13 13 #include <linux/in.h>
  14 +#include <linux/mutex.h>
14 15 #include <linux/sunrpc/svc.h>
15 16 #include <linux/sunrpc/clnt.h>
16 17 #include <linux/nfsd/nfsfh.h>
... ... @@ -28,7 +29,7 @@
28 29 #define FILE_HASH_BITS 5
29 30 #define FILE_NRHASH (1<<FILE_HASH_BITS)
30 31 static struct nlm_file * nlm_files[FILE_NRHASH];
31   -static DECLARE_MUTEX(nlm_file_sema);
  32 +static DEFINE_MUTEX(nlm_file_mutex);
32 33  
33 34 #ifdef NFSD_DEBUG
34 35 static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
... ... @@ -91,7 +92,7 @@
91 92 hash = file_hash(f);
92 93  
93 94 /* Lock file table */
94   - down(&nlm_file_sema);
  95 + mutex_lock(&nlm_file_mutex);
95 96  
96 97 for (file = nlm_files[hash]; file; file = file->f_next)
97 98 if (!nfs_compare_fh(&file->f_handle, f))
... ... @@ -130,7 +131,7 @@
130 131 nfserr = 0;
131 132  
132 133 out_unlock:
133   - up(&nlm_file_sema);
  134 + mutex_unlock(&nlm_file_mutex);
134 135 return nfserr;
135 136  
136 137 out_free:
137 138  
... ... @@ -239,14 +240,14 @@
239 240 struct nlm_file *file, **fp;
240 241 int i;
241 242  
242   - down(&nlm_file_sema);
  243 + mutex_lock(&nlm_file_mutex);
243 244 for (i = 0; i < FILE_NRHASH; i++) {
244 245 fp = nlm_files + i;
245 246 while ((file = *fp) != NULL) {
246 247 /* Traverse locks, blocks and shares of this file
247 248 * and update file->f_locks count */
248 249 if (nlm_inspect_file(host, file, action)) {
249   - up(&nlm_file_sema);
  250 + mutex_unlock(&nlm_file_mutex);
250 251 return 1;
251 252 }
252 253  
... ... @@ -261,7 +262,7 @@
261 262 }
262 263 }
263 264 }
264   - up(&nlm_file_sema);
  265 + mutex_unlock(&nlm_file_mutex);
265 266 return 0;
266 267 }
267 268  
... ... @@ -281,7 +282,7 @@
281 282 file, file->f_count);
282 283  
283 284 /* Lock file table */
284   - down(&nlm_file_sema);
  285 + mutex_lock(&nlm_file_mutex);
285 286  
286 287 /* If there are no more locks etc, delete the file */
287 288 if(--file->f_count == 0) {
... ... @@ -289,7 +290,7 @@
289 290 nlm_delete_file(file);
290 291 }
291 292  
292   - up(&nlm_file_sema);
  293 + mutex_unlock(&nlm_file_mutex);
293 294 }
294 295  
295 296 /*
fs/nfs/callback.c
... ... @@ -14,6 +14,7 @@
14 14 #include <linux/sunrpc/svc.h>
15 15 #include <linux/sunrpc/svcsock.h>
16 16 #include <linux/nfs_fs.h>
  17 +#include <linux/mutex.h>
17 18  
18 19 #include <net/inet_sock.h>
19 20  
... ... @@ -31,7 +32,7 @@
31 32 };
32 33  
33 34 static struct nfs_callback_data nfs_callback_info;
34   -static DECLARE_MUTEX(nfs_callback_sema);
  35 +static DEFINE_MUTEX(nfs_callback_mutex);
35 36 static struct svc_program nfs4_callback_program;
36 37  
37 38 unsigned int nfs_callback_set_tcpport;
... ... @@ -95,7 +96,7 @@
95 96 int ret = 0;
96 97  
97 98 lock_kernel();
98   - down(&nfs_callback_sema);
  99 + mutex_lock(&nfs_callback_mutex);
99 100 if (nfs_callback_info.users++ || nfs_callback_info.pid != 0)
100 101 goto out;
101 102 init_completion(&nfs_callback_info.started);
... ... @@ -121,7 +122,7 @@
121 122 nfs_callback_info.serv = serv;
122 123 wait_for_completion(&nfs_callback_info.started);
123 124 out:
124   - up(&nfs_callback_sema);
  125 + mutex_unlock(&nfs_callback_mutex);
125 126 unlock_kernel();
126 127 return ret;
127 128 out_destroy:
... ... @@ -139,7 +140,7 @@
139 140 int ret = 0;
140 141  
141 142 lock_kernel();
142   - down(&nfs_callback_sema);
  143 + mutex_lock(&nfs_callback_mutex);
143 144 nfs_callback_info.users--;
144 145 do {
145 146 if (nfs_callback_info.users != 0 || nfs_callback_info.pid == 0)
... ... @@ -147,7 +148,7 @@
147 148 if (kill_proc(nfs_callback_info.pid, SIGKILL, 1) < 0)
148 149 break;
149 150 } while (wait_for_completion_timeout(&nfs_callback_info.stopped, 5*HZ) == 0);
150   - up(&nfs_callback_sema);
  151 + mutex_unlock(&nfs_callback_mutex);
151 152 unlock_kernel();
152 153 return ret;
153 154 }
fs/nfsd/nfs4state.c
... ... @@ -49,6 +49,7 @@
49 49 #include <linux/nfsd/state.h>
50 50 #include <linux/nfsd/xdr4.h>
51 51 #include <linux/namei.h>
  52 +#include <linux/mutex.h>
52 53  
53 54 #define NFSDDBG_FACILITY NFSDDBG_PROC
54 55  
55 56  
... ... @@ -77,11 +78,11 @@
77 78  
78 79 /* Locking:
79 80 *
80   - * client_sema:
  81 + * client_mutex:
81 82 * protects clientid_hashtbl[], clientstr_hashtbl[],
82 83 * unconfstr_hashtbl[], uncofid_hashtbl[].
83 84 */
84   -static DECLARE_MUTEX(client_sema);
  85 +static DEFINE_MUTEX(client_mutex);
85 86  
86 87 static kmem_cache_t *stateowner_slab = NULL;
87 88 static kmem_cache_t *file_slab = NULL;
88 89  
... ... @@ -91,13 +92,13 @@
91 92 void
92 93 nfs4_lock_state(void)
93 94 {
94   - down(&client_sema);
  95 + mutex_lock(&client_mutex);
95 96 }
96 97  
97 98 void
98 99 nfs4_unlock_state(void)
99 100 {
100   - up(&client_sema);
  101 + mutex_unlock(&client_mutex);
101 102 }
102 103  
103 104 static inline u32
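The nfsd hunks above rename client_sema to client_mutex but keep the nfs4_lock_state()/nfs4_unlock_state() wrappers, so existing callers need no change. A hypothetical caller, for illustration only (the function name is a placeholder):

    /* NFSv4 state handlers serialize on client_mutex through the
     * wrappers rather than taking the lock directly, so the conversion
     * is invisible to them. */
    static void nfsd4_state_op_sketch(void)
    {
    	nfs4_lock_state();
    	/* ... look up or update clientid_hashtbl / clientstr_hashtbl ... */
    	nfs4_unlock_state();
    }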
fs/partitions/devfs.c
... ... @@ -6,7 +6,7 @@
6 6 #include <linux/vmalloc.h>
7 7 #include <linux/genhd.h>
8 8 #include <linux/bitops.h>
9   -#include <asm/semaphore.h>
  9 +#include <linux/mutex.h>
10 10  
11 11  
12 12 struct unique_numspace {
... ... @@ -16,7 +16,7 @@
16 16 struct semaphore mutex;
17 17 };
18 18  
19   -static DECLARE_MUTEX(numspace_mutex);
  19 +static DEFINE_MUTEX(numspace_mutex);
20 20  
21 21 static int expand_numspace(struct unique_numspace *s)
22 22 {
... ... @@ -48,7 +48,7 @@
48 48 {
49 49 int rval = 0;
50 50  
51   - down(&numspace_mutex);
  51 + mutex_lock(&numspace_mutex);
52 52 if (s->num_free < 1)
53 53 rval = expand_numspace(s);
54 54 if (!rval) {
... ... @@ -56,7 +56,7 @@
56 56 --s->num_free;
57 57 __set_bit(rval, s->bits);
58 58 }
59   - up(&numspace_mutex);
  59 + mutex_unlock(&numspace_mutex);
60 60  
61 61 return rval;
62 62 }
63 63  
... ... @@ -66,11 +66,11 @@
66 66 int old_val;
67 67  
68 68 if (number >= 0) {
69   - down(&numspace_mutex);
  69 + mutex_lock(&numspace_mutex);
70 70 old_val = __test_and_clear_bit(number, s->bits);
71 71 if (old_val)
72 72 ++s->num_free;
73   - up(&numspace_mutex);
  73 + mutex_unlock(&numspace_mutex);
74 74 }
75 75 }
76 76  
fs/super.c
... ... @@ -37,6 +37,7 @@
37 37 #include <linux/writeback.h> /* for the emergency remount stuff */
38 38 #include <linux/idr.h>
39 39 #include <linux/kobject.h>
  40 +#include <linux/mutex.h>
40 41 #include <asm/uaccess.h>
41 42  
42 43  
43 44  
... ... @@ -380,9 +381,9 @@
380 381 void sync_filesystems(int wait)
381 382 {
382 383 struct super_block *sb;
383   - static DECLARE_MUTEX(mutex);
  384 + static DEFINE_MUTEX(mutex);
384 385  
385   - down(&mutex); /* Could be down_interruptible */
  386 + mutex_lock(&mutex); /* Could be down_interruptible */
386 387 spin_lock(&sb_lock);
387 388 list_for_each_entry(sb, &super_blocks, s_list) {
388 389 if (!sb->s_op->sync_fs)
... ... @@ -411,7 +412,7 @@
411 412 goto restart;
412 413 }
413 414 spin_unlock(&sb_lock);
414   - up(&mutex);
  415 + mutex_unlock(&mutex);
415 416 }
416 417  
417 418 /**