Commit 01a05b337a5b647909e1d6670f57e7202318a5fb

Authored by Al Viro
1 parent 35cf7ba0b4

new helper: iterate_supers()

... and switch the simple "loop over superblocks and do something"
loops to it.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Showing 6 changed files with 56 additions and 82 deletions
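The helper takes a callback plus an opaque cookie, so each caller only has to supply the per-superblock work; the reference counting and s_umount locking move into one place. As a rough sketch of the resulting calling convention (not part of this commit; count_rw_super and nr_rw_supers are made-up names), a new user would look like this:

#include <linux/fs.h>

/* Hypothetical callback: count superblocks that are mounted read-write. */
static void count_rw_super(struct super_block *sb, void *arg)
{
	int *nr_rw = arg;

	/* Called with sb->s_umount held for read and sb->s_root still set. */
	if (!(sb->s_flags & MS_RDONLY))
		(*nr_rw)++;
}

/* Hypothetical caller: returns how many superblocks are writable. */
static int nr_rw_supers(void)
{
	int nr_rw = 0;

	iterate_supers(count_rw_super, &nr_rw);
	return nr_rw;
}

The conversions below all have this shape: the body of the old open-coded loop becomes the callback, and whatever it needs (the quota type, the sync wait flag) is passed by address through the void * argument.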

fs/buffer.c
@@ -560,25 +560,17 @@
 	return err;
 }

-static void do_thaw_all(struct work_struct *work)
+static void do_thaw_one(struct super_block *sb, void *unused)
 {
-	struct super_block *sb, *n;
 	char b[BDEVNAME_SIZE];
+	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
+		printk(KERN_WARNING "Emergency Thaw on %s\n",
+		       bdevname(sb->s_bdev, b));
+}

-	spin_lock(&sb_lock);
-	list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
-			continue;
-		sb->s_count++;
-		spin_unlock(&sb_lock);
-		down_read(&sb->s_umount);
-		while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
-			printk(KERN_WARNING "Emergency Thaw on %s\n",
-				bdevname(sb->s_bdev, b));
-		up_read(&sb->s_umount);
-		spin_lock(&sb_lock);
-	}
-	spin_unlock(&sb_lock);
+static void do_thaw_all(struct work_struct *work)
+{
+	iterate_supers(do_thaw_one, NULL);
 	kfree(work);
 	printk(KERN_WARNING "Emergency Thaw complete\n");
 }

fs/drop_caches.c
@@ -8,12 +8,11 @@
 #include <linux/writeback.h>
 #include <linux/sysctl.h>
 #include <linux/gfp.h>
-#include "internal.h"

 /* A global variable is a bit ugly, but it keeps the code simple */
 int sysctl_drop_caches;

-static void drop_pagecache_sb(struct super_block *sb)
+static void drop_pagecache_sb(struct super_block *sb, void *unused)
 {
 	struct inode *inode, *toput_inode = NULL;

@@ -34,26 +33,6 @@
 	iput(toput_inode);
 }

-static void drop_pagecache(void)
-{
-	struct super_block *sb, *n;
-
-	spin_lock(&sb_lock);
-	list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
-			continue;
-		sb->s_count++;
-		spin_unlock(&sb_lock);
-		down_read(&sb->s_umount);
-		if (sb->s_root)
-			drop_pagecache_sb(sb);
-		up_read(&sb->s_umount);
-		spin_lock(&sb_lock);
-		__put_super(sb);
-	}
-	spin_unlock(&sb_lock);
-}
-
 static void drop_slab(void)
 {
 	int nr_objects;
@@ -69,7 +48,7 @@
 	proc_dointvec_minmax(table, write, buffer, length, ppos);
 	if (write) {
 		if (sysctl_drop_caches & 1)
-			drop_pagecache();
+			iterate_supers(drop_pagecache_sb, NULL);
 		if (sysctl_drop_caches & 2)
 			drop_slab();
 	}

fs/quota/quota.c
@@ -18,7 +18,6 @@
 #include <linux/quotaops.h>
 #include <linux/types.h>
 #include <linux/writeback.h>
-#include "../internal.h"

 static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
 				     qid_t id)
@@ -46,36 +45,22 @@
 	return security_quotactl(cmd, type, id, sb);
 }

+static void quota_sync_one(struct super_block *sb, void *arg)
+{
+	if (sb->s_qcop && sb->s_qcop->quota_sync)
+		sb->s_qcop->quota_sync(sb, *(int *)arg, 1);
+}
+
 static int quota_sync_all(int type)
 {
-	struct super_block *sb, *n;
 	int ret;

 	if (type >= MAXQUOTAS)
 		return -EINVAL;
 	ret = security_quotactl(Q_SYNC, type, 0, NULL);
-	if (ret)
-		return ret;
-
-	spin_lock(&sb_lock);
-	list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
-			continue;
-		if (!sb->s_qcop || !sb->s_qcop->quota_sync)
-			continue;
-
-		sb->s_count++;
-		spin_unlock(&sb_lock);
-		down_read(&sb->s_umount);
-		if (sb->s_root)
-			sb->s_qcop->quota_sync(sb, type, 1);
-		up_read(&sb->s_umount);
-		spin_lock(&sb_lock);
-		__put_super(sb);
-	}
-	spin_unlock(&sb_lock);
-
-	return 0;
+	if (!ret)
+		iterate_supers(quota_sync_one, &type);
+	return ret;
 }

 static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,

fs/super.c
@@ -392,6 +392,36 @@
 }

 /**
+ * iterate_supers - call function for all active superblocks
+ * @f: function to call
+ * @arg: argument to pass to it
+ *
+ * Scans the superblock list and calls given function, passing it
+ * locked superblock and given argument.
+ */
+void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
+{
+	struct super_block *sb, *n;
+
+	spin_lock(&sb_lock);
+	list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
+		if (list_empty(&sb->s_instances))
+			continue;
+		sb->s_count++;
+		spin_unlock(&sb_lock);
+
+		down_read(&sb->s_umount);
+		if (sb->s_root)
+			f(sb, arg);
+		up_read(&sb->s_umount);
+
+		spin_lock(&sb_lock);
+		__put_super(sb);
+	}
+	spin_unlock(&sb_lock);
+}
+
+/**
  * get_super - get the superblock of a device
  * @bdev: device to get the superblock for
  *
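Note what the helper guarantees to its callback: a passive reference (sb->s_count) pins the superblock, ->s_umount is held for read across the call, and superblocks whose ->s_root is already gone are skipped, so the old per-caller checks disappear. Since the callback returns void, any per-superblock result has to be accumulated through the cookie. A minimal sketch of that pattern (hypothetical names, not in the tree):

#include <linux/fs.h>

/* Hypothetical cookie for collecting results across all superblocks. */
struct super_tally {
	int total;
	int on_bdev;
};

static void tally_one_super(struct super_block *sb, void *arg)
{
	struct super_tally *t = arg;

	/* The superblock is pinned and ->s_umount is read-held while we run. */
	t->total++;
	if (sb->s_bdev)
		t->on_bdev++;
}

static void tally_supers(struct super_tally *t)
{
	t->total = 0;
	t->on_bdev = 0;
	iterate_supers(tally_one_super, t);
}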

fs/sync.c
@@ -77,31 +77,18 @@
 }
 EXPORT_SYMBOL_GPL(sync_filesystem);

+static void sync_one_sb(struct super_block *sb, void *arg)
+{
+	if (!(sb->s_flags & MS_RDONLY) && sb->s_bdi)
+		__sync_filesystem(sb, *(int *)arg);
+}
 /*
  * Sync all the data for all the filesystems (called by sys_sync() and
  * emergency sync)
  */
 static void sync_filesystems(int wait)
 {
-	struct super_block *sb, *n;
-
-	spin_lock(&sb_lock);
-	list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
-			continue;
-		sb->s_count++;
-		spin_unlock(&sb_lock);
-
-		down_read(&sb->s_umount);
-		if (!(sb->s_flags & MS_RDONLY) && sb->s_root && sb->s_bdi)
-			__sync_filesystem(sb, wait);
-		up_read(&sb->s_umount);
-
-		/* restart only when sb is no longer on the list */
-		spin_lock(&sb_lock);
-		__put_super(sb);
-	}
-	spin_unlock(&sb_lock);
+	iterate_supers(sync_one_sb, &wait);
 }

 /*

include/linux/fs.h
@@ -2324,6 +2324,7 @@
 extern struct super_block *get_active_super(struct block_device *bdev);
 extern struct super_block *user_get_super(dev_t);
 extern void drop_super(struct super_block *sb);
+extern void iterate_supers(void (*)(struct super_block *, void *), void *);

 extern int dcache_dir_open(struct inode *, struct file *);
 extern int dcache_dir_close(struct inode *, struct file *);