Commit 30ac0683dd452ba273c8db92a74d8cf7aef981d8

Authored by Christoph Hellwig
Committed by Alex Elder
1 parent 0c3dc2b02a

xfs: cleanup dmapi macros in the umount path

Stop the flag saving as we never mangle those in the unmount path, and
hide all the weird arguments to the dmapi code inside the
XFS_SEND_PREUNMOUNT / XFS_SEND_UNMOUNT macros.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <david@fromorbit.com>
Signed-off-by: Alex Elder <aelder@sgi.com>

Showing 2 changed files with 21 additions and 24 deletions Inline Diff

fs/xfs/linux-2.6/xfs_super.c
1 /* 1 /*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc. 2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved. 3 * All Rights Reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as 6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 * 8 *
9 * This program is distributed in the hope that it would be useful, 9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation, 15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */ 17 */
18 #include "xfs.h" 18 #include "xfs.h"
19 #include "xfs_bit.h" 19 #include "xfs_bit.h"
20 #include "xfs_log.h" 20 #include "xfs_log.h"
21 #include "xfs_inum.h" 21 #include "xfs_inum.h"
22 #include "xfs_trans.h" 22 #include "xfs_trans.h"
23 #include "xfs_sb.h" 23 #include "xfs_sb.h"
24 #include "xfs_ag.h" 24 #include "xfs_ag.h"
25 #include "xfs_dir2.h" 25 #include "xfs_dir2.h"
26 #include "xfs_alloc.h" 26 #include "xfs_alloc.h"
27 #include "xfs_dmapi.h" 27 #include "xfs_dmapi.h"
28 #include "xfs_quota.h" 28 #include "xfs_quota.h"
29 #include "xfs_mount.h" 29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h" 30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h" 31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h" 32 #include "xfs_ialloc_btree.h"
33 #include "xfs_dir2_sf.h" 33 #include "xfs_dir2_sf.h"
34 #include "xfs_attr_sf.h" 34 #include "xfs_attr_sf.h"
35 #include "xfs_dinode.h" 35 #include "xfs_dinode.h"
36 #include "xfs_inode.h" 36 #include "xfs_inode.h"
37 #include "xfs_btree.h" 37 #include "xfs_btree.h"
38 #include "xfs_btree_trace.h" 38 #include "xfs_btree_trace.h"
39 #include "xfs_ialloc.h" 39 #include "xfs_ialloc.h"
40 #include "xfs_bmap.h" 40 #include "xfs_bmap.h"
41 #include "xfs_rtalloc.h" 41 #include "xfs_rtalloc.h"
42 #include "xfs_error.h" 42 #include "xfs_error.h"
43 #include "xfs_itable.h" 43 #include "xfs_itable.h"
44 #include "xfs_fsops.h" 44 #include "xfs_fsops.h"
45 #include "xfs_rw.h" 45 #include "xfs_rw.h"
46 #include "xfs_attr.h" 46 #include "xfs_attr.h"
47 #include "xfs_buf_item.h" 47 #include "xfs_buf_item.h"
48 #include "xfs_utils.h" 48 #include "xfs_utils.h"
49 #include "xfs_vnodeops.h" 49 #include "xfs_vnodeops.h"
50 #include "xfs_version.h" 50 #include "xfs_version.h"
51 #include "xfs_log_priv.h" 51 #include "xfs_log_priv.h"
52 #include "xfs_trans_priv.h" 52 #include "xfs_trans_priv.h"
53 #include "xfs_filestream.h" 53 #include "xfs_filestream.h"
54 #include "xfs_da_btree.h" 54 #include "xfs_da_btree.h"
55 #include "xfs_dir2_trace.h" 55 #include "xfs_dir2_trace.h"
56 #include "xfs_extfree_item.h" 56 #include "xfs_extfree_item.h"
57 #include "xfs_mru_cache.h" 57 #include "xfs_mru_cache.h"
58 #include "xfs_inode_item.h" 58 #include "xfs_inode_item.h"
59 #include "xfs_sync.h" 59 #include "xfs_sync.h"
60 60
61 #include <linux/namei.h> 61 #include <linux/namei.h>
62 #include <linux/init.h> 62 #include <linux/init.h>
63 #include <linux/mount.h> 63 #include <linux/mount.h>
64 #include <linux/mempool.h> 64 #include <linux/mempool.h>
65 #include <linux/writeback.h> 65 #include <linux/writeback.h>
66 #include <linux/kthread.h> 66 #include <linux/kthread.h>
67 #include <linux/freezer.h> 67 #include <linux/freezer.h>
68 #include <linux/parser.h> 68 #include <linux/parser.h>
69 69
70 static const struct super_operations xfs_super_operations; 70 static const struct super_operations xfs_super_operations;
71 static kmem_zone_t *xfs_ioend_zone; 71 static kmem_zone_t *xfs_ioend_zone;
72 mempool_t *xfs_ioend_pool; 72 mempool_t *xfs_ioend_pool;
73 73
/*
 * XFS mount option name strings, matched against comma-separated
 * tokens in xfs_parseargs() below.
 */
#define MNTOPT_LOGBUFS	"logbufs"	/* number of XFS log buffers */
#define MNTOPT_LOGBSIZE	"logbsize"	/* size of XFS log buffers */
#define MNTOPT_LOGDEV	"logdev"	/* log device */
#define MNTOPT_RTDEV	"rtdev"		/* realtime I/O device */
#define MNTOPT_BIOSIZE	"biosize"	/* log2 of preferred buffered io size */
#define MNTOPT_WSYNC	"wsync"		/* safe-mode nfs compatible mount */
#define MNTOPT_NOALIGN	"noalign"	/* turn off stripe alignment */
#define MNTOPT_SWALLOC	"swalloc"	/* turn on stripe width allocation */
#define MNTOPT_SUNIT	"sunit"		/* data volume stripe unit */
#define MNTOPT_SWIDTH	"swidth"	/* data volume stripe width */
#define MNTOPT_NOUUID	"nouuid"	/* ignore filesystem UUID */
#define MNTOPT_MTPT	"mtpt"		/* filesystem mount point */
#define MNTOPT_GRPID	"grpid"		/* group-ID from parent directory */
#define MNTOPT_NOGRPID	"nogrpid"	/* group-ID from current process */
#define MNTOPT_BSDGROUPS	"bsdgroups"	/* group-ID from parent directory */
#define MNTOPT_SYSVGROUPS	"sysvgroups"	/* group-ID from current process */
#define MNTOPT_ALLOCSIZE	"allocsize"	/* preferred allocation size */
#define MNTOPT_NORECOVERY	"norecovery"	/* don't run XFS recovery */
#define MNTOPT_BARRIER	"barrier"	/* use writer barriers for log write and
					 * unwritten extent conversion */
#define MNTOPT_NOBARRIER	"nobarrier"	/* .. disable */
#define MNTOPT_OSYNCISOSYNC	"osyncisosync"	/* o_sync is REALLY o_sync */
#define MNTOPT_64BITINODE	"inode64"	/* inodes can be allocated anywhere */
#define MNTOPT_IKEEP	"ikeep"		/* do not free empty inode clusters */
#define MNTOPT_NOIKEEP	"noikeep"	/* free empty inode clusters */
#define MNTOPT_LARGEIO	"largeio"	/* report large I/O sizes in stat() */
#define MNTOPT_NOLARGEIO	"nolargeio"	/* do not report large I/O sizes
					 * in stat(). */
#define MNTOPT_ATTR2	"attr2"		/* do use attr2 attribute format */
#define MNTOPT_NOATTR2	"noattr2"	/* do not use attr2 attribute format */
#define MNTOPT_FILESTREAM	"filestreams"	/* use filestreams allocator */
#define MNTOPT_QUOTA	"quota"		/* disk quotas (user) */
#define MNTOPT_NOQUOTA	"noquota"	/* no quotas */
#define MNTOPT_USRQUOTA	"usrquota"	/* user quota enabled */
#define MNTOPT_GRPQUOTA	"grpquota"	/* group quota enabled */
#define MNTOPT_PRJQUOTA	"prjquota"	/* project quota enabled */
#define MNTOPT_UQUOTA	"uquota"	/* user quota (IRIX variant) */
#define MNTOPT_GQUOTA	"gquota"	/* group quota (IRIX variant) */
#define MNTOPT_PQUOTA	"pquota"	/* project quota (IRIX variant) */
#define MNTOPT_UQUOTANOENF	"uqnoenforce"	/* user quota limit enforcement */
#define MNTOPT_GQUOTANOENF	"gqnoenforce"	/* group quota limit enforcement */
#define MNTOPT_PQUOTANOENF	"pqnoenforce"	/* project quota limit enforcement */
#define MNTOPT_QUOTANOENF	"qnoenforce"	/* same as uqnoenforce */
#define MNTOPT_DMAPI	"dmapi"		/* DMI enabled (DMAPI / XDSM) */
#define MNTOPT_XDSM	"xdsm"		/* DMI enabled (DMAPI / XDSM) */
#define MNTOPT_DMI	"dmi"		/* DMI enabled (DMAPI / XDSM) */
120 120
/*
 * Table driven mount option parser.
 *
 * Currently only used for remount, but it will be used for mount
 * in the future, too.
 */
/* Token ids produced by the remount option match table below. */
enum {
	Opt_barrier,	/* "barrier": enable write barriers */
	Opt_nobarrier,	/* "nobarrier": disable write barriers */
	Opt_err		/* unrecognised option */
};
130 130
131 static const match_table_t tokens = { 131 static const match_table_t tokens = {
132 {Opt_barrier, "barrier"}, 132 {Opt_barrier, "barrier"},
133 {Opt_nobarrier, "nobarrier"}, 133 {Opt_nobarrier, "nobarrier"},
134 {Opt_err, NULL} 134 {Opt_err, NULL}
135 }; 135 };
136 136
137 137
138 STATIC unsigned long 138 STATIC unsigned long
139 suffix_strtoul(char *s, char **endp, unsigned int base) 139 suffix_strtoul(char *s, char **endp, unsigned int base)
140 { 140 {
141 int last, shift_left_factor = 0; 141 int last, shift_left_factor = 0;
142 char *value = s; 142 char *value = s;
143 143
144 last = strlen(value) - 1; 144 last = strlen(value) - 1;
145 if (value[last] == 'K' || value[last] == 'k') { 145 if (value[last] == 'K' || value[last] == 'k') {
146 shift_left_factor = 10; 146 shift_left_factor = 10;
147 value[last] = '\0'; 147 value[last] = '\0';
148 } 148 }
149 if (value[last] == 'M' || value[last] == 'm') { 149 if (value[last] == 'M' || value[last] == 'm') {
150 shift_left_factor = 20; 150 shift_left_factor = 20;
151 value[last] = '\0'; 151 value[last] = '\0';
152 } 152 }
153 if (value[last] == 'G' || value[last] == 'g') { 153 if (value[last] == 'G' || value[last] == 'g') {
154 shift_left_factor = 30; 154 shift_left_factor = 30;
155 value[last] = '\0'; 155 value[last] = '\0';
156 } 156 }
157 157
158 return simple_strtoul((const char *)s, endp, base) << shift_left_factor; 158 return simple_strtoul((const char *)s, endp, base) << shift_left_factor;
159 } 159 }
160 160
161 /* 161 /*
162 * This function fills in xfs_mount_t fields based on mount args. 162 * This function fills in xfs_mount_t fields based on mount args.
163 * Note: the superblock has _not_ yet been read in. 163 * Note: the superblock has _not_ yet been read in.
164 * 164 *
165 * Note that this function leaks the various device name allocations on 165 * Note that this function leaks the various device name allocations on
166 * failure. The caller takes care of them. 166 * failure. The caller takes care of them.
167 */ 167 */
168 STATIC int 168 STATIC int
169 xfs_parseargs( 169 xfs_parseargs(
170 struct xfs_mount *mp, 170 struct xfs_mount *mp,
171 char *options, 171 char *options,
172 char **mtpt) 172 char **mtpt)
173 { 173 {
174 struct super_block *sb = mp->m_super; 174 struct super_block *sb = mp->m_super;
175 char *this_char, *value, *eov; 175 char *this_char, *value, *eov;
176 int dsunit = 0; 176 int dsunit = 0;
177 int dswidth = 0; 177 int dswidth = 0;
178 int iosize = 0; 178 int iosize = 0;
179 int dmapi_implies_ikeep = 1; 179 int dmapi_implies_ikeep = 1;
180 __uint8_t iosizelog = 0; 180 __uint8_t iosizelog = 0;
181 181
182 /* 182 /*
183 * Copy binary VFS mount flags we are interested in. 183 * Copy binary VFS mount flags we are interested in.
184 */ 184 */
185 if (sb->s_flags & MS_RDONLY) 185 if (sb->s_flags & MS_RDONLY)
186 mp->m_flags |= XFS_MOUNT_RDONLY; 186 mp->m_flags |= XFS_MOUNT_RDONLY;
187 if (sb->s_flags & MS_DIRSYNC) 187 if (sb->s_flags & MS_DIRSYNC)
188 mp->m_flags |= XFS_MOUNT_DIRSYNC; 188 mp->m_flags |= XFS_MOUNT_DIRSYNC;
189 if (sb->s_flags & MS_SYNCHRONOUS) 189 if (sb->s_flags & MS_SYNCHRONOUS)
190 mp->m_flags |= XFS_MOUNT_WSYNC; 190 mp->m_flags |= XFS_MOUNT_WSYNC;
191 191
192 /* 192 /*
193 * Set some default flags that could be cleared by the mount option 193 * Set some default flags that could be cleared by the mount option
194 * parsing. 194 * parsing.
195 */ 195 */
196 mp->m_flags |= XFS_MOUNT_BARRIER; 196 mp->m_flags |= XFS_MOUNT_BARRIER;
197 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; 197 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
198 mp->m_flags |= XFS_MOUNT_SMALL_INUMS; 198 mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
199 199
200 /* 200 /*
201 * These can be overridden by the mount option parsing. 201 * These can be overridden by the mount option parsing.
202 */ 202 */
203 mp->m_logbufs = -1; 203 mp->m_logbufs = -1;
204 mp->m_logbsize = -1; 204 mp->m_logbsize = -1;
205 205
206 if (!options) 206 if (!options)
207 goto done; 207 goto done;
208 208
209 while ((this_char = strsep(&options, ",")) != NULL) { 209 while ((this_char = strsep(&options, ",")) != NULL) {
210 if (!*this_char) 210 if (!*this_char)
211 continue; 211 continue;
212 if ((value = strchr(this_char, '=')) != NULL) 212 if ((value = strchr(this_char, '=')) != NULL)
213 *value++ = 0; 213 *value++ = 0;
214 214
215 if (!strcmp(this_char, MNTOPT_LOGBUFS)) { 215 if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
216 if (!value || !*value) { 216 if (!value || !*value) {
217 cmn_err(CE_WARN, 217 cmn_err(CE_WARN,
218 "XFS: %s option requires an argument", 218 "XFS: %s option requires an argument",
219 this_char); 219 this_char);
220 return EINVAL; 220 return EINVAL;
221 } 221 }
222 mp->m_logbufs = simple_strtoul(value, &eov, 10); 222 mp->m_logbufs = simple_strtoul(value, &eov, 10);
223 } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) { 223 } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
224 if (!value || !*value) { 224 if (!value || !*value) {
225 cmn_err(CE_WARN, 225 cmn_err(CE_WARN,
226 "XFS: %s option requires an argument", 226 "XFS: %s option requires an argument",
227 this_char); 227 this_char);
228 return EINVAL; 228 return EINVAL;
229 } 229 }
230 mp->m_logbsize = suffix_strtoul(value, &eov, 10); 230 mp->m_logbsize = suffix_strtoul(value, &eov, 10);
231 } else if (!strcmp(this_char, MNTOPT_LOGDEV)) { 231 } else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
232 if (!value || !*value) { 232 if (!value || !*value) {
233 cmn_err(CE_WARN, 233 cmn_err(CE_WARN,
234 "XFS: %s option requires an argument", 234 "XFS: %s option requires an argument",
235 this_char); 235 this_char);
236 return EINVAL; 236 return EINVAL;
237 } 237 }
238 mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL); 238 mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
239 if (!mp->m_logname) 239 if (!mp->m_logname)
240 return ENOMEM; 240 return ENOMEM;
241 } else if (!strcmp(this_char, MNTOPT_MTPT)) { 241 } else if (!strcmp(this_char, MNTOPT_MTPT)) {
242 if (!value || !*value) { 242 if (!value || !*value) {
243 cmn_err(CE_WARN, 243 cmn_err(CE_WARN,
244 "XFS: %s option requires an argument", 244 "XFS: %s option requires an argument",
245 this_char); 245 this_char);
246 return EINVAL; 246 return EINVAL;
247 } 247 }
248 *mtpt = kstrndup(value, MAXNAMELEN, GFP_KERNEL); 248 *mtpt = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
249 if (!*mtpt) 249 if (!*mtpt)
250 return ENOMEM; 250 return ENOMEM;
251 } else if (!strcmp(this_char, MNTOPT_RTDEV)) { 251 } else if (!strcmp(this_char, MNTOPT_RTDEV)) {
252 if (!value || !*value) { 252 if (!value || !*value) {
253 cmn_err(CE_WARN, 253 cmn_err(CE_WARN,
254 "XFS: %s option requires an argument", 254 "XFS: %s option requires an argument",
255 this_char); 255 this_char);
256 return EINVAL; 256 return EINVAL;
257 } 257 }
258 mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL); 258 mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
259 if (!mp->m_rtname) 259 if (!mp->m_rtname)
260 return ENOMEM; 260 return ENOMEM;
261 } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) { 261 } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
262 if (!value || !*value) { 262 if (!value || !*value) {
263 cmn_err(CE_WARN, 263 cmn_err(CE_WARN,
264 "XFS: %s option requires an argument", 264 "XFS: %s option requires an argument",
265 this_char); 265 this_char);
266 return EINVAL; 266 return EINVAL;
267 } 267 }
268 iosize = simple_strtoul(value, &eov, 10); 268 iosize = simple_strtoul(value, &eov, 10);
269 iosizelog = ffs(iosize) - 1; 269 iosizelog = ffs(iosize) - 1;
270 } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) { 270 } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
271 if (!value || !*value) { 271 if (!value || !*value) {
272 cmn_err(CE_WARN, 272 cmn_err(CE_WARN,
273 "XFS: %s option requires an argument", 273 "XFS: %s option requires an argument",
274 this_char); 274 this_char);
275 return EINVAL; 275 return EINVAL;
276 } 276 }
277 iosize = suffix_strtoul(value, &eov, 10); 277 iosize = suffix_strtoul(value, &eov, 10);
278 iosizelog = ffs(iosize) - 1; 278 iosizelog = ffs(iosize) - 1;
279 } else if (!strcmp(this_char, MNTOPT_GRPID) || 279 } else if (!strcmp(this_char, MNTOPT_GRPID) ||
280 !strcmp(this_char, MNTOPT_BSDGROUPS)) { 280 !strcmp(this_char, MNTOPT_BSDGROUPS)) {
281 mp->m_flags |= XFS_MOUNT_GRPID; 281 mp->m_flags |= XFS_MOUNT_GRPID;
282 } else if (!strcmp(this_char, MNTOPT_NOGRPID) || 282 } else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
283 !strcmp(this_char, MNTOPT_SYSVGROUPS)) { 283 !strcmp(this_char, MNTOPT_SYSVGROUPS)) {
284 mp->m_flags &= ~XFS_MOUNT_GRPID; 284 mp->m_flags &= ~XFS_MOUNT_GRPID;
285 } else if (!strcmp(this_char, MNTOPT_WSYNC)) { 285 } else if (!strcmp(this_char, MNTOPT_WSYNC)) {
286 mp->m_flags |= XFS_MOUNT_WSYNC; 286 mp->m_flags |= XFS_MOUNT_WSYNC;
287 } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) { 287 } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) {
288 mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC; 288 mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC;
289 } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) { 289 } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
290 mp->m_flags |= XFS_MOUNT_NORECOVERY; 290 mp->m_flags |= XFS_MOUNT_NORECOVERY;
291 } else if (!strcmp(this_char, MNTOPT_NOALIGN)) { 291 } else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
292 mp->m_flags |= XFS_MOUNT_NOALIGN; 292 mp->m_flags |= XFS_MOUNT_NOALIGN;
293 } else if (!strcmp(this_char, MNTOPT_SWALLOC)) { 293 } else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
294 mp->m_flags |= XFS_MOUNT_SWALLOC; 294 mp->m_flags |= XFS_MOUNT_SWALLOC;
295 } else if (!strcmp(this_char, MNTOPT_SUNIT)) { 295 } else if (!strcmp(this_char, MNTOPT_SUNIT)) {
296 if (!value || !*value) { 296 if (!value || !*value) {
297 cmn_err(CE_WARN, 297 cmn_err(CE_WARN,
298 "XFS: %s option requires an argument", 298 "XFS: %s option requires an argument",
299 this_char); 299 this_char);
300 return EINVAL; 300 return EINVAL;
301 } 301 }
302 dsunit = simple_strtoul(value, &eov, 10); 302 dsunit = simple_strtoul(value, &eov, 10);
303 } else if (!strcmp(this_char, MNTOPT_SWIDTH)) { 303 } else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
304 if (!value || !*value) { 304 if (!value || !*value) {
305 cmn_err(CE_WARN, 305 cmn_err(CE_WARN,
306 "XFS: %s option requires an argument", 306 "XFS: %s option requires an argument",
307 this_char); 307 this_char);
308 return EINVAL; 308 return EINVAL;
309 } 309 }
310 dswidth = simple_strtoul(value, &eov, 10); 310 dswidth = simple_strtoul(value, &eov, 10);
311 } else if (!strcmp(this_char, MNTOPT_64BITINODE)) { 311 } else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
312 mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; 312 mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
313 #if !XFS_BIG_INUMS 313 #if !XFS_BIG_INUMS
314 cmn_err(CE_WARN, 314 cmn_err(CE_WARN,
315 "XFS: %s option not allowed on this system", 315 "XFS: %s option not allowed on this system",
316 this_char); 316 this_char);
317 return EINVAL; 317 return EINVAL;
318 #endif 318 #endif
319 } else if (!strcmp(this_char, MNTOPT_NOUUID)) { 319 } else if (!strcmp(this_char, MNTOPT_NOUUID)) {
320 mp->m_flags |= XFS_MOUNT_NOUUID; 320 mp->m_flags |= XFS_MOUNT_NOUUID;
321 } else if (!strcmp(this_char, MNTOPT_BARRIER)) { 321 } else if (!strcmp(this_char, MNTOPT_BARRIER)) {
322 mp->m_flags |= XFS_MOUNT_BARRIER; 322 mp->m_flags |= XFS_MOUNT_BARRIER;
323 } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) { 323 } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
324 mp->m_flags &= ~XFS_MOUNT_BARRIER; 324 mp->m_flags &= ~XFS_MOUNT_BARRIER;
325 } else if (!strcmp(this_char, MNTOPT_IKEEP)) { 325 } else if (!strcmp(this_char, MNTOPT_IKEEP)) {
326 mp->m_flags |= XFS_MOUNT_IKEEP; 326 mp->m_flags |= XFS_MOUNT_IKEEP;
327 } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) { 327 } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
328 dmapi_implies_ikeep = 0; 328 dmapi_implies_ikeep = 0;
329 mp->m_flags &= ~XFS_MOUNT_IKEEP; 329 mp->m_flags &= ~XFS_MOUNT_IKEEP;
330 } else if (!strcmp(this_char, MNTOPT_LARGEIO)) { 330 } else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
331 mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE; 331 mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
332 } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) { 332 } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
333 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; 333 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
334 } else if (!strcmp(this_char, MNTOPT_ATTR2)) { 334 } else if (!strcmp(this_char, MNTOPT_ATTR2)) {
335 mp->m_flags |= XFS_MOUNT_ATTR2; 335 mp->m_flags |= XFS_MOUNT_ATTR2;
336 } else if (!strcmp(this_char, MNTOPT_NOATTR2)) { 336 } else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
337 mp->m_flags &= ~XFS_MOUNT_ATTR2; 337 mp->m_flags &= ~XFS_MOUNT_ATTR2;
338 mp->m_flags |= XFS_MOUNT_NOATTR2; 338 mp->m_flags |= XFS_MOUNT_NOATTR2;
339 } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) { 339 } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
340 mp->m_flags |= XFS_MOUNT_FILESTREAMS; 340 mp->m_flags |= XFS_MOUNT_FILESTREAMS;
341 } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) { 341 } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
342 mp->m_qflags &= ~(XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE | 342 mp->m_qflags &= ~(XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
343 XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | 343 XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
344 XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE | 344 XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
345 XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD); 345 XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD);
346 } else if (!strcmp(this_char, MNTOPT_QUOTA) || 346 } else if (!strcmp(this_char, MNTOPT_QUOTA) ||
347 !strcmp(this_char, MNTOPT_UQUOTA) || 347 !strcmp(this_char, MNTOPT_UQUOTA) ||
348 !strcmp(this_char, MNTOPT_USRQUOTA)) { 348 !strcmp(this_char, MNTOPT_USRQUOTA)) {
349 mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE | 349 mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
350 XFS_UQUOTA_ENFD); 350 XFS_UQUOTA_ENFD);
351 } else if (!strcmp(this_char, MNTOPT_QUOTANOENF) || 351 } else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
352 !strcmp(this_char, MNTOPT_UQUOTANOENF)) { 352 !strcmp(this_char, MNTOPT_UQUOTANOENF)) {
353 mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE); 353 mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
354 mp->m_qflags &= ~XFS_UQUOTA_ENFD; 354 mp->m_qflags &= ~XFS_UQUOTA_ENFD;
355 } else if (!strcmp(this_char, MNTOPT_PQUOTA) || 355 } else if (!strcmp(this_char, MNTOPT_PQUOTA) ||
356 !strcmp(this_char, MNTOPT_PRJQUOTA)) { 356 !strcmp(this_char, MNTOPT_PRJQUOTA)) {
357 mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE | 357 mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
358 XFS_OQUOTA_ENFD); 358 XFS_OQUOTA_ENFD);
359 } else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) { 359 } else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) {
360 mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE); 360 mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
361 mp->m_qflags &= ~XFS_OQUOTA_ENFD; 361 mp->m_qflags &= ~XFS_OQUOTA_ENFD;
362 } else if (!strcmp(this_char, MNTOPT_GQUOTA) || 362 } else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
363 !strcmp(this_char, MNTOPT_GRPQUOTA)) { 363 !strcmp(this_char, MNTOPT_GRPQUOTA)) {
364 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | 364 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
365 XFS_OQUOTA_ENFD); 365 XFS_OQUOTA_ENFD);
366 } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { 366 } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
367 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); 367 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
368 mp->m_qflags &= ~XFS_OQUOTA_ENFD; 368 mp->m_qflags &= ~XFS_OQUOTA_ENFD;
369 } else if (!strcmp(this_char, MNTOPT_DMAPI)) { 369 } else if (!strcmp(this_char, MNTOPT_DMAPI)) {
370 mp->m_flags |= XFS_MOUNT_DMAPI; 370 mp->m_flags |= XFS_MOUNT_DMAPI;
371 } else if (!strcmp(this_char, MNTOPT_XDSM)) { 371 } else if (!strcmp(this_char, MNTOPT_XDSM)) {
372 mp->m_flags |= XFS_MOUNT_DMAPI; 372 mp->m_flags |= XFS_MOUNT_DMAPI;
373 } else if (!strcmp(this_char, MNTOPT_DMI)) { 373 } else if (!strcmp(this_char, MNTOPT_DMI)) {
374 mp->m_flags |= XFS_MOUNT_DMAPI; 374 mp->m_flags |= XFS_MOUNT_DMAPI;
375 } else if (!strcmp(this_char, "ihashsize")) { 375 } else if (!strcmp(this_char, "ihashsize")) {
376 cmn_err(CE_WARN, 376 cmn_err(CE_WARN,
377 "XFS: ihashsize no longer used, option is deprecated."); 377 "XFS: ihashsize no longer used, option is deprecated.");
378 } else if (!strcmp(this_char, "osyncisdsync")) { 378 } else if (!strcmp(this_char, "osyncisdsync")) {
379 /* no-op, this is now the default */ 379 /* no-op, this is now the default */
380 cmn_err(CE_WARN, 380 cmn_err(CE_WARN,
381 "XFS: osyncisdsync is now the default, option is deprecated."); 381 "XFS: osyncisdsync is now the default, option is deprecated.");
382 } else if (!strcmp(this_char, "irixsgid")) { 382 } else if (!strcmp(this_char, "irixsgid")) {
383 cmn_err(CE_WARN, 383 cmn_err(CE_WARN,
384 "XFS: irixsgid is now a sysctl(2) variable, option is deprecated."); 384 "XFS: irixsgid is now a sysctl(2) variable, option is deprecated.");
385 } else { 385 } else {
386 cmn_err(CE_WARN, 386 cmn_err(CE_WARN,
387 "XFS: unknown mount option [%s].", this_char); 387 "XFS: unknown mount option [%s].", this_char);
388 return EINVAL; 388 return EINVAL;
389 } 389 }
390 } 390 }
391 391
392 /* 392 /*
393 * no recovery flag requires a read-only mount 393 * no recovery flag requires a read-only mount
394 */ 394 */
395 if ((mp->m_flags & XFS_MOUNT_NORECOVERY) && 395 if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
396 !(mp->m_flags & XFS_MOUNT_RDONLY)) { 396 !(mp->m_flags & XFS_MOUNT_RDONLY)) {
397 cmn_err(CE_WARN, "XFS: no-recovery mounts must be read-only."); 397 cmn_err(CE_WARN, "XFS: no-recovery mounts must be read-only.");
398 return EINVAL; 398 return EINVAL;
399 } 399 }
400 400
401 if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) { 401 if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
402 cmn_err(CE_WARN, 402 cmn_err(CE_WARN,
403 "XFS: sunit and swidth options incompatible with the noalign option"); 403 "XFS: sunit and swidth options incompatible with the noalign option");
404 return EINVAL; 404 return EINVAL;
405 } 405 }
406 406
407 #ifndef CONFIG_XFS_QUOTA 407 #ifndef CONFIG_XFS_QUOTA
408 if (XFS_IS_QUOTA_RUNNING(mp)) { 408 if (XFS_IS_QUOTA_RUNNING(mp)) {
409 cmn_err(CE_WARN, 409 cmn_err(CE_WARN,
410 "XFS: quota support not available in this kernel."); 410 "XFS: quota support not available in this kernel.");
411 return EINVAL; 411 return EINVAL;
412 } 412 }
413 #endif 413 #endif
414 414
415 if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) && 415 if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
416 (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) { 416 (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) {
417 cmn_err(CE_WARN, 417 cmn_err(CE_WARN,
418 "XFS: cannot mount with both project and group quota"); 418 "XFS: cannot mount with both project and group quota");
419 return EINVAL; 419 return EINVAL;
420 } 420 }
421 421
422 if ((mp->m_flags & XFS_MOUNT_DMAPI) && (!*mtpt || *mtpt[0] == '\0')) { 422 if ((mp->m_flags & XFS_MOUNT_DMAPI) && (!*mtpt || *mtpt[0] == '\0')) {
423 printk("XFS: %s option needs the mount point option as well\n", 423 printk("XFS: %s option needs the mount point option as well\n",
424 MNTOPT_DMAPI); 424 MNTOPT_DMAPI);
425 return EINVAL; 425 return EINVAL;
426 } 426 }
427 427
428 if ((dsunit && !dswidth) || (!dsunit && dswidth)) { 428 if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
429 cmn_err(CE_WARN, 429 cmn_err(CE_WARN,
430 "XFS: sunit and swidth must be specified together"); 430 "XFS: sunit and swidth must be specified together");
431 return EINVAL; 431 return EINVAL;
432 } 432 }
433 433
434 if (dsunit && (dswidth % dsunit != 0)) { 434 if (dsunit && (dswidth % dsunit != 0)) {
435 cmn_err(CE_WARN, 435 cmn_err(CE_WARN,
436 "XFS: stripe width (%d) must be a multiple of the stripe unit (%d)", 436 "XFS: stripe width (%d) must be a multiple of the stripe unit (%d)",
437 dswidth, dsunit); 437 dswidth, dsunit);
438 return EINVAL; 438 return EINVAL;
439 } 439 }
440 440
441 /* 441 /*
442 * Applications using DMI filesystems often expect the 442 * Applications using DMI filesystems often expect the
443 * inode generation number to be monotonically increasing. 443 * inode generation number to be monotonically increasing.
444 * If we delete inode chunks we break this assumption, so 444 * If we delete inode chunks we break this assumption, so
445 * keep unused inode chunks on disk for DMI filesystems 445 * keep unused inode chunks on disk for DMI filesystems
446 * until we come up with a better solution. 446 * until we come up with a better solution.
447 * Note that if "ikeep" or "noikeep" mount options are 447 * Note that if "ikeep" or "noikeep" mount options are
448 * supplied, then they are honored. 448 * supplied, then they are honored.
449 */ 449 */
450 if ((mp->m_flags & XFS_MOUNT_DMAPI) && dmapi_implies_ikeep) 450 if ((mp->m_flags & XFS_MOUNT_DMAPI) && dmapi_implies_ikeep)
451 mp->m_flags |= XFS_MOUNT_IKEEP; 451 mp->m_flags |= XFS_MOUNT_IKEEP;
452 452
453 done: 453 done:
454 if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) { 454 if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
455 /* 455 /*
456 * At this point the superblock has not been read 456 * At this point the superblock has not been read
457 * in, therefore we do not know the block size. 457 * in, therefore we do not know the block size.
458 * Before the mount call ends we will convert 458 * Before the mount call ends we will convert
459 * these to FSBs. 459 * these to FSBs.
460 */ 460 */
461 if (dsunit) { 461 if (dsunit) {
462 mp->m_dalign = dsunit; 462 mp->m_dalign = dsunit;
463 mp->m_flags |= XFS_MOUNT_RETERR; 463 mp->m_flags |= XFS_MOUNT_RETERR;
464 } 464 }
465 465
466 if (dswidth) 466 if (dswidth)
467 mp->m_swidth = dswidth; 467 mp->m_swidth = dswidth;
468 } 468 }
469 469
470 if (mp->m_logbufs != -1 && 470 if (mp->m_logbufs != -1 &&
471 mp->m_logbufs != 0 && 471 mp->m_logbufs != 0 &&
472 (mp->m_logbufs < XLOG_MIN_ICLOGS || 472 (mp->m_logbufs < XLOG_MIN_ICLOGS ||
473 mp->m_logbufs > XLOG_MAX_ICLOGS)) { 473 mp->m_logbufs > XLOG_MAX_ICLOGS)) {
474 cmn_err(CE_WARN, 474 cmn_err(CE_WARN,
475 "XFS: invalid logbufs value: %d [not %d-%d]", 475 "XFS: invalid logbufs value: %d [not %d-%d]",
476 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS); 476 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
477 return XFS_ERROR(EINVAL); 477 return XFS_ERROR(EINVAL);
478 } 478 }
479 if (mp->m_logbsize != -1 && 479 if (mp->m_logbsize != -1 &&
480 mp->m_logbsize != 0 && 480 mp->m_logbsize != 0 &&
481 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE || 481 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
482 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE || 482 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
483 !is_power_of_2(mp->m_logbsize))) { 483 !is_power_of_2(mp->m_logbsize))) {
484 cmn_err(CE_WARN, 484 cmn_err(CE_WARN,
485 "XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", 485 "XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
486 mp->m_logbsize); 486 mp->m_logbsize);
487 return XFS_ERROR(EINVAL); 487 return XFS_ERROR(EINVAL);
488 } 488 }
489 489
490 mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL); 490 mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
491 if (!mp->m_fsname) 491 if (!mp->m_fsname)
492 return ENOMEM; 492 return ENOMEM;
493 mp->m_fsname_len = strlen(mp->m_fsname) + 1; 493 mp->m_fsname_len = strlen(mp->m_fsname) + 1;
494 494
495 if (iosizelog) { 495 if (iosizelog) {
496 if (iosizelog > XFS_MAX_IO_LOG || 496 if (iosizelog > XFS_MAX_IO_LOG ||
497 iosizelog < XFS_MIN_IO_LOG) { 497 iosizelog < XFS_MIN_IO_LOG) {
498 cmn_err(CE_WARN, 498 cmn_err(CE_WARN,
499 "XFS: invalid log iosize: %d [not %d-%d]", 499 "XFS: invalid log iosize: %d [not %d-%d]",
500 iosizelog, XFS_MIN_IO_LOG, 500 iosizelog, XFS_MIN_IO_LOG,
501 XFS_MAX_IO_LOG); 501 XFS_MAX_IO_LOG);
502 return XFS_ERROR(EINVAL); 502 return XFS_ERROR(EINVAL);
503 } 503 }
504 504
505 mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE; 505 mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
506 mp->m_readio_log = iosizelog; 506 mp->m_readio_log = iosizelog;
507 mp->m_writeio_log = iosizelog; 507 mp->m_writeio_log = iosizelog;
508 } 508 }
509 509
510 return 0; 510 return 0;
511 } 511 }
512 512
/*
 * Maps a single XFS_MOUNT_* flag bit to the mount option text that
 * xfs_showargs() emits for it.
 */
struct proc_xfs_info {
	int	flag;	/* XFS_MOUNT_* bit to test in mp->m_flags */
	char	*str;	/* ",option" string printed on a match */
};
517 517
/*
 * Print the mount options of @mp that differ from the defaults into the
 * seq_file @m, in the leading-comma format used for /proc/mounts.
 * Always returns 0.
 */
STATIC int
xfs_showargs(
	struct xfs_mount	*mp,
	struct seq_file		*m)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		"," MNTOPT_IKEEP },
		{ XFS_MOUNT_WSYNC,		"," MNTOPT_WSYNC },
		{ XFS_MOUNT_NOALIGN,		"," MNTOPT_NOALIGN },
		{ XFS_MOUNT_SWALLOC,		"," MNTOPT_SWALLOC },
		{ XFS_MOUNT_NOUUID,		"," MNTOPT_NOUUID },
		{ XFS_MOUNT_NORECOVERY,		"," MNTOPT_NORECOVERY },
		{ XFS_MOUNT_OSYNCISOSYNC,	"," MNTOPT_OSYNCISOSYNC },
		{ XFS_MOUNT_ATTR2,		"," MNTOPT_ATTR2 },
		{ XFS_MOUNT_FILESTREAMS,	"," MNTOPT_FILESTREAM },
		{ XFS_MOUNT_DMAPI,		"," MNTOPT_DMAPI },
		{ XFS_MOUNT_GRPID,		"," MNTOPT_GRPID },
		{ 0, NULL }
	};
	static struct proc_xfs_info xfs_info_unset[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_COMPAT_IOSIZE,	"," MNTOPT_LARGEIO },
		{ XFS_MOUNT_BARRIER,		"," MNTOPT_NOBARRIER },
		{ XFS_MOUNT_SMALL_INUMS,	"," MNTOPT_64BITINODE },
		{ 0, NULL }
	};
	struct proc_xfs_info	*xfs_infop;

	/* options that are printed when their flag bit is set */
	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}
	/* options that are printed when their flag bit is clear */
	for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
		if (!(mp->m_flags & xfs_infop->flag))
			seq_puts(m, xfs_infop->str);
	}

	/* default I/O size: stored as log2, printed in KB */
	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk",
				(int)(1 << mp->m_writeio_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);

	/* external log and realtime device names, if configured */
	if (mp->m_logname)
		seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
	if (mp->m_rtname)
		seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);

	/* stripe geometry, converted from FSBs back to 512-byte blocks */
	if (mp->m_dalign > 0)
		seq_printf(m, "," MNTOPT_SUNIT "=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, "," MNTOPT_SWIDTH "=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	/* user quota: enforced, accounting-only, or absent */
	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
		seq_puts(m, "," MNTOPT_USRQUOTA);
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, "," MNTOPT_UQUOTANOENF);

	/* Either project or group quotas can be active, not both */

	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
		if (mp->m_qflags & XFS_OQUOTA_ENFD)
			seq_puts(m, "," MNTOPT_PRJQUOTA);
		else
			seq_puts(m, "," MNTOPT_PQUOTANOENF);
	} else if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_OQUOTA_ENFD)
			seq_puts(m, "," MNTOPT_GRPQUOTA);
		else
			seq_puts(m, "," MNTOPT_GQUOTANOENF);
	}

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, "," MNTOPT_NOQUOTA);

	return 0;
}
/*
 * Return the largest file byte offset this kernel configuration can
 * address, given the log2 of the filesystem block size.  On 64-bit
 * kernels the result is simply 2^63 - 1; on 32-bit kernels it depends
 * on CONFIG_LBDAF and the block size, as explained below.
 */
__uint64_t
xfs_max_file_offset(
	unsigned int		blockshift)
{
	unsigned int		pagefactor = 1;
	unsigned int		bitshift = BITS_PER_LONG - 1;

	/* Figure out maximum filesize, on Linux this can depend on
	 * the filesystem blocksize (on 32 bit platforms).
	 * __block_prepare_write does this in an [unsigned] long...
	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
	 * So, for page sized blocks (4K on 32 bit platforms),
	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
	 * but for smaller blocksizes it is less (bbits = log2 bsize).
	 * Note1: get_block_t takes a long (implicit cast from above)
	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
	 * can optionally convert the [unsigned] long from above into
	 * an [unsigned] long long.
	 */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBDAF)
	ASSERT(sizeof(sector_t) == 8);
	pagefactor = PAGE_CACHE_SIZE;
	bitshift = BITS_PER_LONG;
# else
	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

	return (((__uint64_t)pagefactor) << bitshift) - 1;
}
634 634
635 STATIC int 635 STATIC int
636 xfs_blkdev_get( 636 xfs_blkdev_get(
637 xfs_mount_t *mp, 637 xfs_mount_t *mp,
638 const char *name, 638 const char *name,
639 struct block_device **bdevp) 639 struct block_device **bdevp)
640 { 640 {
641 int error = 0; 641 int error = 0;
642 642
643 *bdevp = open_bdev_exclusive(name, FMODE_READ|FMODE_WRITE, mp); 643 *bdevp = open_bdev_exclusive(name, FMODE_READ|FMODE_WRITE, mp);
644 if (IS_ERR(*bdevp)) { 644 if (IS_ERR(*bdevp)) {
645 error = PTR_ERR(*bdevp); 645 error = PTR_ERR(*bdevp);
646 printk("XFS: Invalid device [%s], error=%d\n", name, error); 646 printk("XFS: Invalid device [%s], error=%d\n", name, error);
647 } 647 }
648 648
649 return -error; 649 return -error;
650 } 650 }
651 651
652 STATIC void 652 STATIC void
653 xfs_blkdev_put( 653 xfs_blkdev_put(
654 struct block_device *bdev) 654 struct block_device *bdev)
655 { 655 {
656 if (bdev) 656 if (bdev)
657 close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE); 657 close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
658 } 658 }
659 659
/*
 * Try to write out the superblock using barriers.
 * Returns the I/O error from the trial write (0 on success).  The
 * superblock buffer is restored to its previous state afterwards.
 */
STATIC int
xfs_barrier_test(
	xfs_mount_t	*mp)
{
	xfs_buf_t	*sbp = xfs_getsb(mp, 0);
	int		error;

	/*
	 * Set the buffer up for a synchronous ordered write, bypassing
	 * the delayed-write path, so the barrier is actually exercised.
	 */
	XFS_BUF_UNDONE(sbp);
	XFS_BUF_UNREAD(sbp);
	XFS_BUF_UNDELAYWRITE(sbp);
	XFS_BUF_WRITE(sbp);
	XFS_BUF_UNASYNC(sbp);
	XFS_BUF_ORDERED(sbp);

	xfsbdstrat(mp, sbp);
	error = xfs_iowait(sbp);

	/*
	 * Clear all the flags we set and possible error state in the
	 * buffer.  We only did the write to try out whether barriers
	 * worked and shouldn't leave any traces in the superblock
	 * buffer.
	 */
	XFS_BUF_DONE(sbp);
	XFS_BUF_ERROR(sbp, 0);
	XFS_BUF_UNORDERED(sbp);

	xfs_buf_relse(sbp);
	return error;
}
693 693
694 STATIC void 694 STATIC void
695 xfs_mountfs_check_barriers(xfs_mount_t *mp) 695 xfs_mountfs_check_barriers(xfs_mount_t *mp)
696 { 696 {
697 int error; 697 int error;
698 698
699 if (mp->m_logdev_targp != mp->m_ddev_targp) { 699 if (mp->m_logdev_targp != mp->m_ddev_targp) {
700 xfs_fs_cmn_err(CE_NOTE, mp, 700 xfs_fs_cmn_err(CE_NOTE, mp,
701 "Disabling barriers, not supported with external log device"); 701 "Disabling barriers, not supported with external log device");
702 mp->m_flags &= ~XFS_MOUNT_BARRIER; 702 mp->m_flags &= ~XFS_MOUNT_BARRIER;
703 return; 703 return;
704 } 704 }
705 705
706 if (xfs_readonly_buftarg(mp->m_ddev_targp)) { 706 if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
707 xfs_fs_cmn_err(CE_NOTE, mp, 707 xfs_fs_cmn_err(CE_NOTE, mp,
708 "Disabling barriers, underlying device is readonly"); 708 "Disabling barriers, underlying device is readonly");
709 mp->m_flags &= ~XFS_MOUNT_BARRIER; 709 mp->m_flags &= ~XFS_MOUNT_BARRIER;
710 return; 710 return;
711 } 711 }
712 712
713 error = xfs_barrier_test(mp); 713 error = xfs_barrier_test(mp);
714 if (error) { 714 if (error) {
715 xfs_fs_cmn_err(CE_NOTE, mp, 715 xfs_fs_cmn_err(CE_NOTE, mp,
716 "Disabling barriers, trial barrier write failed"); 716 "Disabling barriers, trial barrier write failed");
717 mp->m_flags &= ~XFS_MOUNT_BARRIER; 717 mp->m_flags &= ~XFS_MOUNT_BARRIER;
718 return; 718 return;
719 } 719 }
720 } 720 }
721 721
/*
 * Flush the volatile write cache of @buftarg's underlying block device
 * out to stable storage.
 */
void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev, NULL);
}
728 728
/*
 * Tear down the buffer targets and close the block devices opened by
 * xfs_open_devices().  Log and realtime devices are only closed when
 * they are distinct from the data device; the data device's bdev is
 * not put here (presumably released by the generic get_sb_bdev()
 * unmount path -- see the comment above xfs_open_devices), so only
 * its buftarg is freed.
 */
STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		/* save the bdev before the buftarg holding it is freed */
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		xfs_free_buftarg(mp, mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		xfs_free_buftarg(mp, mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
	}
	xfs_free_buftarg(mp, mp->m_ddev_targp);
}
745 745
/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 *
 * Returns 0 on success or a positive error code; on failure every
 * device/buftarg acquired here has been released again (goto chain).
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		/* the realtime device must not alias the data or log device */
		if (rtdev == ddev || rtdev == logdev) {
			cmn_err(CE_WARN,
	"XFS: Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = EINVAL;
			goto out_close_rtdev;
		}
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(logdev, 1);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		/* internal log: share the data device's buftarg */
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp, mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp, mp->m_ddev_targp);
 out_close_rtdev:
	if (rtdev)
		xfs_blkdev_put(rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev)
		xfs_blkdev_put(logdev);
 out:
	return error;
}
824 824
/*
 * Setup xfs_mount buffer target pointers based on superblock
 *
 * Now that the superblock has been read, size each buftarg to the
 * block and sector sizes it records.  Returns 0 or the error from
 * xfs_setsize_buftarg().
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
				    mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		/* the log may record its own sector size (sector feature) */
		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    mp->m_sb.sb_blocksize,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_blocksize,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}
860 860
861 /* 861 /*
862 * XFS AIL push thread support 862 * XFS AIL push thread support
863 */ 863 */
/*
 * Set a new push target for the AIL and wake the xfsaild thread so it
 * pushes log items up to @threshold_lsn.
 */
void
xfsaild_wakeup(
	struct xfs_ail		*ailp,
	xfs_lsn_t		threshold_lsn)
{
	ailp->xa_target = threshold_lsn;
	wake_up_process(ailp->xa_task);
}
872 872
/*
 * Main loop of the AIL push thread.  Sleeps for the timeout (in ms)
 * returned by the previous push, then pushes the AIL towards the
 * current target unless the filesystem has been shut down.  Exits
 * when kthread_stop() is called.
 */
STATIC int
xfsaild(
	void	*data)
{
	struct xfs_ail	*ailp = data;
	xfs_lsn_t	last_pushed_lsn = 0;
	long		tout = 0;	/* ms to sleep before the next push */

	while (!kthread_should_stop()) {
		if (tout)
			schedule_timeout_interruptible(msecs_to_jiffies(tout));
		/* default retry interval if this iteration skips the push */
		tout = 1000;

		/* swsusp */
		try_to_freeze();

		ASSERT(ailp->xa_mount->m_log);
		if (XFS_FORCED_SHUTDOWN(ailp->xa_mount))
			continue;

		tout = xfsaild_push(ailp, &last_pushed_lsn);
	}

	return 0;
}	/* xfsaild */
898 898
899 int 899 int
900 xfsaild_start( 900 xfsaild_start(
901 struct xfs_ail *ailp) 901 struct xfs_ail *ailp)
902 { 902 {
903 ailp->xa_target = 0; 903 ailp->xa_target = 0;
904 ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild"); 904 ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild");
905 if (IS_ERR(ailp->xa_task)) 905 if (IS_ERR(ailp->xa_task))
906 return -PTR_ERR(ailp->xa_task); 906 return -PTR_ERR(ailp->xa_task);
907 return 0; 907 return 0;
908 } 908 }
909 909
/*
 * Stop the AIL push thread and wait for it to exit.
 */
void
xfsaild_stop(
	struct xfs_ail	*ailp)
{
	kthread_stop(ailp->xa_task);
}
916 916
917 917
/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	/* this hook must never be reached for XFS; trap hard if it is */
	BUG();
	return NULL;
}
926 926
/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can reclaim the inode.
 *
 * Clean inodes (and bad ones) are reclaimed immediately; dirty ones
 * are tagged for the background reclaim path instead.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_itrace_entry(ip);

	XFS_STATS_INC(vn_reclaim);

	/* bad inode, get out here ASAP */
	if (is_bad_inode(inode))
		goto out_reclaim;

	/* wait for any outstanding I/O completions before tearing down */
	xfs_ioend_wait(ip);

	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);

	/*
	 * We should never get here with one of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

	/*
	 * If we have nothing to flush with this inode then complete the
	 * teardown now, otherwise delay the flush operation.
	 */
	if (!xfs_inode_clean(ip)) {
		xfs_inode_set_reclaim_tag(ip);
		return;
	}

 out_reclaim:
	xfs_ireclaim(ip);
}
967 967
/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly intialise
 * fields in the xfs inode that left in the initialise state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void		*inode)
{
	struct xfs_inode	*ip = inode;

	/* start from a known-zero state */
	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_iocount, 0);
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);
	init_waitqueue_head(&ip->i_ipin_wait);
	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&ip->i_flush);
	complete(&ip->i_flush);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}
1003 1003
/*
 * Dirty the XFS inode when mark_inode_dirty_sync() is called so that
 * we catch unlogged VFS level updates to the inode. Care must be taken
 * here - the transaction code calls mark_inode_dirty_sync() to mark the
 * VFS inode dirty in a transaction and clears the i_update_core field;
 * it must clear the field after calling mark_inode_dirty_sync() to
 * correctly indicate that the dirty state has been propagated into the
 * inode log item.
 *
 * We need the barrier() to maintain correct ordering between unlogged
 * updates and the transaction commit code that clears the i_update_core
 * field. This requires all updates to be completed before marking the
 * inode dirty.
 */
STATIC void
xfs_fs_dirty_inode(
	struct inode	*inode)
{
	/* ordering only - see the comment above for why this is needed */
	barrier();
	XFS_I(inode)->i_update_core = 1;
}
1025 1025
/*
 * Attempt to flush the inode, this will actually fail
 * if the inode is pinned, but we dirty the inode again
 * at the point when it is unpinned after a log write,
 * since this is when the inode itself becomes flushable.
 */
STATIC int
xfs_fs_write_inode(
	struct inode		*inode,
	int			sync)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;

	xfs_itrace_entry(ip);

	/* Nothing can be written once the filesystem is shut down. */
	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	if (sync) {
		/* Wait for in-flight data I/O before flushing the inode. */
		error = xfs_wait_on_pages(ip, 0, -1);
		if (error)
			goto out;
	}

	/*
	 * Bypass inodes which have already been cleaned by
	 * the inode flush clustering code inside xfs_iflush
	 */
	if (xfs_inode_clean(ip))
		goto out;

	/*
	 * We make this non-blocking if the inode is contended, return
	 * EAGAIN to indicate to the caller that they did not succeed.
	 * This prevents the flush path from blocking on inodes inside
	 * another operation right now, they get caught later by xfs_sync.
	 */
	if (sync) {
		/* Blocking path: take both locks unconditionally. */
		xfs_ilock(ip, XFS_ILOCK_SHARED);
		xfs_iflock(ip);

		error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
	} else {
		/* Non-blocking path: back off if either lock is contended. */
		error = EAGAIN;
		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
			goto out;
		if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
			goto out_unlock;

		error = xfs_iflush(ip, XFS_IFLUSH_ASYNC_NOBLOCK);
	}

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 out:
	/*
	 * if we failed to write out the inode then mark
	 * it dirty again so we'll try again later.
	 */
	if (error)
		xfs_mark_inode_dirty_sync(ip);
	return -error;	/* the VFS expects a negative errno */
}
1091 1091
/*
 * Called by the VFS when the inode is being evicted; bump the vnode
 * statistics and run XFS inactivation (which may free blocks and log
 * transactions for an unlinked inode).
 */
STATIC void
xfs_fs_clear_inode(
	struct inode		*inode)
{
	xfs_inode_t		*ip = XFS_I(inode);

	xfs_itrace_entry(ip);
	XFS_STATS_INC(vn_rele);
	XFS_STATS_INC(vn_remove);
	XFS_STATS_DEC(vn_active);

	/*
	 * The iolock is used by the file system to coordinate reads,
	 * writes, and block truncates.  Up to this point the lock
	 * protected concurrent accesses by users of the inode.  But
	 * from here forward we're doing some final processing of the
	 * inode because we're done with it, and although we reuse the
	 * iolock for protection it is really a distinct lock class
	 * (in the lockdep sense) from before.  To keep lockdep happy
	 * (and basically indicate what we are doing), we explicitly
	 * re-init the iolock here.
	 */
	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	xfs_inactive(ip);
}
1119 1119
1120 STATIC void 1120 STATIC void
1121 xfs_free_fsname( 1121 xfs_free_fsname(
1122 struct xfs_mount *mp) 1122 struct xfs_mount *mp)
1123 { 1123 {
1124 kfree(mp->m_fsname); 1124 kfree(mp->m_fsname);
1125 kfree(mp->m_rtname); 1125 kfree(mp->m_rtname);
1126 kfree(mp->m_logname); 1126 kfree(mp->m_logname);
1127 } 1127 }
1128 1128
/*
 * Tear down the mount: stop background sync, push remaining dirty data,
 * send the dmapi unmount events, unmount the filesystem proper and free
 * all per-mount state.  The order here matters: filestream inactivation
 * generates log traffic, so it must happen before the final unmount.
 */
STATIC void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_syncd_stop(mp);

	if (!(sb->s_flags & MS_RDONLY)) {
		/*
		 * XXX(hch): this should be SYNC_WAIT.
		 *
		 * Or more likely not needed at all because the VFS is already
		 * calling ->sync_fs after shutting down all filestem
		 * operations and just before calling ->put_super.
		 */
		xfs_sync_data(mp, 0);
		xfs_sync_attr(mp, 0);
	}

	/*
	 * Tell dmapi we are about to unmount.  Any error is ignored by the
	 * macro: an unmount is not allowed to fail because of dmapi.
	 */
	XFS_SEND_PREUNMOUNT(mp);

	/*
	 * Blow away any referenced inode in the filestreams cache.
	 * This can and will cause log traffic as inodes go inactive
	 * here.
	 */
	xfs_filestream_unmount(mp);

	XFS_bflush(mp->m_ddev_targp);

	/* Final dmapi unmount notification, after all log traffic is done. */
	XFS_SEND_UNMOUNT(mp);

	xfs_unmountfs(mp);
	xfs_freesb(mp);
	xfs_icsb_destroy_counters(mp);
	xfs_close_devices(mp);
	xfs_dmops_put(mp);
	xfs_free_fsname(mp);
	kfree(mp);
}
1188 1170
/*
 * ->sync_fs: write filesystem data and metadata.  The async pass only
 * kicks off quota syncing; the sync pass quiesces data and, in laptop
 * mode, also waits for a full xfssyncd iteration so the disk activity
 * happens while the disk is known to be spun up.
 */
STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;

	/*
	 * Not much we can do for the first async pass.  Writing out the
	 * superblock would be counter-productive as we are going to redirty
	 * when writing out other data and metadata (and writing out a single
	 * block is quite fast anyway).
	 *
	 * Try to asynchronously kick off quota syncing at least.
	 */
	if (!wait) {
		xfs_qm_sync(mp, SYNC_TRYLOCK);
		return 0;
	}

	error = xfs_quiesce_data(mp);
	if (error)
		return -error;		/* convert to negative errno for VFS */

	if (laptop_mode) {
		int	prev_sync_seq = mp->m_sync_seq;

		/*
		 * The disk must be active because we're syncing.
		 * We schedule xfssyncd now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		wake_up_process(mp->m_sync_task);
		/*
		 * We have to wait for the sync iteration to complete.
		 * If we don't, the disk activity caused by the sync
		 * will come after the sync is completed, and that
		 * triggers another sync from laptop mode.
		 */
		wait_event(mp->m_wait_single_sync_task,
				mp->m_sync_seq != prev_sync_seq);
	}

	return 0;
}
1235 1217
1236 STATIC int 1218 STATIC int
1237 xfs_fs_statfs( 1219 xfs_fs_statfs(
1238 struct dentry *dentry, 1220 struct dentry *dentry,
1239 struct kstatfs *statp) 1221 struct kstatfs *statp)
1240 { 1222 {
1241 struct xfs_mount *mp = XFS_M(dentry->d_sb); 1223 struct xfs_mount *mp = XFS_M(dentry->d_sb);
1242 xfs_sb_t *sbp = &mp->m_sb; 1224 xfs_sb_t *sbp = &mp->m_sb;
1243 struct xfs_inode *ip = XFS_I(dentry->d_inode); 1225 struct xfs_inode *ip = XFS_I(dentry->d_inode);
1244 __uint64_t fakeinos, id; 1226 __uint64_t fakeinos, id;
1245 xfs_extlen_t lsize; 1227 xfs_extlen_t lsize;
1246 1228
1247 statp->f_type = XFS_SB_MAGIC; 1229 statp->f_type = XFS_SB_MAGIC;
1248 statp->f_namelen = MAXNAMELEN - 1; 1230 statp->f_namelen = MAXNAMELEN - 1;
1249 1231
1250 id = huge_encode_dev(mp->m_ddev_targp->bt_dev); 1232 id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
1251 statp->f_fsid.val[0] = (u32)id; 1233 statp->f_fsid.val[0] = (u32)id;
1252 statp->f_fsid.val[1] = (u32)(id >> 32); 1234 statp->f_fsid.val[1] = (u32)(id >> 32);
1253 1235
1254 xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); 1236 xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
1255 1237
1256 spin_lock(&mp->m_sb_lock); 1238 spin_lock(&mp->m_sb_lock);
1257 statp->f_bsize = sbp->sb_blocksize; 1239 statp->f_bsize = sbp->sb_blocksize;
1258 lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0; 1240 lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
1259 statp->f_blocks = sbp->sb_dblocks - lsize; 1241 statp->f_blocks = sbp->sb_dblocks - lsize;
1260 statp->f_bfree = statp->f_bavail = 1242 statp->f_bfree = statp->f_bavail =
1261 sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); 1243 sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
1262 fakeinos = statp->f_bfree << sbp->sb_inopblog; 1244 fakeinos = statp->f_bfree << sbp->sb_inopblog;
1263 statp->f_files = 1245 statp->f_files =
1264 MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER); 1246 MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
1265 if (mp->m_maxicount) 1247 if (mp->m_maxicount)
1266 statp->f_files = min_t(typeof(statp->f_files), 1248 statp->f_files = min_t(typeof(statp->f_files),
1267 statp->f_files, 1249 statp->f_files,
1268 mp->m_maxicount); 1250 mp->m_maxicount);
1269 statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); 1251 statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
1270 spin_unlock(&mp->m_sb_lock); 1252 spin_unlock(&mp->m_sb_lock);
1271 1253
1272 if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) || 1254 if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
1273 ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) == 1255 ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
1274 (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD)) 1256 (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
1275 xfs_qm_statvfs(ip, statp); 1257 xfs_qm_statvfs(ip, statp);
1276 return 0; 1258 return 0;
1277 } 1259 }
1278 1260
/*
 * ->remount_fs: handle "mount -o remount".  Only the barrier options are
 * honoured; everything else is silently ignored (see comment below).
 * Also performs the ro->rw and rw->ro state transitions.
 */
STATIC int
xfs_fs_remount(
	struct super_block	*sb,
	int			*flags,
	char			*options)
{
	struct xfs_mount	*mp = XFS_M(sb);
	substring_t		args[MAX_OPT_ARGS];
	char			*p;
	int			error;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_barrier:
			mp->m_flags |= XFS_MOUNT_BARRIER;

			/*
			 * Test if barriers are actually working if we can,
			 * else delay this check until the filesystem is
			 * marked writeable.
			 */
			if (!(mp->m_flags & XFS_MOUNT_RDONLY))
				xfs_mountfs_check_barriers(mp);
			break;
		case Opt_nobarrier:
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
			break;
		default:
			/*
			 * Logically we would return an error here to prevent
			 * users from believing they might have changed
			 * mount options using remount which can't be changed.
			 *
			 * But unfortunately mount(8) adds all options from
			 * mtab and fstab to the mount arguments in some cases
			 * so we can't blindly reject options, but have to
			 * check for each specified option if it actually
			 * differs from the currently set option and only
			 * reject it if that's the case.
			 *
			 * Until that is implemented we return success for
			 * every remount request, and silently ignore all
			 * options that we can't actually change.
			 */
#if 0
			printk(KERN_INFO
	"XFS: mount option \"%s\" not supported for remount\n", p);
			return -EINVAL;
#else
			break;
#endif
		}
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
		mp->m_flags &= ~XFS_MOUNT_RDONLY;
		if (mp->m_flags & XFS_MOUNT_BARRIER)
			xfs_mountfs_check_barriers(mp);

		/*
		 * If this is the first remount to writeable state we
		 * might have some superblock changes to update.
		 */
		if (mp->m_update_flags) {
			error = xfs_mount_log_sb(mp, mp->m_update_flags);
			if (error) {
				cmn_err(CE_WARN,
					"XFS: failed to write sb changes");
				return error;
			}
			mp->m_update_flags = 0;
		}
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
		/* Flush data first, then quiesce metadata and the log. */
		xfs_quiesce_data(mp);
		xfs_quiesce_attr(mp);
		mp->m_flags |= XFS_MOUNT_RDONLY;
	}

	return 0;
}
1369 1351
/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done write a dummy
 * record to dirty the log in case of a crash while frozen.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_quiesce_attr(mp);
	/* convert the positive internal error to a negative errno */
	return -xfs_fs_log_dummy(mp);
}
1384 1366
/*
 * ->show_options: emit the active mount options for /proc/mounts.
 * xfs_showargs does the actual work; negate its positive error return
 * for the VFS.
 */
STATIC int
xfs_fs_show_options(
	struct seq_file		*m,
	struct vfsmount		*mnt)
{
	return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
}
1392 1374
1393 /* 1375 /*
1394 * This function fills in xfs_mount_t fields based on mount args. 1376 * This function fills in xfs_mount_t fields based on mount args.
1395 * Note: the superblock _has_ now been read in. 1377 * Note: the superblock _has_ now been read in.
1396 */ 1378 */
1397 STATIC int 1379 STATIC int
1398 xfs_finish_flags( 1380 xfs_finish_flags(
1399 struct xfs_mount *mp) 1381 struct xfs_mount *mp)
1400 { 1382 {
1401 int ronly = (mp->m_flags & XFS_MOUNT_RDONLY); 1383 int ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
1402 1384
1403 /* Fail a mount where the logbuf is smaller than the log stripe */ 1385 /* Fail a mount where the logbuf is smaller than the log stripe */
1404 if (xfs_sb_version_haslogv2(&mp->m_sb)) { 1386 if (xfs_sb_version_haslogv2(&mp->m_sb)) {
1405 if (mp->m_logbsize <= 0 && 1387 if (mp->m_logbsize <= 0 &&
1406 mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) { 1388 mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
1407 mp->m_logbsize = mp->m_sb.sb_logsunit; 1389 mp->m_logbsize = mp->m_sb.sb_logsunit;
1408 } else if (mp->m_logbsize > 0 && 1390 } else if (mp->m_logbsize > 0 &&
1409 mp->m_logbsize < mp->m_sb.sb_logsunit) { 1391 mp->m_logbsize < mp->m_sb.sb_logsunit) {
1410 cmn_err(CE_WARN, 1392 cmn_err(CE_WARN,
1411 "XFS: logbuf size must be greater than or equal to log stripe size"); 1393 "XFS: logbuf size must be greater than or equal to log stripe size");
1412 return XFS_ERROR(EINVAL); 1394 return XFS_ERROR(EINVAL);
1413 } 1395 }
1414 } else { 1396 } else {
1415 /* Fail a mount if the logbuf is larger than 32K */ 1397 /* Fail a mount if the logbuf is larger than 32K */
1416 if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) { 1398 if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
1417 cmn_err(CE_WARN, 1399 cmn_err(CE_WARN,
1418 "XFS: logbuf size for version 1 logs must be 16K or 32K"); 1400 "XFS: logbuf size for version 1 logs must be 16K or 32K");
1419 return XFS_ERROR(EINVAL); 1401 return XFS_ERROR(EINVAL);
1420 } 1402 }
1421 } 1403 }
1422 1404
1423 /* 1405 /*
1424 * mkfs'ed attr2 will turn on attr2 mount unless explicitly 1406 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
1425 * told by noattr2 to turn it off 1407 * told by noattr2 to turn it off
1426 */ 1408 */
1427 if (xfs_sb_version_hasattr2(&mp->m_sb) && 1409 if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1428 !(mp->m_flags & XFS_MOUNT_NOATTR2)) 1410 !(mp->m_flags & XFS_MOUNT_NOATTR2))
1429 mp->m_flags |= XFS_MOUNT_ATTR2; 1411 mp->m_flags |= XFS_MOUNT_ATTR2;
1430 1412
1431 /* 1413 /*
1432 * prohibit r/w mounts of read-only filesystems 1414 * prohibit r/w mounts of read-only filesystems
1433 */ 1415 */
1434 if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) { 1416 if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
1435 cmn_err(CE_WARN, 1417 cmn_err(CE_WARN,
1436 "XFS: cannot mount a read-only filesystem as read-write"); 1418 "XFS: cannot mount a read-only filesystem as read-write");
1437 return XFS_ERROR(EROFS); 1419 return XFS_ERROR(EROFS);
1438 } 1420 }
1439 1421
1440 return 0; 1422 return 0;
1441 } 1423 }
1442 1424
/*
 * ->fill_super: set up a new XFS mount.  Allocates and initializes the
 * xfs_mount, parses the mount options, opens the devices, reads the
 * superblock, mounts the filesystem and wires up the root dentry.
 * Errors unwind through the goto chain in reverse order of setup;
 * once xfs_mountfs has succeeded failures go through fail_unmount.
 */
STATIC int
xfs_fs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	struct inode		*root;
	struct xfs_mount	*mp = NULL;
	int			flags = 0, error = ENOMEM;
	char			*mtpt = NULL;

	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
	if (!mp)
		goto out;

	spin_lock_init(&mp->m_sb_lock);
	mutex_init(&mp->m_growlock);
	atomic_set(&mp->m_active_trans, 0);
	INIT_LIST_HEAD(&mp->m_sync_list);
	spin_lock_init(&mp->m_sync_lock);
	init_waitqueue_head(&mp->m_wait_single_sync_task);

	mp->m_super = sb;
	sb->s_fs_info = mp;

	error = xfs_parseargs(mp, (char *)data, &mtpt);
	if (error)
		goto out_free_fsname;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
#endif
	sb->s_op = &xfs_super_operations;

	error = xfs_dmops_get(mp);
	if (error)
		goto out_free_fsname;

	if (silent)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_put_dmops;

	/* Fall back to global counters if per-cpu setup fails. */
	if (xfs_icsb_init_counters(mp))
		mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_destroy_counters;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_mountfs_check_barriers(mp);

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, mtpt, mp->m_fsname);

	sb->s_magic = XFS_SB_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	sb->s_time_gran = 1;
	set_posix_acl_flag(sb);

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = ENOENT;
		goto fail_unmount;
	}
	if (is_bad_inode(root)) {
		error = EINVAL;
		goto fail_vnrele;
	}
	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		error = ENOMEM;
		goto fail_vnrele;
	}

	error = xfs_syncd_init(mp);
	if (error)
		goto fail_vnrele;

	kfree(mtpt);

	xfs_itrace_exit(XFS_I(sb->s_root->d_inode));
	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_destroy_counters:
	xfs_icsb_destroy_counters(mp);
	xfs_close_devices(mp);
 out_put_dmops:
	xfs_dmops_put(mp);
 out_free_fsname:
	xfs_free_fsname(mp);
	kfree(mtpt);
	kfree(mp);
 out:
	/* internal errors are positive; the VFS wants negative errnos */
	return -error;

 fail_vnrele:
	/* Drop whichever reference on the root inode we still hold. */
	if (sb->s_root) {
		dput(sb->s_root);
		sb->s_root = NULL;
	} else {
		iput(root);
	}

 fail_unmount:
	/*
	 * Blow away any referenced inode in the filestreams cache.
	 * This can and will cause log traffic as inodes go inactive
	 * here.
	 */
	xfs_filestream_unmount(mp);

	XFS_bflush(mp->m_ddev_targp);

	xfs_unmountfs(mp);
	goto out_free_sb;
}
1587 1569
/*
 * ->get_sb: standard block-device mount, delegating superblock setup
 * to xfs_fs_fill_super.
 */
STATIC int
xfs_fs_get_sb(
	struct file_system_type	*fs_type,
	int			flags,
	const char		*dev_name,
	void			*data,
	struct vfsmount		*mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super,
			   mnt);
}
1599 1581
/*
 * VFS super_operations for XFS.  Methods not listed here fall back to
 * the generic VFS implementations.
 */
static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.write_inode		= xfs_fs_write_inode,
	.clear_inode		= xfs_fs_clear_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.statfs			= xfs_fs_statfs,
	.remount_fs		= xfs_fs_remount,
	.show_options		= xfs_fs_show_options,
};
1613 1595
/* Registration record for the "xfs" filesystem type. */
static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.get_sb			= xfs_fs_get_sb,
	.kill_sb		= kill_block_super,
	/* XFS always mounts a real block device */
	.fs_flags		= FS_REQUIRES_DEV,
};
1621 1603
/*
 * Allocate the global ktrace buffers for every trace facility compiled
 * into this build.  Each facility is guarded by its own #ifdef, and the
 * error-unwind label ladder below mirrors the allocation order in
 * reverse, so a failure part-way through frees exactly the buffers
 * allocated so far.  Returns 0 on success or -ENOMEM on failure.
 */
STATIC int __init
xfs_alloc_trace_bufs(void)
{
#ifdef XFS_ALLOC_TRACE
	xfs_alloc_trace_buf = ktrace_alloc(XFS_ALLOC_TRACE_SIZE, KM_MAYFAIL);
	if (!xfs_alloc_trace_buf)
		goto out;
#endif
#ifdef XFS_BMAP_TRACE
	xfs_bmap_trace_buf = ktrace_alloc(XFS_BMAP_TRACE_SIZE, KM_MAYFAIL);
	if (!xfs_bmap_trace_buf)
		goto out_free_alloc_trace;
#endif
#ifdef XFS_BTREE_TRACE
	/* The btree facility needs three buffers: allocbt, inobt, bmbt. */
	xfs_allocbt_trace_buf = ktrace_alloc(XFS_ALLOCBT_TRACE_SIZE,
					     KM_MAYFAIL);
	if (!xfs_allocbt_trace_buf)
		goto out_free_bmap_trace;

	xfs_inobt_trace_buf = ktrace_alloc(XFS_INOBT_TRACE_SIZE, KM_MAYFAIL);
	if (!xfs_inobt_trace_buf)
		goto out_free_allocbt_trace;

	xfs_bmbt_trace_buf = ktrace_alloc(XFS_BMBT_TRACE_SIZE, KM_MAYFAIL);
	if (!xfs_bmbt_trace_buf)
		goto out_free_inobt_trace;
#endif
#ifdef XFS_ATTR_TRACE
	xfs_attr_trace_buf = ktrace_alloc(XFS_ATTR_TRACE_SIZE, KM_MAYFAIL);
	if (!xfs_attr_trace_buf)
		goto out_free_bmbt_trace;
#endif
#ifdef XFS_DIR2_TRACE
	xfs_dir2_trace_buf = ktrace_alloc(XFS_DIR2_GTRACE_SIZE, KM_MAYFAIL);
	if (!xfs_dir2_trace_buf)
		goto out_free_attr_trace;
#endif

	return 0;

	/*
	 * Unwind path: the labels below must be #ifdef'ed exactly like the
	 * allocations above, otherwise a configuration that disables one
	 * facility would leave dangling labels or skip a free.
	 */
#ifdef XFS_DIR2_TRACE
 out_free_attr_trace:
#endif
#ifdef XFS_ATTR_TRACE
	ktrace_free(xfs_attr_trace_buf);
 out_free_bmbt_trace:
#endif
#ifdef XFS_BTREE_TRACE
	ktrace_free(xfs_bmbt_trace_buf);
 out_free_inobt_trace:
	ktrace_free(xfs_inobt_trace_buf);
 out_free_allocbt_trace:
	ktrace_free(xfs_allocbt_trace_buf);
 out_free_bmap_trace:
#endif
#ifdef XFS_BMAP_TRACE
	ktrace_free(xfs_bmap_trace_buf);
 out_free_alloc_trace:
#endif
#ifdef XFS_ALLOC_TRACE
	ktrace_free(xfs_alloc_trace_buf);
 out:
#endif
	return -ENOMEM;
}
1687 1669
/*
 * Free all ktrace buffers allocated by xfs_alloc_trace_bufs(), in
 * reverse allocation order.  Only called when all buffers were
 * successfully allocated, so no NULL checks are needed here.
 */
STATIC void
xfs_free_trace_bufs(void)
{
#ifdef XFS_DIR2_TRACE
	ktrace_free(xfs_dir2_trace_buf);
#endif
#ifdef XFS_ATTR_TRACE
	ktrace_free(xfs_attr_trace_buf);
#endif
#ifdef XFS_BTREE_TRACE
	ktrace_free(xfs_bmbt_trace_buf);
	ktrace_free(xfs_inobt_trace_buf);
	ktrace_free(xfs_allocbt_trace_buf);
#endif
#ifdef XFS_BMAP_TRACE
	ktrace_free(xfs_bmap_trace_buf);
#endif
#ifdef XFS_ALLOC_TRACE
	ktrace_free(xfs_alloc_trace_buf);
#endif
}
1709 1691
/*
 * Create all slab zones (and the ioend mempool) used by XFS.  On any
 * failure, tear down everything created so far via the goto-label
 * ladder (which runs in reverse creation order) and return -ENOMEM.
 */
STATIC int __init
xfs_init_zones(void)
{

	xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
	if (!xfs_ioend_zone)
		goto out;

	/*
	 * Back the ioend zone with a mempool so writeback can always make
	 * forward progress under memory pressure.
	 */
	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
						  xfs_ioend_zone);
	if (!xfs_ioend_pool)
		goto out_destroy_ioend_zone;

	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
						"xfs_log_ticket");
	if (!xfs_log_ticket_zone)
		goto out_destroy_ioend_pool;

	xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
						"xfs_bmap_free_item");
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
						"xfs_btree_cur");
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
						"xfs_da_state");
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
	if (!xfs_dabuf_zone)
		goto out_destroy_da_state_zone;

	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
	if (!xfs_ifork_zone)
		goto out_destroy_dabuf_zone;

	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) +
				(((XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK) /
				  NBWORD) * sizeof(int))), "xfs_buf_item");
	if (!xfs_buf_item_zone)
		goto out_destroy_trans_zone;

	/* EFD/EFI items embed room for the fast-path extent count. */
	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
			 sizeof(xfs_extent_t))), "xfs_efd_item");
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
			((XFS_EFI_MAX_FAST_EXTENTS - 1) *
			 sizeof(xfs_extent_t))), "xfs_efi_item");
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	/* Inode zone is HW-aligned, reclaimable and NUMA-spread. */
	xfs_inode_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
			KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD,
			xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
					KM_ZONE_SPREAD, NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;

	return 0;

 out_destroy_inode_zone:
	kmem_zone_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_zone_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_zone_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_zone_destroy(xfs_buf_item_zone);
 out_destroy_trans_zone:
	kmem_zone_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_zone_destroy(xfs_ifork_zone);
 out_destroy_dabuf_zone:
	kmem_zone_destroy(xfs_dabuf_zone);
 out_destroy_da_state_zone:
	kmem_zone_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_zone_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_zone_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_zone_destroy(xfs_log_ticket_zone);
 out_destroy_ioend_pool:
	mempool_destroy(xfs_ioend_pool);
 out_destroy_ioend_zone:
	kmem_zone_destroy(xfs_ioend_zone);
 out:
	return -ENOMEM;
}
1822 1804
/*
 * Destroy every zone created by xfs_init_zones(), in strict reverse
 * creation order (the mempool must go before the ioend zone backing it).
 */
STATIC void
xfs_destroy_zones(void)
{
	kmem_zone_destroy(xfs_ili_zone);
	kmem_zone_destroy(xfs_inode_zone);
	kmem_zone_destroy(xfs_efi_zone);
	kmem_zone_destroy(xfs_efd_zone);
	kmem_zone_destroy(xfs_buf_item_zone);
	kmem_zone_destroy(xfs_trans_zone);
	kmem_zone_destroy(xfs_ifork_zone);
	kmem_zone_destroy(xfs_dabuf_zone);
	kmem_zone_destroy(xfs_da_state_zone);
	kmem_zone_destroy(xfs_btree_cur_zone);
	kmem_zone_destroy(xfs_bmap_free_item_zone);
	kmem_zone_destroy(xfs_log_ticket_zone);
	mempool_destroy(xfs_ioend_pool);
	kmem_zone_destroy(xfs_ioend_zone);

}
1842 1824
/*
 * Module init: bring up every XFS subsystem in dependency order and
 * finally register the filesystem type.  Any failure unwinds the
 * already-initialized subsystems through the goto ladder (reverse
 * order) and returns the error.
 */
STATIC int __init
init_xfs_fs(void)
{
	int			error;

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	/* These three cannot fail, so they need no unwind handling. */
	ktrace_init(64);
	xfs_ioend_init();
	xfs_dir_startup();

	error = xfs_init_zones();
	if (error)
		goto out;

	error = xfs_alloc_trace_bufs();
	if (error)
		goto out_destroy_zones;

	error = xfs_mru_cache_init();
	if (error)
		goto out_free_trace_buffers;

	error = xfs_filestream_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_buf_init();
	if (error)
		goto out_filestream_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	vfs_initquota();

	/* Registration comes last: after this, mounts can race in. */
	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_sysctl_unregister;
	return 0;

 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_filestream_uninit:
	xfs_filestream_uninit();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_free_trace_buffers:
	xfs_free_trace_bufs();
 out_destroy_zones:
	xfs_destroy_zones();
 out:
	return error;
}
1907 1889
/*
 * Module exit: tear down in exact reverse order of init_xfs_fs(),
 * starting by unregistering the filesystem so no new mounts can begin.
 */
STATIC void __exit
exit_xfs_fs(void)
{
	vfs_exitquota();
	unregister_filesystem(&xfs_fs_type);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_filestream_uninit();
	xfs_mru_cache_uninit();
	xfs_free_trace_bufs();
	xfs_destroy_zones();
	ktrace_uninit();
}
1922 1904
/* Module entry/exit hooks and standard module metadata. */
module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");
1929 1911
1 /* 1 /*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved. 3 * All Rights Reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as 6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 * 8 *
9 * This program is distributed in the hope that it would be useful, 9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation, 15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */ 17 */
18 #ifndef __XFS_MOUNT_H__ 18 #ifndef __XFS_MOUNT_H__
19 #define __XFS_MOUNT_H__ 19 #define __XFS_MOUNT_H__
20 20
/*
 * Pre-computed log space reservations for each transaction type.
 * Filled in at mount time so transaction start doesn't have to
 * recompute them.
 */
typedef struct xfs_trans_reservations {
	uint	tr_write;	/* extent alloc trans */
	uint	tr_itruncate;	/* truncate trans */
	uint	tr_rename;	/* rename trans */
	uint	tr_link;	/* link trans */
	uint	tr_remove;	/* unlink trans */
	uint	tr_symlink;	/* symlink trans */
	uint	tr_create;	/* create trans */
	uint	tr_mkdir;	/* mkdir trans */
	uint	tr_ifree;	/* inode free trans */
	uint	tr_ichange;	/* inode update trans */
	uint	tr_growdata;	/* fs data section grow trans */
	uint	tr_swrite;	/* sync write inode trans */
	uint	tr_addafork;	/* cvt inode to attributed trans */
	uint	tr_writeid;	/* write setuid/setgid file */
	uint	tr_attrinval;	/* attr fork buffer invalidation */
	uint	tr_attrset;	/* set/create an attribute */
	uint	tr_attrrm;	/* remove an attribute */
	uint	tr_clearagi;	/* clear bad agi unlinked ino bucket */
	uint	tr_growrtalloc;	/* grow realtime allocations */
	uint	tr_growrtzero;	/* grow realtime zeroing */
	uint	tr_growrtfree;	/* grow realtime freeing */
} xfs_trans_reservations_t;
44 44
#ifndef __KERNEL__

/*
 * Userspace build (xfsprogs): map a disk address to its allocation
 * group number / AG-relative block number with plain div/mod on the
 * superblock's AG size.  The kernel uses optimized variants elsewhere.
 */
#define xfs_daddr_to_agno(mp,d) \
	((xfs_agnumber_t)(XFS_BB_TO_FSBT(mp, d) / (mp)->m_sb.sb_agblocks))
#define xfs_daddr_to_agbno(mp,d) \
	((xfs_agblock_t)(XFS_BB_TO_FSBT(mp, d) % (mp)->m_sb.sb_agblocks))

#else /* __KERNEL__ */

#include "xfs_sync.h"

/* Forward declarations so this header needs only pointers to these. */
struct cred;
struct log;
struct xfs_mount_args;
struct xfs_inode;
struct xfs_bmbt_irec;
struct xfs_bmap_free;
struct xfs_extdelta;
struct xfs_swapext;
struct xfs_mru_cache;
struct xfs_nameops;
struct xfs_ail;
struct xfs_quotainfo;
69 69
/*
 * Prototypes and functions for the Data Migration subsystem.
 */

/* Callback types an external DMAPI provider registers with XFS. */
typedef int	(*xfs_send_data_t)(int, struct xfs_inode *,
			xfs_off_t, size_t, int, int *);
typedef int	(*xfs_send_mmap_t)(struct vm_area_struct *, uint);
typedef int	(*xfs_send_destroy_t)(struct xfs_inode *, dm_right_t);
typedef int	(*xfs_send_namesp_t)(dm_eventtype_t, struct xfs_mount *,
			struct xfs_inode *, dm_right_t,
			struct xfs_inode *, dm_right_t,
			const char *, const char *, mode_t, int, int);
typedef int	(*xfs_send_mount_t)(struct xfs_mount *, dm_right_t,
			char *, char *);
typedef void	(*xfs_send_unmount_t)(struct xfs_mount *, struct xfs_inode *,
			dm_right_t, mode_t, int, int);

/* Vector of DMAPI event callbacks; dispatched via the XFS_SEND_* macros. */
typedef struct xfs_dmops {
	xfs_send_data_t		xfs_send_data;
	xfs_send_mmap_t		xfs_send_mmap;
	xfs_send_destroy_t	xfs_send_destroy;
	xfs_send_namesp_t	xfs_send_namesp;
	xfs_send_mount_t	xfs_send_mount;
	xfs_send_unmount_t	xfs_send_unmount;
} xfs_dmops_t;
95 95
/*
 * Flags for the DMAPI unmount events: if the mount has not subscribed
 * to DM_EVENT_UNMOUNT, mark the event DM_FLAGS_UNWANTED.
 */
#define XFS_DMAPI_UNMOUNT_FLAGS(mp) \
	(((mp)->m_dmevmask & (1 << DM_EVENT_UNMOUNT)) ? 0 : DM_FLAGS_UNWANTED)

/* Dispatch helpers that route DMAPI events through the registered ops. */
#define XFS_SEND_DATA(mp, ev,ip,off,len,fl,lock) \
	(*(mp)->m_dm_ops->xfs_send_data)(ev,ip,off,len,fl,lock)
#define XFS_SEND_MMAP(mp, vma,fl) \
	(*(mp)->m_dm_ops->xfs_send_mmap)(vma,fl)
#define XFS_SEND_DESTROY(mp, ip,right) \
	(*(mp)->m_dm_ops->xfs_send_destroy)(ip,right)
#define XFS_SEND_NAMESP(mp, ev,b1,r1,b2,r2,n1,n2,mode,rval,fl) \
	(*(mp)->m_dm_ops->xfs_send_namesp)(ev,NULL,b1,r1,b2,r2,n1,n2,mode,rval,fl)
#define XFS_SEND_MOUNT(mp,right,path,name) \
	(*(mp)->m_dm_ops->xfs_send_mount)(mp,right,path,name)

/*
 * Send the DMAPI preunmount event against the root inode, but only on
 * DMAPI-enabled mounts.  The parameter is parenthesized at every use
 * (fixes the bare `mp->m_flags` in the original, which would mis-expand
 * for argument expressions such as `*mpp` — CERT PRE01-C).
 */
#define XFS_SEND_PREUNMOUNT(mp) \
do { \
	if ((mp)->m_flags & XFS_MOUNT_DMAPI) { \
		(*(mp)->m_dm_ops->xfs_send_namesp)(DM_EVENT_PREUNMOUNT, mp, \
			(mp)->m_rootip, DM_RIGHT_NULL, \
			(mp)->m_rootip, DM_RIGHT_NULL, \
			NULL, NULL, 0, 0, XFS_DMAPI_UNMOUNT_FLAGS(mp)); \
	} \
} while (0)

/* Send the DMAPI unmount event; same DMAPI-enabled guard as above. */
#define XFS_SEND_UNMOUNT(mp) \
do { \
	if ((mp)->m_flags & XFS_MOUNT_DMAPI) { \
		(*(mp)->m_dm_ops->xfs_send_unmount)(mp, (mp)->m_rootip, \
			DM_RIGHT_NULL, 0, 0, XFS_DMAPI_UNMOUNT_FLAGS(mp)); \
	} \
} while (0)
110 125
111 126
#ifdef HAVE_PERCPU_SB

/*
 * Valid per-cpu incore superblock counters. Note that if you add new counters,
 * you may need to define new counter disabled bit field descriptors as there
 * are more possible fields in the superblock that can fit in a bitfield on a
 * 32 bit platform. The XFS_SBS_* values for the current current counters just
 * fit.
 */
typedef struct xfs_icsb_cnts {
	uint64_t	icsb_fdblocks;
	uint64_t	icsb_ifree;
	uint64_t	icsb_icount;
	unsigned long	icsb_flags;	/* XFS_ICSB_FLAG_* bits below */
} xfs_icsb_cnts_t;

#define XFS_ICSB_FLAG_LOCK	(1 << 0)	/* counter lock bit */

#define XFS_ICSB_LAZY_COUNT	(1 << 1)	/* accuracy not needed */

extern int	xfs_icsb_init_counters(struct xfs_mount *);
extern void	xfs_icsb_reinit_counters(struct xfs_mount *);
extern void	xfs_icsb_destroy_counters(struct xfs_mount *);
extern void	xfs_icsb_sync_counters(struct xfs_mount *, int);
extern void	xfs_icsb_sync_counters_locked(struct xfs_mount *, int);

#else
/* No per-cpu superblock counters: all operations become no-ops. */
#define xfs_icsb_init_counters(mp)		(0)
#define xfs_icsb_destroy_counters(mp)		do { } while (0)
#define xfs_icsb_reinit_counters(mp)		do { } while (0)
#define xfs_icsb_sync_counters(mp, flags)	do { } while (0)
#define xfs_icsb_sync_counters_locked(mp, flags) do { } while (0)
#endif
145 160
146 typedef struct xfs_mount { 161 typedef struct xfs_mount {
147 struct super_block *m_super; 162 struct super_block *m_super;
148 xfs_tid_t m_tid; /* next unused tid for fs */ 163 xfs_tid_t m_tid; /* next unused tid for fs */
149 struct xfs_ail *m_ail; /* fs active log item list */ 164 struct xfs_ail *m_ail; /* fs active log item list */
150 xfs_sb_t m_sb; /* copy of fs superblock */ 165 xfs_sb_t m_sb; /* copy of fs superblock */
151 spinlock_t m_sb_lock; /* sb counter lock */ 166 spinlock_t m_sb_lock; /* sb counter lock */
152 struct xfs_buf *m_sb_bp; /* buffer for superblock */ 167 struct xfs_buf *m_sb_bp; /* buffer for superblock */
153 char *m_fsname; /* filesystem name */ 168 char *m_fsname; /* filesystem name */
154 int m_fsname_len; /* strlen of fs name */ 169 int m_fsname_len; /* strlen of fs name */
155 char *m_rtname; /* realtime device name */ 170 char *m_rtname; /* realtime device name */
156 char *m_logname; /* external log device name */ 171 char *m_logname; /* external log device name */
157 int m_bsize; /* fs logical block size */ 172 int m_bsize; /* fs logical block size */
158 xfs_agnumber_t m_agfrotor; /* last ag where space found */ 173 xfs_agnumber_t m_agfrotor; /* last ag where space found */
159 xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */ 174 xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */
160 spinlock_t m_agirotor_lock;/* .. and lock protecting it */ 175 spinlock_t m_agirotor_lock;/* .. and lock protecting it */
161 xfs_agnumber_t m_maxagi; /* highest inode alloc group */ 176 xfs_agnumber_t m_maxagi; /* highest inode alloc group */
162 uint m_readio_log; /* min read size log bytes */ 177 uint m_readio_log; /* min read size log bytes */
163 uint m_readio_blocks; /* min read size blocks */ 178 uint m_readio_blocks; /* min read size blocks */
164 uint m_writeio_log; /* min write size log bytes */ 179 uint m_writeio_log; /* min write size log bytes */
165 uint m_writeio_blocks; /* min write size blocks */ 180 uint m_writeio_blocks; /* min write size blocks */
166 struct log *m_log; /* log specific stuff */ 181 struct log *m_log; /* log specific stuff */
167 int m_logbufs; /* number of log buffers */ 182 int m_logbufs; /* number of log buffers */
168 int m_logbsize; /* size of each log buffer */ 183 int m_logbsize; /* size of each log buffer */
169 uint m_rsumlevels; /* rt summary levels */ 184 uint m_rsumlevels; /* rt summary levels */
170 uint m_rsumsize; /* size of rt summary, bytes */ 185 uint m_rsumsize; /* size of rt summary, bytes */
171 struct xfs_inode *m_rbmip; /* pointer to bitmap inode */ 186 struct xfs_inode *m_rbmip; /* pointer to bitmap inode */
172 struct xfs_inode *m_rsumip; /* pointer to summary inode */ 187 struct xfs_inode *m_rsumip; /* pointer to summary inode */
173 struct xfs_inode *m_rootip; /* pointer to root directory */ 188 struct xfs_inode *m_rootip; /* pointer to root directory */
174 struct xfs_quotainfo *m_quotainfo; /* disk quota information */ 189 struct xfs_quotainfo *m_quotainfo; /* disk quota information */
175 xfs_buftarg_t *m_ddev_targp; /* saves taking the address */ 190 xfs_buftarg_t *m_ddev_targp; /* saves taking the address */
176 xfs_buftarg_t *m_logdev_targp;/* ptr to log device */ 191 xfs_buftarg_t *m_logdev_targp;/* ptr to log device */
177 xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */ 192 xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */
178 __uint8_t m_blkbit_log; /* blocklog + NBBY */ 193 __uint8_t m_blkbit_log; /* blocklog + NBBY */
179 __uint8_t m_blkbb_log; /* blocklog - BBSHIFT */ 194 __uint8_t m_blkbb_log; /* blocklog - BBSHIFT */
180 __uint8_t m_agno_log; /* log #ag's */ 195 __uint8_t m_agno_log; /* log #ag's */
181 __uint8_t m_agino_log; /* #bits for agino in inum */ 196 __uint8_t m_agino_log; /* #bits for agino in inum */
182 __uint16_t m_inode_cluster_size;/* min inode buf size */ 197 __uint16_t m_inode_cluster_size;/* min inode buf size */
183 uint m_blockmask; /* sb_blocksize-1 */ 198 uint m_blockmask; /* sb_blocksize-1 */
184 uint m_blockwsize; /* sb_blocksize in words */ 199 uint m_blockwsize; /* sb_blocksize in words */
185 uint m_blockwmask; /* blockwsize-1 */ 200 uint m_blockwmask; /* blockwsize-1 */
186 uint m_alloc_mxr[2]; /* max alloc btree records */ 201 uint m_alloc_mxr[2]; /* max alloc btree records */
187 uint m_alloc_mnr[2]; /* min alloc btree records */ 202 uint m_alloc_mnr[2]; /* min alloc btree records */
188 uint m_bmap_dmxr[2]; /* max bmap btree records */ 203 uint m_bmap_dmxr[2]; /* max bmap btree records */
189 uint m_bmap_dmnr[2]; /* min bmap btree records */ 204 uint m_bmap_dmnr[2]; /* min bmap btree records */
190 uint m_inobt_mxr[2]; /* max inobt btree records */ 205 uint m_inobt_mxr[2]; /* max inobt btree records */
191 uint m_inobt_mnr[2]; /* min inobt btree records */ 206 uint m_inobt_mnr[2]; /* min inobt btree records */
192 uint m_ag_maxlevels; /* XFS_AG_MAXLEVELS */ 207 uint m_ag_maxlevels; /* XFS_AG_MAXLEVELS */
193 uint m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */ 208 uint m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */
194 uint m_in_maxlevels; /* max inobt btree levels. */ 209 uint m_in_maxlevels; /* max inobt btree levels. */
195 struct xfs_perag *m_perag; /* per-ag accounting info */ 210 struct xfs_perag *m_perag; /* per-ag accounting info */
196 struct rw_semaphore m_peraglock; /* lock for m_perag (pointer) */ 211 struct rw_semaphore m_peraglock; /* lock for m_perag (pointer) */
197 struct mutex m_growlock; /* growfs mutex */ 212 struct mutex m_growlock; /* growfs mutex */
198 int m_fixedfsid[2]; /* unchanged for life of FS */ 213 int m_fixedfsid[2]; /* unchanged for life of FS */
199 uint m_dmevmask; /* DMI events for this FS */ 214 uint m_dmevmask; /* DMI events for this FS */
200 __uint64_t m_flags; /* global mount flags */ 215 __uint64_t m_flags; /* global mount flags */
201 uint m_dir_node_ents; /* #entries in a dir danode */ 216 uint m_dir_node_ents; /* #entries in a dir danode */
202 uint m_attr_node_ents; /* #entries in attr danode */ 217 uint m_attr_node_ents; /* #entries in attr danode */
203 int m_ialloc_inos; /* inodes in inode allocation */ 218 int m_ialloc_inos; /* inodes in inode allocation */
204 int m_ialloc_blks; /* blocks in inode allocation */ 219 int m_ialloc_blks; /* blocks in inode allocation */
205 int m_inoalign_mask;/* mask sb_inoalignmt if used */ 220 int m_inoalign_mask;/* mask sb_inoalignmt if used */
206 uint m_qflags; /* quota status flags */ 221 uint m_qflags; /* quota status flags */
207 xfs_trans_reservations_t m_reservations;/* precomputed res values */ 222 xfs_trans_reservations_t m_reservations;/* precomputed res values */
208 __uint64_t m_maxicount; /* maximum inode count */ 223 __uint64_t m_maxicount; /* maximum inode count */
209 __uint64_t m_maxioffset; /* maximum inode offset */ 224 __uint64_t m_maxioffset; /* maximum inode offset */
210 __uint64_t m_resblks; /* total reserved blocks */ 225 __uint64_t m_resblks; /* total reserved blocks */
211 __uint64_t m_resblks_avail;/* available reserved blocks */ 226 __uint64_t m_resblks_avail;/* available reserved blocks */
212 int m_dalign; /* stripe unit */ 227 int m_dalign; /* stripe unit */
213 int m_swidth; /* stripe width */ 228 int m_swidth; /* stripe width */
214 int m_sinoalign; /* stripe unit inode alignment */ 229 int m_sinoalign; /* stripe unit inode alignment */
215 int m_attr_magicpct;/* 37% of the blocksize */ 230 int m_attr_magicpct;/* 37% of the blocksize */
216 int m_dir_magicpct; /* 37% of the dir blocksize */ 231 int m_dir_magicpct; /* 37% of the dir blocksize */
217 __uint8_t m_sectbb_log; /* sectlog - BBSHIFT */ 232 __uint8_t m_sectbb_log; /* sectlog - BBSHIFT */
218 const struct xfs_nameops *m_dirnameops; /* vector of dir name ops */ 233 const struct xfs_nameops *m_dirnameops; /* vector of dir name ops */
219 int m_dirblksize; /* directory block sz--bytes */ 234 int m_dirblksize; /* directory block sz--bytes */
220 int m_dirblkfsbs; /* directory block sz--fsbs */ 235 int m_dirblkfsbs; /* directory block sz--fsbs */
221 xfs_dablk_t m_dirdatablk; /* blockno of dir data v2 */ 236 xfs_dablk_t m_dirdatablk; /* blockno of dir data v2 */
222 xfs_dablk_t m_dirleafblk; /* blockno of dir non-data v2 */ 237 xfs_dablk_t m_dirleafblk; /* blockno of dir non-data v2 */
223 xfs_dablk_t m_dirfreeblk; /* blockno of dirfreeindex v2 */ 238 xfs_dablk_t m_dirfreeblk; /* blockno of dirfreeindex v2 */
224 uint m_chsize; /* size of next field */ 239 uint m_chsize; /* size of next field */
225 struct xfs_chash *m_chash; /* fs private inode per-cluster 240 struct xfs_chash *m_chash; /* fs private inode per-cluster
226 * hash table */ 241 * hash table */
227 struct xfs_dmops *m_dm_ops; /* vector of DMI ops */ 242 struct xfs_dmops *m_dm_ops; /* vector of DMI ops */
228 struct xfs_qmops *m_qm_ops; /* vector of XQM ops */ 243 struct xfs_qmops *m_qm_ops; /* vector of XQM ops */
229 atomic_t m_active_trans; /* number trans frozen */ 244 atomic_t m_active_trans; /* number trans frozen */
230 #ifdef HAVE_PERCPU_SB 245 #ifdef HAVE_PERCPU_SB
231 xfs_icsb_cnts_t *m_sb_cnts; /* per-cpu superblock counters */ 246 xfs_icsb_cnts_t *m_sb_cnts; /* per-cpu superblock counters */
232 unsigned long m_icsb_counters; /* disabled per-cpu counters */ 247 unsigned long m_icsb_counters; /* disabled per-cpu counters */
233 struct notifier_block m_icsb_notifier; /* hotplug cpu notifier */ 248 struct notifier_block m_icsb_notifier; /* hotplug cpu notifier */
234 struct mutex m_icsb_mutex; /* balancer sync lock */ 249 struct mutex m_icsb_mutex; /* balancer sync lock */
235 #endif 250 #endif
236 struct xfs_mru_cache *m_filestream; /* per-mount filestream data */ 251 struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
237 struct task_struct *m_sync_task; /* generalised sync thread */ 252 struct task_struct *m_sync_task; /* generalised sync thread */
238 xfs_sync_work_t m_sync_work; /* work item for VFS_SYNC */ 253 xfs_sync_work_t m_sync_work; /* work item for VFS_SYNC */
239 struct list_head m_sync_list; /* sync thread work item list */ 254 struct list_head m_sync_list; /* sync thread work item list */
240 spinlock_t m_sync_lock; /* work item list lock */ 255 spinlock_t m_sync_lock; /* work item list lock */
241 int m_sync_seq; /* sync thread generation no. */ 256 int m_sync_seq; /* sync thread generation no. */
242 wait_queue_head_t m_wait_single_sync_task; 257 wait_queue_head_t m_wait_single_sync_task;
243 __int64_t m_update_flags; /* sb flags we need to update 258 __int64_t m_update_flags; /* sb flags we need to update
244 on the next remount,rw */ 259 on the next remount,rw */
245 } xfs_mount_t; 260 } xfs_mount_t;
246 261
/*
 * Flags for m_flags (the per-mount xfs_mount.m_flags bitmask).
 */
#define XFS_MOUNT_WSYNC		(1ULL << 0)	/* for nfs - all metadata ops
						   must be synchronous except
						   for space allocations */
#define XFS_MOUNT_DMAPI		(1ULL << 2)	/* dmapi is enabled */
#define XFS_MOUNT_WAS_CLEAN	(1ULL << 3)
#define XFS_MOUNT_FS_SHUTDOWN	(1ULL << 4)	/* atomic stop of all filesystem
						   operations, typically for
						   disk errors in metadata */
#define XFS_MOUNT_RETERR	(1ULL << 6)	/* return alignment errors to
						   user */
#define XFS_MOUNT_NOALIGN	(1ULL << 7)	/* turn off stripe alignment
						   allocations */
#define XFS_MOUNT_ATTR2		(1ULL << 8)	/* allow use of attr2 format */
#define XFS_MOUNT_GRPID		(1ULL << 9)	/* group-ID assigned from directory */
#define XFS_MOUNT_NORECOVERY	(1ULL << 10)	/* no recovery - dirty fs */
#define XFS_MOUNT_DFLT_IOSIZE	(1ULL << 12)	/* set default i/o size */
#define XFS_MOUNT_OSYNCISOSYNC	(1ULL << 13)	/* o_sync is REALLY o_sync */
						/* osyncisdsync is now default*/
#define XFS_MOUNT_32BITINODES	(1ULL << 14)	/* do not create inodes above
						 * 32 bits in size */
#define XFS_MOUNT_SMALL_INUMS	(1ULL << 15)	/* users wants 32bit inodes */
#define XFS_MOUNT_NOUUID	(1ULL << 16)	/* ignore uuid during mount */
#define XFS_MOUNT_BARRIER	(1ULL << 17)
#define XFS_MOUNT_IKEEP		(1ULL << 18)	/* keep empty inode clusters*/
#define XFS_MOUNT_SWALLOC	(1ULL << 19)	/* turn on stripe width
						 * allocation */
#define XFS_MOUNT_RDONLY	(1ULL << 20)	/* read-only fs */
#define XFS_MOUNT_DIRSYNC	(1ULL << 21)	/* synchronous directory ops */
#define XFS_MOUNT_COMPAT_IOSIZE	(1ULL << 22)	/* don't report large preferred
						 * I/O size in stat() */
#define XFS_MOUNT_NO_PERCPU_SB	(1ULL << 23)	/* don't use per-cpu superblock
						   counters */
#define XFS_MOUNT_FILESTREAMS	(1ULL << 24)	/* enable the filestreams
						   allocator */
#define XFS_MOUNT_NOATTR2	(1ULL << 25)	/* disable use of attr2 format */
285 300
286 301
/*
 * Default minimum read and write sizes (log2 of bytes).
 */
#define XFS_READIO_LOG_LARGE	16
#define XFS_WRITEIO_LOG_LARGE	16

/*
 * Max and min values for mount-option defined I/O
 * preallocation sizes.
 */
#define XFS_MAX_IO_LOG		30	/* 1G */
#define XFS_MIN_IO_LOG		PAGE_SHIFT

/*
 * Synchronous read and write sizes. This should be
 * better for NFSv2 wsync filesystems.
 */
#define XFS_WSYNC_READIO_LOG	15	/* 32k */
#define XFS_WSYNC_WRITEIO_LOG	14	/* 16k */
306 321
307 /* 322 /*
308 * Allow large block sizes to be reported to userspace programs if the 323 * Allow large block sizes to be reported to userspace programs if the
309 * "largeio" mount option is used. 324 * "largeio" mount option is used.
310 * 325 *
311 * If compatibility mode is specified, simply return the basic unit of caching 326 * If compatibility mode is specified, simply return the basic unit of caching
312 * so that we don't get inefficient read/modify/write I/O from user apps. 327 * so that we don't get inefficient read/modify/write I/O from user apps.
313 * Otherwise.... 328 * Otherwise....
314 * 329 *
315 * If the underlying volume is a stripe, then return the stripe width in bytes 330 * If the underlying volume is a stripe, then return the stripe width in bytes
316 * as the recommended I/O size. It is not a stripe and we've set a default 331 * as the recommended I/O size. It is not a stripe and we've set a default
317 * buffered I/O size, return that, otherwise return the compat default. 332 * buffered I/O size, return that, otherwise return the compat default.
318 */ 333 */
319 static inline unsigned long 334 static inline unsigned long
320 xfs_preferred_iosize(xfs_mount_t *mp) 335 xfs_preferred_iosize(xfs_mount_t *mp)
321 { 336 {
322 if (mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE) 337 if (mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE)
323 return PAGE_CACHE_SIZE; 338 return PAGE_CACHE_SIZE;
324 return (mp->m_swidth ? 339 return (mp->m_swidth ?
325 (mp->m_swidth << mp->m_sb.sb_blocklog) : 340 (mp->m_swidth << mp->m_sb.sb_blocklog) :
326 ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ? 341 ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ?
327 (1 << (int)MAX(mp->m_readio_log, mp->m_writeio_log)) : 342 (1 << (int)MAX(mp->m_readio_log, mp->m_writeio_log)) :
328 PAGE_CACHE_SIZE)); 343 PAGE_CACHE_SIZE));
329 } 344 }
330 345
#define XFS_MAXIOFFSET(mp)	((mp)->m_maxioffset)

/* Was the previous unmount clean (no log recovery needed at mount)? */
#define XFS_LAST_UNMOUNT_WAS_CLEAN(mp) \
				((mp)->m_flags & XFS_MOUNT_WAS_CLEAN)
#define XFS_FORCED_SHUTDOWN(mp)	((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN)
void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname,
	int lnnum);
/* Convenience wrapper that records the call site for diagnostics. */
#define xfs_force_shutdown(m,f)	\
	xfs_do_force_shutdown(m, f, __FILE__, __LINE__)

/* Reasons passed to xfs_do_force_shutdown(). */
#define SHUTDOWN_META_IO_ERROR	0x0001	/* write attempt to metadata failed */
#define SHUTDOWN_LOG_IO_ERROR	0x0002	/* write attempt to the log failed */
#define SHUTDOWN_FORCE_UMOUNT	0x0004	/* shutdown from a forced unmount */
#define SHUTDOWN_CORRUPT_INCORE	0x0008	/* corrupt in-memory data structures */
#define SHUTDOWN_REMOTE_REQ	0x0010	/* shutdown came from remote cell */
#define SHUTDOWN_DEVICE_REQ	0x0020	/* failed all paths to the device */

#define xfs_test_for_freeze(mp)		((mp)->m_super->s_frozen)
#define xfs_wait_for_freeze(mp,l)	vfs_check_frozen((mp)->m_super, (l))

/*
 * Flags for xfs_mountfs
 */
#define XFS_MFSI_QUIET	0x40	/* Be silent if mount errors found */
355 370
356 static inline xfs_agnumber_t 371 static inline xfs_agnumber_t
357 xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d) 372 xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d)
358 { 373 {
359 xfs_daddr_t ld = XFS_BB_TO_FSBT(mp, d); 374 xfs_daddr_t ld = XFS_BB_TO_FSBT(mp, d);
360 do_div(ld, mp->m_sb.sb_agblocks); 375 do_div(ld, mp->m_sb.sb_agblocks);
361 return (xfs_agnumber_t) ld; 376 return (xfs_agnumber_t) ld;
362 } 377 }
363 378
364 static inline xfs_agblock_t 379 static inline xfs_agblock_t
365 xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d) 380 xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
366 { 381 {
367 xfs_daddr_t ld = XFS_BB_TO_FSBT(mp, d); 382 xfs_daddr_t ld = XFS_BB_TO_FSBT(mp, d);
368 return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks); 383 return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
369 } 384 }
370 385
371 /* 386 /*
372 * perag get/put wrappers for eventual ref counting 387 * perag get/put wrappers for eventual ref counting
373 */ 388 */
374 static inline xfs_perag_t * 389 static inline xfs_perag_t *
375 xfs_get_perag(struct xfs_mount *mp, xfs_ino_t ino) 390 xfs_get_perag(struct xfs_mount *mp, xfs_ino_t ino)
376 { 391 {
377 return &mp->m_perag[XFS_INO_TO_AGNO(mp, ino)]; 392 return &mp->m_perag[XFS_INO_TO_AGNO(mp, ino)];
378 } 393 }
379 394
380 static inline void 395 static inline void
381 xfs_put_perag(struct xfs_mount *mp, xfs_perag_t *pag) 396 xfs_put_perag(struct xfs_mount *mp, xfs_perag_t *pag)
382 { 397 {
383 /* nothing to see here, move along */ 398 /* nothing to see here, move along */
384 } 399 }
385 400
/*
 * Per-cpu superblock locking functions.
 *
 * With per-cpu superblock counters enabled these serialise counter
 * rebalancing; otherwise they compile away to nothing.
 */
#ifdef HAVE_PERCPU_SB
static inline void
xfs_icsb_lock(xfs_mount_t *mp)
{
	mutex_lock(&mp->m_icsb_mutex);
}

static inline void
xfs_icsb_unlock(xfs_mount_t *mp)
{
	mutex_unlock(&mp->m_icsb_mutex);
}
#else
#define xfs_icsb_lock(mp)
#define xfs_icsb_unlock(mp)
#endif
405 420
/*
 * This structure is for use by the xfs_mod_incore_sb_batch() routine.
 * xfs_growfs can specify a few fields which are more than int limit
 */
typedef struct xfs_mod_sb {
	xfs_sb_field_t	msb_field;	/* Field to modify, see below */
	int64_t		msb_delta;	/* Change to make to specified field */
} xfs_mod_sb_t;
414 429
extern int	xfs_log_sbcount(xfs_mount_t *, uint);
extern int	xfs_mountfs(xfs_mount_t *mp);

extern void	xfs_unmountfs(xfs_mount_t *);
extern int	xfs_unmountfs_writesb(xfs_mount_t *);
extern int	xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int);
extern int	xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *,
			uint, int);
extern int	xfs_mount_log_sb(xfs_mount_t *, __int64_t);
extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
extern int	xfs_readsb(xfs_mount_t *, int);
extern void	xfs_freesb(xfs_mount_t *);
extern int	xfs_fs_writable(xfs_mount_t *);
extern int	xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t);

/* DMAPI ops vector acquisition/release. */
extern int	xfs_dmops_get(struct xfs_mount *);
extern void	xfs_dmops_put(struct xfs_mount *);

extern struct xfs_dmops xfs_dmcore_xfs;

#endif	/* __KERNEL__ */

/* Declarations shared with userspace tools (outside __KERNEL__). */
extern void	xfs_mod_sb(struct xfs_trans *, __int64_t);
extern xfs_agnumber_t	xfs_initialize_perag(struct xfs_mount *, xfs_agnumber_t);
extern void	xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *);
extern void	xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t);