Blame view
fs/sync.c
12.6 KB
f79e2abb9 [PATCH] sys_sync_... |
1 2 3 4 5 6 7 8 |
/* * High-level sync()-related operations */ #include <linux/kernel.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/module.h> |
914e26379 [PATCH] severing ... |
9 |
#include <linux/sched.h> |
f79e2abb9 [PATCH] sys_sync_... |
10 11 12 13 |
#include <linux/writeback.h> #include <linux/syscalls.h> #include <linux/linkage.h> #include <linux/pagemap.h> |
cf9a2ae8d [PATCH] BLOCK: Mo... |
14 15 |
#include <linux/quotaops.h> #include <linux/buffer_head.h> |
5a3e5cb8e vfs: Fix sys_sync... |
16 |
#include "internal.h" |
f79e2abb9 [PATCH] sys_sync_... |
17 18 19 |
#define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \ SYNC_FILE_RANGE_WAIT_AFTER) |
c15c54f5f vfs: Move syncing... |
20 |
/* |
d8a8559cd writeback: get ri... |
21 22 23 24 25 |
* Do the filesystem syncing work. For simple filesystems * writeback_inodes_sb(sb) just dirties buffers with inodes so we have to * submit IO for these buffers via __sync_blockdev(). This also speeds up the * wait == 1 case since in that case write_inode() functions do * sync_dirty_buffer() and thus effectively write one block at a time. |
c15c54f5f vfs: Move syncing... |
26 |
*/ |
60b0680fa vfs: Rename fsync... |
27 |
static int __sync_filesystem(struct super_block *sb, int wait) |
c15c54f5f vfs: Move syncing... |
28 |
{ |
32a88aa1b fs: Assign bdi in... |
29 30 31 32 33 34 |
/* * This should be safe, as we require bdi backing to actually * write out data in the first place */ if (!sb->s_bdi) return 0; |
c3f8a40c1 quota: Introduce ... |
35 |
/* Avoid doing twice syncing and cache pruning for quota sync */ |
d8a8559cd writeback: get ri... |
36 |
if (!wait) { |
c3f8a40c1 quota: Introduce ... |
37 |
writeout_quota_sb(sb, -1); |
d8a8559cd writeback: get ri... |
38 39 |
writeback_inodes_sb(sb); } else { |
c3f8a40c1 quota: Introduce ... |
40 |
sync_quota_sb(sb, -1); |
d8a8559cd writeback: get ri... |
41 42 |
sync_inodes_sb(sb); } |
c15c54f5f vfs: Move syncing... |
43 44 45 46 47 48 49 50 51 52 |
if (sb->s_op->sync_fs) sb->s_op->sync_fs(sb, wait); return __sync_blockdev(sb->s_bdev, wait); } /* * Write out and wait upon all dirty data associated with this * superblock. Filesystem data as well as the underlying block * device. Takes the superblock lock. */ |
60b0680fa vfs: Rename fsync... |
53 |
int sync_filesystem(struct super_block *sb) |
c15c54f5f vfs: Move syncing... |
54 55 |
{ int ret; |
5af7926ff enforce ->sync_fs... |
56 57 58 59 60 61 62 63 64 65 66 |
/* * We need to be protected against the filesystem going from * r/o to r/w or vice versa. */ WARN_ON(!rwsem_is_locked(&sb->s_umount)); /* * No point in syncing out anything if the filesystem is read-only. */ if (sb->s_flags & MS_RDONLY) return 0; |
60b0680fa vfs: Rename fsync... |
67 |
ret = __sync_filesystem(sb, 0); |
c15c54f5f vfs: Move syncing... |
68 69 |
if (ret < 0) return ret; |
60b0680fa vfs: Rename fsync... |
70 |
return __sync_filesystem(sb, 1); |
c15c54f5f vfs: Move syncing... |
71 |
} |
60b0680fa vfs: Rename fsync... |
72 |
EXPORT_SYMBOL_GPL(sync_filesystem); |
c15c54f5f vfs: Move syncing... |
73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 |
/* * Sync all the data for all the filesystems (called by sys_sync() and * emergency sync) * * This operation is careful to avoid the livelock which could easily happen * if two or more filesystems are being continuously dirtied. s_need_sync * is used only here. We set it against all filesystems and then clear it as * we sync them. So redirtied filesystems are skipped. * * But if process A is currently running sync_filesystems and then process B * calls sync_filesystems as well, process B will set all the s_need_sync * flags again, which will cause process A to resync everything. Fix that with * a local mutex. */ static void sync_filesystems(int wait) { struct super_block *sb; static DEFINE_MUTEX(mutex); mutex_lock(&mutex); /* Could be down_interruptible */ spin_lock(&sb_lock); |
5af7926ff enforce ->sync_fs... |
95 |
list_for_each_entry(sb, &super_blocks, s_list) |
c15c54f5f vfs: Move syncing... |
96 |
sb->s_need_sync = 1; |
c15c54f5f vfs: Move syncing... |
97 98 99 100 101 102 |
restart: list_for_each_entry(sb, &super_blocks, s_list) { if (!sb->s_need_sync) continue; sb->s_need_sync = 0; |
c15c54f5f vfs: Move syncing... |
103 104 |
sb->s_count++; spin_unlock(&sb_lock); |
5af7926ff enforce ->sync_fs... |
105 |
|
c15c54f5f vfs: Move syncing... |
106 |
down_read(&sb->s_umount); |
32a88aa1b fs: Assign bdi in... |
107 |
if (!(sb->s_flags & MS_RDONLY) && sb->s_root && sb->s_bdi) |
60b0680fa vfs: Rename fsync... |
108 |
__sync_filesystem(sb, wait); |
c15c54f5f vfs: Move syncing... |
109 |
up_read(&sb->s_umount); |
5af7926ff enforce ->sync_fs... |
110 |
|
c15c54f5f vfs: Move syncing... |
111 112 113 114 115 116 117 118 |
/* restart only when sb is no longer on the list */ spin_lock(&sb_lock); if (__put_super_and_need_restart(sb)) goto restart; } spin_unlock(&sb_lock); mutex_unlock(&mutex); } |
3beab0b42 sys_sync(): fix 1... |
119 120 121 122 |
/* * sync everything. Start out by waking pdflush, because that writes back * all queues in parallel. */ |
5cee5815d vfs: Make sys_syn... |
123 |
SYSCALL_DEFINE0(sync) |
cf9a2ae8d [PATCH] BLOCK: Mo... |
124 |
{ |
03ba3782e writeback: switch... |
125 |
wakeup_flusher_threads(0); |
5cee5815d vfs: Make sys_syn... |
126 127 |
sync_filesystems(0); sync_filesystems(1); |
cf9a2ae8d [PATCH] BLOCK: Mo... |
128 129 |
if (unlikely(laptop_mode)) laptop_sync_completion(); |
cf9a2ae8d [PATCH] BLOCK: Mo... |
130 131 |
return 0; } |
a2a9537ac Get rid of pdflus... |
132 133 |
static void do_sync_work(struct work_struct *work) { |
5cee5815d vfs: Make sys_syn... |
134 135 136 137 138 139 140 141 |
/* * Sync twice to reduce the possibility we skipped some inodes / pages * because they were temporarily locked */ sync_filesystems(0); sync_filesystems(0); printk("Emergency Sync complete "); |
a2a9537ac Get rid of pdflus... |
142 143 |
kfree(work); } |
cf9a2ae8d [PATCH] BLOCK: Mo... |
144 145 |
void emergency_sync(void) { |
a2a9537ac Get rid of pdflus... |
146 147 148 149 150 151 152 |
struct work_struct *work; work = kmalloc(sizeof(*work), GFP_ATOMIC); if (work) { INIT_WORK(work, do_sync_work); schedule_work(work); } |
cf9a2ae8d [PATCH] BLOCK: Mo... |
153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 |
} /* * Generic function to fsync a file. * * filp may be NULL if called via the msync of a vma. */ int file_fsync(struct file *filp, struct dentry *dentry, int datasync) { struct inode * inode = dentry->d_inode; struct super_block * sb; int ret, err; /* sync the inode to buffers */ ret = write_inode_now(inode, 0); /* sync the superblock to buffers */ sb = inode->i_sb; |
762873c25 vfs: fix uncondit... |
171 |
if (sb->s_dirt && sb->s_op->write_super) |
cf9a2ae8d [PATCH] BLOCK: Mo... |
172 |
sb->s_op->write_super(sb); |
cf9a2ae8d [PATCH] BLOCK: Mo... |
173 174 175 176 177 178 179 |
/* .. finally sync the buffers to disk */ err = sync_blockdev(sb->s_bdev); if (!ret) ret = err; return ret; } |
1fe72eaa0 fs/buffer.c: clea... |
180 |
EXPORT_SYMBOL(file_fsync); |
cf9a2ae8d [PATCH] BLOCK: Mo... |
181 |
|
4c728ef58 add a vfs_fsync h... |
182 |
/** |
148f948ba vfs: Introduce ne... |
183 |
* vfs_fsync_range - helper to sync a range of data & metadata to disk |
4c728ef58 add a vfs_fsync h... |
184 185 |
* @file: file to sync * @dentry: dentry of @file |
148f948ba vfs: Introduce ne... |
186 187 188 |
* @start: offset in bytes of the beginning of data range to sync * @end: offset in bytes of the end of data range (inclusive) * @datasync: perform only datasync |
4c728ef58 add a vfs_fsync h... |
189 |
* |
148f948ba vfs: Introduce ne... |
190 191 192 |
* Write back data in range @start..@end and metadata for @file to disk. If * @datasync is set only metadata needed to access modified file data is * written. |
4c728ef58 add a vfs_fsync h... |
193 194 195 196 197 |
* * In case this function is called from nfsd @file may be %NULL and * only @dentry is set. This can only happen when the filesystem * implements the export_operations API. */ |
148f948ba vfs: Introduce ne... |
198 199 |
int vfs_fsync_range(struct file *file, struct dentry *dentry, loff_t start, loff_t end, int datasync) |
cf9a2ae8d [PATCH] BLOCK: Mo... |
200 |
{ |
4c728ef58 add a vfs_fsync h... |
201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 |
const struct file_operations *fop; struct address_space *mapping; int err, ret; /* * Get mapping and operations from the file in case we have * as file, or get the default values for them in case we * don't have a struct file available. Damn nfsd.. */ if (file) { mapping = file->f_mapping; fop = file->f_op; } else { mapping = dentry->d_inode->i_mapping; fop = dentry->d_inode->i_fop; } |
cf9a2ae8d [PATCH] BLOCK: Mo... |
217 |
|
4c728ef58 add a vfs_fsync h... |
218 |
if (!fop || !fop->fsync) { |
cf9a2ae8d [PATCH] BLOCK: Mo... |
219 220 221 |
ret = -EINVAL; goto out; } |
2daea67e9 fsync: wait for d... |
222 |
ret = filemap_write_and_wait_range(mapping, start, end); |
cf9a2ae8d [PATCH] BLOCK: Mo... |
223 224 225 226 227 228 |
/* * We need to protect against concurrent writers, which could cause * livelocks in fsync_buffers_list(). */ mutex_lock(&mapping->host->i_mutex); |
4c728ef58 add a vfs_fsync h... |
229 |
err = fop->fsync(file, dentry, datasync); |
cf9a2ae8d [PATCH] BLOCK: Mo... |
230 231 232 |
if (!ret) ret = err; mutex_unlock(&mapping->host->i_mutex); |
148f948ba vfs: Introduce ne... |
233 |
|
cf9a2ae8d [PATCH] BLOCK: Mo... |
234 235 236 |
out: return ret; } |
EXPORT_SYMBOL(vfs_fsync_range);

/**
 * vfs_fsync - perform a fsync or fdatasync on a file
 * @file:	file to sync
 * @dentry:	dentry of @file
 * @datasync:	only perform a fdatasync operation
 *
 * Write back data and metadata for @file to disk.  If @datasync is
 * set only metadata needed to access modified file data is written.
 *
 * In case this function is called from nfsd @file may be %NULL and
 * only @dentry is set.  This can only happen when the filesystem
 * implements the export_operations API.
 */
int vfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	/* Whole-file sync == range sync over [0, LLONG_MAX]. */
	return vfs_fsync_range(file, dentry, 0, LLONG_MAX, datasync);
}
EXPORT_SYMBOL(vfs_fsync);
cf9a2ae8d [PATCH] BLOCK: Mo... |
257 |
|
4c728ef58 add a vfs_fsync h... |
258 |
static int do_fsync(unsigned int fd, int datasync) |
cf9a2ae8d [PATCH] BLOCK: Mo... |
259 260 261 262 263 264 |
{ struct file *file; int ret = -EBADF; file = fget(fd); if (file) { |
4c728ef58 add a vfs_fsync h... |
265 |
ret = vfs_fsync(file, file->f_path.dentry, datasync); |
cf9a2ae8d [PATCH] BLOCK: Mo... |
266 267 268 269 |
fput(file); } return ret; } |
a5f8fa9e9 [CVE-2009-0029] S... |
270 |
SYSCALL_DEFINE1(fsync, unsigned int, fd) |
cf9a2ae8d [PATCH] BLOCK: Mo... |
271 |
{ |
4c728ef58 add a vfs_fsync h... |
272 |
return do_fsync(fd, 0); |
cf9a2ae8d [PATCH] BLOCK: Mo... |
273 |
} |
a5f8fa9e9 [CVE-2009-0029] S... |
274 |
SYSCALL_DEFINE1(fdatasync, unsigned int, fd) |
cf9a2ae8d [PATCH] BLOCK: Mo... |
275 |
{ |
4c728ef58 add a vfs_fsync h... |
276 |
return do_fsync(fd, 1); |
cf9a2ae8d [PATCH] BLOCK: Mo... |
277 |
} |
148f948ba vfs: Introduce ne... |
278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 |
/** * generic_write_sync - perform syncing after a write if file / inode is sync * @file: file to which the write happened * @pos: offset where the write started * @count: length of the write * * This is just a simple wrapper about our general syncing function. */ int generic_write_sync(struct file *file, loff_t pos, loff_t count) { if (!(file->f_flags & O_SYNC) && !IS_SYNC(file->f_mapping->host)) return 0; return vfs_fsync_range(file, file->f_path.dentry, pos, pos + count - 1, 1); } EXPORT_SYMBOL(generic_write_sync); |
cf9a2ae8d [PATCH] BLOCK: Mo... |
294 |
/* |
f79e2abb9 [PATCH] sys_sync_... |
295 296 297 298 299 300 301 302 303 304 |
* sys_sync_file_range() permits finely controlled syncing over a segment of * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is * zero then sys_sync_file_range() will operate from offset out to EOF. * * The flag bits are: * * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range * before performing the write. * * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the |
cce770815 SYNC_FILE_RANGE_W... |
305 306 |
* range which are not presently under writeback. Note that this may block for * significant periods due to exhaustion of disk request structures. |
f79e2abb9 [PATCH] sys_sync_... |
307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 |
* * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range * after performing the write. * * Useful combinations of the flag bits are: * * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages * in the range which were dirty on entry to sys_sync_file_range() are placed * under writeout. This is a start-write-for-data-integrity operation. * * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which * are not presently under writeout. This is an asynchronous flush-to-disk * operation. Not suitable for data integrity operations. * * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for * completion of writeout of all pages in the range. This will be used after an * earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to wait * for that operation to complete and to return the result. * * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER: * a traditional sync() operation. This is a write-for-data-integrity operation * which will ensure that all pages in the range which were dirty on entry to * sys_sync_file_range() are committed to disk. * * * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any * I/O errors or ENOSPC conditions and will return those to the caller, after * clearing the EIO and ENOSPC flags in the address_space. * * It should be noted that none of these operations write out the file's * metadata. So unless the application is strictly performing overwrites of * already-instantiated disk blocks, there are no guarantees here that the data * will be available after a crash. */ |
6673e0c3f [CVE-2009-0029] S... |
341 342 |
SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes, unsigned int flags) |
f79e2abb9 [PATCH] sys_sync_... |
343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 |
{ int ret; struct file *file; loff_t endbyte; /* inclusive */ int fput_needed; umode_t i_mode; ret = -EINVAL; if (flags & ~VALID_FLAGS) goto out; endbyte = offset + nbytes; if ((s64)offset < 0) goto out; if ((s64)endbyte < 0) goto out; if (endbyte < offset) goto out; if (sizeof(pgoff_t) == 4) { if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) { /* * The range starts outside a 32 bit machine's * pagecache addressing capabilities. Let it "succeed" */ ret = 0; goto out; } if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) { /* * Out to EOF */ nbytes = 0; } } if (nbytes == 0) |
111ebb6e6 [PATCH] writeback... |
381 |
endbyte = LLONG_MAX; |
f79e2abb9 [PATCH] sys_sync_... |
382 383 384 385 386 387 388 |
else endbyte--; /* inclusive */ ret = -EBADF; file = fget_light(fd, &fput_needed); if (!file) goto out; |
0f7fc9e4d [PATCH] VFS: chan... |
389 |
i_mode = file->f_path.dentry->d_inode->i_mode; |
f79e2abb9 [PATCH] sys_sync_... |
390 391 392 393 |
ret = -ESPIPE; if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) && !S_ISLNK(i_mode)) goto out_put; |
ef51c9762 Remove do_sync_fi... |
394 |
ret = do_sync_mapping_range(file->f_mapping, offset, endbyte, flags); |
f79e2abb9 [PATCH] sys_sync_... |
395 396 397 398 399 |
out_put: fput_light(file, fput_needed); out: return ret; } |
6673e0c3f [CVE-2009-0029] S... |
400 401 402 403 404 405 406 407 408 |
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS asmlinkage long SyS_sync_file_range(long fd, loff_t offset, loff_t nbytes, long flags) { return SYSC_sync_file_range((int) fd, offset, nbytes, (unsigned int) flags); } SYSCALL_ALIAS(sys_sync_file_range, SyS_sync_file_range); #endif |
f79e2abb9 [PATCH] sys_sync_... |
409 |
|
edd5cd4a9 Introduce fixed s... |
410 411 |
/* It would be nice if people remember that not all the world's an i386 when they introduce new system calls */ |
6673e0c3f [CVE-2009-0029] S... |
412 413 |
SYSCALL_DEFINE(sync_file_range2)(int fd, unsigned int flags, loff_t offset, loff_t nbytes) |
edd5cd4a9 Introduce fixed s... |
414 415 416 |
{ return sys_sync_file_range(fd, offset, nbytes, flags); } |
6673e0c3f [CVE-2009-0029] S... |
417 418 419 420 421 422 423 424 425 |
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS asmlinkage long SyS_sync_file_range2(long fd, long flags, loff_t offset, loff_t nbytes) { return SYSC_sync_file_range2((int) fd, (unsigned int) flags, offset, nbytes); } SYSCALL_ALIAS(sys_sync_file_range2, SyS_sync_file_range2); #endif |
edd5cd4a9 Introduce fixed s... |
426 |
|
f79e2abb9 [PATCH] sys_sync_... |
427 428 429 |
/* * `endbyte' is inclusive */ |
5b04aa3a6 [PATCH] Turn do_s... |
430 431 |
int do_sync_mapping_range(struct address_space *mapping, loff_t offset, loff_t endbyte, unsigned int flags) |
f79e2abb9 [PATCH] sys_sync_... |
432 433 |
{ int ret; |
f79e2abb9 [PATCH] sys_sync_... |
434 |
|
f79e2abb9 [PATCH] sys_sync_... |
435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 |
if (!mapping) { ret = -EINVAL; goto out; } ret = 0; if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) { ret = wait_on_page_writeback_range(mapping, offset >> PAGE_CACHE_SHIFT, endbyte >> PAGE_CACHE_SHIFT); if (ret < 0) goto out; } if (flags & SYNC_FILE_RANGE_WRITE) { ret = __filemap_fdatawrite_range(mapping, offset, endbyte, |
ee53a891f mm: do_sync_mappi... |
451 |
WB_SYNC_ALL); |
f79e2abb9 [PATCH] sys_sync_... |
452 453 454 455 456 457 458 459 460 461 462 463 |
if (ret < 0) goto out; } if (flags & SYNC_FILE_RANGE_WAIT_AFTER) { ret = wait_on_page_writeback_range(mapping, offset >> PAGE_CACHE_SHIFT, endbyte >> PAGE_CACHE_SHIFT); } out: return ret; } |
5b04aa3a6 [PATCH] Turn do_s... |
464 |
EXPORT_SYMBOL_GPL(do_sync_mapping_range); |