fs/logfs/file.c
  /*
   * fs/logfs/file.c	- prepare_write, commit_write and friends
   *
   * As should be obvious for Linux kernel code, license is GPLv2
   *
   * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
   */
  #include "logfs.h"
  #include <linux/sched.h>
  #include <linux/writeback.h>
  
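/*
 * Prepare a page for the write.  If the write spans the whole page or the
 * page is already uptodate nothing else is needed.  A page lying entirely
 * beyond i_size only needs the bytes outside the write zeroed; any other
 * page has to be read in first.
 */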
  static int logfs_write_begin(struct file *file, struct address_space *mapping,
  		loff_t pos, unsigned len, unsigned flags,
  		struct page **pagep, void **fsdata)
  {
  	struct inode *inode = mapping->host;
  	struct page *page;
  	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
  
  	page = grab_cache_page_write_begin(mapping, index, flags);
  	if (!page)
  		return -ENOMEM;
  	*pagep = page;
  
  	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
  		return 0;
  	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
  		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
  		unsigned end = start + len;
  
  		/* Reading beyond i_size is simple: memset to zero */
  		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
  		return 0;
  	}
  	return logfs_readpage_nolock(page);
  }
  
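/*
 * Complete the write.  A short copy into a page that was not uptodate is
 * rejected so userspace retries the whole page.  Otherwise extend i_size
 * if needed, mark the page uptodate and either dirty it or, if
 * get_page_reserve() fails, write it out directly via logfs_write_buf().
 */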
  static int logfs_write_end(struct file *file, struct address_space *mapping,
  		loff_t pos, unsigned len, unsigned copied, struct page *page,
  		void *fsdata)
  {
  	struct inode *inode = mapping->host;
  	pgoff_t index = page->index;
  	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
  	unsigned end = start + copied;
  	int ret = 0;
  
  	BUG_ON(PAGE_CACHE_SIZE != inode->i_sb->s_blocksize);
  	BUG_ON(page->index > I3_BLOCKS);
  
  	if (copied < len) {
  		/*
		 * Short write of a non-initialized page.  Just tell userspace
  		 * to retry the entire page.
  		 */
  		if (!PageUptodate(page)) {
  			copied = 0;
  			goto out;
  		}
  	}
  	if (copied == 0)
  		goto out; /* FIXME: do we need to update inode? */
  
  	if (i_size_read(inode) < (index << PAGE_CACHE_SHIFT) + end) {
  		i_size_write(inode, (index << PAGE_CACHE_SHIFT) + end);
  		mark_inode_dirty_sync(inode);
  	}
  
  	SetPageUptodate(page);
  	if (!PageDirty(page)) {
  		if (!get_page_reserve(inode, page))
  			__set_page_dirty_nobuffers(page);
  		else
  			ret = logfs_write_buf(inode, page, WF_LOCK);
  	}
  out:
  	unlock_page(page);
  	page_cache_release(page);
  	return ret ? ret : copied;
  }
  
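/* ->readpage: read the page in and unlock it, returning the read result. */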
  int logfs_readpage(struct file *file, struct page *page)
  {
  	int ret;
  
  	ret = logfs_readpage_nolock(page);
  	unlock_page(page);
  	return ret;
  }
  
  /* Clear the page's dirty flag in the radix tree. */
  /* TODO: mucking with PageWriteback is silly.  Add a generic function to clear
   * the dirty bit from the radix tree for filesystems that don't have to wait
   * for page writeback to finish (i.e. any compressing filesystem).
   */
  static void clear_radix_tree_dirty(struct page *page)
  {
  	BUG_ON(PagePrivate(page) || page->private);
  	set_page_writeback(page);
  	end_page_writeback(page);
  }
  
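/*
 * Write a single page through logfs_write_buf().  On success the dirty tag
 * is cleared; on failure the page is re-dirtied so it is retried later.
 */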
  static int __logfs_writepage(struct page *page)
  {
  	struct inode *inode = page->mapping->host;
  	int err;
  
  	err = logfs_write_buf(inode, page, WF_LOCK);
  	if (err)
  		set_page_dirty(page);
  	else
  		clear_radix_tree_dirty(page);
  	unlock_page(page);
  	return err;
  }
  
  static int logfs_writepage(struct page *page, struct writeback_control *wbc)
  {
  	struct inode *inode = page->mapping->host;
  	loff_t i_size = i_size_read(inode);
  	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
  	unsigned offset;
  	u64 bix;
  	level_t level;
  
	log_file("logfs_writepage(%lx, %lx, %p)\n", inode->i_ino, page->index,
			page);
  
  	logfs_unpack_index(page->index, &bix, &level);
  
  	/* Indirect blocks are never truncated */
  	if (level != 0)
  		return __logfs_writepage(page);
  
  	/*
  	 * TODO: everything below is a near-verbatim copy of nobh_writepage().
  	 * The relevant bits should be factored out after logfs is merged.
  	 */
  
  	/* Is the page fully inside i_size? */
  	if (bix < end_index)
  		return __logfs_writepage(page);
  
  	 /* Is the page fully outside i_size? (truncate in progress) */
  	offset = i_size & (PAGE_CACHE_SIZE-1);
  	if (bix > end_index || offset == 0) {
  		unlock_page(page);
  		return 0; /* don't care */
  	}
  
  	/*
  	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
  	 * in multiples of the page size.  For a file that is not a multiple of
  	 * the  page size, the remaining memory is zeroed when mapped, and
  	 * writes to that region are not written out to the file."
  	 */
  	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
  	return __logfs_writepage(page);
  }
  
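/*
 * Invalidate a page: if space was reserved for it, drop the reservation
 * accounting and free its block; otherwise the block is handed over to
 * move_page_to_btree().
 */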
  static void logfs_invalidatepage(struct page *page, unsigned long offset)
  {
  	struct logfs_block *block = logfs_block(page);
  
  	if (block->reserved_bytes) {
  		struct super_block *sb = page->mapping->host->i_sb;
  		struct logfs_super *super = logfs_super(sb);
  
  		super->s_dirty_pages -= block->reserved_bytes;
  		block->ops->free_block(sb, block);
  		BUG_ON(bitmap_weight(block->alias_map, LOGFS_BLOCK_FACTOR));
  	} else
  		move_page_to_btree(page);
  	BUG_ON(PagePrivate(page) || page->private);
  }
  
  static int logfs_releasepage(struct page *page, gfp_t only_xfs_uses_this)
  {
  	return 0; /* None of these are easy to release */
  }
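
/*
 * Only FS_IOC_GETFLAGS and FS_IOC_SETFLAGS are supported; any other
 * command returns -ENOTTY.
 */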
  long logfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  {
  	struct inode *inode = file->f_path.dentry->d_inode;
  	struct logfs_inode *li = logfs_inode(inode);
  	unsigned int oldflags, flags;
  	int err;
  
  	switch (cmd) {
  	case FS_IOC_GETFLAGS:
  		flags = li->li_flags & LOGFS_FL_USER_VISIBLE;
  		return put_user(flags, (int __user *)arg);
  	case FS_IOC_SETFLAGS:
  		if (IS_RDONLY(inode))
  			return -EROFS;
  		if (!inode_owner_or_capable(inode))
  			return -EACCES;
  
  		err = get_user(flags, (int __user *)arg);
  		if (err)
  			return err;
  
  		mutex_lock(&inode->i_mutex);
  		oldflags = li->li_flags;
  		flags &= LOGFS_FL_USER_MODIFIABLE;
  		flags |= oldflags & ~LOGFS_FL_USER_MODIFIABLE;
  		li->li_flags = flags;
  		mutex_unlock(&inode->i_mutex);
  
  		inode->i_ctime = CURRENT_TIME;
  		mark_inode_dirty_sync(inode);
  		return 0;
  
  	default:
  		return -ENOTTY;
  	}
  }
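
/*
 * Flush dirty data in the given range, then write out the anchor via
 * logfs_write_anchor() under i_mutex.
 */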
  int logfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
  {
  	struct super_block *sb = file->f_mapping->host->i_sb;
  	struct inode *inode = file->f_mapping->host;
  	int ret;
  
  	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
  	if (ret)
  		return ret;

  	mutex_lock(&inode->i_mutex);
  	logfs_write_anchor(sb);
  	mutex_unlock(&inode->i_mutex);
  	return 0;
  }
  
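/*
 * Validate the attribute change, truncate the inode if ATTR_SIZE is set
 * and copy the remaining attributes into the in-memory inode.
 */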
  static int logfs_setattr(struct dentry *dentry, struct iattr *attr)
  {
  	struct inode *inode = dentry->d_inode;
  	int err = 0;
  	err = inode_change_ok(inode, attr);
  	if (err)
  		return err;
  	if (attr->ia_valid & ATTR_SIZE) {
  		err = logfs_truncate(inode, attr->ia_size);
  		if (err)
  			return err;
  	}

  	setattr_copy(inode, attr);
  	mark_inode_dirty(inode);
  	return 0;
  }
  
  const struct inode_operations logfs_reg_iops = {
  	.setattr	= logfs_setattr,
  };
  
  const struct file_operations logfs_reg_fops = {
  	.aio_read	= generic_file_aio_read,
  	.aio_write	= generic_file_aio_write,
  	.fsync		= logfs_fsync,
  	.unlocked_ioctl	= logfs_ioctl,
  	.llseek		= generic_file_llseek,
  	.mmap		= generic_file_readonly_mmap,
  	.open		= generic_file_open,
  	.read		= do_sync_read,
  	.write		= do_sync_write,
  };
  
  const struct address_space_operations logfs_reg_aops = {
  	.invalidatepage	= logfs_invalidatepage,
  	.readpage	= logfs_readpage,
  	.releasepage	= logfs_releasepage,
  	.set_page_dirty	= __set_page_dirty_nobuffers,
  	.writepage	= logfs_writepage,
  	.writepages	= generic_writepages,
  	.write_begin	= logfs_write_begin,
  	.write_end	= logfs_write_end,
  };