Blame view
fs/gfs2/ops_vm.c
3.89 KB
b3b94faa5
|
1 2 |
/* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. |
3a8a9a103
|
3 |
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. |
b3b94faa5
|
4 5 6 |
* * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions |
e9fc2aa09
|
7 |
* of the GNU General Public License version 2. |
b3b94faa5
|
8 |
*/ |
b3b94faa5
|
9 10 11 12 13 14 |
#include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/mm.h> #include <linux/pagemap.h> |
5c676f6d3
|
15 |
#include <linux/gfs2_ondisk.h> |
7d308590a
|
16 |
#include <linux/lm_interface.h> |
b3b94faa5
|
17 18 |
#include "gfs2.h" |
5c676f6d3
|
19 |
#include "incore.h" |
b3b94faa5
|
20 21 22 23 |
#include "bmap.h" #include "glock.h" #include "inode.h" #include "ops_vm.h" |
b3b94faa5
|
24 25 26 |
#include "quota.h" #include "rgrp.h" #include "trans.h" |
5c676f6d3
|
27 |
#include "util.h" |
b3b94faa5
|
28 |
|
d0217ac04
|
29 |
static int gfs2_private_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
b3b94faa5
|
30 |
{ |
54cb8821d
|
31 |
struct gfs2_inode *ip = GFS2_I(vma->vm_file->f_mapping->host); |
b3b94faa5
|
32 33 |
set_bit(GIF_PAGED, &ip->i_flags); |
d0217ac04
|
34 |
return filemap_fault(vma, vmf); |
b3b94faa5
|
35 36 37 38 |
static int alloc_page_backing(struct gfs2_inode *ip, struct page *page)
{
	/*
	 * alloc_page_backing - allocate on-disk blocks backing one page
	 * @ip: the GFS2 inode the page belongs to
	 * @page: the page that needs backing store
	 *
	 * Called from the shared-write fault path when a faulting page has no
	 * allocated disk blocks yet.  Order matters throughout: quota lock,
	 * quota check, in-place reservation, then the transaction; the goto
	 * ladder at the bottom unwinds in exactly the reverse order.
	 *
	 * Returns: 0 on success, or a negative error code (errno convention
	 * assumed from the callees -- TODO confirm against their definitions).
	 */
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned long index = page->index;
	/* Convert the page index into a starting filesystem logical block. */
	u64 lblock = index << (PAGE_CACHE_SHIFT -
			       sdp->sd_sb.sb_bsize_shift);
	/* Number of filesystem blocks covered by one page. */
	unsigned int blocks = PAGE_CACHE_SIZE >> sdp->sd_sb.sb_bsize_shift;
	struct gfs2_alloc *al;
	unsigned int data_blocks, ind_blocks;
	unsigned int x;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
	if (error)
		goto out_gunlock_q;

	/* Work out how many data and indirect blocks one page may need. */
	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);

	al->al_requested = data_blocks + ind_blocks;

	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_gunlock_q;

	/*
	 * Transaction size: resource-group blocks from the reservation plus
	 * indirect blocks, the dinode itself, and statfs/quota changes.
	 */
	error = gfs2_trans_begin(sdp, al->al_rgd->rd_length +
				 ind_blocks + RES_DINODE +
				 RES_STATFS + RES_QUOTA, 0);
	if (error)
		goto out_ipres;

	/* A stuffed (inline-data) dinode must be unstuffed before mapping. */
	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (error)
			goto out_trans;
	}

	/*
	 * Map (and allocate, new == 1) extents until the whole page's worth
	 * of blocks is covered; each call may cover several blocks (extlen).
	 */
	for (x = 0; x < blocks; ) {
		u64 dblock;
		unsigned int extlen;
		int new = 1;

		error = gfs2_extent_map(&ip->i_inode, lblock, &new, &dblock, &extlen);
		if (error)
			goto out_trans;

		lblock += extlen;
		x += extlen;
	}

	/* We only get here for unallocated pages, so something was alloced. */
	gfs2_assert_warn(sdp, al->al_alloced);

out_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_gunlock_q:
	gfs2_quota_unlock(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}
d0217ac04
|
99 100 |
static int gfs2_sharewrite_fault(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	/*
	 * gfs2_sharewrite_fault - ->fault handler for shared-writable mappings
	 * @vma: the virtual memory area the fault occurred in
	 * @vmf: fault details supplied by the VM
	 *
	 * Takes the inode glock exclusively, runs the generic fault path, and
	 * if the faulted range has no backing blocks yet, allocates them via
	 * alloc_page_backing() and dirties the page.
	 *
	 * Returns: a VM_FAULT_* code
	 */
	struct file *file = vma->vm_file;
	struct gfs2_file *gf = file->private_data;
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	int alloc_required;
	int error;
	int ret = 0;

	/* Exclusive cluster lock on the inode for the duration of the fault. */
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	set_bit(GIF_PAGED, &ip->i_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	/* Does the faulted page's file range still need block allocation? */
	error = gfs2_write_alloc_required(ip,
					(u64)vmf->pgoff << PAGE_CACHE_SHIFT,
					PAGE_CACHE_SIZE, &alloc_required);
	if (error) {
		ret = VM_FAULT_OOM; /* XXX: are these right? */
		goto out_unlock;
	}

	/*
	 * GFF_EXLOCK marks that this task already holds the glock, so
	 * re-entrant lock attempts during filemap_fault can be detected --
	 * presumably by the read/lock paths; confirm against ops_file.c.
	 */
	set_bit(GFF_EXLOCK, &gf->f_flags);
	ret = filemap_fault(vma, vmf);
	clear_bit(GFF_EXLOCK, &gf->f_flags);
	if (ret & VM_FAULT_ERROR)
		goto out_unlock;

	if (alloc_required) {
		/* XXX: do we need to drop page lock around alloc_page_backing?*/
		error = alloc_page_backing(ip, vmf->page);
		if (error) {
			/*
			 * VM_FAULT_LOCKED should always be the case for
			 * filemap_fault, but it may not be in a future
			 * implementation.
			 */
			if (ret & VM_FAULT_LOCKED)
				unlock_page(vmf->page);
			/* Drop the reference filemap_fault took on the page. */
			page_cache_release(vmf->page);
			ret = VM_FAULT_OOM;
			goto out_unlock;
		}
		/* Newly backed page must be written out eventually. */
		set_page_dirty(vmf->page);
	}

out_unlock:
	gfs2_glock_dq_uninit(&i_gh);
out:
	return ret;
}
/*
 * VM operations for mappings that do not need allocation at fault time
 * (presumably selected for non shared-writable mmaps -- confirm against
 * the gfs2 mmap code that installs these ops).
 */
struct vm_operations_struct gfs2_vm_ops_private = {
	.fault = gfs2_private_fault,
};

/*
 * VM operations for shared-writable mappings, where a fault may have to
 * allocate on-disk backing for the page under the inode glock.
 */
struct vm_operations_struct gfs2_vm_ops_sharewrite = {
	.fault = gfs2_sharewrite_fault,
};