Blame view
fs/ufs/inode.c
32.8 KB
b24413180
|
1 |
// SPDX-License-Identifier: GPL-2.0 |
1da177e4c
|
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 |
/* * linux/fs/ufs/inode.c * * Copyright (C) 1998 * Daniel Pirkl <daniel.pirkl@email.cz> * Charles University, Faculty of Mathematics and Physics * * from * * linux/fs/ext2/inode.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993 * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 */ |
7c0f6ba68
|
28 |
#include <linux/uaccess.h> |
1da177e4c
|
29 30 31 |
#include <linux/errno.h> #include <linux/fs.h> |
1da177e4c
|
32 33 34 35 |
#include <linux/time.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/mm.h> |
1da177e4c
|
36 |
#include <linux/buffer_head.h> |
a9185b41a
|
37 |
#include <linux/writeback.h> |
bb8c2d66b
|
38 |
#include <linux/iversion.h> |
1da177e4c
|
39 |
|
e54205988
|
40 |
#include "ufs_fs.h" |
bcd6d4ecf
|
41 |
#include "ufs.h" |
1da177e4c
|
42 43 |
#include "swab.h" #include "util.h" |
4e3911f3d
|
44 |
/*
 * Translate a logical block number into the chain of indirection offsets
 * needed to reach it: offsets[0] indexes the inode's block array, further
 * entries index successive indirect blocks.
 *
 * Returns the depth of the chain (1..4), or 0 if the block number is
 * beyond the triple-indirect range (a warning is logged in that case).
 *
 * NOTE: the "else if" conditions subtract from i_block as a side effect,
 * so each test sees i_block relative to the start of its own range.
 */
static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;			/* address pointers per block */
	int ptrs_bits = uspi->s_apbshift;	/* log2(ptrs) */
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;


	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld ",ptrs,double_blocks);
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}
724bb09fd
|
74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 |
/*
 * One element of a lookup chain through the inode's indirection tree:
 * @p points at the on-disk pointer slot (inside the inode or a buffer),
 * the union caches the value read from it (32-bit for UFS1, 64-bit for
 * UFS2), and @bh pins the buffer the slot lives in (NULL for the slot
 * embedded in the in-core inode).
 */
typedef struct {
	void *p;
	union {
		__fs32 key32;
		__fs64 key64;
	};
	struct buffer_head *bh;
} Indirect;

/*
 * Append a UFS1 (32-bit) pointer to the chain [from..to] under the
 * ufsi->meta_lock seqlock.  The cached key is (re)read until a stable
 * snapshot is obtained, and the whole chain is revalidated against the
 * on-disk slots.  Returns nonzero if every link still matches (the loop
 * walked past @to), 0 if a concurrent writer changed an earlier link and
 * the caller must restart.
 */
static inline int grow_chain32(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs32 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key32 = *(__fs32 *)(to->p = v);
		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

/*
 * UFS2 (64-bit pointer) counterpart of grow_chain32(); identical
 * seqlock-snapshot-and-revalidate protocol.
 */
static inline int grow_chain64(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs64 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key64 = *(__fs64 *)(to->p = v);
		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}
1da177e4c
|
114 115 |
/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 *
 * Walks the indirection chain described by offsets[0..depth-1] (as built
 * by ufs_block_to_path()), locklessly: each link is snapshotted with
 * grow_chain32/64 under the meta_lock seqlock, and the whole walk is
 * restarted ("changed:") if a concurrent truncate invalidated an earlier
 * link.  Returns 0 if any pointer along the way is a hole or a read fails.
 */
static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
	int shift = uspi->s_apbshift-uspi->s_fpbshift;
	Indirect chain[4], *q = chain;
	unsigned *p;
	unsigned flags = UFS_SB(sb)->s_flags;
	u64 res = 0;

	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx ",
		uspi->s_fpbshift, uspi->s_apbmask,
		(unsigned long long)mask);

	if (depth == 0)
		goto no_block;

again:
	p = offsets;

	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

	/* UFS1: 32-bit on-disk pointers. */
	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
		goto changed;
	if (!q->key32)
		goto no_block;
	while (--depth) {
		__fs32 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		/* Pointer blocks are block-sized; n>>shift picks the
		 * fragment of the block holding slot n. */
		bh = sb_bread(sb, uspi->s_sbbase +
				  fs32_to_cpu(sb, q->key32) + (n>>shift));
		if (!bh)
			goto no_block;
		ptr = (__fs32 *)bh->b_data + (n & mask);
		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key32)
			goto no_block;
	}
	res = fs32_to_cpu(sb, q->key32);
	goto found;

ufs2:
	/* UFS2: same walk with 64-bit on-disk pointers. */
	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
		goto changed;
	if (!q->key64)
		goto no_block;

	while (--depth) {
		__fs64 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs64_to_cpu(sb, q->key64) + (n>>shift));
		if (!bh)
			goto no_block;
		ptr = (__fs64 *)bh->b_data + (n & mask);
		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key64)
			goto no_block;
	}
	res = fs64_to_cpu(sb, q->key64);
found:
	res += uspi->s_sbbase;	/* make the result filesystem-absolute */
no_block:
	/* Drop every buffer pinned by the chain before returning. */
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	return res;

changed:
	/* A link was modified under us: release buffers and retry. */
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	goto again;
}
0f3c1294b
|
202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 |
/*
 * Unpacking tails: we have a file with partial final block and
 * we had been asked to extend it.  If the fragment being written
 * is within the same block, we need to extend the tail just to cover
 * that fragment.  Otherwise the tail is extended to full block.
 *
 * Note that we might need to create a _new_ tail, but that will
 * be handled elsewhere; this is strictly for resizing old
 * ones.
 *
 * Returns true if the reallocation succeeded (*err holds the error
 * otherwise).
 */
static bool
ufs_extend_tail(struct inode *inode, u64 writes_to,
		  int *err, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned lastfrag = ufsi->i_lastfrag; /* it's a short file, so unsigned is enough */
	unsigned block = ufs_fragstoblks(lastfrag);
	unsigned new_size;
	void *p;
	u64 tmp;

	/* Same block as the tail?  Grow only up to the written fragment;
	 * otherwise grow the tail to a full block. */
	if (writes_to < (lastfrag | uspi->s_fpbmask))
		new_size = (writes_to & uspi->s_fpbmask) + 1;
	else
		new_size = uspi->s_fpb;

	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
				new_size - (lastfrag & uspi->s_fpbmask), err,
				locked_page);
	return tmp != 0;
}
022a6dc5f
|
236 237 |
/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @index: number of block pointer within the inode's array.
 * @new_fragment: number of new allocated fragment(s)
 * @err: we set it if something wrong
 * @new: we set it if we allocate new block; may be NULL
 * @locked_page: for ufs_new_fragments()
 *
 * Returns the filesystem-absolute fragment number (existing or freshly
 * allocated), or 0 on failure with *err set.
 */
static u64
ufs_inode_getfrag(struct inode *inode, unsigned index,
		  sector_t new_fragment, int *err,
		  int *new, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 tmp, goal, lastfrag;
	unsigned nfrags = uspi->s_fpb;	/* default: allocate a full block */
	void *p;

        /* TODO : to be done for write support
        if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
                goto ufs2;
         */
	p = ufs_get_direct_data_ptr(uspi, ufsi, index);
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;	/* already mapped */

	lastfrag = ufsi->i_lastfrag;

	/* will that be a new tail? */
	if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
		nfrags = (new_fragment & uspi->s_fpbmask) + 1;

	/* Prefer to allocate right after the previous direct block. */
	goal = 0;
	if (index) {
		goal = ufs_data_ptr_to_cpu(sb,
				 ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
		if (goal)
			goal += uspi->s_fpb;
	}
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
				goal, nfrags, err, locked_page);

	if (!tmp) {
		*err = -ENOSPC;
		return 0;
	}

	if (new)
		*new = 1;
	inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode))
		ufs_sync_inode (inode);
	mark_inode_dirty(inode);
out:
	return tmp + uspi->s_sbbase;

     /* This part : To be implemented ....
        Required only for writing, not required for READ-ONLY.
ufs2:

	u2_block = ufs_fragstoblks(fragment);
	u2_blockoff = ufs_fragnum(fragment);
	p = ufsi->i_u1.u2_i_data + block;
	goal = 0;

repeat2:
	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;

     */
}
022a6dc5f
|
310 311 |
/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @ind_block: block number of the indirect block
 * @index: number of pointer within the indirect block
 * @new_fragment: number of new allocated fragment
 *  (block will hold this fragment and also uspi->s_fpb-1)
 * @err: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag(); may be NULL
 * @locked_page: see ufs_inode_getfrag()
 *
 * Looks up (and allocates on demand) the pointer at @index inside the
 * indirect block @ind_block.  Returns the filesystem-absolute fragment
 * number, or 0 on hole/failure.
 */
static u64
ufs_inode_getblock(struct inode *inode, u64 ind_block,
		  unsigned index, sector_t new_fragment, int *err,
		  int *new, struct page *locked_page)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	u64 tmp = 0, goal;
	struct buffer_head *bh;
	void *p;

	if (!ind_block)
		return 0;	/* hole in the parent: nothing to map */

	/* index >> shift selects the fragment of the indirect block that
	 * holds slot @index. */
	bh = sb_bread(sb, ind_block + (index >> shift));
	if (unlikely(!bh)) {
		*err = -EIO;
		return 0;
	}

	index &= uspi->s_apbmask >> uspi->s_fpbshift;
	if (uspi->fs_magic == UFS2_MAGIC)
		p = (__fs64 *)bh->b_data + index;
	else
		p = (__fs32 *)bh->b_data + index;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;	/* already mapped */

	/* Allocation goal: right after the previous slot's block if it is
	 * mapped, else right after the indirect block itself. */
	if (index && (uspi->fs_magic == UFS2_MAGIC ?
		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_page);
	if (!tmp)
		goto out;

	if (new)
		*new = 1;

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
out:
	brelse (bh);
	UFSD("EXIT ");
	if (tmp)
		tmp += uspi->s_sbbase;
	return tmp;
}
022a6dc5f
|
379 |
/**
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * readpage, writepage and so on
 *
 * Maps @fragment of @inode into bh_result.  Lookups are done locklessly
 * via ufs_frag_map(); only the allocation path (create, not yet mapped,
 * or mapped but possibly part of a resizable tail) takes truncate_mutex.
 */
static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int err = 0, new = 0;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
	u64 phys64 = 0;
	unsigned frag = fragment & uspi->s_fpbmask;

	phys64 = ufs_frag_map(inode, offsets, depth);
	if (!create)
		goto done;	/* pure lookup: report hole or mapping */

	if (phys64) {
		/* Already mapped; only a direct fragment at/past i_lastfrag
		 * may still need its tail block extended below. */
		if (fragment >= UFS_NDIR_FRAGMENT)
			goto done;
		read_seqlock_excl(&UFS_I(inode)->meta_lock);
		if (fragment < UFS_I(inode)->i_lastfrag) {
			read_sequnlock_excl(&UFS_I(inode)->meta_lock);
			goto done;
		}
		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
	}
        /* This code entered only while writing ....? */

	mutex_lock(&UFS_I(inode)->truncate_mutex);

	UFSD("ENTER, ino %lu, fragment %llu ", inode->i_ino, (unsigned long long)fragment);
	if (unlikely(!depth)) {
		ufs_warning(sb, "ufs_get_block", "block > big");
		err = -EIO;
		goto out;
	}

	/* Resize an existing partial tail block if the write lands at or
	 * beyond it. */
	if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
		unsigned lastfrag = UFS_I(inode)->i_lastfrag;
		unsigned tailfrags = lastfrag & uspi->s_fpbmask;
		if (tailfrags && fragment >= lastfrag) {
			if (!ufs_extend_tail(inode, fragment,
					     &err, bh_result->b_page))
				goto out;
		}
	}

	if (depth == 1) {
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, &new, bh_result->b_page);
	} else {
		int i;
		/* Allocate each level of indirection down to the data
		 * block; only the final level reports "new". */
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, NULL, NULL);
		for (i = 1; i < depth - 1; i++)
			phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
						fragment, &err, NULL, NULL);
		phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
					fragment, &err, &new, bh_result->b_page);
	}
out:
	if (phys64) {
		phys64 += frag;
		map_bh(bh_result, sb, phys64);
		if (new)
			set_buffer_new(bh_result);
	}
	mutex_unlock(&UFS_I(inode)->truncate_mutex);
	return err;

done:
	if (phys64)
		map_bh(bh_result, sb, phys64 + frag);
	return 0;
}
1da177e4c
|
455 456 457 458 |
/* ->writepage: write one page using the generic buffer-head helper with
 * UFS's block mapper. */
static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page,ufs_getfrag_block,wbc);
}
82b9d1d0d
|
459 |
|
1da177e4c
|
460 461 462 463 |
/* ->readpage: read one page using the generic buffer-head helper with
 * UFS's block mapper. */
static int ufs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page,ufs_getfrag_block);
}
82b9d1d0d
|
464 |
|
f4e420dc4
|
465 |
/* Map/prepare the buffers backing [pos, pos+len) of @page before a
 * chunked write (used by UFS directory code). */
int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, ufs_getfrag_block);
}
82b9d1d0d
|
469 |
|
010d331fc
|
470 |
static void ufs_truncate_blocks(struct inode *); |
83f6e3710
|
471 472 473 |
/*
 * Undo the effects of a failed or short write that extended past
 * i_size: drop the page-cache pages beyond EOF and free any blocks
 * allocated for them.
 */
static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ufs_truncate_blocks(inode);
	}
}
82b9d1d0d
|
479 480 481 482 |
/*
 * ->write_begin: delegate to the generic buffer-head implementation;
 * on failure, trim any blocks speculatively allocated past i_size.
 */
static int ufs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int err = block_write_begin(mapping, pos, len, flags, pagep,
				    ufs_getfrag_block);

	if (unlikely(err))
		ufs_write_failed(mapping, pos + len);

	return err;
}
3b7a3a05e
|
492 493 494 495 496 497 498 499 500 501 502 |
/*
 * ->write_end: commit the write via the generic helper; if fewer than
 * @len bytes were actually written, release blocks allocated beyond the
 * committed size.
 */
static int ufs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int written = generic_write_end(file, mapping, pos, len, copied,
					page, fsdata);

	if (written < len)
		ufs_write_failed(mapping, pos + len);
	return written;
}
1da177e4c
|
503 504 505 506 |
/* ->bmap: logical-to-physical block translation via the generic helper. */
static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,ufs_getfrag_block);
}
82b9d1d0d
|
507 |
|
f5e54d6e5
|
508 |
/* Address-space operations shared by UFS regular files, directories and
 * page-backed symlinks. */
const struct address_space_operations ufs_aops = {
	.readpage = ufs_readpage,
	.writepage = ufs_writepage,
	.write_begin = ufs_write_begin,
	.write_end = ufs_write_end,
	.bmap = ufs_bmap
};
826843a34
|
515 516 517 518 519 520 521 522 523 524 525 |
/*
 * Install the inode/file/address-space operations appropriate for the
 * inode's file type.  Short symlinks (no allocated blocks) keep their
 * target inline in the inode; longer ones go through the page cache.
 */
static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks) {
			/* fast symlink: target stored in the inode body */
			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
			inode->i_op = &simple_symlink_inode_operations;
		} else {
			inode->i_mapping->a_ops = &ufs_aops;
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}
07a0cfec3
|
538 |
/*
 * Fill the in-core inode from an on-disk UFS1 inode.
 * Returns 0 on success, -ESTALE for a deleted (nlink == 0) inode.
 */
static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
	if (inode->i_nlink == 0)
		return -ESTALE;

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));

	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
	/* UFS1 stores 32-bit signed timestamps; sub-second parts are not
	 * carried over. */
	inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
	inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
	inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);


	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		/* block pointers (or device number) live in ui_addr */
		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		/* fast symlink: copy the inline target, NUL-terminated */
		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}
1da177e4c
|
582 |
|
07a0cfec3
|
583 |
/*
 * Fill the in-core inode from an on-disk UFS2 inode.
 * Returns 0 on success, -ESTALE for a deleted (nlink == 0) inode.
 */
static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	UFSD("Reading ufs2 inode, ino %lu ", inode->i_ino);

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
	if (inode->i_nlink == 0)
		return -ESTALE;

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));

	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
	/* UFS2 has 64-bit seconds plus 32-bit nanosecond fields. */
	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
	inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
	inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
	/*
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	*/

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		/* block pointers (or device number) live in ui_addr */
		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
		       sizeof(ufs2_inode->ui_u2.ui_addr));
	} else {
		/* fast symlink: copy the inline target, NUL-terminated */
		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}
b55c460da
|
630 |
/*
 * Obtain the in-core inode for @ino: return a cached inode if present,
 * otherwise read it from disk (dispatching on UFS1 vs UFS2 layout),
 * initialize the UFS-private fields and ops, and unlock it.
 * Returns an ERR_PTR on bad inode number, read failure, or stale inode.
 */
struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
	struct ufs_inode_info *ufsi;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;
	struct inode *inode;
	int err = -EIO;

	UFSD("ENTER, ino %lu ", ino);

	/* Validate against the filesystem's inode range. */
	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu) ",
			    ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;	/* already in cache and initialized */

	ufsi = UFS_I(inode);

	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu ",
			    inode->i_ino);
		goto bad_inode;
	}
	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		err = ufs2_read_inode(inode,
				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		err = ufs1_read_inode(inode,
				      ufs_inode + ufs_inotofsbo(inode->i_ino));
	}
	brelse(bh);
	if (err)
		goto bad_inode;

	inode_inc_iversion(inode);
	/* Number of fragments the file occupies, rounded up. */
	ufsi->i_lastfrag =
		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
	ufsi->i_dir_start_lookup = 0;
	ufsi->i_osync = 0;

	ufs_set_inode_ops(inode);

	UFSD("EXIT ");
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(err);
}
3313e2926
|
690 |
/*
 * Copy the in-core inode back into an on-disk UFS1 inode image.
 * An unlinked inode is written out as all zeroes.
 */
static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	/* UFS1 keeps 32-bit seconds; microsecond fields are zeroed. */
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atime.tv_usec = 0;
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	}
	else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs_inode));
}

/*
 * Copy the in-core inode back into an on-disk UFS2 inode image.
 * An unlinked inode is written out as all zeroes.
 */
static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	UFSD("ENTER ");
	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	/* UFS2: 64-bit seconds plus 32-bit nanosecond fields. */
	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);

	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
	UFSD("EXIT ");
}

/*
 * Write the inode to its on-disk location, dispatching on UFS1/UFS2
 * layout.  If @do_sync, the buffer is written synchronously.
 * Returns 0 on success, -1 on a bad inode number or read failure.
 */
static int ufs_update_inode(struct inode * inode, int do_sync)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;

	UFSD("ENTER, ino %lu ", inode->i_ino);

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu) ", inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu ", inode->i_ino);
		return -1;
	}
	if (uspi->fs_magic == UFS2_MAGIC) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		ufs2_update_inode(inode,
				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;

		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse (bh);

	UFSD("EXIT ");
	return 0;
}
a9185b41a
|
815 |
/* ->write_inode: flush the inode; synchronous only for WB_SYNC_ALL
 * writeback. */
int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

/* Synchronously write the inode to disk. */
int ufs_sync_inode (struct inode *inode)
{
	return ufs_update_inode (inode, 1);
}
58e8268c7
|
824 |
/*
 * ->evict_inode: final teardown when the inode leaves the cache.
 * For an unlinked (and not bad) inode, truncate its data blocks,
 * write the zeroed on-disk inode, and release the inode number back
 * to the cylinder-group bitmap.
 */
void ufs_evict_inode(struct inode * inode)
{
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode))
		want_delete = 1;

	truncate_inode_pages_final(&inode->i_data);
	if (want_delete) {
		inode->i_size = 0;
		/* Only these types own data blocks worth freeing. */
		if (inode->i_blocks &&
		    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		     S_ISLNK(inode->i_mode)))
			ufs_truncate_blocks(inode);
		ufs_update_inode(inode, inode_needs_sync(inode));
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	if (want_delete)
		ufs_free_inode(inode);
}
010d331fc
|
847 |
|
a138b4b68
|
848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 |
/*
 * Accumulator used to coalesce runs of physically adjacent fragments so
 * they can be released with a single ufs_free_blocks() call.
 */
struct to_free {
	struct inode *inode;
	u64 to;			/* fragment just past the accumulated run */
	unsigned count;		/* fragments accumulated so far */
};

/*
 * Extend the pending run with @count fragments starting at @from; if @from
 * is not contiguous with the run, flush the run first.  Calling
 * free_data(ctx, 0, 0) flushes any remainder.
 */
static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
{
	if (ctx->count && ctx->to != from) {
		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
		ctx->count = 0;
	}
	ctx->count += count;
	ctx->to = from + count;
}
010d331fc
|
863 864 865 866 867 868 869 870 871 |
#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift) static void ufs_trunc_direct(struct inode *inode) { struct ufs_inode_info *ufsi = UFS_I(inode); struct super_block * sb; struct ufs_sb_private_info * uspi; void *p; u64 frag1, frag2, frag3, frag4, block1, block2; |
a138b4b68
|
872 |
struct to_free ctx = {.inode = inode}; |
010d331fc
|
873 874 875 876 877 878 879 |
unsigned i, tmp; UFSD("ENTER: ino %lu ", inode->i_ino); sb = inode->i_sb; uspi = UFS_SB(sb)->s_uspi; |
010d331fc
|
880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 |
frag1 = DIRECT_FRAGMENT; frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag); frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1); frag3 = frag4 & ~uspi->s_fpbmask; block1 = block2 = 0; if (frag2 > frag3) { frag2 = frag4; frag3 = frag4 = 0; } else if (frag2 < frag3) { block1 = ufs_fragstoblks (frag2); block2 = ufs_fragstoblks (frag3); } UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu," " frag3 %llu, frag4 %llu ", inode->i_ino, (unsigned long long)frag1, (unsigned long long)frag2, (unsigned long long)block1, (unsigned long long)block2, (unsigned long long)frag3, (unsigned long long)frag4); if (frag1 >= frag2) goto next1; /* * Free first free fragments */ p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1)); tmp = ufs_data_ptr_to_cpu(sb, p); if (!tmp ) ufs_panic (sb, "ufs_trunc_direct", "internal error"); frag2 -= frag1; frag1 = ufs_fragnum (frag1); ufs_free_fragments(inode, tmp + frag1, frag2); |
010d331fc
|
914 915 916 917 918 919 920 921 922 923 924 925 926 |
next1: /* * Free whole blocks */ for (i = block1 ; i < block2; i++) { p = ufs_get_direct_data_ptr(uspi, ufsi, i); tmp = ufs_data_ptr_to_cpu(sb, p); if (!tmp) continue; write_seqlock(&ufsi->meta_lock); ufs_data_ptr_clear(uspi, p); write_sequnlock(&ufsi->meta_lock); |
a138b4b68
|
927 |
free_data(&ctx, tmp, uspi->s_fpb); |
010d331fc
|
928 |
} |
a138b4b68
|
929 |
free_data(&ctx, 0, 0); |
010d331fc
|
930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 |
if (frag3 >= frag4) goto next3; /* * Free last free fragments */ p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3)); tmp = ufs_data_ptr_to_cpu(sb, p); if (!tmp ) ufs_panic(sb, "ufs_truncate_direct", "internal error"); frag4 = ufs_fragnum (frag4); write_seqlock(&ufsi->meta_lock); ufs_data_ptr_clear(uspi, p); write_sequnlock(&ufsi->meta_lock); ufs_free_fragments (inode, tmp, frag4); |
010d331fc
|
947 948 949 950 951 |
next3: UFSD("EXIT: ino %lu ", inode->i_ino); } |
163073db5
|
952 |
/*
 * Recursively free an entire indirect branch rooted at @ind_block.
 * @depth is the number of indirection levels remaining (1 == the block's
 * slots point straight at data).  The indirect block itself is forgotten
 * and released last.
 */
static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
	unsigned slot;

	if (!ubh)
		return;

	if (--depth) {
		/* interior level: recurse into each populated child branch */
		for (slot = 0; slot < uspi->s_apb; slot++) {
			void *p = ubh_get_data_ptr(uspi, ubh, slot);
			u64 block = ufs_data_ptr_to_cpu(sb, p);

			if (block)
				free_full_branch(inode, block, depth);
		}
	} else {
		/* leaf level: slots point at data blocks; batch the frees */
		struct to_free ctx = {.inode = inode};

		for (slot = 0; slot < uspi->s_apb; slot++) {
			void *p = ubh_get_data_ptr(uspi, ubh, slot);
			u64 block = ufs_data_ptr_to_cpu(sb, p);

			if (block)
				free_data(&ctx, block, uspi->s_fpb);
		}
		free_data(&ctx, 0, 0);
	}

	ubh_bforget(ubh);	/* contents are dead; don't write them back */
	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
}
7b4e4f7f8
|
983 |
static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth) |
010d331fc
|
984 |
{ |
7bad5939f
|
985 986 |
struct super_block *sb = inode->i_sb; struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; |
7bad5939f
|
987 |
unsigned i; |
010d331fc
|
988 |
|
9e0fbbde2
|
989 |
if (--depth) { |
7b4e4f7f8
|
990 |
for (i = from; i < uspi->s_apb ; i++) { |
163073db5
|
991 992 993 994 995 996 997 998 999 |
void *p = ubh_get_data_ptr(uspi, ubh, i); u64 block = ufs_data_ptr_to_cpu(sb, p); if (block) { write_seqlock(&UFS_I(inode)->meta_lock); ufs_data_ptr_clear(uspi, p); write_sequnlock(&UFS_I(inode)->meta_lock); ubh_mark_buffer_dirty(ubh); free_full_branch(inode, block, depth); } |
a96574233
|
1000 |
} |
9e0fbbde2
|
1001 |
} else { |
a138b4b68
|
1002 |
struct to_free ctx = {.inode = inode}; |
9e0fbbde2
|
1003 1004 |
for (i = from; i < uspi->s_apb; i++) { |
163073db5
|
1005 1006 1007 1008 1009 1010 1011 1012 |
void *p = ubh_get_data_ptr(uspi, ubh, i); u64 block = ufs_data_ptr_to_cpu(sb, p); if (block) { write_seqlock(&UFS_I(inode)->meta_lock); ufs_data_ptr_clear(uspi, p); write_sequnlock(&UFS_I(inode)->meta_lock); ubh_mark_buffer_dirty(ubh); free_data(&ctx, block, uspi->s_fpb); |
163073db5
|
1013 |
} |
9e0fbbde2
|
1014 |
} |
a138b4b68
|
1015 |
free_data(&ctx, 0, 0); |
010d331fc
|
1016 |
} |
9e0fbbde2
|
1017 1018 1019 |
if (IS_SYNC(inode) && ubh_buffer_dirty(ubh)) ubh_sync_block(ubh); ubh_brelse(ubh); |
010d331fc
|
1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 |
/*
 * Make sure the fragment containing the (new) last byte at @size is
 * allocated and uptodate before truncation shrinks the file, and — when
 * the tail lives past the direct area — zero out the remaining fragments
 * of its block on disk.
 *
 * Returns 0 on success or a negative errno.
 */
static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i, end;
	sector_t lastfrag;
	struct page *lastpage;
	struct buffer_head *bh;
	u64 phys64;

	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag)	/* empty file: nothing to allocate */
		goto out;

	lastfrag--;

	lastpage = ufs_get_locked_page(mapping, lastfrag >>
				       (PAGE_SHIFT - inode->i_blkbits));
	if (IS_ERR(lastpage)) {
		err = -EIO;
		goto out;
	}

	/* walk the page's buffer ring to the one covering lastfrag */
	end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
	bh = page_buffers(lastpage);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;

	err = ufs_getfrag_block(inode, lastfrag, bh, 1);

	if (unlikely(err))
		goto out_unlock;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		clean_bdev_bh_alias(bh);
		/*
		 * we do not zeroize fragment, because of
		 * if it mapped to hole, it already contains zeroes
		 */
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		set_page_dirty(lastpage);
	}

	if (lastfrag >= UFS_IND_FRAGMENT) {
		/* zero the rest of the block after the tail fragment */
		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
		phys64 = bh->b_blocknr + 1;
		for (i = 0; i < end; ++i) {
			bh = sb_getblk(sb, i + phys64);
			lock_buffer(bh);
			memset(bh->b_data, 0, sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			unlock_buffer(bh);
			sync_dirty_buffer(bh);
			brelse(bh);
		}
	}
out_unlock:
	ufs_put_locked_page(lastpage);
out:
	return err;
}
babef37dc
|
1089 |
static void ufs_truncate_blocks(struct inode *inode) |
010d331fc
|
1090 1091 1092 1093 |
{ struct ufs_inode_info *ufsi = UFS_I(inode); struct super_block *sb = inode->i_sb; struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; |
7bad5939f
|
1094 |
unsigned offsets[4]; |
a8fad9848
|
1095 |
int depth; |
6775e24d9
|
1096 |
int depth2; |
42432739b
|
1097 |
unsigned i; |
7b4e4f7f8
|
1098 1099 1100 |
struct ufs_buffer_head *ubh[3]; void *p; u64 block; |
6775e24d9
|
1101 |
|
a8fad9848
|
1102 1103 1104 1105 1106 1107 1108 1109 |
if (inode->i_size) { sector_t last = (inode->i_size - 1) >> uspi->s_bshift; depth = ufs_block_to_path(inode, last, offsets); if (!depth) return; } else { depth = 1; } |
6775e24d9
|
1110 |
|
6775e24d9
|
1111 |
for (depth2 = depth - 1; depth2; depth2--) |
a8fad9848
|
1112 |
if (offsets[depth2] != uspi->s_apb - 1) |
6775e24d9
|
1113 |
break; |
010d331fc
|
1114 1115 |
mutex_lock(&ufsi->truncate_mutex); |
42432739b
|
1116 |
if (depth == 1) { |
31cd043e1
|
1117 |
ufs_trunc_direct(inode); |
42432739b
|
1118 1119 |
offsets[0] = UFS_IND_BLOCK; } else { |
7b4e4f7f8
|
1120 |
/* get the blocks that should be partially emptied */ |
a8fad9848
|
1121 |
p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++); |
7b4e4f7f8
|
1122 |
for (i = 0; i < depth2; i++) { |
7b4e4f7f8
|
1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 |
block = ufs_data_ptr_to_cpu(sb, p); if (!block) break; ubh[i] = ubh_bread(sb, block, uspi->s_bsize); if (!ubh[i]) { write_seqlock(&ufsi->meta_lock); ufs_data_ptr_clear(uspi, p); write_sequnlock(&ufsi->meta_lock); break; } |
a8fad9848
|
1133 |
p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++); |
7b4e4f7f8
|
1134 |
} |
f53bd1421
|
1135 |
while (i--) |
7b4e4f7f8
|
1136 |
free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1); |
42432739b
|
1137 1138 |
} for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) { |
163073db5
|
1139 1140 1141 1142 1143 1144 1145 1146 |
p = ufs_get_direct_data_ptr(uspi, ufsi, i); block = ufs_data_ptr_to_cpu(sb, p); if (block) { write_seqlock(&ufsi->meta_lock); ufs_data_ptr_clear(uspi, p); write_sequnlock(&ufsi->meta_lock); free_full_branch(inode, block, i - UFS_IND_BLOCK + 1); } |
31cd043e1
|
1147 |
} |
09bf4f5b6
|
1148 |
read_seqlock_excl(&ufsi->meta_lock); |
010d331fc
|
1149 |
ufsi->i_lastfrag = DIRECT_FRAGMENT; |
09bf4f5b6
|
1150 |
read_sequnlock_excl(&ufsi->meta_lock); |
b6eede0ec
|
1151 |
mark_inode_dirty(inode); |
010d331fc
|
1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 |
/*
 * Change the file's size to @size: allocate/zero the new tail block,
 * zero the partial page past EOF, update i_size, then free the blocks
 * beyond the new end.  Returns 0 or a negative errno.
 */
static int ufs_truncate(struct inode *inode, loff_t size)
{
	int err = 0;

	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
	     inode->i_ino, (unsigned long long)size,
	     (unsigned long long)i_size_read(inode));

	/* only block-bearing object types can be truncated */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	err = ufs_alloc_lastblock(inode, size);

	if (err)
		goto out;

	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);

	truncate_setsize(inode, size);

	ufs_truncate_blocks(inode);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}
010d331fc
|
1186 1187 1188 1189 1190 |
int ufs_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); unsigned int ia_valid = attr->ia_valid; int error; |
31051c85b
|
1191 |
error = setattr_prepare(dentry, attr); |
010d331fc
|
1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 |
if (error) return error; if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) { error = ufs_truncate(inode, attr->ia_size); if (error) return error; } setattr_copy(inode, attr); mark_inode_dirty(inode); return 0; } const struct inode_operations ufs_file_inode_operations = { .setattr = ufs_setattr, }; |