Commit 33e6c1a0de818d3698cdab27c42915661011319d

Authored by Linus Torvalds

Merge branch 'linux-next' of git://git.infradead.org/ubifs-2.6

* 'linux-next' of git://git.infradead.org/ubifs-2.6:
  UBIFS: fix debugging dump
  UBIFS: improve lprops dump
  UBIFS: various minor commentary fixes
  UBIFS: improve journal head debugging prints
  UBIFS: define journal head numbers in ubifs-media.h
  UBIFS: amend commentaries
  UBIFS: check ubifs_scan error codes better
  UBIFS: do not print scary error messages needlessly
  UBIFS: add inode size debugging check
  UBIFS: constify file and inode operations
  UBIFS: remove unneeded call from ubifs_sync_fs
  UBIFS: kill BKL
  UBIFS: remove unused functions
  UBIFS: suppress compilation warning

Showing 22 changed files

... ... @@ -715,7 +715,7 @@
715 715 * ubifs_get_free_space - return amount of free space.
716 716 * @c: UBIFS file-system description object
717 717 *
718   - * This function calculates and retuns amount of free space to report to
  718 + * This function calculates and returns amount of free space to report to
719 719 * user-space.
720 720 */
721 721 long long ubifs_get_free_space(struct ubifs_info *c)
... ... @@ -510,7 +510,7 @@
510 510 int lnum, offs, len, err = 0, uninitialized_var(last_level), child_cnt;
511 511 int first = 1, iip;
512 512 struct ubifs_debug_info *d = c->dbg;
513   - union ubifs_key lower_key, upper_key, l_key, u_key;
  513 + union ubifs_key uninitialized_var(lower_key), upper_key, l_key, u_key;
514 514 unsigned long long uninitialized_var(last_sqnum);
515 515 struct ubifs_idx_node *idx;
516 516 struct list_head list;
... ... @@ -210,6 +210,20 @@
210 210 }
211 211 }
212 212  
  213 +const char *dbg_jhead(int jhead)
  214 +{
  215 + switch (jhead) {
  216 + case GCHD:
  217 + return "0 (GC)";
  218 + case BASEHD:
  219 + return "1 (base)";
  220 + case DATAHD:
  221 + return "2 (data)";
  222 + default:
  223 + return "unknown journal head";
  224 + }
  225 +}
  226 +
213 227 static void dump_ch(const struct ubifs_ch *ch)
214 228 {
215 229 printk(KERN_DEBUG "\tmagic %#x\n", le32_to_cpu(ch->magic));
... ... @@ -623,8 +637,9 @@
623 637 /* If we are in R/O mode, journal heads do not exist */
624 638 if (c->jheads)
625 639 for (i = 0; i < c->jhead_cnt; i++)
626   - printk(KERN_DEBUG "\tjhead %d\t LEB %d\n",
627   - c->jheads[i].wbuf.jhead, c->jheads[i].wbuf.lnum);
  640 + printk(KERN_DEBUG "\tjhead %s\t LEB %d\n",
  641 + dbg_jhead(c->jheads[i].wbuf.jhead),
  642 + c->jheads[i].wbuf.lnum);
628 643 for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {
629 644 bud = rb_entry(rb, struct ubifs_bud, rb);
630 645 printk(KERN_DEBUG "\tbud LEB %d\n", bud->lnum);
... ... @@ -648,9 +663,90 @@
648 663  
649 664 void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp)
650 665 {
651   - printk(KERN_DEBUG "LEB %d lprops: free %d, dirty %d (used %d), "
652   - "flags %#x\n", lp->lnum, lp->free, lp->dirty,
653   - c->leb_size - lp->free - lp->dirty, lp->flags);
  666 + int i, spc, dark = 0, dead = 0;
  667 + struct rb_node *rb;
  668 + struct ubifs_bud *bud;
  669 +
  670 + spc = lp->free + lp->dirty;
  671 + if (spc < c->dead_wm)
  672 + dead = spc;
  673 + else
  674 + dark = ubifs_calc_dark(c, spc);
  675 +
  676 + if (lp->flags & LPROPS_INDEX)
  677 + printk(KERN_DEBUG "LEB %-7d free %-8d dirty %-8d used %-8d "
  678 + "free + dirty %-8d flags %#x (", lp->lnum, lp->free,
  679 + lp->dirty, c->leb_size - spc, spc, lp->flags);
  680 + else
  681 + printk(KERN_DEBUG "LEB %-7d free %-8d dirty %-8d used %-8d "
  682 + "free + dirty %-8d dark %-4d dead %-4d nodes fit %-3d "
  683 + "flags %#-4x (", lp->lnum, lp->free, lp->dirty,
  684 + c->leb_size - spc, spc, dark, dead,
  685 + (int)(spc / UBIFS_MAX_NODE_SZ), lp->flags);
  686 +
  687 + if (lp->flags & LPROPS_TAKEN) {
  688 + if (lp->flags & LPROPS_INDEX)
  689 + printk(KERN_CONT "index, taken");
  690 + else
  691 + printk(KERN_CONT "taken");
  692 + } else {
  693 + const char *s;
  694 +
  695 + if (lp->flags & LPROPS_INDEX) {
  696 + switch (lp->flags & LPROPS_CAT_MASK) {
  697 + case LPROPS_DIRTY_IDX:
  698 + s = "dirty index";
  699 + break;
  700 + case LPROPS_FRDI_IDX:
  701 + s = "freeable index";
  702 + break;
  703 + default:
  704 + s = "index";
  705 + }
  706 + } else {
  707 + switch (lp->flags & LPROPS_CAT_MASK) {
  708 + case LPROPS_UNCAT:
  709 + s = "not categorized";
  710 + break;
  711 + case LPROPS_DIRTY:
  712 + s = "dirty";
  713 + break;
  714 + case LPROPS_FREE:
  715 + s = "free";
  716 + break;
  717 + case LPROPS_EMPTY:
  718 + s = "empty";
  719 + break;
  720 + case LPROPS_FREEABLE:
  721 + s = "freeable";
  722 + break;
  723 + default:
  724 + s = NULL;
  725 + break;
  726 + }
  727 + }
  728 + printk(KERN_CONT "%s", s);
  729 + }
  730 +
  731 + for (rb = rb_first((struct rb_root *)&c->buds); rb; rb = rb_next(rb)) {
  732 + bud = rb_entry(rb, struct ubifs_bud, rb);
  733 + if (bud->lnum == lp->lnum) {
  734 + int head = 0;
  735 + for (i = 0; i < c->jhead_cnt; i++) {
  736 + if (lp->lnum == c->jheads[i].wbuf.lnum) {
  737 + printk(KERN_CONT ", jhead %s",
  738 + dbg_jhead(i));
  739 + head = 1;
  740 + }
  741 + }
  742 + if (!head)
  743 + printk(KERN_CONT ", bud of jhead %s",
  744 + dbg_jhead(bud->jhead));
  745 + }
  746 + }
  747 + if (lp->lnum == c->gc_lnum)
  748 + printk(KERN_CONT ", GC LEB");
  749 + printk(KERN_CONT ")\n");
654 750 }
655 751  
656 752 void dbg_dump_lprops(struct ubifs_info *c)
... ... @@ -724,7 +820,7 @@
724 820  
725 821 printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n",
726 822 current->pid, lnum);
727   - sleb = ubifs_scan(c, lnum, 0, c->dbg->buf);
  823 + sleb = ubifs_scan(c, lnum, 0, c->dbg->buf, 0);
728 824 if (IS_ERR(sleb)) {
729 825 ubifs_err("scan error %d", (int)PTR_ERR(sleb));
730 826 return;
731 827  
... ... @@ -909,8 +1005,10 @@
909 1005 ubifs_msg("saved lprops statistics dump");
910 1006 dbg_dump_lstats(&d->saved_lst);
911 1007 ubifs_get_lp_stats(c, &lst);
  1008 +
912 1009 ubifs_msg("current lprops statistics dump");
913   - dbg_dump_lstats(&d->saved_lst);
  1010 + dbg_dump_lstats(&lst);
  1011 +
914 1012 spin_lock(&c->space_lock);
915 1013 dbg_dump_budg(c);
916 1014 spin_unlock(&c->space_lock);
... ... @@ -271,6 +271,7 @@
271 271 /* Dump functions */
272 272 const char *dbg_ntype(int type);
273 273 const char *dbg_cstate(int cmt_state);
  274 +const char *dbg_jhead(int jhead);
274 275 const char *dbg_get_key_dump(const struct ubifs_info *c,
275 276 const union ubifs_key *key);
276 277 void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode);
... ... @@ -321,6 +322,8 @@
321 322 int dbg_check_lprops(struct ubifs_info *c);
322 323 int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
323 324 int row, int col);
  325 +int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
  326 + loff_t size);
324 327  
325 328 /* Force the use of in-the-gaps method for testing */
326 329  
... ... @@ -425,6 +428,7 @@
425 428  
426 429 #define dbg_ntype(type) ""
427 430 #define dbg_cstate(cmt_state) ""
  431 +#define dbg_jhead(jhead) ""
428 432 #define dbg_get_key_dump(c, key) ({})
429 433 #define dbg_dump_inode(c, inode) ({})
430 434 #define dbg_dump_node(c, node) ({})
... ... @@ -460,6 +464,7 @@
460 464 #define dbg_check_heap(c, heap, cat, add_pos) ({})
461 465 #define dbg_check_lprops(c) 0
462 466 #define dbg_check_lpt_nodes(c, cnode, row, col) 0
  467 +#define dbg_check_inode_size(c, inode, size) 0
463 468 #define dbg_force_in_the_gaps_enabled 0
464 469 #define dbg_force_in_the_gaps() 0
465 470 #define dbg_failure_mode 0
... ... @@ -21,34 +21,32 @@
21 21 */
22 22  
23 23 /*
24   - * This file implements VFS file and inode operations of regular files, device
  24 + * This file implements VFS file and inode operations for regular files, device
25 25 * nodes and symlinks as well as address space operations.
26 26 *
27   - * UBIFS uses 2 page flags: PG_private and PG_checked. PG_private is set if the
28   - * page is dirty and is used for budgeting purposes - dirty pages should not be
29   - * budgeted. The PG_checked flag is set if full budgeting is required for the
30   - * page e.g., when it corresponds to a file hole or it is just beyond the file
31   - * size. The budgeting is done in 'ubifs_write_begin()', because it is OK to
32   - * fail in this function, and the budget is released in 'ubifs_write_end()'. So
33   - * the PG_private and PG_checked flags carry the information about how the page
34   - * was budgeted, to make it possible to release the budget properly.
  27 + * UBIFS uses 2 page flags: @PG_private and @PG_checked. @PG_private is set if
  28 + * the page is dirty and is used for optimization purposes - dirty pages are
  29 + * not budgeted so the flag shows that 'ubifs_write_end()' should not release
  30 + * the budget for this page. The @PG_checked flag is set if full budgeting is
  31 + * required for the page e.g., when it corresponds to a file hole or it is
  32 + * beyond the file size. The budgeting is done in 'ubifs_write_begin()', because
  33 + * it is OK to fail in this function, and the budget is released in
  34 + * 'ubifs_write_end()'. So the @PG_private and @PG_checked flags carry
  35 + * information about how the page was budgeted, to make it possible to release
  36 + * the budget properly.
35 37 *
36   - * A thing to keep in mind: inode's 'i_mutex' is locked in most VFS operations
37   - * we implement. However, this is not true for '->writepage()', which might be
38   - * called with 'i_mutex' unlocked. For example, when pdflush is performing
39   - * write-back, it calls 'writepage()' with unlocked 'i_mutex', although the
40   - * inode has 'I_LOCK' flag in this case. At "normal" work-paths 'i_mutex' is
41   - * locked in '->writepage', e.g. in "sys_write -> alloc_pages -> direct reclaim
42   - * path'. So, in '->writepage()' we are only guaranteed that the page is
43   - * locked.
  38 + * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
  39 + * implement. However, this is not true for 'ubifs_writepage()', which may be
  40 + * called with @i_mutex unlocked. For example, when pdflush is doing background
  41 + * write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex. At "normal"
  42 + * work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g. in the
  43 + * "sys_write -> alloc_pages -> direct reclaim path". So, in 'ubifs_writepage()'
  44 + * we are only guaranteed that the page is locked.
44 45 *
45   - * Similarly, 'i_mutex' does not have to be locked in readpage(), e.g.,
46   - * readahead path does not have it locked ("sys_read -> generic_file_aio_read
47   - * -> ondemand_readahead -> readpage"). In case of readahead, 'I_LOCK' flag is
48   - * not set as well. However, UBIFS disables readahead.
49   - *
50   - * This, for example means that there might be 2 concurrent '->writepage()'
51   - * calls for the same inode, but different inode dirty pages.
  46 + * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
  47 + * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
  48 + * ondemand_readahead -> readpage"). In case of readahead, @I_LOCK flag is not
  49 + * set as well. However, UBIFS disables readahead.
52 50 */
53 51  
54 52 #include "ubifs.h"
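The rewritten comment above describes how budgeting state travels between 'ubifs_write_begin()' and 'ubifs_write_end()' via the two page flags. As a stand-alone illustration of that decision (hypothetical names and a user-space model, not the actual fs/ubifs/file.c code):

#include <stdio.h>
#include <stdbool.h>

struct page_state {
	bool pg_private;	/* page is already dirty, so no new budget is needed */
	bool pg_checked;	/* file hole or beyond EOF, full "new page" budget needed */
};

/* What 'ubifs_write_begin()' would have to budget for such a page. */
static const char *budget_needed(const struct page_state *p)
{
	if (p->pg_private)
		return "nothing - the page is already dirty";
	return p->pg_checked ? "a new page (full budget)" : "a dirtied page";
}

int main(void)
{
	struct page_state clean = { false, false };
	struct page_state hole  = { false, true };
	struct page_state dirty = { true,  false };

	printf("clean page in the middle of data: %s\n", budget_needed(&clean));
	printf("page over a hole / past EOF:      %s\n", budget_needed(&hole));
	printf("already-dirty page:               %s\n", budget_needed(&dirty));
	return 0;
}

'ubifs_write_end()' then reads the same two flags to decide whether any budget has to be released, which is exactly the "carry information about how the page was budgeted" point made in the comment.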
... ... @@ -449,9 +447,9 @@
449 447 /*
450 448 * We change whole page so no need to load it. But we
451 449 * have to set the @PG_checked flag to make the further
452   - * code the page is new. This might be not true, but it
453   - * is better to budget more that to read the page from
454   - * the media.
  450 + * code know that the page is new. This might be not
  451 + * true, but it is better to budget more than to read
  452 + * the page from the media.
455 453 */
456 454 SetPageChecked(page);
457 455 skipped_read = 1;
... ... @@ -497,8 +495,8 @@
497 495 }
498 496  
499 497 /*
500   - * Whee, we aquired budgeting quickly - without involving
501   - * garbage-collection, committing or forceing write-back. We return
  498 + * Whee, we acquired budgeting quickly - without involving
  499 + * garbage-collection, committing or forcing write-back. We return
502 500 * with @ui->ui_mutex locked if we are appending pages, and unlocked
503 501 * otherwise. This is an optimization (slightly hacky though).
504 502 */
... ... @@ -562,7 +560,7 @@
562 560  
563 561 /*
564 562 * Return 0 to force VFS to repeat the whole operation, or the
565   - * error code if 'do_readpage()' failes.
  563 + * error code if 'do_readpage()' fails.
566 564 */
567 565 copied = do_readpage(page);
568 566 goto out;
569 567  
570 568  
... ... @@ -1175,11 +1173,11 @@
1175 1173 ui->ui_size = inode->i_size;
1176 1174 /* Truncation changes inode [mc]time */
1177 1175 inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
1178   - /* The other attributes may be changed at the same time as well */
  1176 + /* Other attributes may be changed at the same time as well */
1179 1177 do_attr_changes(inode, attr);
1180   -
1181 1178 err = ubifs_jnl_truncate(c, inode, old_size, new_size);
1182 1179 mutex_unlock(&ui->ui_mutex);
  1180 +
1183 1181 out_budg:
1184 1182 if (budgeted)
1185 1183 ubifs_release_budget(c, &req);
... ... @@ -529,7 +529,7 @@
529 529 * We scan the entire LEB even though we only really need to scan up to
530 530 * (c->leb_size - lp->free).
531 531 */
532   - sleb = ubifs_scan(c, lnum, 0, c->sbuf);
  532 + sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
533 533 if (IS_ERR(sleb))
534 534 return PTR_ERR(sleb);
535 535  
... ... @@ -297,7 +297,7 @@
297 297 {
298 298 struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);
299 299  
300   - dbg_io("jhead %d", wbuf->jhead);
  300 + dbg_io("jhead %s", dbg_jhead(wbuf->jhead));
301 301 wbuf->need_sync = 1;
302 302 wbuf->c->need_wbuf_sync = 1;
303 303 ubifs_wake_up_bgt(wbuf->c);
... ... @@ -314,7 +314,8 @@
314 314  
315 315 if (wbuf->no_timer)
316 316 return;
317   - dbg_io("set timer for jhead %d, %llu-%llu millisecs", wbuf->jhead,
  317 + dbg_io("set timer for jhead %s, %llu-%llu millisecs",
  318 + dbg_jhead(wbuf->jhead),
318 319 div_u64(ktime_to_ns(wbuf->softlimit), USEC_PER_SEC),
319 320 div_u64(ktime_to_ns(wbuf->softlimit) + wbuf->delta,
320 321 USEC_PER_SEC));
... ... @@ -351,8 +352,8 @@
351 352 /* Write-buffer is empty or not seeked */
352 353 return 0;
353 354  
354   - dbg_io("LEB %d:%d, %d bytes, jhead %d",
355   - wbuf->lnum, wbuf->offs, wbuf->used, wbuf->jhead);
  355 + dbg_io("LEB %d:%d, %d bytes, jhead %s",
  356 + wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
356 357 ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY));
357 358 ubifs_assert(!(wbuf->avail & 7));
358 359 ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size);
... ... @@ -401,7 +402,7 @@
401 402 {
402 403 const struct ubifs_info *c = wbuf->c;
403 404  
404   - dbg_io("LEB %d:%d, jhead %d", lnum, offs, wbuf->jhead);
  405 + dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead));
405 406 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt);
406 407 ubifs_assert(offs >= 0 && offs <= c->leb_size);
407 408 ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7));
... ... @@ -508,9 +509,9 @@
508 509 struct ubifs_info *c = wbuf->c;
509 510 int err, written, n, aligned_len = ALIGN(len, 8), offs;
510 511  
511   - dbg_io("%d bytes (%s) to jhead %d wbuf at LEB %d:%d", len,
512   - dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->jhead,
513   - wbuf->lnum, wbuf->offs + wbuf->used);
  512 + dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
  513 + dbg_ntype(((struct ubifs_ch *)buf)->node_type),
  514 + dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
514 515 ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
515 516 ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
516 517 ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
... ... @@ -535,8 +536,8 @@
535 536 memcpy(wbuf->buf + wbuf->used, buf, len);
536 537  
537 538 if (aligned_len == wbuf->avail) {
538   - dbg_io("flush jhead %d wbuf to LEB %d:%d",
539   - wbuf->jhead, wbuf->lnum, wbuf->offs);
  539 + dbg_io("flush jhead %s wbuf to LEB %d:%d",
  540 + dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
540 541 err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf,
541 542 wbuf->offs, c->min_io_size,
542 543 wbuf->dtype);
... ... @@ -564,8 +565,8 @@
564 565 * minimal I/O unit. We have to fill and flush write-buffer and switch
565 566 * to the next min. I/O unit.
566 567 */
567   - dbg_io("flush jhead %d wbuf to LEB %d:%d",
568   - wbuf->jhead, wbuf->lnum, wbuf->offs);
  568 + dbg_io("flush jhead %s wbuf to LEB %d:%d",
  569 + dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
569 570 memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
570 571 err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
571 572 c->min_io_size, wbuf->dtype);
... ... @@ -698,8 +699,8 @@
698 699 int err, rlen, overlap;
699 700 struct ubifs_ch *ch = buf;
700 701  
701   - dbg_io("LEB %d:%d, %s, length %d, jhead %d", lnum, offs,
702   - dbg_ntype(type), len, wbuf->jhead);
  702 + dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs,
  703 + dbg_ntype(type), len, dbg_jhead(wbuf->jhead));
703 704 ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
704 705 ubifs_assert(!(offs & 7) && offs < c->leb_size);
705 706 ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);
... ... @@ -158,7 +158,7 @@
158 158 * some. But the write-buffer mutex has to be unlocked because
159 159 * GC also takes it.
160 160 */
161   - dbg_jnl("no free space jhead %d, run GC", jhead);
  161 + dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead));
162 162 mutex_unlock(&wbuf->io_mutex);
163 163  
164 164 lnum = ubifs_garbage_collect(c, 0);
... ... @@ -173,7 +173,8 @@
173 173 * because we dropped @wbuf->io_mutex, so try once
174 174 * again.
175 175 */
176   - dbg_jnl("GC couldn't make a free LEB for jhead %d", jhead);
  176 + dbg_jnl("GC couldn't make a free LEB for jhead %s",
  177 + dbg_jhead(jhead));
177 178 if (retries++ < 2) {
178 179 dbg_jnl("retry (%d)", retries);
179 180 goto again;
... ... @@ -184,7 +185,7 @@
184 185 }
185 186  
186 187 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
187   - dbg_jnl("got LEB %d for jhead %d", lnum, jhead);
  188 + dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead));
188 189 avail = c->leb_size - wbuf->offs - wbuf->used;
189 190  
190 191 if (wbuf->lnum != -1 && avail >= len) {
... ... @@ -255,7 +256,8 @@
255 256 *lnum = c->jheads[jhead].wbuf.lnum;
256 257 *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
257 258  
258   - dbg_jnl("jhead %d, LEB %d:%d, len %d", jhead, *lnum, *offs, len);
  259 + dbg_jnl("jhead %s, LEB %d:%d, len %d",
  260 + dbg_jhead(jhead), *lnum, *offs, len);
259 261 ubifs_prepare_node(c, node, len, 0);
260 262  
261 263 return ubifs_wbuf_write_nolock(wbuf, node, len);
... ... @@ -285,7 +287,8 @@
285 287  
286 288 *lnum = c->jheads[jhead].wbuf.lnum;
287 289 *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
288   - dbg_jnl("jhead %d, LEB %d:%d, len %d", jhead, *lnum, *offs, len);
  290 + dbg_jnl("jhead %s, LEB %d:%d, len %d",
  291 + dbg_jhead(jhead), *lnum, *offs, len);
289 292  
290 293 err = ubifs_wbuf_write_nolock(wbuf, buf, len);
291 294 if (err)
... ... @@ -229,23 +229,6 @@
229 229 }
230 230  
231 231 /**
232   - * xent_key_init_hash - initialize extended attribute entry key without
233   - * re-calculating hash function.
234   - * @c: UBIFS file-system description object
235   - * @key: key to initialize
236   - * @inum: host inode number
237   - * @hash: extended attribute entry name hash
238   - */
239   -static inline void xent_key_init_hash(const struct ubifs_info *c,
240   - union ubifs_key *key, ino_t inum,
241   - uint32_t hash)
242   -{
243   - ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK));
244   - key->u32[0] = inum;
245   - key->u32[1] = hash | (UBIFS_XENT_KEY << UBIFS_S_KEY_HASH_BITS);
246   -}
247   -
248   -/**
249 232 * xent_key_init_flash - initialize on-flash extended attribute entry key.
250 233 * @c: UBIFS file-system description object
251 234 * @k: key to initialize
252 235  
253 236  
254 237  
255 238  
... ... @@ -295,22 +278,15 @@
295 278 }
296 279  
297 280 /**
298   - * data_key_init_flash - initialize on-flash data key.
  281 + * highest_data_key - get the highest possible data key for an inode.
299 282 * @c: UBIFS file-system description object
300   - * @k: key to initialize
  283 + * @key: key to initialize
301 284 * @inum: inode number
302   - * @block: block number
303 285 */
304   -static inline void data_key_init_flash(const struct ubifs_info *c, void *k,
305   - ino_t inum, unsigned int block)
  286 +static inline void highest_data_key(const struct ubifs_info *c,
  287 + union ubifs_key *key, ino_t inum)
306 288 {
307   - union ubifs_key *key = k;
308   -
309   - ubifs_assert(!(block & ~UBIFS_S_KEY_BLOCK_MASK));
310   - key->j32[0] = cpu_to_le32(inum);
311   - key->j32[1] = cpu_to_le32(block |
312   - (UBIFS_DATA_KEY << UBIFS_S_KEY_BLOCK_BITS));
313   - memset(k + 8, 0, UBIFS_MAX_KEY_LEN - 8);
  289 + data_key_init(c, key, inum, UBIFS_S_KEY_BLOCK_MASK);
314 290 }
315 291  
316 292 /**
... ... @@ -554,5 +530,6 @@
554 530 return 0;
555 531 }
556 532 }
  533 +
557 534 #endif /* !__UBIFS_KEY_H__ */
... ... @@ -169,8 +169,8 @@
169 169 */
170 170 c->bud_bytes += c->leb_size - bud->start;
171 171  
172   - dbg_log("LEB %d:%d, jhead %d, bud_bytes %lld", bud->lnum,
173   - bud->start, bud->jhead, c->bud_bytes);
  172 + dbg_log("LEB %d:%d, jhead %s, bud_bytes %lld", bud->lnum,
  173 + bud->start, dbg_jhead(bud->jhead), c->bud_bytes);
174 174 spin_unlock(&c->buds_lock);
175 175 }
176 176  
177 177  
178 178  
179 179  
... ... @@ -355,16 +355,16 @@
355 355 * heads (non-closed buds).
356 356 */
357 357 c->cmt_bud_bytes += wbuf->offs - bud->start;
358   - dbg_log("preserve %d:%d, jhead %d, bud bytes %d, "
  358 + dbg_log("preserve %d:%d, jhead %s, bud bytes %d, "
359 359 "cmt_bud_bytes %lld", bud->lnum, bud->start,
360   - bud->jhead, wbuf->offs - bud->start,
  360 + dbg_jhead(bud->jhead), wbuf->offs - bud->start,
361 361 c->cmt_bud_bytes);
362 362 bud->start = wbuf->offs;
363 363 } else {
364 364 c->cmt_bud_bytes += c->leb_size - bud->start;
365   - dbg_log("remove %d:%d, jhead %d, bud bytes %d, "
  365 + dbg_log("remove %d:%d, jhead %s, bud bytes %d, "
366 366 "cmt_bud_bytes %lld", bud->lnum, bud->start,
367   - bud->jhead, c->leb_size - bud->start,
  367 + dbg_jhead(bud->jhead), c->leb_size - bud->start,
368 368 c->cmt_bud_bytes);
369 369 rb_erase(p1, &c->buds);
370 370 /*
... ... @@ -429,7 +429,8 @@
429 429 if (lnum == -1 || offs == c->leb_size)
430 430 continue;
431 431  
432   - dbg_log("add ref to LEB %d:%d for jhead %d", lnum, offs, i);
  432 + dbg_log("add ref to LEB %d:%d for jhead %s",
  433 + lnum, offs, dbg_jhead(i));
433 434 ref = buf + len;
434 435 ref->ch.node_type = UBIFS_REF_NODE;
435 436 ref->lnum = cpu_to_le32(lnum);
... ... @@ -695,7 +696,7 @@
695 696 lnum = c->ltail_lnum;
696 697 write_lnum = lnum;
697 698 while (1) {
698   - sleb = ubifs_scan(c, lnum, 0, c->sbuf);
  699 + sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
699 700 if (IS_ERR(sleb)) {
700 701 err = PTR_ERR(sleb);
701 702 goto out_free;
... ... @@ -281,7 +281,7 @@
281 281 case LPROPS_FREE:
282 282 if (add_to_lpt_heap(c, lprops, cat))
283 283 break;
284   - /* No more room on heap so make it uncategorized */
  284 + /* No more room on heap so make it un-categorized */
285 285 cat = LPROPS_UNCAT;
286 286 /* Fall through */
287 287 case LPROPS_UNCAT:
... ... @@ -375,8 +375,8 @@
375 375 * @lprops: LEB properties
376 376 *
377 377 * A LEB may have fallen off of the bottom of a heap, and ended up as
378   - * uncategorized even though it has enough space for us now. If that is the case
379   - * this function will put the LEB back onto a heap.
  378 + * un-categorized even though it has enough space for us now. If that is the
  379 + * case this function will put the LEB back onto a heap.
380 380 */
381 381 void ubifs_ensure_cat(struct ubifs_info *c, struct ubifs_lprops *lprops)
382 382 {
383 383  
... ... @@ -436,10 +436,10 @@
436 436 /**
437 437 * change_category - change LEB properties category.
438 438 * @c: UBIFS file-system description object
439   - * @lprops: LEB properties to recategorize
  439 + * @lprops: LEB properties to re-categorize
440 440 *
441 441 * LEB properties are categorized to enable fast find operations. When the LEB
442   - * properties change they must be recategorized.
  442 + * properties change they must be re-categorized.
443 443 */
444 444 static void change_category(struct ubifs_info *c, struct ubifs_lprops *lprops)
445 445 {
446 446  
447 447  
448 448  
... ... @@ -461,21 +461,18 @@
461 461 }
462 462  
463 463 /**
464   - * calc_dark - calculate LEB dark space size.
  464 + * ubifs_calc_dark - calculate LEB dark space size.
465 465 * @c: the UBIFS file-system description object
466 466 * @spc: amount of free and dirty space in the LEB
467 467 *
468   - * This function calculates amount of dark space in an LEB which has @spc bytes
469   - * of free and dirty space. Returns the calculations result.
  468 + * This function calculates and returns amount of dark space in an LEB which
  469 + * has @spc bytes of free and dirty space.
470 470 *
471   - * Dark space is the space which is not always usable - it depends on which
472   - * nodes are written in which order. E.g., if an LEB has only 512 free bytes,
473   - * it is dark space, because it cannot fit a large data node. So UBIFS cannot
474   - * count on this LEB and treat these 512 bytes as usable because it is not true
475   - * if, for example, only big chunks of uncompressible data will be written to
476   - * the FS.
  471 + * UBIFS is trying to account the space which might not be usable, and this
  472 + * space is called "dark space". For example, if an LEB has only %512 free
  473 + * bytes, it is dark space, because it cannot fit a large data node.
477 474 */
478   -static int calc_dark(struct ubifs_info *c, int spc)
  475 +int ubifs_calc_dark(const struct ubifs_info *c, int spc)
479 476 {
480 477 ubifs_assert(!(spc & 7));
481 478  
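To make the dark-space idea from the renamed 'ubifs_calc_dark()' concrete, here is a small stand-alone sketch. It is a simplification, and the watermark value is made up (the real code derives @c->dark_wm from the maximum node size); the point is only that space too small for a large node cannot be counted on:

#include <stdio.h>

#define DARK_WM 4096	/* stand-in for c->dark_wm: roughly one max-size node */

/* Model of the idea: space that may not fit a large node is "dark". */
static int calc_dark(int spc)
{
	if (spc < DARK_WM)
		return spc;	/* too small to count on at all */
	return DARK_WM;		/* at most one node-sized tail may be wasted */
}

int main(void)
{
	printf("%5d bytes free+dirty -> %5d dark\n", 512, calc_dark(512));
	printf("%5d bytes free+dirty -> %5d dark\n", 8192, calc_dark(8192));
	return 0;
}

This is also why 'dbg_dump_lprop()' above splits small leftover space into "dead" (below @c->dead_wm) and everything else into "dark".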
... ... @@ -518,7 +515,7 @@
518 515 * @free: new free space amount
519 516 * @dirty: new dirty space amount
520 517 * @flags: new flags
521   - * @idx_gc_cnt: change to the count of idx_gc list
  518 + * @idx_gc_cnt: change to the count of @idx_gc list
522 519 *
523 520 * This function changes LEB properties (@free, @dirty or @flag). However, the
524 521 * property which has the %LPROPS_NC value is not changed. Returns a pointer to
... ... @@ -535,7 +532,7 @@
535 532 {
536 533 /*
537 534 * This is the only function that is allowed to change lprops, so we
538   - * discard the const qualifier.
  535 + * discard the "const" qualifier.
539 536 */
540 537 struct ubifs_lprops *lprops = (struct ubifs_lprops *)lp;
541 538  
... ... @@ -575,7 +572,7 @@
575 572 if (old_spc < c->dead_wm)
576 573 c->lst.total_dead -= old_spc;
577 574 else
578   - c->lst.total_dark -= calc_dark(c, old_spc);
  575 + c->lst.total_dark -= ubifs_calc_dark(c, old_spc);
579 576  
580 577 c->lst.total_used -= c->leb_size - old_spc;
581 578 }
... ... @@ -616,7 +613,7 @@
616 613 if (new_spc < c->dead_wm)
617 614 c->lst.total_dead += new_spc;
618 615 else
619   - c->lst.total_dark += calc_dark(c, new_spc);
  616 + c->lst.total_dark += ubifs_calc_dark(c, new_spc);
620 617  
621 618 c->lst.total_used += c->leb_size - new_spc;
622 619 }
... ... @@ -1096,7 +1093,7 @@
1096 1093 }
1097 1094 }
1098 1095  
1099   - sleb = ubifs_scan(c, lnum, 0, c->dbg->buf);
  1096 + sleb = ubifs_scan(c, lnum, 0, c->dbg->buf, 0);
1100 1097 if (IS_ERR(sleb)) {
1101 1098 /*
1102 1099 * After an unclean unmount, empty and freeable LEBs
... ... @@ -1107,7 +1104,7 @@
1107 1104 "- continuing checking");
1108 1105 lst->empty_lebs += 1;
1109 1106 lst->total_free += c->leb_size;
1110   - lst->total_dark += calc_dark(c, c->leb_size);
  1107 + lst->total_dark += ubifs_calc_dark(c, c->leb_size);
1111 1108 return LPT_SCAN_CONTINUE;
1112 1109 }
1113 1110  
... ... @@ -1117,7 +1114,7 @@
1117 1114 "- continuing checking");
1118 1115 lst->total_free += lp->free;
1119 1116 lst->total_dirty += lp->dirty;
1120   - lst->total_dark += calc_dark(c, c->leb_size);
  1117 + lst->total_dark += ubifs_calc_dark(c, c->leb_size);
1121 1118 return LPT_SCAN_CONTINUE;
1122 1119 }
1123 1120 data->err = PTR_ERR(sleb);
... ... @@ -1235,7 +1232,7 @@
1235 1232 if (spc < c->dead_wm)
1236 1233 lst->total_dead += spc;
1237 1234 else
1238   - lst->total_dark += calc_dark(c, spc);
  1235 + lst->total_dark += ubifs_calc_dark(c, spc);
1239 1236 }
1240 1237  
1241 1238 ubifs_scan_destroy(sleb);
... ... @@ -29,7 +29,8 @@
29 29 * @c: UBIFS file-system description object
30 30 *
31 31 * This function scans the master node LEBs and search for the latest master
32   - * node. Returns zero in case of success and a negative error code in case of
  32 + * node. Returns zero in case of success, %-EUCLEAN if there master area is
  33 + * corrupted and requires recovery, and a negative error code in case of
33 34 * failure.
34 35 */
35 36 static int scan_for_master(struct ubifs_info *c)
... ... @@ -40,7 +41,7 @@
40 41  
41 42 lnum = UBIFS_MST_LNUM;
42 43  
43   - sleb = ubifs_scan(c, lnum, 0, c->sbuf);
  44 + sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1);
44 45 if (IS_ERR(sleb))
45 46 return PTR_ERR(sleb);
46 47 nodes_cnt = sleb->nodes_cnt;
... ... @@ -48,7 +49,7 @@
48 49 snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
49 50 list);
50 51 if (snod->type != UBIFS_MST_NODE)
51   - goto out;
  52 + goto out_dump;
52 53 memcpy(c->mst_node, snod->node, snod->len);
53 54 offs = snod->offs;
54 55 }
... ... @@ -56,7 +57,7 @@
56 57  
57 58 lnum += 1;
58 59  
59   - sleb = ubifs_scan(c, lnum, 0, c->sbuf);
  60 + sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1);
60 61 if (IS_ERR(sleb))
61 62 return PTR_ERR(sleb);
62 63 if (sleb->nodes_cnt != nodes_cnt)
... ... @@ -65,7 +66,7 @@
65 66 goto out;
66 67 snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node, list);
67 68 if (snod->type != UBIFS_MST_NODE)
68   - goto out;
  69 + goto out_dump;
69 70 if (snod->offs != offs)
70 71 goto out;
71 72 if (memcmp((void *)c->mst_node + UBIFS_CH_SZ,
... ... @@ -78,6 +79,12 @@
78 79  
79 80 out:
80 81 ubifs_scan_destroy(sleb);
  82 + return -EUCLEAN;
  83 +
  84 +out_dump:
  85 + ubifs_err("unexpected node type %d master LEB %d:%d",
  86 + snod->type, lnum, snod->offs);
  87 + ubifs_scan_destroy(sleb);
81 88 return -EINVAL;
82 89 }
83 90  
... ... @@ -256,7 +263,8 @@
256 263  
257 264 err = scan_for_master(c);
258 265 if (err) {
259   - err = ubifs_recover_master_node(c);
  266 + if (err == -EUCLEAN)
  267 + err = ubifs_recover_master_node(c);
260 268 if (err)
261 269 /*
262 270 * Note, we do not free 'c->mst_node' here because the
... ... @@ -670,9 +670,10 @@
670 670 struct ubifs_scan_leb *sleb;
671 671  
672 672 dbg_rcvry("LEB %d", lnum);
673   - sleb = ubifs_scan(c, lnum, 0, c->sbuf);
  673 + sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1);
674 674 if (IS_ERR(sleb)) {
675   - sleb = ubifs_recover_leb(c, lnum, 0, c->sbuf, 0);
  675 + if (PTR_ERR(sleb) == -EUCLEAN)
  676 + sleb = ubifs_recover_leb(c, lnum, 0, c->sbuf, 0);
676 677 if (IS_ERR(sleb)) {
677 678 err = PTR_ERR(sleb);
678 679 break;
... ... @@ -899,7 +900,7 @@
899 900 for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) {
900 901 struct ubifs_scan_leb *sleb;
901 902  
902   - sleb = ubifs_scan(c, lnum, 0, c->dbg->buf);
  903 + sleb = ubifs_scan(c, lnum, 0, c->dbg->buf, 0);
903 904 if (IS_ERR(sleb)) {
904 905 err = PTR_ERR(sleb);
905 906 break;
... ... @@ -286,7 +286,7 @@
286 286 mst = mst2;
287 287 }
288 288  
289   - dbg_rcvry("recovered master node from LEB %d",
  289 + ubifs_msg("recovered master node from LEB %d",
290 290 (mst == mst1 ? UBIFS_MST_LNUM : UBIFS_MST_LNUM + 1));
291 291  
292 292 memcpy(c->mst_node, mst, UBIFS_MST_NODE_SZ);
... ... @@ -790,7 +790,7 @@
790 790 * We can only recover at the end of the log, so check that the
791 791 * next log LEB is empty or out of date.
792 792 */
793   - sleb = ubifs_scan(c, next_lnum, 0, sbuf);
  793 + sleb = ubifs_scan(c, next_lnum, 0, sbuf, 0);
794 794 if (IS_ERR(sleb))
795 795 return sleb;
796 796 if (sleb->nodes_cnt) {
... ... @@ -506,7 +506,7 @@
506 506 if (c->need_recovery)
507 507 sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, jhead != GCHD);
508 508 else
509   - sleb = ubifs_scan(c, lnum, offs, c->sbuf);
  509 + sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0);
510 510 if (IS_ERR(sleb))
511 511 return PTR_ERR(sleb);
512 512  
... ... @@ -836,8 +836,8 @@
836 836 const struct ubifs_cs_node *node;
837 837  
838 838 dbg_mnt("replay log LEB %d:%d", lnum, offs);
839   - sleb = ubifs_scan(c, lnum, offs, sbuf);
840   - if (IS_ERR(sleb) ) {
  839 + sleb = ubifs_scan(c, lnum, offs, sbuf, c->need_recovery);
  840 + if (IS_ERR(sleb)) {
841 841 if (PTR_ERR(sleb) != -EUCLEAN || !c->need_recovery)
842 842 return PTR_ERR(sleb);
843 843 sleb = ubifs_recover_log_leb(c, lnum, offs, sbuf);
... ... @@ -108,10 +108,9 @@
108 108  
109 109 /* Make the node pads to 8-byte boundary */
110 110 if ((node_len + pad_len) & 7) {
111   - if (!quiet) {
  111 + if (!quiet)
112 112 dbg_err("bad padding length %d - %d",
113 113 offs, offs + node_len + pad_len);
114   - }
115 114 return SCANNED_A_BAD_PAD_NODE;
116 115 }
117 116  
118 117  
119 118  
... ... @@ -253,15 +252,19 @@
253 252 * @c: UBIFS file-system description object
254 253 * @lnum: logical eraseblock number
255 254 * @offs: offset to start at (usually zero)
256   - * @sbuf: scan buffer (must be c->leb_size)
  255 + * @sbuf: scan buffer (must be of @c->leb_size bytes in size)
  256 + * @quiet: print no messages
257 257 *
258 258 * This function scans LEB number @lnum and returns complete information about
259 259 * its contents. Returns the scaned information in case of success and,
260 260 * %-EUCLEAN if the LEB neads recovery, and other negative error codes in case
261 261 * of failure.
  262 + *
  263 + * If @quiet is non-zero, this function does not print large and scary
  264 + * error messages and flash dumps in case of errors.
262 265 */
263 266 struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
264   - int offs, void *sbuf)
  267 + int offs, void *sbuf, int quiet)
265 268 {
266 269 void *buf = sbuf + offs;
267 270 int err, len = c->leb_size - offs;
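The new @quiet argument is what lets callers that expect, and can recover from, corruption probe a LEB silently first. Condensed from the recovery and master-scan hunks elsewhere in this diff (not a verbatim copy of any one of them):

	sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1);	/* quiet probe */
	if (IS_ERR(sleb)) {
		if (PTR_ERR(sleb) == -EUCLEAN)
			/* only a genuinely corrupted LEB gets the noisy recovery scan */
			sleb = ubifs_recover_leb(c, lnum, 0, c->sbuf, 0);
		if (IS_ERR(sleb))
			return PTR_ERR(sleb);
	}

Ordinary callers keep passing 0 and still get the full error reports and flash dumps.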
... ... @@ -280,7 +283,7 @@
280 283  
281 284 cond_resched();
282 285  
283   - ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0);
  286 + ret = ubifs_scan_a_node(c, buf, len, lnum, offs, quiet);
284 287 if (ret > 0) {
285 288 /* Padding bytes or a valid padding node */
286 289 offs += ret;
... ... @@ -320,7 +323,9 @@
320 323 }
321 324  
322 325 if (offs % c->min_io_size) {
323   - ubifs_err("empty space starts at non-aligned offset %d", offs);
  326 + if (!quiet)
  327 + ubifs_err("empty space starts at non-aligned offset %d",
  328 + offs);
324 329 goto corrupted;;
325 330 }
326 331  
327 332  
328 333  
329 334  
... ... @@ -331,18 +336,25 @@
331 336 break;
332 337 for (; len; offs++, buf++, len--)
333 338 if (*(uint8_t *)buf != 0xff) {
334   - ubifs_err("corrupt empty space at LEB %d:%d",
335   - lnum, offs);
  339 + if (!quiet)
  340 + ubifs_err("corrupt empty space at LEB %d:%d",
  341 + lnum, offs);
336 342 goto corrupted;
337 343 }
338 344  
339 345 return sleb;
340 346  
341 347 corrupted:
342   - ubifs_scanned_corruption(c, lnum, offs, buf);
  348 + if (!quiet) {
  349 + ubifs_scanned_corruption(c, lnum, offs, buf);
  350 + ubifs_err("LEB %d scanning failed", lnum);
  351 + }
343 352 err = -EUCLEAN;
  353 + ubifs_scan_destroy(sleb);
  354 + return ERR_PTR(err);
  355 +
344 356 error:
345   - ubifs_err("LEB %d scanning failed", lnum);
  357 + ubifs_err("LEB %d scanning failed, error %d", lnum, err);
346 358 ubifs_scan_destroy(sleb);
347 359 return ERR_PTR(err);
348 360 }
... ... @@ -36,7 +36,6 @@
36 36 #include <linux/mount.h>
37 37 #include <linux/math64.h>
38 38 #include <linux/writeback.h>
39   -#include <linux/smp_lock.h>
40 39 #include "ubifs.h"
41 40  
42 41 /*
... ... @@ -318,6 +317,8 @@
318 317 if (err)
319 318 ubifs_err("can't write inode %lu, error %d",
320 319 inode->i_ino, err);
  320 + else
  321 + err = dbg_check_inode_size(c, inode, ui->ui_size);
321 322 }
322 323  
323 324 ui->dirty = 0;
... ... @@ -448,17 +449,6 @@
448 449 return 0;
449 450  
450 451 /*
451   - * VFS calls '->sync_fs()' before synchronizing all dirty inodes and
452   - * pages, so synchronize them first, then commit the journal. Strictly
453   - * speaking, it is not necessary to commit the journal here,
454   - * synchronizing write-buffers would be enough. But committing makes
455   - * UBIFS free space predictions much more accurate, so we want to let
456   - * the user be able to get more accurate results of 'statfs()' after
457   - * they synchronize the file system.
458   - */
459   - sync_inodes_sb(sb);
460   -
461   - /*
462 452 * Synchronize write buffers, because 'ubifs_run_commit()' does not
463 453 * do this if it waits for an already running commit.
464 454 */
... ... @@ -468,6 +458,13 @@
468 458 return err;
469 459 }
470 460  
  461 + /*
  462 + * Strictly speaking, it is not necessary to commit the journal here,
  463 + * synchronizing write-buffers would be enough. But committing makes
  464 + * UBIFS free space predictions much more accurate, so we want to let
  465 + * the user be able to get more accurate results of 'statfs()' after
  466 + * they synchronize the file system.
  467 + */
471 468 err = ubifs_run_commit(c);
472 469 if (err)
473 470 return err;
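With the 'sync_inodes_sb()' call removed and the comment relocated, '->sync_fs()' is left doing two things in this order (a sketch only; 'ubifs_wbuf_sync()' is an assumed name for the write-buffer sync that the collapsed context above performs):

	/* 1. Flush the journal heads' write-buffers, since a commit that merely
	 *    waits for an already running commit would not do this. */
	for (i = 0; i < c->jhead_cnt; i++) {
		err = ubifs_wbuf_sync(&c->jheads[i].wbuf);	/* assumed helper */
		if (err)
			return err;
	}
	/* 2. Commit, so that a later statfs() reports accurate free space. */
	err = ubifs_run_commit(c);
	if (err)
		return err;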
... ... @@ -1720,8 +1717,6 @@
1720 1717 ubifs_msg("un-mount UBI device %d, volume %d", c->vi.ubi_num,
1721 1718 c->vi.vol_id);
1722 1719  
1723   - lock_kernel();
1724   -
1725 1720 /*
1726 1721 * The following asserts are only valid if there has not been a failure
1727 1722 * of the media. For example, there will be dirty inodes if we failed
... ... @@ -1786,8 +1781,6 @@
1786 1781 ubi_close_volume(c->ubi);
1787 1782 mutex_unlock(&c->umount_mutex);
1788 1783 kfree(c);
1789   -
1790   - unlock_kernel();
1791 1784 }
1792 1785  
1793 1786 static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
1794 1787  
1795 1788  
1796 1789  
1797 1790  
... ... @@ -1803,22 +1796,17 @@
1803 1796 return err;
1804 1797 }
1805 1798  
1806   - lock_kernel();
1807 1799 if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
1808 1800 if (c->ro_media) {
1809 1801 ubifs_msg("cannot re-mount due to prior errors");
1810   - unlock_kernel();
1811 1802 return -EROFS;
1812 1803 }
1813 1804 err = ubifs_remount_rw(c);
1814   - if (err) {
1815   - unlock_kernel();
  1805 + if (err)
1816 1806 return err;
1817   - }
1818 1807 } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) {
1819 1808 if (c->ro_media) {
1820 1809 ubifs_msg("cannot re-mount due to prior errors");
1821   - unlock_kernel();
1822 1810 return -EROFS;
1823 1811 }
1824 1812 ubifs_remount_ro(c);
... ... @@ -1833,7 +1821,6 @@
1833 1821 }
1834 1822  
1835 1823 ubifs_assert(c->lst.taken_empty_lebs > 0);
1836   - unlock_kernel();
1837 1824 return 0;
1838 1825 }
1839 1826  
... ... @@ -1159,8 +1159,8 @@
1159 1159 * o exact match, i.e. the found zero-level znode contains key @key, then %1
1160 1160 * is returned and slot number of the matched branch is stored in @n;
1161 1161 * o not exact match, which means that zero-level znode does not contain
1162   - * @key, then %0 is returned and slot number of the closed branch is stored
1163   - * in @n;
  1162 + * @key, then %0 is returned and slot number of the closest branch is stored
  1163 + * in @n;
1164 1164 * o @key is so small that it is even less than the lowest key of the
1165 1165 * leftmost zero-level node, then %0 is returned and %0 is stored in @n.
1166 1166 *
... ... @@ -1433,7 +1433,7 @@
1433 1433 * @lnum: LEB number is returned here
1434 1434 * @offs: offset is returned here
1435 1435 *
1436   - * This function look up and reads node with key @key. The caller has to make
  1436 + * This function looks up and reads node with key @key. The caller has to make
1437 1437 * sure the @node buffer is large enough to fit the node. Returns zero in case
1438 1438 * of success, %-ENOENT if the node was not found, and a negative error code in
1439 1439 * case of failure. The node location can be returned in @lnum and @offs.
... ... @@ -3268,4 +3268,74 @@
3268 3268 mutex_unlock(&c->tnc_mutex);
3269 3269 return err;
3270 3270 }
  3271 +
  3272 +#ifdef CONFIG_UBIFS_FS_DEBUG
  3273 +
  3274 +/**
  3275 + * dbg_check_inode_size - check if inode size is correct.
  3276 + * @c: UBIFS file-system description object
  3277 + * @inum: inode number
  3278 + * @size: inode size
  3279 + *
  3280 + * This function makes sure that the inode size (@size) is correct and it does
  3281 + * not have any pages beyond @size. Returns zero if the inode is OK, %-EINVAL
  3282 + * if it has a data page beyond @size, and other negative error code in case of
  3283 + * other errors.
  3284 + */
  3285 +int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
  3286 + loff_t size)
  3287 +{
  3288 + int err, n;
  3289 + union ubifs_key from_key, to_key, *key;
  3290 + struct ubifs_znode *znode;
  3291 + unsigned int block;
  3292 +
  3293 + if (!S_ISREG(inode->i_mode))
  3294 + return 0;
  3295 + if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
  3296 + return 0;
  3297 +
  3298 + block = (size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
  3299 + data_key_init(c, &from_key, inode->i_ino, block);
  3300 + highest_data_key(c, &to_key, inode->i_ino);
  3301 +
  3302 + mutex_lock(&c->tnc_mutex);
  3303 + err = ubifs_lookup_level0(c, &from_key, &znode, &n);
  3304 + if (err < 0)
  3305 + goto out_unlock;
  3306 +
  3307 + if (err) {
  3308 + err = -EINVAL;
  3309 + key = &from_key;
  3310 + goto out_dump;
  3311 + }
  3312 +
  3313 + err = tnc_next(c, &znode, &n);
  3314 + if (err == -ENOENT) {
  3315 + err = 0;
  3316 + goto out_unlock;
  3317 + }
  3318 + if (err < 0)
  3319 + goto out_unlock;
  3320 +
  3321 + ubifs_assert(err == 0);
  3322 + key = &znode->zbranch[n].key;
  3323 + if (!key_in_range(c, key, &from_key, &to_key))
  3324 + goto out_unlock;
  3325 +
  3326 +out_dump:
  3327 + block = key_block(c, key);
  3328 + ubifs_err("inode %lu has size %lld, but there are data at offset %lld "
  3329 + "(data key %s)", (unsigned long)inode->i_ino, size,
  3330 + ((loff_t)block) << UBIFS_BLOCK_SHIFT, DBGKEY(key));
  3331 + dbg_dump_inode(c, inode);
  3332 + dbg_dump_stack();
  3333 + err = -EINVAL;
  3334 +
  3335 +out_unlock:
  3336 + mutex_unlock(&c->tnc_mutex);
  3337 + return err;
  3338 +}
  3339 +
  3340 +#endif /* CONFIG_UBIFS_FS_DEBUG */
fs/ubifs/tnc_commit.c
... ... @@ -245,7 +245,7 @@
245 245 * it is more comprehensive and less efficient than is needed for this
246 246 * purpose.
247 247 */
248   - sleb = ubifs_scan(c, lnum, 0, c->ileb_buf);
  248 + sleb = ubifs_scan(c, lnum, 0, c->ileb_buf, 0);
249 249 c->ileb_len = 0;
250 250 if (IS_ERR(sleb))
251 251 return PTR_ERR(sleb);
fs/ubifs/ubifs-media.h
... ... @@ -135,6 +135,13 @@
135 135 /* The key is always at the same position in all keyed nodes */
136 136 #define UBIFS_KEY_OFFSET offsetof(struct ubifs_ino_node, key)
137 137  
  138 +/* Garbage collector journal head number */
  139 +#define UBIFS_GC_HEAD 0
  140 +/* Base journal head number */
  141 +#define UBIFS_BASE_HEAD 1
  142 +/* Data journal head number */
  143 +#define UBIFS_DATA_HEAD 2
  144 +
138 145 /*
139 146 * LEB Properties Tree node types.
140 147 *
... ... @@ -105,12 +105,10 @@
105 105 /* Number of non-data journal heads */
106 106 #define NONDATA_JHEADS_CNT 2
107 107  
108   -/* Garbage collector head */
109   -#define GCHD 0
110   -/* Base journal head number */
111   -#define BASEHD 1
112   -/* First "general purpose" journal head */
113   -#define DATAHD 2
  108 +/* Shorter names for journal head numbers for internal usage */
  109 +#define GCHD UBIFS_GC_HEAD
  110 +#define BASEHD UBIFS_BASE_HEAD
  111 +#define DATAHD UBIFS_DATA_HEAD
114 112  
115 113 /* 'No change' value for 'ubifs_change_lp()' */
116 114 #define LPROPS_NC 0x80000001
... ... @@ -1451,7 +1449,7 @@
1451 1449  
1452 1450 /* scan.c */
1453 1451 struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
1454   - int offs, void *sbuf);
  1452 + int offs, void *sbuf, int quiet);
1455 1453 void ubifs_scan_destroy(struct ubifs_scan_leb *sleb);
1456 1454 int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum,
1457 1455 int offs, int quiet);
... ... @@ -1676,6 +1674,7 @@
1676 1674 const struct ubifs_lprops *ubifs_fast_find_empty(struct ubifs_info *c);
1677 1675 const struct ubifs_lprops *ubifs_fast_find_freeable(struct ubifs_info *c);
1678 1676 const struct ubifs_lprops *ubifs_fast_find_frdi_idx(struct ubifs_info *c);
  1677 +int ubifs_calc_dark(const struct ubifs_info *c, int spc);
1679 1678  
1680 1679 /* file.c */
1681 1680 int ubifs_fsync(struct file *file, struct dentry *dentry, int datasync);
... ... @@ -78,9 +78,9 @@
78 78 SECURITY_XATTR,
79 79 };
80 80  
81   -static struct inode_operations none_inode_operations;
  81 +static const struct inode_operations none_inode_operations;
82 82 static struct address_space_operations none_address_operations;
83   -static struct file_operations none_file_operations;
  83 +static const struct file_operations none_file_operations;
84 84  
85 85 /**
86 86 * create_xattr - create an extended attribute.