Commit 81def6b9862764924a99ac1b680e73ac8c80ac64

Authored by Joern Engel
1 parent 1932191726

Simplify and fix pad_wbuf

A comment in the old code read:
        /* The math in this function can surely use some love */

And indeed it did.  In the case that area->a_used_bytes is exactly
4096 bytes below the segment size, it fell apart.  pad_wbuf is now
split into two helpers that are significantly less complicated.

Signed-off-by: Joern Engel <joern@logfs.org>

Showing 1 changed file with 30 additions and 22 deletions (side-by-side diff)

... ... @@ -93,47 +93,55 @@
93 93 } while (len);
94 94 }
95 95  
96   -/*
97   - * bdev_writeseg will write full pages. Memset the tail to prevent data leaks.
98   - */
99   -static void pad_wbuf(struct logfs_area *area, int final)
  96 +static void pad_partial_page(struct logfs_area *area)
100 97 {
101 98 struct super_block *sb = area->a_sb;
102   - struct logfs_super *super = logfs_super(sb);
103 99 struct page *page;
104 100 u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
105 101 pgoff_t index = ofs >> PAGE_SHIFT;
106 102 long offset = ofs & (PAGE_SIZE-1);
107 103 u32 len = PAGE_SIZE - offset;
108 104  
109   - if (len == PAGE_SIZE) {
110   - /* The math in this function can surely use some love */
111   - len = 0;
112   - }
113   - if (len) {
114   - BUG_ON(area->a_used_bytes >= super->s_segsize);
115   -
116   - page = get_mapping_page(area->a_sb, index, 0);
  105 + if (len % PAGE_SIZE) {
  106 + page = get_mapping_page(sb, index, 0);
117 107 BUG_ON(!page); /* FIXME: reserve a pool */
118 108 memset(page_address(page) + offset, 0xff, len);
119 109 SetPagePrivate(page);
120 110 page_cache_release(page);
121 111 }
  112 +}
122 113  
123   - if (!final)
124   - return;
  114 +static void pad_full_pages(struct logfs_area *area)
  115 +{
  116 + struct super_block *sb = area->a_sb;
  117 + struct logfs_super *super = logfs_super(sb);
  118 + u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
  119 + u32 len = super->s_segsize - area->a_used_bytes;
  120 + pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT;
  121 + pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT;
  122 + struct page *page;
125 123  
126   - area->a_used_bytes += len;
127   - for ( ; area->a_used_bytes < super->s_segsize;
128   - area->a_used_bytes += PAGE_SIZE) {
129   - /* Memset another page */
130   - index++;
131   - page = get_mapping_page(area->a_sb, index, 0);
  124 + while (no_indizes) {
  125 + page = get_mapping_page(sb, index, 0);
132 126 BUG_ON(!page); /* FIXME: reserve a pool */
133   - memset(page_address(page), 0xff, PAGE_SIZE);
  127 + SetPageUptodate(page);
  128 + memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
134 129 SetPagePrivate(page);
135 130 page_cache_release(page);
  131 + index++;
  132 + no_indizes--;
136 133 }
  134 +}
  135 +
  136 +/*
  137 + * bdev_writeseg will write full pages. Memset the tail to prevent data leaks.
  138 + * Also make sure we allocate (and memset) all pages for final writeout.
  139 + */
  140 +static void pad_wbuf(struct logfs_area *area, int final)
  141 +{
  142 + pad_partial_page(area);
  143 + if (final)
  144 + pad_full_pages(area);
137 145 }
138 146  
139 147 /*