Blame view
fs/logfs/dev_mtd.c
6.72 KB
5db53f3e8
|
1 2 3 4 5 6 7 8 9 10 11 |
/* * fs/logfs/dev_mtd.c - Device access methods for MTD * * As should be obvious for Linux kernel code, license is GPLv2 * * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org> */ #include "logfs.h" #include <linux/completion.h> #include <linux/mount.h> #include <linux/sched.h> |
6f485b418
|
12 |
#include <linux/slab.h> |
5db53f3e8
|
13 14 |
#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1)) |
48d361026
|
15 16 |
/*
 * logfs_mtd_read - read @len bytes at device offset @ofs into @buf.
 *
 * Returns 0 on success, the mtd_read() error code on failure, or -EIO
 * when the device returned fewer bytes than requested.
 */
static int logfs_mtd_read(struct super_block *sb, loff_t ofs, size_t len,
			void *buf)
{
	struct mtd_info *mtd = logfs_super(sb)->s_mtd;
	size_t read_len;
	int err;

	err = mtd_read(mtd, ofs, len, &read_len, buf);
	/* -EINVAL would indicate a caller bug (bad offset/length), not I/O */
	BUG_ON(err == -EINVAL);
	if (err)
		return err;

	/* Short read.  Not sure if we should loop instead. */
	if (read_len != len)
		return -EIO;

	return 0;
}
48d361026
|
32 33 |
/*
 * loffs_mtd_write - write @len bytes from @buf to device offset @ofs.
 *
 * NOTE(review): the name looks like a typo for logfs_mtd_write; it is kept
 * unchanged because __logfs_mtd_writeseg() in this file calls it by this name.
 *
 * The write must be writesize-aligned and fit within a single page.
 * Returns 0 on success, -EROFS on a read-only mount, -EIO on a write
 * failure or short write.
 *
 * Fix: the original computed page_start/page_end but never used them;
 * the dead locals are removed.
 */
static int loffs_mtd_write(struct super_block *sb, loff_t ofs, size_t len,
			void *buf)
{
	struct logfs_super *super = logfs_super(sb);
	struct mtd_info *mtd = super->s_mtd;
	size_t retlen;
	int ret;

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	/* Sanity checks: in range, writesize-aligned, at most one page. */
	BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs));
	BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift);
	BUG_ON(len > PAGE_CACHE_SIZE);

	ret = mtd_write(mtd, ofs, len, &retlen, buf);
	if (ret || (retlen != len))
		return -EIO;

	return 0;
}

/*
 * For as long as I can remember (since about 2001) mtd->erase has been an
 * asynchronous interface lacking the first driver to actually use the
 * asynchronous properties.  So just to prevent the first implementor of such
 * a thing from breaking logfs in 2350, we do the usual pointless dance to
 * declare a completion variable and wait for completion before returning
 * from logfs_mtd_erase().  What an exercise in futility!
 */
static void logfs_erase_callback(struct erase_info *ei)
{
	complete((struct completion *)ei->priv);
}
48d361026
|
68 69 |
/*
 * logfs_mtd_erase_mapping - reflect a flash erase in the page cache.
 *
 * Any cached page covering [ofs, ofs + len) is overwritten with 0xFF,
 * the value erased flash reads back.  Pages not in the cache are skipped.
 * Always returns 0.
 *
 * Fix: @index was initialized at declaration and again in the for header;
 * the redundant initializer is removed.
 */
static int logfs_mtd_erase_mapping(struct super_block *sb, loff_t ofs,
				size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct page *page;
	pgoff_t index;

	for (index = ofs >> PAGE_SHIFT; index < (ofs + len) >> PAGE_SHIFT;
			index++) {
		page = find_get_page(mapping, index);
		if (!page)
			continue;
		/* Erased flash reads as all-ones */
		memset(page_address(page), 0xFF, PAGE_SIZE);
		page_cache_release(page);
	}
	return 0;
}
48d361026
|
85 |
/*
 * logfs_mtd_erase - erase @len bytes at @ofs and scrub the page cache.
 *
 * @ensure_write is not used by this backend (see the device-ops table).
 * @len must be a whole number of erase blocks.  Returns -EROFS on a
 * read-only mount, -EIO when the erase fails, otherwise the result of
 * logfs_mtd_erase_mapping().
 */
static int logfs_mtd_erase(struct super_block *sb, loff_t ofs, size_t len,
		int ensure_write)
{
	struct mtd_info *mtd = logfs_super(sb)->s_mtd;
	struct erase_info ei;
	DECLARE_COMPLETION_ONSTACK(erase_done);
	int err;

	BUG_ON(len % mtd->erasesize);
	if (logfs_super(sb)->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	memset(&ei, 0, sizeof(ei));
	ei.mtd = mtd;
	ei.addr = ofs;
	ei.len = len;
	ei.callback = logfs_erase_callback;
	ei.priv = (long)&erase_done;

	err = mtd_erase(mtd, &ei);
	if (err)
		return -EIO;

	/* See the comment above logfs_erase_callback(): the interface is
	 * nominally asynchronous, so wait for the callback to fire. */
	wait_for_completion(&erase_done);
	if (ei.state != MTD_ERASE_DONE)
		return -EIO;

	return logfs_mtd_erase_mapping(sb, ofs, len);
}
48d361026
|
112 |
static void logfs_mtd_sync(struct super_block *sb) |
5db53f3e8
|
113 114 |
{ struct mtd_info *mtd = logfs_super(sb)->s_mtd; |
327cf2922
|
115 |
mtd_sync(mtd); |
5db53f3e8
|
116 |
} |
48d361026
|
117 |
static int logfs_mtd_readpage(void *_sb, struct page *page) |
5db53f3e8
|
118 119 120 |
{ struct super_block *sb = _sb; int err; |
48d361026
|
121 |
err = logfs_mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE, |
5db53f3e8
|
122 |
page_address(page)); |
6f485b418
|
123 124 |
if (err == -EUCLEAN || err == -EBADMSG) { /* -EBADMSG happens regularly on power failures */ |
5db53f3e8
|
125 126 127 128 129 130 131 132 133 134 135 136 137 |
err = 0; /* FIXME: force GC this segment */ } if (err) { ClearPageUptodate(page); SetPageError(page); } else { SetPageUptodate(page); ClearPageError(page); } unlock_page(page); return err; } |
48d361026
|
138 |
/*
 * logfs_mtd_find_first_sb - locate the first superblock candidate.
 *
 * Skips leading bad erase blocks and reads the page at the first good
 * one through the mapping inode.  Returns NULL when bad-block support
 * is absent or the whole device is bad; *ofs receives the byte offset.
 */
static struct page *logfs_mtd_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = logfs_mtd_readpage;
	struct mtd_info *mtd = super->s_mtd;

	if (!mtd_can_have_bb(mtd))
		return NULL;

	/* Scan forward past bad blocks, bailing out at end of device. */
	for (*ofs = 0; mtd_block_isbad(mtd, *ofs); ) {
		*ofs += mtd->erasesize;
		if (*ofs >= mtd->size)
			return NULL;
	}
	BUG_ON(*ofs & ~PAGE_MASK);
	return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
}
48d361026
|
156 |
/*
 * logfs_mtd_find_last_sb - locate the last superblock candidate.
 *
 * Walks backwards over trailing bad erase blocks, then reads the page
 * 0x1000 bytes before the end of the last good block.  Returns NULL when
 * bad-block support is absent or no good block is found; *ofs receives
 * the byte offset of the candidate.
 */
static struct page *logfs_mtd_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = logfs_mtd_readpage;
	struct mtd_info *mtd = super->s_mtd;

	if (!mtd_can_have_bb(mtd))
		return NULL;

	/* Scan backwards past bad blocks. */
	for (*ofs = mtd->size - mtd->erasesize; mtd_block_isbad(mtd, *ofs); ) {
		*ofs -= mtd->erasesize;
		if (*ofs == 0)	/* u64: the original "<= 0" means exactly zero */
			return NULL;
	}
	/* Candidate sits 0x1000 bytes before the end of the good block --
	 * NOTE(review): presumably the superblock footprint; confirm. */
	*ofs = *ofs + mtd->erasesize - 0x1000;
	BUG_ON(*ofs & ~PAGE_MASK);
	return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
}
48d361026
|
175 |
static int __logfs_mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, |
5db53f3e8
|
176 177 178 179 180 181 182 183 184 185 |
size_t nr_pages) { struct logfs_super *super = logfs_super(sb); struct address_space *mapping = super->s_mapping_inode->i_mapping; struct page *page; int i, err; for (i = 0; i < nr_pages; i++) { page = find_lock_page(mapping, index + i); BUG_ON(!page); |
48d361026
|
186 187 |
err = loffs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE, page_address(page)); |
5db53f3e8
|
188 189 190 191 192 193 194 |
unlock_page(page); page_cache_release(page); if (err) return err; } return 0; } |
48d361026
|
195 |
/*
 * logfs_mtd_writeseg - device-ops ->writeseg: write out a (possibly
 * unaligned) segment range by rounding it to whole pages.
 *
 * No-op on read-only mounts and for zero-length ranges.
 */
static void logfs_mtd_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head = ofs & (PAGE_SIZE - 1);

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return;
	/*
	 * len == 0 can happen when the object fit perfectly into a segment,
	 * the segment gets written per sync and subsequently closed.
	 */
	if (len == 0)
		return;

	/* Round [ofs, ofs + len) out to page boundaries on both ends. */
	ofs -= head;
	len = PAGE_ALIGN(len + head);
	__logfs_mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}
48d361026
|
218 |
/* Device-ops ->put_device: drop the reference taken in logfs_get_sb_mtd(). */
static void logfs_mtd_put_device(struct logfs_super *s)
{
	put_mtd_device(s->s_mtd);
}
48d361026
|
222 |
/*
 * logfs_mtd_can_write_buf - check whether the write unit at @ofs is still
 * erased (reads back as all-0xff) and may therefore be written.
 *
 * Returns 0 when writable, -ENOMEM on allocation failure, the read error,
 * or -EIO when any byte differs from 0xff.
 *
 * Fix: the original leaked @buf when logfs_mtd_read() failed, because
 * "goto out" jumped past kfree().  The free now happens on all paths.
 */
static int logfs_mtd_can_write_buf(struct super_block *sb, u64 ofs)
{
	struct logfs_super *super = logfs_super(sb);
	void *buf;
	int err;

	buf = kmalloc(super->s_writesize, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	err = logfs_mtd_read(sb, ofs, super->s_writesize, buf);
	if (err)
		goto out;
	if (memchr_inv(buf, 0xff, super->s_writesize))
		err = -EIO;
out:
	kfree(buf);
	return err;
}
5db53f3e8
|
240 |
/* Device access methods the generic logfs code uses for MTD-backed mounts. */
static const struct logfs_device_ops mtd_devops = {
	.find_first_sb	= logfs_mtd_find_first_sb,
	.find_last_sb	= logfs_mtd_find_last_sb,
	.readpage	= logfs_mtd_readpage,
	.writeseg	= logfs_mtd_writeseg,
	.erase		= logfs_mtd_erase,
	.can_write_buf	= logfs_mtd_can_write_buf,
	.sync		= logfs_mtd_sync,
	.put_device	= logfs_mtd_put_device,
};
7d945a3aa
|
250 |
int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr) |
5db53f3e8
|
251 |
{ |
0d85c7996
|
252 |
struct mtd_info *mtd = get_mtd_device(NULL, mtdnr); |
7d945a3aa
|
253 |
if (IS_ERR(mtd)) |
ccf31c10f
|
254 |
return PTR_ERR(mtd); |
0d85c7996
|
255 256 257 258 |
s->s_bdev = NULL; s->s_mtd = mtd; s->s_devops = &mtd_devops; |
7d945a3aa
|
259 |
return 0; |
5db53f3e8
|
260 |
} |