Commit fbd0107f4d33be01c9fb2c630036bd66b7e3d4dc
1 parent
1fc2e3e59d
Exists in
smarc-l5.0.0_1.0.0-ga
and in
5 other branches
UBI: amend comments after all the renamings
This patch amends commentaries in scan.[ch] to match the new logic. Reminder - we did the restructuring to prepare the code for adding the fastmap. This patch also renames a couple of functions - it was too difficult to separate out that change and I decided that it is not too bad to have it in the same patch with commentaries changes. Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Showing 6 changed files with 73 additions and 81 deletions Inline Diff
drivers/mtd/ubi/build.c
1 | /* | 1 | /* |
2 | * Copyright (c) International Business Machines Corp., 2006 | 2 | * Copyright (c) International Business Machines Corp., 2006 |
3 | * Copyright (c) Nokia Corporation, 2007 | 3 | * Copyright (c) Nokia Corporation, 2007 |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation; either version 2 of the License, or | 7 | * the Free Software Foundation; either version 2 of the License, or |
8 | * (at your option) any later version. | 8 | * (at your option) any later version. |
9 | * | 9 | * |
10 | * This program is distributed in the hope that it will be useful, | 10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See |
13 | * the GNU General Public License for more details. | 13 | * the GNU General Public License for more details. |
14 | * | 14 | * |
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | * | 18 | * |
19 | * Author: Artem Bityutskiy (Битюцкий Артём), | 19 | * Author: Artem Bityutskiy (Битюцкий Артём), |
20 | * Frank Haverkamp | 20 | * Frank Haverkamp |
21 | */ | 21 | */ |
22 | 22 | ||
23 | /* | 23 | /* |
24 | * This file includes UBI initialization and building of UBI devices. | 24 | * This file includes UBI initialization and building of UBI devices. |
25 | * | 25 | * |
26 | * When UBI is initialized, it attaches all the MTD devices specified as the | 26 | * When UBI is initialized, it attaches all the MTD devices specified as the |
27 | * module load parameters or the kernel boot parameters. If MTD devices were | 27 | * module load parameters or the kernel boot parameters. If MTD devices were |
 28 | * not specified, UBI does not attach any MTD device, but it is possible to do | 28 | * not specified, UBI does not attach any MTD device, but it is possible to do |
29 | * later using the "UBI control device". | 29 | * later using the "UBI control device". |
30 | * | ||
31 | * At the moment we only attach UBI devices by scanning, which will become a | ||
32 | * bottleneck when flashes reach certain large size. Then one may improve UBI | ||
33 | * and add other methods, although it does not seem to be easy to do. | ||
34 | */ | 30 | */ |
35 | 31 | ||
36 | #include <linux/err.h> | 32 | #include <linux/err.h> |
37 | #include <linux/module.h> | 33 | #include <linux/module.h> |
38 | #include <linux/moduleparam.h> | 34 | #include <linux/moduleparam.h> |
39 | #include <linux/stringify.h> | 35 | #include <linux/stringify.h> |
40 | #include <linux/namei.h> | 36 | #include <linux/namei.h> |
41 | #include <linux/stat.h> | 37 | #include <linux/stat.h> |
42 | #include <linux/miscdevice.h> | 38 | #include <linux/miscdevice.h> |
43 | #include <linux/log2.h> | 39 | #include <linux/log2.h> |
44 | #include <linux/kthread.h> | 40 | #include <linux/kthread.h> |
45 | #include <linux/kernel.h> | 41 | #include <linux/kernel.h> |
46 | #include <linux/slab.h> | 42 | #include <linux/slab.h> |
47 | #include "ubi.h" | 43 | #include "ubi.h" |
48 | 44 | ||
49 | /* Maximum length of the 'mtd=' parameter */ | 45 | /* Maximum length of the 'mtd=' parameter */ |
50 | #define MTD_PARAM_LEN_MAX 64 | 46 | #define MTD_PARAM_LEN_MAX 64 |
51 | 47 | ||
52 | #ifdef CONFIG_MTD_UBI_MODULE | 48 | #ifdef CONFIG_MTD_UBI_MODULE |
53 | #define ubi_is_module() 1 | 49 | #define ubi_is_module() 1 |
54 | #else | 50 | #else |
55 | #define ubi_is_module() 0 | 51 | #define ubi_is_module() 0 |
56 | #endif | 52 | #endif |
57 | 53 | ||
58 | /** | 54 | /** |
59 | * struct mtd_dev_param - MTD device parameter description data structure. | 55 | * struct mtd_dev_param - MTD device parameter description data structure. |
60 | * @name: MTD character device node path, MTD device name, or MTD device number | 56 | * @name: MTD character device node path, MTD device name, or MTD device number |
61 | * string | 57 | * string |
62 | * @vid_hdr_offs: VID header offset | 58 | * @vid_hdr_offs: VID header offset |
63 | */ | 59 | */ |
64 | struct mtd_dev_param { | 60 | struct mtd_dev_param { |
65 | char name[MTD_PARAM_LEN_MAX]; | 61 | char name[MTD_PARAM_LEN_MAX]; |
66 | int vid_hdr_offs; | 62 | int vid_hdr_offs; |
67 | }; | 63 | }; |
68 | 64 | ||
69 | /* Numbers of elements set in the @mtd_dev_param array */ | 65 | /* Numbers of elements set in the @mtd_dev_param array */ |
70 | static int __initdata mtd_devs; | 66 | static int __initdata mtd_devs; |
71 | 67 | ||
72 | /* MTD devices specification parameters */ | 68 | /* MTD devices specification parameters */ |
73 | static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES]; | 69 | static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES]; |
74 | 70 | ||
75 | /* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */ | 71 | /* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */ |
76 | struct class *ubi_class; | 72 | struct class *ubi_class; |
77 | 73 | ||
78 | /* Slab cache for wear-leveling entries */ | 74 | /* Slab cache for wear-leveling entries */ |
79 | struct kmem_cache *ubi_wl_entry_slab; | 75 | struct kmem_cache *ubi_wl_entry_slab; |
80 | 76 | ||
81 | /* UBI control character device */ | 77 | /* UBI control character device */ |
82 | static struct miscdevice ubi_ctrl_cdev = { | 78 | static struct miscdevice ubi_ctrl_cdev = { |
83 | .minor = MISC_DYNAMIC_MINOR, | 79 | .minor = MISC_DYNAMIC_MINOR, |
84 | .name = "ubi_ctrl", | 80 | .name = "ubi_ctrl", |
85 | .fops = &ubi_ctrl_cdev_operations, | 81 | .fops = &ubi_ctrl_cdev_operations, |
86 | }; | 82 | }; |
87 | 83 | ||
88 | /* All UBI devices in system */ | 84 | /* All UBI devices in system */ |
89 | static struct ubi_device *ubi_devices[UBI_MAX_DEVICES]; | 85 | static struct ubi_device *ubi_devices[UBI_MAX_DEVICES]; |
90 | 86 | ||
91 | /* Serializes UBI devices creations and removals */ | 87 | /* Serializes UBI devices creations and removals */ |
92 | DEFINE_MUTEX(ubi_devices_mutex); | 88 | DEFINE_MUTEX(ubi_devices_mutex); |
93 | 89 | ||
94 | /* Protects @ubi_devices and @ubi->ref_count */ | 90 | /* Protects @ubi_devices and @ubi->ref_count */ |
95 | static DEFINE_SPINLOCK(ubi_devices_lock); | 91 | static DEFINE_SPINLOCK(ubi_devices_lock); |
96 | 92 | ||
97 | /* "Show" method for files in '/<sysfs>/class/ubi/' */ | 93 | /* "Show" method for files in '/<sysfs>/class/ubi/' */ |
98 | static ssize_t ubi_version_show(struct class *class, | 94 | static ssize_t ubi_version_show(struct class *class, |
99 | struct class_attribute *attr, char *buf) | 95 | struct class_attribute *attr, char *buf) |
100 | { | 96 | { |
101 | return sprintf(buf, "%d\n", UBI_VERSION); | 97 | return sprintf(buf, "%d\n", UBI_VERSION); |
102 | } | 98 | } |
103 | 99 | ||
104 | /* UBI version attribute ('/<sysfs>/class/ubi/version') */ | 100 | /* UBI version attribute ('/<sysfs>/class/ubi/version') */ |
105 | static struct class_attribute ubi_version = | 101 | static struct class_attribute ubi_version = |
106 | __ATTR(version, S_IRUGO, ubi_version_show, NULL); | 102 | __ATTR(version, S_IRUGO, ubi_version_show, NULL); |
107 | 103 | ||
108 | static ssize_t dev_attribute_show(struct device *dev, | 104 | static ssize_t dev_attribute_show(struct device *dev, |
109 | struct device_attribute *attr, char *buf); | 105 | struct device_attribute *attr, char *buf); |
110 | 106 | ||
111 | /* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */ | 107 | /* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */ |
112 | static struct device_attribute dev_eraseblock_size = | 108 | static struct device_attribute dev_eraseblock_size = |
113 | __ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL); | 109 | __ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL); |
114 | static struct device_attribute dev_avail_eraseblocks = | 110 | static struct device_attribute dev_avail_eraseblocks = |
115 | __ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL); | 111 | __ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL); |
116 | static struct device_attribute dev_total_eraseblocks = | 112 | static struct device_attribute dev_total_eraseblocks = |
117 | __ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL); | 113 | __ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL); |
118 | static struct device_attribute dev_volumes_count = | 114 | static struct device_attribute dev_volumes_count = |
119 | __ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL); | 115 | __ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL); |
120 | static struct device_attribute dev_max_ec = | 116 | static struct device_attribute dev_max_ec = |
121 | __ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL); | 117 | __ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL); |
122 | static struct device_attribute dev_reserved_for_bad = | 118 | static struct device_attribute dev_reserved_for_bad = |
123 | __ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL); | 119 | __ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL); |
124 | static struct device_attribute dev_bad_peb_count = | 120 | static struct device_attribute dev_bad_peb_count = |
125 | __ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL); | 121 | __ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL); |
126 | static struct device_attribute dev_max_vol_count = | 122 | static struct device_attribute dev_max_vol_count = |
127 | __ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL); | 123 | __ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL); |
128 | static struct device_attribute dev_min_io_size = | 124 | static struct device_attribute dev_min_io_size = |
129 | __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL); | 125 | __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL); |
130 | static struct device_attribute dev_bgt_enabled = | 126 | static struct device_attribute dev_bgt_enabled = |
131 | __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL); | 127 | __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL); |
132 | static struct device_attribute dev_mtd_num = | 128 | static struct device_attribute dev_mtd_num = |
133 | __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL); | 129 | __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL); |
134 | 130 | ||
135 | /** | 131 | /** |
136 | * ubi_volume_notify - send a volume change notification. | 132 | * ubi_volume_notify - send a volume change notification. |
137 | * @ubi: UBI device description object | 133 | * @ubi: UBI device description object |
138 | * @vol: volume description object of the changed volume | 134 | * @vol: volume description object of the changed volume |
139 | * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc) | 135 | * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc) |
140 | * | 136 | * |
141 | * This is a helper function which notifies all subscribers about a volume | 137 | * This is a helper function which notifies all subscribers about a volume |
142 | * change event (creation, removal, re-sizing, re-naming, updating). Returns | 138 | * change event (creation, removal, re-sizing, re-naming, updating). Returns |
143 | * zero in case of success and a negative error code in case of failure. | 139 | * zero in case of success and a negative error code in case of failure. |
144 | */ | 140 | */ |
145 | int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype) | 141 | int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype) |
146 | { | 142 | { |
147 | struct ubi_notification nt; | 143 | struct ubi_notification nt; |
148 | 144 | ||
149 | ubi_do_get_device_info(ubi, &nt.di); | 145 | ubi_do_get_device_info(ubi, &nt.di); |
150 | ubi_do_get_volume_info(ubi, vol, &nt.vi); | 146 | ubi_do_get_volume_info(ubi, vol, &nt.vi); |
151 | return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt); | 147 | return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt); |
152 | } | 148 | } |
153 | 149 | ||
154 | /** | 150 | /** |
155 | * ubi_notify_all - send a notification to all volumes. | 151 | * ubi_notify_all - send a notification to all volumes. |
156 | * @ubi: UBI device description object | 152 | * @ubi: UBI device description object |
157 | * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc) | 153 | * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc) |
158 | * @nb: the notifier to call | 154 | * @nb: the notifier to call |
159 | * | 155 | * |
160 | * This function walks all volumes of UBI device @ubi and sends the @ntype | 156 | * This function walks all volumes of UBI device @ubi and sends the @ntype |
161 | * notification for each volume. If @nb is %NULL, then all registered notifiers | 157 | * notification for each volume. If @nb is %NULL, then all registered notifiers |
162 | * are called, otherwise only the @nb notifier is called. Returns the number of | 158 | * are called, otherwise only the @nb notifier is called. Returns the number of |
163 | * sent notifications. | 159 | * sent notifications. |
164 | */ | 160 | */ |
165 | int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb) | 161 | int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb) |
166 | { | 162 | { |
167 | struct ubi_notification nt; | 163 | struct ubi_notification nt; |
168 | int i, count = 0; | 164 | int i, count = 0; |
169 | 165 | ||
170 | ubi_do_get_device_info(ubi, &nt.di); | 166 | ubi_do_get_device_info(ubi, &nt.di); |
171 | 167 | ||
172 | mutex_lock(&ubi->device_mutex); | 168 | mutex_lock(&ubi->device_mutex); |
173 | for (i = 0; i < ubi->vtbl_slots; i++) { | 169 | for (i = 0; i < ubi->vtbl_slots; i++) { |
174 | /* | 170 | /* |
175 | * Since the @ubi->device is locked, and we are not going to | 171 | * Since the @ubi->device is locked, and we are not going to |
176 | * change @ubi->volumes, we do not have to lock | 172 | * change @ubi->volumes, we do not have to lock |
177 | * @ubi->volumes_lock. | 173 | * @ubi->volumes_lock. |
178 | */ | 174 | */ |
179 | if (!ubi->volumes[i]) | 175 | if (!ubi->volumes[i]) |
180 | continue; | 176 | continue; |
181 | 177 | ||
182 | ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi); | 178 | ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi); |
183 | if (nb) | 179 | if (nb) |
184 | nb->notifier_call(nb, ntype, &nt); | 180 | nb->notifier_call(nb, ntype, &nt); |
185 | else | 181 | else |
186 | blocking_notifier_call_chain(&ubi_notifiers, ntype, | 182 | blocking_notifier_call_chain(&ubi_notifiers, ntype, |
187 | &nt); | 183 | &nt); |
188 | count += 1; | 184 | count += 1; |
189 | } | 185 | } |
190 | mutex_unlock(&ubi->device_mutex); | 186 | mutex_unlock(&ubi->device_mutex); |
191 | 187 | ||
192 | return count; | 188 | return count; |
193 | } | 189 | } |
194 | 190 | ||
195 | /** | 191 | /** |
196 | * ubi_enumerate_volumes - send "add" notification for all existing volumes. | 192 | * ubi_enumerate_volumes - send "add" notification for all existing volumes. |
197 | * @nb: the notifier to call | 193 | * @nb: the notifier to call |
198 | * | 194 | * |
199 | * This function walks all UBI devices and volumes and sends the | 195 | * This function walks all UBI devices and volumes and sends the |
200 | * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all | 196 | * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all |
201 | * registered notifiers are called, otherwise only the @nb notifier is called. | 197 | * registered notifiers are called, otherwise only the @nb notifier is called. |
202 | * Returns the number of sent notifications. | 198 | * Returns the number of sent notifications. |
203 | */ | 199 | */ |
204 | int ubi_enumerate_volumes(struct notifier_block *nb) | 200 | int ubi_enumerate_volumes(struct notifier_block *nb) |
205 | { | 201 | { |
206 | int i, count = 0; | 202 | int i, count = 0; |
207 | 203 | ||
208 | /* | 204 | /* |
209 | * Since the @ubi_devices_mutex is locked, and we are not going to | 205 | * Since the @ubi_devices_mutex is locked, and we are not going to |
210 | * change @ubi_devices, we do not have to lock @ubi_devices_lock. | 206 | * change @ubi_devices, we do not have to lock @ubi_devices_lock. |
211 | */ | 207 | */ |
212 | for (i = 0; i < UBI_MAX_DEVICES; i++) { | 208 | for (i = 0; i < UBI_MAX_DEVICES; i++) { |
213 | struct ubi_device *ubi = ubi_devices[i]; | 209 | struct ubi_device *ubi = ubi_devices[i]; |
214 | 210 | ||
215 | if (!ubi) | 211 | if (!ubi) |
216 | continue; | 212 | continue; |
217 | count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb); | 213 | count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb); |
218 | } | 214 | } |
219 | 215 | ||
220 | return count; | 216 | return count; |
221 | } | 217 | } |
222 | 218 | ||
223 | /** | 219 | /** |
224 | * ubi_get_device - get UBI device. | 220 | * ubi_get_device - get UBI device. |
225 | * @ubi_num: UBI device number | 221 | * @ubi_num: UBI device number |
226 | * | 222 | * |
227 | * This function returns UBI device description object for UBI device number | 223 | * This function returns UBI device description object for UBI device number |
228 | * @ubi_num, or %NULL if the device does not exist. This function increases the | 224 | * @ubi_num, or %NULL if the device does not exist. This function increases the |
229 | * device reference count to prevent removal of the device. In other words, the | 225 | * device reference count to prevent removal of the device. In other words, the |
230 | * device cannot be removed if its reference count is not zero. | 226 | * device cannot be removed if its reference count is not zero. |
231 | */ | 227 | */ |
232 | struct ubi_device *ubi_get_device(int ubi_num) | 228 | struct ubi_device *ubi_get_device(int ubi_num) |
233 | { | 229 | { |
234 | struct ubi_device *ubi; | 230 | struct ubi_device *ubi; |
235 | 231 | ||
236 | spin_lock(&ubi_devices_lock); | 232 | spin_lock(&ubi_devices_lock); |
237 | ubi = ubi_devices[ubi_num]; | 233 | ubi = ubi_devices[ubi_num]; |
238 | if (ubi) { | 234 | if (ubi) { |
239 | ubi_assert(ubi->ref_count >= 0); | 235 | ubi_assert(ubi->ref_count >= 0); |
240 | ubi->ref_count += 1; | 236 | ubi->ref_count += 1; |
241 | get_device(&ubi->dev); | 237 | get_device(&ubi->dev); |
242 | } | 238 | } |
243 | spin_unlock(&ubi_devices_lock); | 239 | spin_unlock(&ubi_devices_lock); |
244 | 240 | ||
245 | return ubi; | 241 | return ubi; |
246 | } | 242 | } |
247 | 243 | ||
248 | /** | 244 | /** |
249 | * ubi_put_device - drop an UBI device reference. | 245 | * ubi_put_device - drop an UBI device reference. |
250 | * @ubi: UBI device description object | 246 | * @ubi: UBI device description object |
251 | */ | 247 | */ |
252 | void ubi_put_device(struct ubi_device *ubi) | 248 | void ubi_put_device(struct ubi_device *ubi) |
253 | { | 249 | { |
254 | spin_lock(&ubi_devices_lock); | 250 | spin_lock(&ubi_devices_lock); |
255 | ubi->ref_count -= 1; | 251 | ubi->ref_count -= 1; |
256 | put_device(&ubi->dev); | 252 | put_device(&ubi->dev); |
257 | spin_unlock(&ubi_devices_lock); | 253 | spin_unlock(&ubi_devices_lock); |
258 | } | 254 | } |
259 | 255 | ||
260 | /** | 256 | /** |
261 | * ubi_get_by_major - get UBI device by character device major number. | 257 | * ubi_get_by_major - get UBI device by character device major number. |
262 | * @major: major number | 258 | * @major: major number |
263 | * | 259 | * |
264 | * This function is similar to 'ubi_get_device()', but it searches the device | 260 | * This function is similar to 'ubi_get_device()', but it searches the device |
265 | * by its major number. | 261 | * by its major number. |
266 | */ | 262 | */ |
267 | struct ubi_device *ubi_get_by_major(int major) | 263 | struct ubi_device *ubi_get_by_major(int major) |
268 | { | 264 | { |
269 | int i; | 265 | int i; |
270 | struct ubi_device *ubi; | 266 | struct ubi_device *ubi; |
271 | 267 | ||
272 | spin_lock(&ubi_devices_lock); | 268 | spin_lock(&ubi_devices_lock); |
273 | for (i = 0; i < UBI_MAX_DEVICES; i++) { | 269 | for (i = 0; i < UBI_MAX_DEVICES; i++) { |
274 | ubi = ubi_devices[i]; | 270 | ubi = ubi_devices[i]; |
275 | if (ubi && MAJOR(ubi->cdev.dev) == major) { | 271 | if (ubi && MAJOR(ubi->cdev.dev) == major) { |
276 | ubi_assert(ubi->ref_count >= 0); | 272 | ubi_assert(ubi->ref_count >= 0); |
277 | ubi->ref_count += 1; | 273 | ubi->ref_count += 1; |
278 | get_device(&ubi->dev); | 274 | get_device(&ubi->dev); |
279 | spin_unlock(&ubi_devices_lock); | 275 | spin_unlock(&ubi_devices_lock); |
280 | return ubi; | 276 | return ubi; |
281 | } | 277 | } |
282 | } | 278 | } |
283 | spin_unlock(&ubi_devices_lock); | 279 | spin_unlock(&ubi_devices_lock); |
284 | 280 | ||
285 | return NULL; | 281 | return NULL; |
286 | } | 282 | } |
287 | 283 | ||
288 | /** | 284 | /** |
289 | * ubi_major2num - get UBI device number by character device major number. | 285 | * ubi_major2num - get UBI device number by character device major number. |
290 | * @major: major number | 286 | * @major: major number |
291 | * | 287 | * |
292 | * This function searches UBI device number object by its major number. If UBI | 288 | * This function searches UBI device number object by its major number. If UBI |
293 | * device was not found, this function returns -ENODEV, otherwise the UBI device | 289 | * device was not found, this function returns -ENODEV, otherwise the UBI device |
294 | * number is returned. | 290 | * number is returned. |
295 | */ | 291 | */ |
296 | int ubi_major2num(int major) | 292 | int ubi_major2num(int major) |
297 | { | 293 | { |
298 | int i, ubi_num = -ENODEV; | 294 | int i, ubi_num = -ENODEV; |
299 | 295 | ||
300 | spin_lock(&ubi_devices_lock); | 296 | spin_lock(&ubi_devices_lock); |
301 | for (i = 0; i < UBI_MAX_DEVICES; i++) { | 297 | for (i = 0; i < UBI_MAX_DEVICES; i++) { |
302 | struct ubi_device *ubi = ubi_devices[i]; | 298 | struct ubi_device *ubi = ubi_devices[i]; |
303 | 299 | ||
304 | if (ubi && MAJOR(ubi->cdev.dev) == major) { | 300 | if (ubi && MAJOR(ubi->cdev.dev) == major) { |
305 | ubi_num = ubi->ubi_num; | 301 | ubi_num = ubi->ubi_num; |
306 | break; | 302 | break; |
307 | } | 303 | } |
308 | } | 304 | } |
309 | spin_unlock(&ubi_devices_lock); | 305 | spin_unlock(&ubi_devices_lock); |
310 | 306 | ||
311 | return ubi_num; | 307 | return ubi_num; |
312 | } | 308 | } |
313 | 309 | ||
314 | /* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */ | 310 | /* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */ |
315 | static ssize_t dev_attribute_show(struct device *dev, | 311 | static ssize_t dev_attribute_show(struct device *dev, |
316 | struct device_attribute *attr, char *buf) | 312 | struct device_attribute *attr, char *buf) |
317 | { | 313 | { |
318 | ssize_t ret; | 314 | ssize_t ret; |
319 | struct ubi_device *ubi; | 315 | struct ubi_device *ubi; |
320 | 316 | ||
321 | /* | 317 | /* |
322 | * The below code looks weird, but it actually makes sense. We get the | 318 | * The below code looks weird, but it actually makes sense. We get the |
323 | * UBI device reference from the contained 'struct ubi_device'. But it | 319 | * UBI device reference from the contained 'struct ubi_device'. But it |
324 | * is unclear if the device was removed or not yet. Indeed, if the | 320 | * is unclear if the device was removed or not yet. Indeed, if the |
325 | * device was removed before we increased its reference count, | 321 | * device was removed before we increased its reference count, |
326 | * 'ubi_get_device()' will return -ENODEV and we fail. | 322 | * 'ubi_get_device()' will return -ENODEV and we fail. |
327 | * | 323 | * |
328 | * Remember, 'struct ubi_device' is freed in the release function, so | 324 | * Remember, 'struct ubi_device' is freed in the release function, so |
329 | * we still can use 'ubi->ubi_num'. | 325 | * we still can use 'ubi->ubi_num'. |
330 | */ | 326 | */ |
331 | ubi = container_of(dev, struct ubi_device, dev); | 327 | ubi = container_of(dev, struct ubi_device, dev); |
332 | ubi = ubi_get_device(ubi->ubi_num); | 328 | ubi = ubi_get_device(ubi->ubi_num); |
333 | if (!ubi) | 329 | if (!ubi) |
334 | return -ENODEV; | 330 | return -ENODEV; |
335 | 331 | ||
336 | if (attr == &dev_eraseblock_size) | 332 | if (attr == &dev_eraseblock_size) |
337 | ret = sprintf(buf, "%d\n", ubi->leb_size); | 333 | ret = sprintf(buf, "%d\n", ubi->leb_size); |
338 | else if (attr == &dev_avail_eraseblocks) | 334 | else if (attr == &dev_avail_eraseblocks) |
339 | ret = sprintf(buf, "%d\n", ubi->avail_pebs); | 335 | ret = sprintf(buf, "%d\n", ubi->avail_pebs); |
340 | else if (attr == &dev_total_eraseblocks) | 336 | else if (attr == &dev_total_eraseblocks) |
341 | ret = sprintf(buf, "%d\n", ubi->good_peb_count); | 337 | ret = sprintf(buf, "%d\n", ubi->good_peb_count); |
342 | else if (attr == &dev_volumes_count) | 338 | else if (attr == &dev_volumes_count) |
343 | ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT); | 339 | ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT); |
344 | else if (attr == &dev_max_ec) | 340 | else if (attr == &dev_max_ec) |
345 | ret = sprintf(buf, "%d\n", ubi->max_ec); | 341 | ret = sprintf(buf, "%d\n", ubi->max_ec); |
346 | else if (attr == &dev_reserved_for_bad) | 342 | else if (attr == &dev_reserved_for_bad) |
347 | ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs); | 343 | ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs); |
348 | else if (attr == &dev_bad_peb_count) | 344 | else if (attr == &dev_bad_peb_count) |
349 | ret = sprintf(buf, "%d\n", ubi->bad_peb_count); | 345 | ret = sprintf(buf, "%d\n", ubi->bad_peb_count); |
350 | else if (attr == &dev_max_vol_count) | 346 | else if (attr == &dev_max_vol_count) |
351 | ret = sprintf(buf, "%d\n", ubi->vtbl_slots); | 347 | ret = sprintf(buf, "%d\n", ubi->vtbl_slots); |
352 | else if (attr == &dev_min_io_size) | 348 | else if (attr == &dev_min_io_size) |
353 | ret = sprintf(buf, "%d\n", ubi->min_io_size); | 349 | ret = sprintf(buf, "%d\n", ubi->min_io_size); |
354 | else if (attr == &dev_bgt_enabled) | 350 | else if (attr == &dev_bgt_enabled) |
355 | ret = sprintf(buf, "%d\n", ubi->thread_enabled); | 351 | ret = sprintf(buf, "%d\n", ubi->thread_enabled); |
356 | else if (attr == &dev_mtd_num) | 352 | else if (attr == &dev_mtd_num) |
357 | ret = sprintf(buf, "%d\n", ubi->mtd->index); | 353 | ret = sprintf(buf, "%d\n", ubi->mtd->index); |
358 | else | 354 | else |
359 | ret = -EINVAL; | 355 | ret = -EINVAL; |
360 | 356 | ||
361 | ubi_put_device(ubi); | 357 | ubi_put_device(ubi); |
362 | return ret; | 358 | return ret; |
363 | } | 359 | } |
364 | 360 | ||
365 | static void dev_release(struct device *dev) | 361 | static void dev_release(struct device *dev) |
366 | { | 362 | { |
367 | struct ubi_device *ubi = container_of(dev, struct ubi_device, dev); | 363 | struct ubi_device *ubi = container_of(dev, struct ubi_device, dev); |
368 | 364 | ||
369 | kfree(ubi); | 365 | kfree(ubi); |
370 | } | 366 | } |
371 | 367 | ||
372 | /** | 368 | /** |
373 | * ubi_sysfs_init - initialize sysfs for an UBI device. | 369 | * ubi_sysfs_init - initialize sysfs for an UBI device. |
374 | * @ubi: UBI device description object | 370 | * @ubi: UBI device description object |
375 | * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was | 371 | * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was |
376 | * taken | 372 | * taken |
377 | * | 373 | * |
378 | * This function returns zero in case of success and a negative error code in | 374 | * This function returns zero in case of success and a negative error code in |
379 | * case of failure. | 375 | * case of failure. |
380 | */ | 376 | */ |
381 | static int ubi_sysfs_init(struct ubi_device *ubi, int *ref) | 377 | static int ubi_sysfs_init(struct ubi_device *ubi, int *ref) |
382 | { | 378 | { |
383 | int err; | 379 | int err; |
384 | 380 | ||
385 | ubi->dev.release = dev_release; | 381 | ubi->dev.release = dev_release; |
386 | ubi->dev.devt = ubi->cdev.dev; | 382 | ubi->dev.devt = ubi->cdev.dev; |
387 | ubi->dev.class = ubi_class; | 383 | ubi->dev.class = ubi_class; |
388 | dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num); | 384 | dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num); |
389 | err = device_register(&ubi->dev); | 385 | err = device_register(&ubi->dev); |
390 | if (err) | 386 | if (err) |
391 | return err; | 387 | return err; |
392 | 388 | ||
393 | *ref = 1; | 389 | *ref = 1; |
394 | err = device_create_file(&ubi->dev, &dev_eraseblock_size); | 390 | err = device_create_file(&ubi->dev, &dev_eraseblock_size); |
395 | if (err) | 391 | if (err) |
396 | return err; | 392 | return err; |
397 | err = device_create_file(&ubi->dev, &dev_avail_eraseblocks); | 393 | err = device_create_file(&ubi->dev, &dev_avail_eraseblocks); |
398 | if (err) | 394 | if (err) |
399 | return err; | 395 | return err; |
400 | err = device_create_file(&ubi->dev, &dev_total_eraseblocks); | 396 | err = device_create_file(&ubi->dev, &dev_total_eraseblocks); |
401 | if (err) | 397 | if (err) |
402 | return err; | 398 | return err; |
403 | err = device_create_file(&ubi->dev, &dev_volumes_count); | 399 | err = device_create_file(&ubi->dev, &dev_volumes_count); |
404 | if (err) | 400 | if (err) |
405 | return err; | 401 | return err; |
406 | err = device_create_file(&ubi->dev, &dev_max_ec); | 402 | err = device_create_file(&ubi->dev, &dev_max_ec); |
407 | if (err) | 403 | if (err) |
408 | return err; | 404 | return err; |
409 | err = device_create_file(&ubi->dev, &dev_reserved_for_bad); | 405 | err = device_create_file(&ubi->dev, &dev_reserved_for_bad); |
410 | if (err) | 406 | if (err) |
411 | return err; | 407 | return err; |
412 | err = device_create_file(&ubi->dev, &dev_bad_peb_count); | 408 | err = device_create_file(&ubi->dev, &dev_bad_peb_count); |
413 | if (err) | 409 | if (err) |
414 | return err; | 410 | return err; |
415 | err = device_create_file(&ubi->dev, &dev_max_vol_count); | 411 | err = device_create_file(&ubi->dev, &dev_max_vol_count); |
416 | if (err) | 412 | if (err) |
417 | return err; | 413 | return err; |
418 | err = device_create_file(&ubi->dev, &dev_min_io_size); | 414 | err = device_create_file(&ubi->dev, &dev_min_io_size); |
419 | if (err) | 415 | if (err) |
420 | return err; | 416 | return err; |
421 | err = device_create_file(&ubi->dev, &dev_bgt_enabled); | 417 | err = device_create_file(&ubi->dev, &dev_bgt_enabled); |
422 | if (err) | 418 | if (err) |
423 | return err; | 419 | return err; |
424 | err = device_create_file(&ubi->dev, &dev_mtd_num); | 420 | err = device_create_file(&ubi->dev, &dev_mtd_num); |
425 | return err; | 421 | return err; |
426 | } | 422 | } |
427 | 423 | ||
/**
 * ubi_sysfs_close - close sysfs for an UBI device.
 * @ubi: UBI device description object
 *
 * Removes the device attribute files in the reverse order of their creation
 * in 'ubi_sysfs_init()', then unregisters @ubi->dev from the driver core.
 * Note, 'device_unregister()' drops the last reference, so the release
 * function may free the @ubi object after this call.
 */
static void ubi_sysfs_close(struct ubi_device *ubi)
{
	device_remove_file(&ubi->dev, &dev_mtd_num);
	device_remove_file(&ubi->dev, &dev_bgt_enabled);
	device_remove_file(&ubi->dev, &dev_min_io_size);
	device_remove_file(&ubi->dev, &dev_max_vol_count);
	device_remove_file(&ubi->dev, &dev_bad_peb_count);
	device_remove_file(&ubi->dev, &dev_reserved_for_bad);
	device_remove_file(&ubi->dev, &dev_max_ec);
	device_remove_file(&ubi->dev, &dev_volumes_count);
	device_remove_file(&ubi->dev, &dev_total_eraseblocks);
	device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
	device_remove_file(&ubi->dev, &dev_eraseblock_size);
	device_unregister(&ubi->dev);
}
447 | 443 | ||
448 | /** | 444 | /** |
449 | * kill_volumes - destroy all user volumes. | 445 | * kill_volumes - destroy all user volumes. |
450 | * @ubi: UBI device description object | 446 | * @ubi: UBI device description object |
451 | */ | 447 | */ |
452 | static void kill_volumes(struct ubi_device *ubi) | 448 | static void kill_volumes(struct ubi_device *ubi) |
453 | { | 449 | { |
454 | int i; | 450 | int i; |
455 | 451 | ||
456 | for (i = 0; i < ubi->vtbl_slots; i++) | 452 | for (i = 0; i < ubi->vtbl_slots; i++) |
457 | if (ubi->volumes[i]) | 453 | if (ubi->volumes[i]) |
458 | ubi_free_volume(ubi, ubi->volumes[i]); | 454 | ubi_free_volume(ubi, ubi->volumes[i]); |
459 | } | 455 | } |
460 | 456 | ||
/**
 * uif_init - initialize user interfaces for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken, otherwise set to %0
 *
 * This function initializes various user interfaces for an UBI device. If the
 * initialization fails at an early stage, this function frees all the
 * resources it allocated, returns an error, and @ref is set to %0. However,
 * if the initialization fails after the UBI device was registered in the
 * driver core subsystem, this function takes a reference to @ubi->dev, because
 * otherwise the release function ('dev_release()') would free whole @ubi
 * object. The @ref argument is set to %1 in this case. The caller has to put
 * this reference.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int uif_init(struct ubi_device *ubi, int *ref)
{
	int i, err;
	dev_t dev;

	*ref = 0;
	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);

	/*
	 * Major numbers for the UBI character devices are allocated
	 * dynamically. Major numbers of volume character devices are
	 * equivalent to ones of the corresponding UBI character device. Minor
	 * numbers of UBI character devices are 0, while minor numbers of
	 * volume character devices start from 1. Thus, we allocate one major
	 * number and ubi->vtbl_slots + 1 minor numbers.
	 */
	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
	if (err) {
		ubi_err("cannot register UBI character devices");
		return err;
	}

	ubi_assert(MINOR(dev) == 0);
	cdev_init(&ubi->cdev, &ubi_cdev_operations);
	dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
	ubi->cdev.owner = THIS_MODULE;

	err = cdev_add(&ubi->cdev, dev, 1);
	if (err) {
		ubi_err("cannot add character device");
		goto out_unreg;
	}

	err = ubi_sysfs_init(ubi, ref);
	if (err)
		goto out_sysfs;

	/* Register every user volume found in the volume table */
	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i]) {
			err = ubi_add_volume(ubi, ubi->volumes[i]);
			if (err) {
				ubi_err("cannot add volume %d", i);
				goto out_volumes;
			}
		}

	return 0;

out_volumes:
	kill_volumes(ubi);
out_sysfs:
	/*
	 * If @ref was set, the device was registered in the driver core, so
	 * take an extra reference to keep @ubi alive after
	 * 'ubi_sysfs_close()' unregisters the device (see the function
	 * description above - the caller puts this reference).
	 */
	if (*ref)
		get_device(&ubi->dev);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
out_unreg:
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
	ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
	return err;
}
539 | 535 | ||
/**
 * uif_close - close user interfaces for an UBI device.
 * @ubi: UBI device description object
 *
 * Note, since this function un-registers UBI volume device objects (@vol->dev),
 * the memory allocated for the volumes is freed as well (in the release
 * function).
 */
static void uif_close(struct ubi_device *ubi)
{
	kill_volumes(ubi);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}
555 | 551 | ||
556 | /** | 552 | /** |
557 | * free_internal_volumes - free internal volumes. | 553 | * free_internal_volumes - free internal volumes. |
558 | * @ubi: UBI device description object | 554 | * @ubi: UBI device description object |
559 | */ | 555 | */ |
560 | static void free_internal_volumes(struct ubi_device *ubi) | 556 | static void free_internal_volumes(struct ubi_device *ubi) |
561 | { | 557 | { |
562 | int i; | 558 | int i; |
563 | 559 | ||
564 | for (i = ubi->vtbl_slots; | 560 | for (i = ubi->vtbl_slots; |
565 | i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { | 561 | i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { |
566 | kfree(ubi->volumes[i]->eba_tbl); | 562 | kfree(ubi->volumes[i]->eba_tbl); |
567 | kfree(ubi->volumes[i]); | 563 | kfree(ubi->volumes[i]); |
568 | } | 564 | } |
569 | } | 565 | } |
570 | 566 | ||
/**
 * attach_by_scanning - attach an MTD device using scanning method.
 * @ubi: UBI device descriptor
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 *
 * Note, currently this is the only method to attach UBI devices. Hopefully in
 * the future we'll have more scalable attaching methods and avoid full media
 * scanning. But even in this case scanning will be needed as a fall-back
 * attaching method if there are some on-flash table corruptions.
 */
static int attach_by_scanning(struct ubi_device *ubi)
{
	int err;
	struct ubi_attach_info *ai;

	/* Scan the flash and build the attaching information */
	ai = ubi_scan(ubi);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	/* Copy the statistics gathered during scanning into @ubi */
	ubi->bad_peb_count = ai->bad_peb_count;
	ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
	ubi->corr_peb_count = ai->corr_peb_count;
	ubi->max_ec = ai->max_ec;
	ubi->mean_ec = ai->mean_ec;
	ubi_msg("max. sequence number: %llu", ai->max_sqnum);

	/* Initialize the volume table, WL and EBA sub-systems from @ai */
	err = ubi_read_volume_table(ubi, ai);
	if (err)
		goto out_ai;

	err = ubi_wl_init_scan(ubi, ai);
	if (err)
		goto out_vtbl;

	err = ubi_eba_init_scan(ubi, ai);
	if (err)
		goto out_wl;

	/* The attaching information is no longer needed */
	ubi_destroy_ai(ai);
	return 0;

out_wl:
	ubi_wl_close(ubi);
out_vtbl:
	free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_ai:
	ubi_destroy_ai(ai);
	return err;
}
623 | 619 | ||
/**
 * io_init - initialize I/O sub-system for a given UBI device.
 * @ubi: UBI device description object
 *
 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
 * assumed:
 *   o EC header is always at offset zero - this cannot be changed;
 *   o VID header starts just after the EC header at the closest address
 *     aligned to @io->hdrs_min_io_size;
 *   o data starts just after the VID header at the closest address aligned to
 *     @io->min_io_size
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int io_init(struct ubi_device *ubi)
{
	if (ubi->mtd->numeraseregions != 0) {
		/*
		 * Some flashes have several erase regions. Different regions
		 * may have different eraseblock size and other
		 * characteristics. It looks like mostly multi-region flashes
		 * have one "main" region and one or more small regions to
		 * store boot loader code or boot parameters or whatever. I
		 * guess we should just pick the largest region. But this is
		 * not implemented.
		 */
		ubi_err("multiple regions, not implemented");
		return -EINVAL;
	}

	if (ubi->vid_hdr_offset < 0)
		return -EINVAL;

	/*
	 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
	 * physical eraseblocks maximum.
	 */

	ubi->peb_size   = ubi->mtd->erasesize;
	ubi->peb_count  = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
	ubi->flash_size = ubi->mtd->size;

	if (mtd_can_have_bb(ubi->mtd))
		ubi->bad_allowed = 1;

	if (ubi->mtd->type == MTD_NORFLASH) {
		/* NOR flash has byte-granularity writes */
		ubi_assert(ubi->mtd->writesize == 1);
		ubi->nor_flash = 1;
	}

	ubi->min_io_size = ubi->mtd->writesize;
	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;

	/*
	 * Make sure minimal I/O unit is power of 2. Note, there is no
	 * fundamental reason for this assumption. It is just an optimization
	 * which allows us to avoid costly division operations.
	 */
	if (!is_power_of_2(ubi->min_io_size)) {
		ubi_err("min. I/O unit (%d) is not power of 2",
			ubi->min_io_size);
		return -EINVAL;
	}

	ubi_assert(ubi->hdrs_min_io_size > 0);
	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);

	ubi->max_write_size = ubi->mtd->writebufsize;
	/*
	 * Maximum write size has to be greater or equivalent to min. I/O
	 * size, and be multiple of min. I/O size.
	 */
	if (ubi->max_write_size < ubi->min_io_size ||
	    ubi->max_write_size % ubi->min_io_size ||
	    !is_power_of_2(ubi->max_write_size)) {
		ubi_err("bad write buffer size %d for %d min. I/O unit",
			ubi->max_write_size, ubi->min_io_size);
		return -EINVAL;
	}

	/* Calculate default aligned sizes of EC and VID headers */
	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);

	dbg_msg("min_io_size      %d", ubi->min_io_size);
	dbg_msg("max_write_size   %d", ubi->max_write_size);
	dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
	dbg_msg("ec_hdr_alsize    %d", ubi->ec_hdr_alsize);
	dbg_msg("vid_hdr_alsize   %d", ubi->vid_hdr_alsize);

	if (ubi->vid_hdr_offset == 0)
		/* Default offset */
		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
				      ubi->ec_hdr_alsize;
	else {
		/* User-specified offset: split into aligned base + shift */
		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
						~(ubi->hdrs_min_io_size - 1);
		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
						ubi->vid_hdr_aloffset;
	}

	/* Similar for the data offset */
	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);

	dbg_msg("vid_hdr_offset   %d", ubi->vid_hdr_offset);
	dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
	dbg_msg("vid_hdr_shift    %d", ubi->vid_hdr_shift);
	dbg_msg("leb_start        %d", ubi->leb_start);

	/* The shift must be aligned to 32-bit boundary */
	if (ubi->vid_hdr_shift % 4) {
		ubi_err("unaligned VID header shift %d",
			ubi->vid_hdr_shift);
		return -EINVAL;
	}

	/* Check sanity */
	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
	    ubi->leb_start & (ubi->min_io_size - 1)) {
		ubi_err("bad VID header (%d) or data offsets (%d)",
			ubi->vid_hdr_offset, ubi->leb_start);
		return -EINVAL;
	}

	/*
	 * Set maximum amount of physical erroneous eraseblocks to be 10%.
	 * Erroneous PEB are those which have read errors.
	 */
	ubi->max_erroneous = ubi->peb_count / 10;
	if (ubi->max_erroneous < 16)
		ubi->max_erroneous = 16;
	dbg_msg("max_erroneous    %d", ubi->max_erroneous);

	/*
	 * It may happen that EC and VID headers are situated in one minimal
	 * I/O unit. In this case we can only accept this UBI image in
	 * read-only mode.
	 */
	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
		ubi_warn("EC and VID headers are in the same minimal I/O unit, "
			 "switch to read-only mode");
		ubi->ro_mode = 1;
	}

	ubi->leb_size = ubi->peb_size - ubi->leb_start;

	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
		ubi_msg("MTD device %d is write-protected, attach in "
			"read-only mode", ubi->mtd->index);
		ubi->ro_mode = 1;
	}

	ubi_msg("physical eraseblock size:   %d bytes (%d KiB)",
		ubi->peb_size, ubi->peb_size >> 10);
	ubi_msg("logical eraseblock size:    %d bytes", ubi->leb_size);
	ubi_msg("smallest flash I/O unit:    %d", ubi->min_io_size);
	if (ubi->hdrs_min_io_size != ubi->min_io_size)
		ubi_msg("sub-page size:              %d",
			ubi->hdrs_min_io_size);
	ubi_msg("VID header offset:          %d (aligned %d)",
		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
	ubi_msg("data offset:                %d", ubi->leb_start);

	/*
	 * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
	 * unfortunately, MTD does not provide this information. We should loop
	 * over all physical eraseblocks and invoke mtd->block_is_bad() for
	 * each physical eraseblock. So, we leave @ubi->bad_peb_count
	 * uninitialized so far.
	 */

	return 0;
}
802 | 798 | ||
/**
 * autoresize - re-size the volume which has the "auto-resize" flag set.
 * @ubi: UBI device description object
 * @vol_id: ID of the volume to re-size
 *
 * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
 * the volume table to the largest possible size. See comments in ubi-header.h
 * for more description of the flag. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int autoresize(struct ubi_device *ubi, int vol_id)
{
	struct ubi_volume_desc desc;
	struct ubi_volume *vol = ubi->volumes[vol_id];
	int err, old_reserved_pebs = vol->reserved_pebs;

	/*
	 * Clear the auto-resize flag in the volume in-memory copy of the
	 * volume table, and 'ubi_resize_volume()' will propagate this change
	 * to the flash.
	 */
	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;

	if (ubi->avail_pebs == 0) {
		struct ubi_vtbl_record vtbl_rec;

		/*
		 * No available PEBs to re-size the volume, clear the flag on
		 * flash and exit.
		 */
		memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
		       sizeof(struct ubi_vtbl_record));
		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
		if (err)
			ubi_err("cannot clean auto-resize flag for volume %d",
				vol_id);
	} else {
		/* Grow the volume by all currently available PEBs */
		desc.vol = vol;
		err = ubi_resize_volume(&desc,
					old_reserved_pebs + ubi->avail_pebs);
		if (err)
			ubi_err("cannot auto-resize volume %d", vol_id);
	}

	if (err)
		return err;

	ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
		vol->name, old_reserved_pebs, vol->reserved_pebs);
	return 0;
}
854 | 850 | ||
855 | /** | 851 | /** |
856 | * ubi_attach_mtd_dev - attach an MTD device. | 852 | * ubi_attach_mtd_dev - attach an MTD device. |
857 | * @mtd: MTD device description object | 853 | * @mtd: MTD device description object |
858 | * @ubi_num: number to assign to the new UBI device | 854 | * @ubi_num: number to assign to the new UBI device |
859 | * @vid_hdr_offset: VID header offset | 855 | * @vid_hdr_offset: VID header offset |
860 | * | 856 | * |
861 | * This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number | 857 | * This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number |
862 | * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in | 858 | * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in |
863 | * which case this function finds a vacant device number and assigns it | 859 | * which case this function finds a vacant device number and assigns it |
864 | * automatically. Returns the new UBI device number in case of success and a | 860 | * automatically. Returns the new UBI device number in case of success and a |
865 | * negative error code in case of failure. | 861 | * negative error code in case of failure. |
866 | * | 862 | * |
867 | * Note, the invocations of this function has to be serialized by the | 863 | * Note, the invocations of this function has to be serialized by the |
868 | * @ubi_devices_mutex. | 864 | * @ubi_devices_mutex. |
869 | */ | 865 | */ |
870 | int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) | 866 | int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) |
871 | { | 867 | { |
872 | struct ubi_device *ubi; | 868 | struct ubi_device *ubi; |
873 | int i, err, ref = 0; | 869 | int i, err, ref = 0; |
874 | 870 | ||
875 | /* | 871 | /* |
876 | * Check if we already have the same MTD device attached. | 872 | * Check if we already have the same MTD device attached. |
877 | * | 873 | * |
878 | * Note, this function assumes that UBI devices creations and deletions | 874 | * Note, this function assumes that UBI devices creations and deletions |
879 | * are serialized, so it does not take the &ubi_devices_lock. | 875 | * are serialized, so it does not take the &ubi_devices_lock. |
880 | */ | 876 | */ |
881 | for (i = 0; i < UBI_MAX_DEVICES; i++) { | 877 | for (i = 0; i < UBI_MAX_DEVICES; i++) { |
882 | ubi = ubi_devices[i]; | 878 | ubi = ubi_devices[i]; |
883 | if (ubi && mtd->index == ubi->mtd->index) { | 879 | if (ubi && mtd->index == ubi->mtd->index) { |
884 | ubi_err("mtd%d is already attached to ubi%d", | 880 | ubi_err("mtd%d is already attached to ubi%d", |
885 | mtd->index, i); | 881 | mtd->index, i); |
886 | return -EEXIST; | 882 | return -EEXIST; |
887 | } | 883 | } |
888 | } | 884 | } |
889 | 885 | ||
890 | /* | 886 | /* |
891 | * Make sure this MTD device is not emulated on top of an UBI volume | 887 | * Make sure this MTD device is not emulated on top of an UBI volume |
892 | * already. Well, generally this recursion works fine, but there are | 888 | * already. Well, generally this recursion works fine, but there are |
893 | * different problems like the UBI module takes a reference to itself | 889 | * different problems like the UBI module takes a reference to itself |
894 | * by attaching (and thus, opening) the emulated MTD device. This | 890 | * by attaching (and thus, opening) the emulated MTD device. This |
895 | * results in inability to unload the module. And in general it makes | 891 | * results in inability to unload the module. And in general it makes |
896 | * no sense to attach emulated MTD devices, so we prohibit this. | 892 | * no sense to attach emulated MTD devices, so we prohibit this. |
897 | */ | 893 | */ |
898 | if (mtd->type == MTD_UBIVOLUME) { | 894 | if (mtd->type == MTD_UBIVOLUME) { |
899 | ubi_err("refuse attaching mtd%d - it is already emulated on " | 895 | ubi_err("refuse attaching mtd%d - it is already emulated on " |
900 | "top of UBI", mtd->index); | 896 | "top of UBI", mtd->index); |
901 | return -EINVAL; | 897 | return -EINVAL; |
902 | } | 898 | } |
903 | 899 | ||
904 | if (ubi_num == UBI_DEV_NUM_AUTO) { | 900 | if (ubi_num == UBI_DEV_NUM_AUTO) { |
905 | /* Search for an empty slot in the @ubi_devices array */ | 901 | /* Search for an empty slot in the @ubi_devices array */ |
906 | for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) | 902 | for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) |
907 | if (!ubi_devices[ubi_num]) | 903 | if (!ubi_devices[ubi_num]) |
908 | break; | 904 | break; |
909 | if (ubi_num == UBI_MAX_DEVICES) { | 905 | if (ubi_num == UBI_MAX_DEVICES) { |
910 | ubi_err("only %d UBI devices may be created", | 906 | ubi_err("only %d UBI devices may be created", |
911 | UBI_MAX_DEVICES); | 907 | UBI_MAX_DEVICES); |
912 | return -ENFILE; | 908 | return -ENFILE; |
913 | } | 909 | } |
914 | } else { | 910 | } else { |
915 | if (ubi_num >= UBI_MAX_DEVICES) | 911 | if (ubi_num >= UBI_MAX_DEVICES) |
916 | return -EINVAL; | 912 | return -EINVAL; |
917 | 913 | ||
918 | /* Make sure ubi_num is not busy */ | 914 | /* Make sure ubi_num is not busy */ |
919 | if (ubi_devices[ubi_num]) { | 915 | if (ubi_devices[ubi_num]) { |
920 | ubi_err("ubi%d already exists", ubi_num); | 916 | ubi_err("ubi%d already exists", ubi_num); |
921 | return -EEXIST; | 917 | return -EEXIST; |
922 | } | 918 | } |
923 | } | 919 | } |
924 | 920 | ||
925 | ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL); | 921 | ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL); |
926 | if (!ubi) | 922 | if (!ubi) |
927 | return -ENOMEM; | 923 | return -ENOMEM; |
928 | 924 | ||
929 | ubi->mtd = mtd; | 925 | ubi->mtd = mtd; |
930 | ubi->ubi_num = ubi_num; | 926 | ubi->ubi_num = ubi_num; |
931 | ubi->vid_hdr_offset = vid_hdr_offset; | 927 | ubi->vid_hdr_offset = vid_hdr_offset; |
932 | ubi->autoresize_vol_id = -1; | 928 | ubi->autoresize_vol_id = -1; |
933 | 929 | ||
934 | mutex_init(&ubi->buf_mutex); | 930 | mutex_init(&ubi->buf_mutex); |
935 | mutex_init(&ubi->ckvol_mutex); | 931 | mutex_init(&ubi->ckvol_mutex); |
936 | mutex_init(&ubi->device_mutex); | 932 | mutex_init(&ubi->device_mutex); |
937 | spin_lock_init(&ubi->volumes_lock); | 933 | spin_lock_init(&ubi->volumes_lock); |
938 | 934 | ||
939 | ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num); | 935 | ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num); |
940 | dbg_msg("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb)); | 936 | dbg_msg("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb)); |
941 | dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry)); | 937 | dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry)); |
942 | 938 | ||
943 | err = io_init(ubi); | 939 | err = io_init(ubi); |
944 | if (err) | 940 | if (err) |
945 | goto out_free; | 941 | goto out_free; |
946 | 942 | ||
947 | err = -ENOMEM; | 943 | err = -ENOMEM; |
948 | ubi->peb_buf = vmalloc(ubi->peb_size); | 944 | ubi->peb_buf = vmalloc(ubi->peb_size); |
949 | if (!ubi->peb_buf) | 945 | if (!ubi->peb_buf) |
950 | goto out_free; | 946 | goto out_free; |
951 | 947 | ||
952 | err = ubi_debugging_init_dev(ubi); | 948 | err = ubi_debugging_init_dev(ubi); |
953 | if (err) | 949 | if (err) |
954 | goto out_free; | 950 | goto out_free; |
955 | 951 | ||
956 | err = attach_by_scanning(ubi); | 952 | err = attach_by_scanning(ubi); |
957 | if (err) { | 953 | if (err) { |
958 | ubi_err("failed to attach by scanning, error %d", err); | 954 | ubi_err("failed to attach by scanning, error %d", err); |
959 | goto out_debugging; | 955 | goto out_debugging; |
960 | } | 956 | } |
961 | 957 | ||
962 | if (ubi->autoresize_vol_id != -1) { | 958 | if (ubi->autoresize_vol_id != -1) { |
963 | err = autoresize(ubi, ubi->autoresize_vol_id); | 959 | err = autoresize(ubi, ubi->autoresize_vol_id); |
964 | if (err) | 960 | if (err) |
965 | goto out_detach; | 961 | goto out_detach; |
966 | } | 962 | } |
967 | 963 | ||
968 | err = uif_init(ubi, &ref); | 964 | err = uif_init(ubi, &ref); |
969 | if (err) | 965 | if (err) |
970 | goto out_detach; | 966 | goto out_detach; |
971 | 967 | ||
972 | err = ubi_debugfs_init_dev(ubi); | 968 | err = ubi_debugfs_init_dev(ubi); |
973 | if (err) | 969 | if (err) |
974 | goto out_uif; | 970 | goto out_uif; |
975 | 971 | ||
976 | ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name); | 972 | ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name); |
977 | if (IS_ERR(ubi->bgt_thread)) { | 973 | if (IS_ERR(ubi->bgt_thread)) { |
978 | err = PTR_ERR(ubi->bgt_thread); | 974 | err = PTR_ERR(ubi->bgt_thread); |
979 | ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, | 975 | ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, |
980 | err); | 976 | err); |
981 | goto out_debugfs; | 977 | goto out_debugfs; |
982 | } | 978 | } |
983 | 979 | ||
984 | ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num); | 980 | ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num); |
985 | ubi_msg("MTD device name: \"%s\"", mtd->name); | 981 | ubi_msg("MTD device name: \"%s\"", mtd->name); |
986 | ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20); | 982 | ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20); |
987 | ubi_msg("number of good PEBs: %d", ubi->good_peb_count); | 983 | ubi_msg("number of good PEBs: %d", ubi->good_peb_count); |
988 | ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count); | 984 | ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count); |
989 | ubi_msg("number of corrupted PEBs: %d", ubi->corr_peb_count); | 985 | ubi_msg("number of corrupted PEBs: %d", ubi->corr_peb_count); |
990 | ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots); | 986 | ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots); |
991 | ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD); | 987 | ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD); |
992 | ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT); | 988 | ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT); |
993 | ubi_msg("number of user volumes: %d", | 989 | ubi_msg("number of user volumes: %d", |
994 | ubi->vol_count - UBI_INT_VOL_COUNT); | 990 | ubi->vol_count - UBI_INT_VOL_COUNT); |
995 | ubi_msg("available PEBs: %d", ubi->avail_pebs); | 991 | ubi_msg("available PEBs: %d", ubi->avail_pebs); |
996 | ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs); | 992 | ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs); |
997 | ubi_msg("number of PEBs reserved for bad PEB handling: %d", | 993 | ubi_msg("number of PEBs reserved for bad PEB handling: %d", |
998 | ubi->beb_rsvd_pebs); | 994 | ubi->beb_rsvd_pebs); |
999 | ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); | 995 | ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); |
1000 | ubi_msg("image sequence number: %d", ubi->image_seq); | 996 | ubi_msg("image sequence number: %d", ubi->image_seq); |
1001 | 997 | ||
1002 | /* | 998 | /* |
1003 | * The below lock makes sure we do not race with 'ubi_thread()' which | 999 | * The below lock makes sure we do not race with 'ubi_thread()' which |
1004 | * checks @ubi->thread_enabled. Otherwise we may fail to wake it up. | 1000 | * checks @ubi->thread_enabled. Otherwise we may fail to wake it up. |
1005 | */ | 1001 | */ |
1006 | spin_lock(&ubi->wl_lock); | 1002 | spin_lock(&ubi->wl_lock); |
1007 | ubi->thread_enabled = 1; | 1003 | ubi->thread_enabled = 1; |
1008 | wake_up_process(ubi->bgt_thread); | 1004 | wake_up_process(ubi->bgt_thread); |
1009 | spin_unlock(&ubi->wl_lock); | 1005 | spin_unlock(&ubi->wl_lock); |
1010 | 1006 | ||
1011 | ubi_devices[ubi_num] = ubi; | 1007 | ubi_devices[ubi_num] = ubi; |
1012 | ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL); | 1008 | ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL); |
1013 | return ubi_num; | 1009 | return ubi_num; |
1014 | 1010 | ||
1015 | out_debugfs: | 1011 | out_debugfs: |
1016 | ubi_debugfs_exit_dev(ubi); | 1012 | ubi_debugfs_exit_dev(ubi); |
1017 | out_uif: | 1013 | out_uif: |
1018 | get_device(&ubi->dev); | 1014 | get_device(&ubi->dev); |
1019 | ubi_assert(ref); | 1015 | ubi_assert(ref); |
1020 | uif_close(ubi); | 1016 | uif_close(ubi); |
1021 | out_detach: | 1017 | out_detach: |
1022 | ubi_wl_close(ubi); | 1018 | ubi_wl_close(ubi); |
1023 | free_internal_volumes(ubi); | 1019 | free_internal_volumes(ubi); |
1024 | vfree(ubi->vtbl); | 1020 | vfree(ubi->vtbl); |
1025 | out_debugging: | 1021 | out_debugging: |
1026 | ubi_debugging_exit_dev(ubi); | 1022 | ubi_debugging_exit_dev(ubi); |
1027 | out_free: | 1023 | out_free: |
1028 | vfree(ubi->peb_buf); | 1024 | vfree(ubi->peb_buf); |
1029 | if (ref) | 1025 | if (ref) |
1030 | put_device(&ubi->dev); | 1026 | put_device(&ubi->dev); |
1031 | else | 1027 | else |
1032 | kfree(ubi); | 1028 | kfree(ubi); |
1033 | return err; | 1029 | return err; |
1034 | } | 1030 | } |
1035 | 1031 | ||
1036 | /** | 1032 | /** |
1037 | * ubi_detach_mtd_dev - detach an MTD device. | 1033 | * ubi_detach_mtd_dev - detach an MTD device. |
1038 | * @ubi_num: UBI device number to detach from | 1034 | * @ubi_num: UBI device number to detach from |
1039 | * @anyway: detach MTD even if device reference count is not zero | 1035 | * @anyway: detach MTD even if device reference count is not zero |
1040 | * | 1036 | * |
1041 | * This function destroys an UBI device number @ubi_num and detaches the | 1037 | * This function destroys an UBI device number @ubi_num and detaches the |
1042 | * underlying MTD device. Returns zero in case of success and %-EBUSY if the | 1038 | * underlying MTD device. Returns zero in case of success and %-EBUSY if the |
1043 | * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not | 1039 | * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not |
1044 | * exist. | 1040 | * exist. |
1045 | * | 1041 | * |
1046 | * Note, the invocations of this function has to be serialized by the | 1042 | * Note, the invocations of this function has to be serialized by the |
1047 | * @ubi_devices_mutex. | 1043 | * @ubi_devices_mutex. |
1048 | */ | 1044 | */ |
1049 | int ubi_detach_mtd_dev(int ubi_num, int anyway) | 1045 | int ubi_detach_mtd_dev(int ubi_num, int anyway) |
1050 | { | 1046 | { |
1051 | struct ubi_device *ubi; | 1047 | struct ubi_device *ubi; |
1052 | 1048 | ||
1053 | if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) | 1049 | if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) |
1054 | return -EINVAL; | 1050 | return -EINVAL; |
1055 | 1051 | ||
1056 | ubi = ubi_get_device(ubi_num); | 1052 | ubi = ubi_get_device(ubi_num); |
1057 | if (!ubi) | 1053 | if (!ubi) |
1058 | return -EINVAL; | 1054 | return -EINVAL; |
1059 | 1055 | ||
1060 | spin_lock(&ubi_devices_lock); | 1056 | spin_lock(&ubi_devices_lock); |
1061 | put_device(&ubi->dev); | 1057 | put_device(&ubi->dev); |
1062 | ubi->ref_count -= 1; | 1058 | ubi->ref_count -= 1; |
1063 | if (ubi->ref_count) { | 1059 | if (ubi->ref_count) { |
1064 | if (!anyway) { | 1060 | if (!anyway) { |
1065 | spin_unlock(&ubi_devices_lock); | 1061 | spin_unlock(&ubi_devices_lock); |
1066 | return -EBUSY; | 1062 | return -EBUSY; |
1067 | } | 1063 | } |
1068 | /* This may only happen if there is a bug */ | 1064 | /* This may only happen if there is a bug */ |
1069 | ubi_err("%s reference count %d, destroy anyway", | 1065 | ubi_err("%s reference count %d, destroy anyway", |
1070 | ubi->ubi_name, ubi->ref_count); | 1066 | ubi->ubi_name, ubi->ref_count); |
1071 | } | 1067 | } |
1072 | ubi_devices[ubi_num] = NULL; | 1068 | ubi_devices[ubi_num] = NULL; |
1073 | spin_unlock(&ubi_devices_lock); | 1069 | spin_unlock(&ubi_devices_lock); |
1074 | 1070 | ||
1075 | ubi_assert(ubi_num == ubi->ubi_num); | 1071 | ubi_assert(ubi_num == ubi->ubi_num); |
1076 | ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL); | 1072 | ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL); |
1077 | dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); | 1073 | dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); |
1078 | 1074 | ||
1079 | /* | 1075 | /* |
1080 | * Before freeing anything, we have to stop the background thread to | 1076 | * Before freeing anything, we have to stop the background thread to |
1081 | * prevent it from doing anything on this device while we are freeing. | 1077 | * prevent it from doing anything on this device while we are freeing. |
1082 | */ | 1078 | */ |
1083 | if (ubi->bgt_thread) | 1079 | if (ubi->bgt_thread) |
1084 | kthread_stop(ubi->bgt_thread); | 1080 | kthread_stop(ubi->bgt_thread); |
1085 | 1081 | ||
1086 | /* | 1082 | /* |
1087 | * Get a reference to the device in order to prevent 'dev_release()' | 1083 | * Get a reference to the device in order to prevent 'dev_release()' |
1088 | * from freeing the @ubi object. | 1084 | * from freeing the @ubi object. |
1089 | */ | 1085 | */ |
1090 | get_device(&ubi->dev); | 1086 | get_device(&ubi->dev); |
1091 | 1087 | ||
1092 | ubi_debugfs_exit_dev(ubi); | 1088 | ubi_debugfs_exit_dev(ubi); |
1093 | uif_close(ubi); | 1089 | uif_close(ubi); |
1094 | ubi_wl_close(ubi); | 1090 | ubi_wl_close(ubi); |
1095 | free_internal_volumes(ubi); | 1091 | free_internal_volumes(ubi); |
1096 | vfree(ubi->vtbl); | 1092 | vfree(ubi->vtbl); |
1097 | put_mtd_device(ubi->mtd); | 1093 | put_mtd_device(ubi->mtd); |
1098 | ubi_debugging_exit_dev(ubi); | 1094 | ubi_debugging_exit_dev(ubi); |
1099 | vfree(ubi->peb_buf); | 1095 | vfree(ubi->peb_buf); |
1100 | ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); | 1096 | ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); |
1101 | put_device(&ubi->dev); | 1097 | put_device(&ubi->dev); |
1102 | return 0; | 1098 | return 0; |
1103 | } | 1099 | } |
1104 | 1100 | ||
1105 | /** | 1101 | /** |
1106 | * open_mtd_by_chdev - open an MTD device by its character device node path. | 1102 | * open_mtd_by_chdev - open an MTD device by its character device node path. |
1107 | * @mtd_dev: MTD character device node path | 1103 | * @mtd_dev: MTD character device node path |
1108 | * | 1104 | * |
1109 | * This helper function opens an MTD device by its character node device path. | 1105 | * This helper function opens an MTD device by its character node device path. |
1110 | * Returns MTD device description object in case of success and a negative | 1106 | * Returns MTD device description object in case of success and a negative |
1111 | * error code in case of failure. | 1107 | * error code in case of failure. |
1112 | */ | 1108 | */ |
1113 | static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev) | 1109 | static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev) |
1114 | { | 1110 | { |
1115 | int err, major, minor, mode; | 1111 | int err, major, minor, mode; |
1116 | struct path path; | 1112 | struct path path; |
1117 | 1113 | ||
1118 | /* Probably this is an MTD character device node path */ | 1114 | /* Probably this is an MTD character device node path */ |
1119 | err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path); | 1115 | err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path); |
1120 | if (err) | 1116 | if (err) |
1121 | return ERR_PTR(err); | 1117 | return ERR_PTR(err); |
1122 | 1118 | ||
1123 | /* MTD device number is defined by the major / minor numbers */ | 1119 | /* MTD device number is defined by the major / minor numbers */ |
1124 | major = imajor(path.dentry->d_inode); | 1120 | major = imajor(path.dentry->d_inode); |
1125 | minor = iminor(path.dentry->d_inode); | 1121 | minor = iminor(path.dentry->d_inode); |
1126 | mode = path.dentry->d_inode->i_mode; | 1122 | mode = path.dentry->d_inode->i_mode; |
1127 | path_put(&path); | 1123 | path_put(&path); |
1128 | if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode)) | 1124 | if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode)) |
1129 | return ERR_PTR(-EINVAL); | 1125 | return ERR_PTR(-EINVAL); |
1130 | 1126 | ||
1131 | if (minor & 1) | 1127 | if (minor & 1) |
1132 | /* | 1128 | /* |
1133 | * Just do not think the "/dev/mtdrX" devices support is need, | 1129 | * Just do not think the "/dev/mtdrX" devices support is need, |
1134 | * so do not support them to avoid doing extra work. | 1130 | * so do not support them to avoid doing extra work. |
1135 | */ | 1131 | */ |
1136 | return ERR_PTR(-EINVAL); | 1132 | return ERR_PTR(-EINVAL); |
1137 | 1133 | ||
1138 | return get_mtd_device(NULL, minor / 2); | 1134 | return get_mtd_device(NULL, minor / 2); |
1139 | } | 1135 | } |
1140 | 1136 | ||
1141 | /** | 1137 | /** |
1142 | * open_mtd_device - open MTD device by name, character device path, or number. | 1138 | * open_mtd_device - open MTD device by name, character device path, or number. |
1143 | * @mtd_dev: name, character device node path, or MTD device device number | 1139 | * @mtd_dev: name, character device node path, or MTD device device number |
1144 | * | 1140 | * |
1145 | * This function tries to open and MTD device described by @mtd_dev string, | 1141 | * This function tries to open and MTD device described by @mtd_dev string, |
1146 | * which is first treated as ASCII MTD device number, and if it is not true, it | 1142 | * which is first treated as ASCII MTD device number, and if it is not true, it |
1147 | * is treated as MTD device name, and if that is also not true, it is treated | 1143 | * is treated as MTD device name, and if that is also not true, it is treated |
1148 | * as MTD character device node path. Returns MTD device description object in | 1144 | * as MTD character device node path. Returns MTD device description object in |
1149 | * case of success and a negative error code in case of failure. | 1145 | * case of success and a negative error code in case of failure. |
1150 | */ | 1146 | */ |
1151 | static struct mtd_info * __init open_mtd_device(const char *mtd_dev) | 1147 | static struct mtd_info * __init open_mtd_device(const char *mtd_dev) |
1152 | { | 1148 | { |
1153 | struct mtd_info *mtd; | 1149 | struct mtd_info *mtd; |
1154 | int mtd_num; | 1150 | int mtd_num; |
1155 | char *endp; | 1151 | char *endp; |
1156 | 1152 | ||
1157 | mtd_num = simple_strtoul(mtd_dev, &endp, 0); | 1153 | mtd_num = simple_strtoul(mtd_dev, &endp, 0); |
1158 | if (*endp != '\0' || mtd_dev == endp) { | 1154 | if (*endp != '\0' || mtd_dev == endp) { |
1159 | /* | 1155 | /* |
1160 | * This does not look like an ASCII integer, probably this is | 1156 | * This does not look like an ASCII integer, probably this is |
1161 | * MTD device name. | 1157 | * MTD device name. |
1162 | */ | 1158 | */ |
1163 | mtd = get_mtd_device_nm(mtd_dev); | 1159 | mtd = get_mtd_device_nm(mtd_dev); |
1164 | if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV) | 1160 | if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV) |
1165 | /* Probably this is an MTD character device node path */ | 1161 | /* Probably this is an MTD character device node path */ |
1166 | mtd = open_mtd_by_chdev(mtd_dev); | 1162 | mtd = open_mtd_by_chdev(mtd_dev); |
1167 | } else | 1163 | } else |
1168 | mtd = get_mtd_device(NULL, mtd_num); | 1164 | mtd = get_mtd_device(NULL, mtd_num); |
1169 | 1165 | ||
1170 | return mtd; | 1166 | return mtd; |
1171 | } | 1167 | } |
1172 | 1168 | ||
1173 | static int __init ubi_init(void) | 1169 | static int __init ubi_init(void) |
1174 | { | 1170 | { |
1175 | int err, i, k; | 1171 | int err, i, k; |
1176 | 1172 | ||
1177 | /* Ensure that EC and VID headers have correct size */ | 1173 | /* Ensure that EC and VID headers have correct size */ |
1178 | BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64); | 1174 | BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64); |
1179 | BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); | 1175 | BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); |
1180 | 1176 | ||
1181 | if (mtd_devs > UBI_MAX_DEVICES) { | 1177 | if (mtd_devs > UBI_MAX_DEVICES) { |
1182 | ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES); | 1178 | ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES); |
1183 | return -EINVAL; | 1179 | return -EINVAL; |
1184 | } | 1180 | } |
1185 | 1181 | ||
1186 | /* Create base sysfs directory and sysfs files */ | 1182 | /* Create base sysfs directory and sysfs files */ |
1187 | ubi_class = class_create(THIS_MODULE, UBI_NAME_STR); | 1183 | ubi_class = class_create(THIS_MODULE, UBI_NAME_STR); |
1188 | if (IS_ERR(ubi_class)) { | 1184 | if (IS_ERR(ubi_class)) { |
1189 | err = PTR_ERR(ubi_class); | 1185 | err = PTR_ERR(ubi_class); |
1190 | ubi_err("cannot create UBI class"); | 1186 | ubi_err("cannot create UBI class"); |
1191 | goto out; | 1187 | goto out; |
1192 | } | 1188 | } |
1193 | 1189 | ||
1194 | err = class_create_file(ubi_class, &ubi_version); | 1190 | err = class_create_file(ubi_class, &ubi_version); |
1195 | if (err) { | 1191 | if (err) { |
1196 | ubi_err("cannot create sysfs file"); | 1192 | ubi_err("cannot create sysfs file"); |
1197 | goto out_class; | 1193 | goto out_class; |
1198 | } | 1194 | } |
1199 | 1195 | ||
1200 | err = misc_register(&ubi_ctrl_cdev); | 1196 | err = misc_register(&ubi_ctrl_cdev); |
1201 | if (err) { | 1197 | if (err) { |
1202 | ubi_err("cannot register device"); | 1198 | ubi_err("cannot register device"); |
1203 | goto out_version; | 1199 | goto out_version; |
1204 | } | 1200 | } |
1205 | 1201 | ||
1206 | ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab", | 1202 | ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab", |
1207 | sizeof(struct ubi_wl_entry), | 1203 | sizeof(struct ubi_wl_entry), |
1208 | 0, 0, NULL); | 1204 | 0, 0, NULL); |
1209 | if (!ubi_wl_entry_slab) | 1205 | if (!ubi_wl_entry_slab) |
1210 | goto out_dev_unreg; | 1206 | goto out_dev_unreg; |
1211 | 1207 | ||
1212 | err = ubi_debugfs_init(); | 1208 | err = ubi_debugfs_init(); |
1213 | if (err) | 1209 | if (err) |
1214 | goto out_slab; | 1210 | goto out_slab; |
1215 | 1211 | ||
1216 | 1212 | ||
1217 | /* Attach MTD devices */ | 1213 | /* Attach MTD devices */ |
1218 | for (i = 0; i < mtd_devs; i++) { | 1214 | for (i = 0; i < mtd_devs; i++) { |
1219 | struct mtd_dev_param *p = &mtd_dev_param[i]; | 1215 | struct mtd_dev_param *p = &mtd_dev_param[i]; |
1220 | struct mtd_info *mtd; | 1216 | struct mtd_info *mtd; |
1221 | 1217 | ||
1222 | cond_resched(); | 1218 | cond_resched(); |
1223 | 1219 | ||
1224 | mtd = open_mtd_device(p->name); | 1220 | mtd = open_mtd_device(p->name); |
1225 | if (IS_ERR(mtd)) { | 1221 | if (IS_ERR(mtd)) { |
1226 | err = PTR_ERR(mtd); | 1222 | err = PTR_ERR(mtd); |
1227 | goto out_detach; | 1223 | goto out_detach; |
1228 | } | 1224 | } |
1229 | 1225 | ||
1230 | mutex_lock(&ubi_devices_mutex); | 1226 | mutex_lock(&ubi_devices_mutex); |
1231 | err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, | 1227 | err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, |
1232 | p->vid_hdr_offs); | 1228 | p->vid_hdr_offs); |
1233 | mutex_unlock(&ubi_devices_mutex); | 1229 | mutex_unlock(&ubi_devices_mutex); |
1234 | if (err < 0) { | 1230 | if (err < 0) { |
1235 | ubi_err("cannot attach mtd%d", mtd->index); | 1231 | ubi_err("cannot attach mtd%d", mtd->index); |
1236 | put_mtd_device(mtd); | 1232 | put_mtd_device(mtd); |
1237 | 1233 | ||
1238 | /* | 1234 | /* |
1239 | * Originally UBI stopped initializing on any error. | 1235 | * Originally UBI stopped initializing on any error. |
1240 | * However, later on it was found out that this | 1236 | * However, later on it was found out that this |
1241 | * behavior is not very good when UBI is compiled into | 1237 | * behavior is not very good when UBI is compiled into |
1242 | * the kernel and the MTD devices to attach are passed | 1238 | * the kernel and the MTD devices to attach are passed |
1243 | * through the command line. Indeed, UBI failure | 1239 | * through the command line. Indeed, UBI failure |
1244 | * stopped whole boot sequence. | 1240 | * stopped whole boot sequence. |
1245 | * | 1241 | * |
1246 | * To fix this, we changed the behavior for the | 1242 | * To fix this, we changed the behavior for the |
1247 | * non-module case, but preserved the old behavior for | 1243 | * non-module case, but preserved the old behavior for |
1248 | * the module case, just for compatibility. This is a | 1244 | * the module case, just for compatibility. This is a |
1249 | * little inconsistent, though. | 1245 | * little inconsistent, though. |
1250 | */ | 1246 | */ |
1251 | if (ubi_is_module()) | 1247 | if (ubi_is_module()) |
1252 | goto out_detach; | 1248 | goto out_detach; |
1253 | } | 1249 | } |
1254 | } | 1250 | } |
1255 | 1251 | ||
1256 | return 0; | 1252 | return 0; |
1257 | 1253 | ||
1258 | out_detach: | 1254 | out_detach: |
1259 | for (k = 0; k < i; k++) | 1255 | for (k = 0; k < i; k++) |
1260 | if (ubi_devices[k]) { | 1256 | if (ubi_devices[k]) { |
1261 | mutex_lock(&ubi_devices_mutex); | 1257 | mutex_lock(&ubi_devices_mutex); |
1262 | ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1); | 1258 | ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1); |
1263 | mutex_unlock(&ubi_devices_mutex); | 1259 | mutex_unlock(&ubi_devices_mutex); |
1264 | } | 1260 | } |
1265 | ubi_debugfs_exit(); | 1261 | ubi_debugfs_exit(); |
1266 | out_slab: | 1262 | out_slab: |
1267 | kmem_cache_destroy(ubi_wl_entry_slab); | 1263 | kmem_cache_destroy(ubi_wl_entry_slab); |
1268 | out_dev_unreg: | 1264 | out_dev_unreg: |
1269 | misc_deregister(&ubi_ctrl_cdev); | 1265 | misc_deregister(&ubi_ctrl_cdev); |
1270 | out_version: | 1266 | out_version: |
1271 | class_remove_file(ubi_class, &ubi_version); | 1267 | class_remove_file(ubi_class, &ubi_version); |
1272 | out_class: | 1268 | out_class: |
1273 | class_destroy(ubi_class); | 1269 | class_destroy(ubi_class); |
1274 | out: | 1270 | out: |
1275 | ubi_err("UBI error: cannot initialize UBI, error %d", err); | 1271 | ubi_err("UBI error: cannot initialize UBI, error %d", err); |
1276 | return err; | 1272 | return err; |
1277 | } | 1273 | } |
1278 | module_init(ubi_init); | 1274 | module_init(ubi_init); |
1279 | 1275 | ||
1280 | static void __exit ubi_exit(void) | 1276 | static void __exit ubi_exit(void) |
1281 | { | 1277 | { |
1282 | int i; | 1278 | int i; |
1283 | 1279 | ||
1284 | for (i = 0; i < UBI_MAX_DEVICES; i++) | 1280 | for (i = 0; i < UBI_MAX_DEVICES; i++) |
1285 | if (ubi_devices[i]) { | 1281 | if (ubi_devices[i]) { |
1286 | mutex_lock(&ubi_devices_mutex); | 1282 | mutex_lock(&ubi_devices_mutex); |
1287 | ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1); | 1283 | ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1); |
1288 | mutex_unlock(&ubi_devices_mutex); | 1284 | mutex_unlock(&ubi_devices_mutex); |
1289 | } | 1285 | } |
1290 | ubi_debugfs_exit(); | 1286 | ubi_debugfs_exit(); |
1291 | kmem_cache_destroy(ubi_wl_entry_slab); | 1287 | kmem_cache_destroy(ubi_wl_entry_slab); |
1292 | misc_deregister(&ubi_ctrl_cdev); | 1288 | misc_deregister(&ubi_ctrl_cdev); |
1293 | class_remove_file(ubi_class, &ubi_version); | 1289 | class_remove_file(ubi_class, &ubi_version); |
1294 | class_destroy(ubi_class); | 1290 | class_destroy(ubi_class); |
1295 | } | 1291 | } |
1296 | module_exit(ubi_exit); | 1292 | module_exit(ubi_exit); |
1297 | 1293 | ||
1298 | /** | 1294 | /** |
1299 | * bytes_str_to_int - convert a number of bytes string into an integer. | 1295 | * bytes_str_to_int - convert a number of bytes string into an integer. |
1300 | * @str: the string to convert | 1296 | * @str: the string to convert |
1301 | * | 1297 | * |
1302 | * This function returns positive resulting integer in case of success and a | 1298 | * This function returns positive resulting integer in case of success and a |
1303 | * negative error code in case of failure. | 1299 | * negative error code in case of failure. |
1304 | */ | 1300 | */ |
1305 | static int __init bytes_str_to_int(const char *str) | 1301 | static int __init bytes_str_to_int(const char *str) |
1306 | { | 1302 | { |
1307 | char *endp; | 1303 | char *endp; |
1308 | unsigned long result; | 1304 | unsigned long result; |
1309 | 1305 | ||
1310 | result = simple_strtoul(str, &endp, 0); | 1306 | result = simple_strtoul(str, &endp, 0); |
1311 | if (str == endp || result >= INT_MAX) { | 1307 | if (str == endp || result >= INT_MAX) { |
1312 | printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", | 1308 | printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", |
1313 | str); | 1309 | str); |
1314 | return -EINVAL; | 1310 | return -EINVAL; |
1315 | } | 1311 | } |
1316 | 1312 | ||
1317 | switch (*endp) { | 1313 | switch (*endp) { |
1318 | case 'G': | 1314 | case 'G': |
1319 | result *= 1024; | 1315 | result *= 1024; |
1320 | case 'M': | 1316 | case 'M': |
1321 | result *= 1024; | 1317 | result *= 1024; |
1322 | case 'K': | 1318 | case 'K': |
1323 | result *= 1024; | 1319 | result *= 1024; |
1324 | if (endp[1] == 'i' && endp[2] == 'B') | 1320 | if (endp[1] == 'i' && endp[2] == 'B') |
1325 | endp += 2; | 1321 | endp += 2; |
1326 | case '\0': | 1322 | case '\0': |
1327 | break; | 1323 | break; |
1328 | default: | 1324 | default: |
1329 | printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", | 1325 | printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", |
1330 | str); | 1326 | str); |
1331 | return -EINVAL; | 1327 | return -EINVAL; |
1332 | } | 1328 | } |
1333 | 1329 | ||
1334 | return result; | 1330 | return result; |
1335 | } | 1331 | } |
1336 | 1332 | ||
1337 | /** | 1333 | /** |
1338 | * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter. | 1334 | * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter. |
1339 | * @val: the parameter value to parse | 1335 | * @val: the parameter value to parse |
1340 | * @kp: not used | 1336 | * @kp: not used |
1341 | * | 1337 | * |
1342 | * This function returns zero in case of success and a negative error code in | 1338 | * This function returns zero in case of success and a negative error code in |
1343 | * case of error. | 1339 | * case of error. |
1344 | */ | 1340 | */ |
1345 | static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp) | 1341 | static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp) |
1346 | { | 1342 | { |
1347 | int i, len; | 1343 | int i, len; |
1348 | struct mtd_dev_param *p; | 1344 | struct mtd_dev_param *p; |
1349 | char buf[MTD_PARAM_LEN_MAX]; | 1345 | char buf[MTD_PARAM_LEN_MAX]; |
1350 | char *pbuf = &buf[0]; | 1346 | char *pbuf = &buf[0]; |
1351 | char *tokens[2] = {NULL, NULL}; | 1347 | char *tokens[2] = {NULL, NULL}; |
1352 | 1348 | ||
1353 | if (!val) | 1349 | if (!val) |
1354 | return -EINVAL; | 1350 | return -EINVAL; |
1355 | 1351 | ||
1356 | if (mtd_devs == UBI_MAX_DEVICES) { | 1352 | if (mtd_devs == UBI_MAX_DEVICES) { |
1357 | printk(KERN_ERR "UBI error: too many parameters, max. is %d\n", | 1353 | printk(KERN_ERR "UBI error: too many parameters, max. is %d\n", |
1358 | UBI_MAX_DEVICES); | 1354 | UBI_MAX_DEVICES); |
1359 | return -EINVAL; | 1355 | return -EINVAL; |
1360 | } | 1356 | } |
1361 | 1357 | ||
1362 | len = strnlen(val, MTD_PARAM_LEN_MAX); | 1358 | len = strnlen(val, MTD_PARAM_LEN_MAX); |
1363 | if (len == MTD_PARAM_LEN_MAX) { | 1359 | if (len == MTD_PARAM_LEN_MAX) { |
1364 | printk(KERN_ERR "UBI error: parameter \"%s\" is too long, " | 1360 | printk(KERN_ERR "UBI error: parameter \"%s\" is too long, " |
1365 | "max. is %d\n", val, MTD_PARAM_LEN_MAX); | 1361 | "max. is %d\n", val, MTD_PARAM_LEN_MAX); |
1366 | return -EINVAL; | 1362 | return -EINVAL; |
1367 | } | 1363 | } |
1368 | 1364 | ||
1369 | if (len == 0) { | 1365 | if (len == 0) { |
1370 | printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - " | 1366 | printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - " |
1371 | "ignored\n"); | 1367 | "ignored\n"); |
1372 | return 0; | 1368 | return 0; |
1373 | } | 1369 | } |
1374 | 1370 | ||
1375 | strcpy(buf, val); | 1371 | strcpy(buf, val); |
1376 | 1372 | ||
1377 | /* Get rid of the final newline */ | 1373 | /* Get rid of the final newline */ |
1378 | if (buf[len - 1] == '\n') | 1374 | if (buf[len - 1] == '\n') |
1379 | buf[len - 1] = '\0'; | 1375 | buf[len - 1] = '\0'; |
1380 | 1376 | ||
1381 | for (i = 0; i < 2; i++) | 1377 | for (i = 0; i < 2; i++) |
1382 | tokens[i] = strsep(&pbuf, ","); | 1378 | tokens[i] = strsep(&pbuf, ","); |
1383 | 1379 | ||
1384 | if (pbuf) { | 1380 | if (pbuf) { |
1385 | printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n", | 1381 | printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n", |
1386 | val); | 1382 | val); |
1387 | return -EINVAL; | 1383 | return -EINVAL; |
1388 | } | 1384 | } |
1389 | 1385 | ||
1390 | p = &mtd_dev_param[mtd_devs]; | 1386 | p = &mtd_dev_param[mtd_devs]; |
1391 | strcpy(&p->name[0], tokens[0]); | 1387 | strcpy(&p->name[0], tokens[0]); |
1392 | 1388 | ||
1393 | if (tokens[1]) | 1389 | if (tokens[1]) |
1394 | p->vid_hdr_offs = bytes_str_to_int(tokens[1]); | 1390 | p->vid_hdr_offs = bytes_str_to_int(tokens[1]); |
1395 | 1391 | ||
1396 | if (p->vid_hdr_offs < 0) | 1392 | if (p->vid_hdr_offs < 0) |
1397 | return p->vid_hdr_offs; | 1393 | return p->vid_hdr_offs; |
1398 | 1394 | ||
1399 | mtd_devs += 1; | 1395 | mtd_devs += 1; |
1400 | return 0; | 1396 | return 0; |
1401 | } | 1397 | } |
1402 | 1398 | ||
/*
 * Each "mtd=" argument is handed to ubi_mtd_param_parse() as it is parsed.
 * Permissions are 000, so the parameter is only meaningful at load time and
 * is not exposed for reading or writing afterwards.
 */
module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
		      "mtd=<name|num|path>[,<vid_hdr_offs>].\n"
		      "Multiple \"mtd\" parameters may be specified.\n"
		      "MTD devices may be specified by their number, name, or "
		      "path to the MTD character device node.\n"
		      "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
		      "header position to be used by UBI.\n"
		      "Example 1: mtd=/dev/mtd0 - attach MTD device "
		      "/dev/mtd0.\n"
		      "Example 2: mtd=content,1984 mtd=4 - attach MTD device "
		      "with name \"content\" using VID header offset 1984, and "
		      "MTD device number 4 with default VID header offset.");

MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
MODULE_AUTHOR("Artem Bityutskiy");
MODULE_LICENSE("GPL");
1421 | 1417 |
drivers/mtd/ubi/io.c
1 | /* | 1 | /* |
2 | * Copyright (c) International Business Machines Corp., 2006 | 2 | * Copyright (c) International Business Machines Corp., 2006 |
3 | * Copyright (c) Nokia Corporation, 2006, 2007 | 3 | * Copyright (c) Nokia Corporation, 2006, 2007 |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation; either version 2 of the License, or | 7 | * the Free Software Foundation; either version 2 of the License, or |
8 | * (at your option) any later version. | 8 | * (at your option) any later version. |
9 | * | 9 | * |
10 | * This program is distributed in the hope that it will be useful, | 10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See |
13 | * the GNU General Public License for more details. | 13 | * the GNU General Public License for more details. |
14 | * | 14 | * |
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | * | 18 | * |
19 | * Author: Artem Bityutskiy (Битюцкий Артём) | 19 | * Author: Artem Bityutskiy (Битюцкий Артём) |
20 | */ | 20 | */ |
21 | 21 | ||
22 | /* | 22 | /* |
23 | * UBI input/output sub-system. | 23 | * UBI input/output sub-system. |
24 | * | 24 | * |
25 | * This sub-system provides a uniform way to work with all kinds of the | 25 | * This sub-system provides a uniform way to work with all kinds of the |
26 | * underlying MTD devices. It also implements handy functions for reading and | 26 | * underlying MTD devices. It also implements handy functions for reading and |
27 | * writing UBI headers. | 27 | * writing UBI headers. |
28 | * | 28 | * |
29 | * We are trying to have a paranoid mindset and not to trust to what we read | 29 | * We are trying to have a paranoid mindset and not to trust to what we read |
30 | * from the flash media in order to be more secure and robust. So this | 30 | * from the flash media in order to be more secure and robust. So this |
31 | * sub-system validates every single header it reads from the flash media. | 31 | * sub-system validates every single header it reads from the flash media. |
32 | * | 32 | * |
33 | * Some words about how the eraseblock headers are stored. | 33 | * Some words about how the eraseblock headers are stored. |
34 | * | 34 | * |
35 | * The erase counter header is always stored at offset zero. By default, the | 35 | * The erase counter header is always stored at offset zero. By default, the |
36 | * VID header is stored after the EC header at the closest aligned offset | 36 | * VID header is stored after the EC header at the closest aligned offset |
37 | * (i.e. aligned to the minimum I/O unit size). Data starts next to the VID | 37 | * (i.e. aligned to the minimum I/O unit size). Data starts next to the VID |
38 | * header at the closest aligned offset. But this default layout may be | 38 | * header at the closest aligned offset. But this default layout may be |
39 | * changed. For example, for different reasons (e.g., optimization) UBI may be | 39 | * changed. For example, for different reasons (e.g., optimization) UBI may be |
40 | * asked to put the VID header at further offset, and even at an unaligned | 40 | * asked to put the VID header at further offset, and even at an unaligned |
41 | * offset. Of course, if the offset of the VID header is unaligned, UBI adds | 41 | * offset. Of course, if the offset of the VID header is unaligned, UBI adds |
42 | * proper padding in front of it. Data offset may also be changed but it has to | 42 | * proper padding in front of it. Data offset may also be changed but it has to |
43 | * be aligned. | 43 | * be aligned. |
44 | * | 44 | * |
45 | * About minimal I/O units. In general, UBI assumes flash device model where | 45 | * About minimal I/O units. In general, UBI assumes flash device model where |
46 | * there is only one minimal I/O unit size. E.g., in case of NOR flash it is 1, | 46 | * there is only one minimal I/O unit size. E.g., in case of NOR flash it is 1, |
47 | * in case of NAND flash it is a NAND page, etc. This is reported by MTD in the | 47 | * in case of NAND flash it is a NAND page, etc. This is reported by MTD in the |
48 | * @ubi->mtd->writesize field. But as an exception, UBI admits of using another | 48 | * @ubi->mtd->writesize field. But as an exception, UBI admits of using another |
49 | * (smaller) minimal I/O unit size for EC and VID headers to make it possible | 49 | * (smaller) minimal I/O unit size for EC and VID headers to make it possible |
50 | * to do different optimizations. | 50 | * to do different optimizations. |
51 | * | 51 | * |
52 | * This is extremely useful in case of NAND flashes which admit of several | 52 | * This is extremely useful in case of NAND flashes which admit of several |
53 | * write operations to one NAND page. In this case UBI can fit EC and VID | 53 | * write operations to one NAND page. In this case UBI can fit EC and VID |
54 | * headers at one NAND page. Thus, UBI may use "sub-page" size as the minimal | 54 | * headers at one NAND page. Thus, UBI may use "sub-page" size as the minimal |
55 | * I/O unit for the headers (the @ubi->hdrs_min_io_size field). But it still | 55 | * I/O unit for the headers (the @ubi->hdrs_min_io_size field). But it still |
56 | * reports NAND page size (@ubi->min_io_size) as a minimal I/O unit for the UBI | 56 | * reports NAND page size (@ubi->min_io_size) as a minimal I/O unit for the UBI |
57 | * users. | 57 | * users. |
58 | * | 58 | * |
59 | * Example: some Samsung NANDs with 2KiB pages allow 4x 512-byte writes, so | 59 | * Example: some Samsung NANDs with 2KiB pages allow 4x 512-byte writes, so |
60 | * although the minimal I/O unit is 2K, UBI uses 512 bytes for EC and VID | 60 | * although the minimal I/O unit is 2K, UBI uses 512 bytes for EC and VID |
61 | * headers. | 61 | * headers. |
62 | * | 62 | * |
63 | * Q: why not just to treat sub-page as a minimal I/O unit of this flash | 63 | * Q: why not just to treat sub-page as a minimal I/O unit of this flash |
64 | * device, e.g., make @ubi->min_io_size = 512 in the example above? | 64 | * device, e.g., make @ubi->min_io_size = 512 in the example above? |
65 | * | 65 | * |
66 | * A: because when writing a sub-page, MTD still writes a full 2K page but the | 66 | * A: because when writing a sub-page, MTD still writes a full 2K page but the |
67 | * bytes which are not relevant to the sub-page are 0xFF. So, basically, | 67 | * bytes which are not relevant to the sub-page are 0xFF. So, basically, |
68 | * writing 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page. | 68 | * writing 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page. |
69 | * Thus, we prefer to use sub-pages only for EC and VID headers. | 69 | * Thus, we prefer to use sub-pages only for EC and VID headers. |
70 | * | 70 | * |
71 | * As it was noted above, the VID header may start at a non-aligned offset. | 71 | * As it was noted above, the VID header may start at a non-aligned offset. |
72 | * For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page, | 72 | * For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page, |
73 | * the VID header may reside at offset 1984 which is the last 64 bytes of the | 73 | * the VID header may reside at offset 1984 which is the last 64 bytes of the |
74 | * last sub-page (EC header is always at offset zero). This causes some | 74 | * last sub-page (EC header is always at offset zero). This causes some |
75 | * difficulties when reading and writing VID headers. | 75 | * difficulties when reading and writing VID headers. |
76 | * | 76 | * |
77 | * Suppose we have a 64-byte buffer and we read a VID header at it. We change | 77 | * Suppose we have a 64-byte buffer and we read a VID header at it. We change |
78 | * the data and want to write this VID header out. As we can only write in | 78 | * the data and want to write this VID header out. As we can only write in |
79 | * 512-byte chunks, we have to allocate one more buffer and copy our VID header | 79 | * 512-byte chunks, we have to allocate one more buffer and copy our VID header |
80 | * to offset 448 of this buffer. | 80 | * to offset 448 of this buffer. |
81 | * | 81 | * |
82 | * The I/O sub-system does the following trick in order to avoid this extra | 82 | * The I/O sub-system does the following trick in order to avoid this extra |
83 | * copy. It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID | 83 | * copy. It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID |
84 | * header and returns a pointer to offset @ubi->vid_hdr_shift of this buffer. | 84 | * header and returns a pointer to offset @ubi->vid_hdr_shift of this buffer. |
85 | * When the VID header is being written out, it shifts the VID header pointer | 85 | * When the VID header is being written out, it shifts the VID header pointer |
86 | * back and writes the whole sub-page. | 86 | * back and writes the whole sub-page. |
87 | */ | 87 | */ |
88 | 88 | ||
89 | #include <linux/crc32.h> | 89 | #include <linux/crc32.h> |
90 | #include <linux/err.h> | 90 | #include <linux/err.h> |
91 | #include <linux/slab.h> | 91 | #include <linux/slab.h> |
92 | #include "ubi.h" | 92 | #include "ubi.h" |
93 | 93 | ||
94 | static int self_check_not_bad(const struct ubi_device *ubi, int pnum); | 94 | static int self_check_not_bad(const struct ubi_device *ubi, int pnum); |
95 | static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum); | 95 | static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum); |
96 | static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum, | 96 | static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum, |
97 | const struct ubi_ec_hdr *ec_hdr); | 97 | const struct ubi_ec_hdr *ec_hdr); |
98 | static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum); | 98 | static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum); |
99 | static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum, | 99 | static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum, |
100 | const struct ubi_vid_hdr *vid_hdr); | 100 | const struct ubi_vid_hdr *vid_hdr); |
101 | static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum, | 101 | static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum, |
102 | int offset, int len); | 102 | int offset, int len); |
103 | 103 | ||
/**
 * ubi_io_read - read data from a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer where to store the read data
 * @pnum: physical eraseblock number to read from
 * @offset: offset within the physical eraseblock from where to read
 * @len: how many bytes to read
 *
 * This function reads data from offset @offset of physical eraseblock @pnum
 * and stores the read data in the @buf buffer. The following return codes are
 * possible:
 *
 * o %0 if all the requested data were successfully read;
 * o %UBI_IO_BITFLIPS if all the requested data were successfully read, but
 *   correctable bit-flips were detected; this is harmless but may indicate
 *   that this eraseblock may become bad soon (but do not have to);
 * o %-EBADMSG if the MTD subsystem reported about data integrity problems, for
 *   example it can be an ECC error in case of NAND; this most probably means
 *   that the data is corrupted;
 * o %-EIO if some I/O error occurred;
 * o other negative error codes in case of other errors.
 */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
		int len)
{
	int err, retries = 0;
	size_t read;
	loff_t addr;

	dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset);

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
	ubi_assert(len > 0);

	/* Self-check (debugging aid): the PEB must not be marked bad */
	err = self_check_not_bad(ubi, pnum);
	if (err)
		return err;

	/*
	 * Deliberately corrupt the buffer to improve robustness. Indeed, if we
	 * do not do this, the following may happen:
	 * 1. The buffer contains data from previous operation, e.g., read from
	 *    another PEB previously. The data looks like expected, e.g., if we
	 *    just do not read anything and return - the caller would not
	 *    notice this. E.g., if we are reading a VID header, the buffer may
	 *    contain a valid VID header from another PEB.
	 * 2. The driver is buggy and returns us success or -EBADMSG or
	 *    -EUCLEAN, but it does not actually put any data to the buffer.
	 *
	 * This may confuse UBI or upper layers - they may think the buffer
	 * contains valid data while in fact it is just old data. This is
	 * especially possible because UBI (and UBIFS) relies on CRC, and
	 * treats data as correct even in case of ECC errors if the CRC is
	 * correct.
	 *
	 * Try to prevent this situation by changing the first byte of the
	 * buffer.
	 */
	*((uint8_t *)buf) ^= 0xFF;

	addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
	err = mtd_read(ubi->mtd, addr, len, &read, buf);
	if (err) {
		const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";

		if (mtd_is_bitflip(err)) {
			/*
			 * -EUCLEAN is reported if there was a bit-flip which
			 * was corrected, so this is harmless.
			 *
			 * We do not report about it here unless debugging is
			 * enabled. A corresponding message will be printed
			 * later, when the PEB is scrubbed.
			 */
			dbg_msg("fixable bit-flip detected at PEB %d", pnum);
			ubi_assert(len == read);
			return UBI_IO_BITFLIPS;
		}

		/* Read errors may be transient - retry a few times first */
		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn("error %d%s while reading %d bytes from PEB "
				 "%d:%d, read only %zd bytes, retry",
				 err, errstr, len, pnum, offset, read);
			yield();
			goto retry;
		}

		ubi_err("error %d%s while reading %d bytes from PEB %d:%d, "
			"read %zd bytes", err, errstr, len, pnum, offset, read);
		dump_stack();

		/*
		 * The driver should never return -EBADMSG if it failed to read
		 * all the requested data. But some buggy drivers might do
		 * this, so we change it to -EIO.
		 */
		if (read != len && mtd_is_eccerr(err)) {
			ubi_assert(0);
			err = -EIO;
		}
	} else {
		ubi_assert(len == read);

		/* Debugging aid: emulate a correctable bit-flip */
		if (ubi_dbg_is_bitflip(ubi)) {
			dbg_gen("bit-flip (emulated)");
			err = UBI_IO_BITFLIPS;
		}
	}

	return err;
}
217 | 217 | ||
/**
 * ubi_io_write - write data to a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer with the data to write
 * @pnum: physical eraseblock number to write to
 * @offset: offset within the physical eraseblock where to write
 * @len: how many bytes to write
 *
 * This function writes @len bytes of data from buffer @buf to offset @offset
 * of physical eraseblock @pnum. If all the data were successfully written,
 * zero is returned. If an error occurred, this function returns a negative
 * error code. If %-EIO is returned, the physical eraseblock most probably went
 * bad.
 *
 * Note, in case of an error, it is possible that something was still written
 * to the flash media, but may be some garbage.
 */
int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
		 int len)
{
	int err;
	size_t written;
	loff_t addr;

	dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
	ubi_assert(offset % ubi->hdrs_min_io_size == 0);
	ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);

	/* Never touch the media once UBI has switched to read-only mode */
	if (ubi->ro_mode) {
		ubi_err("read-only mode");
		return -EROFS;
	}

	/* Self-check (debugging aid): the PEB must not be marked bad */
	err = self_check_not_bad(ubi, pnum);
	if (err)
		return err;

	/* The area we are writing to has to contain all 0xFF bytes */
	err = ubi_self_check_all_ff(ubi, pnum, offset, len);
	if (err)
		return err;

	if (offset >= ubi->leb_start) {
		/*
		 * We write to the data area of the physical eraseblock. Make
		 * sure it has valid EC and VID headers.
		 */
		err = self_check_peb_ec_hdr(ubi, pnum);
		if (err)
			return err;
		err = self_check_peb_vid_hdr(ubi, pnum);
		if (err)
			return err;
	}

	/* Debugging aid: emulate a write failure */
	if (ubi_dbg_is_write_failure(ubi)) {
		ubi_err("cannot write %d bytes to PEB %d:%d "
			"(emulated)", len, pnum, offset);
		dump_stack();
		return -EIO;
	}

	addr = (loff_t)pnum * ubi->peb_size + offset;
	err = mtd_write(ubi->mtd, addr, len, &written, buf);
	if (err) {
		ubi_err("error %d while writing %d bytes to PEB %d:%d, written "
			"%zd bytes", err, len, pnum, offset, written);
		dump_stack();
		ubi_dump_flash(ubi, pnum, offset, len);
	} else
		ubi_assert(written == len);

	if (!err) {
		/* Self-check that the data actually hit the flash as written */
		err = self_check_write(ubi, buf, pnum, offset, len);
		if (err)
			return err;

		/*
		 * Since we always write sequentially, the rest of the PEB has
		 * to contain only 0xFF bytes.
		 */
		offset += len;
		len = ubi->peb_size - offset;
		if (len)
			err = ubi_self_check_all_ff(ubi, pnum, offset, len);
	}

	return err;
}
310 | 310 | ||
/**
 * erase_callback - MTD erasure call-back.
 * @ei: MTD erase information object.
 *
 * Note, even though MTD erase interface is asynchronous, all the current
 * implementations are synchronous anyway.
 */
static void erase_callback(struct erase_info *ei)
{
	/* @ei->priv carries the wait queue head set up by do_sync_erase() */
	wake_up_interruptible((wait_queue_head_t *)ei->priv);
}
322 | 322 | ||
/**
 * do_sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to erase
 *
 * This function synchronously erases physical eraseblock @pnum and returns
 * zero in case of success and a negative error code in case of failure. If
 * %-EIO is returned, the physical eraseblock most probably went bad.
 */
static int do_sync_erase(struct ubi_device *ubi, int pnum)
{
	int err, retries = 0;
	struct erase_info ei;
	wait_queue_head_t wq;

	dbg_io("erase PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	if (ubi->ro_mode) {
		ubi_err("read-only mode");
		return -EROFS;
	}

retry:
	/* Re-initialize on every retry - mtd_erase() may have modified @ei */
	init_waitqueue_head(&wq);
	memset(&ei, 0, sizeof(struct erase_info));

	ei.mtd = ubi->mtd;
	ei.addr = (loff_t)pnum * ubi->peb_size;
	ei.len = ubi->peb_size;
	ei.callback = erase_callback;
	/* The callback uses this wait queue to wake us up on completion */
	ei.priv = (unsigned long)&wq;

	err = mtd_erase(ubi->mtd, &ei);
	if (err) {
		/* Retry a limited number of times before giving up */
		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn("error %d while erasing PEB %d, retry",
				 err, pnum);
			yield();
			goto retry;
		}
		ubi_err("cannot erase PEB %d, error %d", pnum, err);
		dump_stack();
		return err;
	}

	/* MTD erase is formally asynchronous - wait for it to finish or fail */
	err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
					   ei.state == MTD_ERASE_FAILED);
	if (err) {
		ubi_err("interrupted PEB %d erasure", pnum);
		return -EINTR;
	}

	if (ei.state == MTD_ERASE_FAILED) {
		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn("error while erasing PEB %d, retry", pnum);
			yield();
			goto retry;
		}
		ubi_err("cannot erase PEB %d", pnum);
		dump_stack();
		return -EIO;
	}

	/* Paranoid self-check: the PEB must contain only 0xFF bytes now */
	err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
	if (err)
		return err;

	/* Debugging facility which emulates random erase failures */
	if (ubi_dbg_is_erase_failure(ubi)) {
		ubi_err("cannot erase PEB %d (emulated)", pnum);
		return -EIO;
	}

	return 0;
}
398 | 398 | ||
/*
 * Patterns to write to a physical eraseblock when torturing it; each pattern
 * is written and read back in turn by torture_peb().
 */
static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
401 | 401 | ||
/**
 * torture_peb - test a supposedly bad physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to test
 *
 * This function returns %-EIO if the physical eraseblock did not pass the
 * test, a positive number of erase operations done if the test was
 * successfully passed, and other negative error codes in case of other errors.
 */
static int torture_peb(struct ubi_device *ubi, int pnum)
{
	int err, i, patt_count;

	ubi_msg("run torture test for PEB %d", pnum);
	patt_count = ARRAY_SIZE(patterns);
	ubi_assert(patt_count > 0);

	/* Serialize access to the shared ubi->peb_buf scratch buffer */
	mutex_lock(&ubi->buf_mutex);
	for (i = 0; i < patt_count; i++) {
		err = do_sync_erase(ubi, pnum);
		if (err)
			goto out;

		/* Make sure the PEB contains only 0xFF bytes */
		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
		if (err == 0) {
			ubi_err("erased PEB %d, but a non-0xFF byte found",
				pnum);
			err = -EIO;
			goto out;
		}

		/* Write a pattern and check it */
		memset(ubi->peb_buf, patterns[i], ubi->peb_size);
		err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		/*
		 * Pre-fill the buffer with the inverted pattern, presumably so
		 * that stale buffer contents cannot mask a bad read-back.
		 */
		memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		err = ubi_check_pattern(ubi->peb_buf, patterns[i],
					ubi->peb_size);
		if (err == 0) {
			ubi_err("pattern %x checking failed for PEB %d",
				patterns[i], pnum);
			err = -EIO;
			goto out;
		}
	}

	/* All patterns verified - report how many erasures were performed */
	err = patt_count;
	ubi_msg("PEB %d passed torture test, do not mark it as bad", pnum);

out:
	mutex_unlock(&ubi->buf_mutex);
	if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
		/*
		 * If a bit-flip or data integrity error was detected, the test
		 * has not passed because it happened on a freshly erased
		 * physical eraseblock which means something is wrong with it.
		 */
		ubi_err("read problems on freshly erased PEB %d, must be bad",
			pnum);
		err = -EIO;
	}
	return err;
}
476 | 476 | ||
/**
 * nor_erase_prepare - prepare a NOR flash PEB for erasure.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to prepare
 *
 * NOR flash, or at least some of them, have peculiar embedded PEB erasure
 * algorithm: the PEB is first filled with zeroes, then it is erased. And
 * filling with zeroes starts from the end of the PEB. This was observed with
 * Spansion S29GL512N NOR flash.
 *
 * This means that in case of a power cut we may end up with intact data at the
 * beginning of the PEB, and all zeroes at the end of PEB. In other words, the
 * EC and VID headers are OK, but a large chunk of data at the end of PEB is
 * zeroed. This makes UBI mistakenly treat this PEB as used and associate it
 * with an LEB, which leads to subsequent failures (e.g., UBIFS fails).
 *
 * This function is called before erasing NOR PEBs and it zeroes out EC and VID
 * magic numbers in order to invalidate them and prevent the failures. Returns
 * zero in case of success and a negative error code in case of failure.
 */
static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
{
	int err, err1;
	size_t written;
	loff_t addr;
	uint32_t data = 0;
	/*
	 * Note, we cannot generally define VID header buffers on stack,
	 * because of the way we deal with these buffers (see the header
	 * comment in this file). But we know this is a NOR-specific piece of
	 * code, so we can do this. But yes, this is error-prone and we should
	 * (pre-)allocate VID header buffer instead.
	 */
	struct ubi_vid_hdr vid_hdr;

	/*
	 * It is important to first invalidate the EC header, and then the VID
	 * header. Otherwise a power cut may lead to valid EC header and
	 * invalid VID header, in which case UBI will treat this PEB as
	 * corrupted and will try to preserve it, and print scary warnings.
	 */
	addr = (loff_t)pnum * ubi->peb_size;
	/* Overwrite the 4-byte magic of the EC header with zeroes */
	err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
	if (!err) {
		addr += ubi->vid_hdr_aloffset;
		/* Now zero out the VID header magic as well */
		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
		if (!err)
			return 0;
	}

	/*
	 * We failed to write to the media. This was observed with Spansion
	 * S29GL512N NOR flash. Most probably the previously eraseblock erasure
	 * was interrupted at a very inappropriate moment, so it became
	 * unwritable. In this case we probably anyway have garbage in this
	 * PEB.
	 */
	err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
	if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
	    err1 == UBI_IO_FF) {
		struct ubi_ec_hdr ec_hdr;

		err1 = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
		if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
		    err1 == UBI_IO_FF)
			/*
			 * Both VID and EC headers are corrupted, so we can
			 * safely erase this PEB and not afraid that it will be
			 * treated as a valid PEB in case of an unclean reboot.
			 */
			return 0;
	}

	/*
	 * The PEB contains a valid VID header, but we cannot invalidate it.
	 * Supposedly the flash media or the driver is screwed up, so return an
	 * error.
	 */
	ubi_err("cannot invalidate PEB %d, write returned %d read returned %d",
		pnum, err, err1);
	ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
	return -EIO;
}
561 | 560 | ||
562 | /** | 561 | /** |
563 | * ubi_io_sync_erase - synchronously erase a physical eraseblock. | 562 | * ubi_io_sync_erase - synchronously erase a physical eraseblock. |
564 | * @ubi: UBI device description object | 563 | * @ubi: UBI device description object |
565 | * @pnum: physical eraseblock number to erase | 564 | * @pnum: physical eraseblock number to erase |
566 | * @torture: if this physical eraseblock has to be tortured | 565 | * @torture: if this physical eraseblock has to be tortured |
567 | * | 566 | * |
568 | * This function synchronously erases physical eraseblock @pnum. If @torture | 567 | * This function synchronously erases physical eraseblock @pnum. If @torture |
569 | * flag is not zero, the physical eraseblock is checked by means of writing | 568 | * flag is not zero, the physical eraseblock is checked by means of writing |
570 | * different patterns to it and reading them back. If the torturing is enabled, | 569 | * different patterns to it and reading them back. If the torturing is enabled, |
571 | * the physical eraseblock is erased more than once. | 570 | * the physical eraseblock is erased more than once. |
572 | * | 571 | * |
573 | * This function returns the number of erasures made in case of success, %-EIO | 572 | * This function returns the number of erasures made in case of success, %-EIO |
574 | * if the erasure failed or the torturing test failed, and other negative error | 573 | * if the erasure failed or the torturing test failed, and other negative error |
575 | * codes in case of other errors. Note, %-EIO means that the physical | 574 | * codes in case of other errors. Note, %-EIO means that the physical |
576 | * eraseblock is bad. | 575 | * eraseblock is bad. |
577 | */ | 576 | */ |
578 | int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture) | 577 | int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture) |
579 | { | 578 | { |
580 | int err, ret = 0; | 579 | int err, ret = 0; |
581 | 580 | ||
582 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); | 581 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); |
583 | 582 | ||
584 | err = self_check_not_bad(ubi, pnum); | 583 | err = self_check_not_bad(ubi, pnum); |
585 | if (err != 0) | 584 | if (err != 0) |
586 | return err; | 585 | return err; |
587 | 586 | ||
588 | if (ubi->ro_mode) { | 587 | if (ubi->ro_mode) { |
589 | ubi_err("read-only mode"); | 588 | ubi_err("read-only mode"); |
590 | return -EROFS; | 589 | return -EROFS; |
591 | } | 590 | } |
592 | 591 | ||
593 | if (ubi->nor_flash) { | 592 | if (ubi->nor_flash) { |
594 | err = nor_erase_prepare(ubi, pnum); | 593 | err = nor_erase_prepare(ubi, pnum); |
595 | if (err) | 594 | if (err) |
596 | return err; | 595 | return err; |
597 | } | 596 | } |
598 | 597 | ||
599 | if (torture) { | 598 | if (torture) { |
600 | ret = torture_peb(ubi, pnum); | 599 | ret = torture_peb(ubi, pnum); |
601 | if (ret < 0) | 600 | if (ret < 0) |
602 | return ret; | 601 | return ret; |
603 | } | 602 | } |
604 | 603 | ||
605 | err = do_sync_erase(ubi, pnum); | 604 | err = do_sync_erase(ubi, pnum); |
606 | if (err) | 605 | if (err) |
607 | return err; | 606 | return err; |
608 | 607 | ||
609 | return ret + 1; | 608 | return ret + 1; |
610 | } | 609 | } |
611 | 610 | ||
612 | /** | 611 | /** |
613 | * ubi_io_is_bad - check if a physical eraseblock is bad. | 612 | * ubi_io_is_bad - check if a physical eraseblock is bad. |
614 | * @ubi: UBI device description object | 613 | * @ubi: UBI device description object |
615 | * @pnum: the physical eraseblock number to check | 614 | * @pnum: the physical eraseblock number to check |
616 | * | 615 | * |
617 | * This function returns a positive number if the physical eraseblock is bad, | 616 | * This function returns a positive number if the physical eraseblock is bad, |
618 | * zero if not, and a negative error code if an error occurred. | 617 | * zero if not, and a negative error code if an error occurred. |
619 | */ | 618 | */ |
620 | int ubi_io_is_bad(const struct ubi_device *ubi, int pnum) | 619 | int ubi_io_is_bad(const struct ubi_device *ubi, int pnum) |
621 | { | 620 | { |
622 | struct mtd_info *mtd = ubi->mtd; | 621 | struct mtd_info *mtd = ubi->mtd; |
623 | 622 | ||
624 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); | 623 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); |
625 | 624 | ||
626 | if (ubi->bad_allowed) { | 625 | if (ubi->bad_allowed) { |
627 | int ret; | 626 | int ret; |
628 | 627 | ||
629 | ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size); | 628 | ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size); |
630 | if (ret < 0) | 629 | if (ret < 0) |
631 | ubi_err("error %d while checking if PEB %d is bad", | 630 | ubi_err("error %d while checking if PEB %d is bad", |
632 | ret, pnum); | 631 | ret, pnum); |
633 | else if (ret) | 632 | else if (ret) |
634 | dbg_io("PEB %d is bad", pnum); | 633 | dbg_io("PEB %d is bad", pnum); |
635 | return ret; | 634 | return ret; |
636 | } | 635 | } |
637 | 636 | ||
638 | return 0; | 637 | return 0; |
639 | } | 638 | } |
640 | 639 | ||
641 | /** | 640 | /** |
642 | * ubi_io_mark_bad - mark a physical eraseblock as bad. | 641 | * ubi_io_mark_bad - mark a physical eraseblock as bad. |
643 | * @ubi: UBI device description object | 642 | * @ubi: UBI device description object |
644 | * @pnum: the physical eraseblock number to mark | 643 | * @pnum: the physical eraseblock number to mark |
645 | * | 644 | * |
646 | * This function returns zero in case of success and a negative error code in | 645 | * This function returns zero in case of success and a negative error code in |
647 | * case of failure. | 646 | * case of failure. |
648 | */ | 647 | */ |
649 | int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum) | 648 | int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum) |
650 | { | 649 | { |
651 | int err; | 650 | int err; |
652 | struct mtd_info *mtd = ubi->mtd; | 651 | struct mtd_info *mtd = ubi->mtd; |
653 | 652 | ||
654 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); | 653 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); |
655 | 654 | ||
656 | if (ubi->ro_mode) { | 655 | if (ubi->ro_mode) { |
657 | ubi_err("read-only mode"); | 656 | ubi_err("read-only mode"); |
658 | return -EROFS; | 657 | return -EROFS; |
659 | } | 658 | } |
660 | 659 | ||
661 | if (!ubi->bad_allowed) | 660 | if (!ubi->bad_allowed) |
662 | return 0; | 661 | return 0; |
663 | 662 | ||
664 | err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size); | 663 | err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size); |
665 | if (err) | 664 | if (err) |
666 | ubi_err("cannot mark PEB %d bad, error %d", pnum, err); | 665 | ubi_err("cannot mark PEB %d bad, error %d", pnum, err); |
667 | return err; | 666 | return err; |
668 | } | 667 | } |
669 | 668 | ||
670 | /** | 669 | /** |
671 | * validate_ec_hdr - validate an erase counter header. | 670 | * validate_ec_hdr - validate an erase counter header. |
672 | * @ubi: UBI device description object | 671 | * @ubi: UBI device description object |
673 | * @ec_hdr: the erase counter header to check | 672 | * @ec_hdr: the erase counter header to check |
674 | * | 673 | * |
675 | * This function returns zero if the erase counter header is OK, and %1 if | 674 | * This function returns zero if the erase counter header is OK, and %1 if |
676 | * not. | 675 | * not. |
677 | */ | 676 | */ |
678 | static int validate_ec_hdr(const struct ubi_device *ubi, | 677 | static int validate_ec_hdr(const struct ubi_device *ubi, |
679 | const struct ubi_ec_hdr *ec_hdr) | 678 | const struct ubi_ec_hdr *ec_hdr) |
680 | { | 679 | { |
681 | long long ec; | 680 | long long ec; |
682 | int vid_hdr_offset, leb_start; | 681 | int vid_hdr_offset, leb_start; |
683 | 682 | ||
684 | ec = be64_to_cpu(ec_hdr->ec); | 683 | ec = be64_to_cpu(ec_hdr->ec); |
685 | vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset); | 684 | vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset); |
686 | leb_start = be32_to_cpu(ec_hdr->data_offset); | 685 | leb_start = be32_to_cpu(ec_hdr->data_offset); |
687 | 686 | ||
688 | if (ec_hdr->version != UBI_VERSION) { | 687 | if (ec_hdr->version != UBI_VERSION) { |
689 | ubi_err("node with incompatible UBI version found: " | 688 | ubi_err("node with incompatible UBI version found: " |
690 | "this UBI version is %d, image version is %d", | 689 | "this UBI version is %d, image version is %d", |
691 | UBI_VERSION, (int)ec_hdr->version); | 690 | UBI_VERSION, (int)ec_hdr->version); |
692 | goto bad; | 691 | goto bad; |
693 | } | 692 | } |
694 | 693 | ||
695 | if (vid_hdr_offset != ubi->vid_hdr_offset) { | 694 | if (vid_hdr_offset != ubi->vid_hdr_offset) { |
696 | ubi_err("bad VID header offset %d, expected %d", | 695 | ubi_err("bad VID header offset %d, expected %d", |
697 | vid_hdr_offset, ubi->vid_hdr_offset); | 696 | vid_hdr_offset, ubi->vid_hdr_offset); |
698 | goto bad; | 697 | goto bad; |
699 | } | 698 | } |
700 | 699 | ||
701 | if (leb_start != ubi->leb_start) { | 700 | if (leb_start != ubi->leb_start) { |
702 | ubi_err("bad data offset %d, expected %d", | 701 | ubi_err("bad data offset %d, expected %d", |
703 | leb_start, ubi->leb_start); | 702 | leb_start, ubi->leb_start); |
704 | goto bad; | 703 | goto bad; |
705 | } | 704 | } |
706 | 705 | ||
707 | if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) { | 706 | if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) { |
708 | ubi_err("bad erase counter %lld", ec); | 707 | ubi_err("bad erase counter %lld", ec); |
709 | goto bad; | 708 | goto bad; |
710 | } | 709 | } |
711 | 710 | ||
712 | return 0; | 711 | return 0; |
713 | 712 | ||
714 | bad: | 713 | bad: |
715 | ubi_err("bad EC header"); | 714 | ubi_err("bad EC header"); |
716 | ubi_dump_ec_hdr(ec_hdr); | 715 | ubi_dump_ec_hdr(ec_hdr); |
717 | dump_stack(); | 716 | dump_stack(); |
718 | return 1; | 717 | return 1; |
719 | } | 718 | } |
720 | 719 | ||
721 | /** | 720 | /** |
722 | * ubi_io_read_ec_hdr - read and check an erase counter header. | 721 | * ubi_io_read_ec_hdr - read and check an erase counter header. |
723 | * @ubi: UBI device description object | 722 | * @ubi: UBI device description object |
724 | * @pnum: physical eraseblock to read from | 723 | * @pnum: physical eraseblock to read from |
725 | * @ec_hdr: a &struct ubi_ec_hdr object where to store the read erase counter | 724 | * @ec_hdr: a &struct ubi_ec_hdr object where to store the read erase counter |
726 | * header | 725 | * header |
727 | * @verbose: be verbose if the header is corrupted or was not found | 726 | * @verbose: be verbose if the header is corrupted or was not found |
728 | * | 727 | * |
729 | * This function reads erase counter header from physical eraseblock @pnum and | 728 | * This function reads erase counter header from physical eraseblock @pnum and |
730 | * stores it in @ec_hdr. This function also checks CRC checksum of the read | 729 | * stores it in @ec_hdr. This function also checks CRC checksum of the read |
731 | * erase counter header. The following codes may be returned: | 730 | * erase counter header. The following codes may be returned: |
732 | * | 731 | * |
733 | * o %0 if the CRC checksum is correct and the header was successfully read; | 732 | * o %0 if the CRC checksum is correct and the header was successfully read; |
734 | * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected | 733 | * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected |
735 | * and corrected by the flash driver; this is harmless but may indicate that | 734 | * and corrected by the flash driver; this is harmless but may indicate that |
736 | * this eraseblock may become bad soon (but may be not); | 735 | * this eraseblock may become bad soon (but may be not); |
737 | * o %UBI_IO_BAD_HDR if the erase counter header is corrupted (a CRC error); | 736 | * o %UBI_IO_BAD_HDR if the erase counter header is corrupted (a CRC error); |
738 | * o %UBI_IO_BAD_HDR_EBADMSG is the same as %UBI_IO_BAD_HDR, but there also was | 737 | * o %UBI_IO_BAD_HDR_EBADMSG is the same as %UBI_IO_BAD_HDR, but there also was |
739 | * a data integrity error (uncorrectable ECC error in case of NAND); | 738 | * a data integrity error (uncorrectable ECC error in case of NAND); |
740 | * o %UBI_IO_FF if only 0xFF bytes were read (the PEB is supposedly empty) | 739 | * o %UBI_IO_FF if only 0xFF bytes were read (the PEB is supposedly empty) |
741 | * o a negative error code in case of failure. | 740 | * o a negative error code in case of failure. |
742 | */ | 741 | */ |
int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
		       struct ubi_ec_hdr *ec_hdr, int verbose)
{
	int err, read_err;
	uint32_t crc, magic, hdr_crc;

	dbg_io("read EC header from PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
	if (read_err) {
		/* Only bit-flips and ECC errors are tolerated at this point */
		if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
			return read_err;

		/*
		 * We read all the data, but either a correctable bit-flip
		 * occurred, or MTD reported a data integrity error
		 * (uncorrectable ECC error in case of NAND). The former is
		 * harmless, the latter may mean that the read data is
		 * corrupted. But we have a CRC check-sum and we will detect
		 * this. If the EC header is still OK, we just report this as
		 * there was a bit-flip, to force scrubbing.
		 */
	}

	magic = be32_to_cpu(ec_hdr->magic);
	if (magic != UBI_EC_HDR_MAGIC) {
		/*
		 * A bad magic after an uncorrectable read error means we
		 * cannot tell anything about this PEB's contents.
		 */
		if (mtd_is_eccerr(read_err))
			return UBI_IO_BAD_HDR_EBADMSG;

		/*
		 * The magic field is wrong. Let's check if we have read all
		 * 0xFF. If yes, this physical eraseblock is assumed to be
		 * empty.
		 */
		if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
			/* The physical eraseblock is supposedly empty */
			if (verbose)
				ubi_warn("no EC header found at PEB %d, "
					 "only 0xFF bytes", pnum);
			dbg_bld("no EC header found at PEB %d, "
				"only 0xFF bytes", pnum);
			if (!read_err)
				return UBI_IO_FF;
			else
				return UBI_IO_FF_BITFLIPS;
		}

		/*
		 * This is not a valid erase counter header, and these are not
		 * 0xFF bytes. Report that the header is corrupted.
		 */
		if (verbose) {
			ubi_warn("bad magic number at PEB %d: %08x instead of "
				 "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
			ubi_dump_ec_hdr(ec_hdr);
		}
		dbg_bld("bad magic number at PEB %d: %08x instead of "
			"%08x", pnum, magic, UBI_EC_HDR_MAGIC);
		return UBI_IO_BAD_HDR;
	}

	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);

	if (hdr_crc != crc) {
		if (verbose) {
			ubi_warn("bad EC header CRC at PEB %d, calculated "
				 "%#08x, read %#08x", pnum, crc, hdr_crc);
			ubi_dump_ec_hdr(ec_hdr);
		}
		dbg_bld("bad EC header CRC at PEB %d, calculated "
			"%#08x, read %#08x", pnum, crc, hdr_crc);

		/*
		 * A CRC mismatch after a clean read is plain corruption; after
		 * an ECC error it may just be the unreadable data.
		 */
		if (!read_err)
			return UBI_IO_BAD_HDR;
		else
			return UBI_IO_BAD_HDR_EBADMSG;
	}

	/* And of course validate what has just been read from the media */
	err = validate_ec_hdr(ubi, ec_hdr);
	if (err) {
		ubi_err("validation failed for PEB %d", pnum);
		return -EINVAL;
	}

	/*
	 * If there was %-EBADMSG, but the header CRC is still OK, report about
	 * a bit-flip to force scrubbing on this PEB.
	 */
	return read_err ? UBI_IO_BITFLIPS : 0;
}
836 | 835 | ||
837 | /** | 836 | /** |
838 | * ubi_io_write_ec_hdr - write an erase counter header. | 837 | * ubi_io_write_ec_hdr - write an erase counter header. |
839 | * @ubi: UBI device description object | 838 | * @ubi: UBI device description object |
840 | * @pnum: physical eraseblock to write to | 839 | * @pnum: physical eraseblock to write to |
841 | * @ec_hdr: the erase counter header to write | 840 | * @ec_hdr: the erase counter header to write |
842 | * | 841 | * |
843 | * This function writes erase counter header described by @ec_hdr to physical | 842 | * This function writes erase counter header described by @ec_hdr to physical |
844 | * eraseblock @pnum. It also fills most fields of @ec_hdr before writing, so | 843 | * eraseblock @pnum. It also fills most fields of @ec_hdr before writing, so |
845 | * the caller do not have to fill them. Callers must only fill the @ec_hdr->ec | 844 | * the caller do not have to fill them. Callers must only fill the @ec_hdr->ec |
846 | * field. | 845 | * field. |
847 | * | 846 | * |
848 | * This function returns zero in case of success and a negative error code in | 847 | * This function returns zero in case of success and a negative error code in |
849 | * case of failure. If %-EIO is returned, the physical eraseblock most probably | 848 | * case of failure. If %-EIO is returned, the physical eraseblock most probably |
850 | * went bad. | 849 | * went bad. |
851 | */ | 850 | */ |
852 | int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum, | 851 | int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum, |
853 | struct ubi_ec_hdr *ec_hdr) | 852 | struct ubi_ec_hdr *ec_hdr) |
854 | { | 853 | { |
855 | int err; | 854 | int err; |
856 | uint32_t crc; | 855 | uint32_t crc; |
857 | 856 | ||
858 | dbg_io("write EC header to PEB %d", pnum); | 857 | dbg_io("write EC header to PEB %d", pnum); |
859 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); | 858 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); |
860 | 859 | ||
861 | ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC); | 860 | ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC); |
862 | ec_hdr->version = UBI_VERSION; | 861 | ec_hdr->version = UBI_VERSION; |
863 | ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset); | 862 | ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset); |
864 | ec_hdr->data_offset = cpu_to_be32(ubi->leb_start); | 863 | ec_hdr->data_offset = cpu_to_be32(ubi->leb_start); |
865 | ec_hdr->image_seq = cpu_to_be32(ubi->image_seq); | 864 | ec_hdr->image_seq = cpu_to_be32(ubi->image_seq); |
866 | crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); | 865 | crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); |
867 | ec_hdr->hdr_crc = cpu_to_be32(crc); | 866 | ec_hdr->hdr_crc = cpu_to_be32(crc); |
868 | 867 | ||
869 | err = self_check_ec_hdr(ubi, pnum, ec_hdr); | 868 | err = self_check_ec_hdr(ubi, pnum, ec_hdr); |
870 | if (err) | 869 | if (err) |
871 | return err; | 870 | return err; |
872 | 871 | ||
873 | err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize); | 872 | err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize); |
874 | return err; | 873 | return err; |
875 | } | 874 | } |
876 | 875 | ||
/**
 * validate_vid_hdr - validate a volume identifier header.
 * @ubi: UBI device description object
 * @vid_hdr: the volume identifier header to check
 *
 * This function checks that the data stored in the volume identifier header
 * @vid_hdr is sane and self-consistent. Returns zero if the VID header is OK
 * and %1 if not.
 */
static int validate_vid_hdr(const struct ubi_device *ubi,
			    const struct ubi_vid_hdr *vid_hdr)
{
	int vol_type = vid_hdr->vol_type;
	int copy_flag = vid_hdr->copy_flag;
	int vol_id = be32_to_cpu(vid_hdr->vol_id);
	int lnum = be32_to_cpu(vid_hdr->lnum);
	int compat = vid_hdr->compat;
	int data_size = be32_to_cpu(vid_hdr->data_size);
	int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
	int data_pad = be32_to_cpu(vid_hdr->data_pad);
	int data_crc = be32_to_cpu(vid_hdr->data_crc);
	int usable_leb_size = ubi->leb_size - data_pad;

	if (copy_flag != 0 && copy_flag != 1) {
		ubi_err("bad copy_flag");
		goto bad;
	}

	if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
	    data_pad < 0) {
		ubi_err("negative values");
		goto bad;
	}

	if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
		ubi_err("bad vol_id");
		goto bad;
	}

	/* Only internal volumes may carry a compatibility flag */
	if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
		ubi_err("bad compat");
		goto bad;
	}

	if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
	    compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
	    compat != UBI_COMPAT_REJECT) {
		ubi_err("bad compat");
		goto bad;
	}

	if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
		ubi_err("bad vol_type");
		goto bad;
	}

	if (data_pad >= ubi->leb_size / 2) {
		ubi_err("bad data_pad");
		goto bad;
	}

	if (vol_type == UBI_VID_STATIC) {
		/*
		 * Although from a high-level point of view static volumes may
		 * contain zero bytes of data, no VID header can contain zero
		 * at these fields, because empty volumes do not have mapped
		 * logical eraseblocks.
		 */
		if (used_ebs == 0) {
			ubi_err("zero used_ebs");
			goto bad;
		}
		if (data_size == 0) {
			ubi_err("zero data_size");
			goto bad;
		}
		if (lnum < used_ebs - 1) {
			/* Not the last LEB - must be completely filled */
			if (data_size != usable_leb_size) {
				ubi_err("bad data_size");
				goto bad;
			}
		} else if (lnum == used_ebs - 1) {
			/* The last LEB - may be only partially filled */
			if (data_size == 0) {
				ubi_err("bad data_size at last LEB");
				goto bad;
			}
		} else {
			ubi_err("too high lnum");
			goto bad;
		}
	} else {
		if (copy_flag == 0) {
			/*
			 * A dynamic-volume LEB which is not a copy carries
			 * neither a data CRC nor a data size.
			 */
			if (data_crc != 0) {
				ubi_err("non-zero data CRC");
				goto bad;
			}
			if (data_size != 0) {
				ubi_err("non-zero data_size");
				goto bad;
			}
		} else {
			if (data_size == 0) {
				ubi_err("zero data_size of copy");
				goto bad;
			}
		}
		/* used_ebs is meaningful for static volumes only */
		if (used_ebs != 0) {
			ubi_err("bad used_ebs");
			goto bad;
		}
	}

	return 0;

bad:
	ubi_err("bad VID header");
	ubi_dump_vid_hdr(vid_hdr);
	dump_stack();
	return 1;
}
996 | 995 | ||
/**
 * ubi_io_read_vid_hdr - read and check a volume identifier header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to read from
 * @vid_hdr: &struct ubi_vid_hdr object where to store the read volume
 * identifier header
 * @verbose: be verbose if the header is corrupted or wasn't found
 *
 * This function reads the volume identifier header from physical eraseblock
 * @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read
 * volume identifier header. The error codes are the same as in
 * 'ubi_io_read_ec_hdr()'.
 *
 * Note, the implementation of this function is also very similar to
 * 'ubi_io_read_ec_hdr()', so refer to the commentaries in
 * 'ubi_io_read_ec_hdr()'.
 */
int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
			struct ubi_vid_hdr *vid_hdr, int verbose)
{
	int err, read_err;
	uint32_t crc, magic, hdr_crc;
	void *p;

	dbg_io("read VID header from PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	/* The VID header lives @vid_hdr_shift bytes into the read buffer */
	p = (char *)vid_hdr - ubi->vid_hdr_shift;
	read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
			  ubi->vid_hdr_alsize);
	if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
		return read_err;

	magic = be32_to_cpu(vid_hdr->magic);
	if (magic != UBI_VID_HDR_MAGIC) {
		if (mtd_is_eccerr(read_err))
			return UBI_IO_BAD_HDR_EBADMSG;

		/* All 0xFF bytes means the PEB carries no VID header */
		if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
			if (verbose)
				ubi_warn("no VID header found at PEB %d, "
					 "only 0xFF bytes", pnum);
			dbg_bld("no VID header found at PEB %d, "
				"only 0xFF bytes", pnum);
			if (!read_err)
				return UBI_IO_FF;
			else
				return UBI_IO_FF_BITFLIPS;
		}

		if (verbose) {
			ubi_warn("bad magic number at PEB %d: %08x instead of "
				 "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
			ubi_dump_vid_hdr(vid_hdr);
		}
		dbg_bld("bad magic number at PEB %d: %08x instead of "
			"%08x", pnum, magic, UBI_VID_HDR_MAGIC);
		return UBI_IO_BAD_HDR;
	}

	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);

	if (hdr_crc != crc) {
		if (verbose) {
			ubi_warn("bad CRC at PEB %d, calculated %#08x, "
				 "read %#08x", pnum, crc, hdr_crc);
			ubi_dump_vid_hdr(vid_hdr);
		}
		dbg_bld("bad CRC at PEB %d, calculated %#08x, "
			"read %#08x", pnum, crc, hdr_crc);
		if (!read_err)
			return UBI_IO_BAD_HDR;
		else
			return UBI_IO_BAD_HDR_EBADMSG;
	}

	err = validate_vid_hdr(ubi, vid_hdr);
	if (err) {
		ubi_err("validation failed for PEB %d", pnum);
		return -EINVAL;
	}

	/* Bit-flips were corrected - report them to force scrubbing */
	return read_err ? UBI_IO_BITFLIPS : 0;
}
1081 | 1080 | ||
1082 | /** | 1081 | /** |
1083 | * ubi_io_write_vid_hdr - write a volume identifier header. | 1082 | * ubi_io_write_vid_hdr - write a volume identifier header. |
1084 | * @ubi: UBI device description object | 1083 | * @ubi: UBI device description object |
1085 | * @pnum: the physical eraseblock number to write to | 1084 | * @pnum: the physical eraseblock number to write to |
1086 | * @vid_hdr: the volume identifier header to write | 1085 | * @vid_hdr: the volume identifier header to write |
1087 | * | 1086 | * |
1088 | * This function writes the volume identifier header described by @vid_hdr to | 1087 | * This function writes the volume identifier header described by @vid_hdr to |
1089 | * physical eraseblock @pnum. This function automatically fills the | 1088 | * physical eraseblock @pnum. This function automatically fills the |
1090 | * @vid_hdr->magic and the @vid_hdr->version fields, as well as calculates | 1089 | * @vid_hdr->magic and the @vid_hdr->version fields, as well as calculates |
1091 | * header CRC checksum and stores it at vid_hdr->hdr_crc. | 1090 | * header CRC checksum and stores it at vid_hdr->hdr_crc. |
1092 | * | 1091 | * |
1093 | * This function returns zero in case of success and a negative error code in | 1092 | * This function returns zero in case of success and a negative error code in |
1094 | * case of failure. If %-EIO is returned, the physical eraseblock probably went | 1093 | * case of failure. If %-EIO is returned, the physical eraseblock probably went |
1095 | * bad. | 1094 | * bad. |
1096 | */ | 1095 | */ |
1097 | int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, | 1096 | int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, |
1098 | struct ubi_vid_hdr *vid_hdr) | 1097 | struct ubi_vid_hdr *vid_hdr) |
1099 | { | 1098 | { |
1100 | int err; | 1099 | int err; |
1101 | uint32_t crc; | 1100 | uint32_t crc; |
1102 | void *p; | 1101 | void *p; |
1103 | 1102 | ||
1104 | dbg_io("write VID header to PEB %d", pnum); | 1103 | dbg_io("write VID header to PEB %d", pnum); |
1105 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); | 1104 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); |
1106 | 1105 | ||
1107 | err = self_check_peb_ec_hdr(ubi, pnum); | 1106 | err = self_check_peb_ec_hdr(ubi, pnum); |
1108 | if (err) | 1107 | if (err) |
1109 | return err; | 1108 | return err; |
1110 | 1109 | ||
1111 | vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC); | 1110 | vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC); |
1112 | vid_hdr->version = UBI_VERSION; | 1111 | vid_hdr->version = UBI_VERSION; |
1113 | crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC); | 1112 | crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC); |
1114 | vid_hdr->hdr_crc = cpu_to_be32(crc); | 1113 | vid_hdr->hdr_crc = cpu_to_be32(crc); |
1115 | 1114 | ||
1116 | err = self_check_vid_hdr(ubi, pnum, vid_hdr); | 1115 | err = self_check_vid_hdr(ubi, pnum, vid_hdr); |
1117 | if (err) | 1116 | if (err) |
1118 | return err; | 1117 | return err; |
1119 | 1118 | ||
1120 | p = (char *)vid_hdr - ubi->vid_hdr_shift; | 1119 | p = (char *)vid_hdr - ubi->vid_hdr_shift; |
1121 | err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset, | 1120 | err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset, |
1122 | ubi->vid_hdr_alsize); | 1121 | ubi->vid_hdr_alsize); |
1123 | return err; | 1122 | return err; |
1124 | } | 1123 | } |
1125 | 1124 | ||
1126 | /** | 1125 | /** |
1127 | * self_check_not_bad - ensure that a physical eraseblock is not bad. | 1126 | * self_check_not_bad - ensure that a physical eraseblock is not bad. |
1128 | * @ubi: UBI device description object | 1127 | * @ubi: UBI device description object |
1129 | * @pnum: physical eraseblock number to check | 1128 | * @pnum: physical eraseblock number to check |
1130 | * | 1129 | * |
1131 | * This function returns zero if the physical eraseblock is good, %-EINVAL if | 1130 | * This function returns zero if the physical eraseblock is good, %-EINVAL if |
1132 | * it is bad and a negative error code if an error occurred. | 1131 | * it is bad and a negative error code if an error occurred. |
1133 | */ | 1132 | */ |
1134 | static int self_check_not_bad(const struct ubi_device *ubi, int pnum) | 1133 | static int self_check_not_bad(const struct ubi_device *ubi, int pnum) |
1135 | { | 1134 | { |
1136 | int err; | 1135 | int err; |
1137 | 1136 | ||
1138 | if (!ubi->dbg->chk_io) | 1137 | if (!ubi->dbg->chk_io) |
1139 | return 0; | 1138 | return 0; |
1140 | 1139 | ||
1141 | err = ubi_io_is_bad(ubi, pnum); | 1140 | err = ubi_io_is_bad(ubi, pnum); |
1142 | if (!err) | 1141 | if (!err) |
1143 | return err; | 1142 | return err; |
1144 | 1143 | ||
1145 | ubi_err("self-check failed for PEB %d", pnum); | 1144 | ubi_err("self-check failed for PEB %d", pnum); |
1146 | dump_stack(); | 1145 | dump_stack(); |
1147 | return err > 0 ? -EINVAL : err; | 1146 | return err > 0 ? -EINVAL : err; |
1148 | } | 1147 | } |
1149 | 1148 | ||
1150 | /** | 1149 | /** |
1151 | * self_check_ec_hdr - check if an erase counter header is all right. | 1150 | * self_check_ec_hdr - check if an erase counter header is all right. |
1152 | * @ubi: UBI device description object | 1151 | * @ubi: UBI device description object |
1153 | * @pnum: physical eraseblock number the erase counter header belongs to | 1152 | * @pnum: physical eraseblock number the erase counter header belongs to |
1154 | * @ec_hdr: the erase counter header to check | 1153 | * @ec_hdr: the erase counter header to check |
1155 | * | 1154 | * |
1156 | * This function returns zero if the erase counter header contains valid | 1155 | * This function returns zero if the erase counter header contains valid |
1157 | * values, and %-EINVAL if not. | 1156 | * values, and %-EINVAL if not. |
1158 | */ | 1157 | */ |
1159 | static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum, | 1158 | static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum, |
1160 | const struct ubi_ec_hdr *ec_hdr) | 1159 | const struct ubi_ec_hdr *ec_hdr) |
1161 | { | 1160 | { |
1162 | int err; | 1161 | int err; |
1163 | uint32_t magic; | 1162 | uint32_t magic; |
1164 | 1163 | ||
1165 | if (!ubi->dbg->chk_io) | 1164 | if (!ubi->dbg->chk_io) |
1166 | return 0; | 1165 | return 0; |
1167 | 1166 | ||
1168 | magic = be32_to_cpu(ec_hdr->magic); | 1167 | magic = be32_to_cpu(ec_hdr->magic); |
1169 | if (magic != UBI_EC_HDR_MAGIC) { | 1168 | if (magic != UBI_EC_HDR_MAGIC) { |
1170 | ubi_err("bad magic %#08x, must be %#08x", | 1169 | ubi_err("bad magic %#08x, must be %#08x", |
1171 | magic, UBI_EC_HDR_MAGIC); | 1170 | magic, UBI_EC_HDR_MAGIC); |
1172 | goto fail; | 1171 | goto fail; |
1173 | } | 1172 | } |
1174 | 1173 | ||
1175 | err = validate_ec_hdr(ubi, ec_hdr); | 1174 | err = validate_ec_hdr(ubi, ec_hdr); |
1176 | if (err) { | 1175 | if (err) { |
1177 | ubi_err("self-check failed for PEB %d", pnum); | 1176 | ubi_err("self-check failed for PEB %d", pnum); |
1178 | goto fail; | 1177 | goto fail; |
1179 | } | 1178 | } |
1180 | 1179 | ||
1181 | return 0; | 1180 | return 0; |
1182 | 1181 | ||
1183 | fail: | 1182 | fail: |
1184 | ubi_dump_ec_hdr(ec_hdr); | 1183 | ubi_dump_ec_hdr(ec_hdr); |
1185 | dump_stack(); | 1184 | dump_stack(); |
1186 | return -EINVAL; | 1185 | return -EINVAL; |
1187 | } | 1186 | } |
1188 | 1187 | ||
/**
 * self_check_peb_ec_hdr - check erase counter header.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 *
 * This function returns zero if the erase counter header is all right and
 * a negative error code if not or if an error occurred.
 */
static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
{
	int err;
	uint32_t crc, hdr_crc;
	struct ubi_ec_hdr *ec_hdr;

	/* Skip the check unless I/O self-checks are enabled */
	if (!ubi->dbg->chk_io)
		return 0;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	/* Bit-flips and ECC errors are tolerated - the CRC check catches real
	 * corruption below */
	err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
		goto exit;

	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
	if (hdr_crc != crc) {
		ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
		ubi_err("self-check failed for PEB %d", pnum);
		ubi_dump_ec_hdr(ec_hdr);
		dump_stack();
		err = -EINVAL;
		goto exit;
	}

	err = self_check_ec_hdr(ubi, pnum, ec_hdr);

exit:
	kfree(ec_hdr);
	return err;
}
1231 | 1230 | ||
1232 | /** | 1231 | /** |
1233 | * self_check_vid_hdr - check that a volume identifier header is all right. | 1232 | * self_check_vid_hdr - check that a volume identifier header is all right. |
1234 | * @ubi: UBI device description object | 1233 | * @ubi: UBI device description object |
1235 | * @pnum: physical eraseblock number the volume identifier header belongs to | 1234 | * @pnum: physical eraseblock number the volume identifier header belongs to |
1236 | * @vid_hdr: the volume identifier header to check | 1235 | * @vid_hdr: the volume identifier header to check |
1237 | * | 1236 | * |
1238 | * This function returns zero if the volume identifier header is all right, and | 1237 | * This function returns zero if the volume identifier header is all right, and |
1239 | * %-EINVAL if not. | 1238 | * %-EINVAL if not. |
1240 | */ | 1239 | */ |
1241 | static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum, | 1240 | static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum, |
1242 | const struct ubi_vid_hdr *vid_hdr) | 1241 | const struct ubi_vid_hdr *vid_hdr) |
1243 | { | 1242 | { |
1244 | int err; | 1243 | int err; |
1245 | uint32_t magic; | 1244 | uint32_t magic; |
1246 | 1245 | ||
1247 | if (!ubi->dbg->chk_io) | 1246 | if (!ubi->dbg->chk_io) |
1248 | return 0; | 1247 | return 0; |
1249 | 1248 | ||
1250 | magic = be32_to_cpu(vid_hdr->magic); | 1249 | magic = be32_to_cpu(vid_hdr->magic); |
1251 | if (magic != UBI_VID_HDR_MAGIC) { | 1250 | if (magic != UBI_VID_HDR_MAGIC) { |
1252 | ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x", | 1251 | ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x", |
1253 | magic, pnum, UBI_VID_HDR_MAGIC); | 1252 | magic, pnum, UBI_VID_HDR_MAGIC); |
1254 | goto fail; | 1253 | goto fail; |
1255 | } | 1254 | } |
1256 | 1255 | ||
1257 | err = validate_vid_hdr(ubi, vid_hdr); | 1256 | err = validate_vid_hdr(ubi, vid_hdr); |
1258 | if (err) { | 1257 | if (err) { |
1259 | ubi_err("self-check failed for PEB %d", pnum); | 1258 | ubi_err("self-check failed for PEB %d", pnum); |
1260 | goto fail; | 1259 | goto fail; |
1261 | } | 1260 | } |
1262 | 1261 | ||
1263 | return err; | 1262 | return err; |
1264 | 1263 | ||
1265 | fail: | 1264 | fail: |
1266 | ubi_err("self-check failed for PEB %d", pnum); | 1265 | ubi_err("self-check failed for PEB %d", pnum); |
1267 | ubi_dump_vid_hdr(vid_hdr); | 1266 | ubi_dump_vid_hdr(vid_hdr); |
1268 | dump_stack(); | 1267 | dump_stack(); |
1269 | return -EINVAL; | 1268 | return -EINVAL; |
1270 | 1269 | ||
1271 | } | 1270 | } |
1272 | 1271 | ||
1273 | /** | 1272 | /** |
1274 | * self_check_peb_vid_hdr - check volume identifier header. | 1273 | * self_check_peb_vid_hdr - check volume identifier header. |
1275 | * @ubi: UBI device description object | 1274 | * @ubi: UBI device description object |
1276 | * @pnum: the physical eraseblock number to check | 1275 | * @pnum: the physical eraseblock number to check |
1277 | * | 1276 | * |
1278 | * This function returns zero if the volume identifier header is all right, | 1277 | * This function returns zero if the volume identifier header is all right, |
1279 | * and a negative error code if not or if an error occurred. | 1278 | * and a negative error code if not or if an error occurred. |
1280 | */ | 1279 | */ |
1281 | static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum) | 1280 | static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum) |
1282 | { | 1281 | { |
1283 | int err; | 1282 | int err; |
1284 | uint32_t crc, hdr_crc; | 1283 | uint32_t crc, hdr_crc; |
1285 | struct ubi_vid_hdr *vid_hdr; | 1284 | struct ubi_vid_hdr *vid_hdr; |
1286 | void *p; | 1285 | void *p; |
1287 | 1286 | ||
1288 | if (!ubi->dbg->chk_io) | 1287 | if (!ubi->dbg->chk_io) |
1289 | return 0; | 1288 | return 0; |
1290 | 1289 | ||
1291 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); | 1290 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
1292 | if (!vid_hdr) | 1291 | if (!vid_hdr) |
1293 | return -ENOMEM; | 1292 | return -ENOMEM; |
1294 | 1293 | ||
1295 | p = (char *)vid_hdr - ubi->vid_hdr_shift; | 1294 | p = (char *)vid_hdr - ubi->vid_hdr_shift; |
1296 | err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, | 1295 | err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, |
1297 | ubi->vid_hdr_alsize); | 1296 | ubi->vid_hdr_alsize); |
1298 | if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) | 1297 | if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) |
1299 | goto exit; | 1298 | goto exit; |
1300 | 1299 | ||
1301 | crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC); | 1300 | crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC); |
1302 | hdr_crc = be32_to_cpu(vid_hdr->hdr_crc); | 1301 | hdr_crc = be32_to_cpu(vid_hdr->hdr_crc); |
1303 | if (hdr_crc != crc) { | 1302 | if (hdr_crc != crc) { |
1304 | ubi_err("bad VID header CRC at PEB %d, calculated %#08x, " | 1303 | ubi_err("bad VID header CRC at PEB %d, calculated %#08x, " |
1305 | "read %#08x", pnum, crc, hdr_crc); | 1304 | "read %#08x", pnum, crc, hdr_crc); |
1306 | ubi_err("self-check failed for PEB %d", pnum); | 1305 | ubi_err("self-check failed for PEB %d", pnum); |
1307 | ubi_dump_vid_hdr(vid_hdr); | 1306 | ubi_dump_vid_hdr(vid_hdr); |
1308 | dump_stack(); | 1307 | dump_stack(); |
1309 | err = -EINVAL; | 1308 | err = -EINVAL; |
1310 | goto exit; | 1309 | goto exit; |
1311 | } | 1310 | } |
1312 | 1311 | ||
1313 | err = self_check_vid_hdr(ubi, pnum, vid_hdr); | 1312 | err = self_check_vid_hdr(ubi, pnum, vid_hdr); |
1314 | 1313 | ||
1315 | exit: | 1314 | exit: |
1316 | ubi_free_vid_hdr(ubi, vid_hdr); | 1315 | ubi_free_vid_hdr(ubi, vid_hdr); |
1317 | return err; | 1316 | return err; |
1318 | } | 1317 | } |
1319 | 1318 | ||
1320 | /** | 1319 | /** |
1321 | * self_check_write - make sure write succeeded. | 1320 | * self_check_write - make sure write succeeded. |
1322 | * @ubi: UBI device description object | 1321 | * @ubi: UBI device description object |
1323 | * @buf: buffer with data which were written | 1322 | * @buf: buffer with data which were written |
1324 | * @pnum: physical eraseblock number the data were written to | 1323 | * @pnum: physical eraseblock number the data were written to |
1325 | * @offset: offset within the physical eraseblock the data were written to | 1324 | * @offset: offset within the physical eraseblock the data were written to |
1326 | * @len: how many bytes were written | 1325 | * @len: how many bytes were written |
1327 | * | 1326 | * |
1328 | * This functions reads data which were recently written and compares it with | 1327 | * This functions reads data which were recently written and compares it with |
1329 | * the original data buffer - the data have to match. Returns zero if the data | 1328 | * the original data buffer - the data have to match. Returns zero if the data |
1330 | * match and a negative error code if not or in case of failure. | 1329 | * match and a negative error code if not or in case of failure. |
1331 | */ | 1330 | */ |
1332 | static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum, | 1331 | static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum, |
1333 | int offset, int len) | 1332 | int offset, int len) |
1334 | { | 1333 | { |
1335 | int err, i; | 1334 | int err, i; |
1336 | size_t read; | 1335 | size_t read; |
1337 | void *buf1; | 1336 | void *buf1; |
1338 | loff_t addr = (loff_t)pnum * ubi->peb_size + offset; | 1337 | loff_t addr = (loff_t)pnum * ubi->peb_size + offset; |
1339 | 1338 | ||
1340 | if (!ubi->dbg->chk_io) | 1339 | if (!ubi->dbg->chk_io) |
1341 | return 0; | 1340 | return 0; |
1342 | 1341 | ||
1343 | buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL); | 1342 | buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL); |
1344 | if (!buf1) { | 1343 | if (!buf1) { |
1345 | ubi_err("cannot allocate memory to check writes"); | 1344 | ubi_err("cannot allocate memory to check writes"); |
1346 | return 0; | 1345 | return 0; |
1347 | } | 1346 | } |
1348 | 1347 | ||
1349 | err = mtd_read(ubi->mtd, addr, len, &read, buf1); | 1348 | err = mtd_read(ubi->mtd, addr, len, &read, buf1); |
1350 | if (err && !mtd_is_bitflip(err)) | 1349 | if (err && !mtd_is_bitflip(err)) |
1351 | goto out_free; | 1350 | goto out_free; |
1352 | 1351 | ||
1353 | for (i = 0; i < len; i++) { | 1352 | for (i = 0; i < len; i++) { |
1354 | uint8_t c = ((uint8_t *)buf)[i]; | 1353 | uint8_t c = ((uint8_t *)buf)[i]; |
1355 | uint8_t c1 = ((uint8_t *)buf1)[i]; | 1354 | uint8_t c1 = ((uint8_t *)buf1)[i]; |
1356 | int dump_len; | 1355 | int dump_len; |
1357 | 1356 | ||
1358 | if (c == c1) | 1357 | if (c == c1) |
1359 | continue; | 1358 | continue; |
1360 | 1359 | ||
1361 | ubi_err("self-check failed for PEB %d:%d, len %d", | 1360 | ubi_err("self-check failed for PEB %d:%d, len %d", |
1362 | pnum, offset, len); | 1361 | pnum, offset, len); |
1363 | ubi_msg("data differ at position %d", i); | 1362 | ubi_msg("data differ at position %d", i); |
1364 | dump_len = max_t(int, 128, len - i); | 1363 | dump_len = max_t(int, 128, len - i); |
1365 | ubi_msg("hex dump of the original buffer from %d to %d", | 1364 | ubi_msg("hex dump of the original buffer from %d to %d", |
1366 | i, i + dump_len); | 1365 | i, i + dump_len); |
1367 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, | 1366 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, |
1368 | buf + i, dump_len, 1); | 1367 | buf + i, dump_len, 1); |
1369 | ubi_msg("hex dump of the read buffer from %d to %d", | 1368 | ubi_msg("hex dump of the read buffer from %d to %d", |
1370 | i, i + dump_len); | 1369 | i, i + dump_len); |
1371 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, | 1370 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, |
1372 | buf1 + i, dump_len, 1); | 1371 | buf1 + i, dump_len, 1); |
1373 | dump_stack(); | 1372 | dump_stack(); |
1374 | err = -EINVAL; | 1373 | err = -EINVAL; |
1375 | goto out_free; | 1374 | goto out_free; |
1376 | } | 1375 | } |
1377 | 1376 | ||
1378 | vfree(buf1); | 1377 | vfree(buf1); |
1379 | return 0; | 1378 | return 0; |
1380 | 1379 | ||
1381 | out_free: | 1380 | out_free: |
1382 | vfree(buf1); | 1381 | vfree(buf1); |
1383 | return err; | 1382 | return err; |
1384 | } | 1383 | } |
1385 | 1384 | ||
1386 | /** | 1385 | /** |
1387 | * ubi_self_check_all_ff - check that a region of flash is empty. | 1386 | * ubi_self_check_all_ff - check that a region of flash is empty. |
1388 | * @ubi: UBI device description object | 1387 | * @ubi: UBI device description object |
1389 | * @pnum: the physical eraseblock number to check | 1388 | * @pnum: the physical eraseblock number to check |
1390 | * @offset: the starting offset within the physical eraseblock to check | 1389 | * @offset: the starting offset within the physical eraseblock to check |
1391 | * @len: the length of the region to check | 1390 | * @len: the length of the region to check |
1392 | * | 1391 | * |
1393 | * This function returns zero if only 0xFF bytes are present at offset | 1392 | * This function returns zero if only 0xFF bytes are present at offset |
1394 | * @offset of the physical eraseblock @pnum, and a negative error code if not | 1393 | * @offset of the physical eraseblock @pnum, and a negative error code if not |
1395 | * or if an error occurred. | 1394 | * or if an error occurred. |
1396 | */ | 1395 | */ |
1397 | int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len) | 1396 | int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len) |
1398 | { | 1397 | { |
1399 | size_t read; | 1398 | size_t read; |
1400 | int err; | 1399 | int err; |
1401 | void *buf; | 1400 | void *buf; |
1402 | loff_t addr = (loff_t)pnum * ubi->peb_size + offset; | 1401 | loff_t addr = (loff_t)pnum * ubi->peb_size + offset; |
1403 | 1402 | ||
1404 | if (!ubi->dbg->chk_io) | 1403 | if (!ubi->dbg->chk_io) |
1405 | return 0; | 1404 | return 0; |
1406 | 1405 | ||
1407 | buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL); | 1406 | buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL); |
1408 | if (!buf) { | 1407 | if (!buf) { |
1409 | ubi_err("cannot allocate memory to check for 0xFFs"); | 1408 | ubi_err("cannot allocate memory to check for 0xFFs"); |
1410 | return 0; | 1409 | return 0; |
1411 | } | 1410 | } |
1412 | 1411 | ||
1413 | err = mtd_read(ubi->mtd, addr, len, &read, buf); | 1412 | err = mtd_read(ubi->mtd, addr, len, &read, buf); |
1414 | if (err && !mtd_is_bitflip(err)) { | 1413 | if (err && !mtd_is_bitflip(err)) { |
1415 | ubi_err("error %d while reading %d bytes from PEB %d:%d, " | 1414 | ubi_err("error %d while reading %d bytes from PEB %d:%d, " |
1416 | "read %zd bytes", err, len, pnum, offset, read); | 1415 | "read %zd bytes", err, len, pnum, offset, read); |
1417 | goto error; | 1416 | goto error; |
1418 | } | 1417 | } |
1419 | 1418 | ||
1420 | err = ubi_check_pattern(buf, 0xFF, len); | 1419 | err = ubi_check_pattern(buf, 0xFF, len); |
1421 | if (err == 0) { | 1420 | if (err == 0) { |
1422 | ubi_err("flash region at PEB %d:%d, length %d does not " | 1421 | ubi_err("flash region at PEB %d:%d, length %d does not " |
1423 | "contain all 0xFF bytes", pnum, offset, len); | 1422 | "contain all 0xFF bytes", pnum, offset, len); |
1424 | goto fail; | 1423 | goto fail; |
1425 | } | 1424 | } |
1426 | 1425 | ||
1427 | vfree(buf); | 1426 | vfree(buf); |
1428 | return 0; | 1427 | return 0; |
1429 | 1428 | ||
1430 | fail: | 1429 | fail: |
1431 | ubi_err("self-check failed for PEB %d", pnum); | 1430 | ubi_err("self-check failed for PEB %d", pnum); |
1432 | ubi_msg("hex dump of the %d-%d region", offset, offset + len); | 1431 | ubi_msg("hex dump of the %d-%d region", offset, offset + len); |
1433 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1); | 1432 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1); |
1434 | err = -EINVAL; | 1433 | err = -EINVAL; |
1435 | error: | 1434 | error: |
1436 | dump_stack(); | 1435 | dump_stack(); |
1437 | vfree(buf); | 1436 | vfree(buf); |
1438 | return err; | 1437 | return err; |
1439 | } | 1438 | } |
1440 | 1439 |
drivers/mtd/ubi/scan.c
1 | /* | 1 | /* |
2 | * Copyright (c) International Business Machines Corp., 2006 | 2 | * Copyright (c) International Business Machines Corp., 2006 |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
6 | * the Free Software Foundation; either version 2 of the License, or | 6 | * the Free Software Foundation; either version 2 of the License, or |
7 | * (at your option) any later version. | 7 | * (at your option) any later version. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, | 9 | * This program is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See |
12 | * the GNU General Public License for more details. | 12 | * the GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
17 | * | 17 | * |
18 | * Author: Artem Bityutskiy (Битюцкий Артём) | 18 | * Author: Artem Bityutskiy (Битюцкий Артём) |
19 | */ | 19 | */ |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * UBI scanning sub-system. | 22 | * UBI attaching sub-system. |
23 | * | 23 | * |
24 | * This sub-system is responsible for scanning the flash media, checking UBI | 24 | * This sub-system is responsible for attaching MTD devices and it also |
25 | * headers and providing complete information about the UBI flash image. | 25 | * implements flash media scanning. |
26 | * | 26 | * |
27 | * The attaching information is represented by a &struct ubi_attach_info' | 27 | * The attaching information is represented by a &struct ubi_attach_info' |
28 | * object. Information about found volumes is represented by | 28 | * object. Information about volumes is represented by &struct ubi_ainf_volume |
29 | * &struct ubi_ainf_volume objects which are kept in volume RB-tree with root | 29 | * objects which are kept in volume RB-tree with root at the @volumes field. |
30 | * at the @volumes field. The RB-tree is indexed by the volume ID. | 30 | * The RB-tree is indexed by the volume ID. |
31 | * | 31 | * |
32 | * Scanned logical eraseblocks are represented by &struct ubi_ainf_peb objects. | 32 | * Logical eraseblocks are represented by &struct ubi_ainf_peb objects. These |
33 | * These objects are kept in per-volume RB-trees with the root at the | 33 | * objects are kept in per-volume RB-trees with the root at the corresponding |
34 | * corresponding &struct ubi_ainf_volume object. To put it differently, we keep | 34 | * &struct ubi_ainf_volume object. To put it differently, we keep an RB-tree of |
35 | * an RB-tree of per-volume objects and each of these objects is the root of | 35 | * per-volume objects and each of these objects is the root of RB-tree of |
36 | * RB-tree of per-eraseblock objects. | 36 | * per-LEB objects. |
37 | * | 37 | * |
38 | * Corrupted physical eraseblocks are put to the @corr list, free physical | 38 | * Corrupted physical eraseblocks are put to the @corr list, free physical |
39 | * eraseblocks are put to the @free list and the physical eraseblock to be | 39 | * eraseblocks are put to the @free list and the physical eraseblock to be |
40 | * erased are put to the @erase list. | 40 | * erased are put to the @erase list. |
41 | * | 41 | * |
42 | * About corruptions | 42 | * About corruptions |
43 | * ~~~~~~~~~~~~~~~~~ | 43 | * ~~~~~~~~~~~~~~~~~ |
44 | * | 44 | * |
45 | * UBI protects EC and VID headers with CRC-32 checksums, so it can detect | 45 | * UBI protects EC and VID headers with CRC-32 checksums, so it can detect |
46 | * whether the headers are corrupted or not. Sometimes UBI also protects the | 46 | * whether the headers are corrupted or not. Sometimes UBI also protects the |
47 | * data with CRC-32, e.g., when it executes the atomic LEB change operation, or | 47 | * data with CRC-32, e.g., when it executes the atomic LEB change operation, or |
48 | * when it moves the contents of a PEB for wear-leveling purposes. | 48 | * when it moves the contents of a PEB for wear-leveling purposes. |
49 | * | 49 | * |
50 | * UBI tries to distinguish between 2 types of corruptions. | 50 | * UBI tries to distinguish between 2 types of corruptions. |
51 | * | 51 | * |
52 | * 1. Corruptions caused by power cuts. These are expected corruptions and UBI | 52 | * 1. Corruptions caused by power cuts. These are expected corruptions and UBI |
53 | * tries to handle them gracefully, without printing too many warnings and | 53 | * tries to handle them gracefully, without printing too many warnings and |
54 | * error messages. The idea is that we do not lose important data in these case | 54 | * error messages. The idea is that we do not lose important data in these |
55 | * - we may lose only the data which was being written to the media just before | 55 | * cases - we may lose only the data which were being written to the media just |
56 | * the power cut happened, and the upper layers (e.g., UBIFS) are supposed to | 56 | * before the power cut happened, and the upper layers (e.g., UBIFS) are |
57 | * handle such data losses (e.g., by using the FS journal). | 57 | * supposed to handle such data losses (e.g., by using the FS journal). |
58 | * | 58 | * |
59 | * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like | 59 | * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like |
60 | * the reason is a power cut, UBI puts this PEB to the @erase list, and all | 60 | * the reason is a power cut, UBI puts this PEB to the @erase list, and all |
61 | * PEBs in the @erase list are scheduled for erasure later. | 61 | * PEBs in the @erase list are scheduled for erasure later. |
62 | * | 62 | * |
63 | * 2. Unexpected corruptions which are not caused by power cuts. During | 63 | * 2. Unexpected corruptions which are not caused by power cuts. During |
64 | * scanning, such PEBs are put to the @corr list and UBI preserves them. | 64 | * attaching, such PEBs are put to the @corr list and UBI preserves them. |
65 | * Obviously, this lessens the amount of available PEBs, and if at some point | 65 | * Obviously, this lessens the amount of available PEBs, and if at some point |
66 | * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs | 66 | * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs |
67 | * about such PEBs every time the MTD device is attached. | 67 | * about such PEBs every time the MTD device is attached. |
68 | * | 68 | * |
69 | * However, it is difficult to reliably distinguish between these types of | 69 | * However, it is difficult to reliably distinguish between these types of |
70 | * corruptions and UBI's strategy is as follows. UBI assumes corruption type 2 | 70 | * corruptions and UBI's strategy is as follows (in case of attaching by |
71 | * if the VID header is corrupted and the data area does not contain all 0xFFs, | 71 | * scanning). UBI assumes corruption type 2 if the VID header is corrupted and |
72 | * and there were no bit-flips or integrity errors while reading the data area. | 72 | * the data area does not contain all 0xFFs, and there were no bit-flips or |
73 | * Otherwise UBI assumes corruption type 1. So the decision criteria are as | 73 | * integrity errors (e.g., ECC errors in case of NAND) while reading the data |
74 | * follows. | 74 | * area. Otherwise UBI assumes corruption type 1. So the decision criteria |
75 | * o If the data area contains only 0xFFs, there is no data, and it is safe | 75 | * are as follows. |
76 | * o If the data area contains only 0xFFs, there are no data, and it is safe | ||
76 | * to just erase this PEB - this is corruption type 1. | 77 | * to just erase this PEB - this is corruption type 1. |
77 | * o If the data area has bit-flips or data integrity errors (ECC errors on | 78 | * o If the data area has bit-flips or data integrity errors (ECC errors on |
78 | * NAND), it is probably a PEB which was being erased when power cut | 79 | * NAND), it is probably a PEB which was being erased when power cut |
79 | * happened, so this is corruption type 1. However, this is just a guess, | 80 | * happened, so this is corruption type 1. However, this is just a guess, |
80 | * which might be wrong. | 81 | * which might be wrong. |
81 | * o Otherwise this it corruption type 2. | 82 | * o Otherwise this it corruption type 2. |
82 | */ | 83 | */ |
83 | 84 | ||
84 | #include <linux/err.h> | 85 | #include <linux/err.h> |
85 | #include <linux/slab.h> | 86 | #include <linux/slab.h> |
86 | #include <linux/crc32.h> | 87 | #include <linux/crc32.h> |
87 | #include <linux/math64.h> | 88 | #include <linux/math64.h> |
88 | #include <linux/random.h> | 89 | #include <linux/random.h> |
89 | #include "ubi.h" | 90 | #include "ubi.h" |
90 | 91 | ||
91 | static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai); | 92 | static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai); |
92 | 93 | ||
93 | /* Temporary variables used during scanning */ | 94 | /* Temporary variables used during scanning */ |
94 | static struct ubi_ec_hdr *ech; | 95 | static struct ubi_ec_hdr *ech; |
95 | static struct ubi_vid_hdr *vidh; | 96 | static struct ubi_vid_hdr *vidh; |
96 | 97 | ||
97 | /** | 98 | /** |
98 | * add_to_list - add physical eraseblock to a list. | 99 | * add_to_list - add physical eraseblock to a list. |
99 | * @ai: attaching information | 100 | * @ai: attaching information |
100 | * @pnum: physical eraseblock number to add | 101 | * @pnum: physical eraseblock number to add |
101 | * @ec: erase counter of the physical eraseblock | 102 | * @ec: erase counter of the physical eraseblock |
102 | * @to_head: if not zero, add to the head of the list | 103 | * @to_head: if not zero, add to the head of the list |
103 | * @list: the list to add to | 104 | * @list: the list to add to |
104 | * | 105 | * |
105 | * This function adds physical eraseblock @pnum to free, erase, or alien lists. | 106 | * This function allocates a 'struct ubi_ainf_peb' object for physical |
107 | * eraseblock @pnum and adds it to the "free", "erase", or "alien" lists. | ||
106 | * If @to_head is not zero, PEB will be added to the head of the list, which | 108 | * If @to_head is not zero, PEB will be added to the head of the list, which |
107 | * basically means it will be processed first later. E.g., we add corrupted | 109 | * basically means it will be processed first later. E.g., we add corrupted |
108 | * PEBs (corrupted due to power cuts) to the head of the erase list to make | 110 | * PEBs (corrupted due to power cuts) to the head of the erase list to make |
109 | * sure we erase them first and get rid of corruptions ASAP. This function | 111 | * sure we erase them first and get rid of corruptions ASAP. This function |
110 | * returns zero in case of success and a negative error code in case of | 112 | * returns zero in case of success and a negative error code in case of |
111 | * failure. | 113 | * failure. |
112 | */ | 114 | */ |
113 | static int add_to_list(struct ubi_attach_info *ai, int pnum, int ec, | 115 | static int add_to_list(struct ubi_attach_info *ai, int pnum, int ec, |
114 | int to_head, struct list_head *list) | 116 | int to_head, struct list_head *list) |
115 | { | 117 | { |
116 | struct ubi_ainf_peb *aeb; | 118 | struct ubi_ainf_peb *aeb; |
117 | 119 | ||
118 | if (list == &ai->free) { | 120 | if (list == &ai->free) { |
119 | dbg_bld("add to free: PEB %d, EC %d", pnum, ec); | 121 | dbg_bld("add to free: PEB %d, EC %d", pnum, ec); |
120 | } else if (list == &ai->erase) { | 122 | } else if (list == &ai->erase) { |
121 | dbg_bld("add to erase: PEB %d, EC %d", pnum, ec); | 123 | dbg_bld("add to erase: PEB %d, EC %d", pnum, ec); |
122 | } else if (list == &ai->alien) { | 124 | } else if (list == &ai->alien) { |
123 | dbg_bld("add to alien: PEB %d, EC %d", pnum, ec); | 125 | dbg_bld("add to alien: PEB %d, EC %d", pnum, ec); |
124 | ai->alien_peb_count += 1; | 126 | ai->alien_peb_count += 1; |
125 | } else | 127 | } else |
126 | BUG(); | 128 | BUG(); |
127 | 129 | ||
128 | aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); | 130 | aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); |
129 | if (!aeb) | 131 | if (!aeb) |
130 | return -ENOMEM; | 132 | return -ENOMEM; |
131 | 133 | ||
132 | aeb->pnum = pnum; | 134 | aeb->pnum = pnum; |
133 | aeb->ec = ec; | 135 | aeb->ec = ec; |
134 | if (to_head) | 136 | if (to_head) |
135 | list_add(&aeb->u.list, list); | 137 | list_add(&aeb->u.list, list); |
136 | else | 138 | else |
137 | list_add_tail(&aeb->u.list, list); | 139 | list_add_tail(&aeb->u.list, list); |
138 | return 0; | 140 | return 0; |
139 | } | 141 | } |
140 | 142 | ||
141 | /** | 143 | /** |
142 | * add_corrupted - add a corrupted physical eraseblock. | 144 | * add_corrupted - add a corrupted physical eraseblock. |
143 | * @ai: attaching information | 145 | * @ai: attaching information |
144 | * @pnum: physical eraseblock number to add | 146 | * @pnum: physical eraseblock number to add |
145 | * @ec: erase counter of the physical eraseblock | 147 | * @ec: erase counter of the physical eraseblock |
146 | * | 148 | * |
147 | * This function adds corrupted physical eraseblock @pnum to the 'corr' list. | 149 | * This function allocates a 'struct ubi_ainf_peb' object for a corrupted |
148 | * The corruption was presumably not caused by a power cut. Returns zero in | 150 | * physical eraseblock @pnum and adds it to the 'corr' list. The corruption |
149 | * case of success and a negative error code in case of failure. | 151 | * was presumably not caused by a power cut. Returns zero in case of success |
152 | * and a negative error code in case of failure. | ||
150 | */ | 153 | */ |
151 | static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec) | 154 | static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec) |
152 | { | 155 | { |
153 | struct ubi_ainf_peb *aeb; | 156 | struct ubi_ainf_peb *aeb; |
154 | 157 | ||
155 | dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec); | 158 | dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec); |
156 | 159 | ||
157 | aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); | 160 | aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); |
158 | if (!aeb) | 161 | if (!aeb) |
159 | return -ENOMEM; | 162 | return -ENOMEM; |
160 | 163 | ||
161 | ai->corr_peb_count += 1; | 164 | ai->corr_peb_count += 1; |
162 | aeb->pnum = pnum; | 165 | aeb->pnum = pnum; |
163 | aeb->ec = ec; | 166 | aeb->ec = ec; |
164 | list_add(&aeb->u.list, &ai->corr); | 167 | list_add(&aeb->u.list, &ai->corr); |
165 | return 0; | 168 | return 0; |
166 | } | 169 | } |
167 | 170 | ||
168 | /** | 171 | /** |
169 | * validate_vid_hdr - check volume identifier header. | 172 | * validate_vid_hdr - check volume identifier header. |
170 | * @vid_hdr: the volume identifier header to check | 173 | * @vid_hdr: the volume identifier header to check |
171 | * @av: information about the volume this logical eraseblock belongs to | 174 | * @av: information about the volume this logical eraseblock belongs to |
172 | * @pnum: physical eraseblock number the VID header came from | 175 | * @pnum: physical eraseblock number the VID header came from |
173 | * | 176 | * |
174 | * This function checks that data stored in @vid_hdr is consistent. Returns | 177 | * This function checks that data stored in @vid_hdr is consistent. Returns |
175 | * non-zero if an inconsistency was found and zero if not. | 178 | * non-zero if an inconsistency was found and zero if not. |
176 | * | 179 | * |
177 | * Note, UBI does sanity check of everything it reads from the flash media. | 180 | * Note, UBI does sanity check of everything it reads from the flash media. |
178 | * Most of the checks are done in the I/O sub-system. Here we check that the | 181 | * Most of the checks are done in the I/O sub-system. Here we check that the |
179 | * information in the VID header is consistent to the information in other VID | 182 | * information in the VID header is consistent to the information in other VID |
180 | * headers of the same volume. | 183 | * headers of the same volume. |
181 | */ | 184 | */ |
182 | static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr, | 185 | static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr, |
183 | const struct ubi_ainf_volume *av, int pnum) | 186 | const struct ubi_ainf_volume *av, int pnum) |
184 | { | 187 | { |
185 | int vol_type = vid_hdr->vol_type; | 188 | int vol_type = vid_hdr->vol_type; |
186 | int vol_id = be32_to_cpu(vid_hdr->vol_id); | 189 | int vol_id = be32_to_cpu(vid_hdr->vol_id); |
187 | int used_ebs = be32_to_cpu(vid_hdr->used_ebs); | 190 | int used_ebs = be32_to_cpu(vid_hdr->used_ebs); |
188 | int data_pad = be32_to_cpu(vid_hdr->data_pad); | 191 | int data_pad = be32_to_cpu(vid_hdr->data_pad); |
189 | 192 | ||
190 | if (av->leb_count != 0) { | 193 | if (av->leb_count != 0) { |
191 | int av_vol_type; | 194 | int av_vol_type; |
192 | 195 | ||
193 | /* | 196 | /* |
194 | * This is not the first logical eraseblock belonging to this | 197 | * This is not the first logical eraseblock belonging to this |
195 | * volume. Ensure that the data in its VID header is consistent | 198 | * volume. Ensure that the data in its VID header is consistent |
196 | * to the data in previous logical eraseblock headers. | 199 | * to the data in previous logical eraseblock headers. |
197 | */ | 200 | */ |
198 | 201 | ||
199 | if (vol_id != av->vol_id) { | 202 | if (vol_id != av->vol_id) { |
200 | ubi_err("inconsistent vol_id"); | 203 | ubi_err("inconsistent vol_id"); |
201 | goto bad; | 204 | goto bad; |
202 | } | 205 | } |
203 | 206 | ||
204 | if (av->vol_type == UBI_STATIC_VOLUME) | 207 | if (av->vol_type == UBI_STATIC_VOLUME) |
205 | av_vol_type = UBI_VID_STATIC; | 208 | av_vol_type = UBI_VID_STATIC; |
206 | else | 209 | else |
207 | av_vol_type = UBI_VID_DYNAMIC; | 210 | av_vol_type = UBI_VID_DYNAMIC; |
208 | 211 | ||
209 | if (vol_type != av_vol_type) { | 212 | if (vol_type != av_vol_type) { |
210 | ubi_err("inconsistent vol_type"); | 213 | ubi_err("inconsistent vol_type"); |
211 | goto bad; | 214 | goto bad; |
212 | } | 215 | } |
213 | 216 | ||
214 | if (used_ebs != av->used_ebs) { | 217 | if (used_ebs != av->used_ebs) { |
215 | ubi_err("inconsistent used_ebs"); | 218 | ubi_err("inconsistent used_ebs"); |
216 | goto bad; | 219 | goto bad; |
217 | } | 220 | } |
218 | 221 | ||
219 | if (data_pad != av->data_pad) { | 222 | if (data_pad != av->data_pad) { |
220 | ubi_err("inconsistent data_pad"); | 223 | ubi_err("inconsistent data_pad"); |
221 | goto bad; | 224 | goto bad; |
222 | } | 225 | } |
223 | } | 226 | } |
224 | 227 | ||
225 | return 0; | 228 | return 0; |
226 | 229 | ||
227 | bad: | 230 | bad: |
228 | ubi_err("inconsistent VID header at PEB %d", pnum); | 231 | ubi_err("inconsistent VID header at PEB %d", pnum); |
229 | ubi_dump_vid_hdr(vid_hdr); | 232 | ubi_dump_vid_hdr(vid_hdr); |
230 | ubi_dump_av(av); | 233 | ubi_dump_av(av); |
231 | return -EINVAL; | 234 | return -EINVAL; |
232 | } | 235 | } |
233 | 236 | ||
234 | /** | 237 | /** |
235 | * add_volume - add volume to the attaching information. | 238 | * add_volume - add volume to the attaching information. |
236 | * @ai: attaching information | 239 | * @ai: attaching information |
237 | * @vol_id: ID of the volume to add | 240 | * @vol_id: ID of the volume to add |
238 | * @pnum: physical eraseblock number | 241 | * @pnum: physical eraseblock number |
239 | * @vid_hdr: volume identifier header | 242 | * @vid_hdr: volume identifier header |
240 | * | 243 | * |
241 | * If the volume corresponding to the @vid_hdr logical eraseblock is already | 244 | * If the volume corresponding to the @vid_hdr logical eraseblock is already |
242 | * present in the attaching information, this function does nothing. Otherwise | 245 | * present in the attaching information, this function does nothing. Otherwise |
243 | * it adds corresponding volume to the attaching information. Returns a pointer | 246 | * it adds corresponding volume to the attaching information. Returns a pointer |
244 | * to the scanning volume object in case of success and a negative error code | 247 | * to the allocated "av" object in case of success and a negative error code in |
245 | * in case of failure. | 248 | * case of failure. |
246 | */ | 249 | */ |
247 | static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai, | 250 | static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai, |
248 | int vol_id, int pnum, | 251 | int vol_id, int pnum, |
249 | const struct ubi_vid_hdr *vid_hdr) | 252 | const struct ubi_vid_hdr *vid_hdr) |
250 | { | 253 | { |
251 | struct ubi_ainf_volume *av; | 254 | struct ubi_ainf_volume *av; |
252 | struct rb_node **p = &ai->volumes.rb_node, *parent = NULL; | 255 | struct rb_node **p = &ai->volumes.rb_node, *parent = NULL; |
253 | 256 | ||
254 | ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id)); | 257 | ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id)); |
255 | 258 | ||
256 | /* Walk the volume RB-tree to look if this volume is already present */ | 259 | /* Walk the volume RB-tree to look if this volume is already present */ |
257 | while (*p) { | 260 | while (*p) { |
258 | parent = *p; | 261 | parent = *p; |
259 | av = rb_entry(parent, struct ubi_ainf_volume, rb); | 262 | av = rb_entry(parent, struct ubi_ainf_volume, rb); |
260 | 263 | ||
261 | if (vol_id == av->vol_id) | 264 | if (vol_id == av->vol_id) |
262 | return av; | 265 | return av; |
263 | 266 | ||
264 | if (vol_id > av->vol_id) | 267 | if (vol_id > av->vol_id) |
265 | p = &(*p)->rb_left; | 268 | p = &(*p)->rb_left; |
266 | else | 269 | else |
267 | p = &(*p)->rb_right; | 270 | p = &(*p)->rb_right; |
268 | } | 271 | } |
269 | 272 | ||
270 | /* The volume is absent - add it */ | 273 | /* The volume is absent - add it */ |
271 | av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL); | 274 | av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL); |
272 | if (!av) | 275 | if (!av) |
273 | return ERR_PTR(-ENOMEM); | 276 | return ERR_PTR(-ENOMEM); |
274 | 277 | ||
275 | av->highest_lnum = av->leb_count = 0; | 278 | av->highest_lnum = av->leb_count = 0; |
276 | av->vol_id = vol_id; | 279 | av->vol_id = vol_id; |
277 | av->root = RB_ROOT; | 280 | av->root = RB_ROOT; |
278 | av->used_ebs = be32_to_cpu(vid_hdr->used_ebs); | 281 | av->used_ebs = be32_to_cpu(vid_hdr->used_ebs); |
279 | av->data_pad = be32_to_cpu(vid_hdr->data_pad); | 282 | av->data_pad = be32_to_cpu(vid_hdr->data_pad); |
280 | av->compat = vid_hdr->compat; | 283 | av->compat = vid_hdr->compat; |
281 | av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME | 284 | av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME |
282 | : UBI_STATIC_VOLUME; | 285 | : UBI_STATIC_VOLUME; |
283 | if (vol_id > ai->highest_vol_id) | 286 | if (vol_id > ai->highest_vol_id) |
284 | ai->highest_vol_id = vol_id; | 287 | ai->highest_vol_id = vol_id; |
285 | 288 | ||
286 | rb_link_node(&av->rb, parent, p); | 289 | rb_link_node(&av->rb, parent, p); |
287 | rb_insert_color(&av->rb, &ai->volumes); | 290 | rb_insert_color(&av->rb, &ai->volumes); |
288 | ai->vols_found += 1; | 291 | ai->vols_found += 1; |
289 | dbg_bld("added volume %d", vol_id); | 292 | dbg_bld("added volume %d", vol_id); |
290 | return av; | 293 | return av; |
291 | } | 294 | } |
292 | 295 | ||
293 | /** | 296 | /** |
294 | * compare_lebs - find out which logical eraseblock is newer. | 297 | * compare_lebs - find out which logical eraseblock is newer. |
295 | * @ubi: UBI device description object | 298 | * @ubi: UBI device description object |
296 | * @aeb: first logical eraseblock to compare | 299 | * @aeb: first logical eraseblock to compare |
297 | * @pnum: physical eraseblock number of the second logical eraseblock to | 300 | * @pnum: physical eraseblock number of the second logical eraseblock to |
298 | * compare | 301 | * compare |
299 | * @vid_hdr: volume identifier header of the second logical eraseblock | 302 | * @vid_hdr: volume identifier header of the second logical eraseblock |
300 | * | 303 | * |
301 | * This function compares 2 copies of a LEB and informs which one is newer. In | 304 | * This function compares 2 copies of a LEB and informs which one is newer. In |
302 | * case of success this function returns a positive value, in case of failure, a | 305 | * case of success this function returns a positive value, in case of failure, a |
303 | * negative error code is returned. The success return codes use the following | 306 | * negative error code is returned. The success return codes use the following |
304 | * bits: | 307 | * bits: |
305 | * o bit 0 is cleared: the first PEB (described by @aeb) is newer than the | 308 | * o bit 0 is cleared: the first PEB (described by @aeb) is newer than the |
306 | * second PEB (described by @pnum and @vid_hdr); | 309 | * second PEB (described by @pnum and @vid_hdr); |
307 | * o bit 0 is set: the second PEB is newer; | 310 | * o bit 0 is set: the second PEB is newer; |
308 | * o bit 1 is cleared: no bit-flips were detected in the newer LEB; | 311 | * o bit 1 is cleared: no bit-flips were detected in the newer LEB; |
309 | * o bit 1 is set: bit-flips were detected in the newer LEB; | 312 | * o bit 1 is set: bit-flips were detected in the newer LEB; |
310 | * o bit 2 is cleared: the older LEB is not corrupted; | 313 | * o bit 2 is cleared: the older LEB is not corrupted; |
311 | * o bit 2 is set: the older LEB is corrupted. | 314 | * o bit 2 is set: the older LEB is corrupted. |
312 | */ | 315 | */ |
313 | static int compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, | 316 | static int compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, |
314 | int pnum, const struct ubi_vid_hdr *vid_hdr) | 317 | int pnum, const struct ubi_vid_hdr *vid_hdr) |
315 | { | 318 | { |
316 | void *buf; | 319 | void *buf; |
317 | int len, err, second_is_newer, bitflips = 0, corrupted = 0; | 320 | int len, err, second_is_newer, bitflips = 0, corrupted = 0; |
318 | uint32_t data_crc, crc; | 321 | uint32_t data_crc, crc; |
319 | struct ubi_vid_hdr *vh = NULL; | 322 | struct ubi_vid_hdr *vh = NULL; |
320 | unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum); | 323 | unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum); |
321 | 324 | ||
322 | if (sqnum2 == aeb->sqnum) { | 325 | if (sqnum2 == aeb->sqnum) { |
323 | /* | 326 | /* |
324 | * This must be a really ancient UBI image which has been | 327 | * This must be a really ancient UBI image which has been |
325 | * created before sequence numbers support has been added. At | 328 | * created before sequence numbers support has been added. At |
326 | * that times we used 32-bit LEB versions stored in logical | 329 | * that times we used 32-bit LEB versions stored in logical |
327 | * eraseblocks. That was before UBI got into mainline. We do not | 330 | * eraseblocks. That was before UBI got into mainline. We do not |
328 | * support these images anymore. Well, those images still work, | 331 | * support these images anymore. Well, those images still work, |
329 | * but only if no unclean reboots happened. | 332 | * but only if no unclean reboots happened. |
330 | */ | 333 | */ |
331 | ubi_err("unsupported on-flash UBI format\n"); | 334 | ubi_err("unsupported on-flash UBI format\n"); |
332 | return -EINVAL; | 335 | return -EINVAL; |
333 | } | 336 | } |
334 | 337 | ||
335 | /* Obviously the LEB with lower sequence counter is older */ | 338 | /* Obviously the LEB with lower sequence counter is older */ |
336 | second_is_newer = (sqnum2 > aeb->sqnum); | 339 | second_is_newer = (sqnum2 > aeb->sqnum); |
337 | 340 | ||
338 | /* | 341 | /* |
339 | * Now we know which copy is newer. If the copy flag of the PEB with | 342 | * Now we know which copy is newer. If the copy flag of the PEB with |
340 | * newer version is not set, then we just return, otherwise we have to | 343 | * newer version is not set, then we just return, otherwise we have to |
341 | * check data CRC. For the second PEB we already have the VID header, | 344 | * check data CRC. For the second PEB we already have the VID header, |
342 | * for the first one - we'll need to re-read it from flash. | 345 | * for the first one - we'll need to re-read it from flash. |
343 | * | 346 | * |
344 | * Note: this may be optimized so that we wouldn't read twice. | 347 | * Note: this may be optimized so that we wouldn't read twice. |
345 | */ | 348 | */ |
346 | 349 | ||
347 | if (second_is_newer) { | 350 | if (second_is_newer) { |
348 | if (!vid_hdr->copy_flag) { | 351 | if (!vid_hdr->copy_flag) { |
349 | /* It is not a copy, so it is newer */ | 352 | /* It is not a copy, so it is newer */ |
350 | dbg_bld("second PEB %d is newer, copy_flag is unset", | 353 | dbg_bld("second PEB %d is newer, copy_flag is unset", |
351 | pnum); | 354 | pnum); |
352 | return 1; | 355 | return 1; |
353 | } | 356 | } |
354 | } else { | 357 | } else { |
355 | if (!aeb->copy_flag) { | 358 | if (!aeb->copy_flag) { |
356 | /* It is not a copy, so it is newer */ | 359 | /* It is not a copy, so it is newer */ |
357 | dbg_bld("first PEB %d is newer, copy_flag is unset", | 360 | dbg_bld("first PEB %d is newer, copy_flag is unset", |
358 | pnum); | 361 | pnum); |
359 | return bitflips << 1; | 362 | return bitflips << 1; |
360 | } | 363 | } |
361 | 364 | ||
362 | vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); | 365 | vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); |
363 | if (!vh) | 366 | if (!vh) |
364 | return -ENOMEM; | 367 | return -ENOMEM; |
365 | 368 | ||
366 | pnum = aeb->pnum; | 369 | pnum = aeb->pnum; |
367 | err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); | 370 | err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); |
368 | if (err) { | 371 | if (err) { |
369 | if (err == UBI_IO_BITFLIPS) | 372 | if (err == UBI_IO_BITFLIPS) |
370 | bitflips = 1; | 373 | bitflips = 1; |
371 | else { | 374 | else { |
372 | ubi_err("VID of PEB %d header is bad, but it " | 375 | ubi_err("VID of PEB %d header is bad, but it " |
373 | "was OK earlier, err %d", pnum, err); | 376 | "was OK earlier, err %d", pnum, err); |
374 | if (err > 0) | 377 | if (err > 0) |
375 | err = -EIO; | 378 | err = -EIO; |
376 | 379 | ||
377 | goto out_free_vidh; | 380 | goto out_free_vidh; |
378 | } | 381 | } |
379 | } | 382 | } |
380 | 383 | ||
381 | vid_hdr = vh; | 384 | vid_hdr = vh; |
382 | } | 385 | } |
383 | 386 | ||
384 | /* Read the data of the copy and check the CRC */ | 387 | /* Read the data of the copy and check the CRC */ |
385 | 388 | ||
386 | len = be32_to_cpu(vid_hdr->data_size); | 389 | len = be32_to_cpu(vid_hdr->data_size); |
387 | buf = vmalloc(len); | 390 | buf = vmalloc(len); |
388 | if (!buf) { | 391 | if (!buf) { |
389 | err = -ENOMEM; | 392 | err = -ENOMEM; |
390 | goto out_free_vidh; | 393 | goto out_free_vidh; |
391 | } | 394 | } |
392 | 395 | ||
393 | err = ubi_io_read_data(ubi, buf, pnum, 0, len); | 396 | err = ubi_io_read_data(ubi, buf, pnum, 0, len); |
394 | if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) | 397 | if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) |
395 | goto out_free_buf; | 398 | goto out_free_buf; |
396 | 399 | ||
397 | data_crc = be32_to_cpu(vid_hdr->data_crc); | 400 | data_crc = be32_to_cpu(vid_hdr->data_crc); |
398 | crc = crc32(UBI_CRC32_INIT, buf, len); | 401 | crc = crc32(UBI_CRC32_INIT, buf, len); |
399 | if (crc != data_crc) { | 402 | if (crc != data_crc) { |
400 | dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x", | 403 | dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x", |
401 | pnum, crc, data_crc); | 404 | pnum, crc, data_crc); |
402 | corrupted = 1; | 405 | corrupted = 1; |
403 | bitflips = 0; | 406 | bitflips = 0; |
404 | second_is_newer = !second_is_newer; | 407 | second_is_newer = !second_is_newer; |
405 | } else { | 408 | } else { |
406 | dbg_bld("PEB %d CRC is OK", pnum); | 409 | dbg_bld("PEB %d CRC is OK", pnum); |
407 | bitflips = !!err; | 410 | bitflips = !!err; |
408 | } | 411 | } |
409 | 412 | ||
410 | vfree(buf); | 413 | vfree(buf); |
411 | ubi_free_vid_hdr(ubi, vh); | 414 | ubi_free_vid_hdr(ubi, vh); |
412 | 415 | ||
413 | if (second_is_newer) | 416 | if (second_is_newer) |
414 | dbg_bld("second PEB %d is newer, copy_flag is set", pnum); | 417 | dbg_bld("second PEB %d is newer, copy_flag is set", pnum); |
415 | else | 418 | else |
416 | dbg_bld("first PEB %d is newer, copy_flag is set", pnum); | 419 | dbg_bld("first PEB %d is newer, copy_flag is set", pnum); |
417 | 420 | ||
418 | return second_is_newer | (bitflips << 1) | (corrupted << 2); | 421 | return second_is_newer | (bitflips << 1) | (corrupted << 2); |
419 | 422 | ||
420 | out_free_buf: | 423 | out_free_buf: |
421 | vfree(buf); | 424 | vfree(buf); |
422 | out_free_vidh: | 425 | out_free_vidh: |
423 | ubi_free_vid_hdr(ubi, vh); | 426 | ubi_free_vid_hdr(ubi, vh); |
424 | return err; | 427 | return err; |
425 | } | 428 | } |
426 | 429 | ||
/**
 * ubi_add_to_av - add used physical eraseblock to the attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 * @pnum: the physical eraseblock number
 * @ec: erase counter
 * @vid_hdr: the volume identifier header
 * @bitflips: if bit-flips were detected when this physical eraseblock was read
 *
 * This function adds information about a used physical eraseblock to the
 * 'used' tree of the corresponding volume. The function is rather complex
 * because it has to handle cases when this is not the first physical
 * eraseblock belonging to the same logical eraseblock, and the newer one has
 * to be picked, while the older one has to be dropped. This function returns
 * zero in case of success and a negative error code in case of failure.
 */
int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
		  int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
{
	int err, vol_id, lnum;
	unsigned long long sqnum;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb;
	struct rb_node **p, *parent = NULL;

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);
	sqnum = be64_to_cpu(vid_hdr->sqnum);

	dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
		pnum, vol_id, lnum, ec, sqnum, bitflips);

	/* Find (or create) the volume object this PEB belongs to */
	av = add_volume(ai, vol_id, pnum, vid_hdr);
	if (IS_ERR(av))
		return PTR_ERR(av);

	if (ai->max_sqnum < sqnum)
		ai->max_sqnum = sqnum;

	/*
	 * Walk the RB-tree of logical eraseblocks of volume @vol_id to look
	 * if this is the first instance of this logical eraseblock or not.
	 */
	p = &av->root.rb_node;
	while (*p) {
		int cmp_res;

		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (lnum != aeb->lnum) {
			if (lnum < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			continue;
		}

		/*
		 * There is already a physical eraseblock describing the same
		 * logical eraseblock present.
		 */

		dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
			aeb->pnum, aeb->sqnum, aeb->ec);

		/*
		 * Make sure that the logical eraseblocks have different
		 * sequence numbers. Otherwise the image is bad.
		 *
		 * However, if the sequence number is zero, we assume it must
		 * be an ancient UBI image from the era when UBI did not have
		 * sequence numbers. We still can attach these images, unless
		 * there is a need to distinguish between old and new
		 * eraseblocks, in which case we'll refuse the image in
		 * 'compare_lebs()'. In other words, we attach old clean
		 * images, but refuse attaching old images with duplicated
		 * logical eraseblocks because there was an unclean reboot.
		 */
		if (aeb->sqnum == sqnum && sqnum != 0) {
			ubi_err("two LEBs with same sequence number %llu",
				sqnum);
			ubi_dump_aeb(aeb, 0);
			ubi_dump_vid_hdr(vid_hdr);
			return -EINVAL;
		}

		/*
		 * Now we have to drop the older one and preserve the newer
		 * one.
		 */
		cmp_res = compare_lebs(ubi, aeb, pnum, vid_hdr);
		if (cmp_res < 0)
			return cmp_res;

		if (cmp_res & 1) {
			/*
			 * This logical eraseblock is newer than the one
			 * found earlier.
			 */
			err = validate_vid_hdr(vid_hdr, av, pnum);
			if (err)
				return err;

			/* Bit 2 of cmp_res tells if the older copy is corrupted */
			err = add_to_list(ai, aeb->pnum, aeb->ec, cmp_res & 4,
					  &ai->erase);
			if (err)
				return err;

			/* Re-use the existing tree node for the newer copy */
			aeb->ec = ec;
			aeb->pnum = pnum;
			/* Bit 1 of cmp_res: bit-flips were found in the newer LEB */
			aeb->scrub = ((cmp_res & 2) || bitflips);
			aeb->copy_flag = vid_hdr->copy_flag;
			aeb->sqnum = sqnum;

			if (av->highest_lnum == lnum)
				av->last_data_size =
					be32_to_cpu(vid_hdr->data_size);

			return 0;
		} else {
			/*
			 * This logical eraseblock is older than the one found
			 * previously.
			 */
			return add_to_list(ai, pnum, ec, cmp_res & 4,
					   &ai->erase);
		}
	}

	/*
	 * We've met this logical eraseblock for the first time, add it to the
	 * attaching information.
	 */

	err = validate_vid_hdr(vid_hdr, av, pnum);
	if (err)
		return err;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->ec = ec;
	aeb->pnum = pnum;
	aeb->lnum = lnum;
	aeb->scrub = bitflips;
	aeb->copy_flag = vid_hdr->copy_flag;
	aeb->sqnum = sqnum;

	if (av->highest_lnum <= lnum) {
		av->highest_lnum = lnum;
		av->last_data_size = be32_to_cpu(vid_hdr->data_size);
	}

	av->leb_count += 1;
	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
	return 0;
}
586 | 589 | ||
587 | /** | 590 | /** |
588 | * ubi_find_av - find volume in the attaching information. | 591 | * ubi_find_av - find volume in the attaching information. |
589 | * @ai: attaching information | 592 | * @ai: attaching information |
590 | * @vol_id: the requested volume ID | 593 | * @vol_id: the requested volume ID |
591 | * | 594 | * |
592 | * This function returns a pointer to the volume description or %NULL if there | 595 | * This function returns a pointer to the volume description or %NULL if there |
593 | * are no data about this volume in the attaching information. | 596 | * are no data about this volume in the attaching information. |
594 | */ | 597 | */ |
595 | struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai, | 598 | struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai, |
596 | int vol_id) | 599 | int vol_id) |
597 | { | 600 | { |
598 | struct ubi_ainf_volume *av; | 601 | struct ubi_ainf_volume *av; |
599 | struct rb_node *p = ai->volumes.rb_node; | 602 | struct rb_node *p = ai->volumes.rb_node; |
600 | 603 | ||
601 | while (p) { | 604 | while (p) { |
602 | av = rb_entry(p, struct ubi_ainf_volume, rb); | 605 | av = rb_entry(p, struct ubi_ainf_volume, rb); |
603 | 606 | ||
604 | if (vol_id == av->vol_id) | 607 | if (vol_id == av->vol_id) |
605 | return av; | 608 | return av; |
606 | 609 | ||
607 | if (vol_id > av->vol_id) | 610 | if (vol_id > av->vol_id) |
608 | p = p->rb_left; | 611 | p = p->rb_left; |
609 | else | 612 | else |
610 | p = p->rb_right; | 613 | p = p->rb_right; |
611 | } | 614 | } |
612 | 615 | ||
613 | return NULL; | 616 | return NULL; |
614 | } | 617 | } |
615 | 618 | ||
616 | /** | 619 | /** |
617 | * ubi_remove_av - delete attaching information about a volume. | 620 | * ubi_remove_av - delete attaching information about a volume. |
618 | * @ai: attaching information | 621 | * @ai: attaching information |
619 | * @av: the volume attaching information to delete | 622 | * @av: the volume attaching information to delete |
620 | */ | 623 | */ |
621 | void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av) | 624 | void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av) |
622 | { | 625 | { |
623 | struct rb_node *rb; | 626 | struct rb_node *rb; |
624 | struct ubi_ainf_peb *aeb; | 627 | struct ubi_ainf_peb *aeb; |
625 | 628 | ||
626 | dbg_bld("remove attaching information about volume %d", av->vol_id); | 629 | dbg_bld("remove attaching information about volume %d", av->vol_id); |
627 | 630 | ||
628 | while ((rb = rb_first(&av->root))) { | 631 | while ((rb = rb_first(&av->root))) { |
629 | aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb); | 632 | aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb); |
630 | rb_erase(&aeb->u.rb, &av->root); | 633 | rb_erase(&aeb->u.rb, &av->root); |
631 | list_add_tail(&aeb->u.list, &ai->erase); | 634 | list_add_tail(&aeb->u.list, &ai->erase); |
632 | } | 635 | } |
633 | 636 | ||
634 | rb_erase(&av->rb, &ai->volumes); | 637 | rb_erase(&av->rb, &ai->volumes); |
635 | kfree(av); | 638 | kfree(av); |
636 | ai->vols_found -= 1; | 639 | ai->vols_found -= 1; |
637 | } | 640 | } |
638 | 641 | ||
639 | /** | 642 | /** |
640 | * early_erase_peb - erase a physical eraseblock. | 643 | * early_erase_peb - erase a physical eraseblock. |
641 | * @ubi: UBI device description object | 644 | * @ubi: UBI device description object |
642 | * @ai: attaching information | 645 | * @ai: attaching information |
643 | * @pnum: physical eraseblock number to erase; | 646 | * @pnum: physical eraseblock number to erase; |
644 | * @ec: erase counter value to write (%UBI_SCAN_UNKNOWN_EC if it is unknown) | 647 | * @ec: erase counter value to write (%UBI_SCAN_UNKNOWN_EC if it is unknown) |
645 | * | 648 | * |
646 | * This function erases physical eraseblock 'pnum', and writes the erase | 649 | * This function erases physical eraseblock 'pnum', and writes the erase |
647 | * counter header to it. This function should only be used on UBI device | 650 | * counter header to it. This function should only be used on UBI device |
648 | * initialization stages, when the EBA sub-system had not been yet initialized. | 651 | * initialization stages, when the EBA sub-system had not been yet initialized. |
649 | * This function returns zero in case of success and a negative error code in | 652 | * This function returns zero in case of success and a negative error code in |
650 | * case of failure. | 653 | * case of failure. |
651 | */ | 654 | */ |
652 | static int early_erase_peb(struct ubi_device *ubi, | 655 | static int early_erase_peb(struct ubi_device *ubi, |
653 | const struct ubi_attach_info *ai, int pnum, int ec) | 656 | const struct ubi_attach_info *ai, int pnum, int ec) |
654 | { | 657 | { |
655 | int err; | 658 | int err; |
656 | struct ubi_ec_hdr *ec_hdr; | 659 | struct ubi_ec_hdr *ec_hdr; |
657 | 660 | ||
658 | if ((long long)ec >= UBI_MAX_ERASECOUNTER) { | 661 | if ((long long)ec >= UBI_MAX_ERASECOUNTER) { |
659 | /* | 662 | /* |
660 | * Erase counter overflow. Upgrade UBI and use 64-bit | 663 | * Erase counter overflow. Upgrade UBI and use 64-bit |
661 | * erase counters internally. | 664 | * erase counters internally. |
662 | */ | 665 | */ |
663 | ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec); | 666 | ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec); |
664 | return -EINVAL; | 667 | return -EINVAL; |
665 | } | 668 | } |
666 | 669 | ||
667 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | 670 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); |
668 | if (!ec_hdr) | 671 | if (!ec_hdr) |
669 | return -ENOMEM; | 672 | return -ENOMEM; |
670 | 673 | ||
671 | ec_hdr->ec = cpu_to_be64(ec); | 674 | ec_hdr->ec = cpu_to_be64(ec); |
672 | 675 | ||
673 | err = ubi_io_sync_erase(ubi, pnum, 0); | 676 | err = ubi_io_sync_erase(ubi, pnum, 0); |
674 | if (err < 0) | 677 | if (err < 0) |
675 | goto out_free; | 678 | goto out_free; |
676 | 679 | ||
677 | err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr); | 680 | err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr); |
678 | 681 | ||
679 | out_free: | 682 | out_free: |
680 | kfree(ec_hdr); | 683 | kfree(ec_hdr); |
681 | return err; | 684 | return err; |
682 | } | 685 | } |
683 | 686 | ||
684 | /** | 687 | /** |
685 | * ubi_early_get_peb - get a free physical eraseblock. | 688 | * ubi_early_get_peb - get a free physical eraseblock. |
686 | * @ubi: UBI device description object | 689 | * @ubi: UBI device description object |
687 | * @ai: attaching information | 690 | * @ai: attaching information |
688 | * | 691 | * |
689 | * This function returns a free physical eraseblock. It is supposed to be | 692 | * This function returns a free physical eraseblock. It is supposed to be |
690 | * called on the UBI initialization stages when the wear-leveling sub-system is | 693 | * called on the UBI initialization stages when the wear-leveling sub-system is |
691 | * not initialized yet. This function picks a physical eraseblocks from one of | 694 | * not initialized yet. This function picks a physical eraseblocks from one of |
692 | * the lists, writes the EC header if it is needed, and removes it from the | 695 | * the lists, writes the EC header if it is needed, and removes it from the |
693 | * list. | 696 | * list. |
694 | * | 697 | * |
695 | * This function returns scanning physical eraseblock information in case of | 698 | * This function returns a pointer to the "aeb" of the found free PEB in case |
696 | * success and an error code in case of failure. | 699 | * of success and an error code in case of failure. |
697 | */ | 700 | */ |
698 | struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi, | 701 | struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi, |
699 | struct ubi_attach_info *ai) | 702 | struct ubi_attach_info *ai) |
700 | { | 703 | { |
701 | int err = 0; | 704 | int err = 0; |
702 | struct ubi_ainf_peb *aeb, *tmp_aeb; | 705 | struct ubi_ainf_peb *aeb, *tmp_aeb; |
703 | 706 | ||
704 | if (!list_empty(&ai->free)) { | 707 | if (!list_empty(&ai->free)) { |
705 | aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list); | 708 | aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list); |
706 | list_del(&aeb->u.list); | 709 | list_del(&aeb->u.list); |
707 | dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec); | 710 | dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec); |
708 | return aeb; | 711 | return aeb; |
709 | } | 712 | } |
710 | 713 | ||
711 | /* | 714 | /* |
712 | * We try to erase the first physical eraseblock from the erase list | 715 | * We try to erase the first physical eraseblock from the erase list |
713 | * and pick it if we succeed, or try to erase the next one if not. And | 716 | * and pick it if we succeed, or try to erase the next one if not. And |
714 | * so forth. We don't want to take care about bad eraseblocks here - | 717 | * so forth. We don't want to take care about bad eraseblocks here - |
715 | * they'll be handled later. | 718 | * they'll be handled later. |
716 | */ | 719 | */ |
717 | list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) { | 720 | list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) { |
718 | if (aeb->ec == UBI_SCAN_UNKNOWN_EC) | 721 | if (aeb->ec == UBI_SCAN_UNKNOWN_EC) |
719 | aeb->ec = ai->mean_ec; | 722 | aeb->ec = ai->mean_ec; |
720 | 723 | ||
721 | err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1); | 724 | err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1); |
722 | if (err) | 725 | if (err) |
723 | continue; | 726 | continue; |
724 | 727 | ||
725 | aeb->ec += 1; | 728 | aeb->ec += 1; |
726 | list_del(&aeb->u.list); | 729 | list_del(&aeb->u.list); |
727 | dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec); | 730 | dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec); |
728 | return aeb; | 731 | return aeb; |
729 | } | 732 | } |
730 | 733 | ||
731 | ubi_err("no free eraseblocks"); | 734 | ubi_err("no free eraseblocks"); |
732 | return ERR_PTR(-ENOSPC); | 735 | return ERR_PTR(-ENOSPC); |
733 | } | 736 | } |
734 | 737 | ||
735 | /** | 738 | /** |
736 | * check_corruption - check the data area of PEB. | 739 | * check_corruption - check the data area of PEB. |
737 | * @ubi: UBI device description object | 740 | * @ubi: UBI device description object |
738 | * @vid_hrd: the (corrupted) VID header of this PEB | 741 | * @vid_hrd: the (corrupted) VID header of this PEB |
739 | * @pnum: the physical eraseblock number to check | 742 | * @pnum: the physical eraseblock number to check |
740 | * | 743 | * |
741 | * This is a helper function which is used to distinguish between VID header | 744 | * This is a helper function which is used to distinguish between VID header |
742 | * corruptions caused by power cuts and other reasons. If the PEB contains only | 745 | * corruptions caused by power cuts and other reasons. If the PEB contains only |
743 | * 0xFF bytes in the data area, the VID header is most probably corrupted | 746 | * 0xFF bytes in the data area, the VID header is most probably corrupted |
744 | * because of a power cut (%0 is returned in this case). Otherwise, it was | 747 | * because of a power cut (%0 is returned in this case). Otherwise, it was |
745 | * probably corrupted for some other reasons (%1 is returned in this case). A | 748 | * probably corrupted for some other reasons (%1 is returned in this case). A |
746 | * negative error code is returned if a read error occurred. | 749 | * negative error code is returned if a read error occurred. |
747 | * | 750 | * |
748 | * If the corruption reason was a power cut, UBI can safely erase this PEB. | 751 | * If the corruption reason was a power cut, UBI can safely erase this PEB. |
749 | * Otherwise, it should preserve it to avoid possibly destroying important | 752 | * Otherwise, it should preserve it to avoid possibly destroying important |
750 | * information. | 753 | * information. |
751 | */ | 754 | */ |
752 | static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr, | 755 | static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr, |
753 | int pnum) | 756 | int pnum) |
754 | { | 757 | { |
755 | int err; | 758 | int err; |
756 | 759 | ||
757 | mutex_lock(&ubi->buf_mutex); | 760 | mutex_lock(&ubi->buf_mutex); |
758 | memset(ubi->peb_buf, 0x00, ubi->leb_size); | 761 | memset(ubi->peb_buf, 0x00, ubi->leb_size); |
759 | 762 | ||
760 | err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start, | 763 | err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start, |
761 | ubi->leb_size); | 764 | ubi->leb_size); |
762 | if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) { | 765 | if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) { |
763 | /* | 766 | /* |
764 | * Bit-flips or integrity errors while reading the data area. | 767 | * Bit-flips or integrity errors while reading the data area. |
765 | * It is difficult to say for sure what type of corruption is | 768 | * It is difficult to say for sure what type of corruption is |
766 | * this, but presumably a power cut happened while this PEB was | 769 | * this, but presumably a power cut happened while this PEB was |
767 | * erased, so it became unstable and corrupted, and should be | 770 | * erased, so it became unstable and corrupted, and should be |
768 | * erased. | 771 | * erased. |
769 | */ | 772 | */ |
770 | err = 0; | 773 | err = 0; |
771 | goto out_unlock; | 774 | goto out_unlock; |
772 | } | 775 | } |
773 | 776 | ||
774 | if (err) | 777 | if (err) |
775 | goto out_unlock; | 778 | goto out_unlock; |
776 | 779 | ||
777 | if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size)) | 780 | if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size)) |
778 | goto out_unlock; | 781 | goto out_unlock; |
779 | 782 | ||
780 | ubi_err("PEB %d contains corrupted VID header, and the data does not " | 783 | ubi_err("PEB %d contains corrupted VID header, and the data does not " |
781 | "contain all 0xFF, this may be a non-UBI PEB or a severe VID " | 784 | "contain all 0xFF, this may be a non-UBI PEB or a severe VID " |
782 | "header corruption which requires manual inspection", pnum); | 785 | "header corruption which requires manual inspection", pnum); |
783 | ubi_dump_vid_hdr(vid_hdr); | 786 | ubi_dump_vid_hdr(vid_hdr); |
784 | dbg_msg("hexdump of PEB %d offset %d, length %d", | 787 | dbg_msg("hexdump of PEB %d offset %d, length %d", |
785 | pnum, ubi->leb_start, ubi->leb_size); | 788 | pnum, ubi->leb_start, ubi->leb_size); |
786 | ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, | 789 | ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, |
787 | ubi->peb_buf, ubi->leb_size, 1); | 790 | ubi->peb_buf, ubi->leb_size, 1); |
788 | err = 1; | 791 | err = 1; |
789 | 792 | ||
790 | out_unlock: | 793 | out_unlock: |
791 | mutex_unlock(&ubi->buf_mutex); | 794 | mutex_unlock(&ubi->buf_mutex); |
792 | return err; | 795 | return err; |
793 | } | 796 | } |
794 | 797 | ||
795 | /** | 798 | /** |
796 | * process_eb - read, check UBI headers, and add them to attaching information. | 799 | * scan_peb - scan and process UBI headers of a PEB. |
797 | * @ubi: UBI device description object | 800 | * @ubi: UBI device description object |
798 | * @ai: attaching information | 801 | * @ai: attaching information |
799 | * @pnum: the physical eraseblock number | 802 | * @pnum: the physical eraseblock number |
800 | * | 803 | * |
801 | * This function returns a zero if the physical eraseblock was successfully | 804 | * This function reads UBI headers of PEB @pnum, checks them, and adds |
802 | * handled and a negative error code in case of failure. | 805 | * information about this PEB to the corresponding list or RB-tree in the |
806 | * "attaching info" structure. Returns zero if the physical eraseblock was | ||
807 | * successfully handled and a negative error code in case of failure. | ||
803 | */ | 808 | */ |
804 | static int process_eb(struct ubi_device *ubi, struct ubi_attach_info *ai, | 809 | static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, |
805 | int pnum) | 810 | int pnum) |
806 | { | 811 | { |
807 | long long uninitialized_var(ec); | 812 | long long uninitialized_var(ec); |
808 | int err, bitflips = 0, vol_id, ec_err = 0; | 813 | int err, bitflips = 0, vol_id, ec_err = 0; |
809 | 814 | ||
810 | dbg_bld("scan PEB %d", pnum); | 815 | dbg_bld("scan PEB %d", pnum); |
811 | 816 | ||
812 | /* Skip bad physical eraseblocks */ | 817 | /* Skip bad physical eraseblocks */ |
813 | err = ubi_io_is_bad(ubi, pnum); | 818 | err = ubi_io_is_bad(ubi, pnum); |
814 | if (err < 0) | 819 | if (err < 0) |
815 | return err; | 820 | return err; |
816 | else if (err) { | 821 | else if (err) { |
817 | /* | ||
818 | * FIXME: this is actually duty of the I/O sub-system to | ||
819 | * initialize this, but MTD does not provide enough | ||
820 | * information. | ||
821 | */ | ||
822 | ai->bad_peb_count += 1; | 822 | ai->bad_peb_count += 1; |
823 | return 0; | 823 | return 0; |
824 | } | 824 | } |
825 | 825 | ||
826 | err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); | 826 | err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); |
827 | if (err < 0) | 827 | if (err < 0) |
828 | return err; | 828 | return err; |
829 | switch (err) { | 829 | switch (err) { |
830 | case 0: | 830 | case 0: |
831 | break; | 831 | break; |
832 | case UBI_IO_BITFLIPS: | 832 | case UBI_IO_BITFLIPS: |
833 | bitflips = 1; | 833 | bitflips = 1; |
834 | break; | 834 | break; |
835 | case UBI_IO_FF: | 835 | case UBI_IO_FF: |
836 | ai->empty_peb_count += 1; | 836 | ai->empty_peb_count += 1; |
837 | return add_to_list(ai, pnum, UBI_SCAN_UNKNOWN_EC, 0, | 837 | return add_to_list(ai, pnum, UBI_SCAN_UNKNOWN_EC, 0, |
838 | &ai->erase); | 838 | &ai->erase); |
839 | case UBI_IO_FF_BITFLIPS: | 839 | case UBI_IO_FF_BITFLIPS: |
840 | ai->empty_peb_count += 1; | 840 | ai->empty_peb_count += 1; |
841 | return add_to_list(ai, pnum, UBI_SCAN_UNKNOWN_EC, 1, | 841 | return add_to_list(ai, pnum, UBI_SCAN_UNKNOWN_EC, 1, |
842 | &ai->erase); | 842 | &ai->erase); |
843 | case UBI_IO_BAD_HDR_EBADMSG: | 843 | case UBI_IO_BAD_HDR_EBADMSG: |
844 | case UBI_IO_BAD_HDR: | 844 | case UBI_IO_BAD_HDR: |
845 | /* | 845 | /* |
846 | * We have to also look at the VID header, possibly it is not | 846 | * We have to also look at the VID header, possibly it is not |
847 | * corrupted. Set %bitflips flag in order to make this PEB be | 847 | * corrupted. Set %bitflips flag in order to make this PEB be |
848 | * moved and EC be re-created. | 848 | * moved and EC be re-created. |
849 | */ | 849 | */ |
850 | ec_err = err; | 850 | ec_err = err; |
851 | ec = UBI_SCAN_UNKNOWN_EC; | 851 | ec = UBI_SCAN_UNKNOWN_EC; |
852 | bitflips = 1; | 852 | bitflips = 1; |
853 | break; | 853 | break; |
854 | default: | 854 | default: |
855 | ubi_err("'ubi_io_read_ec_hdr()' returned unknown code %d", err); | 855 | ubi_err("'ubi_io_read_ec_hdr()' returned unknown code %d", err); |
856 | return -EINVAL; | 856 | return -EINVAL; |
857 | } | 857 | } |
858 | 858 | ||
859 | if (!ec_err) { | 859 | if (!ec_err) { |
860 | int image_seq; | 860 | int image_seq; |
861 | 861 | ||
862 | /* Make sure UBI version is OK */ | 862 | /* Make sure UBI version is OK */ |
863 | if (ech->version != UBI_VERSION) { | 863 | if (ech->version != UBI_VERSION) { |
864 | ubi_err("this UBI version is %d, image version is %d", | 864 | ubi_err("this UBI version is %d, image version is %d", |
865 | UBI_VERSION, (int)ech->version); | 865 | UBI_VERSION, (int)ech->version); |
866 | return -EINVAL; | 866 | return -EINVAL; |
867 | } | 867 | } |
868 | 868 | ||
869 | ec = be64_to_cpu(ech->ec); | 869 | ec = be64_to_cpu(ech->ec); |
870 | if (ec > UBI_MAX_ERASECOUNTER) { | 870 | if (ec > UBI_MAX_ERASECOUNTER) { |
871 | /* | 871 | /* |
872 | * Erase counter overflow. The EC headers have 64 bits | 872 | * Erase counter overflow. The EC headers have 64 bits |
873 | * reserved, but we anyway make use of only 31 bit | 873 | * reserved, but we anyway make use of only 31 bit |
874 | * values, as this seems to be enough for any existing | 874 | * values, as this seems to be enough for any existing |
875 | * flash. Upgrade UBI and use 64-bit erase counters | 875 | * flash. Upgrade UBI and use 64-bit erase counters |
876 | * internally. | 876 | * internally. |
877 | */ | 877 | */ |
878 | ubi_err("erase counter overflow, max is %d", | 878 | ubi_err("erase counter overflow, max is %d", |
879 | UBI_MAX_ERASECOUNTER); | 879 | UBI_MAX_ERASECOUNTER); |
880 | ubi_dump_ec_hdr(ech); | 880 | ubi_dump_ec_hdr(ech); |
881 | return -EINVAL; | 881 | return -EINVAL; |
882 | } | 882 | } |
883 | 883 | ||
884 | /* | 884 | /* |
885 | * Make sure that all PEBs have the same image sequence number. | 885 | * Make sure that all PEBs have the same image sequence number. |
886 | * This allows us to detect situations when users flash UBI | 886 | * This allows us to detect situations when users flash UBI |
887 | * images incorrectly, so that the flash has the new UBI image | 887 | * images incorrectly, so that the flash has the new UBI image |
888 | * and leftovers from the old one. This feature was added | 888 | * and leftovers from the old one. This feature was added |
889 | * relatively recently, and the sequence number was always | 889 | * relatively recently, and the sequence number was always |
890 | * zero, because old UBI implementations always set it to zero. | 890 | * zero, because old UBI implementations always set it to zero. |
891 | * For this reasons, we do not panic if some PEBs have zero | 891 | * For this reasons, we do not panic if some PEBs have zero |
892 | * sequence number, while other PEBs have non-zero sequence | 892 | * sequence number, while other PEBs have non-zero sequence |
893 | * number. | 893 | * number. |
894 | */ | 894 | */ |
895 | image_seq = be32_to_cpu(ech->image_seq); | 895 | image_seq = be32_to_cpu(ech->image_seq); |
896 | if (!ubi->image_seq && image_seq) | 896 | if (!ubi->image_seq && image_seq) |
897 | ubi->image_seq = image_seq; | 897 | ubi->image_seq = image_seq; |
898 | if (ubi->image_seq && image_seq && | 898 | if (ubi->image_seq && image_seq && |
899 | ubi->image_seq != image_seq) { | 899 | ubi->image_seq != image_seq) { |
900 | ubi_err("bad image sequence number %d in PEB %d, " | 900 | ubi_err("bad image sequence number %d in PEB %d, " |
901 | "expected %d", image_seq, pnum, ubi->image_seq); | 901 | "expected %d", image_seq, pnum, ubi->image_seq); |
902 | ubi_dump_ec_hdr(ech); | 902 | ubi_dump_ec_hdr(ech); |
903 | return -EINVAL; | 903 | return -EINVAL; |
904 | } | 904 | } |
905 | } | 905 | } |
906 | 906 | ||
907 | /* OK, we've done with the EC header, let's look at the VID header */ | 907 | /* OK, we've done with the EC header, let's look at the VID header */ |
908 | 908 | ||
909 | err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0); | 909 | err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0); |
910 | if (err < 0) | 910 | if (err < 0) |
911 | return err; | 911 | return err; |
912 | switch (err) { | 912 | switch (err) { |
913 | case 0: | 913 | case 0: |
914 | break; | 914 | break; |
915 | case UBI_IO_BITFLIPS: | 915 | case UBI_IO_BITFLIPS: |
916 | bitflips = 1; | 916 | bitflips = 1; |
917 | break; | 917 | break; |
918 | case UBI_IO_BAD_HDR_EBADMSG: | 918 | case UBI_IO_BAD_HDR_EBADMSG: |
919 | if (ec_err == UBI_IO_BAD_HDR_EBADMSG) | 919 | if (ec_err == UBI_IO_BAD_HDR_EBADMSG) |
920 | /* | 920 | /* |
921 | * Both EC and VID headers are corrupted and were read | 921 | * Both EC and VID headers are corrupted and were read |
922 | * with data integrity error, probably this is a bad | 922 | * with data integrity error, probably this is a bad |
923 | * PEB, bit it is not marked as bad yet. This may also | 923 | * PEB, bit it is not marked as bad yet. This may also |
924 | * be a result of power cut during erasure. | 924 | * be a result of power cut during erasure. |
925 | */ | 925 | */ |
926 | ai->maybe_bad_peb_count += 1; | 926 | ai->maybe_bad_peb_count += 1; |
927 | case UBI_IO_BAD_HDR: | 927 | case UBI_IO_BAD_HDR: |
928 | if (ec_err) | 928 | if (ec_err) |
929 | /* | 929 | /* |
930 | * Both headers are corrupted. There is a possibility | 930 | * Both headers are corrupted. There is a possibility |
931 | * that this a valid UBI PEB which has corresponding | 931 | * that this a valid UBI PEB which has corresponding |
932 | * LEB, but the headers are corrupted. However, it is | 932 | * LEB, but the headers are corrupted. However, it is |
933 | * impossible to distinguish it from a PEB which just | 933 | * impossible to distinguish it from a PEB which just |
934 | * contains garbage because of a power cut during erase | 934 | * contains garbage because of a power cut during erase |
935 | * operation. So we just schedule this PEB for erasure. | 935 | * operation. So we just schedule this PEB for erasure. |
936 | * | 936 | * |
937 | * Besides, in case of NOR flash, we deliberately | 937 | * Besides, in case of NOR flash, we deliberately |
938 | * corrupt both headers because NOR flash erasure is | 938 | * corrupt both headers because NOR flash erasure is |
939 | * slow and can start from the end. | 939 | * slow and can start from the end. |
940 | */ | 940 | */ |
941 | err = 0; | 941 | err = 0; |
942 | else | 942 | else |
943 | /* | 943 | /* |
944 | * The EC was OK, but the VID header is corrupted. We | 944 | * The EC was OK, but the VID header is corrupted. We |
945 | * have to check what is in the data area. | 945 | * have to check what is in the data area. |
946 | */ | 946 | */ |
947 | err = check_corruption(ubi, vidh, pnum); | 947 | err = check_corruption(ubi, vidh, pnum); |
948 | 948 | ||
949 | if (err < 0) | 949 | if (err < 0) |
950 | return err; | 950 | return err; |
951 | else if (!err) | 951 | else if (!err) |
952 | /* This corruption is caused by a power cut */ | 952 | /* This corruption is caused by a power cut */ |
953 | err = add_to_list(ai, pnum, ec, 1, &ai->erase); | 953 | err = add_to_list(ai, pnum, ec, 1, &ai->erase); |
954 | else | 954 | else |
955 | /* This is an unexpected corruption */ | 955 | /* This is an unexpected corruption */ |
956 | err = add_corrupted(ai, pnum, ec); | 956 | err = add_corrupted(ai, pnum, ec); |
957 | if (err) | 957 | if (err) |
958 | return err; | 958 | return err; |
959 | goto adjust_mean_ec; | 959 | goto adjust_mean_ec; |
960 | case UBI_IO_FF_BITFLIPS: | 960 | case UBI_IO_FF_BITFLIPS: |
961 | err = add_to_list(ai, pnum, ec, 1, &ai->erase); | 961 | err = add_to_list(ai, pnum, ec, 1, &ai->erase); |
962 | if (err) | 962 | if (err) |
963 | return err; | 963 | return err; |
964 | goto adjust_mean_ec; | 964 | goto adjust_mean_ec; |
965 | case UBI_IO_FF: | 965 | case UBI_IO_FF: |
966 | if (ec_err) | 966 | if (ec_err) |
967 | err = add_to_list(ai, pnum, ec, 1, &ai->erase); | 967 | err = add_to_list(ai, pnum, ec, 1, &ai->erase); |
968 | else | 968 | else |
969 | err = add_to_list(ai, pnum, ec, 0, &ai->free); | 969 | err = add_to_list(ai, pnum, ec, 0, &ai->free); |
970 | if (err) | 970 | if (err) |
971 | return err; | 971 | return err; |
972 | goto adjust_mean_ec; | 972 | goto adjust_mean_ec; |
973 | default: | 973 | default: |
974 | ubi_err("'ubi_io_read_vid_hdr()' returned unknown code %d", | 974 | ubi_err("'ubi_io_read_vid_hdr()' returned unknown code %d", |
975 | err); | 975 | err); |
976 | return -EINVAL; | 976 | return -EINVAL; |
977 | } | 977 | } |
978 | 978 | ||
979 | vol_id = be32_to_cpu(vidh->vol_id); | 979 | vol_id = be32_to_cpu(vidh->vol_id); |
980 | if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) { | 980 | if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) { |
981 | int lnum = be32_to_cpu(vidh->lnum); | 981 | int lnum = be32_to_cpu(vidh->lnum); |
982 | 982 | ||
983 | /* Unsupported internal volume */ | 983 | /* Unsupported internal volume */ |
984 | switch (vidh->compat) { | 984 | switch (vidh->compat) { |
985 | case UBI_COMPAT_DELETE: | 985 | case UBI_COMPAT_DELETE: |
986 | ubi_msg("\"delete\" compatible internal volume %d:%d" | 986 | ubi_msg("\"delete\" compatible internal volume %d:%d" |
987 | " found, will remove it", vol_id, lnum); | 987 | " found, will remove it", vol_id, lnum); |
988 | err = add_to_list(ai, pnum, ec, 1, &ai->erase); | 988 | err = add_to_list(ai, pnum, ec, 1, &ai->erase); |
989 | if (err) | 989 | if (err) |
990 | return err; | 990 | return err; |
991 | return 0; | 991 | return 0; |
992 | 992 | ||
993 | case UBI_COMPAT_RO: | 993 | case UBI_COMPAT_RO: |
994 | ubi_msg("read-only compatible internal volume %d:%d" | 994 | ubi_msg("read-only compatible internal volume %d:%d" |
995 | " found, switch to read-only mode", | 995 | " found, switch to read-only mode", |
996 | vol_id, lnum); | 996 | vol_id, lnum); |
997 | ubi->ro_mode = 1; | 997 | ubi->ro_mode = 1; |
998 | break; | 998 | break; |
999 | 999 | ||
1000 | case UBI_COMPAT_PRESERVE: | 1000 | case UBI_COMPAT_PRESERVE: |
1001 | ubi_msg("\"preserve\" compatible internal volume %d:%d" | 1001 | ubi_msg("\"preserve\" compatible internal volume %d:%d" |
1002 | " found", vol_id, lnum); | 1002 | " found", vol_id, lnum); |
1003 | err = add_to_list(ai, pnum, ec, 0, &ai->alien); | 1003 | err = add_to_list(ai, pnum, ec, 0, &ai->alien); |
1004 | if (err) | 1004 | if (err) |
1005 | return err; | 1005 | return err; |
1006 | return 0; | 1006 | return 0; |
1007 | 1007 | ||
1008 | case UBI_COMPAT_REJECT: | 1008 | case UBI_COMPAT_REJECT: |
1009 | ubi_err("incompatible internal volume %d:%d found", | 1009 | ubi_err("incompatible internal volume %d:%d found", |
1010 | vol_id, lnum); | 1010 | vol_id, lnum); |
1011 | return -EINVAL; | 1011 | return -EINVAL; |
1012 | } | 1012 | } |
1013 | } | 1013 | } |
1014 | 1014 | ||
1015 | if (ec_err) | 1015 | if (ec_err) |
1016 | ubi_warn("valid VID header but corrupted EC header at PEB %d", | 1016 | ubi_warn("valid VID header but corrupted EC header at PEB %d", |
1017 | pnum); | 1017 | pnum); |
1018 | err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips); | 1018 | err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips); |
1019 | if (err) | 1019 | if (err) |
1020 | return err; | 1020 | return err; |
1021 | 1021 | ||
1022 | adjust_mean_ec: | 1022 | adjust_mean_ec: |
1023 | if (!ec_err) { | 1023 | if (!ec_err) { |
1024 | ai->ec_sum += ec; | 1024 | ai->ec_sum += ec; |
1025 | ai->ec_count += 1; | 1025 | ai->ec_count += 1; |
1026 | if (ec > ai->max_ec) | 1026 | if (ec > ai->max_ec) |
1027 | ai->max_ec = ec; | 1027 | ai->max_ec = ec; |
1028 | if (ec < ai->min_ec) | 1028 | if (ec < ai->min_ec) |
1029 | ai->min_ec = ec; | 1029 | ai->min_ec = ec; |
1030 | } | 1030 | } |
1031 | 1031 | ||
1032 | return 0; | 1032 | return 0; |
1033 | } | 1033 | } |
1034 | 1034 | ||
1035 | /** | 1035 | /** |
1036 | * check_what_we_have - check what PEB were found by scanning. | 1036 | * late_analysis - analyze the overall situation with PEB. |
1037 | * @ubi: UBI device description object | 1037 | * @ubi: UBI device description object |
1038 | * @ai: attaching information | 1038 | * @ai: attaching information |
1039 | * | 1039 | * |
1040 | * This is a helper function which takes a look what PEBs were found by | 1040 | * This is a helper function which takes a look what PEBs we have after we |
1041 | * scanning, and decides whether the flash is empty and should be formatted and | 1041 | * gather information about all of them ("ai" is compete). It decides whether |
1042 | * whether there are too many corrupted PEBs and we should not attach this | 1042 | * the flash is empty and should be formatted of whether there are too many |
1043 | * MTD device. Returns zero if we should proceed with attaching the MTD device, | 1043 | * corrupted PEBs and we should not attach this MTD device. Returns zero if we |
1044 | * and %-EINVAL if we should not. | 1044 | * should proceed with attaching the MTD device, and %-EINVAL if we should not. |
1045 | */ | 1045 | */ |
1046 | static int check_what_we_have(struct ubi_device *ubi, | 1046 | static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai) |
1047 | struct ubi_attach_info *ai) | ||
1048 | { | 1047 | { |
1049 | struct ubi_ainf_peb *aeb; | 1048 | struct ubi_ainf_peb *aeb; |
1050 | int max_corr, peb_count; | 1049 | int max_corr, peb_count; |
1051 | 1050 | ||
1052 | peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count; | 1051 | peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count; |
1053 | max_corr = peb_count / 20 ?: 8; | 1052 | max_corr = peb_count / 20 ?: 8; |
1054 | 1053 | ||
1055 | /* | 1054 | /* |
1056 | * Few corrupted PEBs is not a problem and may be just a result of | 1055 | * Few corrupted PEBs is not a problem and may be just a result of |
1057 | * unclean reboots. However, many of them may indicate some problems | 1056 | * unclean reboots. However, many of them may indicate some problems |
1058 | * with the flash HW or driver. | 1057 | * with the flash HW or driver. |
1059 | */ | 1058 | */ |
1060 | if (ai->corr_peb_count) { | 1059 | if (ai->corr_peb_count) { |
1061 | ubi_err("%d PEBs are corrupted and preserved", | 1060 | ubi_err("%d PEBs are corrupted and preserved", |
1062 | ai->corr_peb_count); | 1061 | ai->corr_peb_count); |
1063 | printk(KERN_ERR "Corrupted PEBs are:"); | 1062 | printk(KERN_ERR "Corrupted PEBs are:"); |
1064 | list_for_each_entry(aeb, &ai->corr, u.list) | 1063 | list_for_each_entry(aeb, &ai->corr, u.list) |
1065 | printk(KERN_CONT " %d", aeb->pnum); | 1064 | printk(KERN_CONT " %d", aeb->pnum); |
1066 | printk(KERN_CONT "\n"); | 1065 | printk(KERN_CONT "\n"); |
1067 | 1066 | ||
1068 | /* | 1067 | /* |
1069 | * If too many PEBs are corrupted, we refuse attaching, | 1068 | * If too many PEBs are corrupted, we refuse attaching, |
1070 | * otherwise, only print a warning. | 1069 | * otherwise, only print a warning. |
1071 | */ | 1070 | */ |
1072 | if (ai->corr_peb_count >= max_corr) { | 1071 | if (ai->corr_peb_count >= max_corr) { |
1073 | ubi_err("too many corrupted PEBs, refusing"); | 1072 | ubi_err("too many corrupted PEBs, refusing"); |
1074 | return -EINVAL; | 1073 | return -EINVAL; |
1075 | } | 1074 | } |
1076 | } | 1075 | } |
1077 | 1076 | ||
1078 | if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) { | 1077 | if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) { |
1079 | /* | 1078 | /* |
1080 | * All PEBs are empty, or almost all - a couple PEBs look like | 1079 | * All PEBs are empty, or almost all - a couple PEBs look like |
1081 | * they may be bad PEBs which were not marked as bad yet. | 1080 | * they may be bad PEBs which were not marked as bad yet. |
1082 | * | 1081 | * |
1083 | * This piece of code basically tries to distinguish between | 1082 | * This piece of code basically tries to distinguish between |
1084 | * the following situations: | 1083 | * the following situations: |
1085 | * | 1084 | * |
1086 | * 1. Flash is empty, but there are few bad PEBs, which are not | 1085 | * 1. Flash is empty, but there are few bad PEBs, which are not |
1087 | * marked as bad so far, and which were read with error. We | 1086 | * marked as bad so far, and which were read with error. We |
1088 | * want to go ahead and format this flash. While formatting, | 1087 | * want to go ahead and format this flash. While formatting, |
1089 | * the faulty PEBs will probably be marked as bad. | 1088 | * the faulty PEBs will probably be marked as bad. |
1090 | * | 1089 | * |
1091 | * 2. Flash contains non-UBI data and we do not want to format | 1090 | * 2. Flash contains non-UBI data and we do not want to format |
1092 | * it and destroy possibly important information. | 1091 | * it and destroy possibly important information. |
1093 | */ | 1092 | */ |
1094 | if (ai->maybe_bad_peb_count <= 2) { | 1093 | if (ai->maybe_bad_peb_count <= 2) { |
1095 | ai->is_empty = 1; | 1094 | ai->is_empty = 1; |
1096 | ubi_msg("empty MTD device detected"); | 1095 | ubi_msg("empty MTD device detected"); |
1097 | get_random_bytes(&ubi->image_seq, | 1096 | get_random_bytes(&ubi->image_seq, |
1098 | sizeof(ubi->image_seq)); | 1097 | sizeof(ubi->image_seq)); |
1099 | } else { | 1098 | } else { |
1100 | ubi_err("MTD device is not UBI-formatted and possibly " | 1099 | ubi_err("MTD device is not UBI-formatted and possibly " |
1101 | "contains non-UBI data - refusing it"); | 1100 | "contains non-UBI data - refusing it"); |
1102 | return -EINVAL; | 1101 | return -EINVAL; |
1103 | } | 1102 | } |
1104 | 1103 | ||
1105 | } | 1104 | } |
1106 | 1105 | ||
1107 | return 0; | 1106 | return 0; |
1108 | } | 1107 | } |
1109 | 1108 | ||
1110 | /** | 1109 | /** |
1111 | * ubi_scan - scan an MTD device. | 1110 | * ubi_scan - scan an MTD device. |
1112 | * @ubi: UBI device description object | 1111 | * @ubi: UBI device description object |
1113 | * | 1112 | * |
1114 | * This function does full scanning of an MTD device and returns complete | 1113 | * This function does full scanning of an MTD device and returns complete |
1115 | * information about it. In case of failure, an error code is returned. | 1114 | * information about it in form of a "struct ubi_attach_info" object. In case |
1115 | * of failure, an error code is returned. | ||
1116 | */ | 1116 | */ |
1117 | struct ubi_attach_info *ubi_scan(struct ubi_device *ubi) | 1117 | struct ubi_attach_info *ubi_scan(struct ubi_device *ubi) |
1118 | { | 1118 | { |
1119 | int err, pnum; | 1119 | int err, pnum; |
1120 | struct rb_node *rb1, *rb2; | 1120 | struct rb_node *rb1, *rb2; |
1121 | struct ubi_ainf_volume *av; | 1121 | struct ubi_ainf_volume *av; |
1122 | struct ubi_ainf_peb *aeb; | 1122 | struct ubi_ainf_peb *aeb; |
1123 | struct ubi_attach_info *ai; | 1123 | struct ubi_attach_info *ai; |
1124 | 1124 | ||
1125 | ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL); | 1125 | ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL); |
1126 | if (!ai) | 1126 | if (!ai) |
1127 | return ERR_PTR(-ENOMEM); | 1127 | return ERR_PTR(-ENOMEM); |
1128 | 1128 | ||
1129 | INIT_LIST_HEAD(&ai->corr); | 1129 | INIT_LIST_HEAD(&ai->corr); |
1130 | INIT_LIST_HEAD(&ai->free); | 1130 | INIT_LIST_HEAD(&ai->free); |
1131 | INIT_LIST_HEAD(&ai->erase); | 1131 | INIT_LIST_HEAD(&ai->erase); |
1132 | INIT_LIST_HEAD(&ai->alien); | 1132 | INIT_LIST_HEAD(&ai->alien); |
1133 | ai->volumes = RB_ROOT; | 1133 | ai->volumes = RB_ROOT; |
1134 | 1134 | ||
1135 | err = -ENOMEM; | 1135 | err = -ENOMEM; |
1136 | ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache", | 1136 | ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache", |
1137 | sizeof(struct ubi_ainf_peb), | 1137 | sizeof(struct ubi_ainf_peb), |
1138 | 0, 0, NULL); | 1138 | 0, 0, NULL); |
1139 | if (!ai->aeb_slab_cache) | 1139 | if (!ai->aeb_slab_cache) |
1140 | goto out_ai; | 1140 | goto out_ai; |
1141 | 1141 | ||
1142 | ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | 1142 | ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); |
1143 | if (!ech) | 1143 | if (!ech) |
1144 | goto out_ai; | 1144 | goto out_ai; |
1145 | 1145 | ||
1146 | vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); | 1146 | vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); |
1147 | if (!vidh) | 1147 | if (!vidh) |
1148 | goto out_ech; | 1148 | goto out_ech; |
1149 | 1149 | ||
1150 | for (pnum = 0; pnum < ubi->peb_count; pnum++) { | 1150 | for (pnum = 0; pnum < ubi->peb_count; pnum++) { |
1151 | cond_resched(); | 1151 | cond_resched(); |
1152 | 1152 | ||
1153 | dbg_gen("process PEB %d", pnum); | 1153 | dbg_gen("process PEB %d", pnum); |
1154 | err = process_eb(ubi, ai, pnum); | 1154 | err = scan_peb(ubi, ai, pnum); |
1155 | if (err < 0) | 1155 | if (err < 0) |
1156 | goto out_vidh; | 1156 | goto out_vidh; |
1157 | } | 1157 | } |
1158 | 1158 | ||
1159 | dbg_msg("scanning is finished"); | 1159 | dbg_msg("scanning is finished"); |
1160 | 1160 | ||
1161 | /* Calculate mean erase counter */ | 1161 | /* Calculate mean erase counter */ |
1162 | if (ai->ec_count) | 1162 | if (ai->ec_count) |
1163 | ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count); | 1163 | ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count); |
1164 | 1164 | ||
1165 | err = check_what_we_have(ubi, ai); | 1165 | err = late_analysis(ubi, ai); |
1166 | if (err) | 1166 | if (err) |
1167 | goto out_vidh; | 1167 | goto out_vidh; |
1168 | 1168 | ||
1169 | /* | 1169 | /* |
1170 | * In case of unknown erase counter we use the mean erase counter | 1170 | * In case of unknown erase counter we use the mean erase counter |
1171 | * value. | 1171 | * value. |
1172 | */ | 1172 | */ |
1173 | ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { | 1173 | ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { |
1174 | ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) | 1174 | ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) |
1175 | if (aeb->ec == UBI_SCAN_UNKNOWN_EC) | 1175 | if (aeb->ec == UBI_SCAN_UNKNOWN_EC) |
1176 | aeb->ec = ai->mean_ec; | 1176 | aeb->ec = ai->mean_ec; |
1177 | } | 1177 | } |
1178 | 1178 | ||
1179 | list_for_each_entry(aeb, &ai->free, u.list) { | 1179 | list_for_each_entry(aeb, &ai->free, u.list) { |
1180 | if (aeb->ec == UBI_SCAN_UNKNOWN_EC) | 1180 | if (aeb->ec == UBI_SCAN_UNKNOWN_EC) |
1181 | aeb->ec = ai->mean_ec; | 1181 | aeb->ec = ai->mean_ec; |
1182 | } | 1182 | } |
1183 | 1183 | ||
1184 | list_for_each_entry(aeb, &ai->corr, u.list) | 1184 | list_for_each_entry(aeb, &ai->corr, u.list) |
1185 | if (aeb->ec == UBI_SCAN_UNKNOWN_EC) | 1185 | if (aeb->ec == UBI_SCAN_UNKNOWN_EC) |
1186 | aeb->ec = ai->mean_ec; | 1186 | aeb->ec = ai->mean_ec; |
1187 | 1187 | ||
1188 | list_for_each_entry(aeb, &ai->erase, u.list) | 1188 | list_for_each_entry(aeb, &ai->erase, u.list) |
1189 | if (aeb->ec == UBI_SCAN_UNKNOWN_EC) | 1189 | if (aeb->ec == UBI_SCAN_UNKNOWN_EC) |
1190 | aeb->ec = ai->mean_ec; | 1190 | aeb->ec = ai->mean_ec; |
1191 | 1191 | ||
1192 | err = self_check_ai(ubi, ai); | 1192 | err = self_check_ai(ubi, ai); |
1193 | if (err) | 1193 | if (err) |
1194 | goto out_vidh; | 1194 | goto out_vidh; |
1195 | 1195 | ||
1196 | ubi_free_vid_hdr(ubi, vidh); | 1196 | ubi_free_vid_hdr(ubi, vidh); |
1197 | kfree(ech); | 1197 | kfree(ech); |
1198 | 1198 | ||
1199 | return ai; | 1199 | return ai; |
1200 | 1200 | ||
1201 | out_vidh: | 1201 | out_vidh: |
1202 | ubi_free_vid_hdr(ubi, vidh); | 1202 | ubi_free_vid_hdr(ubi, vidh); |
1203 | out_ech: | 1203 | out_ech: |
1204 | kfree(ech); | 1204 | kfree(ech); |
1205 | out_ai: | 1205 | out_ai: |
1206 | ubi_destroy_ai(ai); | 1206 | ubi_destroy_ai(ai); |
1207 | return ERR_PTR(err); | 1207 | return ERR_PTR(err); |
1208 | } | 1208 | } |
1209 | 1209 | ||
1210 | /** | 1210 | /** |
1211 | * destroy_av - free the scanning volume information | 1211 | * destroy_av - free volume attaching information. |
1212 | * @av: scanning volume information | 1212 | * @av: volume attaching information |
1213 | * @ai: attaching information | 1213 | * @ai: attaching information |
1214 | * | 1214 | * |
1215 | * This function destroys the volume RB-tree (@av->root) and the scanning | 1215 | * This function destroys the volume attaching information. |
1216 | * volume information. | ||
1217 | */ | 1216 | */ |
1218 | static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av) | 1217 | static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av) |
1219 | { | 1218 | { |
1220 | struct ubi_ainf_peb *aeb; | 1219 | struct ubi_ainf_peb *aeb; |
1221 | struct rb_node *this = av->root.rb_node; | 1220 | struct rb_node *this = av->root.rb_node; |
1222 | 1221 | ||
1223 | while (this) { | 1222 | while (this) { |
1224 | if (this->rb_left) | 1223 | if (this->rb_left) |
1225 | this = this->rb_left; | 1224 | this = this->rb_left; |
1226 | else if (this->rb_right) | 1225 | else if (this->rb_right) |
1227 | this = this->rb_right; | 1226 | this = this->rb_right; |
1228 | else { | 1227 | else { |
1229 | aeb = rb_entry(this, struct ubi_ainf_peb, u.rb); | 1228 | aeb = rb_entry(this, struct ubi_ainf_peb, u.rb); |
1230 | this = rb_parent(this); | 1229 | this = rb_parent(this); |
1231 | if (this) { | 1230 | if (this) { |
1232 | if (this->rb_left == &aeb->u.rb) | 1231 | if (this->rb_left == &aeb->u.rb) |
1233 | this->rb_left = NULL; | 1232 | this->rb_left = NULL; |
1234 | else | 1233 | else |
1235 | this->rb_right = NULL; | 1234 | this->rb_right = NULL; |
1236 | } | 1235 | } |
1237 | 1236 | ||
1238 | kmem_cache_free(ai->aeb_slab_cache, aeb); | 1237 | kmem_cache_free(ai->aeb_slab_cache, aeb); |
1239 | } | 1238 | } |
1240 | } | 1239 | } |
1241 | kfree(av); | 1240 | kfree(av); |
1242 | } | 1241 | } |
1243 | 1242 | ||
1244 | /** | 1243 | /** |
1245 | * ubi_destroy_ai - destroy attaching information. | 1244 | * ubi_destroy_ai - destroy attaching information. |
1246 | * @ai: attaching information | 1245 | * @ai: attaching information |
1247 | */ | 1246 | */ |
1248 | void ubi_destroy_ai(struct ubi_attach_info *ai) | 1247 | void ubi_destroy_ai(struct ubi_attach_info *ai) |
1249 | { | 1248 | { |
1250 | struct ubi_ainf_peb *aeb, *aeb_tmp; | 1249 | struct ubi_ainf_peb *aeb, *aeb_tmp; |
1251 | struct ubi_ainf_volume *av; | 1250 | struct ubi_ainf_volume *av; |
1252 | struct rb_node *rb; | 1251 | struct rb_node *rb; |
1253 | 1252 | ||
1254 | list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) { | 1253 | list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) { |
1255 | list_del(&aeb->u.list); | 1254 | list_del(&aeb->u.list); |
1256 | kmem_cache_free(ai->aeb_slab_cache, aeb); | 1255 | kmem_cache_free(ai->aeb_slab_cache, aeb); |
1257 | } | 1256 | } |
1258 | list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) { | 1257 | list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) { |
1259 | list_del(&aeb->u.list); | 1258 | list_del(&aeb->u.list); |
1260 | kmem_cache_free(ai->aeb_slab_cache, aeb); | 1259 | kmem_cache_free(ai->aeb_slab_cache, aeb); |
1261 | } | 1260 | } |
1262 | list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) { | 1261 | list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) { |
1263 | list_del(&aeb->u.list); | 1262 | list_del(&aeb->u.list); |
1264 | kmem_cache_free(ai->aeb_slab_cache, aeb); | 1263 | kmem_cache_free(ai->aeb_slab_cache, aeb); |
1265 | } | 1264 | } |
1266 | list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) { | 1265 | list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) { |
1267 | list_del(&aeb->u.list); | 1266 | list_del(&aeb->u.list); |
1268 | kmem_cache_free(ai->aeb_slab_cache, aeb); | 1267 | kmem_cache_free(ai->aeb_slab_cache, aeb); |
1269 | } | 1268 | } |
1270 | 1269 | ||
1271 | /* Destroy the volume RB-tree */ | 1270 | /* Destroy the volume RB-tree */ |
1272 | rb = ai->volumes.rb_node; | 1271 | rb = ai->volumes.rb_node; |
1273 | while (rb) { | 1272 | while (rb) { |
1274 | if (rb->rb_left) | 1273 | if (rb->rb_left) |
1275 | rb = rb->rb_left; | 1274 | rb = rb->rb_left; |
1276 | else if (rb->rb_right) | 1275 | else if (rb->rb_right) |
1277 | rb = rb->rb_right; | 1276 | rb = rb->rb_right; |
1278 | else { | 1277 | else { |
1279 | av = rb_entry(rb, struct ubi_ainf_volume, rb); | 1278 | av = rb_entry(rb, struct ubi_ainf_volume, rb); |
1280 | 1279 | ||
1281 | rb = rb_parent(rb); | 1280 | rb = rb_parent(rb); |
1282 | if (rb) { | 1281 | if (rb) { |
1283 | if (rb->rb_left == &av->rb) | 1282 | if (rb->rb_left == &av->rb) |
1284 | rb->rb_left = NULL; | 1283 | rb->rb_left = NULL; |
1285 | else | 1284 | else |
1286 | rb->rb_right = NULL; | 1285 | rb->rb_right = NULL; |
1287 | } | 1286 | } |
1288 | 1287 | ||
1289 | destroy_av(ai, av); | 1288 | destroy_av(ai, av); |
1290 | } | 1289 | } |
1291 | } | 1290 | } |
1292 | 1291 | ||
1293 | if (ai->aeb_slab_cache) | 1292 | if (ai->aeb_slab_cache) |
1294 | kmem_cache_destroy(ai->aeb_slab_cache); | 1293 | kmem_cache_destroy(ai->aeb_slab_cache); |
1295 | 1294 | ||
1296 | kfree(ai); | 1295 | kfree(ai); |
1297 | } | 1296 | } |
1298 | 1297 | ||
1299 | /** | 1298 | /** |
1300 | * self_check_ai - check the attaching information. | 1299 | * self_check_ai - check the attaching information. |
1301 | * @ubi: UBI device description object | 1300 | * @ubi: UBI device description object |
1302 | * @ai: attaching information | 1301 | * @ai: attaching information |
1303 | * | 1302 | * |
1304 | * This function returns zero if the attaching information is all right, and a | 1303 | * This function returns zero if the attaching information is all right, and a |
1305 | * negative error code if not or if an error occurred. | 1304 | * negative error code if not or if an error occurred. |
1306 | */ | 1305 | */ |
1307 | static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai) | 1306 | static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai) |
1308 | { | 1307 | { |
1309 | int pnum, err, vols_found = 0; | 1308 | int pnum, err, vols_found = 0; |
1310 | struct rb_node *rb1, *rb2; | 1309 | struct rb_node *rb1, *rb2; |
1311 | struct ubi_ainf_volume *av; | 1310 | struct ubi_ainf_volume *av; |
1312 | struct ubi_ainf_peb *aeb, *last_aeb; | 1311 | struct ubi_ainf_peb *aeb, *last_aeb; |
1313 | uint8_t *buf; | 1312 | uint8_t *buf; |
1314 | 1313 | ||
1315 | if (!ubi->dbg->chk_gen) | 1314 | if (!ubi->dbg->chk_gen) |
1316 | return 0; | 1315 | return 0; |
1317 | 1316 | ||
1318 | /* | 1317 | /* |
1319 | * At first, check that attaching information is OK. | 1318 | * At first, check that attaching information is OK. |
1320 | */ | 1319 | */ |
1321 | ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { | 1320 | ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { |
1322 | int leb_count = 0; | 1321 | int leb_count = 0; |
1323 | 1322 | ||
1324 | cond_resched(); | 1323 | cond_resched(); |
1325 | 1324 | ||
1326 | vols_found += 1; | 1325 | vols_found += 1; |
1327 | 1326 | ||
1328 | if (ai->is_empty) { | 1327 | if (ai->is_empty) { |
1329 | ubi_err("bad is_empty flag"); | 1328 | ubi_err("bad is_empty flag"); |
1330 | goto bad_av; | 1329 | goto bad_av; |
1331 | } | 1330 | } |
1332 | 1331 | ||
1333 | if (av->vol_id < 0 || av->highest_lnum < 0 || | 1332 | if (av->vol_id < 0 || av->highest_lnum < 0 || |
1334 | av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 || | 1333 | av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 || |
1335 | av->data_pad < 0 || av->last_data_size < 0) { | 1334 | av->data_pad < 0 || av->last_data_size < 0) { |
1336 | ubi_err("negative values"); | 1335 | ubi_err("negative values"); |
1337 | goto bad_av; | 1336 | goto bad_av; |
1338 | } | 1337 | } |
1339 | 1338 | ||
1340 | if (av->vol_id >= UBI_MAX_VOLUMES && | 1339 | if (av->vol_id >= UBI_MAX_VOLUMES && |
1341 | av->vol_id < UBI_INTERNAL_VOL_START) { | 1340 | av->vol_id < UBI_INTERNAL_VOL_START) { |
1342 | ubi_err("bad vol_id"); | 1341 | ubi_err("bad vol_id"); |
1343 | goto bad_av; | 1342 | goto bad_av; |
1344 | } | 1343 | } |
1345 | 1344 | ||
1346 | if (av->vol_id > ai->highest_vol_id) { | 1345 | if (av->vol_id > ai->highest_vol_id) { |
1347 | ubi_err("highest_vol_id is %d, but vol_id %d is there", | 1346 | ubi_err("highest_vol_id is %d, but vol_id %d is there", |
1348 | ai->highest_vol_id, av->vol_id); | 1347 | ai->highest_vol_id, av->vol_id); |
1349 | goto out; | 1348 | goto out; |
1350 | } | 1349 | } |
1351 | 1350 | ||
1352 | if (av->vol_type != UBI_DYNAMIC_VOLUME && | 1351 | if (av->vol_type != UBI_DYNAMIC_VOLUME && |
1353 | av->vol_type != UBI_STATIC_VOLUME) { | 1352 | av->vol_type != UBI_STATIC_VOLUME) { |
1354 | ubi_err("bad vol_type"); | 1353 | ubi_err("bad vol_type"); |
1355 | goto bad_av; | 1354 | goto bad_av; |
1356 | } | 1355 | } |
1357 | 1356 | ||
1358 | if (av->data_pad > ubi->leb_size / 2) { | 1357 | if (av->data_pad > ubi->leb_size / 2) { |
1359 | ubi_err("bad data_pad"); | 1358 | ubi_err("bad data_pad"); |
1360 | goto bad_av; | 1359 | goto bad_av; |
1361 | } | 1360 | } |
1362 | 1361 | ||
1363 | last_aeb = NULL; | 1362 | last_aeb = NULL; |
1364 | ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) { | 1363 | ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) { |
1365 | cond_resched(); | 1364 | cond_resched(); |
1366 | 1365 | ||
1367 | last_aeb = aeb; | 1366 | last_aeb = aeb; |
1368 | leb_count += 1; | 1367 | leb_count += 1; |
1369 | 1368 | ||
1370 | if (aeb->pnum < 0 || aeb->ec < 0) { | 1369 | if (aeb->pnum < 0 || aeb->ec < 0) { |
1371 | ubi_err("negative values"); | 1370 | ubi_err("negative values"); |
1372 | goto bad_aeb; | 1371 | goto bad_aeb; |
1373 | } | 1372 | } |
1374 | 1373 | ||
1375 | if (aeb->ec < ai->min_ec) { | 1374 | if (aeb->ec < ai->min_ec) { |
1376 | ubi_err("bad ai->min_ec (%d), %d found", | 1375 | ubi_err("bad ai->min_ec (%d), %d found", |
1377 | ai->min_ec, aeb->ec); | 1376 | ai->min_ec, aeb->ec); |
1378 | goto bad_aeb; | 1377 | goto bad_aeb; |
1379 | } | 1378 | } |
1380 | 1379 | ||
1381 | if (aeb->ec > ai->max_ec) { | 1380 | if (aeb->ec > ai->max_ec) { |
1382 | ubi_err("bad ai->max_ec (%d), %d found", | 1381 | ubi_err("bad ai->max_ec (%d), %d found", |
1383 | ai->max_ec, aeb->ec); | 1382 | ai->max_ec, aeb->ec); |
1384 | goto bad_aeb; | 1383 | goto bad_aeb; |
1385 | } | 1384 | } |
1386 | 1385 | ||
1387 | if (aeb->pnum >= ubi->peb_count) { | 1386 | if (aeb->pnum >= ubi->peb_count) { |
1388 | ubi_err("too high PEB number %d, total PEBs %d", | 1387 | ubi_err("too high PEB number %d, total PEBs %d", |
1389 | aeb->pnum, ubi->peb_count); | 1388 | aeb->pnum, ubi->peb_count); |
1390 | goto bad_aeb; | 1389 | goto bad_aeb; |
1391 | } | 1390 | } |
1392 | 1391 | ||
1393 | if (av->vol_type == UBI_STATIC_VOLUME) { | 1392 | if (av->vol_type == UBI_STATIC_VOLUME) { |
1394 | if (aeb->lnum >= av->used_ebs) { | 1393 | if (aeb->lnum >= av->used_ebs) { |
1395 | ubi_err("bad lnum or used_ebs"); | 1394 | ubi_err("bad lnum or used_ebs"); |
1396 | goto bad_aeb; | 1395 | goto bad_aeb; |
1397 | } | 1396 | } |
1398 | } else { | 1397 | } else { |
1399 | if (av->used_ebs != 0) { | 1398 | if (av->used_ebs != 0) { |
1400 | ubi_err("non-zero used_ebs"); | 1399 | ubi_err("non-zero used_ebs"); |
1401 | goto bad_aeb; | 1400 | goto bad_aeb; |
1402 | } | 1401 | } |
1403 | } | 1402 | } |
1404 | 1403 | ||
1405 | if (aeb->lnum > av->highest_lnum) { | 1404 | if (aeb->lnum > av->highest_lnum) { |
1406 | ubi_err("incorrect highest_lnum or lnum"); | 1405 | ubi_err("incorrect highest_lnum or lnum"); |
1407 | goto bad_aeb; | 1406 | goto bad_aeb; |
1408 | } | 1407 | } |
1409 | } | 1408 | } |
1410 | 1409 | ||
1411 | if (av->leb_count != leb_count) { | 1410 | if (av->leb_count != leb_count) { |
1412 | ubi_err("bad leb_count, %d objects in the tree", | 1411 | ubi_err("bad leb_count, %d objects in the tree", |
1413 | leb_count); | 1412 | leb_count); |
1414 | goto bad_av; | 1413 | goto bad_av; |
1415 | } | 1414 | } |
1416 | 1415 | ||
1417 | if (!last_aeb) | 1416 | if (!last_aeb) |
1418 | continue; | 1417 | continue; |
1419 | 1418 | ||
1420 | aeb = last_aeb; | 1419 | aeb = last_aeb; |
1421 | 1420 | ||
1422 | if (aeb->lnum != av->highest_lnum) { | 1421 | if (aeb->lnum != av->highest_lnum) { |
1423 | ubi_err("bad highest_lnum"); | 1422 | ubi_err("bad highest_lnum"); |
1424 | goto bad_aeb; | 1423 | goto bad_aeb; |
1425 | } | 1424 | } |
1426 | } | 1425 | } |
1427 | 1426 | ||
1428 | if (vols_found != ai->vols_found) { | 1427 | if (vols_found != ai->vols_found) { |
1429 | ubi_err("bad ai->vols_found %d, should be %d", | 1428 | ubi_err("bad ai->vols_found %d, should be %d", |
1430 | ai->vols_found, vols_found); | 1429 | ai->vols_found, vols_found); |
1431 | goto out; | 1430 | goto out; |
1432 | } | 1431 | } |
1433 | 1432 | ||
1434 | /* Check that attaching information is correct */ | 1433 | /* Check that attaching information is correct */ |
1435 | ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { | 1434 | ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { |
1436 | last_aeb = NULL; | 1435 | last_aeb = NULL; |
1437 | ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) { | 1436 | ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) { |
1438 | int vol_type; | 1437 | int vol_type; |
1439 | 1438 | ||
1440 | cond_resched(); | 1439 | cond_resched(); |
1441 | 1440 | ||
1442 | last_aeb = aeb; | 1441 | last_aeb = aeb; |
1443 | 1442 | ||
1444 | err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1); | 1443 | err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1); |
1445 | if (err && err != UBI_IO_BITFLIPS) { | 1444 | if (err && err != UBI_IO_BITFLIPS) { |
1446 | ubi_err("VID header is not OK (%d)", err); | 1445 | ubi_err("VID header is not OK (%d)", err); |
1447 | if (err > 0) | 1446 | if (err > 0) |
1448 | err = -EIO; | 1447 | err = -EIO; |
1449 | return err; | 1448 | return err; |
1450 | } | 1449 | } |
1451 | 1450 | ||
1452 | vol_type = vidh->vol_type == UBI_VID_DYNAMIC ? | 1451 | vol_type = vidh->vol_type == UBI_VID_DYNAMIC ? |
1453 | UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; | 1452 | UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; |
1454 | if (av->vol_type != vol_type) { | 1453 | if (av->vol_type != vol_type) { |
1455 | ubi_err("bad vol_type"); | 1454 | ubi_err("bad vol_type"); |
1456 | goto bad_vid_hdr; | 1455 | goto bad_vid_hdr; |
1457 | } | 1456 | } |
1458 | 1457 | ||
1459 | if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) { | 1458 | if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) { |
1460 | ubi_err("bad sqnum %llu", aeb->sqnum); | 1459 | ubi_err("bad sqnum %llu", aeb->sqnum); |
1461 | goto bad_vid_hdr; | 1460 | goto bad_vid_hdr; |
1462 | } | 1461 | } |
1463 | 1462 | ||
1464 | if (av->vol_id != be32_to_cpu(vidh->vol_id)) { | 1463 | if (av->vol_id != be32_to_cpu(vidh->vol_id)) { |
1465 | ubi_err("bad vol_id %d", av->vol_id); | 1464 | ubi_err("bad vol_id %d", av->vol_id); |
1466 | goto bad_vid_hdr; | 1465 | goto bad_vid_hdr; |
1467 | } | 1466 | } |
1468 | 1467 | ||
1469 | if (av->compat != vidh->compat) { | 1468 | if (av->compat != vidh->compat) { |
1470 | ubi_err("bad compat %d", vidh->compat); | 1469 | ubi_err("bad compat %d", vidh->compat); |
1471 | goto bad_vid_hdr; | 1470 | goto bad_vid_hdr; |
1472 | } | 1471 | } |
1473 | 1472 | ||
1474 | if (aeb->lnum != be32_to_cpu(vidh->lnum)) { | 1473 | if (aeb->lnum != be32_to_cpu(vidh->lnum)) { |
1475 | ubi_err("bad lnum %d", aeb->lnum); | 1474 | ubi_err("bad lnum %d", aeb->lnum); |
1476 | goto bad_vid_hdr; | 1475 | goto bad_vid_hdr; |
1477 | } | 1476 | } |
1478 | 1477 | ||
1479 | if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) { | 1478 | if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) { |
1480 | ubi_err("bad used_ebs %d", av->used_ebs); | 1479 | ubi_err("bad used_ebs %d", av->used_ebs); |
1481 | goto bad_vid_hdr; | 1480 | goto bad_vid_hdr; |
1482 | } | 1481 | } |
1483 | 1482 | ||
1484 | if (av->data_pad != be32_to_cpu(vidh->data_pad)) { | 1483 | if (av->data_pad != be32_to_cpu(vidh->data_pad)) { |
1485 | ubi_err("bad data_pad %d", av->data_pad); | 1484 | ubi_err("bad data_pad %d", av->data_pad); |
1486 | goto bad_vid_hdr; | 1485 | goto bad_vid_hdr; |
1487 | } | 1486 | } |
1488 | } | 1487 | } |
1489 | 1488 | ||
1490 | if (!last_aeb) | 1489 | if (!last_aeb) |
1491 | continue; | 1490 | continue; |
1492 | 1491 | ||
1493 | if (av->highest_lnum != be32_to_cpu(vidh->lnum)) { | 1492 | if (av->highest_lnum != be32_to_cpu(vidh->lnum)) { |
1494 | ubi_err("bad highest_lnum %d", av->highest_lnum); | 1493 | ubi_err("bad highest_lnum %d", av->highest_lnum); |
1495 | goto bad_vid_hdr; | 1494 | goto bad_vid_hdr; |
1496 | } | 1495 | } |
1497 | 1496 | ||
1498 | if (av->last_data_size != be32_to_cpu(vidh->data_size)) { | 1497 | if (av->last_data_size != be32_to_cpu(vidh->data_size)) { |
1499 | ubi_err("bad last_data_size %d", av->last_data_size); | 1498 | ubi_err("bad last_data_size %d", av->last_data_size); |
1500 | goto bad_vid_hdr; | 1499 | goto bad_vid_hdr; |
1501 | } | 1500 | } |
1502 | } | 1501 | } |
1503 | 1502 | ||
1504 | /* | 1503 | /* |
1505 | * Make sure that all the physical eraseblocks are in one of the lists | 1504 | * Make sure that all the physical eraseblocks are in one of the lists |
1506 | * or trees. | 1505 | * or trees. |
1507 | */ | 1506 | */ |
1508 | buf = kzalloc(ubi->peb_count, GFP_KERNEL); | 1507 | buf = kzalloc(ubi->peb_count, GFP_KERNEL); |
1509 | if (!buf) | 1508 | if (!buf) |
1510 | return -ENOMEM; | 1509 | return -ENOMEM; |
1511 | 1510 | ||
1512 | for (pnum = 0; pnum < ubi->peb_count; pnum++) { | 1511 | for (pnum = 0; pnum < ubi->peb_count; pnum++) { |
1513 | err = ubi_io_is_bad(ubi, pnum); | 1512 | err = ubi_io_is_bad(ubi, pnum); |
1514 | if (err < 0) { | 1513 | if (err < 0) { |
1515 | kfree(buf); | 1514 | kfree(buf); |
1516 | return err; | 1515 | return err; |
1517 | } else if (err) | 1516 | } else if (err) |
1518 | buf[pnum] = 1; | 1517 | buf[pnum] = 1; |
1519 | } | 1518 | } |
1520 | 1519 | ||
1521 | ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) | 1520 | ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) |
1522 | ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) | 1521 | ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) |
1523 | buf[aeb->pnum] = 1; | 1522 | buf[aeb->pnum] = 1; |
1524 | 1523 | ||
1525 | list_for_each_entry(aeb, &ai->free, u.list) | 1524 | list_for_each_entry(aeb, &ai->free, u.list) |
1526 | buf[aeb->pnum] = 1; | 1525 | buf[aeb->pnum] = 1; |
1527 | 1526 | ||
1528 | list_for_each_entry(aeb, &ai->corr, u.list) | 1527 | list_for_each_entry(aeb, &ai->corr, u.list) |
1529 | buf[aeb->pnum] = 1; | 1528 | buf[aeb->pnum] = 1; |
1530 | 1529 | ||
1531 | list_for_each_entry(aeb, &ai->erase, u.list) | 1530 | list_for_each_entry(aeb, &ai->erase, u.list) |
1532 | buf[aeb->pnum] = 1; | 1531 | buf[aeb->pnum] = 1; |
1533 | 1532 | ||
1534 | list_for_each_entry(aeb, &ai->alien, u.list) | 1533 | list_for_each_entry(aeb, &ai->alien, u.list) |
1535 | buf[aeb->pnum] = 1; | 1534 | buf[aeb->pnum] = 1; |
1536 | 1535 | ||
1537 | err = 0; | 1536 | err = 0; |
1538 | for (pnum = 0; pnum < ubi->peb_count; pnum++) | 1537 | for (pnum = 0; pnum < ubi->peb_count; pnum++) |
1539 | if (!buf[pnum]) { | 1538 | if (!buf[pnum]) { |
1540 | ubi_err("PEB %d is not referred", pnum); | 1539 | ubi_err("PEB %d is not referred", pnum); |
1541 | err = 1; | 1540 | err = 1; |
1542 | } | 1541 | } |
1543 | 1542 | ||
1544 | kfree(buf); | 1543 | kfree(buf); |
1545 | if (err) | 1544 | if (err) |
1546 | goto out; | 1545 | goto out; |
1547 | return 0; | 1546 | return 0; |
1548 | 1547 | ||
1549 | bad_aeb: | 1548 | bad_aeb: |
1550 | ubi_err("bad attaching information about LEB %d", aeb->lnum); | 1549 | ubi_err("bad attaching information about LEB %d", aeb->lnum); |
1551 | ubi_dump_aeb(aeb, 0); | 1550 | ubi_dump_aeb(aeb, 0); |
1552 | ubi_dump_av(av); | 1551 | ubi_dump_av(av); |
1553 | goto out; | 1552 | goto out; |
1554 | 1553 | ||
1555 | bad_av: | 1554 | bad_av: |
1556 | ubi_err("bad attaching information about volume %d", av->vol_id); | 1555 | ubi_err("bad attaching information about volume %d", av->vol_id); |
1557 | ubi_dump_av(av); | 1556 | ubi_dump_av(av); |
1558 | goto out; | 1557 | goto out; |
1559 | 1558 | ||
1560 | bad_vid_hdr: | 1559 | bad_vid_hdr: |
1561 | ubi_err("bad attaching information about volume %d", av->vol_id); | 1560 | ubi_err("bad attaching information about volume %d", av->vol_id); |
1562 | ubi_dump_av(av); | 1561 | ubi_dump_av(av); |
1563 | ubi_dump_vid_hdr(vidh); | 1562 | ubi_dump_vid_hdr(vidh); |
drivers/mtd/ubi/scan.h
1 | /* | 1 | /* |
2 | * Copyright (c) International Business Machines Corp., 2006 | 2 | * Copyright (c) International Business Machines Corp., 2006 |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
6 | * the Free Software Foundation; either version 2 of the License, or | 6 | * the Free Software Foundation; either version 2 of the License, or |
7 | * (at your option) any later version. | 7 | * (at your option) any later version. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, | 9 | * This program is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See |
12 | * the GNU General Public License for more details. | 12 | * the GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
17 | * | 17 | * |
18 | * Author: Artem Bityutskiy (Битюцкий Артём) | 18 | * Author: Artem Bityutskiy (Битюцкий Артём) |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #ifndef __UBI_SCAN_H__ | 21 | #ifndef __UBI_SCAN_H__ |
22 | #define __UBI_SCAN_H__ | 22 | #define __UBI_SCAN_H__ |
23 | 23 | ||
24 | /* The erase counter value for this physical eraseblock is unknown */ | 24 | /* The erase counter value for this physical eraseblock is unknown */ |
25 | #define UBI_SCAN_UNKNOWN_EC (-1) | 25 | #define UBI_SCAN_UNKNOWN_EC (-1) |
26 | 26 | ||
27 | /** | 27 | /** |
28 | * struct ubi_ainf_peb - attach information about a physical eraseblock. | 28 | * struct ubi_ainf_peb - attach information about a physical eraseblock. |
29 | * @ec: erase counter (%UBI_SCAN_UNKNOWN_EC if it is unknown) | 29 | * @ec: erase counter (%UBI_SCAN_UNKNOWN_EC if it is unknown) |
30 | * @pnum: physical eraseblock number | 30 | * @pnum: physical eraseblock number |
31 | * @lnum: logical eraseblock number | 31 | * @lnum: logical eraseblock number |
32 | * @scrub: if this physical eraseblock needs scrubbing | 32 | * @scrub: if this physical eraseblock needs scrubbing |
33 | * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB) | 33 | * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB) |
34 | * @sqnum: sequence number | 34 | * @sqnum: sequence number |
35 | * @u: unions RB-tree or @list links | 35 | * @u: unions RB-tree or @list links |
36 | * @u.rb: link in the per-volume RB-tree of &struct ubi_ainf_peb objects | 36 | * @u.rb: link in the per-volume RB-tree of &struct ubi_ainf_peb objects |
37 | * @u.list: link in one of the eraseblock lists | 37 | * @u.list: link in one of the eraseblock lists |
38 | * | 38 | * |
39 | * One object of this type is allocated for each physical eraseblock when | 39 | * One object of this type is allocated for each physical eraseblock when |
40 | * attaching an MTD device. | 40 | * attaching an MTD device. |
41 | */ | 41 | */ |
42 | struct ubi_ainf_peb { | 42 | struct ubi_ainf_peb { |
43 | int ec; | 43 | int ec; |
44 | int pnum; | 44 | int pnum; |
45 | int lnum; | 45 | int lnum; |
46 | unsigned int scrub:1; | 46 | unsigned int scrub:1; |
47 | unsigned int copy_flag:1; | 47 | unsigned int copy_flag:1; |
48 | unsigned long long sqnum; | 48 | unsigned long long sqnum; |
49 | union { | 49 | union { |
50 | struct rb_node rb; | 50 | struct rb_node rb; |
51 | struct list_head list; | 51 | struct list_head list; |
52 | } u; | 52 | } u; |
53 | }; | 53 | }; |
54 | 54 | ||
55 | /** | 55 | /** |
56 | * struct ubi_ainf_volume - attaching information about a volume. | 56 | * struct ubi_ainf_volume - attaching information about a volume. |
57 | * @vol_id: volume ID | 57 | * @vol_id: volume ID |
58 | * @highest_lnum: highest logical eraseblock number in this volume | 58 | * @highest_lnum: highest logical eraseblock number in this volume |
59 | * @leb_count: number of logical eraseblocks in this volume | 59 | * @leb_count: number of logical eraseblocks in this volume |
60 | * @vol_type: volume type | 60 | * @vol_type: volume type |
61 | * @used_ebs: number of used logical eraseblocks in this volume (only for | 61 | * @used_ebs: number of used logical eraseblocks in this volume (only for |
62 | * static volumes) | 62 | * static volumes) |
63 | * @last_data_size: amount of data in the last logical eraseblock of this | 63 | * @last_data_size: amount of data in the last logical eraseblock of this |
64 | * volume (always equivalent to the usable logical eraseblock | 64 | * volume (always equivalent to the usable logical eraseblock |
65 | * size in case of dynamic volumes) | 65 | * size in case of dynamic volumes) |
66 | * @data_pad: how many bytes at the end of logical eraseblocks of this volume | 66 | * @data_pad: how many bytes at the end of logical eraseblocks of this volume |
67 | * are not used (due to volume alignment) | 67 | * are not used (due to volume alignment) |
68 | * @compat: compatibility flags of this volume | 68 | * @compat: compatibility flags of this volume |
69 | * @rb: link in the volume RB-tree | 69 | * @rb: link in the volume RB-tree |
70 | * @root: root of the RB-tree containing all the eraseblock belonging to this | 70 | * @root: root of the RB-tree containing all the eraseblock belonging to this |
71 | * volume (&struct ubi_ainf_peb objects) | 71 | * volume (&struct ubi_ainf_peb objects) |
72 | * | 72 | * |
73 | * One object of this type is allocated for each volume when attaching an MTD | 73 | * One object of this type is allocated for each volume when attaching an MTD |
74 | * device. | 74 | * device. |
75 | */ | 75 | */ |
76 | struct ubi_ainf_volume { | 76 | struct ubi_ainf_volume { |
77 | int vol_id; | 77 | int vol_id; |
78 | int highest_lnum; | 78 | int highest_lnum; |
79 | int leb_count; | 79 | int leb_count; |
80 | int vol_type; | 80 | int vol_type; |
81 | int used_ebs; | 81 | int used_ebs; |
82 | int last_data_size; | 82 | int last_data_size; |
83 | int data_pad; | 83 | int data_pad; |
84 | int compat; | 84 | int compat; |
85 | struct rb_node rb; | 85 | struct rb_node rb; |
86 | struct rb_root root; | 86 | struct rb_root root; |
87 | }; | 87 | }; |
88 | 88 | ||
/**
 * struct ubi_attach_info - MTD device attaching information.
 * @volumes: root of the volume RB-tree
 * @corr: list of corrupted physical eraseblocks
 * @free: list of free physical eraseblocks
 * @erase: list of physical eraseblocks which have to be erased
 * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
 *         those belonging to "preserve"-compatible internal volumes)
 * @corr_peb_count: count of PEBs in the @corr list
 * @empty_peb_count: count of PEBs which are presumably empty (contain only
 *                   0xFF bytes)
 * @alien_peb_count: count of PEBs in the @alien list
 * @bad_peb_count: count of bad physical eraseblocks
 * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked
 *                       as bad yet, but which look like bad
 * @vols_found: number of volumes found
 * @highest_vol_id: highest volume ID
 * @is_empty: flag indicating whether the MTD device is empty or not
 * @min_ec: lowest erase counter value
 * @max_ec: highest erase counter value
 * @max_sqnum: highest sequence number value
 * @mean_ec: mean erase counter value
 * @ec_sum: a temporary variable used when calculating @mean_ec
 * @ec_count: a temporary variable used when calculating @mean_ec
 * @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects
 *
 * This data structure contains the result of attaching an MTD device and may
 * be used by other UBI sub-systems to build final UBI data structures, further
 * error-recovery and so on.
 */
struct ubi_attach_info {
	struct rb_root volumes;
	struct list_head corr;
	struct list_head free;
	struct list_head erase;
	struct list_head alien;
	int corr_peb_count;
	int empty_peb_count;
	int alien_peb_count;
	int bad_peb_count;
	int maybe_bad_peb_count;
	int vols_found;
	int highest_vol_id;
	int is_empty;
	int min_ec;
	int max_ec;
	unsigned long long max_sqnum;
	int mean_ec;
	uint64_t ec_sum;
	int ec_count;
	struct kmem_cache *aeb_slab_cache;
};
141 | 141 | ||
142 | struct ubi_device; | 142 | struct ubi_device; |
143 | struct ubi_vid_hdr; | 143 | struct ubi_vid_hdr; |
144 | 144 | ||
/**
 * ubi_move_aeb_to_list - move a PEB from the volume tree to a list.
 * @av: volume attaching information
 * @aeb: attaching eraseblock information
 * @list: the list to move to
 */
static inline void ubi_move_aeb_to_list(struct ubi_ainf_volume *av,
					struct ubi_ainf_peb *aeb,
					struct list_head *list)
{
	/*
	 * NOTE(review): @aeb->u appears to be a union of the RB-tree node and
	 * the list node (see struct ubi_ainf_peb) - the PEB must be erased
	 * from the tree *before* list_add_tail() re-initializes the same
	 * storage as a list link. Confirm the union layout before ever
	 * reordering these two calls.
	 */
	rb_erase(&aeb->u.rb, &av->root);
	list_add_tail(&aeb->u.list, list);
}
159 | 159 | ||
160 | int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, | 160 | int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, |
161 | int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips); | 161 | int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips); |
162 | struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai, | 162 | struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai, |
163 | int vol_id); | 163 | int vol_id); |
164 | void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av); | 164 | void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av); |
165 | struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi, | 165 | struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi, |
166 | struct ubi_attach_info *ai); | 166 | struct ubi_attach_info *ai); |
167 | struct ubi_attach_info *ubi_scan(struct ubi_device *ubi); | 167 | struct ubi_attach_info *ubi_scan(struct ubi_device *ubi); |
168 | void ubi_destroy_ai(struct ubi_attach_info *ai); | 168 | void ubi_destroy_ai(struct ubi_attach_info *ai); |
169 | 169 | ||
170 | #endif /* !__UBI_SCAN_H__ */ | 170 | #endif /* !__UBI_SCAN_H__ */ |
171 | 171 |
drivers/mtd/ubi/ubi-media.h
1 | /* | 1 | /* |
2 | * Copyright (c) International Business Machines Corp., 2006 | 2 | * Copyright (c) International Business Machines Corp., 2006 |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
6 | * the Free Software Foundation; either version 2 of the License, or | 6 | * the Free Software Foundation; either version 2 of the License, or |
7 | * (at your option) any later version. | 7 | * (at your option) any later version. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, | 9 | * This program is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See |
12 | * the GNU General Public License for more details. | 12 | * the GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
17 | * | 17 | * |
18 | * Authors: Artem Bityutskiy (Битюцкий Артём) | 18 | * Authors: Artem Bityutskiy (Битюцкий Артём) |
19 | * Thomas Gleixner | 19 | * Thomas Gleixner |
20 | * Frank Haverkamp | 20 | * Frank Haverkamp |
21 | * Oliver Lohmann | 21 | * Oliver Lohmann |
22 | * Andreas Arnez | 22 | * Andreas Arnez |
23 | */ | 23 | */ |
24 | 24 | ||
25 | /* | 25 | /* |
26 | * This file defines the layout of UBI headers and all the other UBI on-flash | 26 | * This file defines the layout of UBI headers and all the other UBI on-flash |
27 | * data structures. | 27 | * data structures. |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #ifndef __UBI_MEDIA_H__ | 30 | #ifndef __UBI_MEDIA_H__ |
31 | #define __UBI_MEDIA_H__ | 31 | #define __UBI_MEDIA_H__ |
32 | 32 | ||
33 | #include <asm/byteorder.h> | 33 | #include <asm/byteorder.h> |
34 | 34 | ||
35 | /* The version of UBI images supported by this implementation */ | 35 | /* The version of UBI images supported by this implementation */ |
36 | #define UBI_VERSION 1 | 36 | #define UBI_VERSION 1 |
37 | 37 | ||
38 | /* The highest erase counter value supported by this implementation */ | 38 | /* The highest erase counter value supported by this implementation */ |
39 | #define UBI_MAX_ERASECOUNTER 0x7FFFFFFF | 39 | #define UBI_MAX_ERASECOUNTER 0x7FFFFFFF |
40 | 40 | ||
41 | /* The initial CRC32 value used when calculating CRC checksums */ | 41 | /* The initial CRC32 value used when calculating CRC checksums */ |
42 | #define UBI_CRC32_INIT 0xFFFFFFFFU | 42 | #define UBI_CRC32_INIT 0xFFFFFFFFU |
43 | 43 | ||
44 | /* Erase counter header magic number (ASCII "UBI#") */ | 44 | /* Erase counter header magic number (ASCII "UBI#") */ |
45 | #define UBI_EC_HDR_MAGIC 0x55424923 | 45 | #define UBI_EC_HDR_MAGIC 0x55424923 |
46 | /* Volume identifier header magic number (ASCII "UBI!") */ | 46 | /* Volume identifier header magic number (ASCII "UBI!") */ |
47 | #define UBI_VID_HDR_MAGIC 0x55424921 | 47 | #define UBI_VID_HDR_MAGIC 0x55424921 |
48 | 48 | ||
/*
 * Volume type constants used in the volume identifier header.
 *
 * @UBI_VID_DYNAMIC: dynamic volume
 * @UBI_VID_STATIC: static volume
 *
 * These values are stored on flash in the VID header, so they must never be
 * changed.
 */
enum {
	UBI_VID_DYNAMIC = 1,
	UBI_VID_STATIC = 2
};
59 | 59 | ||
/*
 * Volume flags used in the volume table record.
 *
 * @UBI_VTBL_AUTORESIZE_FLG: auto-resize this volume
 *
 * %UBI_VTBL_AUTORESIZE_FLG flag can be set only for one volume in the volume
 * table. UBI automatically re-sizes the volume which has this flag and makes
 * the volume to be of largest possible size. This means that if after the
 * initialization UBI finds out that there are available physical eraseblocks
 * present on the device, it automatically appends all of them to the volume
 * (the physical eraseblocks reserved for bad eraseblocks handling and other
 * reserved physical eraseblocks are not taken). So, if there is a volume with
 * the %UBI_VTBL_AUTORESIZE_FLG flag set, the amount of available logical
 * eraseblocks will be zero after UBI is loaded, because all of them will be
 * reserved for this volume. Note, the %UBI_VTBL_AUTORESIZE_FLG bit is cleared
 * after the volume had been initialized.
 *
 * The auto-resize feature is useful for device production purposes. For
 * example, different NAND flash chips may have different amount of initial bad
 * eraseblocks, depending of particular chip instance. Manufacturers of NAND
 * chips usually guarantee that the amount of initial bad eraseblocks does not
 * exceed certain percent, e.g. 2%. When one creates an UBI image which will be
 * flashed to the end devices in production, he does not know the exact amount
 * of good physical eraseblocks the NAND chip on the device will have, but this
 * number is required to calculate the volume sizes and put them to the volume
 * table of the UBI image. In this case, one of the volumes (e.g., the one
 * which will store the root file system) is marked as "auto-resizable", and
 * UBI will adjust its size on the first boot if needed.
 *
 * Note, first UBI reserves some amount of physical eraseblocks for bad
 * eraseblock handling, and then re-sizes the volume, not vice-versa. This
 * means that the pool of reserved physical eraseblocks will always be present.
 */
enum {
	UBI_VTBL_AUTORESIZE_FLG = 0x01,
};
96 | 96 | ||
/*
 * Compatibility constants used by internal volumes.
 *
 * @UBI_COMPAT_DELETE: delete this internal volume before anything is written
 *                     to the flash
 * @UBI_COMPAT_RO: attach this device in read-only mode
 * @UBI_COMPAT_PRESERVE: preserve this internal volume - do not touch its
 *                       physical eraseblocks, don't allow the wear-leveling
 *                       sub-system to move them
 * @UBI_COMPAT_REJECT: reject this UBI image
 *
 * These values are stored on flash in the VID header @compat field and must
 * not be renumbered. NOTE(review): value 3 is intentionally skipped -
 * presumably the retired %UBI_COMPAT_IGNORE (still mentioned in the VID
 * header documentation below); confirm before reusing the value.
 */
enum {
	UBI_COMPAT_DELETE = 1,
	UBI_COMPAT_RO = 2,
	UBI_COMPAT_PRESERVE = 4,
	UBI_COMPAT_REJECT = 5
};
114 | 114 | ||
115 | /* Sizes of UBI headers */ | 115 | /* Sizes of UBI headers */ |
116 | #define UBI_EC_HDR_SIZE sizeof(struct ubi_ec_hdr) | 116 | #define UBI_EC_HDR_SIZE sizeof(struct ubi_ec_hdr) |
117 | #define UBI_VID_HDR_SIZE sizeof(struct ubi_vid_hdr) | 117 | #define UBI_VID_HDR_SIZE sizeof(struct ubi_vid_hdr) |
118 | 118 | ||
119 | /* Sizes of UBI headers without the ending CRC */ | 119 | /* Sizes of UBI headers without the ending CRC */ |
120 | #define UBI_EC_HDR_SIZE_CRC (UBI_EC_HDR_SIZE - sizeof(__be32)) | 120 | #define UBI_EC_HDR_SIZE_CRC (UBI_EC_HDR_SIZE - sizeof(__be32)) |
121 | #define UBI_VID_HDR_SIZE_CRC (UBI_VID_HDR_SIZE - sizeof(__be32)) | 121 | #define UBI_VID_HDR_SIZE_CRC (UBI_VID_HDR_SIZE - sizeof(__be32)) |
122 | 122 | ||
/**
 * struct ubi_ec_hdr - UBI erase counter header.
 * @magic: erase counter header magic number (%UBI_EC_HDR_MAGIC)
 * @version: version of UBI implementation which is supposed to accept this
 *           UBI image
 * @padding1: reserved for future, zeroes
 * @ec: the erase counter
 * @vid_hdr_offset: where the VID header starts
 * @data_offset: where the user data start
 * @image_seq: image sequence number
 * @padding2: reserved for future, zeroes
 * @hdr_crc: erase counter header CRC checksum
 *
 * The erase counter header takes 64 bytes and has a plenty of unused space for
 * future usage. The unused fields are zeroed. The @version field is used to
 * indicate the version of UBI implementation which is supposed to be able to
 * work with this UBI image. If @version is greater than the current UBI
 * version, the image is rejected. This may be useful in future if something
 * is changed radically. This field is duplicated in the volume identifier
 * header.
 *
 * The @vid_hdr_offset and @data_offset fields contain the offset of the
 * volume identifier header and user data, relative to the beginning of the
 * physical eraseblock. These values have to be the same for all physical
 * eraseblocks.
 *
 * The @image_seq field is used to validate a UBI image that has been prepared
 * for a UBI device. The @image_seq value can be any value, but it must be the
 * same on all eraseblocks. UBI will ensure that all new erase counter headers
 * also contain this value, and will check the value when attaching the flash.
 * One way to make use of @image_seq is to increase its value by one every time
 * an image is flashed over an existing image, then, if the flashing does not
 * complete, UBI will detect the error when attaching the media.
 */
struct ubi_ec_hdr {
	__be32  magic;
	__u8    version;
	__u8    padding1[3];
	__be64  ec; /* Warning: the current limit is 31-bit anyway! */
	__be32  vid_hdr_offset;
	__be32  data_offset;
	__be32  image_seq;
	__u8    padding2[32];
	__be32  hdr_crc;
} __packed;
168 | 168 | ||
169 | /** | 169 | /** |
170 | * struct ubi_vid_hdr - on-flash UBI volume identifier header. | 170 | * struct ubi_vid_hdr - on-flash UBI volume identifier header. |
171 | * @magic: volume identifier header magic number (%UBI_VID_HDR_MAGIC) | 171 | * @magic: volume identifier header magic number (%UBI_VID_HDR_MAGIC) |
172 | * @version: UBI implementation version which is supposed to accept this UBI | 172 | * @version: UBI implementation version which is supposed to accept this UBI |
173 | * image (%UBI_VERSION) | 173 | * image (%UBI_VERSION) |
174 | * @vol_type: volume type (%UBI_VID_DYNAMIC or %UBI_VID_STATIC) | 174 | * @vol_type: volume type (%UBI_VID_DYNAMIC or %UBI_VID_STATIC) |
175 | * @copy_flag: if this logical eraseblock was copied from another physical | 175 | * @copy_flag: if this logical eraseblock was copied from another physical |
176 | * eraseblock (for wear-leveling reasons) | 176 | * eraseblock (for wear-leveling reasons) |
177 | * @compat: compatibility of this volume (%0, %UBI_COMPAT_DELETE, | 177 | * @compat: compatibility of this volume (%0, %UBI_COMPAT_DELETE, |
178 | * %UBI_COMPAT_IGNORE, %UBI_COMPAT_PRESERVE, or %UBI_COMPAT_REJECT) | 178 | * %UBI_COMPAT_IGNORE, %UBI_COMPAT_PRESERVE, or %UBI_COMPAT_REJECT) |
179 | * @vol_id: ID of this volume | 179 | * @vol_id: ID of this volume |
180 | * @lnum: logical eraseblock number | 180 | * @lnum: logical eraseblock number |
181 | * @padding1: reserved for future, zeroes | 181 | * @padding1: reserved for future, zeroes |
182 | * @data_size: how many bytes of data this logical eraseblock contains | 182 | * @data_size: how many bytes of data this logical eraseblock contains |
183 | * @used_ebs: total number of used logical eraseblocks in this volume | 183 | * @used_ebs: total number of used logical eraseblocks in this volume |
184 | * @data_pad: how many bytes at the end of this physical eraseblock are not | 184 | * @data_pad: how many bytes at the end of this physical eraseblock are not |
185 | * used | 185 | * used |
186 | * @data_crc: CRC checksum of the data stored in this logical eraseblock | 186 | * @data_crc: CRC checksum of the data stored in this logical eraseblock |
187 | * @padding2: reserved for future, zeroes | 187 | * @padding2: reserved for future, zeroes |
188 | * @sqnum: sequence number | 188 | * @sqnum: sequence number |
189 | * @padding3: reserved for future, zeroes | 189 | * @padding3: reserved for future, zeroes |
190 | * @hdr_crc: volume identifier header CRC checksum | 190 | * @hdr_crc: volume identifier header CRC checksum |
191 | * | 191 | * |
192 | * The @sqnum is the value of the global sequence counter at the time when this | 192 | * The @sqnum is the value of the global sequence counter at the time when this |
193 | * VID header was created. The global sequence counter is incremented each time | 193 | * VID header was created. The global sequence counter is incremented each time |
194 | * UBI writes a new VID header to the flash, i.e. when it maps a logical | 194 | * UBI writes a new VID header to the flash, i.e. when it maps a logical |
195 | * eraseblock to a new physical eraseblock. The global sequence counter is an | 195 | * eraseblock to a new physical eraseblock. The global sequence counter is an |
196 | * unsigned 64-bit integer and we assume it never overflows. The @sqnum | 196 | * unsigned 64-bit integer and we assume it never overflows. The @sqnum |
197 | * (sequence number) is used to distinguish between older and newer versions of | 197 | * (sequence number) is used to distinguish between older and newer versions of |
198 | * logical eraseblocks. | 198 | * logical eraseblocks. |
199 | * | 199 | * |
200 | * There are 2 situations when there may be more than one physical eraseblock | 200 | * There are 2 situations when there may be more than one physical eraseblock |
201 | * corresponding to the same logical eraseblock, i.e., having the same @vol_id | 201 | * corresponding to the same logical eraseblock, i.e., having the same @vol_id |
202 | * and @lnum values in the volume identifier header. Suppose we have a logical | 202 | * and @lnum values in the volume identifier header. Suppose we have a logical |
203 | * eraseblock L and it is mapped to the physical eraseblock P. | 203 | * eraseblock L and it is mapped to the physical eraseblock P. |
204 | * | 204 | * |
205 | * 1. Because UBI may erase physical eraseblocks asynchronously, the following | 205 | * 1. Because UBI may erase physical eraseblocks asynchronously, the following |
206 | * situation is possible: L is asynchronously erased, so P is scheduled for | 206 | * situation is possible: L is asynchronously erased, so P is scheduled for |
207 | * erasure, then L is written to,i.e. mapped to another physical eraseblock P1, | 207 | * erasure, then L is written to,i.e. mapped to another physical eraseblock P1, |
208 | * so P1 is written to, then an unclean reboot happens. Result - there are 2 | 208 | * so P1 is written to, then an unclean reboot happens. Result - there are 2 |
209 | * physical eraseblocks P and P1 corresponding to the same logical eraseblock | 209 | * physical eraseblocks P and P1 corresponding to the same logical eraseblock |
210 | * L. But P1 has greater sequence number, so UBI picks P1 when it attaches the | 210 | * L. But P1 has greater sequence number, so UBI picks P1 when it attaches the |
211 | * flash. | 211 | * flash. |
212 | * | 212 | * |
213 | * 2. From time to time UBI moves logical eraseblocks to other physical | 213 | * 2. From time to time UBI moves logical eraseblocks to other physical |
214 | * eraseblocks for wear-leveling reasons. If, for example, UBI moves L from P | 214 | * eraseblocks for wear-leveling reasons. If, for example, UBI moves L from P |
215 | * to P1, and an unclean reboot happens before P is physically erased, there | 215 | * to P1, and an unclean reboot happens before P is physically erased, there |
216 | * are two physical eraseblocks P and P1 corresponding to L and UBI has to | 216 | * are two physical eraseblocks P and P1 corresponding to L and UBI has to |
217 | * select one of them when the flash is attached. The @sqnum field says which | 217 | * select one of them when the flash is attached. The @sqnum field says which |
218 | * PEB is the original (obviously P will have lower @sqnum) and the copy. But | 218 | * PEB is the original (obviously P will have lower @sqnum) and the copy. But |
219 | * it is not enough to select the physical eraseblock with the higher sequence | 219 | * it is not enough to select the physical eraseblock with the higher sequence |
220 | * number, because the unclean reboot could have happen in the middle of the | 220 | * number, because the unclean reboot could have happen in the middle of the |
221 | * copying process, so the data in P is corrupted. It is also not enough to | 221 | * copying process, so the data in P is corrupted. It is also not enough to |
222 | * just select the physical eraseblock with lower sequence number, because the | 222 | * just select the physical eraseblock with lower sequence number, because the |
223 | * data there may be old (consider a case if more data was added to P1 after | 223 | * data there may be old (consider a case if more data was added to P1 after |
224 | * the copying). Moreover, the unclean reboot may happen when the erasure of P | 224 | * the copying). Moreover, the unclean reboot may happen when the erasure of P |
225 | * was just started, so it result in unstable P, which is "mostly" OK, but | 225 | * was just started, so it result in unstable P, which is "mostly" OK, but |
226 | * still has unstable bits. | 226 | * still has unstable bits. |
227 | * | 227 | * |
228 | * UBI uses the @copy_flag field to indicate that this logical eraseblock is a | 228 | * UBI uses the @copy_flag field to indicate that this logical eraseblock is a |
229 | * copy. UBI also calculates data CRC when the data is moved and stores it at | 229 | * copy. UBI also calculates data CRC when the data is moved and stores it at |
230 | * the @data_crc field of the copy (P1). So when UBI needs to pick one physical | 230 | * the @data_crc field of the copy (P1). So when UBI needs to pick one physical |
231 | * eraseblock of two (P or P1), the @copy_flag of the newer one (P1) is | 231 | * eraseblock of two (P or P1), the @copy_flag of the newer one (P1) is |
 * examined. If it is cleared, the situation is simple and the newer one is
233 | * picked. If it is set, the data CRC of the copy (P1) is examined. If the CRC | 233 | * picked. If it is set, the data CRC of the copy (P1) is examined. If the CRC |
234 | * checksum is correct, this physical eraseblock is selected (P1). Otherwise | 234 | * checksum is correct, this physical eraseblock is selected (P1). Otherwise |
235 | * the older one (P) is selected. | 235 | * the older one (P) is selected. |
236 | * | 236 | * |
237 | * There are 2 sorts of volumes in UBI: user volumes and internal volumes. | 237 | * There are 2 sorts of volumes in UBI: user volumes and internal volumes. |
238 | * Internal volumes are not seen from outside and are used for various internal | 238 | * Internal volumes are not seen from outside and are used for various internal |
239 | * UBI purposes. In this implementation there is only one internal volume - the | 239 | * UBI purposes. In this implementation there is only one internal volume - the |
240 | * layout volume. Internal volumes are the main mechanism of UBI extensions. | 240 | * layout volume. Internal volumes are the main mechanism of UBI extensions. |
241 | * For example, in future one may introduce a journal internal volume. Internal | 241 | * For example, in future one may introduce a journal internal volume. Internal |
242 | * volumes have their own reserved range of IDs. | 242 | * volumes have their own reserved range of IDs. |
243 | * | 243 | * |
244 | * The @compat field is only used for internal volumes and contains the "degree | 244 | * The @compat field is only used for internal volumes and contains the "degree |
245 | * of their compatibility". It is always zero for user volumes. This field | 245 | * of their compatibility". It is always zero for user volumes. This field |
246 | * provides a mechanism to introduce UBI extensions and to be still compatible | 246 | * provides a mechanism to introduce UBI extensions and to be still compatible |
247 | * with older UBI binaries. For example, if someone introduced a journal in | 247 | * with older UBI binaries. For example, if someone introduced a journal in |
248 | * future, he would probably use %UBI_COMPAT_DELETE compatibility for the | 248 | * future, he would probably use %UBI_COMPAT_DELETE compatibility for the |
249 | * journal volume. And in this case, older UBI binaries, which know nothing | 249 | * journal volume. And in this case, older UBI binaries, which know nothing |
250 | * about the journal volume, would just delete this volume and work perfectly | 250 | * about the journal volume, would just delete this volume and work perfectly |
251 | * fine. This is similar to what Ext2fs does when it is fed by an Ext3fs image | 251 | * fine. This is similar to what Ext2fs does when it is fed by an Ext3fs image |
252 | * - it just ignores the Ext3fs journal. | 252 | * - it just ignores the Ext3fs journal. |
253 | * | 253 | * |
254 | * The @data_crc field contains the CRC checksum of the contents of the logical | 254 | * The @data_crc field contains the CRC checksum of the contents of the logical |
255 | * eraseblock if this is a static volume. In case of dynamic volumes, it does | 255 | * eraseblock if this is a static volume. In case of dynamic volumes, it does |
256 | * not contain the CRC checksum as a rule. The only exception is when the | 256 | * not contain the CRC checksum as a rule. The only exception is when the |
257 | * data of the physical eraseblock was moved by the wear-leveling sub-system, | 257 | * data of the physical eraseblock was moved by the wear-leveling sub-system, |
258 | * then the wear-leveling sub-system calculates the data CRC and stores it in | 258 | * then the wear-leveling sub-system calculates the data CRC and stores it in |
259 | * the @data_crc field. And of course, the @copy_flag is %1 in this case. | 259 | * the @data_crc field. And of course, the @copy_flag is %1 in this case. |
260 | * | 260 | * |
261 | * The @data_size field is used only for static volumes because UBI has to know | 261 | * The @data_size field is used only for static volumes because UBI has to know |
262 | * how many bytes of data are stored in this eraseblock. For dynamic volumes, | 262 | * how many bytes of data are stored in this eraseblock. For dynamic volumes, |
263 | * this field usually contains zero. The only exception is when the data of the | 263 | * this field usually contains zero. The only exception is when the data of the |
264 | * physical eraseblock was moved to another physical eraseblock for | 264 | * physical eraseblock was moved to another physical eraseblock for |
265 | * wear-leveling reasons. In this case, UBI calculates CRC checksum of the | 265 | * wear-leveling reasons. In this case, UBI calculates CRC checksum of the |
266 | * contents and uses both @data_crc and @data_size fields. In this case, the | 266 | * contents and uses both @data_crc and @data_size fields. In this case, the |
267 | * @data_size field contains data size. | 267 | * @data_size field contains data size. |
268 | * | 268 | * |
269 | * The @used_ebs field is used only for static volumes and indicates how many | 269 | * The @used_ebs field is used only for static volumes and indicates how many |
270 | * eraseblocks the data of the volume takes. For dynamic volumes this field is | 270 | * eraseblocks the data of the volume takes. For dynamic volumes this field is |
271 | * not used and always contains zero. | 271 | * not used and always contains zero. |
272 | * | 272 | * |
273 | * The @data_pad is calculated when volumes are created using the alignment | 273 | * The @data_pad is calculated when volumes are created using the alignment |
274 | * parameter. So, effectively, the @data_pad field reduces the size of logical | 274 | * parameter. So, effectively, the @data_pad field reduces the size of logical |
275 | * eraseblocks of this volume. This is very handy when one uses block-oriented | 275 | * eraseblocks of this volume. This is very handy when one uses block-oriented |
276 | * software (say, cramfs) on top of the UBI volume. | 276 | * software (say, cramfs) on top of the UBI volume. |
277 | */ | 277 | */ |
278 | struct ubi_vid_hdr { | 278 | struct ubi_vid_hdr { |
279 | __be32 magic; | 279 | __be32 magic; |
280 | __u8 version; | 280 | __u8 version; |
281 | __u8 vol_type; | 281 | __u8 vol_type; |
282 | __u8 copy_flag; | 282 | __u8 copy_flag; |
283 | __u8 compat; | 283 | __u8 compat; |
284 | __be32 vol_id; | 284 | __be32 vol_id; |
285 | __be32 lnum; | 285 | __be32 lnum; |
286 | __u8 padding1[4]; | 286 | __u8 padding1[4]; |
287 | __be32 data_size; | 287 | __be32 data_size; |
288 | __be32 used_ebs; | 288 | __be32 used_ebs; |
289 | __be32 data_pad; | 289 | __be32 data_pad; |
290 | __be32 data_crc; | 290 | __be32 data_crc; |
291 | __u8 padding2[4]; | 291 | __u8 padding2[4]; |
292 | __be64 sqnum; | 292 | __be64 sqnum; |
293 | __u8 padding3[12]; | 293 | __u8 padding3[12]; |
294 | __be32 hdr_crc; | 294 | __be32 hdr_crc; |
295 | } __packed; | 295 | } __packed; |
296 | 296 | ||
297 | /* Internal UBI volumes count */ | 297 | /* Internal UBI volumes count */ |
298 | #define UBI_INT_VOL_COUNT 1 | 298 | #define UBI_INT_VOL_COUNT 1 |
299 | 299 | ||
300 | /* | 300 | /* |
301 | * Starting ID of internal volumes. There is reserved room for 4096 internal | 301 | * Starting ID of internal volumes. There is reserved room for 4096 internal |
302 | * volumes. | 302 | * volumes. |
303 | */ | 303 | */ |
304 | #define UBI_INTERNAL_VOL_START (0x7FFFFFFF - 4096) | 304 | #define UBI_INTERNAL_VOL_START (0x7FFFFFFF - 4096) |
305 | 305 | ||
306 | /* The layout volume contains the volume table */ | 306 | /* The layout volume contains the volume table */ |
307 | 307 | ||
308 | #define UBI_LAYOUT_VOLUME_ID UBI_INTERNAL_VOL_START | 308 | #define UBI_LAYOUT_VOLUME_ID UBI_INTERNAL_VOL_START |
309 | #define UBI_LAYOUT_VOLUME_TYPE UBI_VID_DYNAMIC | 309 | #define UBI_LAYOUT_VOLUME_TYPE UBI_VID_DYNAMIC |
310 | #define UBI_LAYOUT_VOLUME_ALIGN 1 | 310 | #define UBI_LAYOUT_VOLUME_ALIGN 1 |
311 | #define UBI_LAYOUT_VOLUME_EBS 2 | 311 | #define UBI_LAYOUT_VOLUME_EBS 2 |
312 | #define UBI_LAYOUT_VOLUME_NAME "layout volume" | 312 | #define UBI_LAYOUT_VOLUME_NAME "layout volume" |
313 | #define UBI_LAYOUT_VOLUME_COMPAT UBI_COMPAT_REJECT | 313 | #define UBI_LAYOUT_VOLUME_COMPAT UBI_COMPAT_REJECT |
314 | 314 | ||
315 | /* The maximum number of volumes per one UBI device */ | 315 | /* The maximum number of volumes per one UBI device */ |
316 | #define UBI_MAX_VOLUMES 128 | 316 | #define UBI_MAX_VOLUMES 128 |
317 | 317 | ||
318 | /* The maximum volume name length */ | 318 | /* The maximum volume name length */ |
319 | #define UBI_VOL_NAME_MAX 127 | 319 | #define UBI_VOL_NAME_MAX 127 |
320 | 320 | ||
321 | /* Size of the volume table record */ | 321 | /* Size of the volume table record */ |
322 | #define UBI_VTBL_RECORD_SIZE sizeof(struct ubi_vtbl_record) | 322 | #define UBI_VTBL_RECORD_SIZE sizeof(struct ubi_vtbl_record) |
323 | 323 | ||
324 | /* Size of the volume table record without the ending CRC */ | 324 | /* Size of the volume table record without the ending CRC */ |
325 | #define UBI_VTBL_RECORD_SIZE_CRC (UBI_VTBL_RECORD_SIZE - sizeof(__be32)) | 325 | #define UBI_VTBL_RECORD_SIZE_CRC (UBI_VTBL_RECORD_SIZE - sizeof(__be32)) |
326 | 326 | ||
327 | /** | 327 | /** |
328 | * struct ubi_vtbl_record - a record in the volume table. | 328 | * struct ubi_vtbl_record - a record in the volume table. |
329 | * @reserved_pebs: how many physical eraseblocks are reserved for this volume | 329 | * @reserved_pebs: how many physical eraseblocks are reserved for this volume |
330 | * @alignment: volume alignment | 330 | * @alignment: volume alignment |
331 | * @data_pad: how many bytes are unused at the end of the each physical | 331 | * @data_pad: how many bytes are unused at the end of the each physical |
332 | * eraseblock to satisfy the requested alignment | 332 | * eraseblock to satisfy the requested alignment |
333 | * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) | 333 | * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) |
334 | * @upd_marker: if volume update was started but not finished | 334 | * @upd_marker: if volume update was started but not finished |
335 | * @name_len: volume name length | 335 | * @name_len: volume name length |
336 | * @name: the volume name | 336 | * @name: the volume name |
337 | * @flags: volume flags (%UBI_VTBL_AUTORESIZE_FLG) | 337 | * @flags: volume flags (%UBI_VTBL_AUTORESIZE_FLG) |
338 | * @padding: reserved, zeroes | 338 | * @padding: reserved, zeroes |
339 | * @crc: a CRC32 checksum of the record | 339 | * @crc: a CRC32 checksum of the record |
340 | * | 340 | * |
341 | * The volume table records are stored in the volume table, which is stored in | 341 | * The volume table records are stored in the volume table, which is stored in |
342 | * the layout volume. The layout volume consists of 2 logical eraseblocks, each | 342 | * the layout volume. The layout volume consists of 2 logical eraseblocks, each |
343 | * of which contains a copy of the volume table (i.e., the volume table is | 343 | * of which contains a copy of the volume table (i.e., the volume table is |
344 | * duplicated). The volume table is an array of &struct ubi_vtbl_record | 344 | * duplicated). The volume table is an array of &struct ubi_vtbl_record |
345 | * objects indexed by the volume ID. | 345 | * objects indexed by the volume ID. |
346 | * | 346 | * |
347 | * If the size of the logical eraseblock is large enough to fit | 347 | * If the size of the logical eraseblock is large enough to fit |
348 | * %UBI_MAX_VOLUMES records, the volume table contains %UBI_MAX_VOLUMES | 348 | * %UBI_MAX_VOLUMES records, the volume table contains %UBI_MAX_VOLUMES |
349 | * records. Otherwise, it contains as many records as it can fit (i.e., size of | 349 | * records. Otherwise, it contains as many records as it can fit (i.e., size of |
350 | * logical eraseblock divided by sizeof(struct ubi_vtbl_record)). | 350 | * logical eraseblock divided by sizeof(struct ubi_vtbl_record)). |
351 | * | 351 | * |
352 | * The @upd_marker flag is used to implement volume update. It is set to %1 | 352 | * The @upd_marker flag is used to implement volume update. It is set to %1 |
353 | * before update and set to %0 after the update. So if the update operation was | 353 | * before update and set to %0 after the update. So if the update operation was |
354 | * interrupted, UBI knows that the volume is corrupted. | 354 | * interrupted, UBI knows that the volume is corrupted. |
355 | * | 355 | * |
356 | * The @alignment field is specified when the volume is created and cannot be | 356 | * The @alignment field is specified when the volume is created and cannot be |
357 | * later changed. It may be useful, for example, when a block-oriented file | 357 | * later changed. It may be useful, for example, when a block-oriented file |
358 | * system works on top of UBI. The @data_pad field is calculated using the | 358 | * system works on top of UBI. The @data_pad field is calculated using the |
359 | * logical eraseblock size and @alignment. The alignment must be a multiple of the | 359 | * logical eraseblock size and @alignment. The alignment must be a multiple of the |
360 | * minimal flash I/O unit. If @alignment is 1, all the available space of | 360 | * minimal flash I/O unit. If @alignment is 1, all the available space of |
361 | * the physical eraseblocks is used. | 361 | * the physical eraseblocks is used. |
362 | * | 362 | * |
363 | * Empty records contain all zeroes and the CRC checksum of those zeroes. | 363 | * Empty records contain all zeroes and the CRC checksum of those zeroes. |
364 | */ | 364 | */ |
365 | struct ubi_vtbl_record { | 365 | struct ubi_vtbl_record { |
366 | __be32 reserved_pebs; | 366 | __be32 reserved_pebs; |
367 | __be32 alignment; | 367 | __be32 alignment; |
368 | __be32 data_pad; | 368 | __be32 data_pad; |
369 | __u8 vol_type; | 369 | __u8 vol_type; |
370 | __u8 upd_marker; | 370 | __u8 upd_marker; |
371 | __be16 name_len; | 371 | __be16 name_len; |
372 | __u8 name[UBI_VOL_NAME_MAX+1]; | 372 | __u8 name[UBI_VOL_NAME_MAX+1]; |
373 | __u8 flags; | 373 | __u8 flags; |
374 | __u8 padding[23]; | 374 | __u8 padding[23]; |
375 | __be32 crc; | 375 | __be32 crc; |
376 | } __packed; | 376 | } __packed; |
377 | 377 | ||
378 | #endif /* !__UBI_MEDIA_H__ */ | 378 | #endif /* !__UBI_MEDIA_H__ */ |
379 | 379 |
drivers/mtd/ubi/vtbl.c
1 | /* | 1 | /* |
2 | * Copyright (c) International Business Machines Corp., 2006 | 2 | * Copyright (c) International Business Machines Corp., 2006 |
3 | * Copyright (c) Nokia Corporation, 2006, 2007 | 3 | * Copyright (c) Nokia Corporation, 2006, 2007 |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation; either version 2 of the License, or | 7 | * the Free Software Foundation; either version 2 of the License, or |
8 | * (at your option) any later version. | 8 | * (at your option) any later version. |
9 | * | 9 | * |
10 | * This program is distributed in the hope that it will be useful, | 10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See |
13 | * the GNU General Public License for more details. | 13 | * the GNU General Public License for more details. |
14 | * | 14 | * |
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | * | 18 | * |
19 | * Author: Artem Bityutskiy (Битюцкий Артём) | 19 | * Author: Artem Bityutskiy (Битюцкий Артём) |
20 | */ | 20 | */ |
21 | 21 | ||
22 | /* | 22 | /* |
23 | * This file includes volume table manipulation code. The volume table is an | 23 | * This file includes volume table manipulation code. The volume table is an |
24 | * on-flash table containing volume meta-data like name, number of reserved | 24 | * on-flash table containing volume meta-data like name, number of reserved |
25 | * physical eraseblocks, type, etc. The volume table is stored in the so-called | 25 | * physical eraseblocks, type, etc. The volume table is stored in the so-called |
26 | * "layout volume". | 26 | * "layout volume". |
27 | * | 27 | * |
28 | * The layout volume is an internal volume which is organized as follows. It | 28 | * The layout volume is an internal volume which is organized as follows. It |
29 | * consists of two logical eraseblocks - LEB 0 and LEB 1. Each logical | 29 | * consists of two logical eraseblocks - LEB 0 and LEB 1. Each logical |
30 | * eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each | 30 | * eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each |
31 | * other. This redundancy guarantees robustness to unclean reboots. The volume | 31 | * other. This redundancy guarantees robustness to unclean reboots. The volume |
32 | * table is basically an array of volume table records. Each record contains | 32 | * table is basically an array of volume table records. Each record contains |
33 | * full information about the volume and is protected by a CRC checksum. | 33 | * full information about the volume and is protected by a CRC checksum. |
34 | * | 34 | * |
35 | * When the volume table is changed, it is first changed in RAM. Then LEB 0 is | 35 | * When the volume table is changed, it is first changed in RAM. Then LEB 0 is |
36 | * erased, and the updated volume table is written back to LEB 0. Then same for | 36 | * erased, and the updated volume table is written back to LEB 0. Then same for |
37 | * LEB 1. This scheme guarantees recoverability from unclean reboots. | 37 | * LEB 1. This scheme guarantees recoverability from unclean reboots. |
38 | * | 38 | * |
39 | * In this UBI implementation the on-flash volume table does not contain any | 39 | * In this UBI implementation the on-flash volume table does not contain any |
40 | * information about how many data static volumes contain. This information may | 40 | * information about how much data static volumes contain. |
41 | * be found from the scanning data. | ||
42 | * | 41 | * |
43 | * But it would still be beneficial to store this information in the volume | 42 | * But it would still be beneficial to store this information in the volume |
44 | * table. For example, suppose we have a static volume X, and all its physical | 43 | * table. For example, suppose we have a static volume X, and all its physical |
45 | * eraseblocks became bad for some reasons. Suppose we are attaching the | 44 | * eraseblocks became bad for some reasons. Suppose we are attaching the |
46 | * corresponding MTD device, the scanning has found no logical eraseblocks | 45 | * corresponding MTD device, for some reason we find no logical eraseblocks |
47 | * corresponding to the volume X. According to the volume table volume X does | 46 | * corresponding to the volume X. According to the volume table volume X does |
48 | * exist. So we don't know whether it is just empty or all its physical | 47 | * exist. So we don't know whether it is just empty or all its physical |
49 | * eraseblocks went bad. So we cannot alarm the user about this corruption. | 48 | * eraseblocks went bad. So we cannot alarm the user properly. |
50 | * | 49 | * |
51 | * The volume table also stores so-called "update marker", which is used for | 50 | * The volume table also stores so-called "update marker", which is used for |
52 | * volume updates. Before updating the volume, the update marker is set, and | 51 | * volume updates. Before updating the volume, the update marker is set, and |
53 | * after the update operation is finished, the update marker is cleared. So if | 52 | * after the update operation is finished, the update marker is cleared. So if |
54 | * the update operation was interrupted (e.g. by an unclean reboot) - the | 53 | * the update operation was interrupted (e.g. by an unclean reboot) - the |
55 | * update marker is still there and we know that the volume's contents is | 54 | * update marker is still there and we know that the volume's contents is |
56 | * damaged. | 55 | * damaged. |
57 | */ | 56 | */ |
58 | 57 | ||
59 | #include <linux/crc32.h> | 58 | #include <linux/crc32.h> |
60 | #include <linux/err.h> | 59 | #include <linux/err.h> |
61 | #include <linux/slab.h> | 60 | #include <linux/slab.h> |
62 | #include <asm/div64.h> | 61 | #include <asm/div64.h> |
63 | #include "ubi.h" | 62 | #include "ubi.h" |
64 | 63 | ||
65 | static void self_vtbl_check(const struct ubi_device *ubi); | 64 | static void self_vtbl_check(const struct ubi_device *ubi); |
66 | 65 | ||
67 | /* Empty volume table record */ | 66 | /* Empty volume table record */ |
68 | static struct ubi_vtbl_record empty_vtbl_record; | 67 | static struct ubi_vtbl_record empty_vtbl_record; |
69 | 68 | ||
70 | /** | 69 | /** |
71 | * ubi_change_vtbl_record - change volume table record. | 70 | * ubi_change_vtbl_record - change volume table record. |
72 | * @ubi: UBI device description object | 71 | * @ubi: UBI device description object |
73 | * @idx: table index to change | 72 | * @idx: table index to change |
74 | * @vtbl_rec: new volume table record | 73 | * @vtbl_rec: new volume table record |
75 | * | 74 | * |
76 | * This function changes volume table record @idx. If @vtbl_rec is %NULL, empty | 75 | * This function changes volume table record @idx. If @vtbl_rec is %NULL, empty |
77 | * volume table record is written. The caller does not have to calculate CRC of | 76 | * volume table record is written. The caller does not have to calculate CRC of |
78 | * the record as it is done by this function. Returns zero in case of success | 77 | * the record as it is done by this function. Returns zero in case of success |
79 | * and a negative error code in case of failure. | 78 | * and a negative error code in case of failure. |
80 | */ | 79 | */ |
81 | int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, | 80 | int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, |
82 | struct ubi_vtbl_record *vtbl_rec) | 81 | struct ubi_vtbl_record *vtbl_rec) |
83 | { | 82 | { |
84 | int i, err; | 83 | int i, err; |
85 | uint32_t crc; | 84 | uint32_t crc; |
86 | struct ubi_volume *layout_vol; | 85 | struct ubi_volume *layout_vol; |
87 | 86 | ||
88 | ubi_assert(idx >= 0 && idx < ubi->vtbl_slots); | 87 | ubi_assert(idx >= 0 && idx < ubi->vtbl_slots); |
89 | layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)]; | 88 | layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)]; |
90 | 89 | ||
91 | if (!vtbl_rec) | 90 | if (!vtbl_rec) |
92 | vtbl_rec = &empty_vtbl_record; | 91 | vtbl_rec = &empty_vtbl_record; |
93 | else { | 92 | else { |
94 | crc = crc32(UBI_CRC32_INIT, vtbl_rec, UBI_VTBL_RECORD_SIZE_CRC); | 93 | crc = crc32(UBI_CRC32_INIT, vtbl_rec, UBI_VTBL_RECORD_SIZE_CRC); |
95 | vtbl_rec->crc = cpu_to_be32(crc); | 94 | vtbl_rec->crc = cpu_to_be32(crc); |
96 | } | 95 | } |
97 | 96 | ||
98 | memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record)); | 97 | memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record)); |
99 | for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { | 98 | for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { |
100 | err = ubi_eba_unmap_leb(ubi, layout_vol, i); | 99 | err = ubi_eba_unmap_leb(ubi, layout_vol, i); |
101 | if (err) | 100 | if (err) |
102 | return err; | 101 | return err; |
103 | 102 | ||
104 | err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0, | 103 | err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0, |
105 | ubi->vtbl_size); | 104 | ubi->vtbl_size); |
106 | if (err) | 105 | if (err) |
107 | return err; | 106 | return err; |
108 | } | 107 | } |
109 | 108 | ||
110 | self_vtbl_check(ubi); | 109 | self_vtbl_check(ubi); |
111 | return 0; | 110 | return 0; |
112 | } | 111 | } |
113 | 112 | ||
114 | /** | 113 | /** |
115 | * ubi_vtbl_rename_volumes - rename UBI volumes in the volume table. | 114 | * ubi_vtbl_rename_volumes - rename UBI volumes in the volume table. |
116 | * @ubi: UBI device description object | 115 | * @ubi: UBI device description object |
117 | * @rename_list: list of &struct ubi_rename_entry objects | 116 | * @rename_list: list of &struct ubi_rename_entry objects |
118 | * | 117 | * |
119 | * This function re-names multiple volumes specified in @req in the volume | 118 | * This function re-names multiple volumes specified in @req in the volume |
120 | * table. Returns zero in case of success and a negative error code in case of | 119 | * table. Returns zero in case of success and a negative error code in case of |
121 | * failure. | 120 | * failure. |
122 | */ | 121 | */ |
123 | int ubi_vtbl_rename_volumes(struct ubi_device *ubi, | 122 | int ubi_vtbl_rename_volumes(struct ubi_device *ubi, |
124 | struct list_head *rename_list) | 123 | struct list_head *rename_list) |
125 | { | 124 | { |
126 | int i, err; | 125 | int i, err; |
127 | struct ubi_rename_entry *re; | 126 | struct ubi_rename_entry *re; |
128 | struct ubi_volume *layout_vol; | 127 | struct ubi_volume *layout_vol; |
129 | 128 | ||
130 | list_for_each_entry(re, rename_list, list) { | 129 | list_for_each_entry(re, rename_list, list) { |
131 | uint32_t crc; | 130 | uint32_t crc; |
132 | struct ubi_volume *vol = re->desc->vol; | 131 | struct ubi_volume *vol = re->desc->vol; |
133 | struct ubi_vtbl_record *vtbl_rec = &ubi->vtbl[vol->vol_id]; | 132 | struct ubi_vtbl_record *vtbl_rec = &ubi->vtbl[vol->vol_id]; |
134 | 133 | ||
135 | if (re->remove) { | 134 | if (re->remove) { |
136 | memcpy(vtbl_rec, &empty_vtbl_record, | 135 | memcpy(vtbl_rec, &empty_vtbl_record, |
137 | sizeof(struct ubi_vtbl_record)); | 136 | sizeof(struct ubi_vtbl_record)); |
138 | continue; | 137 | continue; |
139 | } | 138 | } |
140 | 139 | ||
141 | vtbl_rec->name_len = cpu_to_be16(re->new_name_len); | 140 | vtbl_rec->name_len = cpu_to_be16(re->new_name_len); |
142 | memcpy(vtbl_rec->name, re->new_name, re->new_name_len); | 141 | memcpy(vtbl_rec->name, re->new_name, re->new_name_len); |
143 | memset(vtbl_rec->name + re->new_name_len, 0, | 142 | memset(vtbl_rec->name + re->new_name_len, 0, |
144 | UBI_VOL_NAME_MAX + 1 - re->new_name_len); | 143 | UBI_VOL_NAME_MAX + 1 - re->new_name_len); |
145 | crc = crc32(UBI_CRC32_INIT, vtbl_rec, | 144 | crc = crc32(UBI_CRC32_INIT, vtbl_rec, |
146 | UBI_VTBL_RECORD_SIZE_CRC); | 145 | UBI_VTBL_RECORD_SIZE_CRC); |
147 | vtbl_rec->crc = cpu_to_be32(crc); | 146 | vtbl_rec->crc = cpu_to_be32(crc); |
148 | } | 147 | } |
149 | 148 | ||
150 | layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)]; | 149 | layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)]; |
151 | for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { | 150 | for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { |
152 | err = ubi_eba_unmap_leb(ubi, layout_vol, i); | 151 | err = ubi_eba_unmap_leb(ubi, layout_vol, i); |
153 | if (err) | 152 | if (err) |
154 | return err; | 153 | return err; |
155 | 154 | ||
156 | err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0, | 155 | err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0, |
157 | ubi->vtbl_size); | 156 | ubi->vtbl_size); |
158 | if (err) | 157 | if (err) |
159 | return err; | 158 | return err; |
160 | } | 159 | } |
161 | 160 | ||
162 | return 0; | 161 | return 0; |
163 | } | 162 | } |
164 | 163 | ||
165 | /** | 164 | /** |
166 | * vtbl_check - check if volume table is not corrupted and sensible. | 165 | * vtbl_check - check if volume table is not corrupted and sensible. |
167 | * @ubi: UBI device description object | 166 | * @ubi: UBI device description object |
168 | * @vtbl: volume table | 167 | * @vtbl: volume table |
169 | * | 168 | * |
170 | * This function returns zero if @vtbl is all right, %1 if CRC is incorrect, | 169 | * This function returns zero if @vtbl is all right, %1 if CRC is incorrect, |
171 | * and %-EINVAL if it contains inconsistent data. | 170 | * and %-EINVAL if it contains inconsistent data. |
172 | */ | 171 | */ |
173 | static int vtbl_check(const struct ubi_device *ubi, | 172 | static int vtbl_check(const struct ubi_device *ubi, |
174 | const struct ubi_vtbl_record *vtbl) | 173 | const struct ubi_vtbl_record *vtbl) |
175 | { | 174 | { |
176 | int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len; | 175 | int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len; |
177 | int upd_marker, err; | 176 | int upd_marker, err; |
178 | uint32_t crc; | 177 | uint32_t crc; |
179 | const char *name; | 178 | const char *name; |
180 | 179 | ||
181 | for (i = 0; i < ubi->vtbl_slots; i++) { | 180 | for (i = 0; i < ubi->vtbl_slots; i++) { |
182 | cond_resched(); | 181 | cond_resched(); |
183 | 182 | ||
184 | reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs); | 183 | reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs); |
185 | alignment = be32_to_cpu(vtbl[i].alignment); | 184 | alignment = be32_to_cpu(vtbl[i].alignment); |
186 | data_pad = be32_to_cpu(vtbl[i].data_pad); | 185 | data_pad = be32_to_cpu(vtbl[i].data_pad); |
187 | upd_marker = vtbl[i].upd_marker; | 186 | upd_marker = vtbl[i].upd_marker; |
188 | vol_type = vtbl[i].vol_type; | 187 | vol_type = vtbl[i].vol_type; |
189 | name_len = be16_to_cpu(vtbl[i].name_len); | 188 | name_len = be16_to_cpu(vtbl[i].name_len); |
190 | name = &vtbl[i].name[0]; | 189 | name = &vtbl[i].name[0]; |
191 | 190 | ||
192 | crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC); | 191 | crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC); |
193 | if (be32_to_cpu(vtbl[i].crc) != crc) { | 192 | if (be32_to_cpu(vtbl[i].crc) != crc) { |
194 | ubi_err("bad CRC at record %u: %#08x, not %#08x", | 193 | ubi_err("bad CRC at record %u: %#08x, not %#08x", |
195 | i, crc, be32_to_cpu(vtbl[i].crc)); | 194 | i, crc, be32_to_cpu(vtbl[i].crc)); |
196 | ubi_dump_vtbl_record(&vtbl[i], i); | 195 | ubi_dump_vtbl_record(&vtbl[i], i); |
197 | return 1; | 196 | return 1; |
198 | } | 197 | } |
199 | 198 | ||
200 | if (reserved_pebs == 0) { | 199 | if (reserved_pebs == 0) { |
201 | if (memcmp(&vtbl[i], &empty_vtbl_record, | 200 | if (memcmp(&vtbl[i], &empty_vtbl_record, |
202 | UBI_VTBL_RECORD_SIZE)) { | 201 | UBI_VTBL_RECORD_SIZE)) { |
203 | err = 2; | 202 | err = 2; |
204 | goto bad; | 203 | goto bad; |
205 | } | 204 | } |
206 | continue; | 205 | continue; |
207 | } | 206 | } |
208 | 207 | ||
209 | if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 || | 208 | if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 || |
210 | name_len < 0) { | 209 | name_len < 0) { |
211 | err = 3; | 210 | err = 3; |
212 | goto bad; | 211 | goto bad; |
213 | } | 212 | } |
214 | 213 | ||
215 | if (alignment > ubi->leb_size || alignment == 0) { | 214 | if (alignment > ubi->leb_size || alignment == 0) { |
216 | err = 4; | 215 | err = 4; |
217 | goto bad; | 216 | goto bad; |
218 | } | 217 | } |
219 | 218 | ||
220 | n = alignment & (ubi->min_io_size - 1); | 219 | n = alignment & (ubi->min_io_size - 1); |
221 | if (alignment != 1 && n) { | 220 | if (alignment != 1 && n) { |
222 | err = 5; | 221 | err = 5; |
223 | goto bad; | 222 | goto bad; |
224 | } | 223 | } |
225 | 224 | ||
226 | n = ubi->leb_size % alignment; | 225 | n = ubi->leb_size % alignment; |
227 | if (data_pad != n) { | 226 | if (data_pad != n) { |
228 | ubi_err("bad data_pad, has to be %d", n); | 227 | ubi_err("bad data_pad, has to be %d", n); |
229 | err = 6; | 228 | err = 6; |
230 | goto bad; | 229 | goto bad; |
231 | } | 230 | } |
232 | 231 | ||
233 | if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) { | 232 | if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) { |
234 | err = 7; | 233 | err = 7; |
235 | goto bad; | 234 | goto bad; |
236 | } | 235 | } |
237 | 236 | ||
238 | if (upd_marker != 0 && upd_marker != 1) { | 237 | if (upd_marker != 0 && upd_marker != 1) { |
239 | err = 8; | 238 | err = 8; |
240 | goto bad; | 239 | goto bad; |
241 | } | 240 | } |
242 | 241 | ||
243 | if (reserved_pebs > ubi->good_peb_count) { | 242 | if (reserved_pebs > ubi->good_peb_count) { |
244 | ubi_err("too large reserved_pebs %d, good PEBs %d", | 243 | ubi_err("too large reserved_pebs %d, good PEBs %d", |
245 | reserved_pebs, ubi->good_peb_count); | 244 | reserved_pebs, ubi->good_peb_count); |
246 | err = 9; | 245 | err = 9; |
247 | goto bad; | 246 | goto bad; |
248 | } | 247 | } |
249 | 248 | ||
250 | if (name_len > UBI_VOL_NAME_MAX) { | 249 | if (name_len > UBI_VOL_NAME_MAX) { |
251 | err = 10; | 250 | err = 10; |
252 | goto bad; | 251 | goto bad; |
253 | } | 252 | } |
254 | 253 | ||
255 | if (name[0] == '\0') { | 254 | if (name[0] == '\0') { |
256 | err = 11; | 255 | err = 11; |
257 | goto bad; | 256 | goto bad; |
258 | } | 257 | } |
259 | 258 | ||
260 | if (name_len != strnlen(name, name_len + 1)) { | 259 | if (name_len != strnlen(name, name_len + 1)) { |
261 | err = 12; | 260 | err = 12; |
262 | goto bad; | 261 | goto bad; |
263 | } | 262 | } |
264 | } | 263 | } |
265 | 264 | ||
266 | /* Checks that all names are unique */ | 265 | /* Checks that all names are unique */ |
267 | for (i = 0; i < ubi->vtbl_slots - 1; i++) { | 266 | for (i = 0; i < ubi->vtbl_slots - 1; i++) { |
268 | for (n = i + 1; n < ubi->vtbl_slots; n++) { | 267 | for (n = i + 1; n < ubi->vtbl_slots; n++) { |
269 | int len1 = be16_to_cpu(vtbl[i].name_len); | 268 | int len1 = be16_to_cpu(vtbl[i].name_len); |
270 | int len2 = be16_to_cpu(vtbl[n].name_len); | 269 | int len2 = be16_to_cpu(vtbl[n].name_len); |
271 | 270 | ||
272 | if (len1 > 0 && len1 == len2 && | 271 | if (len1 > 0 && len1 == len2 && |
273 | !strncmp(vtbl[i].name, vtbl[n].name, len1)) { | 272 | !strncmp(vtbl[i].name, vtbl[n].name, len1)) { |
274 | ubi_err("volumes %d and %d have the same name" | 273 | ubi_err("volumes %d and %d have the same name" |
275 | " \"%s\"", i, n, vtbl[i].name); | 274 | " \"%s\"", i, n, vtbl[i].name); |
276 | ubi_dump_vtbl_record(&vtbl[i], i); | 275 | ubi_dump_vtbl_record(&vtbl[i], i); |
277 | ubi_dump_vtbl_record(&vtbl[n], n); | 276 | ubi_dump_vtbl_record(&vtbl[n], n); |
278 | return -EINVAL; | 277 | return -EINVAL; |
279 | } | 278 | } |
280 | } | 279 | } |
281 | } | 280 | } |
282 | 281 | ||
283 | return 0; | 282 | return 0; |
284 | 283 | ||
285 | bad: | 284 | bad: |
286 | ubi_err("volume table check failed: record %d, error %d", i, err); | 285 | ubi_err("volume table check failed: record %d, error %d", i, err); |
287 | ubi_dump_vtbl_record(&vtbl[i], i); | 286 | ubi_dump_vtbl_record(&vtbl[i], i); |
288 | return -EINVAL; | 287 | return -EINVAL; |
289 | } | 288 | } |
290 | 289 | ||
291 | /** | 290 | /** |
292 | * create_vtbl - create a copy of volume table. | 291 | * create_vtbl - create a copy of volume table. |
293 | * @ubi: UBI device description object | 292 | * @ubi: UBI device description object |
294 | * @ai: attaching information | 293 | * @ai: attaching information |
295 | * @copy: number of the volume table copy | 294 | * @copy: number of the volume table copy |
296 | * @vtbl: contents of the volume table | 295 | * @vtbl: contents of the volume table |
297 | * | 296 | * |
298 | * This function returns zero in case of success and a negative error code in | 297 | * This function returns zero in case of success and a negative error code in |
299 | * case of failure. | 298 | * case of failure. |
300 | */ | 299 | */ |
301 | static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai, | 300 | static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai, |
302 | int copy, void *vtbl) | 301 | int copy, void *vtbl) |
303 | { | 302 | { |
304 | int err, tries = 0; | 303 | int err, tries = 0; |
305 | struct ubi_vid_hdr *vid_hdr; | 304 | struct ubi_vid_hdr *vid_hdr; |
306 | struct ubi_ainf_peb *new_aeb; | 305 | struct ubi_ainf_peb *new_aeb; |
307 | 306 | ||
308 | ubi_msg("create volume table (copy #%d)", copy + 1); | 307 | ubi_msg("create volume table (copy #%d)", copy + 1); |
309 | 308 | ||
310 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); | 309 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); |
311 | if (!vid_hdr) | 310 | if (!vid_hdr) |
312 | return -ENOMEM; | 311 | return -ENOMEM; |
313 | 312 | ||
314 | retry: | 313 | retry: |
315 | new_aeb = ubi_early_get_peb(ubi, ai); | 314 | new_aeb = ubi_early_get_peb(ubi, ai); |
316 | if (IS_ERR(new_aeb)) { | 315 | if (IS_ERR(new_aeb)) { |
317 | err = PTR_ERR(new_aeb); | 316 | err = PTR_ERR(new_aeb); |
318 | goto out_free; | 317 | goto out_free; |
319 | } | 318 | } |
320 | 319 | ||
321 | vid_hdr->vol_type = UBI_LAYOUT_VOLUME_TYPE; | 320 | vid_hdr->vol_type = UBI_LAYOUT_VOLUME_TYPE; |
322 | vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID); | 321 | vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID); |
323 | vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT; | 322 | vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT; |
324 | vid_hdr->data_size = vid_hdr->used_ebs = | 323 | vid_hdr->data_size = vid_hdr->used_ebs = |
325 | vid_hdr->data_pad = cpu_to_be32(0); | 324 | vid_hdr->data_pad = cpu_to_be32(0); |
326 | vid_hdr->lnum = cpu_to_be32(copy); | 325 | vid_hdr->lnum = cpu_to_be32(copy); |
327 | vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum); | 326 | vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum); |
328 | 327 | ||
329 | /* The EC header is already there, write the VID header */ | 328 | /* The EC header is already there, write the VID header */ |
330 | err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vid_hdr); | 329 | err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vid_hdr); |
331 | if (err) | 330 | if (err) |
332 | goto write_error; | 331 | goto write_error; |
333 | 332 | ||
334 | /* Write the layout volume contents */ | 333 | /* Write the layout volume contents */ |
335 | err = ubi_io_write_data(ubi, vtbl, new_aeb->pnum, 0, ubi->vtbl_size); | 334 | err = ubi_io_write_data(ubi, vtbl, new_aeb->pnum, 0, ubi->vtbl_size); |
336 | if (err) | 335 | if (err) |
337 | goto write_error; | 336 | goto write_error; |
338 | 337 | ||
339 | /* | 338 | /* |
340 | * And add it to the attaching information. Don't delete the old version | 339 | * And add it to the attaching information. Don't delete the old version |
341 | * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'. | 340 | * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'. |
342 | */ | 341 | */ |
343 | err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0); | 342 | err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0); |
344 | kfree(new_aeb); | 343 | kfree(new_aeb); |
345 | ubi_free_vid_hdr(ubi, vid_hdr); | 344 | ubi_free_vid_hdr(ubi, vid_hdr); |
346 | return err; | 345 | return err; |
347 | 346 | ||
348 | write_error: | 347 | write_error: |
349 | if (err == -EIO && ++tries <= 5) { | 348 | if (err == -EIO && ++tries <= 5) { |
350 | /* | 349 | /* |
351 | * Probably this physical eraseblock went bad, try to pick | 350 | * Probably this physical eraseblock went bad, try to pick |
352 | * another one. | 351 | * another one. |
353 | */ | 352 | */ |
354 | list_add(&new_aeb->u.list, &ai->erase); | 353 | list_add(&new_aeb->u.list, &ai->erase); |
355 | goto retry; | 354 | goto retry; |
356 | } | 355 | } |
357 | kfree(new_aeb); | 356 | kfree(new_aeb); |
358 | out_free: | 357 | out_free: |
359 | ubi_free_vid_hdr(ubi, vid_hdr); | 358 | ubi_free_vid_hdr(ubi, vid_hdr); |
360 | return err; | 359 | return err; |
361 | 360 | ||
362 | } | 361 | } |
363 | 362 | ||
364 | /** | 363 | /** |
365 | * process_lvol - process the layout volume. | 364 | * process_lvol - process the layout volume. |
366 | * @ubi: UBI device description object | 365 | * @ubi: UBI device description object |
367 | * @ai: attaching information | 366 | * @ai: attaching information |
368 | * @av: layout volume attaching information | 367 | * @av: layout volume attaching information |
369 | * | 368 | * |
370 | * This function is responsible for reading the layout volume, ensuring it is | 369 | * This function is responsible for reading the layout volume, ensuring it is |
371 | * not corrupted, and recovering from corruptions if needed. Returns volume | 370 | * not corrupted, and recovering from corruptions if needed. Returns volume |
372 | * table in case of success and a negative error code in case of failure. | 371 | * table in case of success and a negative error code in case of failure. |
373 | */ | 372 | */ |
374 | static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi, | 373 | static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi, |
375 | struct ubi_attach_info *ai, | 374 | struct ubi_attach_info *ai, |
376 | struct ubi_ainf_volume *av) | 375 | struct ubi_ainf_volume *av) |
377 | { | 376 | { |
378 | int err; | 377 | int err; |
379 | struct rb_node *rb; | 378 | struct rb_node *rb; |
380 | struct ubi_ainf_peb *aeb; | 379 | struct ubi_ainf_peb *aeb; |
381 | struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL }; | 380 | struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL }; |
382 | int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1}; | 381 | int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1}; |
383 | 382 | ||
384 | /* | 383 | /* |
385 | * UBI goes through the following steps when it changes the layout | 384 | * UBI goes through the following steps when it changes the layout |
386 | * volume: | 385 | * volume: |
387 | * a. erase LEB 0; | 386 | * a. erase LEB 0; |
388 | * b. write new data to LEB 0; | 387 | * b. write new data to LEB 0; |
389 | * c. erase LEB 1; | 388 | * c. erase LEB 1; |
390 | * d. write new data to LEB 1. | 389 | * d. write new data to LEB 1. |
391 | * | 390 | * |
392 | * Before the change, both LEBs contain the same data. | 391 | * Before the change, both LEBs contain the same data. |
393 | * | 392 | * |
394 | * Due to unclean reboots, the contents of LEB 0 may be lost, but there | 393 | * Due to unclean reboots, the contents of LEB 0 may be lost, but there |
395 | * should LEB 1. So it is OK if LEB 0 is corrupted while LEB 1 is not. | 394 | * should LEB 1. So it is OK if LEB 0 is corrupted while LEB 1 is not. |
396 | * Similarly, LEB 1 may be lost, but there should be LEB 0. And | 395 | * Similarly, LEB 1 may be lost, but there should be LEB 0. And |
397 | * finally, unclean reboots may result in a situation when neither LEB | 396 | * finally, unclean reboots may result in a situation when neither LEB |
398 | * 0 nor LEB 1 are corrupted, but they are different. In this case, LEB | 397 | * 0 nor LEB 1 are corrupted, but they are different. In this case, LEB |
399 | * 0 contains more recent information. | 398 | * 0 contains more recent information. |
400 | * | 399 | * |
401 | * So the plan is to first check LEB 0. Then | 400 | * So the plan is to first check LEB 0. Then |
402 | * a. if LEB 0 is OK, it must be containing the most recent data; then | 401 | * a. if LEB 0 is OK, it must be containing the most recent data; then |
403 | * we compare it with LEB 1, and if they are different, we copy LEB | 402 | * we compare it with LEB 1, and if they are different, we copy LEB |
404 | * 0 to LEB 1; | 403 | * 0 to LEB 1; |
405 | * b. if LEB 0 is corrupted, but LEB 1 has to be OK, and we copy LEB 1 | 404 | * b. if LEB 0 is corrupted, but LEB 1 has to be OK, and we copy LEB 1 |
406 | * to LEB 0. | 405 | * to LEB 0. |
407 | */ | 406 | */ |
408 | 407 | ||
409 | dbg_gen("check layout volume"); | 408 | dbg_gen("check layout volume"); |
410 | 409 | ||
411 | /* Read both LEB 0 and LEB 1 into memory */ | 410 | /* Read both LEB 0 and LEB 1 into memory */ |
412 | ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) { | 411 | ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) { |
413 | leb[aeb->lnum] = vzalloc(ubi->vtbl_size); | 412 | leb[aeb->lnum] = vzalloc(ubi->vtbl_size); |
414 | if (!leb[aeb->lnum]) { | 413 | if (!leb[aeb->lnum]) { |
415 | err = -ENOMEM; | 414 | err = -ENOMEM; |
416 | goto out_free; | 415 | goto out_free; |
417 | } | 416 | } |
418 | 417 | ||
419 | err = ubi_io_read_data(ubi, leb[aeb->lnum], aeb->pnum, 0, | 418 | err = ubi_io_read_data(ubi, leb[aeb->lnum], aeb->pnum, 0, |
420 | ubi->vtbl_size); | 419 | ubi->vtbl_size); |
421 | if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) | 420 | if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) |
422 | /* | 421 | /* |
423 | * Scrub the PEB later. Note, -EBADMSG indicates an | 422 | * Scrub the PEB later. Note, -EBADMSG indicates an |
424 | * uncorrectable ECC error, but we have our own CRC and | 423 | * uncorrectable ECC error, but we have our own CRC and |
425 | * the data will be checked later. If the data is OK, | 424 | * the data will be checked later. If the data is OK, |
426 | * the PEB will be scrubbed (because we set | 425 | * the PEB will be scrubbed (because we set |
427 | * aeb->scrub). If the data is not OK, the contents of | 426 | * aeb->scrub). If the data is not OK, the contents of |
428 | * the PEB will be recovered from the second copy, and | 427 | * the PEB will be recovered from the second copy, and |
429 | * aeb->scrub will be cleared in | 428 | * aeb->scrub will be cleared in |
430 | * 'ubi_add_to_av()'. | 429 | * 'ubi_add_to_av()'. |
431 | */ | 430 | */ |
432 | aeb->scrub = 1; | 431 | aeb->scrub = 1; |
433 | else if (err) | 432 | else if (err) |
434 | goto out_free; | 433 | goto out_free; |
435 | } | 434 | } |
436 | 435 | ||
437 | err = -EINVAL; | 436 | err = -EINVAL; |
438 | if (leb[0]) { | 437 | if (leb[0]) { |
439 | leb_corrupted[0] = vtbl_check(ubi, leb[0]); | 438 | leb_corrupted[0] = vtbl_check(ubi, leb[0]); |
440 | if (leb_corrupted[0] < 0) | 439 | if (leb_corrupted[0] < 0) |
441 | goto out_free; | 440 | goto out_free; |
442 | } | 441 | } |
443 | 442 | ||
444 | if (!leb_corrupted[0]) { | 443 | if (!leb_corrupted[0]) { |
445 | /* LEB 0 is OK */ | 444 | /* LEB 0 is OK */ |
446 | if (leb[1]) | 445 | if (leb[1]) |
447 | leb_corrupted[1] = memcmp(leb[0], leb[1], | 446 | leb_corrupted[1] = memcmp(leb[0], leb[1], |
448 | ubi->vtbl_size); | 447 | ubi->vtbl_size); |
449 | if (leb_corrupted[1]) { | 448 | if (leb_corrupted[1]) { |
450 | ubi_warn("volume table copy #2 is corrupted"); | 449 | ubi_warn("volume table copy #2 is corrupted"); |
451 | err = create_vtbl(ubi, ai, 1, leb[0]); | 450 | err = create_vtbl(ubi, ai, 1, leb[0]); |
452 | if (err) | 451 | if (err) |
453 | goto out_free; | 452 | goto out_free; |
454 | ubi_msg("volume table was restored"); | 453 | ubi_msg("volume table was restored"); |
455 | } | 454 | } |
456 | 455 | ||
457 | /* Both LEB 1 and LEB 2 are OK and consistent */ | 456 | /* Both LEB 1 and LEB 2 are OK and consistent */ |
458 | vfree(leb[1]); | 457 | vfree(leb[1]); |
459 | return leb[0]; | 458 | return leb[0]; |
460 | } else { | 459 | } else { |
461 | /* LEB 0 is corrupted or does not exist */ | 460 | /* LEB 0 is corrupted or does not exist */ |
462 | if (leb[1]) { | 461 | if (leb[1]) { |
463 | leb_corrupted[1] = vtbl_check(ubi, leb[1]); | 462 | leb_corrupted[1] = vtbl_check(ubi, leb[1]); |
464 | if (leb_corrupted[1] < 0) | 463 | if (leb_corrupted[1] < 0) |
465 | goto out_free; | 464 | goto out_free; |
466 | } | 465 | } |
467 | if (leb_corrupted[1]) { | 466 | if (leb_corrupted[1]) { |
468 | /* Both LEB 0 and LEB 1 are corrupted */ | 467 | /* Both LEB 0 and LEB 1 are corrupted */ |
469 | ubi_err("both volume tables are corrupted"); | 468 | ubi_err("both volume tables are corrupted"); |
470 | goto out_free; | 469 | goto out_free; |
471 | } | 470 | } |
472 | 471 | ||
473 | ubi_warn("volume table copy #1 is corrupted"); | 472 | ubi_warn("volume table copy #1 is corrupted"); |
474 | err = create_vtbl(ubi, ai, 0, leb[1]); | 473 | err = create_vtbl(ubi, ai, 0, leb[1]); |
475 | if (err) | 474 | if (err) |
476 | goto out_free; | 475 | goto out_free; |
477 | ubi_msg("volume table was restored"); | 476 | ubi_msg("volume table was restored"); |
478 | 477 | ||
479 | vfree(leb[0]); | 478 | vfree(leb[0]); |
480 | return leb[1]; | 479 | return leb[1]; |
481 | } | 480 | } |
482 | 481 | ||
483 | out_free: | 482 | out_free: |
484 | vfree(leb[0]); | 483 | vfree(leb[0]); |
485 | vfree(leb[1]); | 484 | vfree(leb[1]); |
486 | return ERR_PTR(err); | 485 | return ERR_PTR(err); |
487 | } | 486 | } |
488 | 487 | ||
489 | /** | 488 | /** |
490 | * create_empty_lvol - create empty layout volume. | 489 | * create_empty_lvol - create empty layout volume. |
491 | * @ubi: UBI device description object | 490 | * @ubi: UBI device description object |
492 | * @ai: attaching information | 491 | * @ai: attaching information |
493 | * | 492 | * |
494 | * This function returns volume table contents in case of success and a | 493 | * This function returns volume table contents in case of success and a |
495 | * negative error code in case of failure. | 494 | * negative error code in case of failure. |
496 | */ | 495 | */ |
497 | static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi, | 496 | static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi, |
498 | struct ubi_attach_info *ai) | 497 | struct ubi_attach_info *ai) |
499 | { | 498 | { |
500 | int i; | 499 | int i; |
501 | struct ubi_vtbl_record *vtbl; | 500 | struct ubi_vtbl_record *vtbl; |
502 | 501 | ||
503 | vtbl = vzalloc(ubi->vtbl_size); | 502 | vtbl = vzalloc(ubi->vtbl_size); |
504 | if (!vtbl) | 503 | if (!vtbl) |
505 | return ERR_PTR(-ENOMEM); | 504 | return ERR_PTR(-ENOMEM); |
506 | 505 | ||
507 | for (i = 0; i < ubi->vtbl_slots; i++) | 506 | for (i = 0; i < ubi->vtbl_slots; i++) |
508 | memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE); | 507 | memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE); |
509 | 508 | ||
510 | for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { | 509 | for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { |
511 | int err; | 510 | int err; |
512 | 511 | ||
513 | err = create_vtbl(ubi, ai, i, vtbl); | 512 | err = create_vtbl(ubi, ai, i, vtbl); |
514 | if (err) { | 513 | if (err) { |
515 | vfree(vtbl); | 514 | vfree(vtbl); |
516 | return ERR_PTR(err); | 515 | return ERR_PTR(err); |
517 | } | 516 | } |
518 | } | 517 | } |
519 | 518 | ||
520 | return vtbl; | 519 | return vtbl; |
521 | } | 520 | } |
522 | 521 | ||
523 | /** | 522 | /** |
524 | * init_volumes - initialize volume information for existing volumes. | 523 | * init_volumes - initialize volume information for existing volumes. |
525 | * @ubi: UBI device description object | 524 | * @ubi: UBI device description object |
526 | * @ai: scanning information | 525 | * @ai: scanning information |
527 | * @vtbl: volume table | 526 | * @vtbl: volume table |
528 | * | 527 | * |
529 | * This function allocates volume description objects for existing volumes. | 528 | * This function allocates volume description objects for existing volumes. |
530 | * Returns zero in case of success and a negative error code in case of | 529 | * Returns zero in case of success and a negative error code in case of |
531 | * failure. | 530 | * failure. |
532 | */ | 531 | */ |
533 | static int init_volumes(struct ubi_device *ubi, | 532 | static int init_volumes(struct ubi_device *ubi, |
534 | const struct ubi_attach_info *ai, | 533 | const struct ubi_attach_info *ai, |
535 | const struct ubi_vtbl_record *vtbl) | 534 | const struct ubi_vtbl_record *vtbl) |
536 | { | 535 | { |
537 | int i, reserved_pebs = 0; | 536 | int i, reserved_pebs = 0; |
538 | struct ubi_ainf_volume *av; | 537 | struct ubi_ainf_volume *av; |
539 | struct ubi_volume *vol; | 538 | struct ubi_volume *vol; |
540 | 539 | ||
541 | for (i = 0; i < ubi->vtbl_slots; i++) { | 540 | for (i = 0; i < ubi->vtbl_slots; i++) { |
542 | cond_resched(); | 541 | cond_resched(); |
543 | 542 | ||
544 | if (be32_to_cpu(vtbl[i].reserved_pebs) == 0) | 543 | if (be32_to_cpu(vtbl[i].reserved_pebs) == 0) |
545 | continue; /* Empty record */ | 544 | continue; /* Empty record */ |
546 | 545 | ||
547 | vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL); | 546 | vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL); |
548 | if (!vol) | 547 | if (!vol) |
549 | return -ENOMEM; | 548 | return -ENOMEM; |
550 | 549 | ||
551 | vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs); | 550 | vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs); |
552 | vol->alignment = be32_to_cpu(vtbl[i].alignment); | 551 | vol->alignment = be32_to_cpu(vtbl[i].alignment); |
553 | vol->data_pad = be32_to_cpu(vtbl[i].data_pad); | 552 | vol->data_pad = be32_to_cpu(vtbl[i].data_pad); |
554 | vol->upd_marker = vtbl[i].upd_marker; | 553 | vol->upd_marker = vtbl[i].upd_marker; |
555 | vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ? | 554 | vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ? |
556 | UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; | 555 | UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; |
557 | vol->name_len = be16_to_cpu(vtbl[i].name_len); | 556 | vol->name_len = be16_to_cpu(vtbl[i].name_len); |
558 | vol->usable_leb_size = ubi->leb_size - vol->data_pad; | 557 | vol->usable_leb_size = ubi->leb_size - vol->data_pad; |
559 | memcpy(vol->name, vtbl[i].name, vol->name_len); | 558 | memcpy(vol->name, vtbl[i].name, vol->name_len); |
560 | vol->name[vol->name_len] = '\0'; | 559 | vol->name[vol->name_len] = '\0'; |
561 | vol->vol_id = i; | 560 | vol->vol_id = i; |
562 | 561 | ||
563 | if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) { | 562 | if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) { |
564 | /* Auto re-size flag may be set only for one volume */ | 563 | /* Auto re-size flag may be set only for one volume */ |
565 | if (ubi->autoresize_vol_id != -1) { | 564 | if (ubi->autoresize_vol_id != -1) { |
566 | ubi_err("more than one auto-resize volume (%d " | 565 | ubi_err("more than one auto-resize volume (%d " |
567 | "and %d)", ubi->autoresize_vol_id, i); | 566 | "and %d)", ubi->autoresize_vol_id, i); |
568 | kfree(vol); | 567 | kfree(vol); |
569 | return -EINVAL; | 568 | return -EINVAL; |
570 | } | 569 | } |
571 | 570 | ||
572 | ubi->autoresize_vol_id = i; | 571 | ubi->autoresize_vol_id = i; |
573 | } | 572 | } |
574 | 573 | ||
575 | ubi_assert(!ubi->volumes[i]); | 574 | ubi_assert(!ubi->volumes[i]); |
576 | ubi->volumes[i] = vol; | 575 | ubi->volumes[i] = vol; |
577 | ubi->vol_count += 1; | 576 | ubi->vol_count += 1; |
578 | vol->ubi = ubi; | 577 | vol->ubi = ubi; |
579 | reserved_pebs += vol->reserved_pebs; | 578 | reserved_pebs += vol->reserved_pebs; |
580 | 579 | ||
581 | /* | 580 | /* |
582 | * In case of dynamic volume UBI knows nothing about how many | 581 | * In case of dynamic volume UBI knows nothing about how many |
583 | * data is stored there. So assume the whole volume is used. | 582 | * data is stored there. So assume the whole volume is used. |
584 | */ | 583 | */ |
585 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) { | 584 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) { |
586 | vol->used_ebs = vol->reserved_pebs; | 585 | vol->used_ebs = vol->reserved_pebs; |
587 | vol->last_eb_bytes = vol->usable_leb_size; | 586 | vol->last_eb_bytes = vol->usable_leb_size; |
588 | vol->used_bytes = | 587 | vol->used_bytes = |
589 | (long long)vol->used_ebs * vol->usable_leb_size; | 588 | (long long)vol->used_ebs * vol->usable_leb_size; |
590 | continue; | 589 | continue; |
591 | } | 590 | } |
592 | 591 | ||
593 | /* Static volumes only */ | 592 | /* Static volumes only */ |
594 | av = ubi_find_av(ai, i); | 593 | av = ubi_find_av(ai, i); |
595 | if (!av) { | 594 | if (!av) { |
596 | /* | 595 | /* |
597 | * No eraseblocks belonging to this volume found. We | 596 | * No eraseblocks belonging to this volume found. We |
598 | * don't actually know whether this static volume is | 597 | * don't actually know whether this static volume is |
599 | * completely corrupted or just contains no data. And | 598 | * completely corrupted or just contains no data. And |
600 | * we cannot know this as long as data size is not | 599 | * we cannot know this as long as data size is not |
601 | * stored on flash. So we just assume the volume is | 600 | * stored on flash. So we just assume the volume is |
602 | * empty. FIXME: this should be handled. | 601 | * empty. FIXME: this should be handled. |
603 | */ | 602 | */ |
604 | continue; | 603 | continue; |
605 | } | 604 | } |
606 | 605 | ||
607 | if (av->leb_count != av->used_ebs) { | 606 | if (av->leb_count != av->used_ebs) { |
608 | /* | 607 | /* |
609 | * We found a static volume which misses several | 608 | * We found a static volume which misses several |
610 | * eraseblocks. Treat it as corrupted. | 609 | * eraseblocks. Treat it as corrupted. |
611 | */ | 610 | */ |
612 | ubi_warn("static volume %d misses %d LEBs - corrupted", | 611 | ubi_warn("static volume %d misses %d LEBs - corrupted", |
613 | av->vol_id, av->used_ebs - av->leb_count); | 612 | av->vol_id, av->used_ebs - av->leb_count); |
614 | vol->corrupted = 1; | 613 | vol->corrupted = 1; |
615 | continue; | 614 | continue; |
616 | } | 615 | } |
617 | 616 | ||
618 | vol->used_ebs = av->used_ebs; | 617 | vol->used_ebs = av->used_ebs; |
619 | vol->used_bytes = | 618 | vol->used_bytes = |
620 | (long long)(vol->used_ebs - 1) * vol->usable_leb_size; | 619 | (long long)(vol->used_ebs - 1) * vol->usable_leb_size; |
621 | vol->used_bytes += av->last_data_size; | 620 | vol->used_bytes += av->last_data_size; |
622 | vol->last_eb_bytes = av->last_data_size; | 621 | vol->last_eb_bytes = av->last_data_size; |
623 | } | 622 | } |
624 | 623 | ||
625 | /* And add the layout volume */ | 624 | /* And add the layout volume */ |
626 | vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL); | 625 | vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL); |
627 | if (!vol) | 626 | if (!vol) |
628 | return -ENOMEM; | 627 | return -ENOMEM; |
629 | 628 | ||
630 | vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS; | 629 | vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS; |
631 | vol->alignment = UBI_LAYOUT_VOLUME_ALIGN; | 630 | vol->alignment = UBI_LAYOUT_VOLUME_ALIGN; |
632 | vol->vol_type = UBI_DYNAMIC_VOLUME; | 631 | vol->vol_type = UBI_DYNAMIC_VOLUME; |
633 | vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1; | 632 | vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1; |
634 | memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1); | 633 | memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1); |
635 | vol->usable_leb_size = ubi->leb_size; | 634 | vol->usable_leb_size = ubi->leb_size; |
636 | vol->used_ebs = vol->reserved_pebs; | 635 | vol->used_ebs = vol->reserved_pebs; |
637 | vol->last_eb_bytes = vol->reserved_pebs; | 636 | vol->last_eb_bytes = vol->reserved_pebs; |
638 | vol->used_bytes = | 637 | vol->used_bytes = |
639 | (long long)vol->used_ebs * (ubi->leb_size - vol->data_pad); | 638 | (long long)vol->used_ebs * (ubi->leb_size - vol->data_pad); |
640 | vol->vol_id = UBI_LAYOUT_VOLUME_ID; | 639 | vol->vol_id = UBI_LAYOUT_VOLUME_ID; |
641 | vol->ref_count = 1; | 640 | vol->ref_count = 1; |
642 | 641 | ||
643 | ubi_assert(!ubi->volumes[i]); | 642 | ubi_assert(!ubi->volumes[i]); |
644 | ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol; | 643 | ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol; |
645 | reserved_pebs += vol->reserved_pebs; | 644 | reserved_pebs += vol->reserved_pebs; |
646 | ubi->vol_count += 1; | 645 | ubi->vol_count += 1; |
647 | vol->ubi = ubi; | 646 | vol->ubi = ubi; |
648 | 647 | ||
649 | if (reserved_pebs > ubi->avail_pebs) { | 648 | if (reserved_pebs > ubi->avail_pebs) { |
650 | ubi_err("not enough PEBs, required %d, available %d", | 649 | ubi_err("not enough PEBs, required %d, available %d", |
651 | reserved_pebs, ubi->avail_pebs); | 650 | reserved_pebs, ubi->avail_pebs); |
652 | if (ubi->corr_peb_count) | 651 | if (ubi->corr_peb_count) |
653 | ubi_err("%d PEBs are corrupted and not used", | 652 | ubi_err("%d PEBs are corrupted and not used", |
654 | ubi->corr_peb_count); | 653 | ubi->corr_peb_count); |
655 | } | 654 | } |
656 | ubi->rsvd_pebs += reserved_pebs; | 655 | ubi->rsvd_pebs += reserved_pebs; |
657 | ubi->avail_pebs -= reserved_pebs; | 656 | ubi->avail_pebs -= reserved_pebs; |
658 | 657 | ||
659 | return 0; | 658 | return 0; |
660 | } | 659 | } |
661 | 660 | ||
662 | /** | 661 | /** |
663 | * check_av - check volume attaching information. | 662 | * check_av - check volume attaching information. |
664 | * @vol: UBI volume description object | 663 | * @vol: UBI volume description object |
665 | * @av: volume attaching information | 664 | * @av: volume attaching information |
666 | * | 665 | * |
667 | * This function returns zero if the volume attaching information is consistent | 666 | * This function returns zero if the volume attaching information is consistent |
668 | * to the data read from the volume tabla, and %-EINVAL if not. | 667 | * to the data read from the volume tabla, and %-EINVAL if not. |
669 | */ | 668 | */ |
670 | static int check_av(const struct ubi_volume *vol, | 669 | static int check_av(const struct ubi_volume *vol, |
671 | const struct ubi_ainf_volume *av) | 670 | const struct ubi_ainf_volume *av) |
672 | { | 671 | { |
673 | int err; | 672 | int err; |
674 | 673 | ||
675 | if (av->highest_lnum >= vol->reserved_pebs) { | 674 | if (av->highest_lnum >= vol->reserved_pebs) { |
676 | err = 1; | 675 | err = 1; |
677 | goto bad; | 676 | goto bad; |
678 | } | 677 | } |
679 | if (av->leb_count > vol->reserved_pebs) { | 678 | if (av->leb_count > vol->reserved_pebs) { |
680 | err = 2; | 679 | err = 2; |
681 | goto bad; | 680 | goto bad; |
682 | } | 681 | } |
683 | if (av->vol_type != vol->vol_type) { | 682 | if (av->vol_type != vol->vol_type) { |
684 | err = 3; | 683 | err = 3; |
685 | goto bad; | 684 | goto bad; |
686 | } | 685 | } |
687 | if (av->used_ebs > vol->reserved_pebs) { | 686 | if (av->used_ebs > vol->reserved_pebs) { |
688 | err = 4; | 687 | err = 4; |
689 | goto bad; | 688 | goto bad; |
690 | } | 689 | } |
691 | if (av->data_pad != vol->data_pad) { | 690 | if (av->data_pad != vol->data_pad) { |
692 | err = 5; | 691 | err = 5; |
693 | goto bad; | 692 | goto bad; |
694 | } | 693 | } |
695 | return 0; | 694 | return 0; |
696 | 695 | ||
697 | bad: | 696 | bad: |
698 | ubi_err("bad attaching information, error %d", err); | 697 | ubi_err("bad attaching information, error %d", err); |
699 | ubi_dump_av(av); | 698 | ubi_dump_av(av); |
700 | ubi_dump_vol_info(vol); | 699 | ubi_dump_vol_info(vol); |
701 | return -EINVAL; | 700 | return -EINVAL; |
702 | } | 701 | } |
703 | 702 | ||
704 | /** | 703 | /** |
705 | * check_scanning_info - check that attaching information. | 704 | * check_attaching_info - check that attaching information. |
706 | * @ubi: UBI device description object | 705 | * @ubi: UBI device description object |
707 | * @ai: attaching information | 706 | * @ai: attaching information |
708 | * | 707 | * |
709 | * Even though we protect on-flash data by CRC checksums, we still don't trust | 708 | * Even though we protect on-flash data by CRC checksums, we still don't trust |
710 | * the media. This function ensures that attaching information is consistent to | 709 | * the media. This function ensures that attaching information is consistent to |
711 | * the information read from the volume table. Returns zero if the scanning | 710 | * the information read from the volume table. Returns zero if the attaching |
712 | * information is OK and %-EINVAL if it is not. | 711 | * information is OK and %-EINVAL if it is not. |
713 | */ | 712 | */ |
714 | static int check_scanning_info(const struct ubi_device *ubi, | 713 | static int check_attaching_info(const struct ubi_device *ubi, |
715 | struct ubi_attach_info *ai) | 714 | struct ubi_attach_info *ai) |
716 | { | 715 | { |
717 | int err, i; | 716 | int err, i; |
718 | struct ubi_ainf_volume *av; | 717 | struct ubi_ainf_volume *av; |
719 | struct ubi_volume *vol; | 718 | struct ubi_volume *vol; |
720 | 719 | ||
721 | if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) { | 720 | if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) { |
722 | ubi_err("scanning found %d volumes, maximum is %d + %d", | 721 | ubi_err("found %d volumes while attaching, maximum is %d + %d", |
723 | ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots); | 722 | ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots); |
724 | return -EINVAL; | 723 | return -EINVAL; |
725 | } | 724 | } |
726 | 725 | ||
727 | if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT && | 726 | if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT && |
728 | ai->highest_vol_id < UBI_INTERNAL_VOL_START) { | 727 | ai->highest_vol_id < UBI_INTERNAL_VOL_START) { |
729 | ubi_err("too large volume ID %d found by scanning", | 728 | ubi_err("too large volume ID %d found", ai->highest_vol_id); |
730 | ai->highest_vol_id); | ||
731 | return -EINVAL; | 729 | return -EINVAL; |
732 | } | 730 | } |
733 | 731 | ||
734 | for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { | 732 | for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { |
735 | cond_resched(); | 733 | cond_resched(); |
736 | 734 | ||
737 | av = ubi_find_av(ai, i); | 735 | av = ubi_find_av(ai, i); |
738 | vol = ubi->volumes[i]; | 736 | vol = ubi->volumes[i]; |
739 | if (!vol) { | 737 | if (!vol) { |
740 | if (av) | 738 | if (av) |
741 | ubi_remove_av(ai, av); | 739 | ubi_remove_av(ai, av); |
742 | continue; | 740 | continue; |
743 | } | 741 | } |
744 | 742 | ||
745 | if (vol->reserved_pebs == 0) { | 743 | if (vol->reserved_pebs == 0) { |
746 | ubi_assert(i < ubi->vtbl_slots); | 744 | ubi_assert(i < ubi->vtbl_slots); |
747 | 745 | ||
748 | if (!av) | 746 | if (!av) |
749 | continue; | 747 | continue; |
750 | 748 | ||
751 | /* | 749 | /* |
752 | * During scanning we found a volume which does not | 750 | * During attaching we found a volume which does not |
753 | * exist according to the information in the volume | 751 | * exist according to the information in the volume |
754 | * table. This must have happened due to an unclean | 752 | * table. This must have happened due to an unclean |
755 | * reboot while the volume was being removed. Discard | 753 | * reboot while the volume was being removed. Discard |
756 | * these eraseblocks. | 754 | * these eraseblocks. |
757 | */ | 755 | */ |
758 | ubi_msg("finish volume %d removal", av->vol_id); | 756 | ubi_msg("finish volume %d removal", av->vol_id); |
759 | ubi_remove_av(ai, av); | 757 | ubi_remove_av(ai, av); |
760 | } else if (av) { | 758 | } else if (av) { |
761 | err = check_av(vol, av); | 759 | err = check_av(vol, av); |
762 | if (err) | 760 | if (err) |
763 | return err; | 761 | return err; |
764 | } | 762 | } |
765 | } | 763 | } |
766 | 764 | ||
767 | return 0; | 765 | return 0; |
768 | } | 766 | } |
769 | 767 | ||
770 | /** | 768 | /** |
771 | * ubi_read_volume_table - read the volume table. | 769 | * ubi_read_volume_table - read the volume table. |
772 | * @ubi: UBI device description object | 770 | * @ubi: UBI device description object |
773 | * @ai: attaching information | 771 | * @ai: attaching information |
774 | * | 772 | * |
775 | * This function reads volume table, checks it, recover from errors if needed, | 773 | * This function reads volume table, checks it, recover from errors if needed, |
776 | * or creates it if needed. Returns zero in case of success and a negative | 774 | * or creates it if needed. Returns zero in case of success and a negative |
777 | * error code in case of failure. | 775 | * error code in case of failure. |
778 | */ | 776 | */ |
779 | int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai) | 777 | int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai) |
780 | { | 778 | { |
781 | int i, err; | 779 | int i, err; |
782 | struct ubi_ainf_volume *av; | 780 | struct ubi_ainf_volume *av; |
783 | 781 | ||
784 | empty_vtbl_record.crc = cpu_to_be32(0xf116c36b); | 782 | empty_vtbl_record.crc = cpu_to_be32(0xf116c36b); |
785 | 783 | ||
786 | /* | 784 | /* |
787 | * The number of supported volumes is limited by the eraseblock size | 785 | * The number of supported volumes is limited by the eraseblock size |
788 | * and by the UBI_MAX_VOLUMES constant. | 786 | * and by the UBI_MAX_VOLUMES constant. |
789 | */ | 787 | */ |
790 | ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE; | 788 | ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE; |
791 | if (ubi->vtbl_slots > UBI_MAX_VOLUMES) | 789 | if (ubi->vtbl_slots > UBI_MAX_VOLUMES) |
792 | ubi->vtbl_slots = UBI_MAX_VOLUMES; | 790 | ubi->vtbl_slots = UBI_MAX_VOLUMES; |
793 | 791 | ||
794 | ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE; | 792 | ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE; |
795 | ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size); | 793 | ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size); |
796 | 794 | ||
797 | av = ubi_find_av(ai, UBI_LAYOUT_VOLUME_ID); | 795 | av = ubi_find_av(ai, UBI_LAYOUT_VOLUME_ID); |
798 | if (!av) { | 796 | if (!av) { |
799 | /* | 797 | /* |
800 | * No logical eraseblocks belonging to the layout volume were | 798 | * No logical eraseblocks belonging to the layout volume were |
801 | * found. This could mean that the flash is just empty. In | 799 | * found. This could mean that the flash is just empty. In |
802 | * this case we create empty layout volume. | 800 | * this case we create empty layout volume. |
803 | * | 801 | * |
804 | * But if flash is not empty this must be a corruption or the | 802 | * But if flash is not empty this must be a corruption or the |
805 | * MTD device just contains garbage. | 803 | * MTD device just contains garbage. |
806 | */ | 804 | */ |
807 | if (ai->is_empty) { | 805 | if (ai->is_empty) { |
808 | ubi->vtbl = create_empty_lvol(ubi, ai); | 806 | ubi->vtbl = create_empty_lvol(ubi, ai); |
809 | if (IS_ERR(ubi->vtbl)) | 807 | if (IS_ERR(ubi->vtbl)) |
810 | return PTR_ERR(ubi->vtbl); | 808 | return PTR_ERR(ubi->vtbl); |
811 | } else { | 809 | } else { |
812 | ubi_err("the layout volume was not found"); | 810 | ubi_err("the layout volume was not found"); |
813 | return -EINVAL; | 811 | return -EINVAL; |
814 | } | 812 | } |
815 | } else { | 813 | } else { |
816 | if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) { | 814 | if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) { |
817 | /* This must not happen with proper UBI images */ | 815 | /* This must not happen with proper UBI images */ |
818 | ubi_err("too many LEBs (%d) in layout volume", | 816 | ubi_err("too many LEBs (%d) in layout volume", |
819 | av->leb_count); | 817 | av->leb_count); |
820 | return -EINVAL; | 818 | return -EINVAL; |
821 | } | 819 | } |
822 | 820 | ||
823 | ubi->vtbl = process_lvol(ubi, ai, av); | 821 | ubi->vtbl = process_lvol(ubi, ai, av); |
824 | if (IS_ERR(ubi->vtbl)) | 822 | if (IS_ERR(ubi->vtbl)) |
825 | return PTR_ERR(ubi->vtbl); | 823 | return PTR_ERR(ubi->vtbl); |
826 | } | 824 | } |
827 | 825 | ||
828 | ubi->avail_pebs = ubi->good_peb_count - ubi->corr_peb_count; | 826 | ubi->avail_pebs = ubi->good_peb_count - ubi->corr_peb_count; |
829 | 827 | ||
830 | /* | 828 | /* |
831 | * The layout volume is OK, initialize the corresponding in-RAM data | 829 | * The layout volume is OK, initialize the corresponding in-RAM data |
832 | * structures. | 830 | * structures. |
833 | */ | 831 | */ |
834 | err = init_volumes(ubi, ai, ubi->vtbl); | 832 | err = init_volumes(ubi, ai, ubi->vtbl); |
835 | if (err) | 833 | if (err) |
836 | goto out_free; | 834 | goto out_free; |
837 | 835 | ||
838 | /* | 836 | /* |
839 | * Make sure that the attaching information is consistent to the | 837 | * Make sure that the attaching information is consistent to the |
840 | * information stored in the volume table. | 838 | * information stored in the volume table. |
841 | */ | 839 | */ |
842 | err = check_scanning_info(ubi, ai); | 840 | err = check_attaching_info(ubi, ai); |
843 | if (err) | 841 | if (err) |
844 | goto out_free; | 842 | goto out_free; |
845 | 843 | ||
846 | return 0; | 844 | return 0; |
847 | 845 | ||
848 | out_free: | 846 | out_free: |
849 | vfree(ubi->vtbl); | 847 | vfree(ubi->vtbl); |
850 | for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { | 848 | for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { |
851 | kfree(ubi->volumes[i]); | 849 | kfree(ubi->volumes[i]); |
852 | ubi->volumes[i] = NULL; | 850 | ubi->volumes[i] = NULL; |
853 | } | 851 | } |
854 | return err; | 852 | return err; |
855 | } | 853 | } |
856 | 854 | ||
857 | /** | 855 | /** |
858 | * self_vtbl_check - check volume table. | 856 | * self_vtbl_check - check volume table. |
859 | * @ubi: UBI device description object | 857 | * @ubi: UBI device description object |
860 | */ | 858 | */ |
861 | static void self_vtbl_check(const struct ubi_device *ubi) | 859 | static void self_vtbl_check(const struct ubi_device *ubi) |
862 | { | 860 | { |
863 | if (!ubi->dbg->chk_gen) | 861 | if (!ubi->dbg->chk_gen) |
864 | return; | 862 | return; |
865 | 863 | ||
866 | if (vtbl_check(ubi, ubi->vtbl)) { | 864 | if (vtbl_check(ubi, ubi->vtbl)) { |
867 | ubi_err("self-check failed"); | 865 | ubi_err("self-check failed"); |
868 | BUG(); | 866 | BUG(); |
869 | } | 867 | } |
870 | } | 868 | } |
871 | 869 |