Commit 41e0cd9d4eeff0895e66cad5c70a90ba41023ea3
1 parent fbd0107f4d
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
UBI: rename _init_scan functions
We have a couple of initialization functions left which have a "_scan" suffix - rename them:

ubi_eba_init_scan() -> ubi_eba_init()
ubi_wl_init_scan() -> ubi_wl_init()

Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
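For context, the rename keeps the original signatures; only the "_scan" suffix is dropped. A minimal sketch of the before/after declarations, inferred from the call sites visible in the diff below (the declarations themselves would live in ubi.h, which is not part of this excerpt):

/* Before: attach-time initialization entry points carry a "_scan" suffix */
int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_attach_info *ai);
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_attach_info *ai);

/* After: same prototypes, renamed without the suffix */
int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai);

The call sites in attach_by_scanning() in build.c change accordingly, as shown in the diff below.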
Showing 4 changed files with 9 additions and 9 deletions
drivers/mtd/ubi/build.c
1 | /* | 1 | /* |
2 | * Copyright (c) International Business Machines Corp., 2006 | 2 | * Copyright (c) International Business Machines Corp., 2006 |
3 | * Copyright (c) Nokia Corporation, 2007 | 3 | * Copyright (c) Nokia Corporation, 2007 |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation; either version 2 of the License, or | 7 | * the Free Software Foundation; either version 2 of the License, or |
8 | * (at your option) any later version. | 8 | * (at your option) any later version. |
9 | * | 9 | * |
10 | * This program is distributed in the hope that it will be useful, | 10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See |
13 | * the GNU General Public License for more details. | 13 | * the GNU General Public License for more details. |
14 | * | 14 | * |
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | * | 18 | * |
19 | * Author: Artem Bityutskiy (Битюцкий Артём), | 19 | * Author: Artem Bityutskiy (Битюцкий Артём), |
20 | * Frank Haverkamp | 20 | * Frank Haverkamp |
21 | */ | 21 | */ |
22 | 22 | ||
23 | /* | 23 | /* |
24 | * This file includes UBI initialization and building of UBI devices. | 24 | * This file includes UBI initialization and building of UBI devices. |
25 | * | 25 | * |
26 | * When UBI is initialized, it attaches all the MTD devices specified as the | 26 | * When UBI is initialized, it attaches all the MTD devices specified as the |
27 | * module load parameters or the kernel boot parameters. If MTD devices were | 27 | * module load parameters or the kernel boot parameters. If MTD devices were |
28 | * specified, UBI does not attach any MTD device, but it is possible to do | 28 | * specified, UBI does not attach any MTD device, but it is possible to do |
29 | * later using the "UBI control device". | 29 | * later using the "UBI control device". |
30 | */ | 30 | */ |
31 | 31 | ||
32 | #include <linux/err.h> | 32 | #include <linux/err.h> |
33 | #include <linux/module.h> | 33 | #include <linux/module.h> |
34 | #include <linux/moduleparam.h> | 34 | #include <linux/moduleparam.h> |
35 | #include <linux/stringify.h> | 35 | #include <linux/stringify.h> |
36 | #include <linux/namei.h> | 36 | #include <linux/namei.h> |
37 | #include <linux/stat.h> | 37 | #include <linux/stat.h> |
38 | #include <linux/miscdevice.h> | 38 | #include <linux/miscdevice.h> |
39 | #include <linux/log2.h> | 39 | #include <linux/log2.h> |
40 | #include <linux/kthread.h> | 40 | #include <linux/kthread.h> |
41 | #include <linux/kernel.h> | 41 | #include <linux/kernel.h> |
42 | #include <linux/slab.h> | 42 | #include <linux/slab.h> |
43 | #include "ubi.h" | 43 | #include "ubi.h" |
44 | 44 | ||
45 | /* Maximum length of the 'mtd=' parameter */ | 45 | /* Maximum length of the 'mtd=' parameter */ |
46 | #define MTD_PARAM_LEN_MAX 64 | 46 | #define MTD_PARAM_LEN_MAX 64 |
47 | 47 | ||
48 | #ifdef CONFIG_MTD_UBI_MODULE | 48 | #ifdef CONFIG_MTD_UBI_MODULE |
49 | #define ubi_is_module() 1 | 49 | #define ubi_is_module() 1 |
50 | #else | 50 | #else |
51 | #define ubi_is_module() 0 | 51 | #define ubi_is_module() 0 |
52 | #endif | 52 | #endif |
53 | 53 | ||
54 | /** | 54 | /** |
55 | * struct mtd_dev_param - MTD device parameter description data structure. | 55 | * struct mtd_dev_param - MTD device parameter description data structure. |
56 | * @name: MTD character device node path, MTD device name, or MTD device number | 56 | * @name: MTD character device node path, MTD device name, or MTD device number |
57 | * string | 57 | * string |
58 | * @vid_hdr_offs: VID header offset | 58 | * @vid_hdr_offs: VID header offset |
59 | */ | 59 | */ |
60 | struct mtd_dev_param { | 60 | struct mtd_dev_param { |
61 | char name[MTD_PARAM_LEN_MAX]; | 61 | char name[MTD_PARAM_LEN_MAX]; |
62 | int vid_hdr_offs; | 62 | int vid_hdr_offs; |
63 | }; | 63 | }; |
64 | 64 | ||
65 | /* Numbers of elements set in the @mtd_dev_param array */ | 65 | /* Numbers of elements set in the @mtd_dev_param array */ |
66 | static int __initdata mtd_devs; | 66 | static int __initdata mtd_devs; |
67 | 67 | ||
68 | /* MTD devices specification parameters */ | 68 | /* MTD devices specification parameters */ |
69 | static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES]; | 69 | static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES]; |
70 | 70 | ||
71 | /* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */ | 71 | /* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */ |
72 | struct class *ubi_class; | 72 | struct class *ubi_class; |
73 | 73 | ||
74 | /* Slab cache for wear-leveling entries */ | 74 | /* Slab cache for wear-leveling entries */ |
75 | struct kmem_cache *ubi_wl_entry_slab; | 75 | struct kmem_cache *ubi_wl_entry_slab; |
76 | 76 | ||
77 | /* UBI control character device */ | 77 | /* UBI control character device */ |
78 | static struct miscdevice ubi_ctrl_cdev = { | 78 | static struct miscdevice ubi_ctrl_cdev = { |
79 | .minor = MISC_DYNAMIC_MINOR, | 79 | .minor = MISC_DYNAMIC_MINOR, |
80 | .name = "ubi_ctrl", | 80 | .name = "ubi_ctrl", |
81 | .fops = &ubi_ctrl_cdev_operations, | 81 | .fops = &ubi_ctrl_cdev_operations, |
82 | }; | 82 | }; |
83 | 83 | ||
84 | /* All UBI devices in system */ | 84 | /* All UBI devices in system */ |
85 | static struct ubi_device *ubi_devices[UBI_MAX_DEVICES]; | 85 | static struct ubi_device *ubi_devices[UBI_MAX_DEVICES]; |
86 | 86 | ||
87 | /* Serializes UBI devices creations and removals */ | 87 | /* Serializes UBI devices creations and removals */ |
88 | DEFINE_MUTEX(ubi_devices_mutex); | 88 | DEFINE_MUTEX(ubi_devices_mutex); |
89 | 89 | ||
90 | /* Protects @ubi_devices and @ubi->ref_count */ | 90 | /* Protects @ubi_devices and @ubi->ref_count */ |
91 | static DEFINE_SPINLOCK(ubi_devices_lock); | 91 | static DEFINE_SPINLOCK(ubi_devices_lock); |
92 | 92 | ||
93 | /* "Show" method for files in '/<sysfs>/class/ubi/' */ | 93 | /* "Show" method for files in '/<sysfs>/class/ubi/' */ |
94 | static ssize_t ubi_version_show(struct class *class, | 94 | static ssize_t ubi_version_show(struct class *class, |
95 | struct class_attribute *attr, char *buf) | 95 | struct class_attribute *attr, char *buf) |
96 | { | 96 | { |
97 | return sprintf(buf, "%d\n", UBI_VERSION); | 97 | return sprintf(buf, "%d\n", UBI_VERSION); |
98 | } | 98 | } |
99 | 99 | ||
100 | /* UBI version attribute ('/<sysfs>/class/ubi/version') */ | 100 | /* UBI version attribute ('/<sysfs>/class/ubi/version') */ |
101 | static struct class_attribute ubi_version = | 101 | static struct class_attribute ubi_version = |
102 | __ATTR(version, S_IRUGO, ubi_version_show, NULL); | 102 | __ATTR(version, S_IRUGO, ubi_version_show, NULL); |
103 | 103 | ||
104 | static ssize_t dev_attribute_show(struct device *dev, | 104 | static ssize_t dev_attribute_show(struct device *dev, |
105 | struct device_attribute *attr, char *buf); | 105 | struct device_attribute *attr, char *buf); |
106 | 106 | ||
107 | /* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */ | 107 | /* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */ |
108 | static struct device_attribute dev_eraseblock_size = | 108 | static struct device_attribute dev_eraseblock_size = |
109 | __ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL); | 109 | __ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL); |
110 | static struct device_attribute dev_avail_eraseblocks = | 110 | static struct device_attribute dev_avail_eraseblocks = |
111 | __ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL); | 111 | __ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL); |
112 | static struct device_attribute dev_total_eraseblocks = | 112 | static struct device_attribute dev_total_eraseblocks = |
113 | __ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL); | 113 | __ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL); |
114 | static struct device_attribute dev_volumes_count = | 114 | static struct device_attribute dev_volumes_count = |
115 | __ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL); | 115 | __ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL); |
116 | static struct device_attribute dev_max_ec = | 116 | static struct device_attribute dev_max_ec = |
117 | __ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL); | 117 | __ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL); |
118 | static struct device_attribute dev_reserved_for_bad = | 118 | static struct device_attribute dev_reserved_for_bad = |
119 | __ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL); | 119 | __ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL); |
120 | static struct device_attribute dev_bad_peb_count = | 120 | static struct device_attribute dev_bad_peb_count = |
121 | __ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL); | 121 | __ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL); |
122 | static struct device_attribute dev_max_vol_count = | 122 | static struct device_attribute dev_max_vol_count = |
123 | __ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL); | 123 | __ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL); |
124 | static struct device_attribute dev_min_io_size = | 124 | static struct device_attribute dev_min_io_size = |
125 | __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL); | 125 | __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL); |
126 | static struct device_attribute dev_bgt_enabled = | 126 | static struct device_attribute dev_bgt_enabled = |
127 | __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL); | 127 | __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL); |
128 | static struct device_attribute dev_mtd_num = | 128 | static struct device_attribute dev_mtd_num = |
129 | __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL); | 129 | __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL); |
130 | 130 | ||
131 | /** | 131 | /** |
132 | * ubi_volume_notify - send a volume change notification. | 132 | * ubi_volume_notify - send a volume change notification. |
133 | * @ubi: UBI device description object | 133 | * @ubi: UBI device description object |
134 | * @vol: volume description object of the changed volume | 134 | * @vol: volume description object of the changed volume |
135 | * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc) | 135 | * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc) |
136 | * | 136 | * |
137 | * This is a helper function which notifies all subscribers about a volume | 137 | * This is a helper function which notifies all subscribers about a volume |
138 | * change event (creation, removal, re-sizing, re-naming, updating). Returns | 138 | * change event (creation, removal, re-sizing, re-naming, updating). Returns |
139 | * zero in case of success and a negative error code in case of failure. | 139 | * zero in case of success and a negative error code in case of failure. |
140 | */ | 140 | */ |
141 | int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype) | 141 | int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype) |
142 | { | 142 | { |
143 | struct ubi_notification nt; | 143 | struct ubi_notification nt; |
144 | 144 | ||
145 | ubi_do_get_device_info(ubi, &nt.di); | 145 | ubi_do_get_device_info(ubi, &nt.di); |
146 | ubi_do_get_volume_info(ubi, vol, &nt.vi); | 146 | ubi_do_get_volume_info(ubi, vol, &nt.vi); |
147 | return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt); | 147 | return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt); |
148 | } | 148 | } |
149 | 149 | ||
150 | /** | 150 | /** |
151 | * ubi_notify_all - send a notification to all volumes. | 151 | * ubi_notify_all - send a notification to all volumes. |
152 | * @ubi: UBI device description object | 152 | * @ubi: UBI device description object |
153 | * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc) | 153 | * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc) |
154 | * @nb: the notifier to call | 154 | * @nb: the notifier to call |
155 | * | 155 | * |
156 | * This function walks all volumes of UBI device @ubi and sends the @ntype | 156 | * This function walks all volumes of UBI device @ubi and sends the @ntype |
157 | * notification for each volume. If @nb is %NULL, then all registered notifiers | 157 | * notification for each volume. If @nb is %NULL, then all registered notifiers |
158 | * are called, otherwise only the @nb notifier is called. Returns the number of | 158 | * are called, otherwise only the @nb notifier is called. Returns the number of |
159 | * sent notifications. | 159 | * sent notifications. |
160 | */ | 160 | */ |
161 | int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb) | 161 | int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb) |
162 | { | 162 | { |
163 | struct ubi_notification nt; | 163 | struct ubi_notification nt; |
164 | int i, count = 0; | 164 | int i, count = 0; |
165 | 165 | ||
166 | ubi_do_get_device_info(ubi, &nt.di); | 166 | ubi_do_get_device_info(ubi, &nt.di); |
167 | 167 | ||
168 | mutex_lock(&ubi->device_mutex); | 168 | mutex_lock(&ubi->device_mutex); |
169 | for (i = 0; i < ubi->vtbl_slots; i++) { | 169 | for (i = 0; i < ubi->vtbl_slots; i++) { |
170 | /* | 170 | /* |
171 | * Since the @ubi->device is locked, and we are not going to | 171 | * Since the @ubi->device is locked, and we are not going to |
172 | * change @ubi->volumes, we do not have to lock | 172 | * change @ubi->volumes, we do not have to lock |
173 | * @ubi->volumes_lock. | 173 | * @ubi->volumes_lock. |
174 | */ | 174 | */ |
175 | if (!ubi->volumes[i]) | 175 | if (!ubi->volumes[i]) |
176 | continue; | 176 | continue; |
177 | 177 | ||
178 | ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi); | 178 | ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi); |
179 | if (nb) | 179 | if (nb) |
180 | nb->notifier_call(nb, ntype, &nt); | 180 | nb->notifier_call(nb, ntype, &nt); |
181 | else | 181 | else |
182 | blocking_notifier_call_chain(&ubi_notifiers, ntype, | 182 | blocking_notifier_call_chain(&ubi_notifiers, ntype, |
183 | &nt); | 183 | &nt); |
184 | count += 1; | 184 | count += 1; |
185 | } | 185 | } |
186 | mutex_unlock(&ubi->device_mutex); | 186 | mutex_unlock(&ubi->device_mutex); |
187 | 187 | ||
188 | return count; | 188 | return count; |
189 | } | 189 | } |
190 | 190 | ||
191 | /** | 191 | /** |
192 | * ubi_enumerate_volumes - send "add" notification for all existing volumes. | 192 | * ubi_enumerate_volumes - send "add" notification for all existing volumes. |
193 | * @nb: the notifier to call | 193 | * @nb: the notifier to call |
194 | * | 194 | * |
195 | * This function walks all UBI devices and volumes and sends the | 195 | * This function walks all UBI devices and volumes and sends the |
196 | * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all | 196 | * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all |
197 | * registered notifiers are called, otherwise only the @nb notifier is called. | 197 | * registered notifiers are called, otherwise only the @nb notifier is called. |
198 | * Returns the number of sent notifications. | 198 | * Returns the number of sent notifications. |
199 | */ | 199 | */ |
200 | int ubi_enumerate_volumes(struct notifier_block *nb) | 200 | int ubi_enumerate_volumes(struct notifier_block *nb) |
201 | { | 201 | { |
202 | int i, count = 0; | 202 | int i, count = 0; |
203 | 203 | ||
204 | /* | 204 | /* |
205 | * Since the @ubi_devices_mutex is locked, and we are not going to | 205 | * Since the @ubi_devices_mutex is locked, and we are not going to |
206 | * change @ubi_devices, we do not have to lock @ubi_devices_lock. | 206 | * change @ubi_devices, we do not have to lock @ubi_devices_lock. |
207 | */ | 207 | */ |
208 | for (i = 0; i < UBI_MAX_DEVICES; i++) { | 208 | for (i = 0; i < UBI_MAX_DEVICES; i++) { |
209 | struct ubi_device *ubi = ubi_devices[i]; | 209 | struct ubi_device *ubi = ubi_devices[i]; |
210 | 210 | ||
211 | if (!ubi) | 211 | if (!ubi) |
212 | continue; | 212 | continue; |
213 | count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb); | 213 | count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb); |
214 | } | 214 | } |
215 | 215 | ||
216 | return count; | 216 | return count; |
217 | } | 217 | } |
218 | 218 | ||
219 | /** | 219 | /** |
220 | * ubi_get_device - get UBI device. | 220 | * ubi_get_device - get UBI device. |
221 | * @ubi_num: UBI device number | 221 | * @ubi_num: UBI device number |
222 | * | 222 | * |
223 | * This function returns UBI device description object for UBI device number | 223 | * This function returns UBI device description object for UBI device number |
224 | * @ubi_num, or %NULL if the device does not exist. This function increases the | 224 | * @ubi_num, or %NULL if the device does not exist. This function increases the |
225 | * device reference count to prevent removal of the device. In other words, the | 225 | * device reference count to prevent removal of the device. In other words, the |
226 | * device cannot be removed if its reference count is not zero. | 226 | * device cannot be removed if its reference count is not zero. |
227 | */ | 227 | */ |
228 | struct ubi_device *ubi_get_device(int ubi_num) | 228 | struct ubi_device *ubi_get_device(int ubi_num) |
229 | { | 229 | { |
230 | struct ubi_device *ubi; | 230 | struct ubi_device *ubi; |
231 | 231 | ||
232 | spin_lock(&ubi_devices_lock); | 232 | spin_lock(&ubi_devices_lock); |
233 | ubi = ubi_devices[ubi_num]; | 233 | ubi = ubi_devices[ubi_num]; |
234 | if (ubi) { | 234 | if (ubi) { |
235 | ubi_assert(ubi->ref_count >= 0); | 235 | ubi_assert(ubi->ref_count >= 0); |
236 | ubi->ref_count += 1; | 236 | ubi->ref_count += 1; |
237 | get_device(&ubi->dev); | 237 | get_device(&ubi->dev); |
238 | } | 238 | } |
239 | spin_unlock(&ubi_devices_lock); | 239 | spin_unlock(&ubi_devices_lock); |
240 | 240 | ||
241 | return ubi; | 241 | return ubi; |
242 | } | 242 | } |
243 | 243 | ||
244 | /** | 244 | /** |
245 | * ubi_put_device - drop an UBI device reference. | 245 | * ubi_put_device - drop an UBI device reference. |
246 | * @ubi: UBI device description object | 246 | * @ubi: UBI device description object |
247 | */ | 247 | */ |
248 | void ubi_put_device(struct ubi_device *ubi) | 248 | void ubi_put_device(struct ubi_device *ubi) |
249 | { | 249 | { |
250 | spin_lock(&ubi_devices_lock); | 250 | spin_lock(&ubi_devices_lock); |
251 | ubi->ref_count -= 1; | 251 | ubi->ref_count -= 1; |
252 | put_device(&ubi->dev); | 252 | put_device(&ubi->dev); |
253 | spin_unlock(&ubi_devices_lock); | 253 | spin_unlock(&ubi_devices_lock); |
254 | } | 254 | } |
255 | 255 | ||
256 | /** | 256 | /** |
257 | * ubi_get_by_major - get UBI device by character device major number. | 257 | * ubi_get_by_major - get UBI device by character device major number. |
258 | * @major: major number | 258 | * @major: major number |
259 | * | 259 | * |
260 | * This function is similar to 'ubi_get_device()', but it searches the device | 260 | * This function is similar to 'ubi_get_device()', but it searches the device |
261 | * by its major number. | 261 | * by its major number. |
262 | */ | 262 | */ |
263 | struct ubi_device *ubi_get_by_major(int major) | 263 | struct ubi_device *ubi_get_by_major(int major) |
264 | { | 264 | { |
265 | int i; | 265 | int i; |
266 | struct ubi_device *ubi; | 266 | struct ubi_device *ubi; |
267 | 267 | ||
268 | spin_lock(&ubi_devices_lock); | 268 | spin_lock(&ubi_devices_lock); |
269 | for (i = 0; i < UBI_MAX_DEVICES; i++) { | 269 | for (i = 0; i < UBI_MAX_DEVICES; i++) { |
270 | ubi = ubi_devices[i]; | 270 | ubi = ubi_devices[i]; |
271 | if (ubi && MAJOR(ubi->cdev.dev) == major) { | 271 | if (ubi && MAJOR(ubi->cdev.dev) == major) { |
272 | ubi_assert(ubi->ref_count >= 0); | 272 | ubi_assert(ubi->ref_count >= 0); |
273 | ubi->ref_count += 1; | 273 | ubi->ref_count += 1; |
274 | get_device(&ubi->dev); | 274 | get_device(&ubi->dev); |
275 | spin_unlock(&ubi_devices_lock); | 275 | spin_unlock(&ubi_devices_lock); |
276 | return ubi; | 276 | return ubi; |
277 | } | 277 | } |
278 | } | 278 | } |
279 | spin_unlock(&ubi_devices_lock); | 279 | spin_unlock(&ubi_devices_lock); |
280 | 280 | ||
281 | return NULL; | 281 | return NULL; |
282 | } | 282 | } |
283 | 283 | ||
284 | /** | 284 | /** |
285 | * ubi_major2num - get UBI device number by character device major number. | 285 | * ubi_major2num - get UBI device number by character device major number. |
286 | * @major: major number | 286 | * @major: major number |
287 | * | 287 | * |
288 | * This function searches UBI device number object by its major number. If UBI | 288 | * This function searches UBI device number object by its major number. If UBI |
289 | * device was not found, this function returns -ENODEV, otherwise the UBI device | 289 | * device was not found, this function returns -ENODEV, otherwise the UBI device |
290 | * number is returned. | 290 | * number is returned. |
291 | */ | 291 | */ |
292 | int ubi_major2num(int major) | 292 | int ubi_major2num(int major) |
293 | { | 293 | { |
294 | int i, ubi_num = -ENODEV; | 294 | int i, ubi_num = -ENODEV; |
295 | 295 | ||
296 | spin_lock(&ubi_devices_lock); | 296 | spin_lock(&ubi_devices_lock); |
297 | for (i = 0; i < UBI_MAX_DEVICES; i++) { | 297 | for (i = 0; i < UBI_MAX_DEVICES; i++) { |
298 | struct ubi_device *ubi = ubi_devices[i]; | 298 | struct ubi_device *ubi = ubi_devices[i]; |
299 | 299 | ||
300 | if (ubi && MAJOR(ubi->cdev.dev) == major) { | 300 | if (ubi && MAJOR(ubi->cdev.dev) == major) { |
301 | ubi_num = ubi->ubi_num; | 301 | ubi_num = ubi->ubi_num; |
302 | break; | 302 | break; |
303 | } | 303 | } |
304 | } | 304 | } |
305 | spin_unlock(&ubi_devices_lock); | 305 | spin_unlock(&ubi_devices_lock); |
306 | 306 | ||
307 | return ubi_num; | 307 | return ubi_num; |
308 | } | 308 | } |
309 | 309 | ||
310 | /* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */ | 310 | /* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */ |
311 | static ssize_t dev_attribute_show(struct device *dev, | 311 | static ssize_t dev_attribute_show(struct device *dev, |
312 | struct device_attribute *attr, char *buf) | 312 | struct device_attribute *attr, char *buf) |
313 | { | 313 | { |
314 | ssize_t ret; | 314 | ssize_t ret; |
315 | struct ubi_device *ubi; | 315 | struct ubi_device *ubi; |
316 | 316 | ||
317 | /* | 317 | /* |
318 | * The below code looks weird, but it actually makes sense. We get the | 318 | * The below code looks weird, but it actually makes sense. We get the |
319 | * UBI device reference from the contained 'struct ubi_device'. But it | 319 | * UBI device reference from the contained 'struct ubi_device'. But it |
320 | * is unclear if the device was removed or not yet. Indeed, if the | 320 | * is unclear if the device was removed or not yet. Indeed, if the |
321 | * device was removed before we increased its reference count, | 321 | * device was removed before we increased its reference count, |
322 | * 'ubi_get_device()' will return -ENODEV and we fail. | 322 | * 'ubi_get_device()' will return -ENODEV and we fail. |
323 | * | 323 | * |
324 | * Remember, 'struct ubi_device' is freed in the release function, so | 324 | * Remember, 'struct ubi_device' is freed in the release function, so |
325 | * we still can use 'ubi->ubi_num'. | 325 | * we still can use 'ubi->ubi_num'. |
326 | */ | 326 | */ |
327 | ubi = container_of(dev, struct ubi_device, dev); | 327 | ubi = container_of(dev, struct ubi_device, dev); |
328 | ubi = ubi_get_device(ubi->ubi_num); | 328 | ubi = ubi_get_device(ubi->ubi_num); |
329 | if (!ubi) | 329 | if (!ubi) |
330 | return -ENODEV; | 330 | return -ENODEV; |
331 | 331 | ||
332 | if (attr == &dev_eraseblock_size) | 332 | if (attr == &dev_eraseblock_size) |
333 | ret = sprintf(buf, "%d\n", ubi->leb_size); | 333 | ret = sprintf(buf, "%d\n", ubi->leb_size); |
334 | else if (attr == &dev_avail_eraseblocks) | 334 | else if (attr == &dev_avail_eraseblocks) |
335 | ret = sprintf(buf, "%d\n", ubi->avail_pebs); | 335 | ret = sprintf(buf, "%d\n", ubi->avail_pebs); |
336 | else if (attr == &dev_total_eraseblocks) | 336 | else if (attr == &dev_total_eraseblocks) |
337 | ret = sprintf(buf, "%d\n", ubi->good_peb_count); | 337 | ret = sprintf(buf, "%d\n", ubi->good_peb_count); |
338 | else if (attr == &dev_volumes_count) | 338 | else if (attr == &dev_volumes_count) |
339 | ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT); | 339 | ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT); |
340 | else if (attr == &dev_max_ec) | 340 | else if (attr == &dev_max_ec) |
341 | ret = sprintf(buf, "%d\n", ubi->max_ec); | 341 | ret = sprintf(buf, "%d\n", ubi->max_ec); |
342 | else if (attr == &dev_reserved_for_bad) | 342 | else if (attr == &dev_reserved_for_bad) |
343 | ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs); | 343 | ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs); |
344 | else if (attr == &dev_bad_peb_count) | 344 | else if (attr == &dev_bad_peb_count) |
345 | ret = sprintf(buf, "%d\n", ubi->bad_peb_count); | 345 | ret = sprintf(buf, "%d\n", ubi->bad_peb_count); |
346 | else if (attr == &dev_max_vol_count) | 346 | else if (attr == &dev_max_vol_count) |
347 | ret = sprintf(buf, "%d\n", ubi->vtbl_slots); | 347 | ret = sprintf(buf, "%d\n", ubi->vtbl_slots); |
348 | else if (attr == &dev_min_io_size) | 348 | else if (attr == &dev_min_io_size) |
349 | ret = sprintf(buf, "%d\n", ubi->min_io_size); | 349 | ret = sprintf(buf, "%d\n", ubi->min_io_size); |
350 | else if (attr == &dev_bgt_enabled) | 350 | else if (attr == &dev_bgt_enabled) |
351 | ret = sprintf(buf, "%d\n", ubi->thread_enabled); | 351 | ret = sprintf(buf, "%d\n", ubi->thread_enabled); |
352 | else if (attr == &dev_mtd_num) | 352 | else if (attr == &dev_mtd_num) |
353 | ret = sprintf(buf, "%d\n", ubi->mtd->index); | 353 | ret = sprintf(buf, "%d\n", ubi->mtd->index); |
354 | else | 354 | else |
355 | ret = -EINVAL; | 355 | ret = -EINVAL; |
356 | 356 | ||
357 | ubi_put_device(ubi); | 357 | ubi_put_device(ubi); |
358 | return ret; | 358 | return ret; |
359 | } | 359 | } |
360 | 360 | ||
361 | static void dev_release(struct device *dev) | 361 | static void dev_release(struct device *dev) |
362 | { | 362 | { |
363 | struct ubi_device *ubi = container_of(dev, struct ubi_device, dev); | 363 | struct ubi_device *ubi = container_of(dev, struct ubi_device, dev); |
364 | 364 | ||
365 | kfree(ubi); | 365 | kfree(ubi); |
366 | } | 366 | } |
367 | 367 | ||
368 | /** | 368 | /** |
369 | * ubi_sysfs_init - initialize sysfs for an UBI device. | 369 | * ubi_sysfs_init - initialize sysfs for an UBI device. |
370 | * @ubi: UBI device description object | 370 | * @ubi: UBI device description object |
371 | * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was | 371 | * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was |
372 | * taken | 372 | * taken |
373 | * | 373 | * |
374 | * This function returns zero in case of success and a negative error code in | 374 | * This function returns zero in case of success and a negative error code in |
375 | * case of failure. | 375 | * case of failure. |
376 | */ | 376 | */ |
377 | static int ubi_sysfs_init(struct ubi_device *ubi, int *ref) | 377 | static int ubi_sysfs_init(struct ubi_device *ubi, int *ref) |
378 | { | 378 | { |
379 | int err; | 379 | int err; |
380 | 380 | ||
381 | ubi->dev.release = dev_release; | 381 | ubi->dev.release = dev_release; |
382 | ubi->dev.devt = ubi->cdev.dev; | 382 | ubi->dev.devt = ubi->cdev.dev; |
383 | ubi->dev.class = ubi_class; | 383 | ubi->dev.class = ubi_class; |
384 | dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num); | 384 | dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num); |
385 | err = device_register(&ubi->dev); | 385 | err = device_register(&ubi->dev); |
386 | if (err) | 386 | if (err) |
387 | return err; | 387 | return err; |
388 | 388 | ||
389 | *ref = 1; | 389 | *ref = 1; |
390 | err = device_create_file(&ubi->dev, &dev_eraseblock_size); | 390 | err = device_create_file(&ubi->dev, &dev_eraseblock_size); |
391 | if (err) | 391 | if (err) |
392 | return err; | 392 | return err; |
393 | err = device_create_file(&ubi->dev, &dev_avail_eraseblocks); | 393 | err = device_create_file(&ubi->dev, &dev_avail_eraseblocks); |
394 | if (err) | 394 | if (err) |
395 | return err; | 395 | return err; |
396 | err = device_create_file(&ubi->dev, &dev_total_eraseblocks); | 396 | err = device_create_file(&ubi->dev, &dev_total_eraseblocks); |
397 | if (err) | 397 | if (err) |
398 | return err; | 398 | return err; |
399 | err = device_create_file(&ubi->dev, &dev_volumes_count); | 399 | err = device_create_file(&ubi->dev, &dev_volumes_count); |
400 | if (err) | 400 | if (err) |
401 | return err; | 401 | return err; |
402 | err = device_create_file(&ubi->dev, &dev_max_ec); | 402 | err = device_create_file(&ubi->dev, &dev_max_ec); |
403 | if (err) | 403 | if (err) |
404 | return err; | 404 | return err; |
405 | err = device_create_file(&ubi->dev, &dev_reserved_for_bad); | 405 | err = device_create_file(&ubi->dev, &dev_reserved_for_bad); |
406 | if (err) | 406 | if (err) |
407 | return err; | 407 | return err; |
408 | err = device_create_file(&ubi->dev, &dev_bad_peb_count); | 408 | err = device_create_file(&ubi->dev, &dev_bad_peb_count); |
409 | if (err) | 409 | if (err) |
410 | return err; | 410 | return err; |
411 | err = device_create_file(&ubi->dev, &dev_max_vol_count); | 411 | err = device_create_file(&ubi->dev, &dev_max_vol_count); |
412 | if (err) | 412 | if (err) |
413 | return err; | 413 | return err; |
414 | err = device_create_file(&ubi->dev, &dev_min_io_size); | 414 | err = device_create_file(&ubi->dev, &dev_min_io_size); |
415 | if (err) | 415 | if (err) |
416 | return err; | 416 | return err; |
417 | err = device_create_file(&ubi->dev, &dev_bgt_enabled); | 417 | err = device_create_file(&ubi->dev, &dev_bgt_enabled); |
418 | if (err) | 418 | if (err) |
419 | return err; | 419 | return err; |
420 | err = device_create_file(&ubi->dev, &dev_mtd_num); | 420 | err = device_create_file(&ubi->dev, &dev_mtd_num); |
421 | return err; | 421 | return err; |
422 | } | 422 | } |
423 | 423 | ||
424 | /** | 424 | /** |
425 | * ubi_sysfs_close - close sysfs for an UBI device. | 425 | * ubi_sysfs_close - close sysfs for an UBI device. |
426 | * @ubi: UBI device description object | 426 | * @ubi: UBI device description object |
427 | */ | 427 | */ |
428 | static void ubi_sysfs_close(struct ubi_device *ubi) | 428 | static void ubi_sysfs_close(struct ubi_device *ubi) |
429 | { | 429 | { |
430 | device_remove_file(&ubi->dev, &dev_mtd_num); | 430 | device_remove_file(&ubi->dev, &dev_mtd_num); |
431 | device_remove_file(&ubi->dev, &dev_bgt_enabled); | 431 | device_remove_file(&ubi->dev, &dev_bgt_enabled); |
432 | device_remove_file(&ubi->dev, &dev_min_io_size); | 432 | device_remove_file(&ubi->dev, &dev_min_io_size); |
433 | device_remove_file(&ubi->dev, &dev_max_vol_count); | 433 | device_remove_file(&ubi->dev, &dev_max_vol_count); |
434 | device_remove_file(&ubi->dev, &dev_bad_peb_count); | 434 | device_remove_file(&ubi->dev, &dev_bad_peb_count); |
435 | device_remove_file(&ubi->dev, &dev_reserved_for_bad); | 435 | device_remove_file(&ubi->dev, &dev_reserved_for_bad); |
436 | device_remove_file(&ubi->dev, &dev_max_ec); | 436 | device_remove_file(&ubi->dev, &dev_max_ec); |
437 | device_remove_file(&ubi->dev, &dev_volumes_count); | 437 | device_remove_file(&ubi->dev, &dev_volumes_count); |
438 | device_remove_file(&ubi->dev, &dev_total_eraseblocks); | 438 | device_remove_file(&ubi->dev, &dev_total_eraseblocks); |
439 | device_remove_file(&ubi->dev, &dev_avail_eraseblocks); | 439 | device_remove_file(&ubi->dev, &dev_avail_eraseblocks); |
440 | device_remove_file(&ubi->dev, &dev_eraseblock_size); | 440 | device_remove_file(&ubi->dev, &dev_eraseblock_size); |
441 | device_unregister(&ubi->dev); | 441 | device_unregister(&ubi->dev); |
442 | } | 442 | } |
443 | 443 | ||
444 | /** | 444 | /** |
445 | * kill_volumes - destroy all user volumes. | 445 | * kill_volumes - destroy all user volumes. |
446 | * @ubi: UBI device description object | 446 | * @ubi: UBI device description object |
447 | */ | 447 | */ |
448 | static void kill_volumes(struct ubi_device *ubi) | 448 | static void kill_volumes(struct ubi_device *ubi) |
449 | { | 449 | { |
450 | int i; | 450 | int i; |
451 | 451 | ||
452 | for (i = 0; i < ubi->vtbl_slots; i++) | 452 | for (i = 0; i < ubi->vtbl_slots; i++) |
453 | if (ubi->volumes[i]) | 453 | if (ubi->volumes[i]) |
454 | ubi_free_volume(ubi, ubi->volumes[i]); | 454 | ubi_free_volume(ubi, ubi->volumes[i]); |
455 | } | 455 | } |
456 | 456 | ||
457 | /** | 457 | /** |
458 | * uif_init - initialize user interfaces for an UBI device. | 458 | * uif_init - initialize user interfaces for an UBI device. |
459 | * @ubi: UBI device description object | 459 | * @ubi: UBI device description object |
460 | * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was | 460 | * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was |
461 | * taken, otherwise set to %0 | 461 | * taken, otherwise set to %0 |
462 | * | 462 | * |
463 | * This function initializes various user interfaces for an UBI device. If the | 463 | * This function initializes various user interfaces for an UBI device. If the |
464 | * initialization fails at an early stage, this function frees all the | 464 | * initialization fails at an early stage, this function frees all the |
465 | * resources it allocated, returns an error, and @ref is set to %0. However, | 465 | * resources it allocated, returns an error, and @ref is set to %0. However, |
466 | * if the initialization fails after the UBI device was registered in the | 466 | * if the initialization fails after the UBI device was registered in the |
467 | * driver core subsystem, this function takes a reference to @ubi->dev, because | 467 | * driver core subsystem, this function takes a reference to @ubi->dev, because |
468 | * otherwise the release function ('dev_release()') would free whole @ubi | 468 | * otherwise the release function ('dev_release()') would free whole @ubi |
469 | * object. The @ref argument is set to %1 in this case. The caller has to put | 469 | * object. The @ref argument is set to %1 in this case. The caller has to put |
470 | * this reference. | 470 | * this reference. |
471 | * | 471 | * |
472 | * This function returns zero in case of success and a negative error code in | 472 | * This function returns zero in case of success and a negative error code in |
473 | * case of failure. | 473 | * case of failure. |
474 | */ | 474 | */ |
475 | static int uif_init(struct ubi_device *ubi, int *ref) | 475 | static int uif_init(struct ubi_device *ubi, int *ref) |
476 | { | 476 | { |
477 | int i, err; | 477 | int i, err; |
478 | dev_t dev; | 478 | dev_t dev; |
479 | 479 | ||
480 | *ref = 0; | 480 | *ref = 0; |
481 | sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); | 481 | sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); |
482 | 482 | ||
483 | /* | 483 | /* |
484 | * Major numbers for the UBI character devices are allocated | 484 | * Major numbers for the UBI character devices are allocated |
485 | * dynamically. Major numbers of volume character devices are | 485 | * dynamically. Major numbers of volume character devices are |
486 | * equivalent to ones of the corresponding UBI character device. Minor | 486 | * equivalent to ones of the corresponding UBI character device. Minor |
487 | * numbers of UBI character devices are 0, while minor numbers of | 487 | * numbers of UBI character devices are 0, while minor numbers of |
488 | * volume character devices start from 1. Thus, we allocate one major | 488 | * volume character devices start from 1. Thus, we allocate one major |
489 | * number and ubi->vtbl_slots + 1 minor numbers. | 489 | * number and ubi->vtbl_slots + 1 minor numbers. |
490 | */ | 490 | */ |
491 | err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name); | 491 | err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name); |
492 | if (err) { | 492 | if (err) { |
493 | ubi_err("cannot register UBI character devices"); | 493 | ubi_err("cannot register UBI character devices"); |
494 | return err; | 494 | return err; |
495 | } | 495 | } |
496 | 496 | ||
497 | ubi_assert(MINOR(dev) == 0); | 497 | ubi_assert(MINOR(dev) == 0); |
498 | cdev_init(&ubi->cdev, &ubi_cdev_operations); | 498 | cdev_init(&ubi->cdev, &ubi_cdev_operations); |
499 | dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev)); | 499 | dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev)); |
500 | ubi->cdev.owner = THIS_MODULE; | 500 | ubi->cdev.owner = THIS_MODULE; |
501 | 501 | ||
502 | err = cdev_add(&ubi->cdev, dev, 1); | 502 | err = cdev_add(&ubi->cdev, dev, 1); |
503 | if (err) { | 503 | if (err) { |
504 | ubi_err("cannot add character device"); | 504 | ubi_err("cannot add character device"); |
505 | goto out_unreg; | 505 | goto out_unreg; |
506 | } | 506 | } |
507 | 507 | ||
508 | err = ubi_sysfs_init(ubi, ref); | 508 | err = ubi_sysfs_init(ubi, ref); |
509 | if (err) | 509 | if (err) |
510 | goto out_sysfs; | 510 | goto out_sysfs; |
511 | 511 | ||
512 | for (i = 0; i < ubi->vtbl_slots; i++) | 512 | for (i = 0; i < ubi->vtbl_slots; i++) |
513 | if (ubi->volumes[i]) { | 513 | if (ubi->volumes[i]) { |
514 | err = ubi_add_volume(ubi, ubi->volumes[i]); | 514 | err = ubi_add_volume(ubi, ubi->volumes[i]); |
515 | if (err) { | 515 | if (err) { |
516 | ubi_err("cannot add volume %d", i); | 516 | ubi_err("cannot add volume %d", i); |
517 | goto out_volumes; | 517 | goto out_volumes; |
518 | } | 518 | } |
519 | } | 519 | } |
520 | 520 | ||
521 | return 0; | 521 | return 0; |
522 | 522 | ||
523 | out_volumes: | 523 | out_volumes: |
524 | kill_volumes(ubi); | 524 | kill_volumes(ubi); |
525 | out_sysfs: | 525 | out_sysfs: |
526 | if (*ref) | 526 | if (*ref) |
527 | get_device(&ubi->dev); | 527 | get_device(&ubi->dev); |
528 | ubi_sysfs_close(ubi); | 528 | ubi_sysfs_close(ubi); |
529 | cdev_del(&ubi->cdev); | 529 | cdev_del(&ubi->cdev); |
530 | out_unreg: | 530 | out_unreg: |
531 | unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); | 531 | unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); |
532 | ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err); | 532 | ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err); |
533 | return err; | 533 | return err; |
534 | } | 534 | } |
535 | 535 | ||
536 | /** | 536 | /** |
537 | * uif_close - close user interfaces for an UBI device. | 537 | * uif_close - close user interfaces for an UBI device. |
538 | * @ubi: UBI device description object | 538 | * @ubi: UBI device description object |
539 | * | 539 | * |
540 | * Note, since this function un-registers UBI volume device objects (@vol->dev), | 540 | * Note, since this function un-registers UBI volume device objects (@vol->dev), |
541 | * the memory allocated voe the volumes is freed as well (in the release | 541 | * the memory allocated voe the volumes is freed as well (in the release |
542 | * function). | 542 | * function). |
543 | */ | 543 | */ |
544 | static void uif_close(struct ubi_device *ubi) | 544 | static void uif_close(struct ubi_device *ubi) |
545 | { | 545 | { |
546 | kill_volumes(ubi); | 546 | kill_volumes(ubi); |
547 | ubi_sysfs_close(ubi); | 547 | ubi_sysfs_close(ubi); |
548 | cdev_del(&ubi->cdev); | 548 | cdev_del(&ubi->cdev); |
549 | unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); | 549 | unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); |
550 | } | 550 | } |
551 | 551 | ||
552 | /** | 552 | /** |
553 | * free_internal_volumes - free internal volumes. | 553 | * free_internal_volumes - free internal volumes. |
554 | * @ubi: UBI device description object | 554 | * @ubi: UBI device description object |
555 | */ | 555 | */ |
556 | static void free_internal_volumes(struct ubi_device *ubi) | 556 | static void free_internal_volumes(struct ubi_device *ubi) |
557 | { | 557 | { |
558 | int i; | 558 | int i; |
559 | 559 | ||
560 | for (i = ubi->vtbl_slots; | 560 | for (i = ubi->vtbl_slots; |
561 | i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { | 561 | i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { |
562 | kfree(ubi->volumes[i]->eba_tbl); | 562 | kfree(ubi->volumes[i]->eba_tbl); |
563 | kfree(ubi->volumes[i]); | 563 | kfree(ubi->volumes[i]); |
564 | } | 564 | } |
565 | } | 565 | } |
566 | 566 | ||
567 | /** | 567 | /** |
568 | * attach_by_scanning - attach an MTD device using scanning method. | 568 | * attach_by_scanning - attach an MTD device using scanning method. |
569 | * @ubi: UBI device descriptor | 569 | * @ubi: UBI device descriptor |
570 | * | 570 | * |
571 | * This function returns zero in case of success and a negative error code in | 571 | * This function returns zero in case of success and a negative error code in |
572 | * case of failure. | 572 | * case of failure. |
573 | * | 573 | * |
574 | * Note, currently this is the only method to attach UBI devices. Hopefully in | 574 | * Note, currently this is the only method to attach UBI devices. Hopefully in |
575 | * the future we'll have more scalable attaching methods and avoid full media | 575 | * the future we'll have more scalable attaching methods and avoid full media |
576 | * scanning. But even in this case scanning will be needed as a fall-back | 576 | * scanning. But even in this case scanning will be needed as a fall-back |
577 | * attaching method if there are some on-flash table corruptions. | 577 | * attaching method if there are some on-flash table corruptions. |
578 | */ | 578 | */ |
579 | static int attach_by_scanning(struct ubi_device *ubi) | 579 | static int attach_by_scanning(struct ubi_device *ubi) |
580 | { | 580 | { |
581 | int err; | 581 | int err; |
582 | struct ubi_attach_info *ai; | 582 | struct ubi_attach_info *ai; |
583 | 583 | ||
584 | ai = ubi_scan(ubi); | 584 | ai = ubi_scan(ubi); |
585 | if (IS_ERR(ai)) | 585 | if (IS_ERR(ai)) |
586 | return PTR_ERR(ai); | 586 | return PTR_ERR(ai); |
587 | 587 | ||
588 | ubi->bad_peb_count = ai->bad_peb_count; | 588 | ubi->bad_peb_count = ai->bad_peb_count; |
589 | ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count; | 589 | ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count; |
590 | ubi->corr_peb_count = ai->corr_peb_count; | 590 | ubi->corr_peb_count = ai->corr_peb_count; |
591 | ubi->max_ec = ai->max_ec; | 591 | ubi->max_ec = ai->max_ec; |
592 | ubi->mean_ec = ai->mean_ec; | 592 | ubi->mean_ec = ai->mean_ec; |
593 | ubi_msg("max. sequence number: %llu", ai->max_sqnum); | 593 | ubi_msg("max. sequence number: %llu", ai->max_sqnum); |
594 | 594 | ||
595 | err = ubi_read_volume_table(ubi, ai); | 595 | err = ubi_read_volume_table(ubi, ai); |
596 | if (err) | 596 | if (err) |
597 | goto out_ai; | 597 | goto out_ai; |
598 | 598 | ||
599 | err = ubi_wl_init_scan(ubi, ai); | 599 | err = ubi_wl_init(ubi, ai); |
600 | if (err) | 600 | if (err) |
601 | goto out_vtbl; | 601 | goto out_vtbl; |
602 | 602 | ||
603 | err = ubi_eba_init_scan(ubi, ai); | 603 | err = ubi_eba_init(ubi, ai); |
604 | if (err) | 604 | if (err) |
605 | goto out_wl; | 605 | goto out_wl; |
606 | 606 | ||
607 | ubi_destroy_ai(ai); | 607 | ubi_destroy_ai(ai); |
608 | return 0; | 608 | return 0; |
609 | 609 | ||
610 | out_wl: | 610 | out_wl: |
611 | ubi_wl_close(ubi); | 611 | ubi_wl_close(ubi); |
612 | out_vtbl: | 612 | out_vtbl: |
613 | free_internal_volumes(ubi); | 613 | free_internal_volumes(ubi); |
614 | vfree(ubi->vtbl); | 614 | vfree(ubi->vtbl); |
615 | out_ai: | 615 | out_ai: |
616 | ubi_destroy_ai(ai); | 616 | ubi_destroy_ai(ai); |
617 | return err; | 617 | return err; |
618 | } | 618 | } |
619 | 619 | ||
620 | /** | 620 | /** |
621 | * io_init - initialize I/O sub-system for a given UBI device. | 621 | * io_init - initialize I/O sub-system for a given UBI device. |
622 | * @ubi: UBI device description object | 622 | * @ubi: UBI device description object |
623 | * | 623 | * |
624 | * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are | 624 | * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are |
625 | * assumed: | 625 | * assumed: |
626 | * o EC header is always at offset zero - this cannot be changed; | 626 | * o EC header is always at offset zero - this cannot be changed; |
627 | * o VID header starts just after the EC header at the closest address | 627 | * o VID header starts just after the EC header at the closest address |
628 | * aligned to @io->hdrs_min_io_size; | 628 | * aligned to @io->hdrs_min_io_size; |
629 | * o data starts just after the VID header at the closest address aligned to | 629 | * o data starts just after the VID header at the closest address aligned to |
630 | * @io->min_io_size | 630 | * @io->min_io_size |
631 | * | 631 | * |
632 | * This function returns zero in case of success and a negative error code in | 632 | * This function returns zero in case of success and a negative error code in |
633 | * case of failure. | 633 | * case of failure. |
634 | */ | 634 | */ |
635 | static int io_init(struct ubi_device *ubi) | 635 | static int io_init(struct ubi_device *ubi) |
636 | { | 636 | { |
637 | if (ubi->mtd->numeraseregions != 0) { | 637 | if (ubi->mtd->numeraseregions != 0) { |
638 | /* | 638 | /* |
639 | * Some flashes have several erase regions. Different regions | 639 | * Some flashes have several erase regions. Different regions |
640 | * may have different eraseblock size and other | 640 | * may have different eraseblock size and other |
641 | * characteristics. It looks like mostly multi-region flashes | 641 | * characteristics. It looks like mostly multi-region flashes |
642 | * have one "main" region and one or more small regions to | 642 | * have one "main" region and one or more small regions to |
643 | * store boot loader code or boot parameters or whatever. I | 643 | * store boot loader code or boot parameters or whatever. I |
644 | * guess we should just pick the largest region. But this is | 644 | * guess we should just pick the largest region. But this is |
645 | * not implemented. | 645 | * not implemented. |
646 | */ | 646 | */ |
647 | ubi_err("multiple regions, not implemented"); | 647 | ubi_err("multiple regions, not implemented"); |
648 | return -EINVAL; | 648 | return -EINVAL; |
649 | } | 649 | } |
650 | 650 | ||
651 | if (ubi->vid_hdr_offset < 0) | 651 | if (ubi->vid_hdr_offset < 0) |
652 | return -EINVAL; | 652 | return -EINVAL; |
653 | 653 | ||
654 | /* | 654 | /* |
655 | * Note, in this implementation we support MTD devices with 0x7FFFFFFF | 655 | * Note, in this implementation we support MTD devices with 0x7FFFFFFF |
656 | * physical eraseblocks maximum. | 656 | * physical eraseblocks maximum. |
657 | */ | 657 | */ |
658 | 658 | ||
659 | ubi->peb_size = ubi->mtd->erasesize; | 659 | ubi->peb_size = ubi->mtd->erasesize; |
660 | ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd); | 660 | ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd); |
661 | ubi->flash_size = ubi->mtd->size; | 661 | ubi->flash_size = ubi->mtd->size; |
662 | 662 | ||
663 | if (mtd_can_have_bb(ubi->mtd)) | 663 | if (mtd_can_have_bb(ubi->mtd)) |
664 | ubi->bad_allowed = 1; | 664 | ubi->bad_allowed = 1; |
665 | 665 | ||
666 | if (ubi->mtd->type == MTD_NORFLASH) { | 666 | if (ubi->mtd->type == MTD_NORFLASH) { |
667 | ubi_assert(ubi->mtd->writesize == 1); | 667 | ubi_assert(ubi->mtd->writesize == 1); |
668 | ubi->nor_flash = 1; | 668 | ubi->nor_flash = 1; |
669 | } | 669 | } |
670 | 670 | ||
671 | ubi->min_io_size = ubi->mtd->writesize; | 671 | ubi->min_io_size = ubi->mtd->writesize; |
672 | ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft; | 672 | ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft; |
673 | 673 | ||
674 | /* | 674 | /* |
675 | * Make sure minimal I/O unit is power of 2. Note, there is no | 675 | * Make sure minimal I/O unit is power of 2. Note, there is no |
676 | * fundamental reason for this assumption. It is just an optimization | 676 | * fundamental reason for this assumption. It is just an optimization |
677 | * which allows us to avoid costly division operations. | 677 | * which allows us to avoid costly division operations. |
678 | */ | 678 | */ |
679 | if (!is_power_of_2(ubi->min_io_size)) { | 679 | if (!is_power_of_2(ubi->min_io_size)) { |
680 | ubi_err("min. I/O unit (%d) is not power of 2", | 680 | ubi_err("min. I/O unit (%d) is not power of 2", |
681 | ubi->min_io_size); | 681 | ubi->min_io_size); |
682 | return -EINVAL; | 682 | return -EINVAL; |
683 | } | 683 | } |
684 | 684 | ||
685 | ubi_assert(ubi->hdrs_min_io_size > 0); | 685 | ubi_assert(ubi->hdrs_min_io_size > 0); |
686 | ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size); | 686 | ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size); |
687 | ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0); | 687 | ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0); |
688 | 688 | ||
689 | ubi->max_write_size = ubi->mtd->writebufsize; | 689 | ubi->max_write_size = ubi->mtd->writebufsize; |
690 | /* | 690 | /* |
691 | * Maximum write size has to be greater or equivalent to min. I/O | 691 | * Maximum write size has to be greater or equivalent to min. I/O |
692 | * size, and be multiple of min. I/O size. | 692 | * size, and be multiple of min. I/O size. |
693 | */ | 693 | */ |
694 | if (ubi->max_write_size < ubi->min_io_size || | 694 | if (ubi->max_write_size < ubi->min_io_size || |
695 | ubi->max_write_size % ubi->min_io_size || | 695 | ubi->max_write_size % ubi->min_io_size || |
696 | !is_power_of_2(ubi->max_write_size)) { | 696 | !is_power_of_2(ubi->max_write_size)) { |
697 | ubi_err("bad write buffer size %d for %d min. I/O unit", | 697 | ubi_err("bad write buffer size %d for %d min. I/O unit", |
698 | ubi->max_write_size, ubi->min_io_size); | 698 | ubi->max_write_size, ubi->min_io_size); |
699 | return -EINVAL; | 699 | return -EINVAL; |
700 | } | 700 | } |
701 | 701 | ||
702 | /* Calculate default aligned sizes of EC and VID headers */ | 702 | /* Calculate default aligned sizes of EC and VID headers */ |
703 | ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size); | 703 | ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size); |
704 | ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size); | 704 | ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size); |
705 | 705 | ||
706 | dbg_msg("min_io_size %d", ubi->min_io_size); | 706 | dbg_msg("min_io_size %d", ubi->min_io_size); |
707 | dbg_msg("max_write_size %d", ubi->max_write_size); | 707 | dbg_msg("max_write_size %d", ubi->max_write_size); |
708 | dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size); | 708 | dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size); |
709 | dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize); | 709 | dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize); |
710 | dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize); | 710 | dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize); |
711 | 711 | ||
712 | if (ubi->vid_hdr_offset == 0) | 712 | if (ubi->vid_hdr_offset == 0) |
713 | /* Default offset */ | 713 | /* Default offset */ |
714 | ubi->vid_hdr_offset = ubi->vid_hdr_aloffset = | 714 | ubi->vid_hdr_offset = ubi->vid_hdr_aloffset = |
715 | ubi->ec_hdr_alsize; | 715 | ubi->ec_hdr_alsize; |
716 | else { | 716 | else { |
717 | ubi->vid_hdr_aloffset = ubi->vid_hdr_offset & | 717 | ubi->vid_hdr_aloffset = ubi->vid_hdr_offset & |
718 | ~(ubi->hdrs_min_io_size - 1); | 718 | ~(ubi->hdrs_min_io_size - 1); |
719 | ubi->vid_hdr_shift = ubi->vid_hdr_offset - | 719 | ubi->vid_hdr_shift = ubi->vid_hdr_offset - |
720 | ubi->vid_hdr_aloffset; | 720 | ubi->vid_hdr_aloffset; |
721 | } | 721 | } |
722 | 722 | ||
723 | /* Similar for the data offset */ | 723 | /* Similar for the data offset */ |
724 | ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE; | 724 | ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE; |
725 | ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size); | 725 | ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size); |
726 | 726 | ||
727 | dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset); | 727 | dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset); |
728 | dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset); | 728 | dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset); |
729 | dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift); | 729 | dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift); |
730 | dbg_msg("leb_start %d", ubi->leb_start); | 730 | dbg_msg("leb_start %d", ubi->leb_start); |
731 | 731 | ||
732 | /* The shift must be aligned to 32-bit boundary */ | 732 | /* The shift must be aligned to 32-bit boundary */ |
733 | if (ubi->vid_hdr_shift % 4) { | 733 | if (ubi->vid_hdr_shift % 4) { |
734 | ubi_err("unaligned VID header shift %d", | 734 | ubi_err("unaligned VID header shift %d", |
735 | ubi->vid_hdr_shift); | 735 | ubi->vid_hdr_shift); |
736 | return -EINVAL; | 736 | return -EINVAL; |
737 | } | 737 | } |
738 | 738 | ||
739 | /* Check sanity */ | 739 | /* Check sanity */ |
740 | if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE || | 740 | if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE || |
741 | ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE || | 741 | ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE || |
742 | ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE || | 742 | ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE || |
743 | ubi->leb_start & (ubi->min_io_size - 1)) { | 743 | ubi->leb_start & (ubi->min_io_size - 1)) { |
744 | ubi_err("bad VID header (%d) or data offsets (%d)", | 744 | ubi_err("bad VID header (%d) or data offsets (%d)", |
745 | ubi->vid_hdr_offset, ubi->leb_start); | 745 | ubi->vid_hdr_offset, ubi->leb_start); |
746 | return -EINVAL; | 746 | return -EINVAL; |
747 | } | 747 | } |
748 | 748 | ||
749 | /* | 749 | /* |
750 | * Set maximum amount of physical erroneous eraseblocks to be 10%. | 750 | * Set maximum amount of physical erroneous eraseblocks to be 10%. |
751 | * Erroneous PEB are those which have read errors. | 751 | * Erroneous PEB are those which have read errors. |
752 | */ | 752 | */ |
753 | ubi->max_erroneous = ubi->peb_count / 10; | 753 | ubi->max_erroneous = ubi->peb_count / 10; |
754 | if (ubi->max_erroneous < 16) | 754 | if (ubi->max_erroneous < 16) |
755 | ubi->max_erroneous = 16; | 755 | ubi->max_erroneous = 16; |
756 | dbg_msg("max_erroneous %d", ubi->max_erroneous); | 756 | dbg_msg("max_erroneous %d", ubi->max_erroneous); |
757 | 757 | ||
758 | /* | 758 | /* |
759 | * It may happen that EC and VID headers are situated in one minimal | 759 | * It may happen that EC and VID headers are situated in one minimal |
760 | * I/O unit. In this case we can only accept this UBI image in | 760 | * I/O unit. In this case we can only accept this UBI image in |
761 | * read-only mode. | 761 | * read-only mode. |
762 | */ | 762 | */ |
763 | if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) { | 763 | if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) { |
764 | ubi_warn("EC and VID headers are in the same minimal I/O unit, " | 764 | ubi_warn("EC and VID headers are in the same minimal I/O unit, " |
765 | "switch to read-only mode"); | 765 | "switch to read-only mode"); |
766 | ubi->ro_mode = 1; | 766 | ubi->ro_mode = 1; |
767 | } | 767 | } |
768 | 768 | ||
769 | ubi->leb_size = ubi->peb_size - ubi->leb_start; | 769 | ubi->leb_size = ubi->peb_size - ubi->leb_start; |
770 | 770 | ||
771 | if (!(ubi->mtd->flags & MTD_WRITEABLE)) { | 771 | if (!(ubi->mtd->flags & MTD_WRITEABLE)) { |
772 | ubi_msg("MTD device %d is write-protected, attach in " | 772 | ubi_msg("MTD device %d is write-protected, attach in " |
773 | "read-only mode", ubi->mtd->index); | 773 | "read-only mode", ubi->mtd->index); |
774 | ubi->ro_mode = 1; | 774 | ubi->ro_mode = 1; |
775 | } | 775 | } |
776 | 776 | ||
777 | ubi_msg("physical eraseblock size: %d bytes (%d KiB)", | 777 | ubi_msg("physical eraseblock size: %d bytes (%d KiB)", |
778 | ubi->peb_size, ubi->peb_size >> 10); | 778 | ubi->peb_size, ubi->peb_size >> 10); |
779 | ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size); | 779 | ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size); |
780 | ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size); | 780 | ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size); |
781 | if (ubi->hdrs_min_io_size != ubi->min_io_size) | 781 | if (ubi->hdrs_min_io_size != ubi->min_io_size) |
782 | ubi_msg("sub-page size: %d", | 782 | ubi_msg("sub-page size: %d", |
783 | ubi->hdrs_min_io_size); | 783 | ubi->hdrs_min_io_size); |
784 | ubi_msg("VID header offset: %d (aligned %d)", | 784 | ubi_msg("VID header offset: %d (aligned %d)", |
785 | ubi->vid_hdr_offset, ubi->vid_hdr_aloffset); | 785 | ubi->vid_hdr_offset, ubi->vid_hdr_aloffset); |
786 | ubi_msg("data offset: %d", ubi->leb_start); | 786 | ubi_msg("data offset: %d", ubi->leb_start); |
787 | 787 | ||
788 | /* | 788 | /* |
789 | * Note, ideally we would initialize @ubi->bad_peb_count here. But | 789 | * Note, ideally we would initialize @ubi->bad_peb_count here. But |
790 | * unfortunately, MTD does not provide this information. We would have | 790 | * unfortunately, MTD does not provide this information. We would have |
791 | * to loop over all physical eraseblocks and invoke mtd->block_is_bad() | 791 | * to loop over all physical eraseblocks and invoke mtd->block_is_bad() |
792 | * for each of them. So we leave @ubi->bad_peb_count | 792 | * for each of them. So we leave @ubi->bad_peb_count |
793 | * uninitialized for now. | 793 | * uninitialized for now. |
794 | */ | 794 | */ |
795 | 795 | ||
796 | return 0; | 796 | return 0; |
797 | } | 797 | } |
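
A minimal userspace sketch of the data-offset math in io_init() above (illustration only, not part of the patch; ALIGN is a local stand-in for the kernel macro and all device numbers are hypothetical): the data area starts at the VID header offset plus the 64-byte VID header, rounded up to the minimal I/O unit.

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))  /* local stand-in for the kernel macro */
#define UBI_VID_HDR_SIZE 64                        /* matches the BUILD_BUG_ON() further down in this file */

int main(void)
{
	int min_io_size = 2048;                    /* hypothetical NAND page size */
	int peb_size = 128 * 1024;                 /* hypothetical 128 KiB physical eraseblock */
	int vid_hdr_offset = 2048;                 /* hypothetical VID header offset */
	int leb_start = ALIGN(vid_hdr_offset + UBI_VID_HDR_SIZE, min_io_size);

	printf("leb_start = %d\n", leb_start);             /* 4096 */
	printf("leb_size  = %d\n", peb_size - leb_start);  /* 126976 */
	return 0;
}

With these hypothetical numbers the sanity checks above pass: leb_start is a multiple of min_io_size, lies above vid_hdr_offset + UBI_VID_HDR_SIZE, and is well below peb_size - UBI_VID_HDR_SIZE.
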
798 | 798 | ||
799 | /** | 799 | /** |
800 | * autoresize - re-size the volume which has the "auto-resize" flag set. | 800 | * autoresize - re-size the volume which has the "auto-resize" flag set. |
801 | * @ubi: UBI device description object | 801 | * @ubi: UBI device description object |
802 | * @vol_id: ID of the volume to re-size | 802 | * @vol_id: ID of the volume to re-size |
803 | * | 803 | * |
804 | * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in | 804 | * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in |
805 | * the volume table to the largest possible size. See comments in ubi-header.h | 805 | * the volume table to the largest possible size. See comments in ubi-header.h |
806 | * for more description of the flag. Returns zero in case of success and a | 806 | * for more description of the flag. Returns zero in case of success and a |
807 | * negative error code in case of failure. | 807 | * negative error code in case of failure. |
808 | */ | 808 | */ |
809 | static int autoresize(struct ubi_device *ubi, int vol_id) | 809 | static int autoresize(struct ubi_device *ubi, int vol_id) |
810 | { | 810 | { |
811 | struct ubi_volume_desc desc; | 811 | struct ubi_volume_desc desc; |
812 | struct ubi_volume *vol = ubi->volumes[vol_id]; | 812 | struct ubi_volume *vol = ubi->volumes[vol_id]; |
813 | int err, old_reserved_pebs = vol->reserved_pebs; | 813 | int err, old_reserved_pebs = vol->reserved_pebs; |
814 | 814 | ||
815 | /* | 815 | /* |
816 | * Clear the auto-resize flag in the in-memory copy of the | 816 | * Clear the auto-resize flag in the in-memory copy of the |
817 | * volume table, and 'ubi_resize_volume()' will propagate this change | 817 | * volume table, and 'ubi_resize_volume()' will propagate this change |
818 | * to the flash. | 818 | * to the flash. |
819 | */ | 819 | */ |
820 | ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG; | 820 | ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG; |
821 | 821 | ||
822 | if (ubi->avail_pebs == 0) { | 822 | if (ubi->avail_pebs == 0) { |
823 | struct ubi_vtbl_record vtbl_rec; | 823 | struct ubi_vtbl_record vtbl_rec; |
824 | 824 | ||
825 | /* | 825 | /* |
826 | * No available PEBs to re-size the volume, clear the flag on | 826 | * No available PEBs to re-size the volume, clear the flag on |
827 | * flash and exit. | 827 | * flash and exit. |
828 | */ | 828 | */ |
829 | memcpy(&vtbl_rec, &ubi->vtbl[vol_id], | 829 | memcpy(&vtbl_rec, &ubi->vtbl[vol_id], |
830 | sizeof(struct ubi_vtbl_record)); | 830 | sizeof(struct ubi_vtbl_record)); |
831 | err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); | 831 | err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); |
832 | if (err) | 832 | if (err) |
833 | ubi_err("cannot clean auto-resize flag for volume %d", | 833 | ubi_err("cannot clean auto-resize flag for volume %d", |
834 | vol_id); | 834 | vol_id); |
835 | } else { | 835 | } else { |
836 | desc.vol = vol; | 836 | desc.vol = vol; |
837 | err = ubi_resize_volume(&desc, | 837 | err = ubi_resize_volume(&desc, |
838 | old_reserved_pebs + ubi->avail_pebs); | 838 | old_reserved_pebs + ubi->avail_pebs); |
839 | if (err) | 839 | if (err) |
840 | ubi_err("cannot auto-resize volume %d", vol_id); | 840 | ubi_err("cannot auto-resize volume %d", vol_id); |
841 | } | 841 | } |
842 | 842 | ||
843 | if (err) | 843 | if (err) |
844 | return err; | 844 | return err; |
845 | 845 | ||
846 | ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id, | 846 | ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id, |
847 | vol->name, old_reserved_pebs, vol->reserved_pebs); | 847 | vol->name, old_reserved_pebs, vol->reserved_pebs); |
848 | return 0; | 848 | return 0; |
849 | } | 849 | } |
850 | 850 | ||
851 | /** | 851 | /** |
852 | * ubi_attach_mtd_dev - attach an MTD device. | 852 | * ubi_attach_mtd_dev - attach an MTD device. |
853 | * @mtd: MTD device description object | 853 | * @mtd: MTD device description object |
854 | * @ubi_num: number to assign to the new UBI device | 854 | * @ubi_num: number to assign to the new UBI device |
855 | * @vid_hdr_offset: VID header offset | 855 | * @vid_hdr_offset: VID header offset |
856 | * | 856 | * |
857 | * This function attaches MTD device @mtd to UBI and assigns number @ubi_num | 857 | * This function attaches MTD device @mtd to UBI and assigns number @ubi_num |
858 | * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in | 858 | * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in |
859 | * which case this function finds a vacant device number and assigns it | 859 | * which case this function finds a vacant device number and assigns it |
860 | * automatically. Returns the new UBI device number in case of success and a | 860 | * automatically. Returns the new UBI device number in case of success and a |
861 | * negative error code in case of failure. | 861 | * negative error code in case of failure. |
862 | * | 862 | * |
863 | * Note, invocations of this function have to be serialized by the | 863 | * Note, invocations of this function have to be serialized by the |
864 | * @ubi_devices_mutex. | 864 | * @ubi_devices_mutex. |
865 | */ | 865 | */ |
866 | int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) | 866 | int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) |
867 | { | 867 | { |
868 | struct ubi_device *ubi; | 868 | struct ubi_device *ubi; |
869 | int i, err, ref = 0; | 869 | int i, err, ref = 0; |
870 | 870 | ||
871 | /* | 871 | /* |
872 | * Check if we already have the same MTD device attached. | 872 | * Check if we already have the same MTD device attached. |
873 | * | 873 | * |
874 | * Note, this function assumes that UBI device creation and deletion | 874 | * Note, this function assumes that UBI device creation and deletion |
875 | * are serialized, so it does not take the &ubi_devices_lock. | 875 | * are serialized, so it does not take the &ubi_devices_lock. |
876 | */ | 876 | */ |
877 | for (i = 0; i < UBI_MAX_DEVICES; i++) { | 877 | for (i = 0; i < UBI_MAX_DEVICES; i++) { |
878 | ubi = ubi_devices[i]; | 878 | ubi = ubi_devices[i]; |
879 | if (ubi && mtd->index == ubi->mtd->index) { | 879 | if (ubi && mtd->index == ubi->mtd->index) { |
880 | ubi_err("mtd%d is already attached to ubi%d", | 880 | ubi_err("mtd%d is already attached to ubi%d", |
881 | mtd->index, i); | 881 | mtd->index, i); |
882 | return -EEXIST; | 882 | return -EEXIST; |
883 | } | 883 | } |
884 | } | 884 | } |
885 | 885 | ||
886 | /* | 886 | /* |
887 | * Make sure this MTD device is not emulated on top of an UBI volume | 887 | * Make sure this MTD device is not emulated on top of an UBI volume |
888 | * already. Well, generally this recursion works fine, but it causes | 888 | * already. Well, generally this recursion works fine, but it causes |
889 | * problems - for example, the UBI module takes a reference to itself | 889 | * problems - for example, the UBI module takes a reference to itself |
890 | * by attaching (and thus opening) the emulated MTD device, which | 890 | * by attaching (and thus opening) the emulated MTD device, which |
891 | * makes it impossible to unload the module. And in general it makes | 891 | * makes it impossible to unload the module. And in general it makes |
892 | * no sense to attach emulated MTD devices, so we prohibit this. | 892 | * no sense to attach emulated MTD devices, so we prohibit this. |
893 | */ | 893 | */ |
894 | if (mtd->type == MTD_UBIVOLUME) { | 894 | if (mtd->type == MTD_UBIVOLUME) { |
895 | ubi_err("refuse attaching mtd%d - it is already emulated on " | 895 | ubi_err("refuse attaching mtd%d - it is already emulated on " |
896 | "top of UBI", mtd->index); | 896 | "top of UBI", mtd->index); |
897 | return -EINVAL; | 897 | return -EINVAL; |
898 | } | 898 | } |
899 | 899 | ||
900 | if (ubi_num == UBI_DEV_NUM_AUTO) { | 900 | if (ubi_num == UBI_DEV_NUM_AUTO) { |
901 | /* Search for an empty slot in the @ubi_devices array */ | 901 | /* Search for an empty slot in the @ubi_devices array */ |
902 | for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) | 902 | for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) |
903 | if (!ubi_devices[ubi_num]) | 903 | if (!ubi_devices[ubi_num]) |
904 | break; | 904 | break; |
905 | if (ubi_num == UBI_MAX_DEVICES) { | 905 | if (ubi_num == UBI_MAX_DEVICES) { |
906 | ubi_err("only %d UBI devices may be created", | 906 | ubi_err("only %d UBI devices may be created", |
907 | UBI_MAX_DEVICES); | 907 | UBI_MAX_DEVICES); |
908 | return -ENFILE; | 908 | return -ENFILE; |
909 | } | 909 | } |
910 | } else { | 910 | } else { |
911 | if (ubi_num >= UBI_MAX_DEVICES) | 911 | if (ubi_num >= UBI_MAX_DEVICES) |
912 | return -EINVAL; | 912 | return -EINVAL; |
913 | 913 | ||
914 | /* Make sure ubi_num is not busy */ | 914 | /* Make sure ubi_num is not busy */ |
915 | if (ubi_devices[ubi_num]) { | 915 | if (ubi_devices[ubi_num]) { |
916 | ubi_err("ubi%d already exists", ubi_num); | 916 | ubi_err("ubi%d already exists", ubi_num); |
917 | return -EEXIST; | 917 | return -EEXIST; |
918 | } | 918 | } |
919 | } | 919 | } |
920 | 920 | ||
921 | ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL); | 921 | ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL); |
922 | if (!ubi) | 922 | if (!ubi) |
923 | return -ENOMEM; | 923 | return -ENOMEM; |
924 | 924 | ||
925 | ubi->mtd = mtd; | 925 | ubi->mtd = mtd; |
926 | ubi->ubi_num = ubi_num; | 926 | ubi->ubi_num = ubi_num; |
927 | ubi->vid_hdr_offset = vid_hdr_offset; | 927 | ubi->vid_hdr_offset = vid_hdr_offset; |
928 | ubi->autoresize_vol_id = -1; | 928 | ubi->autoresize_vol_id = -1; |
929 | 929 | ||
930 | mutex_init(&ubi->buf_mutex); | 930 | mutex_init(&ubi->buf_mutex); |
931 | mutex_init(&ubi->ckvol_mutex); | 931 | mutex_init(&ubi->ckvol_mutex); |
932 | mutex_init(&ubi->device_mutex); | 932 | mutex_init(&ubi->device_mutex); |
933 | spin_lock_init(&ubi->volumes_lock); | 933 | spin_lock_init(&ubi->volumes_lock); |
934 | 934 | ||
935 | ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num); | 935 | ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num); |
936 | dbg_msg("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb)); | 936 | dbg_msg("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb)); |
937 | dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry)); | 937 | dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry)); |
938 | 938 | ||
939 | err = io_init(ubi); | 939 | err = io_init(ubi); |
940 | if (err) | 940 | if (err) |
941 | goto out_free; | 941 | goto out_free; |
942 | 942 | ||
943 | err = -ENOMEM; | 943 | err = -ENOMEM; |
944 | ubi->peb_buf = vmalloc(ubi->peb_size); | 944 | ubi->peb_buf = vmalloc(ubi->peb_size); |
945 | if (!ubi->peb_buf) | 945 | if (!ubi->peb_buf) |
946 | goto out_free; | 946 | goto out_free; |
947 | 947 | ||
948 | err = ubi_debugging_init_dev(ubi); | 948 | err = ubi_debugging_init_dev(ubi); |
949 | if (err) | 949 | if (err) |
950 | goto out_free; | 950 | goto out_free; |
951 | 951 | ||
952 | err = attach_by_scanning(ubi); | 952 | err = attach_by_scanning(ubi); |
953 | if (err) { | 953 | if (err) { |
954 | ubi_err("failed to attach by scanning, error %d", err); | 954 | ubi_err("failed to attach by scanning, error %d", err); |
955 | goto out_debugging; | 955 | goto out_debugging; |
956 | } | 956 | } |
957 | 957 | ||
958 | if (ubi->autoresize_vol_id != -1) { | 958 | if (ubi->autoresize_vol_id != -1) { |
959 | err = autoresize(ubi, ubi->autoresize_vol_id); | 959 | err = autoresize(ubi, ubi->autoresize_vol_id); |
960 | if (err) | 960 | if (err) |
961 | goto out_detach; | 961 | goto out_detach; |
962 | } | 962 | } |
963 | 963 | ||
964 | err = uif_init(ubi, &ref); | 964 | err = uif_init(ubi, &ref); |
965 | if (err) | 965 | if (err) |
966 | goto out_detach; | 966 | goto out_detach; |
967 | 967 | ||
968 | err = ubi_debugfs_init_dev(ubi); | 968 | err = ubi_debugfs_init_dev(ubi); |
969 | if (err) | 969 | if (err) |
970 | goto out_uif; | 970 | goto out_uif; |
971 | 971 | ||
972 | ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name); | 972 | ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name); |
973 | if (IS_ERR(ubi->bgt_thread)) { | 973 | if (IS_ERR(ubi->bgt_thread)) { |
974 | err = PTR_ERR(ubi->bgt_thread); | 974 | err = PTR_ERR(ubi->bgt_thread); |
975 | ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, | 975 | ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, |
976 | err); | 976 | err); |
977 | goto out_debugfs; | 977 | goto out_debugfs; |
978 | } | 978 | } |
979 | 979 | ||
980 | ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num); | 980 | ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num); |
981 | ubi_msg("MTD device name: \"%s\"", mtd->name); | 981 | ubi_msg("MTD device name: \"%s\"", mtd->name); |
982 | ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20); | 982 | ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20); |
983 | ubi_msg("number of good PEBs: %d", ubi->good_peb_count); | 983 | ubi_msg("number of good PEBs: %d", ubi->good_peb_count); |
984 | ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count); | 984 | ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count); |
985 | ubi_msg("number of corrupted PEBs: %d", ubi->corr_peb_count); | 985 | ubi_msg("number of corrupted PEBs: %d", ubi->corr_peb_count); |
986 | ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots); | 986 | ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots); |
987 | ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD); | 987 | ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD); |
988 | ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT); | 988 | ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT); |
989 | ubi_msg("number of user volumes: %d", | 989 | ubi_msg("number of user volumes: %d", |
990 | ubi->vol_count - UBI_INT_VOL_COUNT); | 990 | ubi->vol_count - UBI_INT_VOL_COUNT); |
991 | ubi_msg("available PEBs: %d", ubi->avail_pebs); | 991 | ubi_msg("available PEBs: %d", ubi->avail_pebs); |
992 | ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs); | 992 | ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs); |
993 | ubi_msg("number of PEBs reserved for bad PEB handling: %d", | 993 | ubi_msg("number of PEBs reserved for bad PEB handling: %d", |
994 | ubi->beb_rsvd_pebs); | 994 | ubi->beb_rsvd_pebs); |
995 | ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); | 995 | ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); |
996 | ubi_msg("image sequence number: %d", ubi->image_seq); | 996 | ubi_msg("image sequence number: %d", ubi->image_seq); |
997 | 997 | ||
998 | /* | 998 | /* |
999 | * The below lock makes sure we do not race with 'ubi_thread()' which | 999 | * The below lock makes sure we do not race with 'ubi_thread()' which |
1000 | * checks @ubi->thread_enabled. Otherwise we may fail to wake it up. | 1000 | * checks @ubi->thread_enabled. Otherwise we may fail to wake it up. |
1001 | */ | 1001 | */ |
1002 | spin_lock(&ubi->wl_lock); | 1002 | spin_lock(&ubi->wl_lock); |
1003 | ubi->thread_enabled = 1; | 1003 | ubi->thread_enabled = 1; |
1004 | wake_up_process(ubi->bgt_thread); | 1004 | wake_up_process(ubi->bgt_thread); |
1005 | spin_unlock(&ubi->wl_lock); | 1005 | spin_unlock(&ubi->wl_lock); |
1006 | 1006 | ||
1007 | ubi_devices[ubi_num] = ubi; | 1007 | ubi_devices[ubi_num] = ubi; |
1008 | ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL); | 1008 | ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL); |
1009 | return ubi_num; | 1009 | return ubi_num; |
1010 | 1010 | ||
1011 | out_debugfs: | 1011 | out_debugfs: |
1012 | ubi_debugfs_exit_dev(ubi); | 1012 | ubi_debugfs_exit_dev(ubi); |
1013 | out_uif: | 1013 | out_uif: |
1014 | get_device(&ubi->dev); | 1014 | get_device(&ubi->dev); |
1015 | ubi_assert(ref); | 1015 | ubi_assert(ref); |
1016 | uif_close(ubi); | 1016 | uif_close(ubi); |
1017 | out_detach: | 1017 | out_detach: |
1018 | ubi_wl_close(ubi); | 1018 | ubi_wl_close(ubi); |
1019 | free_internal_volumes(ubi); | 1019 | free_internal_volumes(ubi); |
1020 | vfree(ubi->vtbl); | 1020 | vfree(ubi->vtbl); |
1021 | out_debugging: | 1021 | out_debugging: |
1022 | ubi_debugging_exit_dev(ubi); | 1022 | ubi_debugging_exit_dev(ubi); |
1023 | out_free: | 1023 | out_free: |
1024 | vfree(ubi->peb_buf); | 1024 | vfree(ubi->peb_buf); |
1025 | if (ref) | 1025 | if (ref) |
1026 | put_device(&ubi->dev); | 1026 | put_device(&ubi->dev); |
1027 | else | 1027 | else |
1028 | kfree(ubi); | 1028 | kfree(ubi); |
1029 | return err; | 1029 | return err; |
1030 | } | 1030 | } |
1031 | 1031 | ||
1032 | /** | 1032 | /** |
1033 | * ubi_detach_mtd_dev - detach an MTD device. | 1033 | * ubi_detach_mtd_dev - detach an MTD device. |
1034 | * @ubi_num: UBI device number to detach from | 1034 | * @ubi_num: UBI device number to detach from |
1035 | * @anyway: detach MTD even if device reference count is not zero | 1035 | * @anyway: detach MTD even if device reference count is not zero |
1036 | * | 1036 | * |
1037 | * This function destroys an UBI device number @ubi_num and detaches the | 1037 | * This function destroys an UBI device number @ubi_num and detaches the |
1038 | * underlying MTD device. Returns zero in case of success and %-EBUSY if the | 1038 | * underlying MTD device. Returns zero in case of success and %-EBUSY if the |
1039 | * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not | 1039 | * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not |
1040 | * exist. | 1040 | * exist. |
1041 | * | 1041 | * |
1042 | * Note, invocations of this function have to be serialized by the | 1042 | * Note, invocations of this function have to be serialized by the |
1043 | * @ubi_devices_mutex. | 1043 | * @ubi_devices_mutex. |
1044 | */ | 1044 | */ |
1045 | int ubi_detach_mtd_dev(int ubi_num, int anyway) | 1045 | int ubi_detach_mtd_dev(int ubi_num, int anyway) |
1046 | { | 1046 | { |
1047 | struct ubi_device *ubi; | 1047 | struct ubi_device *ubi; |
1048 | 1048 | ||
1049 | if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) | 1049 | if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) |
1050 | return -EINVAL; | 1050 | return -EINVAL; |
1051 | 1051 | ||
1052 | ubi = ubi_get_device(ubi_num); | 1052 | ubi = ubi_get_device(ubi_num); |
1053 | if (!ubi) | 1053 | if (!ubi) |
1054 | return -EINVAL; | 1054 | return -EINVAL; |
1055 | 1055 | ||
1056 | spin_lock(&ubi_devices_lock); | 1056 | spin_lock(&ubi_devices_lock); |
1057 | put_device(&ubi->dev); | 1057 | put_device(&ubi->dev); |
1058 | ubi->ref_count -= 1; | 1058 | ubi->ref_count -= 1; |
1059 | if (ubi->ref_count) { | 1059 | if (ubi->ref_count) { |
1060 | if (!anyway) { | 1060 | if (!anyway) { |
1061 | spin_unlock(&ubi_devices_lock); | 1061 | spin_unlock(&ubi_devices_lock); |
1062 | return -EBUSY; | 1062 | return -EBUSY; |
1063 | } | 1063 | } |
1064 | /* This may only happen if there is a bug */ | 1064 | /* This may only happen if there is a bug */ |
1065 | ubi_err("%s reference count %d, destroy anyway", | 1065 | ubi_err("%s reference count %d, destroy anyway", |
1066 | ubi->ubi_name, ubi->ref_count); | 1066 | ubi->ubi_name, ubi->ref_count); |
1067 | } | 1067 | } |
1068 | ubi_devices[ubi_num] = NULL; | 1068 | ubi_devices[ubi_num] = NULL; |
1069 | spin_unlock(&ubi_devices_lock); | 1069 | spin_unlock(&ubi_devices_lock); |
1070 | 1070 | ||
1071 | ubi_assert(ubi_num == ubi->ubi_num); | 1071 | ubi_assert(ubi_num == ubi->ubi_num); |
1072 | ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL); | 1072 | ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL); |
1073 | dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); | 1073 | dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); |
1074 | 1074 | ||
1075 | /* | 1075 | /* |
1076 | * Before freeing anything, we have to stop the background thread to | 1076 | * Before freeing anything, we have to stop the background thread to |
1077 | * prevent it from doing anything on this device while we are freeing. | 1077 | * prevent it from doing anything on this device while we are freeing. |
1078 | */ | 1078 | */ |
1079 | if (ubi->bgt_thread) | 1079 | if (ubi->bgt_thread) |
1080 | kthread_stop(ubi->bgt_thread); | 1080 | kthread_stop(ubi->bgt_thread); |
1081 | 1081 | ||
1082 | /* | 1082 | /* |
1083 | * Get a reference to the device in order to prevent 'dev_release()' | 1083 | * Get a reference to the device in order to prevent 'dev_release()' |
1084 | * from freeing the @ubi object. | 1084 | * from freeing the @ubi object. |
1085 | */ | 1085 | */ |
1086 | get_device(&ubi->dev); | 1086 | get_device(&ubi->dev); |
1087 | 1087 | ||
1088 | ubi_debugfs_exit_dev(ubi); | 1088 | ubi_debugfs_exit_dev(ubi); |
1089 | uif_close(ubi); | 1089 | uif_close(ubi); |
1090 | ubi_wl_close(ubi); | 1090 | ubi_wl_close(ubi); |
1091 | free_internal_volumes(ubi); | 1091 | free_internal_volumes(ubi); |
1092 | vfree(ubi->vtbl); | 1092 | vfree(ubi->vtbl); |
1093 | put_mtd_device(ubi->mtd); | 1093 | put_mtd_device(ubi->mtd); |
1094 | ubi_debugging_exit_dev(ubi); | 1094 | ubi_debugging_exit_dev(ubi); |
1095 | vfree(ubi->peb_buf); | 1095 | vfree(ubi->peb_buf); |
1096 | ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); | 1096 | ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); |
1097 | put_device(&ubi->dev); | 1097 | put_device(&ubi->dev); |
1098 | return 0; | 1098 | return 0; |
1099 | } | 1099 | } |
1100 | 1100 | ||
1101 | /** | 1101 | /** |
1102 | * open_mtd_by_chdev - open an MTD device by its character device node path. | 1102 | * open_mtd_by_chdev - open an MTD device by its character device node path. |
1103 | * @mtd_dev: MTD character device node path | 1103 | * @mtd_dev: MTD character device node path |
1104 | * | 1104 | * |
1105 | * This helper function opens an MTD device by its character device node path. | 1105 | * This helper function opens an MTD device by its character device node path. |
1106 | * Returns MTD device description object in case of success and a negative | 1106 | * Returns MTD device description object in case of success and a negative |
1107 | * error code in case of failure. | 1107 | * error code in case of failure. |
1108 | */ | 1108 | */ |
1109 | static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev) | 1109 | static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev) |
1110 | { | 1110 | { |
1111 | int err, major, minor, mode; | 1111 | int err, major, minor, mode; |
1112 | struct path path; | 1112 | struct path path; |
1113 | 1113 | ||
1114 | /* Probably this is an MTD character device node path */ | 1114 | /* Probably this is an MTD character device node path */ |
1115 | err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path); | 1115 | err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path); |
1116 | if (err) | 1116 | if (err) |
1117 | return ERR_PTR(err); | 1117 | return ERR_PTR(err); |
1118 | 1118 | ||
1119 | /* MTD device number is defined by the major / minor numbers */ | 1119 | /* MTD device number is defined by the major / minor numbers */ |
1120 | major = imajor(path.dentry->d_inode); | 1120 | major = imajor(path.dentry->d_inode); |
1121 | minor = iminor(path.dentry->d_inode); | 1121 | minor = iminor(path.dentry->d_inode); |
1122 | mode = path.dentry->d_inode->i_mode; | 1122 | mode = path.dentry->d_inode->i_mode; |
1123 | path_put(&path); | 1123 | path_put(&path); |
1124 | if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode)) | 1124 | if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode)) |
1125 | return ERR_PTR(-EINVAL); | 1125 | return ERR_PTR(-EINVAL); |
1126 | 1126 | ||
1127 | if (minor & 1) | 1127 | if (minor & 1) |
1128 | /* | 1128 | /* |
1129 | * We do not think that support for the "/dev/mtdrX" devices is | 1129 | * We do not think that support for the "/dev/mtdrX" devices is |
1130 | * needed, so we do not support them, to avoid doing extra work. | 1130 | * needed, so we do not support them, to avoid doing extra work. |
1131 | */ | 1131 | */ |
1132 | return ERR_PTR(-EINVAL); | 1132 | return ERR_PTR(-EINVAL); |
1133 | 1133 | ||
1134 | return get_mtd_device(NULL, minor / 2); | 1134 | return get_mtd_device(NULL, minor / 2); |
1135 | } | 1135 | } |
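
A small illustration of the minor-number mapping used above (hypothetical sketch, not from the patch): the MTD character driver typically registers two minors per MTD device - an even one for the read/write node and an odd one for the read-only node mentioned in the comment above - so the MTD number is minor / 2 and odd minors are rejected.

/* Illustration only: map a character-device minor to an MTD device number. */
int mtd_num_from_chdev_minor(int minor)
{
	if (minor & 1)
		return -1;       /* read-only node - not supported here */
	return minor / 2;        /* even minor 2*X -> MTD device number X */
}

/* e.g. minor 6 maps to MTD 3; minor 7 (the read-only node) is rejected */
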
1136 | 1136 | ||
1137 | /** | 1137 | /** |
1138 | * open_mtd_device - open MTD device by name, character device path, or number. | 1138 | * open_mtd_device - open MTD device by name, character device path, or number. |
1139 | * @mtd_dev: name, character device node path, or MTD device number | 1139 | * @mtd_dev: name, character device node path, or MTD device number |
1140 | * | 1140 | * |
1141 | * This function tries to open an MTD device described by the @mtd_dev string, | 1141 | * This function tries to open an MTD device described by the @mtd_dev string, |
1142 | * which is first treated as an ASCII MTD device number; if that fails, it | 1142 | * which is first treated as an ASCII MTD device number; if that fails, it |
1143 | * is treated as an MTD device name, and if that also fails, it is treated | 1143 | * is treated as an MTD device name, and if that also fails, it is treated |
1144 | * as MTD character device node path. Returns MTD device description object in | 1144 | * as MTD character device node path. Returns MTD device description object in |
1145 | * case of success and a negative error code in case of failure. | 1145 | * case of success and a negative error code in case of failure. |
1146 | */ | 1146 | */ |
1147 | static struct mtd_info * __init open_mtd_device(const char *mtd_dev) | 1147 | static struct mtd_info * __init open_mtd_device(const char *mtd_dev) |
1148 | { | 1148 | { |
1149 | struct mtd_info *mtd; | 1149 | struct mtd_info *mtd; |
1150 | int mtd_num; | 1150 | int mtd_num; |
1151 | char *endp; | 1151 | char *endp; |
1152 | 1152 | ||
1153 | mtd_num = simple_strtoul(mtd_dev, &endp, 0); | 1153 | mtd_num = simple_strtoul(mtd_dev, &endp, 0); |
1154 | if (*endp != '\0' || mtd_dev == endp) { | 1154 | if (*endp != '\0' || mtd_dev == endp) { |
1155 | /* | 1155 | /* |
1156 | * This does not look like an ASCII integer, probably this is | 1156 | * This does not look like an ASCII integer, probably this is |
1157 | * MTD device name. | 1157 | * MTD device name. |
1158 | */ | 1158 | */ |
1159 | mtd = get_mtd_device_nm(mtd_dev); | 1159 | mtd = get_mtd_device_nm(mtd_dev); |
1160 | if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV) | 1160 | if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV) |
1161 | /* Probably this is an MTD character device node path */ | 1161 | /* Probably this is an MTD character device node path */ |
1162 | mtd = open_mtd_by_chdev(mtd_dev); | 1162 | mtd = open_mtd_by_chdev(mtd_dev); |
1163 | } else | 1163 | } else |
1164 | mtd = get_mtd_device(NULL, mtd_num); | 1164 | mtd = get_mtd_device(NULL, mtd_num); |
1165 | 1165 | ||
1166 | return mtd; | 1166 | return mtd; |
1167 | } | 1167 | } |
1168 | 1168 | ||
1169 | static int __init ubi_init(void) | 1169 | static int __init ubi_init(void) |
1170 | { | 1170 | { |
1171 | int err, i, k; | 1171 | int err, i, k; |
1172 | 1172 | ||
1173 | /* Ensure that EC and VID headers have correct size */ | 1173 | /* Ensure that EC and VID headers have correct size */ |
1174 | BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64); | 1174 | BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64); |
1175 | BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); | 1175 | BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); |
1176 | 1176 | ||
1177 | if (mtd_devs > UBI_MAX_DEVICES) { | 1177 | if (mtd_devs > UBI_MAX_DEVICES) { |
1178 | ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES); | 1178 | ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES); |
1179 | return -EINVAL; | 1179 | return -EINVAL; |
1180 | } | 1180 | } |
1181 | 1181 | ||
1182 | /* Create base sysfs directory and sysfs files */ | 1182 | /* Create base sysfs directory and sysfs files */ |
1183 | ubi_class = class_create(THIS_MODULE, UBI_NAME_STR); | 1183 | ubi_class = class_create(THIS_MODULE, UBI_NAME_STR); |
1184 | if (IS_ERR(ubi_class)) { | 1184 | if (IS_ERR(ubi_class)) { |
1185 | err = PTR_ERR(ubi_class); | 1185 | err = PTR_ERR(ubi_class); |
1186 | ubi_err("cannot create UBI class"); | 1186 | ubi_err("cannot create UBI class"); |
1187 | goto out; | 1187 | goto out; |
1188 | } | 1188 | } |
1189 | 1189 | ||
1190 | err = class_create_file(ubi_class, &ubi_version); | 1190 | err = class_create_file(ubi_class, &ubi_version); |
1191 | if (err) { | 1191 | if (err) { |
1192 | ubi_err("cannot create sysfs file"); | 1192 | ubi_err("cannot create sysfs file"); |
1193 | goto out_class; | 1193 | goto out_class; |
1194 | } | 1194 | } |
1195 | 1195 | ||
1196 | err = misc_register(&ubi_ctrl_cdev); | 1196 | err = misc_register(&ubi_ctrl_cdev); |
1197 | if (err) { | 1197 | if (err) { |
1198 | ubi_err("cannot register device"); | 1198 | ubi_err("cannot register device"); |
1199 | goto out_version; | 1199 | goto out_version; |
1200 | } | 1200 | } |
1201 | 1201 | ||
1202 | ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab", | 1202 | ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab", |
1203 | sizeof(struct ubi_wl_entry), | 1203 | sizeof(struct ubi_wl_entry), |
1204 | 0, 0, NULL); | 1204 | 0, 0, NULL); |
1205 | if (!ubi_wl_entry_slab) | 1205 | if (!ubi_wl_entry_slab) |
1206 | goto out_dev_unreg; | 1206 | goto out_dev_unreg; |
1207 | 1207 | ||
1208 | err = ubi_debugfs_init(); | 1208 | err = ubi_debugfs_init(); |
1209 | if (err) | 1209 | if (err) |
1210 | goto out_slab; | 1210 | goto out_slab; |
1211 | 1211 | ||
1212 | 1212 | ||
1213 | /* Attach MTD devices */ | 1213 | /* Attach MTD devices */ |
1214 | for (i = 0; i < mtd_devs; i++) { | 1214 | for (i = 0; i < mtd_devs; i++) { |
1215 | struct mtd_dev_param *p = &mtd_dev_param[i]; | 1215 | struct mtd_dev_param *p = &mtd_dev_param[i]; |
1216 | struct mtd_info *mtd; | 1216 | struct mtd_info *mtd; |
1217 | 1217 | ||
1218 | cond_resched(); | 1218 | cond_resched(); |
1219 | 1219 | ||
1220 | mtd = open_mtd_device(p->name); | 1220 | mtd = open_mtd_device(p->name); |
1221 | if (IS_ERR(mtd)) { | 1221 | if (IS_ERR(mtd)) { |
1222 | err = PTR_ERR(mtd); | 1222 | err = PTR_ERR(mtd); |
1223 | goto out_detach; | 1223 | goto out_detach; |
1224 | } | 1224 | } |
1225 | 1225 | ||
1226 | mutex_lock(&ubi_devices_mutex); | 1226 | mutex_lock(&ubi_devices_mutex); |
1227 | err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, | 1227 | err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, |
1228 | p->vid_hdr_offs); | 1228 | p->vid_hdr_offs); |
1229 | mutex_unlock(&ubi_devices_mutex); | 1229 | mutex_unlock(&ubi_devices_mutex); |
1230 | if (err < 0) { | 1230 | if (err < 0) { |
1231 | ubi_err("cannot attach mtd%d", mtd->index); | 1231 | ubi_err("cannot attach mtd%d", mtd->index); |
1232 | put_mtd_device(mtd); | 1232 | put_mtd_device(mtd); |
1233 | 1233 | ||
1234 | /* | 1234 | /* |
1235 | * Originally UBI stopped initializing on any error. | 1235 | * Originally UBI stopped initializing on any error. |
1236 | * However, later on it was found out that this | 1236 | * However, later on it was found out that this |
1237 | * behavior is not very good when UBI is compiled into | 1237 | * behavior is not very good when UBI is compiled into |
1238 | * the kernel and the MTD devices to attach are passed | 1238 | * the kernel and the MTD devices to attach are passed |
1239 | * through the command line. Indeed, a UBI failure | 1239 | * through the command line. Indeed, a UBI failure |
1240 | * stopped the whole boot sequence. | 1240 | * stopped the whole boot sequence. |
1241 | * | 1241 | * |
1242 | * To fix this, we changed the behavior for the | 1242 | * To fix this, we changed the behavior for the |
1243 | * non-module case, but preserved the old behavior for | 1243 | * non-module case, but preserved the old behavior for |
1244 | * the module case, just for compatibility. This is a | 1244 | * the module case, just for compatibility. This is a |
1245 | * little inconsistent, though. | 1245 | * little inconsistent, though. |
1246 | */ | 1246 | */ |
1247 | if (ubi_is_module()) | 1247 | if (ubi_is_module()) |
1248 | goto out_detach; | 1248 | goto out_detach; |
1249 | } | 1249 | } |
1250 | } | 1250 | } |
1251 | 1251 | ||
1252 | return 0; | 1252 | return 0; |
1253 | 1253 | ||
1254 | out_detach: | 1254 | out_detach: |
1255 | for (k = 0; k < i; k++) | 1255 | for (k = 0; k < i; k++) |
1256 | if (ubi_devices[k]) { | 1256 | if (ubi_devices[k]) { |
1257 | mutex_lock(&ubi_devices_mutex); | 1257 | mutex_lock(&ubi_devices_mutex); |
1258 | ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1); | 1258 | ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1); |
1259 | mutex_unlock(&ubi_devices_mutex); | 1259 | mutex_unlock(&ubi_devices_mutex); |
1260 | } | 1260 | } |
1261 | ubi_debugfs_exit(); | 1261 | ubi_debugfs_exit(); |
1262 | out_slab: | 1262 | out_slab: |
1263 | kmem_cache_destroy(ubi_wl_entry_slab); | 1263 | kmem_cache_destroy(ubi_wl_entry_slab); |
1264 | out_dev_unreg: | 1264 | out_dev_unreg: |
1265 | misc_deregister(&ubi_ctrl_cdev); | 1265 | misc_deregister(&ubi_ctrl_cdev); |
1266 | out_version: | 1266 | out_version: |
1267 | class_remove_file(ubi_class, &ubi_version); | 1267 | class_remove_file(ubi_class, &ubi_version); |
1268 | out_class: | 1268 | out_class: |
1269 | class_destroy(ubi_class); | 1269 | class_destroy(ubi_class); |
1270 | out: | 1270 | out: |
1271 | ubi_err("UBI error: cannot initialize UBI, error %d", err); | 1271 | ubi_err("UBI error: cannot initialize UBI, error %d", err); |
1272 | return err; | 1272 | return err; |
1273 | } | 1273 | } |
1274 | module_init(ubi_init); | 1274 | module_init(ubi_init); |
1275 | 1275 | ||
1276 | static void __exit ubi_exit(void) | 1276 | static void __exit ubi_exit(void) |
1277 | { | 1277 | { |
1278 | int i; | 1278 | int i; |
1279 | 1279 | ||
1280 | for (i = 0; i < UBI_MAX_DEVICES; i++) | 1280 | for (i = 0; i < UBI_MAX_DEVICES; i++) |
1281 | if (ubi_devices[i]) { | 1281 | if (ubi_devices[i]) { |
1282 | mutex_lock(&ubi_devices_mutex); | 1282 | mutex_lock(&ubi_devices_mutex); |
1283 | ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1); | 1283 | ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1); |
1284 | mutex_unlock(&ubi_devices_mutex); | 1284 | mutex_unlock(&ubi_devices_mutex); |
1285 | } | 1285 | } |
1286 | ubi_debugfs_exit(); | 1286 | ubi_debugfs_exit(); |
1287 | kmem_cache_destroy(ubi_wl_entry_slab); | 1287 | kmem_cache_destroy(ubi_wl_entry_slab); |
1288 | misc_deregister(&ubi_ctrl_cdev); | 1288 | misc_deregister(&ubi_ctrl_cdev); |
1289 | class_remove_file(ubi_class, &ubi_version); | 1289 | class_remove_file(ubi_class, &ubi_version); |
1290 | class_destroy(ubi_class); | 1290 | class_destroy(ubi_class); |
1291 | } | 1291 | } |
1292 | module_exit(ubi_exit); | 1292 | module_exit(ubi_exit); |
1293 | 1293 | ||
1294 | /** | 1294 | /** |
1295 | * bytes_str_to_int - convert a number of bytes string into an integer. | 1295 | * bytes_str_to_int - convert a number of bytes string into an integer. |
1296 | * @str: the string to convert | 1296 | * @str: the string to convert |
1297 | * | 1297 | * |
1298 | * This function returns the resulting positive integer in case of success and a | 1298 | * This function returns the resulting positive integer in case of success and a |
1299 | * negative error code in case of failure. | 1299 | * negative error code in case of failure. |
1300 | */ | 1300 | */ |
1301 | static int __init bytes_str_to_int(const char *str) | 1301 | static int __init bytes_str_to_int(const char *str) |
1302 | { | 1302 | { |
1303 | char *endp; | 1303 | char *endp; |
1304 | unsigned long result; | 1304 | unsigned long result; |
1305 | 1305 | ||
1306 | result = simple_strtoul(str, &endp, 0); | 1306 | result = simple_strtoul(str, &endp, 0); |
1307 | if (str == endp || result >= INT_MAX) { | 1307 | if (str == endp || result >= INT_MAX) { |
1308 | printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", | 1308 | printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", |
1309 | str); | 1309 | str); |
1310 | return -EINVAL; | 1310 | return -EINVAL; |
1311 | } | 1311 | } |
1312 | 1312 | ||
1313 | switch (*endp) { | 1313 | switch (*endp) { |
1314 | case 'G': | 1314 | case 'G': |
1315 | result *= 1024; | 1315 | result *= 1024; |
1316 | case 'M': | 1316 | case 'M': |
1317 | result *= 1024; | 1317 | result *= 1024; |
1318 | case 'K': | 1318 | case 'K': |
1319 | result *= 1024; | 1319 | result *= 1024; |
1320 | if (endp[1] == 'i' && endp[2] == 'B') | 1320 | if (endp[1] == 'i' && endp[2] == 'B') |
1321 | endp += 2; | 1321 | endp += 2; |
1322 | case '\0': | 1322 | case '\0': |
1323 | break; | 1323 | break; |
1324 | default: | 1324 | default: |
1325 | printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", | 1325 | printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", |
1326 | str); | 1326 | str); |
1327 | return -EINVAL; | 1327 | return -EINVAL; |
1328 | } | 1328 | } |
1329 | 1329 | ||
1330 | return result; | 1330 | return result; |
1331 | } | 1331 | } |
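
A self-contained userspace sketch of the same suffix handling (illustration only; simple_strtoul and the kernel error codes are replaced with strtoull and -1, which are assumptions of this sketch, not part of the patch). It shows how the deliberate switch fall-through multiplies once for 'K', twice for 'M' and three times for 'G'.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative restatement of the "K/M/G with optional iB" rule above. */
static long long bytes_str_demo(const char *str)
{
	char *endp;
	unsigned long long result = strtoull(str, &endp, 0);

	switch (*endp) {
	case 'G':
		result *= 1024;  /* fall through */
	case 'M':
		result *= 1024;  /* fall through */
	case 'K':
		result *= 1024;
		if (endp[1] == 'i' && endp[2] == 'B')
			endp += 2;
		break;
	case '\0':
		break;
	default:
		return -1;       /* unknown suffix */
	}
	return result;
}

int main(void)
{
	printf("%lld\n", bytes_str_demo("1984"));  /* 1984    - no suffix  */
	printf("%lld\n", bytes_str_demo("2KiB"));  /* 2048    - 2 * 1024   */
	printf("%lld\n", bytes_str_demo("4MiB"));  /* 4194304 - 4 * 1024^2 */
	return 0;
}
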
1332 | 1332 | ||
1333 | /** | 1333 | /** |
1334 | * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter. | 1334 | * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter. |
1335 | * @val: the parameter value to parse | 1335 | * @val: the parameter value to parse |
1336 | * @kp: not used | 1336 | * @kp: not used |
1337 | * | 1337 | * |
1338 | * This function returns zero in case of success and a negative error code in | 1338 | * This function returns zero in case of success and a negative error code in |
1339 | * case of error. | 1339 | * case of error. |
1340 | */ | 1340 | */ |
1341 | static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp) | 1341 | static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp) |
1342 | { | 1342 | { |
1343 | int i, len; | 1343 | int i, len; |
1344 | struct mtd_dev_param *p; | 1344 | struct mtd_dev_param *p; |
1345 | char buf[MTD_PARAM_LEN_MAX]; | 1345 | char buf[MTD_PARAM_LEN_MAX]; |
1346 | char *pbuf = &buf[0]; | 1346 | char *pbuf = &buf[0]; |
1347 | char *tokens[2] = {NULL, NULL}; | 1347 | char *tokens[2] = {NULL, NULL}; |
1348 | 1348 | ||
1349 | if (!val) | 1349 | if (!val) |
1350 | return -EINVAL; | 1350 | return -EINVAL; |
1351 | 1351 | ||
1352 | if (mtd_devs == UBI_MAX_DEVICES) { | 1352 | if (mtd_devs == UBI_MAX_DEVICES) { |
1353 | printk(KERN_ERR "UBI error: too many parameters, max. is %d\n", | 1353 | printk(KERN_ERR "UBI error: too many parameters, max. is %d\n", |
1354 | UBI_MAX_DEVICES); | 1354 | UBI_MAX_DEVICES); |
1355 | return -EINVAL; | 1355 | return -EINVAL; |
1356 | } | 1356 | } |
1357 | 1357 | ||
1358 | len = strnlen(val, MTD_PARAM_LEN_MAX); | 1358 | len = strnlen(val, MTD_PARAM_LEN_MAX); |
1359 | if (len == MTD_PARAM_LEN_MAX) { | 1359 | if (len == MTD_PARAM_LEN_MAX) { |
1360 | printk(KERN_ERR "UBI error: parameter \"%s\" is too long, " | 1360 | printk(KERN_ERR "UBI error: parameter \"%s\" is too long, " |
1361 | "max. is %d\n", val, MTD_PARAM_LEN_MAX); | 1361 | "max. is %d\n", val, MTD_PARAM_LEN_MAX); |
1362 | return -EINVAL; | 1362 | return -EINVAL; |
1363 | } | 1363 | } |
1364 | 1364 | ||
1365 | if (len == 0) { | 1365 | if (len == 0) { |
1366 | printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - " | 1366 | printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - " |
1367 | "ignored\n"); | 1367 | "ignored\n"); |
1368 | return 0; | 1368 | return 0; |
1369 | } | 1369 | } |
1370 | 1370 | ||
1371 | strcpy(buf, val); | 1371 | strcpy(buf, val); |
1372 | 1372 | ||
1373 | /* Get rid of the final newline */ | 1373 | /* Get rid of the final newline */ |
1374 | if (buf[len - 1] == '\n') | 1374 | if (buf[len - 1] == '\n') |
1375 | buf[len - 1] = '\0'; | 1375 | buf[len - 1] = '\0'; |
1376 | 1376 | ||
1377 | for (i = 0; i < 2; i++) | 1377 | for (i = 0; i < 2; i++) |
1378 | tokens[i] = strsep(&pbuf, ","); | 1378 | tokens[i] = strsep(&pbuf, ","); |
1379 | 1379 | ||
1380 | if (pbuf) { | 1380 | if (pbuf) { |
1381 | printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n", | 1381 | printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n", |
1382 | val); | 1382 | val); |
1383 | return -EINVAL; | 1383 | return -EINVAL; |
1384 | } | 1384 | } |
1385 | 1385 | ||
1386 | p = &mtd_dev_param[mtd_devs]; | 1386 | p = &mtd_dev_param[mtd_devs]; |
1387 | strcpy(&p->name[0], tokens[0]); | 1387 | strcpy(&p->name[0], tokens[0]); |
1388 | 1388 | ||
1389 | if (tokens[1]) | 1389 | if (tokens[1]) |
1390 | p->vid_hdr_offs = bytes_str_to_int(tokens[1]); | 1390 | p->vid_hdr_offs = bytes_str_to_int(tokens[1]); |
1391 | 1391 | ||
1392 | if (p->vid_hdr_offs < 0) | 1392 | if (p->vid_hdr_offs < 0) |
1393 | return p->vid_hdr_offs; | 1393 | return p->vid_hdr_offs; |
1394 | 1394 | ||
1395 | mtd_devs += 1; | 1395 | mtd_devs += 1; |
1396 | return 0; | 1396 | return 0; |
1397 | } | 1397 | } |
1398 | 1398 | ||
1399 | module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000); | 1399 | module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000); |
1400 | MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: " | 1400 | MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: " |
1401 | "mtd=<name|num|path>[,<vid_hdr_offs>].\n" | 1401 | "mtd=<name|num|path>[,<vid_hdr_offs>].\n" |
1402 | "Multiple \"mtd\" parameters may be specified.\n" | 1402 | "Multiple \"mtd\" parameters may be specified.\n" |
1403 | "MTD devices may be specified by their number, name, or " | 1403 | "MTD devices may be specified by their number, name, or " |
1404 | "path to the MTD character device node.\n" | 1404 | "path to the MTD character device node.\n" |
1405 | "Optional \"vid_hdr_offs\" parameter specifies UBI VID " | 1405 | "Optional \"vid_hdr_offs\" parameter specifies UBI VID " |
1406 | "header position to be used by UBI.\n" | 1406 | "header position to be used by UBI.\n" |
1407 | "Example 1: mtd=/dev/mtd0 - attach MTD device " | 1407 | "Example 1: mtd=/dev/mtd0 - attach MTD device " |
1408 | "/dev/mtd0.\n" | 1408 | "/dev/mtd0.\n" |
1409 | "Example 2: mtd=content,1984 mtd=4 - attach MTD device " | 1409 | "Example 2: mtd=content,1984 mtd=4 - attach MTD device " |
1410 | "with name \"content\" using VID header offset 1984, and " | 1410 | "with name \"content\" using VID header offset 1984, and " |
1411 | "MTD device number 4 with default VID header offset."); | 1411 | "MTD device number 4 with default VID header offset."); |
1412 | 1412 | ||
1413 | MODULE_VERSION(__stringify(UBI_VERSION)); | 1413 | MODULE_VERSION(__stringify(UBI_VERSION)); |
1414 | MODULE_DESCRIPTION("UBI - Unsorted Block Images"); | 1414 | MODULE_DESCRIPTION("UBI - Unsorted Block Images"); |
1415 | MODULE_AUTHOR("Artem Bityutskiy"); | 1415 | MODULE_AUTHOR("Artem Bityutskiy"); |
1416 | MODULE_LICENSE("GPL"); | 1416 | MODULE_LICENSE("GPL"); |
1417 | 1417 |
drivers/mtd/ubi/eba.c
1 | /* | 1 | /* |
2 | * Copyright (c) International Business Machines Corp., 2006 | 2 | * Copyright (c) International Business Machines Corp., 2006 |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
6 | * the Free Software Foundation; either version 2 of the License, or | 6 | * the Free Software Foundation; either version 2 of the License, or |
7 | * (at your option) any later version. | 7 | * (at your option) any later version. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, | 9 | * This program is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See |
12 | * the GNU General Public License for more details. | 12 | * the GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
17 | * | 17 | * |
18 | * Author: Artem Bityutskiy (Битюцкий Артём) | 18 | * Author: Artem Bityutskiy (Битюцкий Артём) |
19 | */ | 19 | */ |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * The UBI Eraseblock Association (EBA) sub-system. | 22 | * The UBI Eraseblock Association (EBA) sub-system. |
23 | * | 23 | * |
24 | * This sub-system is responsible for I/O to/from logical eraseblock. | 24 | * This sub-system is responsible for I/O to/from logical eraseblock. |
25 | * | 25 | * |
26 | * Although in this implementation the EBA table is fully kept and managed in | 26 | * Although in this implementation the EBA table is fully kept and managed in |
27 | * RAM, which implies poor scalability, it might be (partially) maintained on | 27 | * RAM, which implies poor scalability, it might be (partially) maintained on |
28 | * flash in future implementations. | 28 | * flash in future implementations. |
29 | * | 29 | * |
30 | * The EBA sub-system implements per-logical eraseblock locking. Before | 30 | * The EBA sub-system implements per-logical eraseblock locking. Before |
31 | * accessing a logical eraseblock it is locked for reading or writing. The | 31 | * accessing a logical eraseblock it is locked for reading or writing. The |
32 | * per-logical eraseblock locking is implemented by means of the lock tree. The | 32 | * per-logical eraseblock locking is implemented by means of the lock tree. The |
33 | * lock tree is an RB-tree which refers all the currently locked logical | 33 | * lock tree is an RB-tree which refers all the currently locked logical |
34 | * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects. | 34 | * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects. |
35 | * They are indexed by (@vol_id, @lnum) pairs. | 35 | * They are indexed by (@vol_id, @lnum) pairs. |
36 | * | 36 | * |
37 | * EBA also maintains the global sequence counter which is incremented each | 37 | * EBA also maintains the global sequence counter which is incremented each |
38 | * time a logical eraseblock is mapped to a physical eraseblock and it is | 38 | * time a logical eraseblock is mapped to a physical eraseblock and it is |
39 | * stored in the volume identifier header. This means that each VID header has | 39 | * stored in the volume identifier header. This means that each VID header has |
40 | * a unique sequence number. The sequence number is only increased an we assume | 40 | * a unique sequence number. The sequence number is only increased an we assume |
41 | * 64 bits is enough to never overflow. | 41 | * 64 bits is enough to never overflow. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/slab.h> | 44 | #include <linux/slab.h> |
45 | #include <linux/crc32.h> | 45 | #include <linux/crc32.h> |
46 | #include <linux/err.h> | 46 | #include <linux/err.h> |
47 | #include "ubi.h" | 47 | #include "ubi.h" |
48 | 48 | ||
49 | /* Number of physical eraseblocks reserved for atomic LEB change operation */ | 49 | /* Number of physical eraseblocks reserved for atomic LEB change operation */ |
50 | #define EBA_RESERVED_PEBS 1 | 50 | #define EBA_RESERVED_PEBS 1 |
51 | 51 | ||
52 | /** | 52 | /** |
53 | * next_sqnum - get next sequence number. | 53 | * next_sqnum - get next sequence number. |
54 | * @ubi: UBI device description object | 54 | * @ubi: UBI device description object |
55 | * | 55 | * |
56 | * This function returns next sequence number to use, which is just the current | 56 | * This function returns next sequence number to use, which is just the current |
57 | * global sequence counter value. It also increases the global sequence | 57 | * global sequence counter value. It also increases the global sequence |
58 | * counter. | 58 | * counter. |
59 | */ | 59 | */ |
60 | static unsigned long long next_sqnum(struct ubi_device *ubi) | 60 | static unsigned long long next_sqnum(struct ubi_device *ubi) |
61 | { | 61 | { |
62 | unsigned long long sqnum; | 62 | unsigned long long sqnum; |
63 | 63 | ||
64 | spin_lock(&ubi->ltree_lock); | 64 | spin_lock(&ubi->ltree_lock); |
65 | sqnum = ubi->global_sqnum++; | 65 | sqnum = ubi->global_sqnum++; |
66 | spin_unlock(&ubi->ltree_lock); | 66 | spin_unlock(&ubi->ltree_lock); |
67 | 67 | ||
68 | return sqnum; | 68 | return sqnum; |
69 | } | 69 | } |
70 | 70 | ||
71 | /** | 71 | /** |
72 | * ubi_get_compat - get compatibility flags of a volume. | 72 | * ubi_get_compat - get compatibility flags of a volume. |
73 | * @ubi: UBI device description object | 73 | * @ubi: UBI device description object |
74 | * @vol_id: volume ID | 74 | * @vol_id: volume ID |
75 | * | 75 | * |
76 | * This function returns compatibility flags for an internal volume. User | 76 | * This function returns compatibility flags for an internal volume. User |
77 | * volumes have no compatibility flags, so %0 is returned. | 77 | * volumes have no compatibility flags, so %0 is returned. |
78 | */ | 78 | */ |
79 | static int ubi_get_compat(const struct ubi_device *ubi, int vol_id) | 79 | static int ubi_get_compat(const struct ubi_device *ubi, int vol_id) |
80 | { | 80 | { |
81 | if (vol_id == UBI_LAYOUT_VOLUME_ID) | 81 | if (vol_id == UBI_LAYOUT_VOLUME_ID) |
82 | return UBI_LAYOUT_VOLUME_COMPAT; | 82 | return UBI_LAYOUT_VOLUME_COMPAT; |
83 | return 0; | 83 | return 0; |
84 | } | 84 | } |
85 | 85 | ||
86 | /** | 86 | /** |
87 | * ltree_lookup - look up the lock tree. | 87 | * ltree_lookup - look up the lock tree. |
88 | * @ubi: UBI device description object | 88 | * @ubi: UBI device description object |
89 | * @vol_id: volume ID | 89 | * @vol_id: volume ID |
90 | * @lnum: logical eraseblock number | 90 | * @lnum: logical eraseblock number |
91 | * | 91 | * |
92 | * This function returns a pointer to the corresponding &struct ubi_ltree_entry | 92 | * This function returns a pointer to the corresponding &struct ubi_ltree_entry |
93 | * object if the logical eraseblock is locked and %NULL if it is not. | 93 | * object if the logical eraseblock is locked and %NULL if it is not. |
94 | * @ubi->ltree_lock has to be locked. | 94 | * @ubi->ltree_lock has to be locked. |
95 | */ | 95 | */ |
96 | static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id, | 96 | static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id, |
97 | int lnum) | 97 | int lnum) |
98 | { | 98 | { |
99 | struct rb_node *p; | 99 | struct rb_node *p; |
100 | 100 | ||
101 | p = ubi->ltree.rb_node; | 101 | p = ubi->ltree.rb_node; |
102 | while (p) { | 102 | while (p) { |
103 | struct ubi_ltree_entry *le; | 103 | struct ubi_ltree_entry *le; |
104 | 104 | ||
105 | le = rb_entry(p, struct ubi_ltree_entry, rb); | 105 | le = rb_entry(p, struct ubi_ltree_entry, rb); |
106 | 106 | ||
107 | if (vol_id < le->vol_id) | 107 | if (vol_id < le->vol_id) |
108 | p = p->rb_left; | 108 | p = p->rb_left; |
109 | else if (vol_id > le->vol_id) | 109 | else if (vol_id > le->vol_id) |
110 | p = p->rb_right; | 110 | p = p->rb_right; |
111 | else { | 111 | else { |
112 | if (lnum < le->lnum) | 112 | if (lnum < le->lnum) |
113 | p = p->rb_left; | 113 | p = p->rb_left; |
114 | else if (lnum > le->lnum) | 114 | else if (lnum > le->lnum) |
115 | p = p->rb_right; | 115 | p = p->rb_right; |
116 | else | 116 | else |
117 | return le; | 117 | return le; |
118 | } | 118 | } |
119 | } | 119 | } |
120 | 120 | ||
121 | return NULL; | 121 | return NULL; |
122 | } | 122 | } |
123 | 123 | ||
124 | /** | 124 | /** |
125 | * ltree_add_entry - add new entry to the lock tree. | 125 | * ltree_add_entry - add new entry to the lock tree. |
126 | * @ubi: UBI device description object | 126 | * @ubi: UBI device description object |
127 | * @vol_id: volume ID | 127 | * @vol_id: volume ID |
128 | * @lnum: logical eraseblock number | 128 | * @lnum: logical eraseblock number |
129 | * | 129 | * |
130 | * This function adds new entry for logical eraseblock (@vol_id, @lnum) to the | 130 | * This function adds new entry for logical eraseblock (@vol_id, @lnum) to the |
131 | * lock tree. If such entry is already there, its usage counter is increased. | 131 | * lock tree. If such entry is already there, its usage counter is increased. |
132 | * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation | 132 | * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation |
133 | * failed. | 133 | * failed. |
134 | */ | 134 | */ |
135 | static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi, | 135 | static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi, |
136 | int vol_id, int lnum) | 136 | int vol_id, int lnum) |
137 | { | 137 | { |
138 | struct ubi_ltree_entry *le, *le1, *le_free; | 138 | struct ubi_ltree_entry *le, *le1, *le_free; |
139 | 139 | ||
140 | le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS); | 140 | le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS); |
141 | if (!le) | 141 | if (!le) |
142 | return ERR_PTR(-ENOMEM); | 142 | return ERR_PTR(-ENOMEM); |
143 | 143 | ||
144 | le->users = 0; | 144 | le->users = 0; |
145 | init_rwsem(&le->mutex); | 145 | init_rwsem(&le->mutex); |
146 | le->vol_id = vol_id; | 146 | le->vol_id = vol_id; |
147 | le->lnum = lnum; | 147 | le->lnum = lnum; |
148 | 148 | ||
149 | spin_lock(&ubi->ltree_lock); | 149 | spin_lock(&ubi->ltree_lock); |
150 | le1 = ltree_lookup(ubi, vol_id, lnum); | 150 | le1 = ltree_lookup(ubi, vol_id, lnum); |
151 | 151 | ||
152 | if (le1) { | 152 | if (le1) { |
153 | /* | 153 | /* |
154 | * This logical eraseblock is already locked. The newly | 154 | * This logical eraseblock is already locked. The newly |
155 | * allocated lock entry is not needed. | 155 | * allocated lock entry is not needed. |
156 | */ | 156 | */ |
157 | le_free = le; | 157 | le_free = le; |
158 | le = le1; | 158 | le = le1; |
159 | } else { | 159 | } else { |
160 | struct rb_node **p, *parent = NULL; | 160 | struct rb_node **p, *parent = NULL; |
161 | 161 | ||
162 | /* | 162 | /* |
163 | * No lock entry, add the newly allocated one to the | 163 | * No lock entry, add the newly allocated one to the |
164 | * @ubi->ltree RB-tree. | 164 | * @ubi->ltree RB-tree. |
165 | */ | 165 | */ |
166 | le_free = NULL; | 166 | le_free = NULL; |
167 | 167 | ||
168 | p = &ubi->ltree.rb_node; | 168 | p = &ubi->ltree.rb_node; |
169 | while (*p) { | 169 | while (*p) { |
170 | parent = *p; | 170 | parent = *p; |
171 | le1 = rb_entry(parent, struct ubi_ltree_entry, rb); | 171 | le1 = rb_entry(parent, struct ubi_ltree_entry, rb); |
172 | 172 | ||
173 | if (vol_id < le1->vol_id) | 173 | if (vol_id < le1->vol_id) |
174 | p = &(*p)->rb_left; | 174 | p = &(*p)->rb_left; |
175 | else if (vol_id > le1->vol_id) | 175 | else if (vol_id > le1->vol_id) |
176 | p = &(*p)->rb_right; | 176 | p = &(*p)->rb_right; |
177 | else { | 177 | else { |
178 | ubi_assert(lnum != le1->lnum); | 178 | ubi_assert(lnum != le1->lnum); |
179 | if (lnum < le1->lnum) | 179 | if (lnum < le1->lnum) |
180 | p = &(*p)->rb_left; | 180 | p = &(*p)->rb_left; |
181 | else | 181 | else |
182 | p = &(*p)->rb_right; | 182 | p = &(*p)->rb_right; |
183 | } | 183 | } |
184 | } | 184 | } |
185 | 185 | ||
186 | rb_link_node(&le->rb, parent, p); | 186 | rb_link_node(&le->rb, parent, p); |
187 | rb_insert_color(&le->rb, &ubi->ltree); | 187 | rb_insert_color(&le->rb, &ubi->ltree); |
188 | } | 188 | } |
189 | le->users += 1; | 189 | le->users += 1; |
190 | spin_unlock(&ubi->ltree_lock); | 190 | spin_unlock(&ubi->ltree_lock); |
191 | 191 | ||
192 | kfree(le_free); | 192 | kfree(le_free); |
193 | return le; | 193 | return le; |
194 | } | 194 | } |
195 | 195 | ||
196 | /** | 196 | /** |
197 | * leb_read_lock - lock logical eraseblock for reading. | 197 | * leb_read_lock - lock logical eraseblock for reading. |
198 | * @ubi: UBI device description object | 198 | * @ubi: UBI device description object |
199 | * @vol_id: volume ID | 199 | * @vol_id: volume ID |
200 | * @lnum: logical eraseblock number | 200 | * @lnum: logical eraseblock number |
201 | * | 201 | * |
202 | * This function locks a logical eraseblock for reading. Returns zero in case | 202 | * This function locks a logical eraseblock for reading. Returns zero in case |
203 | * of success and a negative error code in case of failure. | 203 | * of success and a negative error code in case of failure. |
204 | */ | 204 | */ |
205 | static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum) | 205 | static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum) |
206 | { | 206 | { |
207 | struct ubi_ltree_entry *le; | 207 | struct ubi_ltree_entry *le; |
208 | 208 | ||
209 | le = ltree_add_entry(ubi, vol_id, lnum); | 209 | le = ltree_add_entry(ubi, vol_id, lnum); |
210 | if (IS_ERR(le)) | 210 | if (IS_ERR(le)) |
211 | return PTR_ERR(le); | 211 | return PTR_ERR(le); |
212 | down_read(&le->mutex); | 212 | down_read(&le->mutex); |
213 | return 0; | 213 | return 0; |
214 | } | 214 | } |
215 | 215 | ||
216 | /** | 216 | /** |
217 | * leb_read_unlock - unlock logical eraseblock. | 217 | * leb_read_unlock - unlock logical eraseblock. |
218 | * @ubi: UBI device description object | 218 | * @ubi: UBI device description object |
219 | * @vol_id: volume ID | 219 | * @vol_id: volume ID |
220 | * @lnum: logical eraseblock number | 220 | * @lnum: logical eraseblock number |
221 | */ | 221 | */ |
222 | static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) | 222 | static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) |
223 | { | 223 | { |
224 | struct ubi_ltree_entry *le; | 224 | struct ubi_ltree_entry *le; |
225 | 225 | ||
226 | spin_lock(&ubi->ltree_lock); | 226 | spin_lock(&ubi->ltree_lock); |
227 | le = ltree_lookup(ubi, vol_id, lnum); | 227 | le = ltree_lookup(ubi, vol_id, lnum); |
228 | le->users -= 1; | 228 | le->users -= 1; |
229 | ubi_assert(le->users >= 0); | 229 | ubi_assert(le->users >= 0); |
230 | up_read(&le->mutex); | 230 | up_read(&le->mutex); |
231 | if (le->users == 0) { | 231 | if (le->users == 0) { |
232 | rb_erase(&le->rb, &ubi->ltree); | 232 | rb_erase(&le->rb, &ubi->ltree); |
233 | kfree(le); | 233 | kfree(le); |
234 | } | 234 | } |
235 | spin_unlock(&ubi->ltree_lock); | 235 | spin_unlock(&ubi->ltree_lock); |
236 | } | 236 | } |
237 | 237 | ||
238 | /** | 238 | /** |
239 | * leb_write_lock - lock logical eraseblock for writing. | 239 | * leb_write_lock - lock logical eraseblock for writing. |
240 | * @ubi: UBI device description object | 240 | * @ubi: UBI device description object |
241 | * @vol_id: volume ID | 241 | * @vol_id: volume ID |
242 | * @lnum: logical eraseblock number | 242 | * @lnum: logical eraseblock number |
243 | * | 243 | * |
244 | * This function locks a logical eraseblock for writing. Returns zero in case | 244 | * This function locks a logical eraseblock for writing. Returns zero in case |
245 | * of success and a negative error code in case of failure. | 245 | * of success and a negative error code in case of failure. |
246 | */ | 246 | */ |
247 | static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum) | 247 | static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum) |
248 | { | 248 | { |
249 | struct ubi_ltree_entry *le; | 249 | struct ubi_ltree_entry *le; |
250 | 250 | ||
251 | le = ltree_add_entry(ubi, vol_id, lnum); | 251 | le = ltree_add_entry(ubi, vol_id, lnum); |
252 | if (IS_ERR(le)) | 252 | if (IS_ERR(le)) |
253 | return PTR_ERR(le); | 253 | return PTR_ERR(le); |
254 | down_write(&le->mutex); | 254 | down_write(&le->mutex); |
255 | return 0; | 255 | return 0; |
256 | } | 256 | } |
257 | 257 | ||
258 | /** | 258 | /** |
259 | * leb_write_trylock - try to lock logical eraseblock for writing. | 259 | * leb_write_trylock - try to lock logical eraseblock for writing. |
260 | * @ubi: UBI device description object | 260 | * @ubi: UBI device description object |
261 | * @vol_id: volume ID | 261 | * @vol_id: volume ID |
262 | * @lnum: logical eraseblock number | 262 | * @lnum: logical eraseblock number |
263 | * | 263 | * |
264 | * This function locks a logical eraseblock for writing if there is no | 264 | * This function locks a logical eraseblock for writing if there is no |
265 | * contention and does nothing if there is contention. Returns %0 in case of | 265 | * contention and does nothing if there is contention. Returns %0 in case of |
266 | * success, %1 in case of contention, and a negative error code in case of | 266 | * success, %1 in case of contention, and a negative error code in case of |
267 | * failure. | 267 | * failure. |
268 | */ | 268 | */ |
269 | static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum) | 269 | static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum) |
270 | { | 270 | { |
271 | struct ubi_ltree_entry *le; | 271 | struct ubi_ltree_entry *le; |
272 | 272 | ||
273 | le = ltree_add_entry(ubi, vol_id, lnum); | 273 | le = ltree_add_entry(ubi, vol_id, lnum); |
274 | if (IS_ERR(le)) | 274 | if (IS_ERR(le)) |
275 | return PTR_ERR(le); | 275 | return PTR_ERR(le); |
276 | if (down_write_trylock(&le->mutex)) | 276 | if (down_write_trylock(&le->mutex)) |
277 | return 0; | 277 | return 0; |
278 | 278 | ||
279 | /* Contention, cancel */ | 279 | /* Contention, cancel */ |
280 | spin_lock(&ubi->ltree_lock); | 280 | spin_lock(&ubi->ltree_lock); |
281 | le->users -= 1; | 281 | le->users -= 1; |
282 | ubi_assert(le->users >= 0); | 282 | ubi_assert(le->users >= 0); |
283 | if (le->users == 0) { | 283 | if (le->users == 0) { |
284 | rb_erase(&le->rb, &ubi->ltree); | 284 | rb_erase(&le->rb, &ubi->ltree); |
285 | kfree(le); | 285 | kfree(le); |
286 | } | 286 | } |
287 | spin_unlock(&ubi->ltree_lock); | 287 | spin_unlock(&ubi->ltree_lock); |
288 | 288 | ||
289 | return 1; | 289 | return 1; |
290 | } | 290 | } |
291 | 291 | ||
292 | /** | 292 | /** |
293 | * leb_write_unlock - unlock logical eraseblock. | 293 | * leb_write_unlock - unlock logical eraseblock. |
294 | * @ubi: UBI device description object | 294 | * @ubi: UBI device description object |
295 | * @vol_id: volume ID | 295 | * @vol_id: volume ID |
296 | * @lnum: logical eraseblock number | 296 | * @lnum: logical eraseblock number |
297 | */ | 297 | */ |
298 | static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) | 298 | static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) |
299 | { | 299 | { |
300 | struct ubi_ltree_entry *le; | 300 | struct ubi_ltree_entry *le; |
301 | 301 | ||
302 | spin_lock(&ubi->ltree_lock); | 302 | spin_lock(&ubi->ltree_lock); |
303 | le = ltree_lookup(ubi, vol_id, lnum); | 303 | le = ltree_lookup(ubi, vol_id, lnum); |
304 | le->users -= 1; | 304 | le->users -= 1; |
305 | ubi_assert(le->users >= 0); | 305 | ubi_assert(le->users >= 0); |
306 | up_write(&le->mutex); | 306 | up_write(&le->mutex); |
307 | if (le->users == 0) { | 307 | if (le->users == 0) { |
308 | rb_erase(&le->rb, &ubi->ltree); | 308 | rb_erase(&le->rb, &ubi->ltree); |
309 | kfree(le); | 309 | kfree(le); |
310 | } | 310 | } |
311 | spin_unlock(&ubi->ltree_lock); | 311 | spin_unlock(&ubi->ltree_lock); |
312 | } | 312 | } |
313 | 313 | ||
314 | /** | 314 | /** |
315 | * ubi_eba_unmap_leb - un-map logical eraseblock. | 315 | * ubi_eba_unmap_leb - un-map logical eraseblock. |
316 | * @ubi: UBI device description object | 316 | * @ubi: UBI device description object |
317 | * @vol: volume description object | 317 | * @vol: volume description object |
318 | * @lnum: logical eraseblock number | 318 | * @lnum: logical eraseblock number |
319 | * | 319 | * |
320 | * This function un-maps logical eraseblock @lnum and schedules corresponding | 320 | * This function un-maps logical eraseblock @lnum and schedules corresponding |
321 | * physical eraseblock for erasure. Returns zero in case of success and a | 321 | * physical eraseblock for erasure. Returns zero in case of success and a |
322 | * negative error code in case of failure. | 322 | * negative error code in case of failure. |
323 | */ | 323 | */ |
324 | int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, | 324 | int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, |
325 | int lnum) | 325 | int lnum) |
326 | { | 326 | { |
327 | int err, pnum, vol_id = vol->vol_id; | 327 | int err, pnum, vol_id = vol->vol_id; |
328 | 328 | ||
329 | if (ubi->ro_mode) | 329 | if (ubi->ro_mode) |
330 | return -EROFS; | 330 | return -EROFS; |
331 | 331 | ||
332 | err = leb_write_lock(ubi, vol_id, lnum); | 332 | err = leb_write_lock(ubi, vol_id, lnum); |
333 | if (err) | 333 | if (err) |
334 | return err; | 334 | return err; |
335 | 335 | ||
336 | pnum = vol->eba_tbl[lnum]; | 336 | pnum = vol->eba_tbl[lnum]; |
337 | if (pnum < 0) | 337 | if (pnum < 0) |
338 | /* This logical eraseblock is already unmapped */ | 338 | /* This logical eraseblock is already unmapped */ |
339 | goto out_unlock; | 339 | goto out_unlock; |
340 | 340 | ||
341 | dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum); | 341 | dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum); |
342 | 342 | ||
343 | vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED; | 343 | vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED; |
344 | err = ubi_wl_put_peb(ubi, pnum, 0); | 344 | err = ubi_wl_put_peb(ubi, pnum, 0); |
345 | 345 | ||
346 | out_unlock: | 346 | out_unlock: |
347 | leb_write_unlock(ubi, vol_id, lnum); | 347 | leb_write_unlock(ubi, vol_id, lnum); |
348 | return err; | 348 | return err; |
349 | } | 349 | } |
350 | 350 | ||
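Illustration only (not part of this change): a minimal caller sketch for ubi_eba_unmap_leb(), assuming the driver's internal ubi.h declarations are in scope and that @ubi and @vol describe an already attached device and volume. The helper name is hypothetical.

/* Hypothetical helper: discard LEB @lnum and let wear-levelling erase the PEB. */
static int example_discard_leb(struct ubi_device *ubi, struct ubi_volume *vol,
			       int lnum)
{
	int err;

	err = ubi_eba_unmap_leb(ubi, vol, lnum);
	if (err)
		/* -EROFS, a locking failure, or a ubi_wl_put_peb() error */
		return err;

	/* The old PEB, if the LEB was mapped, is now queued for erasure */
	return 0;
}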
351 | /** | 351 | /** |
352 | * ubi_eba_read_leb - read data. | 352 | * ubi_eba_read_leb - read data. |
353 | * @ubi: UBI device description object | 353 | * @ubi: UBI device description object |
354 | * @vol: volume description object | 354 | * @vol: volume description object |
355 | * @lnum: logical eraseblock number | 355 | * @lnum: logical eraseblock number |
356 | * @buf: buffer to store the read data | 356 | * @buf: buffer to store the read data |
357 | * @offset: offset from where to read | 357 | * @offset: offset from where to read |
358 | * @len: how many bytes to read | 358 | * @len: how many bytes to read |
359 | * @check: data CRC check flag | 359 | * @check: data CRC check flag |
360 | * | 360 | * |
361 | * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF | 361 | * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF |
362 | * bytes. The @check flag only makes sense for static volumes and forces | 362 | * bytes. The @check flag only makes sense for static volumes and forces |
363 | * eraseblock data CRC checking. | 363 | * eraseblock data CRC checking. |
364 | * | 364 | * |
365 | * In case of success this function returns zero. In case of a static volume, | 365 | * In case of success this function returns zero. In case of a static volume, |
366 | * if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be | 366 | * if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be |
367 | * returned for any volume type if an ECC error was detected by the MTD device | 367 | * returned for any volume type if an ECC error was detected by the MTD device |
368 | * driver. Other negative error codes may be returned in case of other errors. | 368 | * driver. Other negative error codes may be returned in case of other errors. |
369 | */ | 369 | */ |
370 | int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, | 370 | int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, |
371 | void *buf, int offset, int len, int check) | 371 | void *buf, int offset, int len, int check) |
372 | { | 372 | { |
373 | int err, pnum, scrub = 0, vol_id = vol->vol_id; | 373 | int err, pnum, scrub = 0, vol_id = vol->vol_id; |
374 | struct ubi_vid_hdr *vid_hdr; | 374 | struct ubi_vid_hdr *vid_hdr; |
375 | uint32_t uninitialized_var(crc); | 375 | uint32_t uninitialized_var(crc); |
376 | 376 | ||
377 | err = leb_read_lock(ubi, vol_id, lnum); | 377 | err = leb_read_lock(ubi, vol_id, lnum); |
378 | if (err) | 378 | if (err) |
379 | return err; | 379 | return err; |
380 | 380 | ||
381 | pnum = vol->eba_tbl[lnum]; | 381 | pnum = vol->eba_tbl[lnum]; |
382 | if (pnum < 0) { | 382 | if (pnum < 0) { |
383 | /* | 383 | /* |
384 | * The logical eraseblock is not mapped, fill the whole buffer | 384 | * The logical eraseblock is not mapped, fill the whole buffer |
385 | * with 0xFF bytes. The exception is static volumes for which | 385 | * with 0xFF bytes. The exception is static volumes for which |
386 | * it is an error to read unmapped logical eraseblocks. | 386 | * it is an error to read unmapped logical eraseblocks. |
387 | */ | 387 | */ |
388 | dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)", | 388 | dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)", |
389 | len, offset, vol_id, lnum); | 389 | len, offset, vol_id, lnum); |
390 | leb_read_unlock(ubi, vol_id, lnum); | 390 | leb_read_unlock(ubi, vol_id, lnum); |
391 | ubi_assert(vol->vol_type != UBI_STATIC_VOLUME); | 391 | ubi_assert(vol->vol_type != UBI_STATIC_VOLUME); |
392 | memset(buf, 0xFF, len); | 392 | memset(buf, 0xFF, len); |
393 | return 0; | 393 | return 0; |
394 | } | 394 | } |
395 | 395 | ||
396 | dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d", | 396 | dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d", |
397 | len, offset, vol_id, lnum, pnum); | 397 | len, offset, vol_id, lnum, pnum); |
398 | 398 | ||
399 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) | 399 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) |
400 | check = 0; | 400 | check = 0; |
401 | 401 | ||
402 | retry: | 402 | retry: |
403 | if (check) { | 403 | if (check) { |
404 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); | 404 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
405 | if (!vid_hdr) { | 405 | if (!vid_hdr) { |
406 | err = -ENOMEM; | 406 | err = -ENOMEM; |
407 | goto out_unlock; | 407 | goto out_unlock; |
408 | } | 408 | } |
409 | 409 | ||
410 | err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1); | 410 | err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1); |
411 | if (err && err != UBI_IO_BITFLIPS) { | 411 | if (err && err != UBI_IO_BITFLIPS) { |
412 | if (err > 0) { | 412 | if (err > 0) { |
413 | /* | 413 | /* |
414 | * The header is either absent or corrupted. | 414 | * The header is either absent or corrupted. |
415 | * The former case means there is a bug - | 415 | * The former case means there is a bug - |
416 | * switch to read-only mode just in case. | 416 | * switch to read-only mode just in case. |
417 | * The latter case means a real corruption - we | 417 | * The latter case means a real corruption - we |
418 | * may try to recover data. FIXME: but this is | 418 | * may try to recover data. FIXME: but this is |
419 | * not implemented. | 419 | * not implemented. |
420 | */ | 420 | */ |
421 | if (err == UBI_IO_BAD_HDR_EBADMSG || | 421 | if (err == UBI_IO_BAD_HDR_EBADMSG || |
422 | err == UBI_IO_BAD_HDR) { | 422 | err == UBI_IO_BAD_HDR) { |
423 | ubi_warn("corrupted VID header at PEB " | 423 | ubi_warn("corrupted VID header at PEB " |
424 | "%d, LEB %d:%d", pnum, vol_id, | 424 | "%d, LEB %d:%d", pnum, vol_id, |
425 | lnum); | 425 | lnum); |
426 | err = -EBADMSG; | 426 | err = -EBADMSG; |
427 | } else | 427 | } else |
428 | ubi_ro_mode(ubi); | 428 | ubi_ro_mode(ubi); |
429 | } | 429 | } |
430 | goto out_free; | 430 | goto out_free; |
431 | } else if (err == UBI_IO_BITFLIPS) | 431 | } else if (err == UBI_IO_BITFLIPS) |
432 | scrub = 1; | 432 | scrub = 1; |
433 | 433 | ||
434 | ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs)); | 434 | ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs)); |
435 | ubi_assert(len == be32_to_cpu(vid_hdr->data_size)); | 435 | ubi_assert(len == be32_to_cpu(vid_hdr->data_size)); |
436 | 436 | ||
437 | crc = be32_to_cpu(vid_hdr->data_crc); | 437 | crc = be32_to_cpu(vid_hdr->data_crc); |
438 | ubi_free_vid_hdr(ubi, vid_hdr); | 438 | ubi_free_vid_hdr(ubi, vid_hdr); |
439 | } | 439 | } |
440 | 440 | ||
441 | err = ubi_io_read_data(ubi, buf, pnum, offset, len); | 441 | err = ubi_io_read_data(ubi, buf, pnum, offset, len); |
442 | if (err) { | 442 | if (err) { |
443 | if (err == UBI_IO_BITFLIPS) { | 443 | if (err == UBI_IO_BITFLIPS) { |
444 | scrub = 1; | 444 | scrub = 1; |
445 | err = 0; | 445 | err = 0; |
446 | } else if (mtd_is_eccerr(err)) { | 446 | } else if (mtd_is_eccerr(err)) { |
447 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) | 447 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) |
448 | goto out_unlock; | 448 | goto out_unlock; |
449 | scrub = 1; | 449 | scrub = 1; |
450 | if (!check) { | 450 | if (!check) { |
451 | ubi_msg("force data checking"); | 451 | ubi_msg("force data checking"); |
452 | check = 1; | 452 | check = 1; |
453 | goto retry; | 453 | goto retry; |
454 | } | 454 | } |
455 | } else | 455 | } else |
456 | goto out_unlock; | 456 | goto out_unlock; |
457 | } | 457 | } |
458 | 458 | ||
459 | if (check) { | 459 | if (check) { |
460 | uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len); | 460 | uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len); |
461 | if (crc1 != crc) { | 461 | if (crc1 != crc) { |
462 | ubi_warn("CRC error: calculated %#08x, must be %#08x", | 462 | ubi_warn("CRC error: calculated %#08x, must be %#08x", |
463 | crc1, crc); | 463 | crc1, crc); |
464 | err = -EBADMSG; | 464 | err = -EBADMSG; |
465 | goto out_unlock; | 465 | goto out_unlock; |
466 | } | 466 | } |
467 | } | 467 | } |
468 | 468 | ||
469 | if (scrub) | 469 | if (scrub) |
470 | err = ubi_wl_scrub_peb(ubi, pnum); | 470 | err = ubi_wl_scrub_peb(ubi, pnum); |
471 | 471 | ||
472 | leb_read_unlock(ubi, vol_id, lnum); | 472 | leb_read_unlock(ubi, vol_id, lnum); |
473 | return err; | 473 | return err; |
474 | 474 | ||
475 | out_free: | 475 | out_free: |
476 | ubi_free_vid_hdr(ubi, vid_hdr); | 476 | ubi_free_vid_hdr(ubi, vid_hdr); |
477 | out_unlock: | 477 | out_unlock: |
478 | leb_read_unlock(ubi, vol_id, lnum); | 478 | leb_read_unlock(ubi, vol_id, lnum); |
479 | return err; | 479 | return err; |
480 | } | 480 | } |
481 | 481 | ||
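Illustration only (not part of this change): a minimal read sketch, assuming @buf is at least @len bytes and that the internal ubi.h declarations are in scope. The helper name is hypothetical; the behaviour described in the comment follows the function above.

/* Hypothetical helper: read @len bytes from the start of LEB @lnum. */
static int example_read_leb(struct ubi_device *ubi, struct ubi_volume *vol,
			    int lnum, void *buf, int len)
{
	/*
	 * @check = 1 forces a data CRC check for static volumes; the function
	 * clears it itself for dynamic volumes. -EBADMSG means a CRC mismatch
	 * or an uncorrectable ECC error; an unmapped LEB reads back as 0xFF
	 * bytes with a zero return code.
	 */
	return ubi_eba_read_leb(ubi, vol, lnum, buf, 0, len, 1);
}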
482 | /** | 482 | /** |
483 | * recover_peb - recover from write failure. | 483 | * recover_peb - recover from write failure. |
484 | * @ubi: UBI device description object | 484 | * @ubi: UBI device description object |
485 | * @pnum: the physical eraseblock to recover | 485 | * @pnum: the physical eraseblock to recover |
486 | * @vol_id: volume ID | 486 | * @vol_id: volume ID |
487 | * @lnum: logical eraseblock number | 487 | * @lnum: logical eraseblock number |
488 | * @buf: data which was not written because of the write failure | 488 | * @buf: data which was not written because of the write failure |
489 | * @offset: offset of the failed write | 489 | * @offset: offset of the failed write |
490 | * @len: how many bytes should have been written | 490 | * @len: how many bytes should have been written |
491 | * | 491 | * |
492 | * This function is called in case of a write failure and moves all good data | 492 | * This function is called in case of a write failure and moves all good data |
493 | * from the potentially bad physical eraseblock to a good physical eraseblock. | 493 | * from the potentially bad physical eraseblock to a good physical eraseblock. |
494 | * This function also writes the data which was not written due to the failure. | 494 | * This function also writes the data which was not written due to the failure. |
495 | * Returns new physical eraseblock number in case of success, and a negative | 495 | * Returns new physical eraseblock number in case of success, and a negative |
496 | * error code in case of failure. | 496 | * error code in case of failure. |
497 | */ | 497 | */ |
498 | static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum, | 498 | static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum, |
499 | const void *buf, int offset, int len) | 499 | const void *buf, int offset, int len) |
500 | { | 500 | { |
501 | int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0; | 501 | int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0; |
502 | struct ubi_volume *vol = ubi->volumes[idx]; | 502 | struct ubi_volume *vol = ubi->volumes[idx]; |
503 | struct ubi_vid_hdr *vid_hdr; | 503 | struct ubi_vid_hdr *vid_hdr; |
504 | 504 | ||
505 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); | 505 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
506 | if (!vid_hdr) | 506 | if (!vid_hdr) |
507 | return -ENOMEM; | 507 | return -ENOMEM; |
508 | 508 | ||
509 | retry: | 509 | retry: |
510 | new_pnum = ubi_wl_get_peb(ubi); | 510 | new_pnum = ubi_wl_get_peb(ubi); |
511 | if (new_pnum < 0) { | 511 | if (new_pnum < 0) { |
512 | ubi_free_vid_hdr(ubi, vid_hdr); | 512 | ubi_free_vid_hdr(ubi, vid_hdr); |
513 | return new_pnum; | 513 | return new_pnum; |
514 | } | 514 | } |
515 | 515 | ||
516 | ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum); | 516 | ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum); |
517 | 517 | ||
518 | err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1); | 518 | err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1); |
519 | if (err && err != UBI_IO_BITFLIPS) { | 519 | if (err && err != UBI_IO_BITFLIPS) { |
520 | if (err > 0) | 520 | if (err > 0) |
521 | err = -EIO; | 521 | err = -EIO; |
522 | goto out_put; | 522 | goto out_put; |
523 | } | 523 | } |
524 | 524 | ||
525 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); | 525 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); |
526 | err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr); | 526 | err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr); |
527 | if (err) | 527 | if (err) |
528 | goto write_error; | 528 | goto write_error; |
529 | 529 | ||
530 | data_size = offset + len; | 530 | data_size = offset + len; |
531 | mutex_lock(&ubi->buf_mutex); | 531 | mutex_lock(&ubi->buf_mutex); |
532 | memset(ubi->peb_buf + offset, 0xFF, len); | 532 | memset(ubi->peb_buf + offset, 0xFF, len); |
533 | 533 | ||
534 | /* Read everything before the area where the write failure happened */ | 534 | /* Read everything before the area where the write failure happened */ |
535 | if (offset > 0) { | 535 | if (offset > 0) { |
536 | err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset); | 536 | err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset); |
537 | if (err && err != UBI_IO_BITFLIPS) | 537 | if (err && err != UBI_IO_BITFLIPS) |
538 | goto out_unlock; | 538 | goto out_unlock; |
539 | } | 539 | } |
540 | 540 | ||
541 | memcpy(ubi->peb_buf + offset, buf, len); | 541 | memcpy(ubi->peb_buf + offset, buf, len); |
542 | 542 | ||
543 | err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size); | 543 | err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size); |
544 | if (err) { | 544 | if (err) { |
545 | mutex_unlock(&ubi->buf_mutex); | 545 | mutex_unlock(&ubi->buf_mutex); |
546 | goto write_error; | 546 | goto write_error; |
547 | } | 547 | } |
548 | 548 | ||
549 | mutex_unlock(&ubi->buf_mutex); | 549 | mutex_unlock(&ubi->buf_mutex); |
550 | ubi_free_vid_hdr(ubi, vid_hdr); | 550 | ubi_free_vid_hdr(ubi, vid_hdr); |
551 | 551 | ||
552 | vol->eba_tbl[lnum] = new_pnum; | 552 | vol->eba_tbl[lnum] = new_pnum; |
553 | ubi_wl_put_peb(ubi, pnum, 1); | 553 | ubi_wl_put_peb(ubi, pnum, 1); |
554 | 554 | ||
555 | ubi_msg("data was successfully recovered"); | 555 | ubi_msg("data was successfully recovered"); |
556 | return 0; | 556 | return 0; |
557 | 557 | ||
558 | out_unlock: | 558 | out_unlock: |
559 | mutex_unlock(&ubi->buf_mutex); | 559 | mutex_unlock(&ubi->buf_mutex); |
560 | out_put: | 560 | out_put: |
561 | ubi_wl_put_peb(ubi, new_pnum, 1); | 561 | ubi_wl_put_peb(ubi, new_pnum, 1); |
562 | ubi_free_vid_hdr(ubi, vid_hdr); | 562 | ubi_free_vid_hdr(ubi, vid_hdr); |
563 | return err; | 563 | return err; |
564 | 564 | ||
565 | write_error: | 565 | write_error: |
566 | /* | 566 | /* |
567 | * Bad luck? This physical eraseblock is bad too? Crud. Let's try to | 567 | * Bad luck? This physical eraseblock is bad too? Crud. Let's try to |
568 | * get another one. | 568 | * get another one. |
569 | */ | 569 | */ |
570 | ubi_warn("failed to write to PEB %d", new_pnum); | 570 | ubi_warn("failed to write to PEB %d", new_pnum); |
571 | ubi_wl_put_peb(ubi, new_pnum, 1); | 571 | ubi_wl_put_peb(ubi, new_pnum, 1); |
572 | if (++tries > UBI_IO_RETRIES) { | 572 | if (++tries > UBI_IO_RETRIES) { |
573 | ubi_free_vid_hdr(ubi, vid_hdr); | 573 | ubi_free_vid_hdr(ubi, vid_hdr); |
574 | return err; | 574 | return err; |
575 | } | 575 | } |
576 | ubi_msg("try again"); | 576 | ubi_msg("try again"); |
577 | goto retry; | 577 | goto retry; |
578 | } | 578 | } |
579 | 579 | ||
580 | /** | 580 | /** |
581 | * ubi_eba_write_leb - write data to dynamic volume. | 581 | * ubi_eba_write_leb - write data to dynamic volume. |
582 | * @ubi: UBI device description object | 582 | * @ubi: UBI device description object |
583 | * @vol: volume description object | 583 | * @vol: volume description object |
584 | * @lnum: logical eraseblock number | 584 | * @lnum: logical eraseblock number |
585 | * @buf: the data to write | 585 | * @buf: the data to write |
586 | * @offset: offset within the logical eraseblock where to write | 586 | * @offset: offset within the logical eraseblock where to write |
587 | * @len: how many bytes to write | 587 | * @len: how many bytes to write |
588 | * | 588 | * |
589 | * This function writes data to logical eraseblock @lnum of a dynamic volume | 589 | * This function writes data to logical eraseblock @lnum of a dynamic volume |
590 | * @vol. Returns zero in case of success and a negative error code in case | 590 | * @vol. Returns zero in case of success and a negative error code in case |
591 | * of failure. In case of error, it is possible that something was still | 591 | * of failure. In case of error, it is possible that something was still |
592 | * written to the flash media, but it may be some garbage. | 592 | * written to the flash media, but it may be some garbage. |
593 | */ | 593 | */ |
594 | int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, | 594 | int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, |
595 | const void *buf, int offset, int len) | 595 | const void *buf, int offset, int len) |
596 | { | 596 | { |
597 | int err, pnum, tries = 0, vol_id = vol->vol_id; | 597 | int err, pnum, tries = 0, vol_id = vol->vol_id; |
598 | struct ubi_vid_hdr *vid_hdr; | 598 | struct ubi_vid_hdr *vid_hdr; |
599 | 599 | ||
600 | if (ubi->ro_mode) | 600 | if (ubi->ro_mode) |
601 | return -EROFS; | 601 | return -EROFS; |
602 | 602 | ||
603 | err = leb_write_lock(ubi, vol_id, lnum); | 603 | err = leb_write_lock(ubi, vol_id, lnum); |
604 | if (err) | 604 | if (err) |
605 | return err; | 605 | return err; |
606 | 606 | ||
607 | pnum = vol->eba_tbl[lnum]; | 607 | pnum = vol->eba_tbl[lnum]; |
608 | if (pnum >= 0) { | 608 | if (pnum >= 0) { |
609 | dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d", | 609 | dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d", |
610 | len, offset, vol_id, lnum, pnum); | 610 | len, offset, vol_id, lnum, pnum); |
611 | 611 | ||
612 | err = ubi_io_write_data(ubi, buf, pnum, offset, len); | 612 | err = ubi_io_write_data(ubi, buf, pnum, offset, len); |
613 | if (err) { | 613 | if (err) { |
614 | ubi_warn("failed to write data to PEB %d", pnum); | 614 | ubi_warn("failed to write data to PEB %d", pnum); |
615 | if (err == -EIO && ubi->bad_allowed) | 615 | if (err == -EIO && ubi->bad_allowed) |
616 | err = recover_peb(ubi, pnum, vol_id, lnum, buf, | 616 | err = recover_peb(ubi, pnum, vol_id, lnum, buf, |
617 | offset, len); | 617 | offset, len); |
618 | if (err) | 618 | if (err) |
619 | ubi_ro_mode(ubi); | 619 | ubi_ro_mode(ubi); |
620 | } | 620 | } |
621 | leb_write_unlock(ubi, vol_id, lnum); | 621 | leb_write_unlock(ubi, vol_id, lnum); |
622 | return err; | 622 | return err; |
623 | } | 623 | } |
624 | 624 | ||
625 | /* | 625 | /* |
626 | * The logical eraseblock is not mapped. We have to get a free physical | 626 | * The logical eraseblock is not mapped. We have to get a free physical |
627 | * eraseblock and write the volume identifier header there first. | 627 | * eraseblock and write the volume identifier header there first. |
628 | */ | 628 | */ |
629 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); | 629 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
630 | if (!vid_hdr) { | 630 | if (!vid_hdr) { |
631 | leb_write_unlock(ubi, vol_id, lnum); | 631 | leb_write_unlock(ubi, vol_id, lnum); |
632 | return -ENOMEM; | 632 | return -ENOMEM; |
633 | } | 633 | } |
634 | 634 | ||
635 | vid_hdr->vol_type = UBI_VID_DYNAMIC; | 635 | vid_hdr->vol_type = UBI_VID_DYNAMIC; |
636 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); | 636 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); |
637 | vid_hdr->vol_id = cpu_to_be32(vol_id); | 637 | vid_hdr->vol_id = cpu_to_be32(vol_id); |
638 | vid_hdr->lnum = cpu_to_be32(lnum); | 638 | vid_hdr->lnum = cpu_to_be32(lnum); |
639 | vid_hdr->compat = ubi_get_compat(ubi, vol_id); | 639 | vid_hdr->compat = ubi_get_compat(ubi, vol_id); |
640 | vid_hdr->data_pad = cpu_to_be32(vol->data_pad); | 640 | vid_hdr->data_pad = cpu_to_be32(vol->data_pad); |
641 | 641 | ||
642 | retry: | 642 | retry: |
643 | pnum = ubi_wl_get_peb(ubi); | 643 | pnum = ubi_wl_get_peb(ubi); |
644 | if (pnum < 0) { | 644 | if (pnum < 0) { |
645 | ubi_free_vid_hdr(ubi, vid_hdr); | 645 | ubi_free_vid_hdr(ubi, vid_hdr); |
646 | leb_write_unlock(ubi, vol_id, lnum); | 646 | leb_write_unlock(ubi, vol_id, lnum); |
647 | return pnum; | 647 | return pnum; |
648 | } | 648 | } |
649 | 649 | ||
650 | dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d", | 650 | dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d", |
651 | len, offset, vol_id, lnum, pnum); | 651 | len, offset, vol_id, lnum, pnum); |
652 | 652 | ||
653 | err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); | 653 | err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); |
654 | if (err) { | 654 | if (err) { |
655 | ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", | 655 | ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", |
656 | vol_id, lnum, pnum); | 656 | vol_id, lnum, pnum); |
657 | goto write_error; | 657 | goto write_error; |
658 | } | 658 | } |
659 | 659 | ||
660 | if (len) { | 660 | if (len) { |
661 | err = ubi_io_write_data(ubi, buf, pnum, offset, len); | 661 | err = ubi_io_write_data(ubi, buf, pnum, offset, len); |
662 | if (err) { | 662 | if (err) { |
663 | ubi_warn("failed to write %d bytes at offset %d of " | 663 | ubi_warn("failed to write %d bytes at offset %d of " |
664 | "LEB %d:%d, PEB %d", len, offset, vol_id, | 664 | "LEB %d:%d, PEB %d", len, offset, vol_id, |
665 | lnum, pnum); | 665 | lnum, pnum); |
666 | goto write_error; | 666 | goto write_error; |
667 | } | 667 | } |
668 | } | 668 | } |
669 | 669 | ||
670 | vol->eba_tbl[lnum] = pnum; | 670 | vol->eba_tbl[lnum] = pnum; |
671 | 671 | ||
672 | leb_write_unlock(ubi, vol_id, lnum); | 672 | leb_write_unlock(ubi, vol_id, lnum); |
673 | ubi_free_vid_hdr(ubi, vid_hdr); | 673 | ubi_free_vid_hdr(ubi, vid_hdr); |
674 | return 0; | 674 | return 0; |
675 | 675 | ||
676 | write_error: | 676 | write_error: |
677 | if (err != -EIO || !ubi->bad_allowed) { | 677 | if (err != -EIO || !ubi->bad_allowed) { |
678 | ubi_ro_mode(ubi); | 678 | ubi_ro_mode(ubi); |
679 | leb_write_unlock(ubi, vol_id, lnum); | 679 | leb_write_unlock(ubi, vol_id, lnum); |
680 | ubi_free_vid_hdr(ubi, vid_hdr); | 680 | ubi_free_vid_hdr(ubi, vid_hdr); |
681 | return err; | 681 | return err; |
682 | } | 682 | } |
683 | 683 | ||
684 | /* | 684 | /* |
685 | * Fortunately, this is the first write operation to this physical | 685 | * Fortunately, this is the first write operation to this physical |
686 | * eraseblock, so just put it and request a new one. We assume that if | 686 | * eraseblock, so just put it and request a new one. We assume that if |
687 | * this physical eraseblock went bad, the erase code will handle that. | 687 | * this physical eraseblock went bad, the erase code will handle that. |
688 | */ | 688 | */ |
689 | err = ubi_wl_put_peb(ubi, pnum, 1); | 689 | err = ubi_wl_put_peb(ubi, pnum, 1); |
690 | if (err || ++tries > UBI_IO_RETRIES) { | 690 | if (err || ++tries > UBI_IO_RETRIES) { |
691 | ubi_ro_mode(ubi); | 691 | ubi_ro_mode(ubi); |
692 | leb_write_unlock(ubi, vol_id, lnum); | 692 | leb_write_unlock(ubi, vol_id, lnum); |
693 | ubi_free_vid_hdr(ubi, vid_hdr); | 693 | ubi_free_vid_hdr(ubi, vid_hdr); |
694 | return err; | 694 | return err; |
695 | } | 695 | } |
696 | 696 | ||
697 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); | 697 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); |
698 | ubi_msg("try another PEB"); | 698 | ubi_msg("try another PEB"); |
699 | goto retry; | 699 | goto retry; |
700 | } | 700 | } |
701 | 701 | ||
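Illustration only (not part of this change): a minimal write sketch for a dynamic volume. Alignment of @offset and @len to the minimal I/O unit is assumed here rather than checked; the helper name is hypothetical and the internal ubi.h declarations are assumed to be in scope.

/* Hypothetical helper: write @len bytes at @offset of LEB @lnum. */
static int example_write_leb(struct ubi_device *ubi, struct ubi_volume *vol,
			     int lnum, const void *buf, int offset, int len)
{
	int err;

	err = ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len);
	if (err)
		/*
		 * Some garbage may still have reached the flash; the caller
		 * must treat the LEB contents as undefined.
		 */
		return err;

	return 0;
}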
702 | /** | 702 | /** |
703 | * ubi_eba_write_leb_st - write data to static volume. | 703 | * ubi_eba_write_leb_st - write data to static volume. |
704 | * @ubi: UBI device description object | 704 | * @ubi: UBI device description object |
705 | * @vol: volume description object | 705 | * @vol: volume description object |
706 | * @lnum: logical eraseblock number | 706 | * @lnum: logical eraseblock number |
707 | * @buf: data to write | 707 | * @buf: data to write |
708 | * @len: how many bytes to write | 708 | * @len: how many bytes to write |
709 | * @used_ebs: how many logical eraseblocks will this volume contain | 709 | * @used_ebs: how many logical eraseblocks will this volume contain |
710 | * | 710 | * |
711 | * This function writes data to logical eraseblock @lnum of static volume | 711 | * This function writes data to logical eraseblock @lnum of static volume |
712 | * @vol. The @used_ebs argument should contain the total number of logical | 712 | * @vol. The @used_ebs argument should contain the total number of logical |
713 | * eraseblocks in this static volume. | 713 | * eraseblocks in this static volume. |
714 | * | 714 | * |
715 | * When writing to the last logical eraseblock, the @len argument doesn't have | 715 | * When writing to the last logical eraseblock, the @len argument doesn't have |
716 | * to be aligned to the minimal I/O unit size. Instead, it has to be equivalent | 716 | * to be aligned to the minimal I/O unit size. Instead, it has to be equivalent |
717 | * to the real data size, although the @buf buffer has to contain the | 717 | * to the real data size, although the @buf buffer has to contain the |
718 | * alignment. In all other cases, @len has to be aligned. | 718 | * alignment. In all other cases, @len has to be aligned. |
719 | * | 719 | * |
720 | * It is prohibited to write more than once to logical eraseblocks of static | 720 | * It is prohibited to write more than once to logical eraseblocks of static |
721 | * volumes. This function returns zero in case of success and a negative error | 721 | * volumes. This function returns zero in case of success and a negative error |
722 | * code in case of failure. | 722 | * code in case of failure. |
723 | */ | 723 | */ |
724 | int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, | 724 | int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, |
725 | int lnum, const void *buf, int len, int used_ebs) | 725 | int lnum, const void *buf, int len, int used_ebs) |
726 | { | 726 | { |
727 | int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id; | 727 | int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id; |
728 | struct ubi_vid_hdr *vid_hdr; | 728 | struct ubi_vid_hdr *vid_hdr; |
729 | uint32_t crc; | 729 | uint32_t crc; |
730 | 730 | ||
731 | if (ubi->ro_mode) | 731 | if (ubi->ro_mode) |
732 | return -EROFS; | 732 | return -EROFS; |
733 | 733 | ||
734 | if (lnum == used_ebs - 1) | 734 | if (lnum == used_ebs - 1) |
735 | /* If this is the last LEB @len may be unaligned */ | 735 | /* If this is the last LEB @len may be unaligned */ |
736 | len = ALIGN(data_size, ubi->min_io_size); | 736 | len = ALIGN(data_size, ubi->min_io_size); |
737 | else | 737 | else |
738 | ubi_assert(!(len & (ubi->min_io_size - 1))); | 738 | ubi_assert(!(len & (ubi->min_io_size - 1))); |
739 | 739 | ||
740 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); | 740 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
741 | if (!vid_hdr) | 741 | if (!vid_hdr) |
742 | return -ENOMEM; | 742 | return -ENOMEM; |
743 | 743 | ||
744 | err = leb_write_lock(ubi, vol_id, lnum); | 744 | err = leb_write_lock(ubi, vol_id, lnum); |
745 | if (err) { | 745 | if (err) { |
746 | ubi_free_vid_hdr(ubi, vid_hdr); | 746 | ubi_free_vid_hdr(ubi, vid_hdr); |
747 | return err; | 747 | return err; |
748 | } | 748 | } |
749 | 749 | ||
750 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); | 750 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); |
751 | vid_hdr->vol_id = cpu_to_be32(vol_id); | 751 | vid_hdr->vol_id = cpu_to_be32(vol_id); |
752 | vid_hdr->lnum = cpu_to_be32(lnum); | 752 | vid_hdr->lnum = cpu_to_be32(lnum); |
753 | vid_hdr->compat = ubi_get_compat(ubi, vol_id); | 753 | vid_hdr->compat = ubi_get_compat(ubi, vol_id); |
754 | vid_hdr->data_pad = cpu_to_be32(vol->data_pad); | 754 | vid_hdr->data_pad = cpu_to_be32(vol->data_pad); |
755 | 755 | ||
756 | crc = crc32(UBI_CRC32_INIT, buf, data_size); | 756 | crc = crc32(UBI_CRC32_INIT, buf, data_size); |
757 | vid_hdr->vol_type = UBI_VID_STATIC; | 757 | vid_hdr->vol_type = UBI_VID_STATIC; |
758 | vid_hdr->data_size = cpu_to_be32(data_size); | 758 | vid_hdr->data_size = cpu_to_be32(data_size); |
759 | vid_hdr->used_ebs = cpu_to_be32(used_ebs); | 759 | vid_hdr->used_ebs = cpu_to_be32(used_ebs); |
760 | vid_hdr->data_crc = cpu_to_be32(crc); | 760 | vid_hdr->data_crc = cpu_to_be32(crc); |
761 | 761 | ||
762 | retry: | 762 | retry: |
763 | pnum = ubi_wl_get_peb(ubi); | 763 | pnum = ubi_wl_get_peb(ubi); |
764 | if (pnum < 0) { | 764 | if (pnum < 0) { |
765 | ubi_free_vid_hdr(ubi, vid_hdr); | 765 | ubi_free_vid_hdr(ubi, vid_hdr); |
766 | leb_write_unlock(ubi, vol_id, lnum); | 766 | leb_write_unlock(ubi, vol_id, lnum); |
767 | return pnum; | 767 | return pnum; |
768 | } | 768 | } |
769 | 769 | ||
770 | dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d", | 770 | dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d", |
771 | len, vol_id, lnum, pnum, used_ebs); | 771 | len, vol_id, lnum, pnum, used_ebs); |
772 | 772 | ||
773 | err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); | 773 | err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); |
774 | if (err) { | 774 | if (err) { |
775 | ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", | 775 | ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", |
776 | vol_id, lnum, pnum); | 776 | vol_id, lnum, pnum); |
777 | goto write_error; | 777 | goto write_error; |
778 | } | 778 | } |
779 | 779 | ||
780 | err = ubi_io_write_data(ubi, buf, pnum, 0, len); | 780 | err = ubi_io_write_data(ubi, buf, pnum, 0, len); |
781 | if (err) { | 781 | if (err) { |
782 | ubi_warn("failed to write %d bytes of data to PEB %d", | 782 | ubi_warn("failed to write %d bytes of data to PEB %d", |
783 | len, pnum); | 783 | len, pnum); |
784 | goto write_error; | 784 | goto write_error; |
785 | } | 785 | } |
786 | 786 | ||
787 | ubi_assert(vol->eba_tbl[lnum] < 0); | 787 | ubi_assert(vol->eba_tbl[lnum] < 0); |
788 | vol->eba_tbl[lnum] = pnum; | 788 | vol->eba_tbl[lnum] = pnum; |
789 | 789 | ||
790 | leb_write_unlock(ubi, vol_id, lnum); | 790 | leb_write_unlock(ubi, vol_id, lnum); |
791 | ubi_free_vid_hdr(ubi, vid_hdr); | 791 | ubi_free_vid_hdr(ubi, vid_hdr); |
792 | return 0; | 792 | return 0; |
793 | 793 | ||
794 | write_error: | 794 | write_error: |
795 | if (err != -EIO || !ubi->bad_allowed) { | 795 | if (err != -EIO || !ubi->bad_allowed) { |
796 | /* | 796 | /* |
797 | * This flash device does not admit of bad eraseblocks or | 797 | * This flash device does not admit of bad eraseblocks or |
798 | * something nasty and unexpected happened. Switch to read-only | 798 | * something nasty and unexpected happened. Switch to read-only |
799 | * mode just in case. | 799 | * mode just in case. |
800 | */ | 800 | */ |
801 | ubi_ro_mode(ubi); | 801 | ubi_ro_mode(ubi); |
802 | leb_write_unlock(ubi, vol_id, lnum); | 802 | leb_write_unlock(ubi, vol_id, lnum); |
803 | ubi_free_vid_hdr(ubi, vid_hdr); | 803 | ubi_free_vid_hdr(ubi, vid_hdr); |
804 | return err; | 804 | return err; |
805 | } | 805 | } |
806 | 806 | ||
807 | err = ubi_wl_put_peb(ubi, pnum, 1); | 807 | err = ubi_wl_put_peb(ubi, pnum, 1); |
808 | if (err || ++tries > UBI_IO_RETRIES) { | 808 | if (err || ++tries > UBI_IO_RETRIES) { |
809 | ubi_ro_mode(ubi); | 809 | ubi_ro_mode(ubi); |
810 | leb_write_unlock(ubi, vol_id, lnum); | 810 | leb_write_unlock(ubi, vol_id, lnum); |
811 | ubi_free_vid_hdr(ubi, vid_hdr); | 811 | ubi_free_vid_hdr(ubi, vid_hdr); |
812 | return err; | 812 | return err; |
813 | } | 813 | } |
814 | 814 | ||
815 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); | 815 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); |
816 | ubi_msg("try another PEB"); | 816 | ubi_msg("try another PEB"); |
817 | goto retry; | 817 | goto retry; |
818 | } | 818 | } |
819 | 819 | ||
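To make the last-LEB rule above concrete (illustration only, with hypothetical numbers): with a 512-byte minimal I/O unit and 1000 bytes of real data in the final eraseblock of a static volume, the flash write length is rounded up while the CRC still covers only the real data, matching what the function above does.

	/* Hypothetical numbers: ubi->min_io_size = 512, data_size = len = 1000 */
	len = ALIGN(data_size, ubi->min_io_size);	/* 1024 bytes go to flash  */
	crc = crc32(UBI_CRC32_INIT, buf, data_size);	/* CRC of the 1000 real bytes */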
820 | /** | 820 | /** |
821 | * ubi_eba_atomic_leb_change - change logical eraseblock atomically. | 821 | * ubi_eba_atomic_leb_change - change logical eraseblock atomically. |
822 | * @ubi: UBI device description object | 822 | * @ubi: UBI device description object |
823 | * @vol: volume description object | 823 | * @vol: volume description object |
824 | * @lnum: logical eraseblock number | 824 | * @lnum: logical eraseblock number |
825 | * @buf: data to write | 825 | * @buf: data to write |
826 | * @len: how many bytes to write | 826 | * @len: how many bytes to write |
827 | * | 827 | * |
828 | * This function changes the contents of a logical eraseblock atomically. @buf | 828 | * This function changes the contents of a logical eraseblock atomically. @buf |
829 | * has to contain new logical eraseblock data, and @len - the length of the | 829 | * has to contain new logical eraseblock data, and @len - the length of the |
830 | * data, which has to be aligned. This function guarantees that in case of an | 830 | * data, which has to be aligned. This function guarantees that in case of an |
831 | * unclean reboot the old contents is preserved. Returns zero in case of | 831 | * unclean reboot the old contents is preserved. Returns zero in case of |
832 | * success and a negative error code in case of failure. | 832 | * success and a negative error code in case of failure. |
833 | * | 833 | * |
834 | * UBI reserves one LEB for the "atomic LEB change" operation, so only one | 834 | * UBI reserves one LEB for the "atomic LEB change" operation, so only one |
835 | * LEB change may be done at a time. This is ensured by @ubi->alc_mutex. | 835 | * LEB change may be done at a time. This is ensured by @ubi->alc_mutex. |
836 | */ | 836 | */ |
837 | int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, | 837 | int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, |
838 | int lnum, const void *buf, int len) | 838 | int lnum, const void *buf, int len) |
839 | { | 839 | { |
840 | int err, pnum, tries = 0, vol_id = vol->vol_id; | 840 | int err, pnum, tries = 0, vol_id = vol->vol_id; |
841 | struct ubi_vid_hdr *vid_hdr; | 841 | struct ubi_vid_hdr *vid_hdr; |
842 | uint32_t crc; | 842 | uint32_t crc; |
843 | 843 | ||
844 | if (ubi->ro_mode) | 844 | if (ubi->ro_mode) |
845 | return -EROFS; | 845 | return -EROFS; |
846 | 846 | ||
847 | if (len == 0) { | 847 | if (len == 0) { |
848 | /* | 848 | /* |
849 | * Special case when data length is zero. In this case the LEB | 849 | * Special case when data length is zero. In this case the LEB |
850 | * has to be unmapped and mapped somewhere else. | 850 | * has to be unmapped and mapped somewhere else. |
851 | */ | 851 | */ |
852 | err = ubi_eba_unmap_leb(ubi, vol, lnum); | 852 | err = ubi_eba_unmap_leb(ubi, vol, lnum); |
853 | if (err) | 853 | if (err) |
854 | return err; | 854 | return err; |
855 | return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0); | 855 | return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0); |
856 | } | 856 | } |
857 | 857 | ||
858 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); | 858 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
859 | if (!vid_hdr) | 859 | if (!vid_hdr) |
860 | return -ENOMEM; | 860 | return -ENOMEM; |
861 | 861 | ||
862 | mutex_lock(&ubi->alc_mutex); | 862 | mutex_lock(&ubi->alc_mutex); |
863 | err = leb_write_lock(ubi, vol_id, lnum); | 863 | err = leb_write_lock(ubi, vol_id, lnum); |
864 | if (err) | 864 | if (err) |
865 | goto out_mutex; | 865 | goto out_mutex; |
866 | 866 | ||
867 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); | 867 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); |
868 | vid_hdr->vol_id = cpu_to_be32(vol_id); | 868 | vid_hdr->vol_id = cpu_to_be32(vol_id); |
869 | vid_hdr->lnum = cpu_to_be32(lnum); | 869 | vid_hdr->lnum = cpu_to_be32(lnum); |
870 | vid_hdr->compat = ubi_get_compat(ubi, vol_id); | 870 | vid_hdr->compat = ubi_get_compat(ubi, vol_id); |
871 | vid_hdr->data_pad = cpu_to_be32(vol->data_pad); | 871 | vid_hdr->data_pad = cpu_to_be32(vol->data_pad); |
872 | 872 | ||
873 | crc = crc32(UBI_CRC32_INIT, buf, len); | 873 | crc = crc32(UBI_CRC32_INIT, buf, len); |
874 | vid_hdr->vol_type = UBI_VID_DYNAMIC; | 874 | vid_hdr->vol_type = UBI_VID_DYNAMIC; |
875 | vid_hdr->data_size = cpu_to_be32(len); | 875 | vid_hdr->data_size = cpu_to_be32(len); |
876 | vid_hdr->copy_flag = 1; | 876 | vid_hdr->copy_flag = 1; |
877 | vid_hdr->data_crc = cpu_to_be32(crc); | 877 | vid_hdr->data_crc = cpu_to_be32(crc); |
878 | 878 | ||
879 | retry: | 879 | retry: |
880 | pnum = ubi_wl_get_peb(ubi); | 880 | pnum = ubi_wl_get_peb(ubi); |
881 | if (pnum < 0) { | 881 | if (pnum < 0) { |
882 | err = pnum; | 882 | err = pnum; |
883 | goto out_leb_unlock; | 883 | goto out_leb_unlock; |
884 | } | 884 | } |
885 | 885 | ||
886 | dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d", | 886 | dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d", |
887 | vol_id, lnum, vol->eba_tbl[lnum], pnum); | 887 | vol_id, lnum, vol->eba_tbl[lnum], pnum); |
888 | 888 | ||
889 | err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); | 889 | err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); |
890 | if (err) { | 890 | if (err) { |
891 | ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", | 891 | ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", |
892 | vol_id, lnum, pnum); | 892 | vol_id, lnum, pnum); |
893 | goto write_error; | 893 | goto write_error; |
894 | } | 894 | } |
895 | 895 | ||
896 | err = ubi_io_write_data(ubi, buf, pnum, 0, len); | 896 | err = ubi_io_write_data(ubi, buf, pnum, 0, len); |
897 | if (err) { | 897 | if (err) { |
898 | ubi_warn("failed to write %d bytes of data to PEB %d", | 898 | ubi_warn("failed to write %d bytes of data to PEB %d", |
899 | len, pnum); | 899 | len, pnum); |
900 | goto write_error; | 900 | goto write_error; |
901 | } | 901 | } |
902 | 902 | ||
903 | if (vol->eba_tbl[lnum] >= 0) { | 903 | if (vol->eba_tbl[lnum] >= 0) { |
904 | err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 0); | 904 | err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 0); |
905 | if (err) | 905 | if (err) |
906 | goto out_leb_unlock; | 906 | goto out_leb_unlock; |
907 | } | 907 | } |
908 | 908 | ||
909 | vol->eba_tbl[lnum] = pnum; | 909 | vol->eba_tbl[lnum] = pnum; |
910 | 910 | ||
911 | out_leb_unlock: | 911 | out_leb_unlock: |
912 | leb_write_unlock(ubi, vol_id, lnum); | 912 | leb_write_unlock(ubi, vol_id, lnum); |
913 | out_mutex: | 913 | out_mutex: |
914 | mutex_unlock(&ubi->alc_mutex); | 914 | mutex_unlock(&ubi->alc_mutex); |
915 | ubi_free_vid_hdr(ubi, vid_hdr); | 915 | ubi_free_vid_hdr(ubi, vid_hdr); |
916 | return err; | 916 | return err; |
917 | 917 | ||
918 | write_error: | 918 | write_error: |
919 | if (err != -EIO || !ubi->bad_allowed) { | 919 | if (err != -EIO || !ubi->bad_allowed) { |
920 | /* | 920 | /* |
921 | * This flash device does not admit of bad eraseblocks or | 921 | * This flash device does not admit of bad eraseblocks or |
922 | * something nasty and unexpected happened. Switch to read-only | 922 | * something nasty and unexpected happened. Switch to read-only |
923 | * mode just in case. | 923 | * mode just in case. |
924 | */ | 924 | */ |
925 | ubi_ro_mode(ubi); | 925 | ubi_ro_mode(ubi); |
926 | goto out_leb_unlock; | 926 | goto out_leb_unlock; |
927 | } | 927 | } |
928 | 928 | ||
929 | err = ubi_wl_put_peb(ubi, pnum, 1); | 929 | err = ubi_wl_put_peb(ubi, pnum, 1); |
930 | if (err || ++tries > UBI_IO_RETRIES) { | 930 | if (err || ++tries > UBI_IO_RETRIES) { |
931 | ubi_ro_mode(ubi); | 931 | ubi_ro_mode(ubi); |
932 | goto out_leb_unlock; | 932 | goto out_leb_unlock; |
933 | } | 933 | } |
934 | 934 | ||
935 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); | 935 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); |
936 | ubi_msg("try another PEB"); | 936 | ubi_msg("try another PEB"); |
937 | goto retry; | 937 | goto retry; |
938 | } | 938 | } |
939 | 939 | ||
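Illustration only (not part of this change): a minimal sketch of an atomic LEB update, assuming @len is aligned as required and the internal ubi.h declarations are in scope. On an unclean reboot the LEB holds either the old or the new contents, never a mix, and only one such change runs at a time because of @ubi->alc_mutex.

/* Hypothetical helper: replace the contents of LEB @lnum with @len bytes of @buf. */
static int example_change_leb(struct ubi_device *ubi, struct ubi_volume *vol,
			      int lnum, const void *buf, int len)
{
	/* @len == 0 is handled internally by un-mapping and re-mapping the LEB */
	return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len);
}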
940 | /** | 940 | /** |
941 | * is_error_sane - check whether a read error is sane. | 941 | * is_error_sane - check whether a read error is sane. |
942 | * @err: code of the error happened during reading | 942 | * @err: code of the error happened during reading |
943 | * | 943 | * |
944 | * This is a helper function for 'ubi_eba_copy_leb()' which is called when we | 944 | * This is a helper function for 'ubi_eba_copy_leb()' which is called when we |
945 | * cannot read data from the target PEB (an error @err happened). If the error | 945 | * cannot read data from the target PEB (an error @err happened). If the error |
946 | * code is sane, then we treat this error as non-fatal. Otherwise the error is | 946 | * code is sane, then we treat this error as non-fatal. Otherwise the error is |
947 | * fatal and UBI will be switched to R/O mode later. | 947 | * fatal and UBI will be switched to R/O mode later. |
948 | * | 948 | * |
949 | * The idea is that we try not to switch to R/O mode if the read error is | 949 | * The idea is that we try not to switch to R/O mode if the read error is |
950 | * something which suggests there was a real read problem. E.g., %-EIO. Or a | 950 | * something which suggests there was a real read problem. E.g., %-EIO. Or a |
951 | * memory allocation failed (-%ENOMEM). Otherwise, it is safer to switch to R/O | 951 | * memory allocation failed (-%ENOMEM). Otherwise, it is safer to switch to R/O |
952 | * mode, simply because we do not know what happened at the MTD level, and we | 952 | * mode, simply because we do not know what happened at the MTD level, and we |
953 | * cannot handle this. E.g., the underlying driver may have become crazy, and | 953 | * cannot handle this. E.g., the underlying driver may have become crazy, and |
954 | * it is safer to switch to R/O mode to preserve the data. | 954 | * it is safer to switch to R/O mode to preserve the data. |
955 | * | 955 | * |
956 | * And bear in mind, this is about reading from the target PEB, i.e. the PEB | 956 | * And bear in mind, this is about reading from the target PEB, i.e. the PEB |
957 | * which we have just written. | 957 | * which we have just written. |
958 | */ | 958 | */ |
959 | static int is_error_sane(int err) | 959 | static int is_error_sane(int err) |
960 | { | 960 | { |
961 | if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR || | 961 | if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR || |
962 | err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT) | 962 | err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT) |
963 | return 0; | 963 | return 0; |
964 | return 1; | 964 | return 1; |
965 | } | 965 | } |
966 | 966 | ||
967 | /** | 967 | /** |
968 | * ubi_eba_copy_leb - copy logical eraseblock. | 968 | * ubi_eba_copy_leb - copy logical eraseblock. |
969 | * @ubi: UBI device description object | 969 | * @ubi: UBI device description object |
970 | * @from: physical eraseblock number from where to copy | 970 | * @from: physical eraseblock number from where to copy |
971 | * @to: physical eraseblock number where to copy | 971 | * @to: physical eraseblock number where to copy |
972 | * @vid_hdr: VID header of the @from physical eraseblock | 972 | * @vid_hdr: VID header of the @from physical eraseblock |
973 | * | 973 | * |
974 | * This function copies logical eraseblock from physical eraseblock @from to | 974 | * This function copies logical eraseblock from physical eraseblock @from to |
975 | * physical eraseblock @to. The @vid_hdr buffer may be changed by this | 975 | * physical eraseblock @to. The @vid_hdr buffer may be changed by this |
976 | * function. Returns: | 976 | * function. Returns: |
977 | * o %0 in case of success; | 977 | * o %0 in case of success; |
978 | * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc; | 978 | * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc; |
979 | * o a negative error code in case of failure. | 979 | * o a negative error code in case of failure. |
980 | */ | 980 | */ |
981 | int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | 981 | int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, |
982 | struct ubi_vid_hdr *vid_hdr) | 982 | struct ubi_vid_hdr *vid_hdr) |
983 | { | 983 | { |
984 | int err, vol_id, lnum, data_size, aldata_size, idx; | 984 | int err, vol_id, lnum, data_size, aldata_size, idx; |
985 | struct ubi_volume *vol; | 985 | struct ubi_volume *vol; |
986 | uint32_t crc; | 986 | uint32_t crc; |
987 | 987 | ||
988 | vol_id = be32_to_cpu(vid_hdr->vol_id); | 988 | vol_id = be32_to_cpu(vid_hdr->vol_id); |
989 | lnum = be32_to_cpu(vid_hdr->lnum); | 989 | lnum = be32_to_cpu(vid_hdr->lnum); |
990 | 990 | ||
991 | dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to); | 991 | dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to); |
992 | 992 | ||
993 | if (vid_hdr->vol_type == UBI_VID_STATIC) { | 993 | if (vid_hdr->vol_type == UBI_VID_STATIC) { |
994 | data_size = be32_to_cpu(vid_hdr->data_size); | 994 | data_size = be32_to_cpu(vid_hdr->data_size); |
995 | aldata_size = ALIGN(data_size, ubi->min_io_size); | 995 | aldata_size = ALIGN(data_size, ubi->min_io_size); |
996 | } else | 996 | } else |
997 | data_size = aldata_size = | 997 | data_size = aldata_size = |
998 | ubi->leb_size - be32_to_cpu(vid_hdr->data_pad); | 998 | ubi->leb_size - be32_to_cpu(vid_hdr->data_pad); |
999 | 999 | ||
1000 | idx = vol_id2idx(ubi, vol_id); | 1000 | idx = vol_id2idx(ubi, vol_id); |
1001 | spin_lock(&ubi->volumes_lock); | 1001 | spin_lock(&ubi->volumes_lock); |
1002 | /* | 1002 | /* |
1003 | * Note, we may race with volume deletion, which means that the volume | 1003 | * Note, we may race with volume deletion, which means that the volume |
1004 | * this logical eraseblock belongs to might be being deleted. Since the | 1004 | * this logical eraseblock belongs to might be being deleted. Since the |
1005 | * volume deletion un-maps all the volume's logical eraseblocks, it will | 1005 | * volume deletion un-maps all the volume's logical eraseblocks, it will |
1006 | * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish. | 1006 | * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish. |
1007 | */ | 1007 | */ |
1008 | vol = ubi->volumes[idx]; | 1008 | vol = ubi->volumes[idx]; |
1009 | spin_unlock(&ubi->volumes_lock); | 1009 | spin_unlock(&ubi->volumes_lock); |
1010 | if (!vol) { | 1010 | if (!vol) { |
1011 | /* No need to do further work, cancel */ | 1011 | /* No need to do further work, cancel */ |
1012 | dbg_wl("volume %d is being removed, cancel", vol_id); | 1012 | dbg_wl("volume %d is being removed, cancel", vol_id); |
1013 | return MOVE_CANCEL_RACE; | 1013 | return MOVE_CANCEL_RACE; |
1014 | } | 1014 | } |
1015 | 1015 | ||
1016 | /* | 1016 | /* |
1017 | * We do not want anybody to write to this logical eraseblock while we | 1017 | * We do not want anybody to write to this logical eraseblock while we |
1018 | * are moving it, so lock it. | 1018 | * are moving it, so lock it. |
1019 | * | 1019 | * |
1020 | * Note, we are using non-waiting locking here, because we cannot sleep | 1020 | * Note, we are using non-waiting locking here, because we cannot sleep |
1021 | * on the LEB, since it may cause deadlocks. Indeed, imagine a task is | 1021 | * on the LEB, since it may cause deadlocks. Indeed, imagine a task is |
1022 | * unmapping the LEB which is mapped to the PEB we are going to move | 1022 | * unmapping the LEB which is mapped to the PEB we are going to move |
1023 | * (@from). This task locks the LEB and goes to sleep in the | 1023 | * (@from). This task locks the LEB and goes to sleep in the |
1024 | * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are | 1024 | * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are |
1025 | * holding @ubi->move_mutex and go to sleep on the LEB lock. So, if the | 1025 | * holding @ubi->move_mutex and go to sleep on the LEB lock. So, if the |
1026 | * LEB is already locked, we just do not move it and return | 1026 | * LEB is already locked, we just do not move it and return |
1027 | * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because | 1027 | * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because |
1028 | * we do not know the reasons of the contention - it may be just a | 1028 | * we do not know the reasons of the contention - it may be just a |
1029 | * normal I/O on this LEB, so we want to re-try. | 1029 | * normal I/O on this LEB, so we want to re-try. |
1030 | */ | 1030 | */ |
1031 | err = leb_write_trylock(ubi, vol_id, lnum); | 1031 | err = leb_write_trylock(ubi, vol_id, lnum); |
1032 | if (err) { | 1032 | if (err) { |
1033 | dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum); | 1033 | dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum); |
1034 | return MOVE_RETRY; | 1034 | return MOVE_RETRY; |
1035 | } | 1035 | } |
1036 | 1036 | ||
1037 | /* | 1037 | /* |
1038 | * The LEB might have been put meanwhile, and the task which put it is | 1038 | * The LEB might have been put meanwhile, and the task which put it is |
1039 | * probably waiting on @ubi->move_mutex. No need to continue the work, | 1039 | * probably waiting on @ubi->move_mutex. No need to continue the work, |
1040 | * cancel it. | 1040 | * cancel it. |
1041 | */ | 1041 | */ |
1042 | if (vol->eba_tbl[lnum] != from) { | 1042 | if (vol->eba_tbl[lnum] != from) { |
1043 | dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to " | 1043 | dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to " |
1044 | "PEB %d, cancel", vol_id, lnum, from, | 1044 | "PEB %d, cancel", vol_id, lnum, from, |
1045 | vol->eba_tbl[lnum]); | 1045 | vol->eba_tbl[lnum]); |
1046 | err = MOVE_CANCEL_RACE; | 1046 | err = MOVE_CANCEL_RACE; |
1047 | goto out_unlock_leb; | 1047 | goto out_unlock_leb; |
1048 | } | 1048 | } |
1049 | 1049 | ||
1050 | /* | 1050 | /* |
1051 | * OK, now the LEB is locked and we can safely start moving it. Since | 1051 | * OK, now the LEB is locked and we can safely start moving it. Since |
1052 | * this function utilizes the @ubi->peb_buf buffer which is shared | 1052 | * this function utilizes the @ubi->peb_buf buffer which is shared |
1053 | * with some other functions - we lock the buffer by taking the | 1053 | * with some other functions - we lock the buffer by taking the |
1054 | * @ubi->buf_mutex. | 1054 | * @ubi->buf_mutex. |
1055 | */ | 1055 | */ |
1056 | mutex_lock(&ubi->buf_mutex); | 1056 | mutex_lock(&ubi->buf_mutex); |
1057 | dbg_wl("read %d bytes of data", aldata_size); | 1057 | dbg_wl("read %d bytes of data", aldata_size); |
1058 | err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size); | 1058 | err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size); |
1059 | if (err && err != UBI_IO_BITFLIPS) { | 1059 | if (err && err != UBI_IO_BITFLIPS) { |
1060 | ubi_warn("error %d while reading data from PEB %d", | 1060 | ubi_warn("error %d while reading data from PEB %d", |
1061 | err, from); | 1061 | err, from); |
1062 | err = MOVE_SOURCE_RD_ERR; | 1062 | err = MOVE_SOURCE_RD_ERR; |
1063 | goto out_unlock_buf; | 1063 | goto out_unlock_buf; |
1064 | } | 1064 | } |
1065 | 1065 | ||
1066 | /* | 1066 | /* |
1067 | * Now we have got to calculate how much data we have to copy. In | 1067 | * Now we have got to calculate how much data we have to copy. In |
1068 | * case of a static volume it is fairly easy - the VID header contains | 1068 | * case of a static volume it is fairly easy - the VID header contains |
1069 | * the data size. In case of a dynamic volume it is more difficult - we | 1069 | * the data size. In case of a dynamic volume it is more difficult - we |
1070 | * have to read the contents, cut 0xFF bytes from the end and copy only | 1070 | * have to read the contents, cut 0xFF bytes from the end and copy only |
1071 | * the first part. We must do this to avoid writing 0xFF bytes as it | 1071 | * the first part. We must do this to avoid writing 0xFF bytes as it |
1072 | * may have some side-effects. And not only this. It is important not | 1072 | * may have some side-effects. And not only this. It is important not |
1073 | * to include those 0xFFs in the CRC because later they may be filled | 1073 | * to include those 0xFFs in the CRC because later they may be filled |
1074 | * by data. | 1074 | * by data. |
1075 | */ | 1075 | */ |
1076 | if (vid_hdr->vol_type == UBI_VID_DYNAMIC) | 1076 | if (vid_hdr->vol_type == UBI_VID_DYNAMIC) |
1077 | aldata_size = data_size = | 1077 | aldata_size = data_size = |
1078 | ubi_calc_data_len(ubi, ubi->peb_buf, data_size); | 1078 | ubi_calc_data_len(ubi, ubi->peb_buf, data_size); |
1079 | 1079 | ||
1080 | cond_resched(); | 1080 | cond_resched(); |
1081 | crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size); | 1081 | crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size); |
1082 | cond_resched(); | 1082 | cond_resched(); |
1083 | 1083 | ||
1084 | /* | 1084 | /* |
1085 | * It may turn out that the whole @from physical eraseblock | 1085 | * It may turn out that the whole @from physical eraseblock |
1086 | * contains only 0xFF bytes. Then we have to write only the VID header | 1086 | * contains only 0xFF bytes. Then we have to write only the VID header |
1087 | * and not write any data. This also means we should not set | 1087 | * and not write any data. This also means we should not set |
1088 | * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc. | 1088 | * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc. |
1089 | */ | 1089 | */ |
1090 | if (data_size > 0) { | 1090 | if (data_size > 0) { |
1091 | vid_hdr->copy_flag = 1; | 1091 | vid_hdr->copy_flag = 1; |
1092 | vid_hdr->data_size = cpu_to_be32(data_size); | 1092 | vid_hdr->data_size = cpu_to_be32(data_size); |
1093 | vid_hdr->data_crc = cpu_to_be32(crc); | 1093 | vid_hdr->data_crc = cpu_to_be32(crc); |
1094 | } | 1094 | } |
1095 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); | 1095 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); |
1096 | 1096 | ||
1097 | err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); | 1097 | err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); |
1098 | if (err) { | 1098 | if (err) { |
1099 | if (err == -EIO) | 1099 | if (err == -EIO) |
1100 | err = MOVE_TARGET_WR_ERR; | 1100 | err = MOVE_TARGET_WR_ERR; |
1101 | goto out_unlock_buf; | 1101 | goto out_unlock_buf; |
1102 | } | 1102 | } |
1103 | 1103 | ||
1104 | cond_resched(); | 1104 | cond_resched(); |
1105 | 1105 | ||
1106 | /* Read the VID header back and check if it was written correctly */ | 1106 | /* Read the VID header back and check if it was written correctly */ |
1107 | err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1); | 1107 | err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1); |
1108 | if (err) { | 1108 | if (err) { |
1109 | if (err != UBI_IO_BITFLIPS) { | 1109 | if (err != UBI_IO_BITFLIPS) { |
1110 | ubi_warn("error %d while reading VID header back from " | 1110 | ubi_warn("error %d while reading VID header back from " |
1111 | "PEB %d", err, to); | 1111 | "PEB %d", err, to); |
1112 | if (is_error_sane(err)) | 1112 | if (is_error_sane(err)) |
1113 | err = MOVE_TARGET_RD_ERR; | 1113 | err = MOVE_TARGET_RD_ERR; |
1114 | } else | 1114 | } else |
1115 | err = MOVE_TARGET_BITFLIPS; | 1115 | err = MOVE_TARGET_BITFLIPS; |
1116 | goto out_unlock_buf; | 1116 | goto out_unlock_buf; |
1117 | } | 1117 | } |
1118 | 1118 | ||
1119 | if (data_size > 0) { | 1119 | if (data_size > 0) { |
1120 | err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size); | 1120 | err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size); |
1121 | if (err) { | 1121 | if (err) { |
1122 | if (err == -EIO) | 1122 | if (err == -EIO) |
1123 | err = MOVE_TARGET_WR_ERR; | 1123 | err = MOVE_TARGET_WR_ERR; |
1124 | goto out_unlock_buf; | 1124 | goto out_unlock_buf; |
1125 | } | 1125 | } |
1126 | 1126 | ||
1127 | cond_resched(); | 1127 | cond_resched(); |
1128 | 1128 | ||
1129 | /* | 1129 | /* |
1130 | * We've written the data and are going to read it back to make | 1130 | * We've written the data and are going to read it back to make |
1131 | * sure it was written correctly. | 1131 | * sure it was written correctly. |
1132 | */ | 1132 | */ |
1133 | memset(ubi->peb_buf, 0xFF, aldata_size); | 1133 | memset(ubi->peb_buf, 0xFF, aldata_size); |
1134 | err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size); | 1134 | err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size); |
1135 | if (err) { | 1135 | if (err) { |
1136 | if (err != UBI_IO_BITFLIPS) { | 1136 | if (err != UBI_IO_BITFLIPS) { |
1137 | ubi_warn("error %d while reading data back " | 1137 | ubi_warn("error %d while reading data back " |
1138 | "from PEB %d", err, to); | 1138 | "from PEB %d", err, to); |
1139 | if (is_error_sane(err)) | 1139 | if (is_error_sane(err)) |
1140 | err = MOVE_TARGET_RD_ERR; | 1140 | err = MOVE_TARGET_RD_ERR; |
1141 | } else | 1141 | } else |
1142 | err = MOVE_TARGET_BITFLIPS; | 1142 | err = MOVE_TARGET_BITFLIPS; |
1143 | goto out_unlock_buf; | 1143 | goto out_unlock_buf; |
1144 | } | 1144 | } |
1145 | 1145 | ||
1146 | cond_resched(); | 1146 | cond_resched(); |
1147 | 1147 | ||
1148 | if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) { | 1148 | if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) { |
1149 | ubi_warn("read data back from PEB %d and it is " | 1149 | ubi_warn("read data back from PEB %d and it is " |
1150 | "different", to); | 1150 | "different", to); |
1151 | err = -EINVAL; | 1151 | err = -EINVAL; |
1152 | goto out_unlock_buf; | 1152 | goto out_unlock_buf; |
1153 | } | 1153 | } |
1154 | } | 1154 | } |
1155 | 1155 | ||
1156 | ubi_assert(vol->eba_tbl[lnum] == from); | 1156 | ubi_assert(vol->eba_tbl[lnum] == from); |
1157 | vol->eba_tbl[lnum] = to; | 1157 | vol->eba_tbl[lnum] = to; |
1158 | 1158 | ||
1159 | out_unlock_buf: | 1159 | out_unlock_buf: |
1160 | mutex_unlock(&ubi->buf_mutex); | 1160 | mutex_unlock(&ubi->buf_mutex); |
1161 | out_unlock_leb: | 1161 | out_unlock_leb: |
1162 | leb_write_unlock(ubi, vol_id, lnum); | 1162 | leb_write_unlock(ubi, vol_id, lnum); |
1163 | return err; | 1163 | return err; |
1164 | } | 1164 | } |
1165 | 1165 | ||
1166 | /** | 1166 | /** |
1167 | * print_rsvd_warning - warn about not having enough reserved PEBs. | 1167 | * print_rsvd_warning - warn about not having enough reserved PEBs. |
1168 | * @ubi: UBI device description object | 1168 | * @ubi: UBI device description object |
1169 | * | 1169 | * |
1170 | * This is a helper function for 'ubi_eba_init_scan()' which is called when UBI | 1170 | * This is a helper function for 'ubi_eba_init()' which is called when UBI |
1171 | * cannot reserve enough PEBs for bad block handling. This function makes a | 1171 | * cannot reserve enough PEBs for bad block handling. This function makes a |
1172 | * decision whether we have to print a warning or not. The algorithm is as | 1172 | * decision whether we have to print a warning or not. The algorithm is as |
1173 | * follows: | 1173 | * follows: |
1174 | * o if this is a new UBI image, then just print the warning | 1174 | * o if this is a new UBI image, then just print the warning |
1175 | * o if this is a UBI image which has already been used for some time, print | 1175 | * o if this is a UBI image which has already been used for some time, print |
1176 | * a warning only if we can reserve less than 10% of the expected number of | 1176 | * a warning only if we can reserve less than 10% of the expected number of |
1177 | * reserved PEBs. | 1177 | * reserved PEBs. |
1178 | * | 1178 | * |
1179 | * The idea is that when UBI is used, PEBs become bad, and the reserved pool | 1179 | * The idea is that when UBI is used, PEBs become bad, and the reserved pool |
1180 | * of PEBs becomes smaller, which is normal and we do not want to scare users | 1180 | * of PEBs becomes smaller, which is normal and we do not want to scare users |
1181 | * with a warning every time they attach the MTD device. This was an issue | 1181 | * with a warning every time they attach the MTD device. This was an issue |
1182 | * reported by real users. | 1182 | * reported by real users. |
1183 | */ | 1183 | */ |
1184 | static void print_rsvd_warning(struct ubi_device *ubi, | 1184 | static void print_rsvd_warning(struct ubi_device *ubi, |
1185 | struct ubi_attach_info *ai) | 1185 | struct ubi_attach_info *ai) |
1186 | { | 1186 | { |
1187 | /* | 1187 | /* |
1188 | * The 1 << 18 (256KiB) number is picked randomly, just a reasonably | 1188 | * The 1 << 18 (256KiB) number is picked randomly, just a reasonably |
1189 | * large number to distinguish between newly flashed and used images. | 1189 | * large number to distinguish between newly flashed and used images. |
1190 | */ | 1190 | */ |
1191 | if (ai->max_sqnum > (1 << 18)) { | 1191 | if (ai->max_sqnum > (1 << 18)) { |
1192 | int min = ubi->beb_rsvd_level / 10; | 1192 | int min = ubi->beb_rsvd_level / 10; |
1193 | 1193 | ||
1194 | if (!min) | 1194 | if (!min) |
1195 | min = 1; | 1195 | min = 1; |
1196 | if (ubi->beb_rsvd_pebs > min) | 1196 | if (ubi->beb_rsvd_pebs > min) |
1197 | return; | 1197 | return; |
1198 | } | 1198 | } |
1199 | 1199 | ||
1200 | ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d," | 1200 | ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d," |
1201 | " need %d", ubi->beb_rsvd_pebs, ubi->beb_rsvd_level); | 1201 | " need %d", ubi->beb_rsvd_pebs, ubi->beb_rsvd_level); |
1202 | if (ubi->corr_peb_count) | 1202 | if (ubi->corr_peb_count) |
1203 | ubi_warn("%d PEBs are corrupted and not used", | 1203 | ubi_warn("%d PEBs are corrupted and not used", |
1204 | ubi->corr_peb_count); | 1204 | ubi->corr_peb_count); |
1205 | } | 1205 | } |
1206 | 1206 | ||
1207 | /** | 1207 | /** |
1208 | * ubi_eba_init_scan - initialize the EBA sub-system using attaching information. | 1208 | * ubi_eba_init - initialize the EBA sub-system using attaching information. |
1209 | * @ubi: UBI device description object | 1209 | * @ubi: UBI device description object |
1210 | * @ai: attaching information | 1210 | * @ai: attaching information |
1211 | * | 1211 | * |
1212 | * This function returns zero in case of success and a negative error code in | 1212 | * This function returns zero in case of success and a negative error code in |
1213 | * case of failure. | 1213 | * case of failure. |
1214 | */ | 1214 | */ |
1215 | int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_attach_info *ai) | 1215 | int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai) |
1216 | { | 1216 | { |
1217 | int i, j, err, num_volumes; | 1217 | int i, j, err, num_volumes; |
1218 | struct ubi_ainf_volume *av; | 1218 | struct ubi_ainf_volume *av; |
1219 | struct ubi_volume *vol; | 1219 | struct ubi_volume *vol; |
1220 | struct ubi_ainf_peb *aeb; | 1220 | struct ubi_ainf_peb *aeb; |
1221 | struct rb_node *rb; | 1221 | struct rb_node *rb; |
1222 | 1222 | ||
1223 | dbg_eba("initialize EBA sub-system"); | 1223 | dbg_eba("initialize EBA sub-system"); |
1224 | 1224 | ||
1225 | spin_lock_init(&ubi->ltree_lock); | 1225 | spin_lock_init(&ubi->ltree_lock); |
1226 | mutex_init(&ubi->alc_mutex); | 1226 | mutex_init(&ubi->alc_mutex); |
1227 | ubi->ltree = RB_ROOT; | 1227 | ubi->ltree = RB_ROOT; |
1228 | 1228 | ||
1229 | ubi->global_sqnum = ai->max_sqnum + 1; | 1229 | ubi->global_sqnum = ai->max_sqnum + 1; |
1230 | num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; | 1230 | num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; |
1231 | 1231 | ||
1232 | for (i = 0; i < num_volumes; i++) { | 1232 | for (i = 0; i < num_volumes; i++) { |
1233 | vol = ubi->volumes[i]; | 1233 | vol = ubi->volumes[i]; |
1234 | if (!vol) | 1234 | if (!vol) |
1235 | continue; | 1235 | continue; |
1236 | 1236 | ||
1237 | cond_resched(); | 1237 | cond_resched(); |
1238 | 1238 | ||
1239 | vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), | 1239 | vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), |
1240 | GFP_KERNEL); | 1240 | GFP_KERNEL); |
1241 | if (!vol->eba_tbl) { | 1241 | if (!vol->eba_tbl) { |
1242 | err = -ENOMEM; | 1242 | err = -ENOMEM; |
1243 | goto out_free; | 1243 | goto out_free; |
1244 | } | 1244 | } |
1245 | 1245 | ||
1246 | for (j = 0; j < vol->reserved_pebs; j++) | 1246 | for (j = 0; j < vol->reserved_pebs; j++) |
1247 | vol->eba_tbl[j] = UBI_LEB_UNMAPPED; | 1247 | vol->eba_tbl[j] = UBI_LEB_UNMAPPED; |
1248 | 1248 | ||
1249 | av = ubi_find_av(ai, idx2vol_id(ubi, i)); | 1249 | av = ubi_find_av(ai, idx2vol_id(ubi, i)); |
1250 | if (!av) | 1250 | if (!av) |
1251 | continue; | 1251 | continue; |
1252 | 1252 | ||
1253 | ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) { | 1253 | ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) { |
1254 | if (aeb->lnum >= vol->reserved_pebs) | 1254 | if (aeb->lnum >= vol->reserved_pebs) |
1255 | /* | 1255 | /* |
1256 | * This may happen in case of an unclean reboot | 1256 | * This may happen in case of an unclean reboot |
1257 | * during re-size. | 1257 | * during re-size. |
1258 | */ | 1258 | */ |
1259 | ubi_move_aeb_to_list(av, aeb, &ai->erase); | 1259 | ubi_move_aeb_to_list(av, aeb, &ai->erase); |
1260 | vol->eba_tbl[aeb->lnum] = aeb->pnum; | 1260 | vol->eba_tbl[aeb->lnum] = aeb->pnum; |
1261 | } | 1261 | } |
1262 | } | 1262 | } |
1263 | 1263 | ||
1264 | if (ubi->avail_pebs < EBA_RESERVED_PEBS) { | 1264 | if (ubi->avail_pebs < EBA_RESERVED_PEBS) { |
1265 | ubi_err("no enough physical eraseblocks (%d, need %d)", | 1265 | ubi_err("no enough physical eraseblocks (%d, need %d)", |
1266 | ubi->avail_pebs, EBA_RESERVED_PEBS); | 1266 | ubi->avail_pebs, EBA_RESERVED_PEBS); |
1267 | if (ubi->corr_peb_count) | 1267 | if (ubi->corr_peb_count) |
1268 | ubi_err("%d PEBs are corrupted and not used", | 1268 | ubi_err("%d PEBs are corrupted and not used", |
1269 | ubi->corr_peb_count); | 1269 | ubi->corr_peb_count); |
1270 | err = -ENOSPC; | 1270 | err = -ENOSPC; |
1271 | goto out_free; | 1271 | goto out_free; |
1272 | } | 1272 | } |
1273 | ubi->avail_pebs -= EBA_RESERVED_PEBS; | 1273 | ubi->avail_pebs -= EBA_RESERVED_PEBS; |
1274 | ubi->rsvd_pebs += EBA_RESERVED_PEBS; | 1274 | ubi->rsvd_pebs += EBA_RESERVED_PEBS; |
1275 | 1275 | ||
1276 | if (ubi->bad_allowed) { | 1276 | if (ubi->bad_allowed) { |
1277 | ubi_calculate_reserved(ubi); | 1277 | ubi_calculate_reserved(ubi); |
1278 | 1278 | ||
1279 | if (ubi->avail_pebs < ubi->beb_rsvd_level) { | 1279 | if (ubi->avail_pebs < ubi->beb_rsvd_level) { |
1280 | /* Not enough free physical eraseblocks */ | 1280 | /* Not enough free physical eraseblocks */ |
1281 | ubi->beb_rsvd_pebs = ubi->avail_pebs; | 1281 | ubi->beb_rsvd_pebs = ubi->avail_pebs; |
1282 | print_rsvd_warning(ubi, ai); | 1282 | print_rsvd_warning(ubi, ai); |
1283 | } else | 1283 | } else |
1284 | ubi->beb_rsvd_pebs = ubi->beb_rsvd_level; | 1284 | ubi->beb_rsvd_pebs = ubi->beb_rsvd_level; |
1285 | 1285 | ||
1286 | ubi->avail_pebs -= ubi->beb_rsvd_pebs; | 1286 | ubi->avail_pebs -= ubi->beb_rsvd_pebs; |
1287 | ubi->rsvd_pebs += ubi->beb_rsvd_pebs; | 1287 | ubi->rsvd_pebs += ubi->beb_rsvd_pebs; |
1288 | } | 1288 | } |
1289 | 1289 | ||
1290 | dbg_eba("EBA sub-system is initialized"); | 1290 | dbg_eba("EBA sub-system is initialized"); |
1291 | return 0; | 1291 | return 0; |
1292 | 1292 | ||
1293 | out_free: | 1293 | out_free: |
1294 | for (i = 0; i < num_volumes; i++) { | 1294 | for (i = 0; i < num_volumes; i++) { |
1295 | if (!ubi->volumes[i]) | 1295 | if (!ubi->volumes[i]) |
1296 | continue; | 1296 | continue; |
1297 | kfree(ubi->volumes[i]->eba_tbl); | 1297 | kfree(ubi->volumes[i]->eba_tbl); |
1298 | ubi->volumes[i]->eba_tbl = NULL; | 1298 | ubi->volumes[i]->eba_tbl = NULL; |
1299 | } | 1299 | } |
1300 | return err; | 1300 | return err; |
1301 | } | 1301 | } |
1302 | 1302 |
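
The comments in 'ubi_eba_init()' above describe the EBA table: each volume gets an eba_tbl array indexed by LEB number, where every entry holds either the PEB the LEB is mapped to or %UBI_LEB_UNMAPPED. The following stand-alone sketch only illustrates that lookup pattern; the struct demo_volume and demo_leb_to_peb() names are hypothetical and are not part of UBI or of this commit.

	/* Illustrative sketch only: mirrors how vol->eba_tbl maps LEBs to PEBs. */
	#include <stdio.h>

	#define UBI_LEB_UNMAPPED -1

	struct demo_volume {            /* hypothetical stand-in for struct ubi_volume */
		int reserved_pebs;
		int eba_tbl[8];         /* LEB number -> PEB number, or UBI_LEB_UNMAPPED */
	};

	static int demo_leb_to_peb(const struct demo_volume *vol, int lnum)
	{
		if (lnum < 0 || lnum >= vol->reserved_pebs)
			return UBI_LEB_UNMAPPED;
		return vol->eba_tbl[lnum];
	}

	int main(void)
	{
		struct demo_volume vol = { .reserved_pebs = 8 };
		int i;

		/* The EBA init code starts with every LEB unmapped ... */
		for (i = 0; i < vol.reserved_pebs; i++)
			vol.eba_tbl[i] = UBI_LEB_UNMAPPED;

		/* ... and then fills in the mappings found by the attach code. */
		vol.eba_tbl[0] = 17;
		vol.eba_tbl[3] = 42;

		printf("LEB 3 -> PEB %d\n", demo_leb_to_peb(&vol, 3));
		printf("LEB 5 -> PEB %d\n", demo_leb_to_peb(&vol, 5));
		return 0;
	}
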
drivers/mtd/ubi/ubi.h
1 | /* | 1 | /* |
2 | * Copyright (c) International Business Machines Corp., 2006 | 2 | * Copyright (c) International Business Machines Corp., 2006 |
3 | * Copyright (c) Nokia Corporation, 2006, 2007 | 3 | * Copyright (c) Nokia Corporation, 2006, 2007 |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation; either version 2 of the License, or | 7 | * the Free Software Foundation; either version 2 of the License, or |
8 | * (at your option) any later version. | 8 | * (at your option) any later version. |
9 | * | 9 | * |
10 | * This program is distributed in the hope that it will be useful, | 10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See |
13 | * the GNU General Public License for more details. | 13 | * the GNU General Public License for more details. |
14 | * | 14 | * |
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | * | 18 | * |
19 | * Author: Artem Bityutskiy (Битюцкий Артём) | 19 | * Author: Artem Bityutskiy (Битюцкий Артём) |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #ifndef __UBI_UBI_H__ | 22 | #ifndef __UBI_UBI_H__ |
23 | #define __UBI_UBI_H__ | 23 | #define __UBI_UBI_H__ |
24 | 24 | ||
25 | #include <linux/init.h> | 25 | #include <linux/init.h> |
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
27 | #include <linux/list.h> | 27 | #include <linux/list.h> |
28 | #include <linux/rbtree.h> | 28 | #include <linux/rbtree.h> |
29 | #include <linux/sched.h> | 29 | #include <linux/sched.h> |
30 | #include <linux/wait.h> | 30 | #include <linux/wait.h> |
31 | #include <linux/mutex.h> | 31 | #include <linux/mutex.h> |
32 | #include <linux/rwsem.h> | 32 | #include <linux/rwsem.h> |
33 | #include <linux/spinlock.h> | 33 | #include <linux/spinlock.h> |
34 | #include <linux/fs.h> | 34 | #include <linux/fs.h> |
35 | #include <linux/cdev.h> | 35 | #include <linux/cdev.h> |
36 | #include <linux/device.h> | 36 | #include <linux/device.h> |
37 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
38 | #include <linux/string.h> | 38 | #include <linux/string.h> |
39 | #include <linux/vmalloc.h> | 39 | #include <linux/vmalloc.h> |
40 | #include <linux/notifier.h> | 40 | #include <linux/notifier.h> |
41 | #include <linux/mtd/mtd.h> | 41 | #include <linux/mtd/mtd.h> |
42 | #include <linux/mtd/ubi.h> | 42 | #include <linux/mtd/ubi.h> |
43 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
44 | 44 | ||
45 | #include "ubi-media.h" | 45 | #include "ubi-media.h" |
46 | #include "scan.h" | 46 | #include "scan.h" |
47 | 47 | ||
48 | /* Maximum number of supported UBI devices */ | 48 | /* Maximum number of supported UBI devices */ |
49 | #define UBI_MAX_DEVICES 32 | 49 | #define UBI_MAX_DEVICES 32 |
50 | 50 | ||
51 | /* UBI name used for character devices, sysfs, etc */ | 51 | /* UBI name used for character devices, sysfs, etc */ |
52 | #define UBI_NAME_STR "ubi" | 52 | #define UBI_NAME_STR "ubi" |
53 | 53 | ||
54 | /* Normal UBI messages */ | 54 | /* Normal UBI messages */ |
55 | #define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__) | 55 | #define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__) |
56 | /* UBI warning messages */ | 56 | /* UBI warning messages */ |
57 | #define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \ | 57 | #define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \ |
58 | __func__, ##__VA_ARGS__) | 58 | __func__, ##__VA_ARGS__) |
59 | /* UBI error messages */ | 59 | /* UBI error messages */ |
60 | #define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \ | 60 | #define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \ |
61 | __func__, ##__VA_ARGS__) | 61 | __func__, ##__VA_ARGS__) |
62 | 62 | ||
63 | /* Lowest number of PEBs reserved for bad PEB handling */ | 63 | /* Lowest number of PEBs reserved for bad PEB handling */ |
64 | #define MIN_RESEVED_PEBS 2 | 64 | #define MIN_RESEVED_PEBS 2 |
65 | 65 | ||
66 | /* Background thread name pattern */ | 66 | /* Background thread name pattern */ |
67 | #define UBI_BGT_NAME_PATTERN "ubi_bgt%dd" | 67 | #define UBI_BGT_NAME_PATTERN "ubi_bgt%dd" |
68 | 68 | ||
69 | /* This marker in the EBA table means that the LEB is un-mapped */ | 69 | /* This marker in the EBA table means that the LEB is un-mapped */ |
70 | #define UBI_LEB_UNMAPPED -1 | 70 | #define UBI_LEB_UNMAPPED -1 |
71 | 71 | ||
72 | /* | 72 | /* |
73 | * In case of errors, UBI tries to repeat the operation several times before | 73 | * In case of errors, UBI tries to repeat the operation several times before |
74 | * returning an error. The below constant defines how many times UBI re-tries. | 74 | * returning an error. The below constant defines how many times UBI re-tries. |
75 | */ | 75 | */ |
76 | #define UBI_IO_RETRIES 3 | 76 | #define UBI_IO_RETRIES 3 |
77 | 77 | ||
78 | /* | 78 | /* |
79 | * Length of the protection queue. The length is effectively equivalent to the | 79 | * Length of the protection queue. The length is effectively equivalent to the |
80 | * number of (global) erase cycles PEBs are protected from the wear-leveling | 80 | * number of (global) erase cycles PEBs are protected from the wear-leveling |
81 | * worker. | 81 | * worker. |
82 | */ | 82 | */ |
83 | #define UBI_PROT_QUEUE_LEN 10 | 83 | #define UBI_PROT_QUEUE_LEN 10 |
84 | 84 | ||
85 | /* | 85 | /* |
86 | * Error codes returned by the I/O sub-system. | 86 | * Error codes returned by the I/O sub-system. |
87 | * | 87 | * |
88 | * UBI_IO_FF: the read region of flash contains only 0xFFs | 88 | * UBI_IO_FF: the read region of flash contains only 0xFFs |
89 | * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also there was a data | 89 | * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also there was a data |
90 | * integrity error reported by the MTD driver | 90 | * integrity error reported by the MTD driver |
91 | * (uncorrectable ECC error in case of NAND) | 91 | * (uncorrectable ECC error in case of NAND) |
92 | * UBI_IO_BAD_HDR: the EC or VID header is corrupted (bad magic or CRC) | 92 | * UBI_IO_BAD_HDR: the EC or VID header is corrupted (bad magic or CRC) |
93 | * UBI_IO_BAD_HDR_EBADMSG: the same as %UBI_IO_BAD_HDR, but also there was a | 93 | * UBI_IO_BAD_HDR_EBADMSG: the same as %UBI_IO_BAD_HDR, but also there was a |
94 | * data integrity error reported by the MTD driver | 94 | * data integrity error reported by the MTD driver |
95 | * (uncorrectable ECC error in case of NAND) | 95 | * (uncorrectable ECC error in case of NAND) |
96 | * UBI_IO_BITFLIPS: bit-flips were detected and corrected | 96 | * UBI_IO_BITFLIPS: bit-flips were detected and corrected |
97 | * | 97 | * |
98 | * Note, it is probably better to have bit-flip and ebadmsg as flags which can | 98 | * Note, it is probably better to have bit-flip and ebadmsg as flags which can |
99 | * be or'ed with other error codes. But this is a big change because there are | 99 | * be or'ed with other error codes. But this is a big change because there are |
100 | * many callers, so it is not worth the risk of introducing a bug | 100 | * many callers, so it is not worth the risk of introducing a bug |
101 | */ | 101 | */ |
102 | enum { | 102 | enum { |
103 | UBI_IO_FF = 1, | 103 | UBI_IO_FF = 1, |
104 | UBI_IO_FF_BITFLIPS, | 104 | UBI_IO_FF_BITFLIPS, |
105 | UBI_IO_BAD_HDR, | 105 | UBI_IO_BAD_HDR, |
106 | UBI_IO_BAD_HDR_EBADMSG, | 106 | UBI_IO_BAD_HDR_EBADMSG, |
107 | UBI_IO_BITFLIPS, | 107 | UBI_IO_BITFLIPS, |
108 | }; | 108 | }; |
109 | 109 | ||
110 | /* | 110 | /* |
111 | * Return codes of the 'ubi_eba_copy_leb()' function. | 111 | * Return codes of the 'ubi_eba_copy_leb()' function. |
112 | * | 112 | * |
113 | * MOVE_CANCEL_RACE: canceled because the volume is being deleted, the source | 113 | * MOVE_CANCEL_RACE: canceled because the volume is being deleted, the source |
114 | * PEB was put meanwhile, or there is I/O on the source PEB | 114 | * PEB was put meanwhile, or there is I/O on the source PEB |
115 | * MOVE_SOURCE_RD_ERR: canceled because there was a read error from the source | 115 | * MOVE_SOURCE_RD_ERR: canceled because there was a read error from the source |
116 | * PEB | 116 | * PEB |
117 | * MOVE_TARGET_RD_ERR: canceled because there was a read error from the target | 117 | * MOVE_TARGET_RD_ERR: canceled because there was a read error from the target |
118 | * PEB | 118 | * PEB |
119 | * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target | 119 | * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target |
120 | * PEB | 120 | * PEB |
121 | * MOVE_TARGET_BITFLIPS: canceled because a bit-flip was detected in the | 121 | * MOVE_TARGET_BITFLIPS: canceled because a bit-flip was detected in the |
122 | * target PEB | 122 | * target PEB |
123 | * MOVE_RETRY: retry scrubbing the PEB | 123 | * MOVE_RETRY: retry scrubbing the PEB |
124 | */ | 124 | */ |
125 | enum { | 125 | enum { |
126 | MOVE_CANCEL_RACE = 1, | 126 | MOVE_CANCEL_RACE = 1, |
127 | MOVE_SOURCE_RD_ERR, | 127 | MOVE_SOURCE_RD_ERR, |
128 | MOVE_TARGET_RD_ERR, | 128 | MOVE_TARGET_RD_ERR, |
129 | MOVE_TARGET_WR_ERR, | 129 | MOVE_TARGET_WR_ERR, |
130 | MOVE_TARGET_BITFLIPS, | 130 | MOVE_TARGET_BITFLIPS, |
131 | MOVE_RETRY, | 131 | MOVE_RETRY, |
132 | }; | 132 | }; |
133 | 133 | ||
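
These codes let the caller of 'ubi_eba_copy_leb()' distinguish between "retry the same move later", "retry with a different target PEB", and "cancel the move entirely". The stand-alone sketch below shows one possible classification of those outcomes; it is only an illustration under that assumption (the classify_move_result() helper and the local enum copy are hypothetical) and is not the real dispatch logic in the wear-leveling code.

	/* Illustrative sketch only: one way to classify the move return codes. */
	#include <stdio.h>

	enum {	/* local copies of the codes for this stand-alone example */
		MOVE_CANCEL_RACE = 1,
		MOVE_SOURCE_RD_ERR,
		MOVE_TARGET_RD_ERR,
		MOVE_TARGET_WR_ERR,
		MOVE_TARGET_BITFLIPS,
		MOVE_RETRY,
	};

	static const char *classify_move_result(int err)
	{
		if (err < 0)
			return "hard error - propagate it";
		switch (err) {
		case 0:
			return "LEB copied successfully";
		case MOVE_RETRY:
			return "temporary LEB contention - retry the move later";
		case MOVE_TARGET_RD_ERR:
		case MOVE_TARGET_WR_ERR:
		case MOVE_TARGET_BITFLIPS:
			return "target PEB is suspect - retry with another target";
		default:
			return "cancel this move (race, source read error, ...)";
		}
	}

	int main(void)
	{
		printf("%s\n", classify_move_result(MOVE_RETRY));
		printf("%s\n", classify_move_result(-5));	/* e.g. -EIO */
		return 0;
	}
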
134 | /** | 134 | /** |
135 | * struct ubi_wl_entry - wear-leveling entry. | 135 | * struct ubi_wl_entry - wear-leveling entry. |
136 | * @u.rb: link in the corresponding (free/used) RB-tree | 136 | * @u.rb: link in the corresponding (free/used) RB-tree |
137 | * @u.list: link in the protection queue | 137 | * @u.list: link in the protection queue |
138 | * @ec: erase counter | 138 | * @ec: erase counter |
139 | * @pnum: physical eraseblock number | 139 | * @pnum: physical eraseblock number |
140 | * | 140 | * |
141 | * This data structure is used in the WL sub-system. Each physical eraseblock | 141 | * This data structure is used in the WL sub-system. Each physical eraseblock |
142 | * has a corresponding &struct ubi_wl_entry object which may be kept in different | 142 | * has a corresponding &struct ubi_wl_entry object which may be kept in different |
143 | * RB-trees. See WL sub-system for details. | 143 | * RB-trees. See WL sub-system for details. |
144 | */ | 144 | */ |
145 | struct ubi_wl_entry { | 145 | struct ubi_wl_entry { |
146 | union { | 146 | union { |
147 | struct rb_node rb; | 147 | struct rb_node rb; |
148 | struct list_head list; | 148 | struct list_head list; |
149 | } u; | 149 | } u; |
150 | int ec; | 150 | int ec; |
151 | int pnum; | 151 | int pnum; |
152 | }; | 152 | }; |
153 | 153 | ||
154 | /** | 154 | /** |
155 | * struct ubi_ltree_entry - an entry in the lock tree. | 155 | * struct ubi_ltree_entry - an entry in the lock tree. |
156 | * @rb: links RB-tree nodes | 156 | * @rb: links RB-tree nodes |
157 | * @vol_id: volume ID of the locked logical eraseblock | 157 | * @vol_id: volume ID of the locked logical eraseblock |
158 | * @lnum: locked logical eraseblock number | 158 | * @lnum: locked logical eraseblock number |
159 | * @users: how many tasks are using this logical eraseblock or wait for it | 159 | * @users: how many tasks are using this logical eraseblock or wait for it |
160 | * @mutex: read/write mutex to implement read/write access serialization to | 160 | * @mutex: read/write mutex to implement read/write access serialization to |
161 | * the (@vol_id, @lnum) logical eraseblock | 161 | * the (@vol_id, @lnum) logical eraseblock |
162 | * | 162 | * |
163 | * This data structure is used in the EBA sub-system to implement per-LEB | 163 | * This data structure is used in the EBA sub-system to implement per-LEB |
164 | * locking. When a logical eraseblock is being locked - a corresponding | 164 | * locking. When a logical eraseblock is being locked - a corresponding |
165 | * &struct ubi_ltree_entry object is inserted into the lock tree (@ubi->ltree). | 165 | * &struct ubi_ltree_entry object is inserted into the lock tree (@ubi->ltree). |
166 | * See EBA sub-system for details. | 166 | * See EBA sub-system for details. |
167 | */ | 167 | */ |
168 | struct ubi_ltree_entry { | 168 | struct ubi_ltree_entry { |
169 | struct rb_node rb; | 169 | struct rb_node rb; |
170 | int vol_id; | 170 | int vol_id; |
171 | int lnum; | 171 | int lnum; |
172 | int users; | 172 | int users; |
173 | struct rw_semaphore mutex; | 173 | struct rw_semaphore mutex; |
174 | }; | 174 | }; |
175 | 175 | ||
176 | /** | 176 | /** |
177 | * struct ubi_rename_entry - volume re-name description data structure. | 177 | * struct ubi_rename_entry - volume re-name description data structure. |
178 | * @new_name_len: new volume name length | 178 | * @new_name_len: new volume name length |
179 | * @new_name: new volume name | 179 | * @new_name: new volume name |
180 | * @remove: if not zero, this volume should be removed, not re-named | 180 | * @remove: if not zero, this volume should be removed, not re-named |
181 | * @desc: descriptor of the volume | 181 | * @desc: descriptor of the volume |
182 | * @list: links re-name entries into a list | 182 | * @list: links re-name entries into a list |
183 | * | 183 | * |
184 | * This data structure is utilized in the multiple volume re-name code. Namely, | 184 | * This data structure is utilized in the multiple volume re-name code. Namely, |
185 | * UBI first creates a list of &struct ubi_rename_entry objects from the | 185 | * UBI first creates a list of &struct ubi_rename_entry objects from the |
186 | * &struct ubi_rnvol_req request object, and then utilizes this list to do all | 186 | * &struct ubi_rnvol_req request object, and then utilizes this list to do all |
187 | * the work. | 187 | * the work. |
188 | */ | 188 | */ |
189 | struct ubi_rename_entry { | 189 | struct ubi_rename_entry { |
190 | int new_name_len; | 190 | int new_name_len; |
191 | char new_name[UBI_VOL_NAME_MAX + 1]; | 191 | char new_name[UBI_VOL_NAME_MAX + 1]; |
192 | int remove; | 192 | int remove; |
193 | struct ubi_volume_desc *desc; | 193 | struct ubi_volume_desc *desc; |
194 | struct list_head list; | 194 | struct list_head list; |
195 | }; | 195 | }; |
196 | 196 | ||
197 | struct ubi_volume_desc; | 197 | struct ubi_volume_desc; |
198 | 198 | ||
199 | /** | 199 | /** |
200 | * struct ubi_volume - UBI volume description data structure. | 200 | * struct ubi_volume - UBI volume description data structure. |
201 | * @dev: device object to make use of the Linux device model | 201 | * @dev: device object to make use of the Linux device model |
202 | * @cdev: character device object to create character device | 202 | * @cdev: character device object to create character device |
203 | * @ubi: reference to the UBI device description object | 203 | * @ubi: reference to the UBI device description object |
204 | * @vol_id: volume ID | 204 | * @vol_id: volume ID |
205 | * @ref_count: volume reference count | 205 | * @ref_count: volume reference count |
206 | * @readers: number of users holding this volume in read-only mode | 206 | * @readers: number of users holding this volume in read-only mode |
207 | * @writers: number of users holding this volume in read-write mode | 207 | * @writers: number of users holding this volume in read-write mode |
208 | * @exclusive: whether somebody holds this volume in exclusive mode | 208 | * @exclusive: whether somebody holds this volume in exclusive mode |
209 | * | 209 | * |
210 | * @reserved_pebs: how many physical eraseblocks are reserved for this volume | 210 | * @reserved_pebs: how many physical eraseblocks are reserved for this volume |
211 | * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) | 211 | * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) |
212 | * @usable_leb_size: logical eraseblock size without padding | 212 | * @usable_leb_size: logical eraseblock size without padding |
213 | * @used_ebs: how many logical eraseblocks in this volume contain data | 213 | * @used_ebs: how many logical eraseblocks in this volume contain data |
214 | * @last_eb_bytes: how many bytes are stored in the last logical eraseblock | 214 | * @last_eb_bytes: how many bytes are stored in the last logical eraseblock |
215 | * @used_bytes: how many bytes of data this volume contains | 215 | * @used_bytes: how many bytes of data this volume contains |
216 | * @alignment: volume alignment | 216 | * @alignment: volume alignment |
217 | * @data_pad: how many bytes are not used at the end of physical eraseblocks to | 217 | * @data_pad: how many bytes are not used at the end of physical eraseblocks to |
218 | * satisfy the requested alignment | 218 | * satisfy the requested alignment |
219 | * @name_len: volume name length | 219 | * @name_len: volume name length |
220 | * @name: volume name | 220 | * @name: volume name |
221 | * | 221 | * |
222 | * @upd_ebs: how many eraseblocks are expected to be updated | 222 | * @upd_ebs: how many eraseblocks are expected to be updated |
223 | * @ch_lnum: LEB number which is being changed by the atomic LEB change | 223 | * @ch_lnum: LEB number which is being changed by the atomic LEB change |
224 | * operation | 224 | * operation |
225 | * @upd_bytes: how many bytes are expected to be received for volume update or | 225 | * @upd_bytes: how many bytes are expected to be received for volume update or |
226 | * atomic LEB change | 226 | * atomic LEB change |
227 | * @upd_received: how many bytes were already received for volume update or | 227 | * @upd_received: how many bytes were already received for volume update or |
228 | * atomic LEB change | 228 | * atomic LEB change |
229 | * @upd_buf: update buffer which is used to collect update data or data for | 229 | * @upd_buf: update buffer which is used to collect update data or data for |
230 | * atomic LEB change | 230 | * atomic LEB change |
231 | * | 231 | * |
232 | * @eba_tbl: EBA table of this volume (LEB->PEB mapping) | 232 | * @eba_tbl: EBA table of this volume (LEB->PEB mapping) |
233 | * @checked: %1 if this static volume was checked | 233 | * @checked: %1 if this static volume was checked |
234 | * @corrupted: %1 if the volume is corrupted (static volumes only) | 234 | * @corrupted: %1 if the volume is corrupted (static volumes only) |
235 | * @upd_marker: %1 if the update marker is set for this volume | 235 | * @upd_marker: %1 if the update marker is set for this volume |
236 | * @updating: %1 if the volume is being updated | 236 | * @updating: %1 if the volume is being updated |
237 | * @changing_leb: %1 if the atomic LEB change ioctl command is in progress | 237 | * @changing_leb: %1 if the atomic LEB change ioctl command is in progress |
238 | * @direct_writes: %1 if direct writes are enabled for this volume | 238 | * @direct_writes: %1 if direct writes are enabled for this volume |
239 | * | 239 | * |
240 | * The @corrupted field indicates that the volume's contents are corrupted. | 240 | * The @corrupted field indicates that the volume's contents are corrupted. |
241 | * Since UBI protects only static volumes, this field is not relevant to | 241 | * Since UBI protects only static volumes, this field is not relevant to |
242 | * dynamic volumes - it is the user's responsibility to ensure their data | 242 | * dynamic volumes - it is the user's responsibility to ensure their data |
243 | * integrity. | 243 | * integrity. |
244 | * | 244 | * |
245 | * The @upd_marker flag indicates that this volume is either being updated at | 245 | * The @upd_marker flag indicates that this volume is either being updated at |
246 | * the moment or is damaged because of an unclean reboot. | 246 | * the moment or is damaged because of an unclean reboot. |
247 | */ | 247 | */ |
248 | struct ubi_volume { | 248 | struct ubi_volume { |
249 | struct device dev; | 249 | struct device dev; |
250 | struct cdev cdev; | 250 | struct cdev cdev; |
251 | struct ubi_device *ubi; | 251 | struct ubi_device *ubi; |
252 | int vol_id; | 252 | int vol_id; |
253 | int ref_count; | 253 | int ref_count; |
254 | int readers; | 254 | int readers; |
255 | int writers; | 255 | int writers; |
256 | int exclusive; | 256 | int exclusive; |
257 | 257 | ||
258 | int reserved_pebs; | 258 | int reserved_pebs; |
259 | int vol_type; | 259 | int vol_type; |
260 | int usable_leb_size; | 260 | int usable_leb_size; |
261 | int used_ebs; | 261 | int used_ebs; |
262 | int last_eb_bytes; | 262 | int last_eb_bytes; |
263 | long long used_bytes; | 263 | long long used_bytes; |
264 | int alignment; | 264 | int alignment; |
265 | int data_pad; | 265 | int data_pad; |
266 | int name_len; | 266 | int name_len; |
267 | char name[UBI_VOL_NAME_MAX + 1]; | 267 | char name[UBI_VOL_NAME_MAX + 1]; |
268 | 268 | ||
269 | int upd_ebs; | 269 | int upd_ebs; |
270 | int ch_lnum; | 270 | int ch_lnum; |
271 | long long upd_bytes; | 271 | long long upd_bytes; |
272 | long long upd_received; | 272 | long long upd_received; |
273 | void *upd_buf; | 273 | void *upd_buf; |
274 | 274 | ||
275 | int *eba_tbl; | 275 | int *eba_tbl; |
276 | unsigned int checked:1; | 276 | unsigned int checked:1; |
277 | unsigned int corrupted:1; | 277 | unsigned int corrupted:1; |
278 | unsigned int upd_marker:1; | 278 | unsigned int upd_marker:1; |
279 | unsigned int updating:1; | 279 | unsigned int updating:1; |
280 | unsigned int changing_leb:1; | 280 | unsigned int changing_leb:1; |
281 | unsigned int direct_writes:1; | 281 | unsigned int direct_writes:1; |
282 | }; | 282 | }; |
283 | 283 | ||
284 | /** | 284 | /** |
285 | * struct ubi_volume_desc - UBI volume descriptor returned when it is opened. | 285 | * struct ubi_volume_desc - UBI volume descriptor returned when it is opened. |
286 | * @vol: reference to the corresponding volume description object | 286 | * @vol: reference to the corresponding volume description object |
287 | * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE) | 287 | * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE) |
288 | */ | 288 | */ |
289 | struct ubi_volume_desc { | 289 | struct ubi_volume_desc { |
290 | struct ubi_volume *vol; | 290 | struct ubi_volume *vol; |
291 | int mode; | 291 | int mode; |
292 | }; | 292 | }; |
293 | 293 | ||
294 | struct ubi_wl_entry; | 294 | struct ubi_wl_entry; |
295 | 295 | ||
296 | /** | 296 | /** |
297 | * struct ubi_device - UBI device description structure | 297 | * struct ubi_device - UBI device description structure |
298 | * @dev: UBI device object to use the Linux device model | 298 | * @dev: UBI device object to use the Linux device model |
299 | * @cdev: character device object to create character device | 299 | * @cdev: character device object to create character device |
300 | * @ubi_num: UBI device number | 300 | * @ubi_num: UBI device number |
301 | * @ubi_name: UBI device name | 301 | * @ubi_name: UBI device name |
302 | * @vol_count: number of volumes in this UBI device | 302 | * @vol_count: number of volumes in this UBI device |
303 | * @volumes: volumes of this UBI device | 303 | * @volumes: volumes of this UBI device |
304 | * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs, | 304 | * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs, |
305 | * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, | 305 | * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, |
306 | * @vol->readers, @vol->writers, @vol->exclusive, | 306 | * @vol->readers, @vol->writers, @vol->exclusive, |
307 | * @vol->ref_count, @vol->mapping and @vol->eba_tbl. | 307 | * @vol->ref_count, @vol->mapping and @vol->eba_tbl. |
308 | * @ref_count: count of references on the UBI device | 308 | * @ref_count: count of references on the UBI device |
309 | * @image_seq: image sequence number recorded on EC headers | 309 | * @image_seq: image sequence number recorded on EC headers |
310 | * | 310 | * |
311 | * @rsvd_pebs: count of reserved physical eraseblocks | 311 | * @rsvd_pebs: count of reserved physical eraseblocks |
312 | * @avail_pebs: count of available physical eraseblocks | 312 | * @avail_pebs: count of available physical eraseblocks |
313 | * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB | 313 | * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB |
314 | * handling | 314 | * handling |
315 | * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling | 315 | * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling |
316 | * | 316 | * |
317 | * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end | 317 | * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end |
318 | * of UBI initialization | 318 | * of UBI initialization |
319 | * @vtbl_slots: how many slots are available in the volume table | 319 | * @vtbl_slots: how many slots are available in the volume table |
320 | * @vtbl_size: size of the volume table in bytes | 320 | * @vtbl_size: size of the volume table in bytes |
321 | * @vtbl: in-RAM volume table copy | 321 | * @vtbl: in-RAM volume table copy |
322 | * @device_mutex: protects on-flash volume table and serializes volume | 322 | * @device_mutex: protects on-flash volume table and serializes volume |
323 | * creation, deletion, update, re-size, re-name and set | 323 | * creation, deletion, update, re-size, re-name and set |
324 | * property | 324 | * property |
325 | * | 325 | * |
326 | * @max_ec: current highest erase counter value | 326 | * @max_ec: current highest erase counter value |
327 | * @mean_ec: current mean erase counter value | 327 | * @mean_ec: current mean erase counter value |
328 | * | 328 | * |
329 | * @global_sqnum: global sequence number | 329 | * @global_sqnum: global sequence number |
330 | * @ltree_lock: protects the lock tree and @global_sqnum | 330 | * @ltree_lock: protects the lock tree and @global_sqnum |
331 | * @ltree: the lock tree | 331 | * @ltree: the lock tree |
332 | * @alc_mutex: serializes "atomic LEB change" operations | 332 | * @alc_mutex: serializes "atomic LEB change" operations |
333 | * | 333 | * |
334 | * @used: RB-tree of used physical eraseblocks | 334 | * @used: RB-tree of used physical eraseblocks |
335 | * @erroneous: RB-tree of erroneous used physical eraseblocks | 335 | * @erroneous: RB-tree of erroneous used physical eraseblocks |
336 | * @free: RB-tree of free physical eraseblocks | 336 | * @free: RB-tree of free physical eraseblocks |
337 | * @scrub: RB-tree of physical eraseblocks which need scrubbing | 337 | * @scrub: RB-tree of physical eraseblocks which need scrubbing |
338 | * @pq: protection queue (contains physical eraseblocks which are temporarily | 338 | * @pq: protection queue (contains physical eraseblocks which are temporarily |
339 | * protected from the wear-leveling worker) | 339 | * protected from the wear-leveling worker) |
340 | * @pq_head: protection queue head | 340 | * @pq_head: protection queue head |
341 | * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from, | 341 | * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from, |
342 | * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works, | 342 | * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works, |
343 | * @erroneous, and @erroneous_peb_count fields | 343 | * @erroneous, and @erroneous_peb_count fields |
344 | * @move_mutex: serializes eraseblock moves | 344 | * @move_mutex: serializes eraseblock moves |
345 | * @work_sem: synchronizes the WL worker with user tasks | 345 | * @work_sem: synchronizes the WL worker with user tasks |
346 | * @wl_scheduled: non-zero if the wear-leveling was scheduled | 346 | * @wl_scheduled: non-zero if the wear-leveling was scheduled |
347 | * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any | 347 | * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any |
348 | * physical eraseblock | 348 | * physical eraseblock |
349 | * @move_from: physical eraseblock from where the data is being moved | 349 | * @move_from: physical eraseblock from where the data is being moved |
350 | * @move_to: physical eraseblock where the data is being moved to | 350 | * @move_to: physical eraseblock where the data is being moved to |
351 | * @move_to_put: if the "to" PEB was put | 351 | * @move_to_put: if the "to" PEB was put |
352 | * @works: list of pending works | 352 | * @works: list of pending works |
353 | * @works_count: count of pending works | 353 | * @works_count: count of pending works |
354 | * @bgt_thread: background thread description object | 354 | * @bgt_thread: background thread description object |
355 | * @thread_enabled: if the background thread is enabled | 355 | * @thread_enabled: if the background thread is enabled |
356 | * @bgt_name: background thread name | 356 | * @bgt_name: background thread name |
357 | * | 357 | * |
358 | * @flash_size: underlying MTD device size (in bytes) | 358 | * @flash_size: underlying MTD device size (in bytes) |
359 | * @peb_count: count of physical eraseblocks on the MTD device | 359 | * @peb_count: count of physical eraseblocks on the MTD device |
360 | * @peb_size: physical eraseblock size | 360 | * @peb_size: physical eraseblock size |
361 | * @bad_peb_count: count of bad physical eraseblocks | 361 | * @bad_peb_count: count of bad physical eraseblocks |
362 | * @good_peb_count: count of good physical eraseblocks | 362 | * @good_peb_count: count of good physical eraseblocks |
363 | * @corr_peb_count: count of corrupted physical eraseblocks (preserved and not | 363 | * @corr_peb_count: count of corrupted physical eraseblocks (preserved and not |
364 | * used by UBI) | 364 | * used by UBI) |
365 | * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous | 365 | * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous |
366 | * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks | 366 | * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks |
367 | * @min_io_size: minimal input/output unit size of the underlying MTD device | 367 | * @min_io_size: minimal input/output unit size of the underlying MTD device |
368 | * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers | 368 | * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers |
369 | * @ro_mode: if the UBI device is in read-only mode | 369 | * @ro_mode: if the UBI device is in read-only mode |
370 | * @leb_size: logical eraseblock size | 370 | * @leb_size: logical eraseblock size |
371 | * @leb_start: starting offset of logical eraseblocks within physical | 371 | * @leb_start: starting offset of logical eraseblocks within physical |
372 | * eraseblocks | 372 | * eraseblocks |
373 | * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size | 373 | * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size |
374 | * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size | 374 | * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size |
375 | * @vid_hdr_offset: starting offset of the volume identifier header (might be | 375 | * @vid_hdr_offset: starting offset of the volume identifier header (might be |
376 | * unaligned) | 376 | * unaligned) |
377 | * @vid_hdr_aloffset: starting offset of the VID header aligned to | 377 | * @vid_hdr_aloffset: starting offset of the VID header aligned to |
378 | * @hdrs_min_io_size | 378 | * @hdrs_min_io_size |
379 | * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset | 379 | * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset |
380 | * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or | 380 | * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or |
381 | * not | 381 | * not |
382 | * @nor_flash: non-zero if working on top of NOR flash | 382 | * @nor_flash: non-zero if working on top of NOR flash |
383 | * @max_write_size: maximum amount of bytes the underlying flash can write at a | 383 | * @max_write_size: maximum amount of bytes the underlying flash can write at a |
384 | * time (MTD write buffer size) | 384 | * time (MTD write buffer size) |
385 | * @mtd: MTD device descriptor | 385 | * @mtd: MTD device descriptor |
386 | * | 386 | * |
387 | * @peb_buf: a buffer of PEB size used for different purposes | 387 | * @peb_buf: a buffer of PEB size used for different purposes |
388 | * @buf_mutex: protects @peb_buf | 388 | * @buf_mutex: protects @peb_buf |
389 | * @ckvol_mutex: serializes static volume checking when opening | 389 | * @ckvol_mutex: serializes static volume checking when opening |
390 | * | 390 | * |
391 | * @dbg: debugging information for this UBI device | 391 | * @dbg: debugging information for this UBI device |
392 | */ | 392 | */ |
393 | struct ubi_device { | 393 | struct ubi_device { |
394 | struct cdev cdev; | 394 | struct cdev cdev; |
395 | struct device dev; | 395 | struct device dev; |
396 | int ubi_num; | 396 | int ubi_num; |
397 | char ubi_name[sizeof(UBI_NAME_STR)+5]; | 397 | char ubi_name[sizeof(UBI_NAME_STR)+5]; |
398 | int vol_count; | 398 | int vol_count; |
399 | struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT]; | 399 | struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT]; |
400 | spinlock_t volumes_lock; | 400 | spinlock_t volumes_lock; |
401 | int ref_count; | 401 | int ref_count; |
402 | int image_seq; | 402 | int image_seq; |
403 | 403 | ||
404 | int rsvd_pebs; | 404 | int rsvd_pebs; |
405 | int avail_pebs; | 405 | int avail_pebs; |
406 | int beb_rsvd_pebs; | 406 | int beb_rsvd_pebs; |
407 | int beb_rsvd_level; | 407 | int beb_rsvd_level; |
408 | 408 | ||
409 | int autoresize_vol_id; | 409 | int autoresize_vol_id; |
410 | int vtbl_slots; | 410 | int vtbl_slots; |
411 | int vtbl_size; | 411 | int vtbl_size; |
412 | struct ubi_vtbl_record *vtbl; | 412 | struct ubi_vtbl_record *vtbl; |
413 | struct mutex device_mutex; | 413 | struct mutex device_mutex; |
414 | 414 | ||
415 | int max_ec; | 415 | int max_ec; |
416 | /* Note, mean_ec is not updated at run-time - should be fixed */ | 416 | /* Note, mean_ec is not updated at run-time - should be fixed */ |
417 | int mean_ec; | 417 | int mean_ec; |
418 | 418 | ||
419 | /* EBA sub-system's stuff */ | 419 | /* EBA sub-system's stuff */ |
420 | unsigned long long global_sqnum; | 420 | unsigned long long global_sqnum; |
421 | spinlock_t ltree_lock; | 421 | spinlock_t ltree_lock; |
422 | struct rb_root ltree; | 422 | struct rb_root ltree; |
423 | struct mutex alc_mutex; | 423 | struct mutex alc_mutex; |
424 | 424 | ||
425 | /* Wear-leveling sub-system's stuff */ | 425 | /* Wear-leveling sub-system's stuff */ |
426 | struct rb_root used; | 426 | struct rb_root used; |
427 | struct rb_root erroneous; | 427 | struct rb_root erroneous; |
428 | struct rb_root free; | 428 | struct rb_root free; |
429 | struct rb_root scrub; | 429 | struct rb_root scrub; |
430 | struct list_head pq[UBI_PROT_QUEUE_LEN]; | 430 | struct list_head pq[UBI_PROT_QUEUE_LEN]; |
431 | int pq_head; | 431 | int pq_head; |
432 | spinlock_t wl_lock; | 432 | spinlock_t wl_lock; |
433 | struct mutex move_mutex; | 433 | struct mutex move_mutex; |
434 | struct rw_semaphore work_sem; | 434 | struct rw_semaphore work_sem; |
435 | int wl_scheduled; | 435 | int wl_scheduled; |
436 | struct ubi_wl_entry **lookuptbl; | 436 | struct ubi_wl_entry **lookuptbl; |
437 | struct ubi_wl_entry *move_from; | 437 | struct ubi_wl_entry *move_from; |
438 | struct ubi_wl_entry *move_to; | 438 | struct ubi_wl_entry *move_to; |
439 | int move_to_put; | 439 | int move_to_put; |
440 | struct list_head works; | 440 | struct list_head works; |
441 | int works_count; | 441 | int works_count; |
442 | struct task_struct *bgt_thread; | 442 | struct task_struct *bgt_thread; |
443 | int thread_enabled; | 443 | int thread_enabled; |
444 | char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2]; | 444 | char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2]; |
445 | 445 | ||
446 | /* I/O sub-system's stuff */ | 446 | /* I/O sub-system's stuff */ |
447 | long long flash_size; | 447 | long long flash_size; |
448 | int peb_count; | 448 | int peb_count; |
449 | int peb_size; | 449 | int peb_size; |
450 | int bad_peb_count; | 450 | int bad_peb_count; |
451 | int good_peb_count; | 451 | int good_peb_count; |
452 | int corr_peb_count; | 452 | int corr_peb_count; |
453 | int erroneous_peb_count; | 453 | int erroneous_peb_count; |
454 | int max_erroneous; | 454 | int max_erroneous; |
455 | int min_io_size; | 455 | int min_io_size; |
456 | int hdrs_min_io_size; | 456 | int hdrs_min_io_size; |
457 | int ro_mode; | 457 | int ro_mode; |
458 | int leb_size; | 458 | int leb_size; |
459 | int leb_start; | 459 | int leb_start; |
460 | int ec_hdr_alsize; | 460 | int ec_hdr_alsize; |
461 | int vid_hdr_alsize; | 461 | int vid_hdr_alsize; |
462 | int vid_hdr_offset; | 462 | int vid_hdr_offset; |
463 | int vid_hdr_aloffset; | 463 | int vid_hdr_aloffset; |
464 | int vid_hdr_shift; | 464 | int vid_hdr_shift; |
465 | unsigned int bad_allowed:1; | 465 | unsigned int bad_allowed:1; |
466 | unsigned int nor_flash:1; | 466 | unsigned int nor_flash:1; |
467 | int max_write_size; | 467 | int max_write_size; |
468 | struct mtd_info *mtd; | 468 | struct mtd_info *mtd; |
469 | 469 | ||
470 | void *peb_buf; | 470 | void *peb_buf; |
471 | struct mutex buf_mutex; | 471 | struct mutex buf_mutex; |
472 | struct mutex ckvol_mutex; | 472 | struct mutex ckvol_mutex; |
473 | 473 | ||
474 | struct ubi_debug_info *dbg; | 474 | struct ubi_debug_info *dbg; |
475 | }; | 475 | }; |
476 | 476 | ||
477 | #include "debug.h" | 477 | #include "debug.h" |
478 | 478 | ||
479 | extern struct kmem_cache *ubi_wl_entry_slab; | 479 | extern struct kmem_cache *ubi_wl_entry_slab; |
480 | extern const struct file_operations ubi_ctrl_cdev_operations; | 480 | extern const struct file_operations ubi_ctrl_cdev_operations; |
481 | extern const struct file_operations ubi_cdev_operations; | 481 | extern const struct file_operations ubi_cdev_operations; |
482 | extern const struct file_operations ubi_vol_cdev_operations; | 482 | extern const struct file_operations ubi_vol_cdev_operations; |
483 | extern struct class *ubi_class; | 483 | extern struct class *ubi_class; |
484 | extern struct mutex ubi_devices_mutex; | 484 | extern struct mutex ubi_devices_mutex; |
485 | extern struct blocking_notifier_head ubi_notifiers; | 485 | extern struct blocking_notifier_head ubi_notifiers; |
486 | 486 | ||
487 | /* vtbl.c */ | 487 | /* vtbl.c */ |
488 | int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, | 488 | int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, |
489 | struct ubi_vtbl_record *vtbl_rec); | 489 | struct ubi_vtbl_record *vtbl_rec); |
490 | int ubi_vtbl_rename_volumes(struct ubi_device *ubi, | 490 | int ubi_vtbl_rename_volumes(struct ubi_device *ubi, |
491 | struct list_head *rename_list); | 491 | struct list_head *rename_list); |
492 | int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai); | 492 | int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai); |
493 | 493 | ||
494 | /* vmt.c */ | 494 | /* vmt.c */ |
495 | int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); | 495 | int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); |
496 | int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl); | 496 | int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl); |
497 | int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs); | 497 | int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs); |
498 | int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list); | 498 | int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list); |
499 | int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol); | 499 | int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol); |
500 | void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol); | 500 | void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol); |
501 | 501 | ||
502 | /* upd.c */ | 502 | /* upd.c */ |
503 | int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, | 503 | int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, |
504 | long long bytes); | 504 | long long bytes); |
505 | int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol, | 505 | int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol, |
506 | const void __user *buf, int count); | 506 | const void __user *buf, int count); |
507 | int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, | 507 | int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, |
508 | const struct ubi_leb_change_req *req); | 508 | const struct ubi_leb_change_req *req); |
509 | int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol, | 509 | int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol, |
510 | const void __user *buf, int count); | 510 | const void __user *buf, int count); |
511 | 511 | ||
512 | /* misc.c */ | 512 | /* misc.c */ |
513 | int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, | 513 | int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, |
514 | int length); | 514 | int length); |
515 | int ubi_check_volume(struct ubi_device *ubi, int vol_id); | 515 | int ubi_check_volume(struct ubi_device *ubi, int vol_id); |
516 | void ubi_calculate_reserved(struct ubi_device *ubi); | 516 | void ubi_calculate_reserved(struct ubi_device *ubi); |
517 | int ubi_check_pattern(const void *buf, uint8_t patt, int size); | 517 | int ubi_check_pattern(const void *buf, uint8_t patt, int size); |
518 | 518 | ||
519 | /* eba.c */ | 519 | /* eba.c */ |
520 | int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, | 520 | int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, |
521 | int lnum); | 521 | int lnum); |
522 | int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, | 522 | int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, |
523 | void *buf, int offset, int len, int check); | 523 | void *buf, int offset, int len, int check); |
524 | int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, | 524 | int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, |
525 | const void *buf, int offset, int len); | 525 | const void *buf, int offset, int len); |
526 | int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, | 526 | int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, |
527 | int lnum, const void *buf, int len, int used_ebs); | 527 | int lnum, const void *buf, int len, int used_ebs); |
528 | int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, | 528 | int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, |
529 | int lnum, const void *buf, int len); | 529 | int lnum, const void *buf, int len); |
530 | int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | 530 | int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, |
531 | struct ubi_vid_hdr *vid_hdr); | 531 | struct ubi_vid_hdr *vid_hdr); |
532 | int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_attach_info *ai); | 532 | int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai); |
533 | 533 | ||
534 | /* wl.c */ | 534 | /* wl.c */ |
535 | int ubi_wl_get_peb(struct ubi_device *ubi); | 535 | int ubi_wl_get_peb(struct ubi_device *ubi); |
536 | int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture); | 536 | int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture); |
537 | int ubi_wl_flush(struct ubi_device *ubi); | 537 | int ubi_wl_flush(struct ubi_device *ubi); |
538 | int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum); | 538 | int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum); |
539 | int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_attach_info *ai); | 539 | int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai); |
540 | void ubi_wl_close(struct ubi_device *ubi); | 540 | void ubi_wl_close(struct ubi_device *ubi); |
541 | int ubi_thread(void *u); | 541 | int ubi_thread(void *u); |
542 | 542 | ||
543 | /* io.c */ | 543 | /* io.c */ |
544 | int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, | 544 | int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, |
545 | int len); | 545 | int len); |
546 | int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset, | 546 | int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset, |
547 | int len); | 547 | int len); |
548 | int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture); | 548 | int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture); |
549 | int ubi_io_is_bad(const struct ubi_device *ubi, int pnum); | 549 | int ubi_io_is_bad(const struct ubi_device *ubi, int pnum); |
550 | int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum); | 550 | int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum); |
551 | int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, | 551 | int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, |
552 | struct ubi_ec_hdr *ec_hdr, int verbose); | 552 | struct ubi_ec_hdr *ec_hdr, int verbose); |
553 | int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum, | 553 | int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum, |
554 | struct ubi_ec_hdr *ec_hdr); | 554 | struct ubi_ec_hdr *ec_hdr); |
555 | int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, | 555 | int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, |
556 | struct ubi_vid_hdr *vid_hdr, int verbose); | 556 | struct ubi_vid_hdr *vid_hdr, int verbose); |
557 | int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, | 557 | int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, |
558 | struct ubi_vid_hdr *vid_hdr); | 558 | struct ubi_vid_hdr *vid_hdr); |
559 | 559 | ||
560 | /* build.c */ | 560 | /* build.c */ |
561 | int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset); | 561 | int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset); |
562 | int ubi_detach_mtd_dev(int ubi_num, int anyway); | 562 | int ubi_detach_mtd_dev(int ubi_num, int anyway); |
563 | struct ubi_device *ubi_get_device(int ubi_num); | 563 | struct ubi_device *ubi_get_device(int ubi_num); |
564 | void ubi_put_device(struct ubi_device *ubi); | 564 | void ubi_put_device(struct ubi_device *ubi); |
565 | struct ubi_device *ubi_get_by_major(int major); | 565 | struct ubi_device *ubi_get_by_major(int major); |
566 | int ubi_major2num(int major); | 566 | int ubi_major2num(int major); |
567 | int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, | 567 | int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, |
568 | int ntype); | 568 | int ntype); |
569 | int ubi_notify_all(struct ubi_device *ubi, int ntype, | 569 | int ubi_notify_all(struct ubi_device *ubi, int ntype, |
570 | struct notifier_block *nb); | 570 | struct notifier_block *nb); |
571 | int ubi_enumerate_volumes(struct notifier_block *nb); | 571 | int ubi_enumerate_volumes(struct notifier_block *nb); |
572 | 572 | ||
573 | /* kapi.c */ | 573 | /* kapi.c */ |
574 | void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di); | 574 | void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di); |
575 | void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol, | 575 | void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol, |
576 | struct ubi_volume_info *vi); | 576 | struct ubi_volume_info *vi); |
577 | 577 | ||
578 | /* | 578 | /* |
579 | * ubi_rb_for_each_entry - walk an RB-tree. | 579 | * ubi_rb_for_each_entry - walk an RB-tree. |
580 | * @rb: a pointer to type 'struct rb_node' to use as a loop counter | 580 | * @rb: a pointer to type 'struct rb_node' to use as a loop counter |
581 | * @pos: a pointer to RB-tree entry type to use as a loop counter | 581 | * @pos: a pointer to RB-tree entry type to use as a loop counter |
582 | * @root: RB-tree's root | 582 | * @root: RB-tree's root |
583 | * @member: the name of the 'struct rb_node' within the RB-tree entry | 583 | * @member: the name of the 'struct rb_node' within the RB-tree entry |
584 | */ | 584 | */ |
585 | #define ubi_rb_for_each_entry(rb, pos, root, member) \ | 585 | #define ubi_rb_for_each_entry(rb, pos, root, member) \ |
586 | for (rb = rb_first(root), \ | 586 | for (rb = rb_first(root), \ |
587 | pos = (rb ? container_of(rb, typeof(*pos), member) : NULL); \ | 587 | pos = (rb ? container_of(rb, typeof(*pos), member) : NULL); \ |
588 | rb; \ | 588 | rb; \ |
589 | rb = rb_next(rb), \ | 589 | rb = rb_next(rb), \ |
590 | pos = (rb ? container_of(rb, typeof(*pos), member) : NULL)) | 590 | pos = (rb ? container_of(rb, typeof(*pos), member) : NULL)) |
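
As an illustrative aside (an editor's sketch, not part of this change), the macro above is typically used to walk the wear-leveling RB-trees, for example:

	struct rb_node *rb;
	struct ubi_wl_entry *e;

	/* Print every used PEB together with its erase counter */
	ubi_rb_for_each_entry(rb, e, &ubi->used, u.rb)
		dbg_wl("PEB %d has erase counter %d", e->pnum, e->ec);
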
591 | 591 | ||
592 | /** | 592 | /** |
593 | * ubi_zalloc_vid_hdr - allocate a volume identifier header object. | 593 | * ubi_zalloc_vid_hdr - allocate a volume identifier header object. |
594 | * @ubi: UBI device description object | 594 | * @ubi: UBI device description object |
595 | * @gfp_flags: GFP flags to allocate with | 595 | * @gfp_flags: GFP flags to allocate with |
596 | * | 596 | * |
597 | * This function returns a pointer to the newly allocated and zero-filled | 597 | * This function returns a pointer to the newly allocated and zero-filled |
598 | * volume identifier header object in case of success and %NULL in case of | 598 | * volume identifier header object in case of success and %NULL in case of |
599 | * failure. | 599 | * failure. |
600 | */ | 600 | */ |
601 | static inline struct ubi_vid_hdr * | 601 | static inline struct ubi_vid_hdr * |
602 | ubi_zalloc_vid_hdr(const struct ubi_device *ubi, gfp_t gfp_flags) | 602 | ubi_zalloc_vid_hdr(const struct ubi_device *ubi, gfp_t gfp_flags) |
603 | { | 603 | { |
604 | void *vid_hdr; | 604 | void *vid_hdr; |
605 | 605 | ||
606 | vid_hdr = kzalloc(ubi->vid_hdr_alsize, gfp_flags); | 606 | vid_hdr = kzalloc(ubi->vid_hdr_alsize, gfp_flags); |
607 | if (!vid_hdr) | 607 | if (!vid_hdr) |
608 | return NULL; | 608 | return NULL; |
609 | 609 | ||
610 | /* | 610 | /* |
611 | * VID headers may be stored at un-aligned flash offsets, so we shift | 611 | * VID headers may be stored at un-aligned flash offsets, so we shift |
612 | * the pointer. | 612 | * the pointer. |
613 | */ | 613 | */ |
614 | return vid_hdr + ubi->vid_hdr_shift; | 614 | return vid_hdr + ubi->vid_hdr_shift; |
615 | } | 615 | } |
616 | 616 | ||
617 | /** | 617 | /** |
618 | * ubi_free_vid_hdr - free a volume identifier header object. | 618 | * ubi_free_vid_hdr - free a volume identifier header object. |
619 | * @ubi: UBI device description object | 619 | * @ubi: UBI device description object |
620 | * @vid_hdr: the object to free | 620 | * @vid_hdr: the object to free |
621 | */ | 621 | */ |
622 | static inline void ubi_free_vid_hdr(const struct ubi_device *ubi, | 622 | static inline void ubi_free_vid_hdr(const struct ubi_device *ubi, |
623 | struct ubi_vid_hdr *vid_hdr) | 623 | struct ubi_vid_hdr *vid_hdr) |
624 | { | 624 | { |
625 | void *p = vid_hdr; | 625 | void *p = vid_hdr; |
626 | 626 | ||
627 | if (!p) | 627 | if (!p) |
628 | return; | 628 | return; |
629 | 629 | ||
630 | kfree(p - ubi->vid_hdr_shift); | 630 | kfree(p - ubi->vid_hdr_shift); |
631 | } | 631 | } |
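
A minimal usage sketch (editor's illustration, not part of this change; @pnum stands for any valid physical eraseblock number): because ubi_zalloc_vid_hdr() returns a pointer that is already advanced by @vid_hdr_shift, the buffer must always be released with ubi_free_vid_hdr(), which undoes the shift before calling kfree():

	struct ubi_vid_hdr *vid_hdr;
	int err;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 0);
	/* ... use the header ... */
	ubi_free_vid_hdr(ubi, vid_hdr);
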
632 | 632 | ||
633 | /* | 633 | /* |
634 | * This function is equivalent to 'ubi_io_read()', but @offset is relative to | 634 | * This function is equivalent to 'ubi_io_read()', but @offset is relative to |
635 | * the beginning of the logical eraseblock, not to the beginning of the | 635 | * the beginning of the logical eraseblock, not to the beginning of the |
636 | * physical eraseblock. | 636 | * physical eraseblock. |
637 | */ | 637 | */ |
638 | static inline int ubi_io_read_data(const struct ubi_device *ubi, void *buf, | 638 | static inline int ubi_io_read_data(const struct ubi_device *ubi, void *buf, |
639 | int pnum, int offset, int len) | 639 | int pnum, int offset, int len) |
640 | { | 640 | { |
641 | ubi_assert(offset >= 0); | 641 | ubi_assert(offset >= 0); |
642 | return ubi_io_read(ubi, buf, pnum, offset + ubi->leb_start, len); | 642 | return ubi_io_read(ubi, buf, pnum, offset + ubi->leb_start, len); |
643 | } | 643 | } |
644 | 644 | ||
645 | /* | 645 | /* |
646 | * This function is equivalent to 'ubi_io_write()', but @offset is relative to | 646 | * This function is equivalent to 'ubi_io_write()', but @offset is relative to |
647 | * the beginning of the logical eraseblock, not to the beginning of the | 647 | * the beginning of the logical eraseblock, not to the beginning of the |
648 | * physical eraseblock. | 648 | * physical eraseblock. |
649 | */ | 649 | */ |
650 | static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf, | 650 | static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf, |
651 | int pnum, int offset, int len) | 651 | int pnum, int offset, int len) |
652 | { | 652 | { |
653 | ubi_assert(offset >= 0); | 653 | ubi_assert(offset >= 0); |
654 | return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len); | 654 | return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len); |
655 | } | 655 | } |
656 | 656 | ||
657 | /** | 657 | /** |
658 | * ubi_ro_mode - switch to read-only mode. | 658 | * ubi_ro_mode - switch to read-only mode. |
659 | * @ubi: UBI device description object | 659 | * @ubi: UBI device description object |
660 | */ | 660 | */ |
661 | static inline void ubi_ro_mode(struct ubi_device *ubi) | 661 | static inline void ubi_ro_mode(struct ubi_device *ubi) |
662 | { | 662 | { |
663 | if (!ubi->ro_mode) { | 663 | if (!ubi->ro_mode) { |
664 | ubi->ro_mode = 1; | 664 | ubi->ro_mode = 1; |
665 | ubi_warn("switch to read-only mode"); | 665 | ubi_warn("switch to read-only mode"); |
666 | dump_stack(); | 666 | dump_stack(); |
667 | } | 667 | } |
668 | } | 668 | } |
669 | 669 | ||
670 | /** | 670 | /** |
671 | * vol_id2idx - get table index by volume ID. | 671 | * vol_id2idx - get table index by volume ID. |
672 | * @ubi: UBI device description object | 672 | * @ubi: UBI device description object |
673 | * @vol_id: volume ID | 673 | * @vol_id: volume ID |
674 | */ | 674 | */ |
675 | static inline int vol_id2idx(const struct ubi_device *ubi, int vol_id) | 675 | static inline int vol_id2idx(const struct ubi_device *ubi, int vol_id) |
676 | { | 676 | { |
677 | if (vol_id >= UBI_INTERNAL_VOL_START) | 677 | if (vol_id >= UBI_INTERNAL_VOL_START) |
678 | return vol_id - UBI_INTERNAL_VOL_START + ubi->vtbl_slots; | 678 | return vol_id - UBI_INTERNAL_VOL_START + ubi->vtbl_slots; |
679 | else | 679 | else |
680 | return vol_id; | 680 | return vol_id; |
681 | } | 681 | } |
682 | 682 | ||
683 | /** | 683 | /** |
684 | * idx2vol_id - get volume ID by table index. | 684 | * idx2vol_id - get volume ID by table index. |
685 | * @ubi: UBI device description object | 685 | * @ubi: UBI device description object |
686 | * @idx: table index | 686 | * @idx: table index |
687 | */ | 687 | */ |
688 | static inline int idx2vol_id(const struct ubi_device *ubi, int idx) | 688 | static inline int idx2vol_id(const struct ubi_device *ubi, int idx) |
689 | { | 689 | { |
690 | if (idx >= ubi->vtbl_slots) | 690 | if (idx >= ubi->vtbl_slots) |
691 | return idx - ubi->vtbl_slots + UBI_INTERNAL_VOL_START; | 691 | return idx - ubi->vtbl_slots + UBI_INTERNAL_VOL_START; |
692 | else | 692 | else |
693 | return idx; | 693 | return idx; |
694 | } | 694 | } |
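
For illustration (editor's note; the value 128 for @vtbl_slots is only an assumption, matching the usual %UBI_MAX_VOLUMES limit): user volume IDs map to table indices directly, while internal volume IDs, which start at %UBI_INTERNAL_VOL_START, are folded into the slots right after the user volumes, so the two helpers are exact inverses of each other:

	/* Assuming ubi->vtbl_slots == 128 */
	int idx = vol_id2idx(ubi, UBI_INTERNAL_VOL_START);	/* 128 */
	int vol_id = idx2vol_id(ubi, idx);			/* UBI_INTERNAL_VOL_START again */
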
695 | 695 | ||
696 | #endif /* !__UBI_UBI_H__ */ | 696 | #endif /* !__UBI_UBI_H__ */ |
697 | 697 |
drivers/mtd/ubi/wl.c
1 | /* | 1 | /* |
2 | * | 2 | * |
3 | * Copyright (c) International Business Machines Corp., 2006 | 3 | * Copyright (c) International Business Machines Corp., 2006 |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation; either version 2 of the License, or | 7 | * the Free Software Foundation; either version 2 of the License, or |
8 | * (at your option) any later version. | 8 | * (at your option) any later version. |
9 | * | 9 | * |
10 | * This program is distributed in the hope that it will be useful, | 10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See |
13 | * the GNU General Public License for more details. | 13 | * the GNU General Public License for more details. |
14 | * | 14 | * |
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | * | 18 | * |
19 | * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner | 19 | * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner |
20 | */ | 20 | */ |
21 | 21 | ||
22 | /* | 22 | /* |
23 | * UBI wear-leveling sub-system. | 23 | * UBI wear-leveling sub-system. |
24 | * | 24 | * |
25 | * This sub-system is responsible for wear-leveling. It works in terms of | 25 | * This sub-system is responsible for wear-leveling. It works in terms of |
26 | * physical eraseblocks and erase counters and knows nothing about logical | 26 | * physical eraseblocks and erase counters and knows nothing about logical |
27 | * eraseblocks, volumes, etc. From this sub-system's perspective all physical | 27 | * eraseblocks, volumes, etc. From this sub-system's perspective all physical |
28 | * eraseblocks are of two types - used and free. Used physical eraseblocks are | 28 | * eraseblocks are of two types - used and free. Used physical eraseblocks are |
29 | * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical | 29 | * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical |
30 | * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function. | 30 | * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function. |
31 | * | 31 | * |
32 | * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase counter | 32 | * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase counter |
33 | * header. The rest of the physical eraseblock contains only %0xFF bytes. | 33 | * header. The rest of the physical eraseblock contains only %0xFF bytes. |
34 | * | 34 | * |
35 | * When physical eraseblocks are returned to the WL sub-system by means of the | 35 | * When physical eraseblocks are returned to the WL sub-system by means of the |
36 | * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is | 36 | * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is |
37 | * done asynchronously in context of the per-UBI device background thread, | 37 | * done asynchronously in context of the per-UBI device background thread, |
38 | * which is also managed by the WL sub-system. | 38 | * which is also managed by the WL sub-system. |
39 | * | 39 | * |
40 | * The wear-leveling is ensured by means of moving the contents of used | 40 | * The wear-leveling is ensured by means of moving the contents of used |
41 | * physical eraseblocks with low erase counter to free physical eraseblocks | 41 | * physical eraseblocks with low erase counter to free physical eraseblocks |
42 | * with high erase counter. | 42 | * with high erase counter. |
43 | * | 43 | * |
44 | * If the WL sub-system fails to erase a physical eraseblock, it marks it as | 44 | * If the WL sub-system fails to erase a physical eraseblock, it marks it as |
45 | * bad. | 45 | * bad. |
46 | * | 46 | * |
47 | * This sub-system is also responsible for scrubbing. If a bit-flip is detected | 47 | * This sub-system is also responsible for scrubbing. If a bit-flip is detected |
48 | * in a physical eraseblock, it has to be moved. Technically this is the same | 48 | * in a physical eraseblock, it has to be moved. Technically this is the same |
49 | * as moving it for wear-leveling reasons. | 49 | * as moving it for wear-leveling reasons. |
50 | * | 50 | * |
51 | * As it was said, for the UBI sub-system all physical eraseblocks are either | 51 | * As it was said, for the UBI sub-system all physical eraseblocks are either |
52 | * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while | 52 | * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while |
53 | * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub | 53 | * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub |
54 | * RB-trees, as well as (temporarily) in the @wl->pq queue. | 54 | * RB-trees, as well as (temporarily) in the @wl->pq queue. |
55 | * | 55 | * |
56 | * When the WL sub-system returns a physical eraseblock, the physical | 56 | * When the WL sub-system returns a physical eraseblock, the physical |
57 | * eraseblock is protected from being moved for some "time". For this reason, | 57 | * eraseblock is protected from being moved for some "time". For this reason, |
58 | * the physical eraseblock is not directly moved from the @wl->free tree to the | 58 | * the physical eraseblock is not directly moved from the @wl->free tree to the |
59 | * @wl->used tree. There is a protection queue in between where this | 59 | * @wl->used tree. There is a protection queue in between where this |
60 | * physical eraseblock is temporarily stored (@wl->pq). | 60 | * physical eraseblock is temporarily stored (@wl->pq). |
61 | * | 61 | * |
62 | * All this protection stuff is needed because: | 62 | * All this protection stuff is needed because: |
63 | * o we don't want to move physical eraseblocks just after we have given them | 63 | * o we don't want to move physical eraseblocks just after we have given them |
64 | * to the user; instead, we first want to let users fill them up with data; | 64 | * to the user; instead, we first want to let users fill them up with data; |
65 | * | 65 | * |
66 | * o there is a chance that the user will put the physical eraseblock very | 66 | * o there is a chance that the user will put the physical eraseblock very |
67 | * soon, so it makes sense not to move it for some time, but wait. | 67 | * soon, so it makes sense not to move it for some time, but wait. |
68 | * | 68 | * |
69 | * Physical eraseblocks stay protected only for limited time. But the "time" is | 69 | * Physical eraseblocks stay protected only for limited time. But the "time" is |
70 | * measured in erase cycles in this case. This is implemented with the help of the | 70 | * measured in erase cycles in this case. This is implemented with the help of the |
71 | * protection queue. Eraseblocks are put to the tail of this queue when they | 71 | * protection queue. Eraseblocks are put to the tail of this queue when they |
72 | * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the | 72 | * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the |
73 | * head of the queue on each erase operation (for any eraseblock). So the | 73 | * head of the queue on each erase operation (for any eraseblock). So the |
74 | * length of the queue defines how many (global) erase cycles PEBs are protected. | 74 | * length of the queue defines how many (global) erase cycles PEBs are protected. |
75 | * | 75 | * |
76 | * To put it differently, each physical eraseblock has 2 main states: free and | 76 | * To put it differently, each physical eraseblock has 2 main states: free and |
77 | * used. The former state corresponds to the @wl->free tree. The latter state | 77 | * used. The former state corresponds to the @wl->free tree. The latter state |
78 | * is split up on several sub-states: | 78 | * is split up on several sub-states: |
79 | * o the WL movement is allowed (@wl->used tree); | 79 | * o the WL movement is allowed (@wl->used tree); |
80 | * o the WL movement is disallowed (@wl->erroneous) because the PEB is | 80 | * o the WL movement is disallowed (@wl->erroneous) because the PEB is |
81 | * erroneous - e.g., there was a read error; | 81 | * erroneous - e.g., there was a read error; |
82 | * o the WL movement is temporarily prohibited (@wl->pq queue); | 82 | * o the WL movement is temporarily prohibited (@wl->pq queue); |
83 | * o scrubbing is needed (@wl->scrub tree). | 83 | * o scrubbing is needed (@wl->scrub tree). |
84 | * | 84 | * |
85 | * Depending on the sub-state, wear-leveling entries of the used physical | 85 | * Depending on the sub-state, wear-leveling entries of the used physical |
86 | * eraseblocks may be kept in one of those structures. | 86 | * eraseblocks may be kept in one of those structures. |
87 | * | 87 | * |
88 | * Note, in this implementation, we keep a small in-RAM object for each physical | 88 | * Note, in this implementation, we keep a small in-RAM object for each physical |
89 | * eraseblock. This is surely not a scalable solution. But it appears to be good | 89 | * eraseblock. This is surely not a scalable solution. But it appears to be good |
90 | * enough for moderately large flashes and it is simple. In future, one may | 90 | * enough for moderately large flashes and it is simple. In future, one may |
91 | * re-work this sub-system and make it more scalable. | 91 | * re-work this sub-system and make it more scalable. |
92 | * | 92 | * |
93 | * At the moment this sub-system does not utilize the sequence number, which | 93 | * At the moment this sub-system does not utilize the sequence number, which |
94 | * was introduced relatively recently. But it would be wise to do this because | 94 | * was introduced relatively recently. But it would be wise to do this because |
95 | * the sequence number of a logical eraseblock characterizes how old it is. For | 95 | * the sequence number of a logical eraseblock characterizes how old it is. For |
96 | * example, when we move a PEB with low erase counter, and we need to pick the | 96 | * example, when we move a PEB with low erase counter, and we need to pick the |
97 | * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we | 97 | * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we |
98 | * pick target PEB with an average EC if our PEB is not very "old". This leaves | 98 | * pick target PEB with an average EC if our PEB is not very "old". This leaves |
99 | * room for future re-works of the WL sub-system. | 99 | * room for future re-works of the WL sub-system. |
100 | */ | 100 | */ |
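
To make the lifecycle above concrete, here is an editor's sketch (not part of this change) of the calls a higher layer such as EBA issues around a single PEB; the PEB travels free tree -> protection queue -> used tree -> scheduled erasure -> free tree:

	int err, pnum;

	pnum = ubi_wl_get_peb(ubi);		/* taken from @wl->free, parked in @wl->pq */
	if (pnum < 0)
		return pnum;

	/* ... the caller writes a VID header and data to PEB @pnum ... */

	err = ubi_wl_put_peb(ubi, pnum, 0);	/* schedule asynchronous erasure */
	if (err)
		return err;
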
101 | 101 | ||
102 | #include <linux/slab.h> | 102 | #include <linux/slab.h> |
103 | #include <linux/crc32.h> | 103 | #include <linux/crc32.h> |
104 | #include <linux/freezer.h> | 104 | #include <linux/freezer.h> |
105 | #include <linux/kthread.h> | 105 | #include <linux/kthread.h> |
106 | #include "ubi.h" | 106 | #include "ubi.h" |
107 | 107 | ||
108 | /* Number of physical eraseblocks reserved for wear-leveling purposes */ | 108 | /* Number of physical eraseblocks reserved for wear-leveling purposes */ |
109 | #define WL_RESERVED_PEBS 1 | 109 | #define WL_RESERVED_PEBS 1 |
110 | 110 | ||
111 | /* | 111 | /* |
112 | * Maximum difference between two erase counters. If this threshold is | 112 | * Maximum difference between two erase counters. If this threshold is |
113 | * exceeded, the WL sub-system starts moving data from used physical | 113 | * exceeded, the WL sub-system starts moving data from used physical |
114 | * eraseblocks with low erase counter to free physical eraseblocks with high | 114 | * eraseblocks with low erase counter to free physical eraseblocks with high |
115 | * erase counter. | 115 | * erase counter. |
116 | */ | 116 | */ |
117 | #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD | 117 | #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD |
118 | 118 | ||
119 | /* | 119 | /* |
120 | * When a physical eraseblock is moved, the WL sub-system has to pick the target | 120 | * When a physical eraseblock is moved, the WL sub-system has to pick the target |
121 | * physical eraseblock to move to. The simplest way would be just to pick the | 121 | * physical eraseblock to move to. The simplest way would be just to pick the |
122 | * one with the highest erase counter. But in certain workloads this could lead | 122 | * one with the highest erase counter. But in certain workloads this could lead |
123 | * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a | 123 | * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a |
124 | * situation when the picked physical eraseblock is constantly erased after the | 124 | * situation when the picked physical eraseblock is constantly erased after the |
125 | * data is written to it. So, we have a constant which limits the highest erase | 125 | * data is written to it. So, we have a constant which limits the highest erase |
126 | * counter of the free physical eraseblock to pick. Namely, the WL sub-system | 126 | * counter of the free physical eraseblock to pick. Namely, the WL sub-system |
127 | * does not pick eraseblocks with erase counter greater than the lowest erase | 127 | * does not pick eraseblocks with erase counter greater than the lowest erase |
128 | * counter plus %WL_FREE_MAX_DIFF. | 128 | * counter plus %WL_FREE_MAX_DIFF. |
129 | */ | 129 | */ |
130 | #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD) | 130 | #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD) |
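
A worked example (editor's note; 4096 is the usual Kconfig default for CONFIG_MTD_UBI_WL_THRESHOLD, stated here as an assumption): with that default, WL_FREE_MAX_DIFF is 8192, so if the lowest erase counter among the free PEBs is 100, the WL sub-system will not pick a wear-leveling target whose erase counter is greater than 100 + 8192 = 8292.
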
131 | 131 | ||
132 | /* | 132 | /* |
133 | * Maximum number of consecutive background thread failures which is enough to | 133 | * Maximum number of consecutive background thread failures which is enough to |
134 | * switch to read-only mode. | 134 | * switch to read-only mode. |
135 | */ | 135 | */ |
136 | #define WL_MAX_FAILURES 32 | 136 | #define WL_MAX_FAILURES 32 |
137 | 137 | ||
138 | /** | 138 | /** |
139 | * struct ubi_work - UBI work description data structure. | 139 | * struct ubi_work - UBI work description data structure. |
140 | * @list: a link in the list of pending works | 140 | * @list: a link in the list of pending works |
141 | * @func: worker function | 141 | * @func: worker function |
142 | * @e: physical eraseblock to erase | 142 | * @e: physical eraseblock to erase |
143 | * @torture: if the physical eraseblock has to be tortured | 143 | * @torture: if the physical eraseblock has to be tortured |
144 | * | 144 | * |
145 | * The @func pointer points to the worker function. If the @cancel argument is | 145 | * The @func pointer points to the worker function. If the @cancel argument is |
146 | * not zero, the worker has to free the resources and exit immediately. The | 146 | * not zero, the worker has to free the resources and exit immediately. The |
147 | * worker has to return zero in case of success and a negative error code in | 147 | * worker has to return zero in case of success and a negative error code in |
148 | * case of failure. | 148 | * case of failure. |
149 | */ | 149 | */ |
150 | struct ubi_work { | 150 | struct ubi_work { |
151 | struct list_head list; | 151 | struct list_head list; |
152 | int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel); | 152 | int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel); |
153 | /* The below fields are only relevant to erasure works */ | 153 | /* The below fields are only relevant to erasure works */ |
154 | struct ubi_wl_entry *e; | 154 | struct ubi_wl_entry *e; |
155 | int torture; | 155 | int torture; |
156 | }; | 156 | }; |
157 | 157 | ||
158 | static int self_check_ec(struct ubi_device *ubi, int pnum, int ec); | 158 | static int self_check_ec(struct ubi_device *ubi, int pnum, int ec); |
159 | static int self_check_in_wl_tree(const struct ubi_device *ubi, | 159 | static int self_check_in_wl_tree(const struct ubi_device *ubi, |
160 | struct ubi_wl_entry *e, struct rb_root *root); | 160 | struct ubi_wl_entry *e, struct rb_root *root); |
161 | static int self_check_in_pq(const struct ubi_device *ubi, | 161 | static int self_check_in_pq(const struct ubi_device *ubi, |
162 | struct ubi_wl_entry *e); | 162 | struct ubi_wl_entry *e); |
163 | 163 | ||
164 | /** | 164 | /** |
165 | * wl_tree_add - add a wear-leveling entry to a WL RB-tree. | 165 | * wl_tree_add - add a wear-leveling entry to a WL RB-tree. |
166 | * @e: the wear-leveling entry to add | 166 | * @e: the wear-leveling entry to add |
167 | * @root: the root of the tree | 167 | * @root: the root of the tree |
168 | * | 168 | * |
169 | * Note, we use (erase counter, physical eraseblock number) pairs as keys in | 169 | * Note, we use (erase counter, physical eraseblock number) pairs as keys in |
170 | * the @ubi->used and @ubi->free RB-trees. | 170 | * the @ubi->used and @ubi->free RB-trees. |
171 | */ | 171 | */ |
172 | static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root) | 172 | static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root) |
173 | { | 173 | { |
174 | struct rb_node **p, *parent = NULL; | 174 | struct rb_node **p, *parent = NULL; |
175 | 175 | ||
176 | p = &root->rb_node; | 176 | p = &root->rb_node; |
177 | while (*p) { | 177 | while (*p) { |
178 | struct ubi_wl_entry *e1; | 178 | struct ubi_wl_entry *e1; |
179 | 179 | ||
180 | parent = *p; | 180 | parent = *p; |
181 | e1 = rb_entry(parent, struct ubi_wl_entry, u.rb); | 181 | e1 = rb_entry(parent, struct ubi_wl_entry, u.rb); |
182 | 182 | ||
183 | if (e->ec < e1->ec) | 183 | if (e->ec < e1->ec) |
184 | p = &(*p)->rb_left; | 184 | p = &(*p)->rb_left; |
185 | else if (e->ec > e1->ec) | 185 | else if (e->ec > e1->ec) |
186 | p = &(*p)->rb_right; | 186 | p = &(*p)->rb_right; |
187 | else { | 187 | else { |
188 | ubi_assert(e->pnum != e1->pnum); | 188 | ubi_assert(e->pnum != e1->pnum); |
189 | if (e->pnum < e1->pnum) | 189 | if (e->pnum < e1->pnum) |
190 | p = &(*p)->rb_left; | 190 | p = &(*p)->rb_left; |
191 | else | 191 | else |
192 | p = &(*p)->rb_right; | 192 | p = &(*p)->rb_right; |
193 | } | 193 | } |
194 | } | 194 | } |
195 | 195 | ||
196 | rb_link_node(&e->u.rb, parent, p); | 196 | rb_link_node(&e->u.rb, parent, p); |
197 | rb_insert_color(&e->u.rb, root); | 197 | rb_insert_color(&e->u.rb, root); |
198 | } | 198 | } |
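
For clarity (editor's sketch; the helper name below is hypothetical and does not exist in the driver), the ordering maintained by the insertion loop above is the lexicographic order on (erase counter, PEB number) pairs, which keeps entries with equal erase counters distinct:

	static int wl_entry_cmp(const struct ubi_wl_entry *a,
				const struct ubi_wl_entry *b)
	{
		if (a->ec != b->ec)
			return a->ec < b->ec ? -1 : 1;
		if (a->pnum != b->pnum)
			return a->pnum < b->pnum ? -1 : 1;
		return 0;	/* same entry */
	}
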
199 | 199 | ||
200 | /** | 200 | /** |
201 | * do_work - do one pending work. | 201 | * do_work - do one pending work. |
202 | * @ubi: UBI device description object | 202 | * @ubi: UBI device description object |
203 | * | 203 | * |
204 | * This function returns zero in case of success and a negative error code in | 204 | * This function returns zero in case of success and a negative error code in |
205 | * case of failure. | 205 | * case of failure. |
206 | */ | 206 | */ |
207 | static int do_work(struct ubi_device *ubi) | 207 | static int do_work(struct ubi_device *ubi) |
208 | { | 208 | { |
209 | int err; | 209 | int err; |
210 | struct ubi_work *wrk; | 210 | struct ubi_work *wrk; |
211 | 211 | ||
212 | cond_resched(); | 212 | cond_resched(); |
213 | 213 | ||
214 | /* | 214 | /* |
215 | * @ubi->work_sem is used to synchronize with the workers. Workers take | 215 | * @ubi->work_sem is used to synchronize with the workers. Workers take |
216 | * it in read mode, so many of them may be doing works at a time. But | 216 | * it in read mode, so many of them may be doing works at a time. But |
217 | * the queue flush code has to be sure the whole queue of works is | 217 | * the queue flush code has to be sure the whole queue of works is |
218 | * done, and it takes the mutex in write mode. | 218 | * done, and it takes the mutex in write mode. |
219 | */ | 219 | */ |
220 | down_read(&ubi->work_sem); | 220 | down_read(&ubi->work_sem); |
221 | spin_lock(&ubi->wl_lock); | 221 | spin_lock(&ubi->wl_lock); |
222 | if (list_empty(&ubi->works)) { | 222 | if (list_empty(&ubi->works)) { |
223 | spin_unlock(&ubi->wl_lock); | 223 | spin_unlock(&ubi->wl_lock); |
224 | up_read(&ubi->work_sem); | 224 | up_read(&ubi->work_sem); |
225 | return 0; | 225 | return 0; |
226 | } | 226 | } |
227 | 227 | ||
228 | wrk = list_entry(ubi->works.next, struct ubi_work, list); | 228 | wrk = list_entry(ubi->works.next, struct ubi_work, list); |
229 | list_del(&wrk->list); | 229 | list_del(&wrk->list); |
230 | ubi->works_count -= 1; | 230 | ubi->works_count -= 1; |
231 | ubi_assert(ubi->works_count >= 0); | 231 | ubi_assert(ubi->works_count >= 0); |
232 | spin_unlock(&ubi->wl_lock); | 232 | spin_unlock(&ubi->wl_lock); |
233 | 233 | ||
234 | /* | 234 | /* |
235 | * Call the worker function. Do not touch the work structure | 235 | * Call the worker function. Do not touch the work structure |
236 | * after this call as it will have been freed or reused by that | 236 | * after this call as it will have been freed or reused by that |
237 | * time by the worker function. | 237 | * time by the worker function. |
238 | */ | 238 | */ |
239 | err = wrk->func(ubi, wrk, 0); | 239 | err = wrk->func(ubi, wrk, 0); |
240 | if (err) | 240 | if (err) |
241 | ubi_err("work failed with error code %d", err); | 241 | ubi_err("work failed with error code %d", err); |
242 | up_read(&ubi->work_sem); | 242 | up_read(&ubi->work_sem); |
243 | 243 | ||
244 | return err; | 244 | return err; |
245 | } | 245 | } |
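
An editor's sketch of the other side of this synchronization (it mirrors what the queue flush path does, shown here as an illustration rather than a quote of that function): the flusher takes @work_sem in write mode, which blocks until every worker currently holding it in read mode has finished:

	/* Wait until all works started before this point have completed */
	down_write(&ubi->work_sem);
	up_write(&ubi->work_sem);
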
246 | 246 | ||
247 | /** | 247 | /** |
248 | * produce_free_peb - produce a free physical eraseblock. | 248 | * produce_free_peb - produce a free physical eraseblock. |
249 | * @ubi: UBI device description object | 249 | * @ubi: UBI device description object |
250 | * | 250 | * |
251 | * This function tries to make a free PEB by means of synchronous execution of | 251 | * This function tries to make a free PEB by means of synchronous execution of |
252 | * pending works. This may be needed if, for example, the background thread is | 252 | * pending works. This may be needed if, for example, the background thread is |
253 | * disabled. Returns zero in case of success and a negative error code in case | 253 | * disabled. Returns zero in case of success and a negative error code in case |
254 | * of failure. | 254 | * of failure. |
255 | */ | 255 | */ |
256 | static int produce_free_peb(struct ubi_device *ubi) | 256 | static int produce_free_peb(struct ubi_device *ubi) |
257 | { | 257 | { |
258 | int err; | 258 | int err; |
259 | 259 | ||
260 | spin_lock(&ubi->wl_lock); | 260 | spin_lock(&ubi->wl_lock); |
261 | while (!ubi->free.rb_node) { | 261 | while (!ubi->free.rb_node) { |
262 | spin_unlock(&ubi->wl_lock); | 262 | spin_unlock(&ubi->wl_lock); |
263 | 263 | ||
264 | dbg_wl("do one work synchronously"); | 264 | dbg_wl("do one work synchronously"); |
265 | err = do_work(ubi); | 265 | err = do_work(ubi); |
266 | if (err) | 266 | if (err) |
267 | return err; | 267 | return err; |
268 | 268 | ||
269 | spin_lock(&ubi->wl_lock); | 269 | spin_lock(&ubi->wl_lock); |
270 | } | 270 | } |
271 | spin_unlock(&ubi->wl_lock); | 271 | spin_unlock(&ubi->wl_lock); |
272 | 272 | ||
273 | return 0; | 273 | return 0; |
274 | } | 274 | } |
275 | 275 | ||
276 | /** | 276 | /** |
277 | * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree. | 277 | * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree. |
278 | * @e: the wear-leveling entry to check | 278 | * @e: the wear-leveling entry to check |
279 | * @root: the root of the tree | 279 | * @root: the root of the tree |
280 | * | 280 | * |
281 | * This function returns non-zero if @e is in the @root RB-tree and zero if it | 281 | * This function returns non-zero if @e is in the @root RB-tree and zero if it |
282 | * is not. | 282 | * is not. |
283 | */ | 283 | */ |
284 | static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root) | 284 | static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root) |
285 | { | 285 | { |
286 | struct rb_node *p; | 286 | struct rb_node *p; |
287 | 287 | ||
288 | p = root->rb_node; | 288 | p = root->rb_node; |
289 | while (p) { | 289 | while (p) { |
290 | struct ubi_wl_entry *e1; | 290 | struct ubi_wl_entry *e1; |
291 | 291 | ||
292 | e1 = rb_entry(p, struct ubi_wl_entry, u.rb); | 292 | e1 = rb_entry(p, struct ubi_wl_entry, u.rb); |
293 | 293 | ||
294 | if (e->pnum == e1->pnum) { | 294 | if (e->pnum == e1->pnum) { |
295 | ubi_assert(e == e1); | 295 | ubi_assert(e == e1); |
296 | return 1; | 296 | return 1; |
297 | } | 297 | } |
298 | 298 | ||
299 | if (e->ec < e1->ec) | 299 | if (e->ec < e1->ec) |
300 | p = p->rb_left; | 300 | p = p->rb_left; |
301 | else if (e->ec > e1->ec) | 301 | else if (e->ec > e1->ec) |
302 | p = p->rb_right; | 302 | p = p->rb_right; |
303 | else { | 303 | else { |
304 | ubi_assert(e->pnum != e1->pnum); | 304 | ubi_assert(e->pnum != e1->pnum); |
305 | if (e->pnum < e1->pnum) | 305 | if (e->pnum < e1->pnum) |
306 | p = p->rb_left; | 306 | p = p->rb_left; |
307 | else | 307 | else |
308 | p = p->rb_right; | 308 | p = p->rb_right; |
309 | } | 309 | } |
310 | } | 310 | } |
311 | 311 | ||
312 | return 0; | 312 | return 0; |
313 | } | 313 | } |
314 | 314 | ||
315 | /** | 315 | /** |
316 | * prot_queue_add - add physical eraseblock to the protection queue. | 316 | * prot_queue_add - add physical eraseblock to the protection queue. |
317 | * @ubi: UBI device description object | 317 | * @ubi: UBI device description object |
318 | * @e: the physical eraseblock to add | 318 | * @e: the physical eraseblock to add |
319 | * | 319 | * |
320 | * This function adds @e to the tail of the protection queue @ubi->pq, where | 320 | * This function adds @e to the tail of the protection queue @ubi->pq, where |
321 | * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be | 321 | * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be |
322 | * temporarily protected from the wear-leveling worker. Note, @wl->lock has to | 322 | * temporarily protected from the wear-leveling worker. Note, @wl->lock has to |
323 | * be locked. | 323 | * be locked. |
324 | */ | 324 | */ |
325 | static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e) | 325 | static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e) |
326 | { | 326 | { |
327 | int pq_tail = ubi->pq_head - 1; | 327 | int pq_tail = ubi->pq_head - 1; |
328 | 328 | ||
329 | if (pq_tail < 0) | 329 | if (pq_tail < 0) |
330 | pq_tail = UBI_PROT_QUEUE_LEN - 1; | 330 | pq_tail = UBI_PROT_QUEUE_LEN - 1; |
331 | ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN); | 331 | ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN); |
332 | list_add_tail(&e->u.list, &ubi->pq[pq_tail]); | 332 | list_add_tail(&e->u.list, &ubi->pq[pq_tail]); |
333 | dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec); | 333 | dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec); |
334 | } | 334 | } |
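
A worked example (editor's note; %UBI_PROT_QUEUE_LEN is 10 in ubi.h): with @pq_head at 0 the computed tail wraps around to 9, so the PEB is appended to @pq[9]; the queue head then drains @pq[0], @pq[1], and so on, one slot per erase operation, and only reaches slot 9 roughly ten erasures later, at which point the entry finally moves to the @wl->used tree.
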
335 | 335 | ||
336 | /** | 336 | /** |
337 | * find_wl_entry - find wear-leveling entry closest to certain erase counter. | 337 | * find_wl_entry - find wear-leveling entry closest to certain erase counter. |
338 | * @root: the RB-tree to search | 338 | * @root: the RB-tree to search |
339 | * @diff: maximum possible difference from the smallest erase counter | 339 | * @diff: maximum possible difference from the smallest erase counter |
340 | * | 340 | * |
341 | * This function looks for a wear leveling entry with erase counter closest to | 341 | * This function looks for a wear leveling entry with erase counter closest to |
342 | * min + @diff, where min is the smallest erase counter. | 342 | * min + @diff, where min is the smallest erase counter. |
343 | */ | 343 | */ |
344 | static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff) | 344 | static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff) |
345 | { | 345 | { |
346 | struct rb_node *p; | 346 | struct rb_node *p; |
347 | struct ubi_wl_entry *e; | 347 | struct ubi_wl_entry *e; |
348 | int max; | 348 | int max; |
349 | 349 | ||
350 | e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); | 350 | e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); |
351 | max = e->ec + diff; | 351 | max = e->ec + diff; |
352 | 352 | ||
353 | p = root->rb_node; | 353 | p = root->rb_node; |
354 | while (p) { | 354 | while (p) { |
355 | struct ubi_wl_entry *e1; | 355 | struct ubi_wl_entry *e1; |
356 | 356 | ||
357 | e1 = rb_entry(p, struct ubi_wl_entry, u.rb); | 357 | e1 = rb_entry(p, struct ubi_wl_entry, u.rb); |
358 | if (e1->ec >= max) | 358 | if (e1->ec >= max) |
359 | p = p->rb_left; | 359 | p = p->rb_left; |
360 | else { | 360 | else { |
361 | p = p->rb_right; | 361 | p = p->rb_right; |
362 | e = e1; | 362 | e = e1; |
363 | } | 363 | } |
364 | } | 364 | } |
365 | 365 | ||
366 | return e; | 366 | return e; |
367 | } | 367 | } |
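
A short worked example (editor's note): if the free tree holds entries with erase counters 100, 105 and 5000 and @diff is 4096, then max is 100 + 4096 = 4196 and the function returns the entry with erase counter 105, i.e. the greatest erase counter that is still below the limit.
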
368 | 368 | ||
369 | /** | 369 | /** |
370 | * ubi_wl_get_peb - get a physical eraseblock. | 370 | * ubi_wl_get_peb - get a physical eraseblock. |
371 | * @ubi: UBI device description object | 371 | * @ubi: UBI device description object |
372 | * | 372 | * |
373 | * This function returns a physical eraseblock in case of success and a | 373 | * This function returns a physical eraseblock in case of success and a |
374 | * negative error code in case of failure. Might sleep. | 374 | * negative error code in case of failure. Might sleep. |
375 | */ | 375 | */ |
376 | int ubi_wl_get_peb(struct ubi_device *ubi) | 376 | int ubi_wl_get_peb(struct ubi_device *ubi) |
377 | { | 377 | { |
378 | int err; | 378 | int err; |
379 | struct ubi_wl_entry *e, *first, *last; | 379 | struct ubi_wl_entry *e, *first, *last; |
380 | 380 | ||
381 | retry: | 381 | retry: |
382 | spin_lock(&ubi->wl_lock); | 382 | spin_lock(&ubi->wl_lock); |
383 | if (!ubi->free.rb_node) { | 383 | if (!ubi->free.rb_node) { |
384 | if (ubi->works_count == 0) { | 384 | if (ubi->works_count == 0) { |
385 | ubi_assert(list_empty(&ubi->works)); | 385 | ubi_assert(list_empty(&ubi->works)); |
386 | ubi_err("no free eraseblocks"); | 386 | ubi_err("no free eraseblocks"); |
387 | spin_unlock(&ubi->wl_lock); | 387 | spin_unlock(&ubi->wl_lock); |
388 | return -ENOSPC; | 388 | return -ENOSPC; |
389 | } | 389 | } |
390 | spin_unlock(&ubi->wl_lock); | 390 | spin_unlock(&ubi->wl_lock); |
391 | 391 | ||
392 | err = produce_free_peb(ubi); | 392 | err = produce_free_peb(ubi); |
393 | if (err < 0) | 393 | if (err < 0) |
394 | return err; | 394 | return err; |
395 | goto retry; | 395 | goto retry; |
396 | } | 396 | } |
397 | 397 | ||
398 | first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb); | 398 | first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb); |
399 | last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb); | 399 | last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb); |
400 | 400 | ||
401 | if (last->ec - first->ec < WL_FREE_MAX_DIFF) | 401 | if (last->ec - first->ec < WL_FREE_MAX_DIFF) |
402 | e = rb_entry(ubi->free.rb_node, struct ubi_wl_entry, u.rb); | 402 | e = rb_entry(ubi->free.rb_node, struct ubi_wl_entry, u.rb); |
403 | else | 403 | else |
404 | e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2); | 404 | e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2); |
405 | 405 | ||
406 | self_check_in_wl_tree(ubi, e, &ubi->free); | 406 | self_check_in_wl_tree(ubi, e, &ubi->free); |
407 | 407 | ||
408 | /* | 408 | /* |
409 | * Move the physical eraseblock to the protection queue where it will | 409 | * Move the physical eraseblock to the protection queue where it will |
410 | * be protected from being moved for some time. | 410 | * be protected from being moved for some time. |
411 | */ | 411 | */ |
412 | rb_erase(&e->u.rb, &ubi->free); | 412 | rb_erase(&e->u.rb, &ubi->free); |
413 | dbg_wl("PEB %d EC %d", e->pnum, e->ec); | 413 | dbg_wl("PEB %d EC %d", e->pnum, e->ec); |
414 | prot_queue_add(ubi, e); | 414 | prot_queue_add(ubi, e); |
415 | spin_unlock(&ubi->wl_lock); | 415 | spin_unlock(&ubi->wl_lock); |
416 | 416 | ||
417 | err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset, | 417 | err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset, |
418 | ubi->peb_size - ubi->vid_hdr_aloffset); | 418 | ubi->peb_size - ubi->vid_hdr_aloffset); |
419 | if (err) { | 419 | if (err) { |
420 | ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum); | 420 | ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum); |
421 | return err; | 421 | return err; |
422 | } | 422 | } |
423 | 423 | ||
424 | return e->pnum; | 424 | return e->pnum; |
425 | } | 425 | } |
426 | 426 | ||
427 | /** | 427 | /** |
428 | * prot_queue_del - remove a physical eraseblock from the protection queue. | 428 | * prot_queue_del - remove a physical eraseblock from the protection queue. |
429 | * @ubi: UBI device description object | 429 | * @ubi: UBI device description object |
430 | * @pnum: the physical eraseblock to remove | 430 | * @pnum: the physical eraseblock to remove |
431 | * | 431 | * |
432 | * This function deletes PEB @pnum from the protection queue and returns zero | 432 | * This function deletes PEB @pnum from the protection queue and returns zero |
433 | * in case of success and %-ENODEV if the PEB was not found. | 433 | * in case of success and %-ENODEV if the PEB was not found. |
434 | */ | 434 | */ |
435 | static int prot_queue_del(struct ubi_device *ubi, int pnum) | 435 | static int prot_queue_del(struct ubi_device *ubi, int pnum) |
436 | { | 436 | { |
437 | struct ubi_wl_entry *e; | 437 | struct ubi_wl_entry *e; |
438 | 438 | ||
439 | e = ubi->lookuptbl[pnum]; | 439 | e = ubi->lookuptbl[pnum]; |
440 | if (!e) | 440 | if (!e) |
441 | return -ENODEV; | 441 | return -ENODEV; |
442 | 442 | ||
443 | if (self_check_in_pq(ubi, e)) | 443 | if (self_check_in_pq(ubi, e)) |
444 | return -ENODEV; | 444 | return -ENODEV; |
445 | 445 | ||
446 | list_del(&e->u.list); | 446 | list_del(&e->u.list); |
447 | dbg_wl("deleted PEB %d from the protection queue", e->pnum); | 447 | dbg_wl("deleted PEB %d from the protection queue", e->pnum); |
448 | return 0; | 448 | return 0; |
449 | } | 449 | } |
450 | 450 | ||
451 | /** | 451 | /** |
452 | * sync_erase - synchronously erase a physical eraseblock. | 452 | * sync_erase - synchronously erase a physical eraseblock. |
453 | * @ubi: UBI device description object | 453 | * @ubi: UBI device description object |
454 | * @e: the physical eraseblock to erase | 454 | * @e: the physical eraseblock to erase |
455 | * @torture: if the physical eraseblock has to be tortured | 455 | * @torture: if the physical eraseblock has to be tortured |
456 | * | 456 | * |
457 | * This function returns zero in case of success and a negative error code in | 457 | * This function returns zero in case of success and a negative error code in |
458 | * case of failure. | 458 | * case of failure. |
459 | */ | 459 | */ |
460 | static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, | 460 | static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, |
461 | int torture) | 461 | int torture) |
462 | { | 462 | { |
463 | int err; | 463 | int err; |
464 | struct ubi_ec_hdr *ec_hdr; | 464 | struct ubi_ec_hdr *ec_hdr; |
465 | unsigned long long ec = e->ec; | 465 | unsigned long long ec = e->ec; |
466 | 466 | ||
467 | dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec); | 467 | dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec); |
468 | 468 | ||
469 | err = self_check_ec(ubi, e->pnum, e->ec); | 469 | err = self_check_ec(ubi, e->pnum, e->ec); |
470 | if (err) | 470 | if (err) |
471 | return -EINVAL; | 471 | return -EINVAL; |
472 | 472 | ||
473 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); | 473 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); |
474 | if (!ec_hdr) | 474 | if (!ec_hdr) |
475 | return -ENOMEM; | 475 | return -ENOMEM; |
476 | 476 | ||
477 | err = ubi_io_sync_erase(ubi, e->pnum, torture); | 477 | err = ubi_io_sync_erase(ubi, e->pnum, torture); |
478 | if (err < 0) | 478 | if (err < 0) |
479 | goto out_free; | 479 | goto out_free; |
480 | 480 | ||
481 | ec += err; | 481 | ec += err; |
482 | if (ec > UBI_MAX_ERASECOUNTER) { | 482 | if (ec > UBI_MAX_ERASECOUNTER) { |
483 | /* | 483 | /* |
484 | * Erase counter overflow. Upgrade UBI and use 64-bit | 484 | * Erase counter overflow. Upgrade UBI and use 64-bit |
485 | * erase counters internally. | 485 | * erase counters internally. |
486 | */ | 486 | */ |
487 | ubi_err("erase counter overflow at PEB %d, EC %llu", | 487 | ubi_err("erase counter overflow at PEB %d, EC %llu", |
488 | e->pnum, ec); | 488 | e->pnum, ec); |
489 | err = -EINVAL; | 489 | err = -EINVAL; |
490 | goto out_free; | 490 | goto out_free; |
491 | } | 491 | } |
492 | 492 | ||
493 | dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec); | 493 | dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec); |
494 | 494 | ||
495 | ec_hdr->ec = cpu_to_be64(ec); | 495 | ec_hdr->ec = cpu_to_be64(ec); |
496 | 496 | ||
497 | err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr); | 497 | err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr); |
498 | if (err) | 498 | if (err) |
499 | goto out_free; | 499 | goto out_free; |
500 | 500 | ||
501 | e->ec = ec; | 501 | e->ec = ec; |
502 | spin_lock(&ubi->wl_lock); | 502 | spin_lock(&ubi->wl_lock); |
503 | if (e->ec > ubi->max_ec) | 503 | if (e->ec > ubi->max_ec) |
504 | ubi->max_ec = e->ec; | 504 | ubi->max_ec = e->ec; |
505 | spin_unlock(&ubi->wl_lock); | 505 | spin_unlock(&ubi->wl_lock); |
506 | 506 | ||
507 | out_free: | 507 | out_free: |
508 | kfree(ec_hdr); | 508 | kfree(ec_hdr); |
509 | return err; | 509 | return err; |
510 | } | 510 | } |
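sync_erase() bumps the 64-bit in-memory erase counter by the number of erase operations the driver reports and refuses to let it pass the on-flash limit before writing it back. A minimal sketch of that update, assuming a 31-bit on-flash limit (DEMO_MAX_ERASECOUNTER and demo_bump_ec are invented names):

/* Erase-counter update with an overflow guard, in isolation. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_ERASECOUNTER 0x7FFFFFFFULL

/* Returns 0 on success, -1 if the counter would overflow on flash. */
static int demo_bump_ec(uint64_t *ec, unsigned int erases_done)
{
	uint64_t new_ec = *ec + erases_done;

	if (new_ec > DEMO_MAX_ERASECOUNTER)
		return -1;	/* would overflow the on-flash counter */
	*ec = new_ec;
	return 0;
}

int main(void)
{
	uint64_t ec = 1000;

	if (demo_bump_ec(&ec, 1) == 0)
		printf("new erase counter: %llu\n", (unsigned long long)ec);
	return 0;
}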
511 | 511 | ||
512 | /** | 512 | /** |
513 | * serve_prot_queue - check if it is time to stop protecting PEBs. | 513 | * serve_prot_queue - check if it is time to stop protecting PEBs. |
514 | * @ubi: UBI device description object | 514 | * @ubi: UBI device description object |
515 | * | 515 | * |
516 | * This function is called after each erase operation and removes PEBs from the | 516 | * This function is called after each erase operation and removes PEBs from the |
517 | * tail of the protection queue. These PEBs have been protected for long enough | 517 | * tail of the protection queue. These PEBs have been protected for long enough |
518 | * and should be moved to the used tree. | 518 | * and should be moved to the used tree. |
519 | */ | 519 | */ |
520 | static void serve_prot_queue(struct ubi_device *ubi) | 520 | static void serve_prot_queue(struct ubi_device *ubi) |
521 | { | 521 | { |
522 | struct ubi_wl_entry *e, *tmp; | 522 | struct ubi_wl_entry *e, *tmp; |
523 | int count; | 523 | int count; |
524 | 524 | ||
525 | /* | 525 | /* |
526 | * There may be several protected physical eraseblocks to remove, | 526 | * There may be several protected physical eraseblocks to remove, |
527 | * process them all. | 527 | * process them all. |
528 | */ | 528 | */ |
529 | repeat: | 529 | repeat: |
530 | count = 0; | 530 | count = 0; |
531 | spin_lock(&ubi->wl_lock); | 531 | spin_lock(&ubi->wl_lock); |
532 | list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) { | 532 | list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) { |
533 | dbg_wl("PEB %d EC %d protection over, move to used tree", | 533 | dbg_wl("PEB %d EC %d protection over, move to used tree", |
534 | e->pnum, e->ec); | 534 | e->pnum, e->ec); |
535 | 535 | ||
536 | list_del(&e->u.list); | 536 | list_del(&e->u.list); |
537 | wl_tree_add(e, &ubi->used); | 537 | wl_tree_add(e, &ubi->used); |
538 | if (count++ > 32) { | 538 | if (count++ > 32) { |
539 | /* | 539 | /* |
540 | * Let's be nice and avoid holding the spinlock for | 540 | * Let's be nice and avoid holding the spinlock for |
541 | * too long. | 541 | * too long. |
542 | */ | 542 | */ |
543 | spin_unlock(&ubi->wl_lock); | 543 | spin_unlock(&ubi->wl_lock); |
544 | cond_resched(); | 544 | cond_resched(); |
545 | goto repeat; | 545 | goto repeat; |
546 | } | 546 | } |
547 | } | 547 | } |
548 | 548 | ||
549 | ubi->pq_head += 1; | 549 | ubi->pq_head += 1; |
550 | if (ubi->pq_head == UBI_PROT_QUEUE_LEN) | 550 | if (ubi->pq_head == UBI_PROT_QUEUE_LEN) |
551 | ubi->pq_head = 0; | 551 | ubi->pq_head = 0; |
552 | ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN); | 552 | ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN); |
553 | spin_unlock(&ubi->wl_lock); | 553 | spin_unlock(&ubi->wl_lock); |
554 | } | 554 | } |
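The protection queue served above is a fixed-length ring of lists indexed by a rotating head: a PEB is added just behind the head and is released roughly one full rotation later. A rough user-space model of that data structure (DEMO_PQ_LEN, the int slots and demo_* names are invented for the example; UBI keeps kernel list heads of wl entries instead):

/* Rotating protection queue, modelled with fixed-size slots. */
#include <stdio.h>

#define DEMO_PQ_LEN 4
#define DEMO_SLOT_CAP 8

static int pq[DEMO_PQ_LEN][DEMO_SLOT_CAP];
static int pq_count[DEMO_PQ_LEN];
static int pq_head;

/* Protect a PEB: put it in the slot just behind the head so it
 * survives for roughly DEMO_PQ_LEN serve operations. */
static void demo_protect(int pnum)
{
	int tail = (pq_head + DEMO_PQ_LEN - 1) % DEMO_PQ_LEN;

	pq[tail][pq_count[tail]++] = pnum;
}

/* Serve the queue: release everything at the head slot and advance. */
static void demo_serve(void)
{
	for (int i = 0; i < pq_count[pq_head]; i++)
		printf("PEB %d protection over\n", pq[pq_head][i]);
	pq_count[pq_head] = 0;
	pq_head = (pq_head + 1) % DEMO_PQ_LEN;
}

int main(void)
{
	demo_protect(42);
	for (int i = 0; i < DEMO_PQ_LEN; i++)
		demo_serve();	/* PEB 42 is released on the last pass */
	return 0;
}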
555 | 555 | ||
556 | /** | 556 | /** |
557 | * schedule_ubi_work - schedule a work. | 557 | * schedule_ubi_work - schedule a work. |
558 | * @ubi: UBI device description object | 558 | * @ubi: UBI device description object |
559 | * @wrk: the work to schedule | 559 | * @wrk: the work to schedule |
560 | * | 560 | * |
561 | * This function adds a work defined by @wrk to the tail of the pending works | 561 | * This function adds a work defined by @wrk to the tail of the pending works |
562 | * list. | 562 | * list. |
563 | */ | 563 | */ |
564 | static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) | 564 | static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) |
565 | { | 565 | { |
566 | spin_lock(&ubi->wl_lock); | 566 | spin_lock(&ubi->wl_lock); |
567 | list_add_tail(&wrk->list, &ubi->works); | 567 | list_add_tail(&wrk->list, &ubi->works); |
568 | ubi_assert(ubi->works_count >= 0); | 568 | ubi_assert(ubi->works_count >= 0); |
569 | ubi->works_count += 1; | 569 | ubi->works_count += 1; |
570 | if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi)) | 570 | if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi)) |
571 | wake_up_process(ubi->bgt_thread); | 571 | wake_up_process(ubi->bgt_thread); |
572 | spin_unlock(&ubi->wl_lock); | 572 | spin_unlock(&ubi->wl_lock); |
573 | } | 573 | } |
574 | 574 | ||
575 | static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | 575 | static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, |
576 | int cancel); | 576 | int cancel); |
577 | 577 | ||
578 | /** | 578 | /** |
579 | * schedule_erase - schedule an erase work. | 579 | * schedule_erase - schedule an erase work. |
580 | * @ubi: UBI device description object | 580 | * @ubi: UBI device description object |
581 | * @e: the WL entry of the physical eraseblock to erase | 581 | * @e: the WL entry of the physical eraseblock to erase |
582 | * @torture: if the physical eraseblock has to be tortured | 582 | * @torture: if the physical eraseblock has to be tortured |
583 | * | 583 | * |
584 | * This function returns zero in case of success and %-ENOMEM in case of | 584 | * This function returns zero in case of success and %-ENOMEM in case of |
585 | * failure. | 585 | * failure. |
586 | */ | 586 | */ |
587 | static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, | 587 | static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, |
588 | int torture) | 588 | int torture) |
589 | { | 589 | { |
590 | struct ubi_work *wl_wrk; | 590 | struct ubi_work *wl_wrk; |
591 | 591 | ||
592 | dbg_wl("schedule erasure of PEB %d, EC %d, torture %d", | 592 | dbg_wl("schedule erasure of PEB %d, EC %d, torture %d", |
593 | e->pnum, e->ec, torture); | 593 | e->pnum, e->ec, torture); |
594 | 594 | ||
595 | wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); | 595 | wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); |
596 | if (!wl_wrk) | 596 | if (!wl_wrk) |
597 | return -ENOMEM; | 597 | return -ENOMEM; |
598 | 598 | ||
599 | wl_wrk->func = &erase_worker; | 599 | wl_wrk->func = &erase_worker; |
600 | wl_wrk->e = e; | 600 | wl_wrk->e = e; |
601 | wl_wrk->torture = torture; | 601 | wl_wrk->torture = torture; |
602 | 602 | ||
603 | schedule_ubi_work(ubi, wl_wrk); | 603 | schedule_ubi_work(ubi, wl_wrk); |
604 | return 0; | 604 | return 0; |
605 | } | 605 | } |
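schedule_erase() and schedule_ubi_work() show the general work-item pattern: a small queued object carries a callback plus its arguments, and the consumer just pops and invokes it. A self-contained sketch of that pattern, assuming a simple singly linked list (the demo_* names are invented):

/* Function-pointer work items appended to a pending list. */
#include <stdio.h>
#include <stdlib.h>

struct demo_work {
	struct demo_work *next;
	int (*func)(struct demo_work *w);
	int pnum;		/* arguments for the callback */
	int torture;
};

static struct demo_work *head, **tail = &head;

static void demo_schedule(struct demo_work *w)
{
	w->next = NULL;
	*tail = w;		/* append to the tail of the pending list */
	tail = &w->next;
}

static int demo_erase_worker(struct demo_work *w)
{
	printf("erasing PEB %d (torture=%d)\n", w->pnum, w->torture);
	return 0;
}

int main(void)
{
	struct demo_work *w = calloc(1, sizeof(*w));

	if (!w)
		return 1;
	w->func = demo_erase_worker;
	w->pnum = 7;
	demo_schedule(w);

	while (head) {			/* consumer side: pop and run */
		struct demo_work *cur = head;

		head = cur->next;
		cur->func(cur);
		free(cur);
	}
	tail = &head;			/* reset tail once the list is empty */
	return 0;
}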
606 | 606 | ||
607 | /** | 607 | /** |
608 | * wear_leveling_worker - wear-leveling worker function. | 608 | * wear_leveling_worker - wear-leveling worker function. |
609 | * @ubi: UBI device description object | 609 | * @ubi: UBI device description object |
610 | * @wrk: the work object | 610 | * @wrk: the work object |
611 | * @cancel: non-zero if the worker has to free memory and exit | 611 | * @cancel: non-zero if the worker has to free memory and exit |
612 | * | 612 | * |
613 | * This function copies a more worn out physical eraseblock to a less worn out | 613 | * This function copies a more worn out physical eraseblock to a less worn out |
614 | * one. Returns zero in case of success and a negative error code in case of | 614 | * one. Returns zero in case of success and a negative error code in case of |
615 | * failure. | 615 | * failure. |
616 | */ | 616 | */ |
617 | static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | 617 | static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, |
618 | int cancel) | 618 | int cancel) |
619 | { | 619 | { |
620 | int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; | 620 | int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; |
621 | int vol_id = -1, uninitialized_var(lnum); | 621 | int vol_id = -1, uninitialized_var(lnum); |
622 | struct ubi_wl_entry *e1, *e2; | 622 | struct ubi_wl_entry *e1, *e2; |
623 | struct ubi_vid_hdr *vid_hdr; | 623 | struct ubi_vid_hdr *vid_hdr; |
624 | 624 | ||
625 | kfree(wrk); | 625 | kfree(wrk); |
626 | if (cancel) | 626 | if (cancel) |
627 | return 0; | 627 | return 0; |
628 | 628 | ||
629 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); | 629 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
630 | if (!vid_hdr) | 630 | if (!vid_hdr) |
631 | return -ENOMEM; | 631 | return -ENOMEM; |
632 | 632 | ||
633 | mutex_lock(&ubi->move_mutex); | 633 | mutex_lock(&ubi->move_mutex); |
634 | spin_lock(&ubi->wl_lock); | 634 | spin_lock(&ubi->wl_lock); |
635 | ubi_assert(!ubi->move_from && !ubi->move_to); | 635 | ubi_assert(!ubi->move_from && !ubi->move_to); |
636 | ubi_assert(!ubi->move_to_put); | 636 | ubi_assert(!ubi->move_to_put); |
637 | 637 | ||
638 | if (!ubi->free.rb_node || | 638 | if (!ubi->free.rb_node || |
639 | (!ubi->used.rb_node && !ubi->scrub.rb_node)) { | 639 | (!ubi->used.rb_node && !ubi->scrub.rb_node)) { |
640 | /* | 640 | /* |
641 | * No free physical eraseblocks? Well, they must be waiting in | 641 | * No free physical eraseblocks? Well, they must be waiting in |
642 | * the queue to be erased. Cancel movement - it will be | 642 | * the queue to be erased. Cancel movement - it will be |
643 | * triggered again when a free physical eraseblock appears. | 643 | * triggered again when a free physical eraseblock appears. |
644 | * | 644 | * |
645 | * No used physical eraseblocks? They must be temporarily | 645 | * No used physical eraseblocks? They must be temporarily |
646 | * protected from being moved. They will be moved to the | 646 | * protected from being moved. They will be moved to the |
647 | * @ubi->used tree later and the wear-leveling will be | 647 | * @ubi->used tree later and the wear-leveling will be |
648 | * triggered again. | 648 | * triggered again. |
649 | */ | 649 | */ |
650 | dbg_wl("cancel WL, a list is empty: free %d, used %d", | 650 | dbg_wl("cancel WL, a list is empty: free %d, used %d", |
651 | !ubi->free.rb_node, !ubi->used.rb_node); | 651 | !ubi->free.rb_node, !ubi->used.rb_node); |
652 | goto out_cancel; | 652 | goto out_cancel; |
653 | } | 653 | } |
654 | 654 | ||
655 | if (!ubi->scrub.rb_node) { | 655 | if (!ubi->scrub.rb_node) { |
656 | /* | 656 | /* |
657 | * Now pick the least worn-out used physical eraseblock and a | 657 | * Now pick the least worn-out used physical eraseblock and a |
658 | * highly worn-out free physical eraseblock. If the erase | 658 | * highly worn-out free physical eraseblock. If the erase |
659 | * counters differ enough, start wear-leveling. | 659 | * counters differ enough, start wear-leveling. |
660 | */ | 660 | */ |
661 | e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); | 661 | e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); |
662 | e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); | 662 | e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); |
663 | 663 | ||
664 | if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { | 664 | if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { |
665 | dbg_wl("no WL needed: min used EC %d, max free EC %d", | 665 | dbg_wl("no WL needed: min used EC %d, max free EC %d", |
666 | e1->ec, e2->ec); | 666 | e1->ec, e2->ec); |
667 | goto out_cancel; | 667 | goto out_cancel; |
668 | } | 668 | } |
669 | self_check_in_wl_tree(ubi, e1, &ubi->used); | 669 | self_check_in_wl_tree(ubi, e1, &ubi->used); |
670 | rb_erase(&e1->u.rb, &ubi->used); | 670 | rb_erase(&e1->u.rb, &ubi->used); |
671 | dbg_wl("move PEB %d EC %d to PEB %d EC %d", | 671 | dbg_wl("move PEB %d EC %d to PEB %d EC %d", |
672 | e1->pnum, e1->ec, e2->pnum, e2->ec); | 672 | e1->pnum, e1->ec, e2->pnum, e2->ec); |
673 | } else { | 673 | } else { |
674 | /* Perform scrubbing */ | 674 | /* Perform scrubbing */ |
675 | scrubbing = 1; | 675 | scrubbing = 1; |
676 | e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb); | 676 | e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb); |
677 | e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); | 677 | e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); |
678 | self_check_in_wl_tree(ubi, e1, &ubi->scrub); | 678 | self_check_in_wl_tree(ubi, e1, &ubi->scrub); |
679 | rb_erase(&e1->u.rb, &ubi->scrub); | 679 | rb_erase(&e1->u.rb, &ubi->scrub); |
680 | dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); | 680 | dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); |
681 | } | 681 | } |
682 | 682 | ||
683 | self_check_in_wl_tree(ubi, e2, &ubi->free); | 683 | self_check_in_wl_tree(ubi, e2, &ubi->free); |
684 | rb_erase(&e2->u.rb, &ubi->free); | 684 | rb_erase(&e2->u.rb, &ubi->free); |
685 | ubi->move_from = e1; | 685 | ubi->move_from = e1; |
686 | ubi->move_to = e2; | 686 | ubi->move_to = e2; |
687 | spin_unlock(&ubi->wl_lock); | 687 | spin_unlock(&ubi->wl_lock); |
688 | 688 | ||
689 | /* | 689 | /* |
690 | * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum. | 690 | * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum. |
691 | * We so far do not know which logical eraseblock our physical | 691 | * We so far do not know which logical eraseblock our physical |
692 | * eraseblock (@e1) belongs to. We have to read the volume identifier | 692 | * eraseblock (@e1) belongs to. We have to read the volume identifier |
693 | * header first. | 693 | * header first. |
694 | * | 694 | * |
695 | * Note, we are protected from this PEB being unmapped and erased. The | 695 | * Note, we are protected from this PEB being unmapped and erased. The |
696 | * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB | 696 | * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB |
697 | * which is being moved was unmapped. | 697 | * which is being moved was unmapped. |
698 | */ | 698 | */ |
699 | 699 | ||
700 | err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); | 700 | err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); |
701 | if (err && err != UBI_IO_BITFLIPS) { | 701 | if (err && err != UBI_IO_BITFLIPS) { |
702 | if (err == UBI_IO_FF) { | 702 | if (err == UBI_IO_FF) { |
703 | /* | 703 | /* |
704 | * We are trying to move a PEB without a VID header. UBI | 704 | * We are trying to move a PEB without a VID header. UBI |
705 | * always writes VID headers shortly after the PEB was | 705 | * always writes VID headers shortly after the PEB was |
706 | * given, so we have a situation when it has not yet | 706 | * given, so we have a situation when it has not yet |
707 | * had a chance to write it, because it was preempted. | 707 | * had a chance to write it, because it was preempted. |
708 | * So add this PEB to the protection queue for now, | 708 | * So add this PEB to the protection queue for now, |
709 | * because presumably more data will be written there | 709 | * because presumably more data will be written there |
710 | * (including the missing VID header), and then we'll | 710 | * (including the missing VID header), and then we'll |
711 | * move it. | 711 | * move it. |
712 | */ | 712 | */ |
713 | dbg_wl("PEB %d has no VID header", e1->pnum); | 713 | dbg_wl("PEB %d has no VID header", e1->pnum); |
714 | protect = 1; | 714 | protect = 1; |
715 | goto out_not_moved; | 715 | goto out_not_moved; |
716 | } else if (err == UBI_IO_FF_BITFLIPS) { | 716 | } else if (err == UBI_IO_FF_BITFLIPS) { |
717 | /* | 717 | /* |
718 | * The same situation as %UBI_IO_FF, but bit-flips were | 718 | * The same situation as %UBI_IO_FF, but bit-flips were |
719 | * detected. It is better to schedule this PEB for | 719 | * detected. It is better to schedule this PEB for |
720 | * scrubbing. | 720 | * scrubbing. |
721 | */ | 721 | */ |
722 | dbg_wl("PEB %d has no VID header but has bit-flips", | 722 | dbg_wl("PEB %d has no VID header but has bit-flips", |
723 | e1->pnum); | 723 | e1->pnum); |
724 | scrubbing = 1; | 724 | scrubbing = 1; |
725 | goto out_not_moved; | 725 | goto out_not_moved; |
726 | } | 726 | } |
727 | 727 | ||
728 | ubi_err("error %d while reading VID header from PEB %d", | 728 | ubi_err("error %d while reading VID header from PEB %d", |
729 | err, e1->pnum); | 729 | err, e1->pnum); |
730 | goto out_error; | 730 | goto out_error; |
731 | } | 731 | } |
732 | 732 | ||
733 | vol_id = be32_to_cpu(vid_hdr->vol_id); | 733 | vol_id = be32_to_cpu(vid_hdr->vol_id); |
734 | lnum = be32_to_cpu(vid_hdr->lnum); | 734 | lnum = be32_to_cpu(vid_hdr->lnum); |
735 | 735 | ||
736 | err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); | 736 | err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); |
737 | if (err) { | 737 | if (err) { |
738 | if (err == MOVE_CANCEL_RACE) { | 738 | if (err == MOVE_CANCEL_RACE) { |
739 | /* | 739 | /* |
740 | * The LEB has not been moved because the volume is | 740 | * The LEB has not been moved because the volume is |
741 | * being deleted or the PEB has been put meanwhile. We | 741 | * being deleted or the PEB has been put meanwhile. We |
742 | * should prevent this PEB from being selected for | 742 | * should prevent this PEB from being selected for |
743 | * wear-leveling movement again, so put it to the | 743 | * wear-leveling movement again, so put it to the |
744 | * protection queue. | 744 | * protection queue. |
745 | */ | 745 | */ |
746 | protect = 1; | 746 | protect = 1; |
747 | goto out_not_moved; | 747 | goto out_not_moved; |
748 | } | 748 | } |
749 | if (err == MOVE_RETRY) { | 749 | if (err == MOVE_RETRY) { |
750 | scrubbing = 1; | 750 | scrubbing = 1; |
751 | goto out_not_moved; | 751 | goto out_not_moved; |
752 | } | 752 | } |
753 | if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR || | 753 | if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR || |
754 | err == MOVE_TARGET_RD_ERR) { | 754 | err == MOVE_TARGET_RD_ERR) { |
755 | /* | 755 | /* |
756 | * Target PEB had bit-flips or write error - torture it. | 756 | * Target PEB had bit-flips or write error - torture it. |
757 | */ | 757 | */ |
758 | torture = 1; | 758 | torture = 1; |
759 | goto out_not_moved; | 759 | goto out_not_moved; |
760 | } | 760 | } |
761 | 761 | ||
762 | if (err == MOVE_SOURCE_RD_ERR) { | 762 | if (err == MOVE_SOURCE_RD_ERR) { |
763 | /* | 763 | /* |
764 | * An error happened while reading the source PEB. Do | 764 | * An error happened while reading the source PEB. Do |
765 | * not switch to R/O mode in this case, and give the | 765 | * not switch to R/O mode in this case, and give the |
766 | * upper layers a chance to recover from this, | 766 | * upper layers a chance to recover from this, |
767 | * e.g. by unmapping the corresponding LEB. Instead, just | 767 | * e.g. by unmapping the corresponding LEB. Instead, just |
768 | * put this PEB to the @ubi->erroneous list to prevent | 768 | * put this PEB to the @ubi->erroneous list to prevent |
769 | * UBI from trying to move it over and over again. | 769 | * UBI from trying to move it over and over again. |
770 | */ | 770 | */ |
771 | if (ubi->erroneous_peb_count > ubi->max_erroneous) { | 771 | if (ubi->erroneous_peb_count > ubi->max_erroneous) { |
772 | ubi_err("too many erroneous eraseblocks (%d)", | 772 | ubi_err("too many erroneous eraseblocks (%d)", |
773 | ubi->erroneous_peb_count); | 773 | ubi->erroneous_peb_count); |
774 | goto out_error; | 774 | goto out_error; |
775 | } | 775 | } |
776 | erroneous = 1; | 776 | erroneous = 1; |
777 | goto out_not_moved; | 777 | goto out_not_moved; |
778 | } | 778 | } |
779 | 779 | ||
780 | if (err < 0) | 780 | if (err < 0) |
781 | goto out_error; | 781 | goto out_error; |
782 | 782 | ||
783 | ubi_assert(0); | 783 | ubi_assert(0); |
784 | } | 784 | } |
785 | 785 | ||
786 | /* The PEB has been successfully moved */ | 786 | /* The PEB has been successfully moved */ |
787 | if (scrubbing) | 787 | if (scrubbing) |
788 | ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d", | 788 | ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d", |
789 | e1->pnum, vol_id, lnum, e2->pnum); | 789 | e1->pnum, vol_id, lnum, e2->pnum); |
790 | ubi_free_vid_hdr(ubi, vid_hdr); | 790 | ubi_free_vid_hdr(ubi, vid_hdr); |
791 | 791 | ||
792 | spin_lock(&ubi->wl_lock); | 792 | spin_lock(&ubi->wl_lock); |
793 | if (!ubi->move_to_put) { | 793 | if (!ubi->move_to_put) { |
794 | wl_tree_add(e2, &ubi->used); | 794 | wl_tree_add(e2, &ubi->used); |
795 | e2 = NULL; | 795 | e2 = NULL; |
796 | } | 796 | } |
797 | ubi->move_from = ubi->move_to = NULL; | 797 | ubi->move_from = ubi->move_to = NULL; |
798 | ubi->move_to_put = ubi->wl_scheduled = 0; | 798 | ubi->move_to_put = ubi->wl_scheduled = 0; |
799 | spin_unlock(&ubi->wl_lock); | 799 | spin_unlock(&ubi->wl_lock); |
800 | 800 | ||
801 | err = schedule_erase(ubi, e1, 0); | 801 | err = schedule_erase(ubi, e1, 0); |
802 | if (err) { | 802 | if (err) { |
803 | kmem_cache_free(ubi_wl_entry_slab, e1); | 803 | kmem_cache_free(ubi_wl_entry_slab, e1); |
804 | if (e2) | 804 | if (e2) |
805 | kmem_cache_free(ubi_wl_entry_slab, e2); | 805 | kmem_cache_free(ubi_wl_entry_slab, e2); |
806 | goto out_ro; | 806 | goto out_ro; |
807 | } | 807 | } |
808 | 808 | ||
809 | if (e2) { | 809 | if (e2) { |
810 | /* | 810 | /* |
811 | * Well, the target PEB was put meanwhile, schedule it for | 811 | * Well, the target PEB was put meanwhile, schedule it for |
812 | * erasure. | 812 | * erasure. |
813 | */ | 813 | */ |
814 | dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase", | 814 | dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase", |
815 | e2->pnum, vol_id, lnum); | 815 | e2->pnum, vol_id, lnum); |
816 | err = schedule_erase(ubi, e2, 0); | 816 | err = schedule_erase(ubi, e2, 0); |
817 | if (err) { | 817 | if (err) { |
818 | kmem_cache_free(ubi_wl_entry_slab, e2); | 818 | kmem_cache_free(ubi_wl_entry_slab, e2); |
819 | goto out_ro; | 819 | goto out_ro; |
820 | } | 820 | } |
821 | } | 821 | } |
822 | 822 | ||
823 | dbg_wl("done"); | 823 | dbg_wl("done"); |
824 | mutex_unlock(&ubi->move_mutex); | 824 | mutex_unlock(&ubi->move_mutex); |
825 | return 0; | 825 | return 0; |
826 | 826 | ||
827 | /* | 827 | /* |
828 | * For some reason the LEB was not moved - it might be an error or | 828 | * For some reason the LEB was not moved - it might be an error or |
829 | * something else. @e1 was not changed, so return it back. @e2 might | 829 | * something else. @e1 was not changed, so return it back. @e2 might |
830 | * have been changed, schedule it for erasure. | 830 | * have been changed, schedule it for erasure. |
831 | */ | 831 | */ |
832 | out_not_moved: | 832 | out_not_moved: |
833 | if (vol_id != -1) | 833 | if (vol_id != -1) |
834 | dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)", | 834 | dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)", |
835 | e1->pnum, vol_id, lnum, e2->pnum, err); | 835 | e1->pnum, vol_id, lnum, e2->pnum, err); |
836 | else | 836 | else |
837 | dbg_wl("cancel moving PEB %d to PEB %d (%d)", | 837 | dbg_wl("cancel moving PEB %d to PEB %d (%d)", |
838 | e1->pnum, e2->pnum, err); | 838 | e1->pnum, e2->pnum, err); |
839 | spin_lock(&ubi->wl_lock); | 839 | spin_lock(&ubi->wl_lock); |
840 | if (protect) | 840 | if (protect) |
841 | prot_queue_add(ubi, e1); | 841 | prot_queue_add(ubi, e1); |
842 | else if (erroneous) { | 842 | else if (erroneous) { |
843 | wl_tree_add(e1, &ubi->erroneous); | 843 | wl_tree_add(e1, &ubi->erroneous); |
844 | ubi->erroneous_peb_count += 1; | 844 | ubi->erroneous_peb_count += 1; |
845 | } else if (scrubbing) | 845 | } else if (scrubbing) |
846 | wl_tree_add(e1, &ubi->scrub); | 846 | wl_tree_add(e1, &ubi->scrub); |
847 | else | 847 | else |
848 | wl_tree_add(e1, &ubi->used); | 848 | wl_tree_add(e1, &ubi->used); |
849 | ubi_assert(!ubi->move_to_put); | 849 | ubi_assert(!ubi->move_to_put); |
850 | ubi->move_from = ubi->move_to = NULL; | 850 | ubi->move_from = ubi->move_to = NULL; |
851 | ubi->wl_scheduled = 0; | 851 | ubi->wl_scheduled = 0; |
852 | spin_unlock(&ubi->wl_lock); | 852 | spin_unlock(&ubi->wl_lock); |
853 | 853 | ||
854 | ubi_free_vid_hdr(ubi, vid_hdr); | 854 | ubi_free_vid_hdr(ubi, vid_hdr); |
855 | err = schedule_erase(ubi, e2, torture); | 855 | err = schedule_erase(ubi, e2, torture); |
856 | if (err) { | 856 | if (err) { |
857 | kmem_cache_free(ubi_wl_entry_slab, e2); | 857 | kmem_cache_free(ubi_wl_entry_slab, e2); |
858 | goto out_ro; | 858 | goto out_ro; |
859 | } | 859 | } |
860 | mutex_unlock(&ubi->move_mutex); | 860 | mutex_unlock(&ubi->move_mutex); |
861 | return 0; | 861 | return 0; |
862 | 862 | ||
863 | out_error: | 863 | out_error: |
864 | if (vol_id != -1) | 864 | if (vol_id != -1) |
865 | ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d", | 865 | ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d", |
866 | err, e1->pnum, vol_id, lnum, e2->pnum); | 866 | err, e1->pnum, vol_id, lnum, e2->pnum); |
867 | else | 867 | else |
868 | ubi_err("error %d while moving PEB %d to PEB %d", | 868 | ubi_err("error %d while moving PEB %d to PEB %d", |
869 | err, e1->pnum, e2->pnum); | 869 | err, e1->pnum, e2->pnum); |
870 | spin_lock(&ubi->wl_lock); | 870 | spin_lock(&ubi->wl_lock); |
871 | ubi->move_from = ubi->move_to = NULL; | 871 | ubi->move_from = ubi->move_to = NULL; |
872 | ubi->move_to_put = ubi->wl_scheduled = 0; | 872 | ubi->move_to_put = ubi->wl_scheduled = 0; |
873 | spin_unlock(&ubi->wl_lock); | 873 | spin_unlock(&ubi->wl_lock); |
874 | 874 | ||
875 | ubi_free_vid_hdr(ubi, vid_hdr); | 875 | ubi_free_vid_hdr(ubi, vid_hdr); |
876 | kmem_cache_free(ubi_wl_entry_slab, e1); | 876 | kmem_cache_free(ubi_wl_entry_slab, e1); |
877 | kmem_cache_free(ubi_wl_entry_slab, e2); | 877 | kmem_cache_free(ubi_wl_entry_slab, e2); |
878 | 878 | ||
879 | out_ro: | 879 | out_ro: |
880 | ubi_ro_mode(ubi); | 880 | ubi_ro_mode(ubi); |
881 | mutex_unlock(&ubi->move_mutex); | 881 | mutex_unlock(&ubi->move_mutex); |
882 | ubi_assert(err != 0); | 882 | ubi_assert(err != 0); |
883 | return err < 0 ? err : -EIO; | 883 | return err < 0 ? err : -EIO; |
884 | 884 | ||
885 | out_cancel: | 885 | out_cancel: |
886 | ubi->wl_scheduled = 0; | 886 | ubi->wl_scheduled = 0; |
887 | spin_unlock(&ubi->wl_lock); | 887 | spin_unlock(&ubi->wl_lock); |
888 | mutex_unlock(&ubi->move_mutex); | 888 | mutex_unlock(&ubi->move_mutex); |
889 | ubi_free_vid_hdr(ubi, vid_hdr); | 889 | ubi_free_vid_hdr(ubi, vid_hdr); |
890 | return 0; | 890 | return 0; |
891 | } | 891 | } |
892 | 892 | ||
893 | /** | 893 | /** |
894 | * ensure_wear_leveling - schedule wear-leveling if it is needed. | 894 | * ensure_wear_leveling - schedule wear-leveling if it is needed. |
895 | * @ubi: UBI device description object | 895 | * @ubi: UBI device description object |
896 | * | 896 | * |
897 | * This function checks if it is time to start wear-leveling and schedules it | 897 | * This function checks if it is time to start wear-leveling and schedules it |
898 | * if yes. This function returns zero in case of success and a negative error | 898 | * if yes. This function returns zero in case of success and a negative error |
899 | * code in case of failure. | 899 | * code in case of failure. |
900 | */ | 900 | */ |
901 | static int ensure_wear_leveling(struct ubi_device *ubi) | 901 | static int ensure_wear_leveling(struct ubi_device *ubi) |
902 | { | 902 | { |
903 | int err = 0; | 903 | int err = 0; |
904 | struct ubi_wl_entry *e1; | 904 | struct ubi_wl_entry *e1; |
905 | struct ubi_wl_entry *e2; | 905 | struct ubi_wl_entry *e2; |
906 | struct ubi_work *wrk; | 906 | struct ubi_work *wrk; |
907 | 907 | ||
908 | spin_lock(&ubi->wl_lock); | 908 | spin_lock(&ubi->wl_lock); |
909 | if (ubi->wl_scheduled) | 909 | if (ubi->wl_scheduled) |
910 | /* Wear-leveling is already in the work queue */ | 910 | /* Wear-leveling is already in the work queue */ |
911 | goto out_unlock; | 911 | goto out_unlock; |
912 | 912 | ||
913 | /* | 913 | /* |
914 | * If the ubi->scrub tree is not empty, scrubbing is needed, and | 914 | * If the ubi->scrub tree is not empty, scrubbing is needed, and |
915 | * the WL worker has to be scheduled anyway. | 915 | * the WL worker has to be scheduled anyway. |
916 | */ | 916 | */ |
917 | if (!ubi->scrub.rb_node) { | 917 | if (!ubi->scrub.rb_node) { |
918 | if (!ubi->used.rb_node || !ubi->free.rb_node) | 918 | if (!ubi->used.rb_node || !ubi->free.rb_node) |
919 | /* No physical eraseblocks - no deal */ | 919 | /* No physical eraseblocks - no deal */ |
920 | goto out_unlock; | 920 | goto out_unlock; |
921 | 921 | ||
922 | /* | 922 | /* |
923 | * We schedule wear-leveling only if the difference between the | 923 | * We schedule wear-leveling only if the difference between the |
924 | * lowest erase counter of used physical eraseblocks and a high | 924 | * lowest erase counter of used physical eraseblocks and a high |
925 | * erase counter of free physical eraseblocks is greater than | 925 | * erase counter of free physical eraseblocks is greater than |
926 | * %UBI_WL_THRESHOLD. | 926 | * %UBI_WL_THRESHOLD. |
927 | */ | 927 | */ |
928 | e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); | 928 | e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); |
929 | e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); | 929 | e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); |
930 | 930 | ||
931 | if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) | 931 | if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) |
932 | goto out_unlock; | 932 | goto out_unlock; |
933 | dbg_wl("schedule wear-leveling"); | 933 | dbg_wl("schedule wear-leveling"); |
934 | } else | 934 | } else |
935 | dbg_wl("schedule scrubbing"); | 935 | dbg_wl("schedule scrubbing"); |
936 | 936 | ||
937 | ubi->wl_scheduled = 1; | 937 | ubi->wl_scheduled = 1; |
938 | spin_unlock(&ubi->wl_lock); | 938 | spin_unlock(&ubi->wl_lock); |
939 | 939 | ||
940 | wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); | 940 | wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); |
941 | if (!wrk) { | 941 | if (!wrk) { |
942 | err = -ENOMEM; | 942 | err = -ENOMEM; |
943 | goto out_cancel; | 943 | goto out_cancel; |
944 | } | 944 | } |
945 | 945 | ||
946 | wrk->func = &wear_leveling_worker; | 946 | wrk->func = &wear_leveling_worker; |
947 | schedule_ubi_work(ubi, wrk); | 947 | schedule_ubi_work(ubi, wrk); |
948 | return err; | 948 | return err; |
949 | 949 | ||
950 | out_cancel: | 950 | out_cancel: |
951 | spin_lock(&ubi->wl_lock); | 951 | spin_lock(&ubi->wl_lock); |
952 | ubi->wl_scheduled = 0; | 952 | ubi->wl_scheduled = 0; |
953 | out_unlock: | 953 | out_unlock: |
954 | spin_unlock(&ubi->wl_lock); | 954 | spin_unlock(&ubi->wl_lock); |
955 | return err; | 955 | return err; |
956 | } | 956 | } |
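ensure_wear_leveling() only queues the WL worker when the gap between the lowest erase counter among used PEBs and a high erase counter among free PEBs exceeds UBI_WL_THRESHOLD. The decision in isolation, as a small sketch; DEMO_WL_THRESHOLD is an assumed value (the real threshold is a build-time constant in UBI):

/* The wear-leveling scheduling decision, stripped of locking. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_WL_THRESHOLD 4096

static bool demo_needs_wl(int min_used_ec, int high_free_ec)
{
	/* Move data only when the erase counters have drifted far apart. */
	return high_free_ec - min_used_ec >= DEMO_WL_THRESHOLD;
}

int main(void)
{
	printf("%d\n", demo_needs_wl(100, 200));   /* 0: counters are close */
	printf("%d\n", demo_needs_wl(100, 9000));  /* 1: schedule wear-leveling */
	return 0;
}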
957 | 957 | ||
958 | /** | 958 | /** |
959 | * erase_worker - physical eraseblock erase worker function. | 959 | * erase_worker - physical eraseblock erase worker function. |
960 | * @ubi: UBI device description object | 960 | * @ubi: UBI device description object |
961 | * @wl_wrk: the work object | 961 | * @wl_wrk: the work object |
962 | * @cancel: non-zero if the worker has to free memory and exit | 962 | * @cancel: non-zero if the worker has to free memory and exit |
963 | * | 963 | * |
964 | * This function erases a physical eraseblock and performs torture testing if | 964 | * This function erases a physical eraseblock and performs torture testing if |
965 | * needed. It also takes care of marking the physical eraseblock bad if | 965 | * needed. It also takes care of marking the physical eraseblock bad if |
966 | * needed. Returns zero in case of success and a negative error code in case of | 966 | * needed. Returns zero in case of success and a negative error code in case of |
967 | * failure. | 967 | * failure. |
968 | */ | 968 | */ |
969 | static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | 969 | static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, |
970 | int cancel) | 970 | int cancel) |
971 | { | 971 | { |
972 | struct ubi_wl_entry *e = wl_wrk->e; | 972 | struct ubi_wl_entry *e = wl_wrk->e; |
973 | int pnum = e->pnum, err, need; | 973 | int pnum = e->pnum, err, need; |
974 | 974 | ||
975 | if (cancel) { | 975 | if (cancel) { |
976 | dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); | 976 | dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); |
977 | kfree(wl_wrk); | 977 | kfree(wl_wrk); |
978 | kmem_cache_free(ubi_wl_entry_slab, e); | 978 | kmem_cache_free(ubi_wl_entry_slab, e); |
979 | return 0; | 979 | return 0; |
980 | } | 980 | } |
981 | 981 | ||
982 | dbg_wl("erase PEB %d EC %d", pnum, e->ec); | 982 | dbg_wl("erase PEB %d EC %d", pnum, e->ec); |
983 | 983 | ||
984 | err = sync_erase(ubi, e, wl_wrk->torture); | 984 | err = sync_erase(ubi, e, wl_wrk->torture); |
985 | if (!err) { | 985 | if (!err) { |
986 | /* Fine, we've erased it successfully */ | 986 | /* Fine, we've erased it successfully */ |
987 | kfree(wl_wrk); | 987 | kfree(wl_wrk); |
988 | 988 | ||
989 | spin_lock(&ubi->wl_lock); | 989 | spin_lock(&ubi->wl_lock); |
990 | wl_tree_add(e, &ubi->free); | 990 | wl_tree_add(e, &ubi->free); |
991 | spin_unlock(&ubi->wl_lock); | 991 | spin_unlock(&ubi->wl_lock); |
992 | 992 | ||
993 | /* | 993 | /* |
994 | * One more erase operation has happened, take care of | 994 | * One more erase operation has happened, take care of |
995 | * protected physical eraseblocks. | 995 | * protected physical eraseblocks. |
996 | */ | 996 | */ |
997 | serve_prot_queue(ubi); | 997 | serve_prot_queue(ubi); |
998 | 998 | ||
999 | /* And take care about wear-leveling */ | 999 | /* And take care about wear-leveling */ |
1000 | err = ensure_wear_leveling(ubi); | 1000 | err = ensure_wear_leveling(ubi); |
1001 | return err; | 1001 | return err; |
1002 | } | 1002 | } |
1003 | 1003 | ||
1004 | ubi_err("failed to erase PEB %d, error %d", pnum, err); | 1004 | ubi_err("failed to erase PEB %d, error %d", pnum, err); |
1005 | kfree(wl_wrk); | 1005 | kfree(wl_wrk); |
1006 | 1006 | ||
1007 | if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || | 1007 | if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || |
1008 | err == -EBUSY) { | 1008 | err == -EBUSY) { |
1009 | int err1; | 1009 | int err1; |
1010 | 1010 | ||
1011 | /* Re-schedule the PEB for erasure */ | 1011 | /* Re-schedule the PEB for erasure */ |
1012 | err1 = schedule_erase(ubi, e, 0); | 1012 | err1 = schedule_erase(ubi, e, 0); |
1013 | if (err1) { | 1013 | if (err1) { |
1014 | err = err1; | 1014 | err = err1; |
1015 | goto out_ro; | 1015 | goto out_ro; |
1016 | } | 1016 | } |
1017 | return err; | 1017 | return err; |
1018 | } | 1018 | } |
1019 | 1019 | ||
1020 | kmem_cache_free(ubi_wl_entry_slab, e); | 1020 | kmem_cache_free(ubi_wl_entry_slab, e); |
1021 | if (err != -EIO) | 1021 | if (err != -EIO) |
1022 | /* | 1022 | /* |
1023 | * If this is not %-EIO, we have no idea what to do. Scheduling | 1023 | * If this is not %-EIO, we have no idea what to do. Scheduling |
1024 | * this physical eraseblock for erasure again would cause | 1024 | * this physical eraseblock for erasure again would cause |
1025 | * errors again and again. Well, let's switch to R/O mode. | 1025 | * errors again and again. Well, let's switch to R/O mode. |
1026 | */ | 1026 | */ |
1027 | goto out_ro; | 1027 | goto out_ro; |
1028 | 1028 | ||
1029 | /* It is %-EIO, the PEB went bad */ | 1029 | /* It is %-EIO, the PEB went bad */ |
1030 | 1030 | ||
1031 | if (!ubi->bad_allowed) { | 1031 | if (!ubi->bad_allowed) { |
1032 | ubi_err("bad physical eraseblock %d detected", pnum); | 1032 | ubi_err("bad physical eraseblock %d detected", pnum); |
1033 | goto out_ro; | 1033 | goto out_ro; |
1034 | } | 1034 | } |
1035 | 1035 | ||
1036 | spin_lock(&ubi->volumes_lock); | 1036 | spin_lock(&ubi->volumes_lock); |
1037 | need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1; | 1037 | need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1; |
1038 | if (need > 0) { | 1038 | if (need > 0) { |
1039 | need = ubi->avail_pebs >= need ? need : ubi->avail_pebs; | 1039 | need = ubi->avail_pebs >= need ? need : ubi->avail_pebs; |
1040 | ubi->avail_pebs -= need; | 1040 | ubi->avail_pebs -= need; |
1041 | ubi->rsvd_pebs += need; | 1041 | ubi->rsvd_pebs += need; |
1042 | ubi->beb_rsvd_pebs += need; | 1042 | ubi->beb_rsvd_pebs += need; |
1043 | if (need > 0) | 1043 | if (need > 0) |
1044 | ubi_msg("reserve more %d PEBs", need); | 1044 | ubi_msg("reserve more %d PEBs", need); |
1045 | } | 1045 | } |
1046 | 1046 | ||
1047 | if (ubi->beb_rsvd_pebs == 0) { | 1047 | if (ubi->beb_rsvd_pebs == 0) { |
1048 | spin_unlock(&ubi->volumes_lock); | 1048 | spin_unlock(&ubi->volumes_lock); |
1049 | ubi_err("no reserved physical eraseblocks"); | 1049 | ubi_err("no reserved physical eraseblocks"); |
1050 | goto out_ro; | 1050 | goto out_ro; |
1051 | } | 1051 | } |
1052 | spin_unlock(&ubi->volumes_lock); | 1052 | spin_unlock(&ubi->volumes_lock); |
1053 | 1053 | ||
1054 | ubi_msg("mark PEB %d as bad", pnum); | 1054 | ubi_msg("mark PEB %d as bad", pnum); |
1055 | err = ubi_io_mark_bad(ubi, pnum); | 1055 | err = ubi_io_mark_bad(ubi, pnum); |
1056 | if (err) | 1056 | if (err) |
1057 | goto out_ro; | 1057 | goto out_ro; |
1058 | 1058 | ||
1059 | spin_lock(&ubi->volumes_lock); | 1059 | spin_lock(&ubi->volumes_lock); |
1060 | ubi->beb_rsvd_pebs -= 1; | 1060 | ubi->beb_rsvd_pebs -= 1; |
1061 | ubi->bad_peb_count += 1; | 1061 | ubi->bad_peb_count += 1; |
1062 | ubi->good_peb_count -= 1; | 1062 | ubi->good_peb_count -= 1; |
1063 | ubi_calculate_reserved(ubi); | 1063 | ubi_calculate_reserved(ubi); |
1064 | if (ubi->beb_rsvd_pebs) | 1064 | if (ubi->beb_rsvd_pebs) |
1065 | ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs); | 1065 | ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs); |
1066 | else | 1066 | else |
1067 | ubi_warn("last PEB from the reserved pool was used"); | 1067 | ubi_warn("last PEB from the reserved pool was used"); |
1068 | spin_unlock(&ubi->volumes_lock); | 1068 | spin_unlock(&ubi->volumes_lock); |
1069 | 1069 | ||
1070 | return err; | 1070 | return err; |
1071 | 1071 | ||
1072 | out_ro: | 1072 | out_ro: |
1073 | ubi_ro_mode(ubi); | 1073 | ubi_ro_mode(ubi); |
1074 | return err; | 1074 | return err; |
1075 | } | 1075 | } |
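When a PEB turns out to be bad, erase_worker() first tops the bad-block reserve back up from the pool of still-available PEBs and then consumes one reserved PEB for the newly discovered bad block. A sketch of just that accounting, with simplified field names (the demo_* names are invented):

/* Bad-block reserve top-up and consumption, in isolation. */
#include <stdio.h>

struct demo_counts {
	int avail_pebs;		/* PEBs not reserved for anything yet */
	int rsvd_pebs;		/* all reserved PEBs */
	int beb_rsvd_pebs;	/* reserved specifically for bad-block handling */
	int beb_rsvd_level;	/* target size of the bad-block reserve */
	int bad_peb_count;
};

static int demo_mark_bad(struct demo_counts *c)
{
	int need = c->beb_rsvd_level - c->beb_rsvd_pebs + 1;

	if (need > 0) {
		if (need > c->avail_pebs)
			need = c->avail_pebs;	/* can only take what is left */
		c->avail_pebs -= need;
		c->rsvd_pebs += need;
		c->beb_rsvd_pebs += need;
	}
	if (c->beb_rsvd_pebs == 0)
		return -1;		/* nothing left to replace the bad PEB */

	c->beb_rsvd_pebs -= 1;
	c->bad_peb_count += 1;
	return 0;
}

int main(void)
{
	struct demo_counts c = { .avail_pebs = 10, .beb_rsvd_level = 4 };

	if (demo_mark_bad(&c) == 0)
		printf("bad PEBs: %d, reserve left: %d\n",
		       c.bad_peb_count, c.beb_rsvd_pebs);
	return 0;
}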
1076 | 1076 | ||
1077 | /** | 1077 | /** |
1078 | * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system. | 1078 | * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system. |
1079 | * @ubi: UBI device description object | 1079 | * @ubi: UBI device description object |
1080 | * @pnum: physical eraseblock to return | 1080 | * @pnum: physical eraseblock to return |
1081 | * @torture: if this physical eraseblock has to be tortured | 1081 | * @torture: if this physical eraseblock has to be tortured |
1082 | * | 1082 | * |
1083 | * This function is called to return physical eraseblock @pnum to the pool of | 1083 | * This function is called to return physical eraseblock @pnum to the pool of |
1084 | * free physical eraseblocks. The @torture flag has to be set if an I/O error | 1084 | * free physical eraseblocks. The @torture flag has to be set if an I/O error |
1085 | * occurred to this @pnum and it has to be tested. This function returns zero | 1085 | * occurred to this @pnum and it has to be tested. This function returns zero |
1086 | * in case of success, and a negative error code in case of failure. | 1086 | * in case of success, and a negative error code in case of failure. |
1087 | */ | 1087 | */ |
1088 | int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture) | 1088 | int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture) |
1089 | { | 1089 | { |
1090 | int err; | 1090 | int err; |
1091 | struct ubi_wl_entry *e; | 1091 | struct ubi_wl_entry *e; |
1092 | 1092 | ||
1093 | dbg_wl("PEB %d", pnum); | 1093 | dbg_wl("PEB %d", pnum); |
1094 | ubi_assert(pnum >= 0); | 1094 | ubi_assert(pnum >= 0); |
1095 | ubi_assert(pnum < ubi->peb_count); | 1095 | ubi_assert(pnum < ubi->peb_count); |
1096 | 1096 | ||
1097 | retry: | 1097 | retry: |
1098 | spin_lock(&ubi->wl_lock); | 1098 | spin_lock(&ubi->wl_lock); |
1099 | e = ubi->lookuptbl[pnum]; | 1099 | e = ubi->lookuptbl[pnum]; |
1100 | if (e == ubi->move_from) { | 1100 | if (e == ubi->move_from) { |
1101 | /* | 1101 | /* |
1102 | * User is putting the physical eraseblock which was selected to | 1102 | * User is putting the physical eraseblock which was selected to |
1103 | * be moved. It will be scheduled for erasure in the | 1103 | * be moved. It will be scheduled for erasure in the |
1104 | * wear-leveling worker. | 1104 | * wear-leveling worker. |
1105 | */ | 1105 | */ |
1106 | dbg_wl("PEB %d is being moved, wait", pnum); | 1106 | dbg_wl("PEB %d is being moved, wait", pnum); |
1107 | spin_unlock(&ubi->wl_lock); | 1107 | spin_unlock(&ubi->wl_lock); |
1108 | 1108 | ||
1109 | /* Wait for the WL worker by taking the @ubi->move_mutex */ | 1109 | /* Wait for the WL worker by taking the @ubi->move_mutex */ |
1110 | mutex_lock(&ubi->move_mutex); | 1110 | mutex_lock(&ubi->move_mutex); |
1111 | mutex_unlock(&ubi->move_mutex); | 1111 | mutex_unlock(&ubi->move_mutex); |
1112 | goto retry; | 1112 | goto retry; |
1113 | } else if (e == ubi->move_to) { | 1113 | } else if (e == ubi->move_to) { |
1114 | /* | 1114 | /* |
1115 | * User is putting the physical eraseblock which was selected | 1115 | * User is putting the physical eraseblock which was selected |
1116 | * as the target the data is moved to. It may happen if the EBA | 1116 | * as the target the data is moved to. It may happen if the EBA |
1117 | * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()' | 1117 | * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()' |
1118 | * but the WL sub-system has not put the PEB to the "used" tree | 1118 | * but the WL sub-system has not put the PEB to the "used" tree |
1119 | * yet, but it is about to do this. So we just set a flag which | 1119 | * yet, but it is about to do this. So we just set a flag which |
1120 | * will tell the WL worker that the PEB is not needed anymore | 1120 | * will tell the WL worker that the PEB is not needed anymore |
1121 | * and should be scheduled for erasure. | 1121 | * and should be scheduled for erasure. |
1122 | */ | 1122 | */ |
1123 | dbg_wl("PEB %d is the target of data moving", pnum); | 1123 | dbg_wl("PEB %d is the target of data moving", pnum); |
1124 | ubi_assert(!ubi->move_to_put); | 1124 | ubi_assert(!ubi->move_to_put); |
1125 | ubi->move_to_put = 1; | 1125 | ubi->move_to_put = 1; |
1126 | spin_unlock(&ubi->wl_lock); | 1126 | spin_unlock(&ubi->wl_lock); |
1127 | return 0; | 1127 | return 0; |
1128 | } else { | 1128 | } else { |
1129 | if (in_wl_tree(e, &ubi->used)) { | 1129 | if (in_wl_tree(e, &ubi->used)) { |
1130 | self_check_in_wl_tree(ubi, e, &ubi->used); | 1130 | self_check_in_wl_tree(ubi, e, &ubi->used); |
1131 | rb_erase(&e->u.rb, &ubi->used); | 1131 | rb_erase(&e->u.rb, &ubi->used); |
1132 | } else if (in_wl_tree(e, &ubi->scrub)) { | 1132 | } else if (in_wl_tree(e, &ubi->scrub)) { |
1133 | self_check_in_wl_tree(ubi, e, &ubi->scrub); | 1133 | self_check_in_wl_tree(ubi, e, &ubi->scrub); |
1134 | rb_erase(&e->u.rb, &ubi->scrub); | 1134 | rb_erase(&e->u.rb, &ubi->scrub); |
1135 | } else if (in_wl_tree(e, &ubi->erroneous)) { | 1135 | } else if (in_wl_tree(e, &ubi->erroneous)) { |
1136 | self_check_in_wl_tree(ubi, e, &ubi->erroneous); | 1136 | self_check_in_wl_tree(ubi, e, &ubi->erroneous); |
1137 | rb_erase(&e->u.rb, &ubi->erroneous); | 1137 | rb_erase(&e->u.rb, &ubi->erroneous); |
1138 | ubi->erroneous_peb_count -= 1; | 1138 | ubi->erroneous_peb_count -= 1; |
1139 | ubi_assert(ubi->erroneous_peb_count >= 0); | 1139 | ubi_assert(ubi->erroneous_peb_count >= 0); |
1140 | /* Erroneous PEBs should be tortured */ | 1140 | /* Erroneous PEBs should be tortured */ |
1141 | torture = 1; | 1141 | torture = 1; |
1142 | } else { | 1142 | } else { |
1143 | err = prot_queue_del(ubi, e->pnum); | 1143 | err = prot_queue_del(ubi, e->pnum); |
1144 | if (err) { | 1144 | if (err) { |
1145 | ubi_err("PEB %d not found", pnum); | 1145 | ubi_err("PEB %d not found", pnum); |
1146 | ubi_ro_mode(ubi); | 1146 | ubi_ro_mode(ubi); |
1147 | spin_unlock(&ubi->wl_lock); | 1147 | spin_unlock(&ubi->wl_lock); |
1148 | return err; | 1148 | return err; |
1149 | } | 1149 | } |
1150 | } | 1150 | } |
1151 | } | 1151 | } |
1152 | spin_unlock(&ubi->wl_lock); | 1152 | spin_unlock(&ubi->wl_lock); |
1153 | 1153 | ||
1154 | err = schedule_erase(ubi, e, torture); | 1154 | err = schedule_erase(ubi, e, torture); |
1155 | if (err) { | 1155 | if (err) { |
1156 | spin_lock(&ubi->wl_lock); | 1156 | spin_lock(&ubi->wl_lock); |
1157 | wl_tree_add(e, &ubi->used); | 1157 | wl_tree_add(e, &ubi->used); |
1158 | spin_unlock(&ubi->wl_lock); | 1158 | spin_unlock(&ubi->wl_lock); |
1159 | } | 1159 | } |
1160 | 1160 | ||
1161 | return err; | 1161 | return err; |
1162 | } | 1162 | } |
1163 | 1163 | ||
1164 | /** | 1164 | /** |
1165 | * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing. | 1165 | * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing. |
1166 | * @ubi: UBI device description object | 1166 | * @ubi: UBI device description object |
1167 | * @pnum: the physical eraseblock to schedule | 1167 | * @pnum: the physical eraseblock to schedule |
1168 | * | 1168 | * |
1169 | * If a bit-flip in a physical eraseblock is detected, this physical eraseblock | 1169 | * If a bit-flip in a physical eraseblock is detected, this physical eraseblock |
1170 | * needs scrubbing. This function schedules a physical eraseblock for | 1170 | * needs scrubbing. This function schedules a physical eraseblock for |
1171 | * scrubbing which is done in background. This function returns zero in case of | 1171 | * scrubbing which is done in background. This function returns zero in case of |
1172 | * success and a negative error code in case of failure. | 1172 | * success and a negative error code in case of failure. |
1173 | */ | 1173 | */ |
1174 | int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) | 1174 | int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) |
1175 | { | 1175 | { |
1176 | struct ubi_wl_entry *e; | 1176 | struct ubi_wl_entry *e; |
1177 | 1177 | ||
1178 | dbg_msg("schedule PEB %d for scrubbing", pnum); | 1178 | dbg_msg("schedule PEB %d for scrubbing", pnum); |
1179 | 1179 | ||
1180 | retry: | 1180 | retry: |
1181 | spin_lock(&ubi->wl_lock); | 1181 | spin_lock(&ubi->wl_lock); |
1182 | e = ubi->lookuptbl[pnum]; | 1182 | e = ubi->lookuptbl[pnum]; |
1183 | if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) || | 1183 | if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) || |
1184 | in_wl_tree(e, &ubi->erroneous)) { | 1184 | in_wl_tree(e, &ubi->erroneous)) { |
1185 | spin_unlock(&ubi->wl_lock); | 1185 | spin_unlock(&ubi->wl_lock); |
1186 | return 0; | 1186 | return 0; |
1187 | } | 1187 | } |
1188 | 1188 | ||
1189 | if (e == ubi->move_to) { | 1189 | if (e == ubi->move_to) { |
1190 | /* | 1190 | /* |
1191 | * This physical eraseblock was used to move data to. The data | 1191 | * This physical eraseblock was used to move data to. The data |
1192 | * was moved but the PEB was not yet inserted to the proper | 1192 | * was moved but the PEB was not yet inserted to the proper |
1193 | * tree. We should just wait a little and let the WL worker | 1193 | * tree. We should just wait a little and let the WL worker |
1194 | * proceed. | 1194 | * proceed. |
1195 | */ | 1195 | */ |
1196 | spin_unlock(&ubi->wl_lock); | 1196 | spin_unlock(&ubi->wl_lock); |
1197 | dbg_wl("the PEB %d is not in proper tree, retry", pnum); | 1197 | dbg_wl("the PEB %d is not in proper tree, retry", pnum); |
1198 | yield(); | 1198 | yield(); |
1199 | goto retry; | 1199 | goto retry; |
1200 | } | 1200 | } |
1201 | 1201 | ||
1202 | if (in_wl_tree(e, &ubi->used)) { | 1202 | if (in_wl_tree(e, &ubi->used)) { |
1203 | self_check_in_wl_tree(ubi, e, &ubi->used); | 1203 | self_check_in_wl_tree(ubi, e, &ubi->used); |
1204 | rb_erase(&e->u.rb, &ubi->used); | 1204 | rb_erase(&e->u.rb, &ubi->used); |
1205 | } else { | 1205 | } else { |
1206 | int err; | 1206 | int err; |
1207 | 1207 | ||
1208 | err = prot_queue_del(ubi, e->pnum); | 1208 | err = prot_queue_del(ubi, e->pnum); |
1209 | if (err) { | 1209 | if (err) { |
1210 | ubi_err("PEB %d not found", pnum); | 1210 | ubi_err("PEB %d not found", pnum); |
1211 | ubi_ro_mode(ubi); | 1211 | ubi_ro_mode(ubi); |
1212 | spin_unlock(&ubi->wl_lock); | 1212 | spin_unlock(&ubi->wl_lock); |
1213 | return err; | 1213 | return err; |
1214 | } | 1214 | } |
1215 | } | 1215 | } |
1216 | 1216 | ||
1217 | wl_tree_add(e, &ubi->scrub); | 1217 | wl_tree_add(e, &ubi->scrub); |
1218 | spin_unlock(&ubi->wl_lock); | 1218 | spin_unlock(&ubi->wl_lock); |
1219 | 1219 | ||
1220 | /* | 1220 | /* |
1221 | * Technically scrubbing is the same as wear-leveling, so it is done | 1221 | * Technically scrubbing is the same as wear-leveling, so it is done |
1222 | * by the WL worker. | 1222 | * by the WL worker. |
1223 | */ | 1223 | */ |
1224 | return ensure_wear_leveling(ubi); | 1224 | return ensure_wear_leveling(ubi); |
1225 | } | 1225 | } |
1226 | 1226 | ||
1227 | /** | 1227 | /** |
1228 | * ubi_wl_flush - flush all pending works. | 1228 | * ubi_wl_flush - flush all pending works. |
1229 | * @ubi: UBI device description object | 1229 | * @ubi: UBI device description object |
1230 | * | 1230 | * |
1231 | * This function returns zero in case of success and a negative error code in | 1231 | * This function returns zero in case of success and a negative error code in |
1232 | * case of failure. | 1232 | * case of failure. |
1233 | */ | 1233 | */ |
1234 | int ubi_wl_flush(struct ubi_device *ubi) | 1234 | int ubi_wl_flush(struct ubi_device *ubi) |
1235 | { | 1235 | { |
1236 | int err; | 1236 | int err; |
1237 | 1237 | ||
1238 | /* | 1238 | /* |
1239 | * Erase while the pending works queue is not empty, but not more than | 1239 | * Erase while the pending works queue is not empty, but not more than |
1240 | * the number of currently pending works. | 1240 | * the number of currently pending works. |
1241 | */ | 1241 | */ |
1242 | dbg_wl("flush (%d pending works)", ubi->works_count); | 1242 | dbg_wl("flush (%d pending works)", ubi->works_count); |
1243 | while (ubi->works_count) { | 1243 | while (ubi->works_count) { |
1244 | err = do_work(ubi); | 1244 | err = do_work(ubi); |
1245 | if (err) | 1245 | if (err) |
1246 | return err; | 1246 | return err; |
1247 | } | 1247 | } |
1248 | 1248 | ||
1249 | /* | 1249 | /* |
1250 | * Make sure all the works which have been done in parallel are | 1250 | * Make sure all the works which have been done in parallel are |
1251 | * finished. | 1251 | * finished. |
1252 | */ | 1252 | */ |
1253 | down_write(&ubi->work_sem); | 1253 | down_write(&ubi->work_sem); |
1254 | up_write(&ubi->work_sem); | 1254 | up_write(&ubi->work_sem); |
1255 | 1255 | ||
1256 | /* | 1256 | /* |
1257 | * And in case the last one was the WL worker and it canceled the LEB | 1257 | * And in case the last one was the WL worker and it canceled the LEB |
1258 | * movement, flush again. | 1258 | * movement, flush again. |
1259 | */ | 1259 | */ |
1260 | while (ubi->works_count) { | 1260 | while (ubi->works_count) { |
1261 | dbg_wl("flush more (%d pending works)", ubi->works_count); | 1261 | dbg_wl("flush more (%d pending works)", ubi->works_count); |
1262 | err = do_work(ubi); | 1262 | err = do_work(ubi); |
1263 | if (err) | 1263 | if (err) |
1264 | return err; | 1264 | return err; |
1265 | } | 1265 | } |
1266 | 1266 | ||
1267 | return 0; | 1267 | return 0; |
1268 | } | 1268 | } |
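ubi_wl_flush() drains the pending works list and then takes and releases the work semaphore for writing, which acts as a barrier against works that are still executing elsewhere under a reader hold. A rough user-space sketch of that "drain, then barrier" idea using POSIX rwlocks (single-threaded here just to show the shape; the demo_* names are invented):

/* Drain a work counter, then use a writer lock as a completion barrier. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER;
static int works_count = 2;

static int demo_do_work(void)
{
	pthread_rwlock_rdlock(&work_sem);	/* workers hold it for reading */
	printf("did one work item\n");
	works_count--;
	pthread_rwlock_unlock(&work_sem);
	return 0;
}

static int demo_flush(void)
{
	while (works_count)
		if (demo_do_work())
			return -1;

	/* Writer lock waits out any work still running under a read hold. */
	pthread_rwlock_wrlock(&work_sem);
	pthread_rwlock_unlock(&work_sem);
	return 0;
}

int main(void)
{
	return demo_flush();
}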
1269 | 1269 | ||
1270 | /** | 1270 | /** |
1271 | * tree_destroy - destroy an RB-tree. | 1271 | * tree_destroy - destroy an RB-tree. |
1272 | * @root: the root of the tree to destroy | 1272 | * @root: the root of the tree to destroy |
1273 | */ | 1273 | */ |
1274 | static void tree_destroy(struct rb_root *root) | 1274 | static void tree_destroy(struct rb_root *root) |
1275 | { | 1275 | { |
1276 | struct rb_node *rb; | 1276 | struct rb_node *rb; |
1277 | struct ubi_wl_entry *e; | 1277 | struct ubi_wl_entry *e; |
1278 | 1278 | ||
1279 | rb = root->rb_node; | 1279 | rb = root->rb_node; |
1280 | while (rb) { | 1280 | while (rb) { |
1281 | if (rb->rb_left) | 1281 | if (rb->rb_left) |
1282 | rb = rb->rb_left; | 1282 | rb = rb->rb_left; |
1283 | else if (rb->rb_right) | 1283 | else if (rb->rb_right) |
1284 | rb = rb->rb_right; | 1284 | rb = rb->rb_right; |
1285 | else { | 1285 | else { |
1286 | e = rb_entry(rb, struct ubi_wl_entry, u.rb); | 1286 | e = rb_entry(rb, struct ubi_wl_entry, u.rb); |
1287 | 1287 | ||
1288 | rb = rb_parent(rb); | 1288 | rb = rb_parent(rb); |
1289 | if (rb) { | 1289 | if (rb) { |
1290 | if (rb->rb_left == &e->u.rb) | 1290 | if (rb->rb_left == &e->u.rb) |
1291 | rb->rb_left = NULL; | 1291 | rb->rb_left = NULL; |
1292 | else | 1292 | else |
1293 | rb->rb_right = NULL; | 1293 | rb->rb_right = NULL; |
1294 | } | 1294 | } |
1295 | 1295 | ||
1296 | kmem_cache_free(ubi_wl_entry_slab, e); | 1296 | kmem_cache_free(ubi_wl_entry_slab, e); |
1297 | } | 1297 | } |
1298 | } | 1298 | } |
1299 | } | 1299 | } |
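tree_destroy() frees the whole tree without recursion: it walks down to a leaf, frees it, clears the parent's pointer to it, and continues from the parent. The same teardown pattern, sketched on a plain binary tree with parent pointers (the demo_node type and demo_* names are invented for the example):

/* Non-recursive teardown of a binary tree with parent pointers. */
#include <stdlib.h>

struct demo_node {
	struct demo_node *left, *right, *parent;
};

static void demo_tree_destroy(struct demo_node *root)
{
	struct demo_node *n = root;

	while (n) {
		if (n->left)
			n = n->left;
		else if (n->right)
			n = n->right;
		else {
			struct demo_node *parent = n->parent;

			if (parent) {
				if (parent->left == n)
					parent->left = NULL;	/* unlink the freed leaf */
				else
					parent->right = NULL;
			}
			free(n);
			n = parent;	/* NULL once the old root is freed */
		}
	}
}

int main(void)
{
	struct demo_node *root = calloc(1, sizeof(*root));
	struct demo_node *leaf = calloc(1, sizeof(*leaf));

	if (!root || !leaf)
		return 1;
	root->left = leaf;
	leaf->parent = root;
	demo_tree_destroy(root);
	return 0;
}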
1300 | 1300 | ||
1301 | /** | 1301 | /** |
1302 | * ubi_thread - UBI background thread. | 1302 | * ubi_thread - UBI background thread. |
1303 | * @u: the UBI device description object pointer | 1303 | * @u: the UBI device description object pointer |
1304 | */ | 1304 | */ |
1305 | int ubi_thread(void *u) | 1305 | int ubi_thread(void *u) |
1306 | { | 1306 | { |
1307 | int failures = 0; | 1307 | int failures = 0; |
1308 | struct ubi_device *ubi = u; | 1308 | struct ubi_device *ubi = u; |
1309 | 1309 | ||
1310 | ubi_msg("background thread \"%s\" started, PID %d", | 1310 | ubi_msg("background thread \"%s\" started, PID %d", |
1311 | ubi->bgt_name, task_pid_nr(current)); | 1311 | ubi->bgt_name, task_pid_nr(current)); |
1312 | 1312 | ||
1313 | set_freezable(); | 1313 | set_freezable(); |
1314 | for (;;) { | 1314 | for (;;) { |
1315 | int err; | 1315 | int err; |
1316 | 1316 | ||
1317 | if (kthread_should_stop()) | 1317 | if (kthread_should_stop()) |
1318 | break; | 1318 | break; |
1319 | 1319 | ||
1320 | if (try_to_freeze()) | 1320 | if (try_to_freeze()) |
1321 | continue; | 1321 | continue; |
1322 | 1322 | ||
1323 | spin_lock(&ubi->wl_lock); | 1323 | spin_lock(&ubi->wl_lock); |
1324 | if (list_empty(&ubi->works) || ubi->ro_mode || | 1324 | if (list_empty(&ubi->works) || ubi->ro_mode || |
1325 | !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) { | 1325 | !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) { |
1326 | set_current_state(TASK_INTERRUPTIBLE); | 1326 | set_current_state(TASK_INTERRUPTIBLE); |
1327 | spin_unlock(&ubi->wl_lock); | 1327 | spin_unlock(&ubi->wl_lock); |
1328 | schedule(); | 1328 | schedule(); |
1329 | continue; | 1329 | continue; |
1330 | } | 1330 | } |
1331 | spin_unlock(&ubi->wl_lock); | 1331 | spin_unlock(&ubi->wl_lock); |
1332 | 1332 | ||
1333 | err = do_work(ubi); | 1333 | err = do_work(ubi); |
1334 | if (err) { | 1334 | if (err) { |
1335 | ubi_err("%s: work failed with error code %d", | 1335 | ubi_err("%s: work failed with error code %d", |
1336 | ubi->bgt_name, err); | 1336 | ubi->bgt_name, err); |
1337 | if (failures++ > WL_MAX_FAILURES) { | 1337 | if (failures++ > WL_MAX_FAILURES) { |
1338 | /* | 1338 | /* |
1339 | * Too many failures, disable the thread and | 1339 | * Too many failures, disable the thread and |
1340 | * switch to read-only mode. | 1340 | * switch to read-only mode. |
1341 | */ | 1341 | */ |
1342 | ubi_msg("%s: %d consecutive failures", | 1342 | ubi_msg("%s: %d consecutive failures", |
1343 | ubi->bgt_name, WL_MAX_FAILURES); | 1343 | ubi->bgt_name, WL_MAX_FAILURES); |
1344 | ubi_ro_mode(ubi); | 1344 | ubi_ro_mode(ubi); |
1345 | ubi->thread_enabled = 0; | 1345 | ubi->thread_enabled = 0; |
1346 | continue; | 1346 | continue; |
1347 | } | 1347 | } |
1348 | } else | 1348 | } else |
1349 | failures = 0; | 1349 | failures = 0; |
1350 | 1350 | ||
1351 | cond_resched(); | 1351 | cond_resched(); |
1352 | } | 1352 | } |
1353 | 1353 | ||
1354 | dbg_wl("background thread \"%s\" is killed", ubi->bgt_name); | 1354 | dbg_wl("background thread \"%s\" is killed", ubi->bgt_name); |
1355 | return 0; | 1355 | return 0; |
1356 | } | 1356 | } |
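ubi_thread() is a standard freezable kthread: it sleeps while there is nothing to do and exits once kthread_should_stop() returns true. A hedged sketch of how such a thread is typically started and stopped with the generic kthread API; the helper names are illustrative and the driver's actual start/stop logic lives elsewhere:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Illustrative lifecycle helpers, not the driver's own code. */
static struct task_struct *start_background_thread(struct ubi_device *ubi)
{
	struct task_struct *t;

	t = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
	if (!IS_ERR(t))
		wake_up_process(t);	/* enters the for (;;) loop above */
	return t;
}

static void stop_background_thread(struct task_struct *t)
{
	kthread_stop(t);		/* makes kthread_should_stop() return true */
}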
1357 | 1357 | ||
1358 | /** | 1358 | /** |
1359 | * cancel_pending - cancel all pending works. | 1359 | * cancel_pending - cancel all pending works. |
1360 | * @ubi: UBI device description object | 1360 | * @ubi: UBI device description object |
1361 | */ | 1361 | */ |
1362 | static void cancel_pending(struct ubi_device *ubi) | 1362 | static void cancel_pending(struct ubi_device *ubi) |
1363 | { | 1363 | { |
1364 | while (!list_empty(&ubi->works)) { | 1364 | while (!list_empty(&ubi->works)) { |
1365 | struct ubi_work *wrk; | 1365 | struct ubi_work *wrk; |
1366 | 1366 | ||
1367 | wrk = list_entry(ubi->works.next, struct ubi_work, list); | 1367 | wrk = list_entry(ubi->works.next, struct ubi_work, list); |
1368 | list_del(&wrk->list); | 1368 | list_del(&wrk->list); |
1369 | wrk->func(ubi, wrk, 1); | 1369 | wrk->func(ubi, wrk, 1); |
1370 | ubi->works_count -= 1; | 1370 | ubi->works_count -= 1; |
1371 | ubi_assert(ubi->works_count >= 0); | 1371 | ubi_assert(ubi->works_count >= 0); |
1372 | } | 1372 | } |
1373 | } | 1373 | } |
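cancel_pending() drains ubi->works by invoking each callback with its third argument set to 1, i.e. in "cancel" mode, so the callback only releases its resources instead of doing real I/O. A hedged sketch of a callback honouring that convention; the function name and body are illustrative, only the signature is taken from the call above:

#include <linux/slab.h>

/* Illustrative worker callback matching the func(ubi, wrk, cancel) call above. */
static int example_work_func(struct ubi_device *ubi, struct ubi_work *wrk, int cancel)
{
	if (cancel) {
		kfree(wrk);	/* cancellation: free the work item, do no I/O */
		return 0;
	}
	/* ... do the actual work, e.g. erase a physical eraseblock ... */
	kfree(wrk);
	return 0;
}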
1374 | 1374 | ||
1375 | /** | 1375 | /** |
1376 | * ubi_wl_init_scan - initialize the WL sub-system using attaching information. | 1376 | * ubi_wl_init - initialize the WL sub-system using attaching information. |
1377 | * @ubi: UBI device description object | 1377 | * @ubi: UBI device description object |
1378 | * @ai: attaching information | 1378 | * @ai: attaching information |
1379 | * | 1379 | * |
1380 | * This function returns zero in case of success, and a negative error code in | 1380 | * This function returns zero in case of success, and a negative error code in |
1381 | * case of failure. | 1381 | * case of failure. |
1382 | */ | 1382 | */ |
1383 | int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_attach_info *ai) | 1383 | int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) |
1384 | { | 1384 | { |
1385 | int err, i; | 1385 | int err, i; |
1386 | struct rb_node *rb1, *rb2; | 1386 | struct rb_node *rb1, *rb2; |
1387 | struct ubi_ainf_volume *av; | 1387 | struct ubi_ainf_volume *av; |
1388 | struct ubi_ainf_peb *aeb, *tmp; | 1388 | struct ubi_ainf_peb *aeb, *tmp; |
1389 | struct ubi_wl_entry *e; | 1389 | struct ubi_wl_entry *e; |
1390 | 1390 | ||
1391 | ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT; | 1391 | ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT; |
1392 | spin_lock_init(&ubi->wl_lock); | 1392 | spin_lock_init(&ubi->wl_lock); |
1393 | mutex_init(&ubi->move_mutex); | 1393 | mutex_init(&ubi->move_mutex); |
1394 | init_rwsem(&ubi->work_sem); | 1394 | init_rwsem(&ubi->work_sem); |
1395 | ubi->max_ec = ai->max_ec; | 1395 | ubi->max_ec = ai->max_ec; |
1396 | INIT_LIST_HEAD(&ubi->works); | 1396 | INIT_LIST_HEAD(&ubi->works); |
1397 | 1397 | ||
1398 | sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); | 1398 | sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); |
1399 | 1399 | ||
1400 | err = -ENOMEM; | 1400 | err = -ENOMEM; |
1401 | ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL); | 1401 | ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL); |
1402 | if (!ubi->lookuptbl) | 1402 | if (!ubi->lookuptbl) |
1403 | return err; | 1403 | return err; |
1404 | 1404 | ||
1405 | for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) | 1405 | for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) |
1406 | INIT_LIST_HEAD(&ubi->pq[i]); | 1406 | INIT_LIST_HEAD(&ubi->pq[i]); |
1407 | ubi->pq_head = 0; | 1407 | ubi->pq_head = 0; |
1408 | 1408 | ||
1409 | list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) { | 1409 | list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) { |
1410 | cond_resched(); | 1410 | cond_resched(); |
1411 | 1411 | ||
1412 | e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); | 1412 | e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); |
1413 | if (!e) | 1413 | if (!e) |
1414 | goto out_free; | 1414 | goto out_free; |
1415 | 1415 | ||
1416 | e->pnum = aeb->pnum; | 1416 | e->pnum = aeb->pnum; |
1417 | e->ec = aeb->ec; | 1417 | e->ec = aeb->ec; |
1418 | ubi->lookuptbl[e->pnum] = e; | 1418 | ubi->lookuptbl[e->pnum] = e; |
1419 | if (schedule_erase(ubi, e, 0)) { | 1419 | if (schedule_erase(ubi, e, 0)) { |
1420 | kmem_cache_free(ubi_wl_entry_slab, e); | 1420 | kmem_cache_free(ubi_wl_entry_slab, e); |
1421 | goto out_free; | 1421 | goto out_free; |
1422 | } | 1422 | } |
1423 | } | 1423 | } |
1424 | 1424 | ||
1425 | list_for_each_entry(aeb, &ai->free, u.list) { | 1425 | list_for_each_entry(aeb, &ai->free, u.list) { |
1426 | cond_resched(); | 1426 | cond_resched(); |
1427 | 1427 | ||
1428 | e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); | 1428 | e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); |
1429 | if (!e) | 1429 | if (!e) |
1430 | goto out_free; | 1430 | goto out_free; |
1431 | 1431 | ||
1432 | e->pnum = aeb->pnum; | 1432 | e->pnum = aeb->pnum; |
1433 | e->ec = aeb->ec; | 1433 | e->ec = aeb->ec; |
1434 | ubi_assert(e->ec >= 0); | 1434 | ubi_assert(e->ec >= 0); |
1435 | wl_tree_add(e, &ubi->free); | 1435 | wl_tree_add(e, &ubi->free); |
1436 | ubi->lookuptbl[e->pnum] = e; | 1436 | ubi->lookuptbl[e->pnum] = e; |
1437 | } | 1437 | } |
1438 | 1438 | ||
1439 | ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { | 1439 | ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { |
1440 | ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) { | 1440 | ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) { |
1441 | cond_resched(); | 1441 | cond_resched(); |
1442 | 1442 | ||
1443 | e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); | 1443 | e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); |
1444 | if (!e) | 1444 | if (!e) |
1445 | goto out_free; | 1445 | goto out_free; |
1446 | 1446 | ||
1447 | e->pnum = aeb->pnum; | 1447 | e->pnum = aeb->pnum; |
1448 | e->ec = aeb->ec; | 1448 | e->ec = aeb->ec; |
1449 | ubi->lookuptbl[e->pnum] = e; | 1449 | ubi->lookuptbl[e->pnum] = e; |
1450 | if (!aeb->scrub) { | 1450 | if (!aeb->scrub) { |
1451 | dbg_wl("add PEB %d EC %d to the used tree", | 1451 | dbg_wl("add PEB %d EC %d to the used tree", |
1452 | e->pnum, e->ec); | 1452 | e->pnum, e->ec); |
1453 | wl_tree_add(e, &ubi->used); | 1453 | wl_tree_add(e, &ubi->used); |
1454 | } else { | 1454 | } else { |
1455 | dbg_wl("add PEB %d EC %d to the scrub tree", | 1455 | dbg_wl("add PEB %d EC %d to the scrub tree", |
1456 | e->pnum, e->ec); | 1456 | e->pnum, e->ec); |
1457 | wl_tree_add(e, &ubi->scrub); | 1457 | wl_tree_add(e, &ubi->scrub); |
1458 | } | 1458 | } |
1459 | } | 1459 | } |
1460 | } | 1460 | } |
1461 | 1461 | ||
1462 | if (ubi->avail_pebs < WL_RESERVED_PEBS) { | 1462 | if (ubi->avail_pebs < WL_RESERVED_PEBS) { |
1463 | ubi_err("not enough physical eraseblocks (%d, need %d)", | 1463 | ubi_err("not enough physical eraseblocks (%d, need %d)", |
1464 | ubi->avail_pebs, WL_RESERVED_PEBS); | 1464 | ubi->avail_pebs, WL_RESERVED_PEBS); |
1465 | if (ubi->corr_peb_count) | 1465 | if (ubi->corr_peb_count) |
1466 | ubi_err("%d PEBs are corrupted and not used", | 1466 | ubi_err("%d PEBs are corrupted and not used", |
1467 | ubi->corr_peb_count); | 1467 | ubi->corr_peb_count); |
1468 | goto out_free; | 1468 | goto out_free; |
1469 | } | 1469 | } |
1470 | ubi->avail_pebs -= WL_RESERVED_PEBS; | 1470 | ubi->avail_pebs -= WL_RESERVED_PEBS; |
1471 | ubi->rsvd_pebs += WL_RESERVED_PEBS; | 1471 | ubi->rsvd_pebs += WL_RESERVED_PEBS; |
1472 | 1472 | ||
1473 | /* Schedule wear-leveling if needed */ | 1473 | /* Schedule wear-leveling if needed */ |
1474 | err = ensure_wear_leveling(ubi); | 1474 | err = ensure_wear_leveling(ubi); |
1475 | if (err) | 1475 | if (err) |
1476 | goto out_free; | 1476 | goto out_free; |
1477 | 1477 | ||
1478 | return 0; | 1478 | return 0; |
1479 | 1479 | ||
1480 | out_free: | 1480 | out_free: |
1481 | cancel_pending(ubi); | 1481 | cancel_pending(ubi); |
1482 | tree_destroy(&ubi->used); | 1482 | tree_destroy(&ubi->used); |
1483 | tree_destroy(&ubi->free); | 1483 | tree_destroy(&ubi->free); |
1484 | tree_destroy(&ubi->scrub); | 1484 | tree_destroy(&ubi->scrub); |
1485 | kfree(ubi->lookuptbl); | 1485 | kfree(ubi->lookuptbl); |
1486 | return err; | 1486 | return err; |
1487 | } | 1487 | } |
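After the rename shown in the kerneldoc and signature above, callers pass the attaching information collected during scanning straight to ubi_wl_init(). A hedged sketch of a call site; the error label is illustrative:

	/* Illustrative call from the attach path; "out_wl" is a made-up label. */
	err = ubi_wl_init(ubi, ai);
	if (err)
		goto out_wl;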
1488 | 1488 | ||
1489 | /** | 1489 | /** |
1490 | * protection_queue_destroy - destroy the protection queue. | 1490 | * protection_queue_destroy - destroy the protection queue. |
1491 | * @ubi: UBI device description object | 1491 | * @ubi: UBI device description object |
1492 | */ | 1492 | */ |
1493 | static void protection_queue_destroy(struct ubi_device *ubi) | 1493 | static void protection_queue_destroy(struct ubi_device *ubi) |
1494 | { | 1494 | { |
1495 | int i; | 1495 | int i; |
1496 | struct ubi_wl_entry *e, *tmp; | 1496 | struct ubi_wl_entry *e, *tmp; |
1497 | 1497 | ||
1498 | for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) { | 1498 | for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) { |
1499 | list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) { | 1499 | list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) { |
1500 | list_del(&e->u.list); | 1500 | list_del(&e->u.list); |
1501 | kmem_cache_free(ubi_wl_entry_slab, e); | 1501 | kmem_cache_free(ubi_wl_entry_slab, e); |
1502 | } | 1502 | } |
1503 | } | 1503 | } |
1504 | } | 1504 | } |
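The queue that protection_queue_destroy() empties is a small array of list heads, ubi->pq[0..UBI_PROT_QUEUE_LEN-1], on which recently written PEBs are parked for a while; this is also why ubi_wl_init() above initialises every slot and resets pq_head. A hedged sketch of putting an entry onto one of the slots; the helper name is illustrative and the slot-selection policy is a simplification of the real driver's:

#include <linux/list.h>

/* Illustrative only: queue a WL entry for temporary protection. The real
 * driver picks the slot relative to pq_head; here slot 0 is used for brevity. */
static void example_protect(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	list_add_tail(&e->u.list, &ubi->pq[0]);
}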
1505 | 1505 | ||
1506 | /** | 1506 | /** |
1507 | * ubi_wl_close - close the wear-leveling sub-system. | 1507 | * ubi_wl_close - close the wear-leveling sub-system. |
1508 | * @ubi: UBI device description object | 1508 | * @ubi: UBI device description object |
1509 | */ | 1509 | */ |
1510 | void ubi_wl_close(struct ubi_device *ubi) | 1510 | void ubi_wl_close(struct ubi_device *ubi) |
1511 | { | 1511 | { |
1512 | dbg_wl("close the WL sub-system"); | 1512 | dbg_wl("close the WL sub-system"); |
1513 | cancel_pending(ubi); | 1513 | cancel_pending(ubi); |
1514 | protection_queue_destroy(ubi); | 1514 | protection_queue_destroy(ubi); |
1515 | tree_destroy(&ubi->used); | 1515 | tree_destroy(&ubi->used); |
1516 | tree_destroy(&ubi->erroneous); | 1516 | tree_destroy(&ubi->erroneous); |
1517 | tree_destroy(&ubi->free); | 1517 | tree_destroy(&ubi->free); |
1518 | tree_destroy(&ubi->scrub); | 1518 | tree_destroy(&ubi->scrub); |
1519 | kfree(ubi->lookuptbl); | 1519 | kfree(ubi->lookuptbl); |
1520 | } | 1520 | } |
1521 | 1521 | ||
1522 | /** | 1522 | /** |
1523 | * self_check_ec - make sure that the erase counter of a PEB is correct. | 1523 | * self_check_ec - make sure that the erase counter of a PEB is correct. |
1524 | * @ubi: UBI device description object | 1524 | * @ubi: UBI device description object |
1525 | * @pnum: the physical eraseblock number to check | 1525 | * @pnum: the physical eraseblock number to check |
1526 | * @ec: the erase counter to check | 1526 | * @ec: the erase counter to check |
1527 | * | 1527 | * |
1528 | * This function returns zero if the erase counter of physical eraseblock @pnum | 1528 | * This function returns zero if the erase counter of physical eraseblock @pnum |
1529 | * is equivalent to @ec, and a negative error code if not or if an error | 1529 | * is equivalent to @ec, and a negative error code if not or if an error |
1530 | * occurred. | 1530 | * occurred. |
1531 | */ | 1531 | */ |
1532 | static int self_check_ec(struct ubi_device *ubi, int pnum, int ec) | 1532 | static int self_check_ec(struct ubi_device *ubi, int pnum, int ec) |
1533 | { | 1533 | { |
1534 | int err; | 1534 | int err; |
1535 | long long read_ec; | 1535 | long long read_ec; |
1536 | struct ubi_ec_hdr *ec_hdr; | 1536 | struct ubi_ec_hdr *ec_hdr; |
1537 | 1537 | ||
1538 | if (!ubi->dbg->chk_gen) | 1538 | if (!ubi->dbg->chk_gen) |
1539 | return 0; | 1539 | return 0; |
1540 | 1540 | ||
1541 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); | 1541 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); |
1542 | if (!ec_hdr) | 1542 | if (!ec_hdr) |
1543 | return -ENOMEM; | 1543 | return -ENOMEM; |
1544 | 1544 | ||
1545 | err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0); | 1545 | err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0); |
1546 | if (err && err != UBI_IO_BITFLIPS) { | 1546 | if (err && err != UBI_IO_BITFLIPS) { |
1547 | /* The header does not have to exist */ | 1547 | /* The header does not have to exist */ |
1548 | err = 0; | 1548 | err = 0; |
1549 | goto out_free; | 1549 | goto out_free; |
1550 | } | 1550 | } |
1551 | 1551 | ||
1552 | read_ec = be64_to_cpu(ec_hdr->ec); | 1552 | read_ec = be64_to_cpu(ec_hdr->ec); |
1553 | if (ec != read_ec) { | 1553 | if (ec != read_ec) { |
1554 | ubi_err("self-check failed for PEB %d", pnum); | 1554 | ubi_err("self-check failed for PEB %d", pnum); |
1555 | ubi_err("read EC is %lld, should be %d", read_ec, ec); | 1555 | ubi_err("read EC is %lld, should be %d", read_ec, ec); |
1556 | dump_stack(); | 1556 | dump_stack(); |
1557 | err = 1; | 1557 | err = 1; |
1558 | } else | 1558 | } else |
1559 | err = 0; | 1559 | err = 0; |
1560 | 1560 | ||
1561 | out_free: | 1561 | out_free: |
1562 | kfree(ec_hdr); | 1562 | kfree(ec_hdr); |
1563 | return err; | 1563 | return err; |
1564 | } | 1564 | } |
1565 | 1565 | ||
1566 | /** | 1566 | /** |
1567 | * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree. | 1567 | * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree. |
1568 | * @ubi: UBI device description object | 1568 | * @ubi: UBI device description object |
1569 | * @e: the wear-leveling entry to check | 1569 | * @e: the wear-leveling entry to check |
1570 | * @root: the root of the tree | 1570 | * @root: the root of the tree |
1571 | * | 1571 | * |
1572 | * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it | 1572 | * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it |
1573 | * is not. | 1573 | * is not. |
1574 | */ | 1574 | */ |
1575 | static int self_check_in_wl_tree(const struct ubi_device *ubi, | 1575 | static int self_check_in_wl_tree(const struct ubi_device *ubi, |
1576 | struct ubi_wl_entry *e, struct rb_root *root) | 1576 | struct ubi_wl_entry *e, struct rb_root *root) |
1577 | { | 1577 | { |
1578 | if (!ubi->dbg->chk_gen) | 1578 | if (!ubi->dbg->chk_gen) |
1579 | return 0; | 1579 | return 0; |
1580 | 1580 | ||
1581 | if (in_wl_tree(e, root)) | 1581 | if (in_wl_tree(e, root)) |
1582 | return 0; | 1582 | return 0; |
1583 | 1583 | ||
1584 | ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p ", | 1584 | ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p ", |
1585 | e->pnum, e->ec, root); | 1585 | e->pnum, e->ec, root); |
1586 | dump_stack(); | 1586 | dump_stack(); |
1587 | return -EINVAL; | 1587 | return -EINVAL; |
1588 | } | 1588 | } |
1589 | 1589 | ||
1590 | /** | 1590 | /** |
1591 | * self_check_in_pq - check if wear-leveling entry is in the protection | 1591 | * self_check_in_pq - check if wear-leveling entry is in the protection |
1592 | * queue. | 1592 | * queue. |
1593 | * @ubi: UBI device description object | 1593 | * @ubi: UBI device description object |
1594 | * @e: the wear-leveling entry to check | 1594 | * @e: the wear-leveling entry to check |
1595 | * | 1595 | * |
1596 | * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not. | 1596 | * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not. |
1597 | */ | 1597 | */ |
1598 | static int self_check_in_pq(const struct ubi_device *ubi, | 1598 | static int self_check_in_pq(const struct ubi_device *ubi, |
1599 | struct ubi_wl_entry *e) | 1599 | struct ubi_wl_entry *e) |
1600 | { | 1600 | { |
1601 | struct ubi_wl_entry *p; | 1601 | struct ubi_wl_entry *p; |
1602 | int i; | 1602 | int i; |
1603 | 1603 | ||
1604 | if (!ubi->dbg->chk_gen) | 1604 | if (!ubi->dbg->chk_gen) |
1605 | return 0; | 1605 | return 0; |
1606 | 1606 | ||
1607 | for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) | 1607 | for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) |
1608 | list_for_each_entry(p, &ubi->pq[i], u.list) | 1608 | list_for_each_entry(p, &ubi->pq[i], u.list) |
1609 | if (p == e) | 1609 | if (p == e) |
1610 | return 0; | 1610 | return 0; |
1611 | 1611 | ||
1612 | ubi_err("self-check failed for PEB %d, EC %d, Protect queue", | 1612 | ubi_err("self-check failed for PEB %d, EC %d, Protect queue", |
1613 | e->pnum, e->ec); | 1613 | e->pnum, e->ec); |
1614 | dump_stack(); | 1614 | dump_stack(); |
1615 | return -EINVAL; | 1615 | return -EINVAL; |
1616 | } | 1616 | } |
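All three self-check helpers above share the same shape: they return 0 immediately unless the generic debugging checks (ubi->dbg->chk_gen) are enabled, and on failure they report the offending PEB and dump a stack. A hedged sketch of a typical call site elsewhere in the wear-leveling code; the surrounding context is illustrative:

	/* Illustrative: verify the invariant before removing the entry from
	 * the "used" tree; the call is effectively free when chk_gen is off. */
	self_check_in_wl_tree(ubi, e, &ubi->used);
	rb_erase(&e->u.rb, &ubi->used);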
1617 | 1617 |