Commit 45175476ae2dbebc860d5cf486f2916044343513

Authored by Linus Torvalds

Merge tag 'upstream-3.11-rc1' of git://git.infradead.org/linux-ubi

Pull UBI fixes from Artem Bityutskiy:
 "A couple of fixes and clean-ups, allow for assigning user-defined UBI
  device numbers when attaching MTD devices by using the "mtd=" module
  parameter"

* tag 'upstream-3.11-rc1' of git://git.infradead.org/linux-ubi:
  UBI: support ubi_num on mtd.ubi command line
  UBI: fastmap break out of used PEB search
  UBI: document UBI_IOCVOLUP better in user header
  UBI: do not abort init when ubi.mtd devices cannot be found
  UBI: drop redundant "UBI error" string

Showing 3 changed files

drivers/mtd/ubi/build.c
/*
 * Copyright (c) International Business Machines Corp., 2006
 * Copyright (c) Nokia Corporation, 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Artem Bityutskiy (Битюцкий Артём),
 *         Frank Haverkamp
 */

/*
 * This file includes UBI initialization and building of UBI devices.
 *
 * When UBI is initialized, it attaches all the MTD devices specified as the
 * module load parameters or the kernel boot parameters. If no MTD devices
 * were specified, UBI does not attach any MTD device, but it is possible to
 * do this later using the "UBI control device".
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
#include <linux/mtd/partitions.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "ubi.h"

/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64

/* Maximum number of comma-separated items in the 'mtd=' parameter */
-#define MTD_PARAM_MAX_COUNT 3
+#define MTD_PARAM_MAX_COUNT 4

/* Maximum value for the number of bad PEBs per 1024 PEBs */
#define MAX_MTD_UBI_BEB_LIMIT 768

#ifdef CONFIG_MTD_UBI_MODULE
#define ubi_is_module() 1
#else
#define ubi_is_module() 0
#endif

/**
 * struct mtd_dev_param - MTD device parameter description data structure.
 * @name: MTD character device node path, MTD device name, or MTD device number
 *        string
 * @vid_hdr_offs: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 */
struct mtd_dev_param {
	char name[MTD_PARAM_LEN_MAX];
+	int ubi_num;
	int vid_hdr_offs;
	int max_beb_per1024;
};

/* Numbers of elements set in the @mtd_dev_param array */
static int __initdata mtd_devs;

/* MTD devices specification parameters */
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
#ifdef CONFIG_MTD_UBI_FASTMAP
/* UBI module parameter to enable fastmap automatically on non-fastmap images */
static bool fm_autoconvert;
#endif
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;

/* Slab cache for wear-leveling entries */
struct kmem_cache *ubi_wl_entry_slab;

/* UBI control character device */
static struct miscdevice ubi_ctrl_cdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ubi_ctrl",
	.fops = &ubi_ctrl_cdev_operations,
};

/* All UBI devices in system */
static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];

/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);

/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);

/* "Show" method for files in '/<sysfs>/class/ubi/' */
static ssize_t ubi_version_show(struct class *class,
				struct class_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", UBI_VERSION);
}

/* UBI version attribute ('/<sysfs>/class/ubi/version') */
static struct class_attribute ubi_version =
	__ATTR(version, S_IRUGO, ubi_version_show, NULL);

static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf);

/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
	__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_avail_eraseblocks =
	__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_total_eraseblocks =
	__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_volumes_count =
	__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
	__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_reserved_for_bad =
	__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
	__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_vol_count =
	__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_min_io_size =
	__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);

/**
 * ubi_volume_notify - send a volume change notification.
 * @ubi: UBI device description object
 * @vol: volume description object of the changed volume
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 *
 * This is a helper function which notifies all subscribers about a volume
 * change event (creation, removal, re-sizing, re-naming, updating). Returns
 * zero in case of success and a negative error code in case of failure.
 */
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
	struct ubi_notification nt;

	ubi_do_get_device_info(ubi, &nt.di);
	ubi_do_get_volume_info(ubi, vol, &nt.vi);

#ifdef CONFIG_MTD_UBI_FASTMAP
	switch (ntype) {
	case UBI_VOLUME_ADDED:
	case UBI_VOLUME_REMOVED:
	case UBI_VOLUME_RESIZED:
	case UBI_VOLUME_RENAMED:
		if (ubi_update_fastmap(ubi)) {
			ubi_err("Unable to update fastmap!");
			ubi_ro_mode(ubi);
		}
	}
#endif
	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}

/**
 * ubi_notify_all - send a notification to all volumes.
 * @ubi: UBI device description object
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 * @nb: the notifier to call
 *
 * This function walks all volumes of UBI device @ubi and sends the @ntype
 * notification for each volume. If @nb is %NULL, then all registered notifiers
 * are called, otherwise only the @nb notifier is called. Returns the number of
 * sent notifications.
 */
int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
{
	struct ubi_notification nt;
	int i, count = 0;

	ubi_do_get_device_info(ubi, &nt.di);

	mutex_lock(&ubi->device_mutex);
	for (i = 0; i < ubi->vtbl_slots; i++) {
		/*
		 * Since the @ubi->device is locked, and we are not going to
		 * change @ubi->volumes, we do not have to lock
		 * @ubi->volumes_lock.
		 */
		if (!ubi->volumes[i])
			continue;

		ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
		if (nb)
			nb->notifier_call(nb, ntype, &nt);
		else
			blocking_notifier_call_chain(&ubi_notifiers, ntype,
						     &nt);
		count += 1;
	}
	mutex_unlock(&ubi->device_mutex);

	return count;
}

/**
 * ubi_enumerate_volumes - send "add" notification for all existing volumes.
 * @nb: the notifier to call
 *
 * This function walks all UBI devices and volumes and sends the
 * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
 * registered notifiers are called, otherwise only the @nb notifier is called.
 * Returns the number of sent notifications.
 */
int ubi_enumerate_volumes(struct notifier_block *nb)
{
	int i, count = 0;

	/*
	 * Since the @ubi_devices_mutex is locked, and we are not going to
	 * change @ubi_devices, we do not have to lock @ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (!ubi)
			continue;
		count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
	}

	return count;
}

/**
 * ubi_get_device - get UBI device.
 * @ubi_num: UBI device number
 *
 * This function returns UBI device description object for UBI device number
 * @ubi_num, or %NULL if the device does not exist. This function increases the
 * device reference count to prevent removal of the device. In other words, the
 * device cannot be removed if its reference count is not zero.
 */
struct ubi_device *ubi_get_device(int ubi_num)
{
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	ubi = ubi_devices[ubi_num];
	if (ubi) {
		ubi_assert(ubi->ref_count >= 0);
		ubi->ref_count += 1;
		get_device(&ubi->dev);
	}
	spin_unlock(&ubi_devices_lock);

	return ubi;
}

/**
 * ubi_put_device - drop an UBI device reference.
 * @ubi: UBI device description object
 */
void ubi_put_device(struct ubi_device *ubi)
{
	spin_lock(&ubi_devices_lock);
	ubi->ref_count -= 1;
	put_device(&ubi->dev);
	spin_unlock(&ubi_devices_lock);
}

/**
 * ubi_get_by_major - get UBI device by character device major number.
 * @major: major number
 *
 * This function is similar to 'ubi_get_device()', but it searches the device
 * by its major number.
 */
struct ubi_device *ubi_get_by_major(int major)
{
	int i;
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_assert(ubi->ref_count >= 0);
			ubi->ref_count += 1;
			get_device(&ubi->dev);
			spin_unlock(&ubi_devices_lock);
			return ubi;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return NULL;
}

/**
 * ubi_major2num - get UBI device number by character device major number.
 * @major: major number
 *
 * This function searches UBI device number object by its major number. If UBI
 * device was not found, this function returns -ENODEV, otherwise the UBI device
 * number is returned.
 */
int ubi_major2num(int major)
{
	int i, ubi_num = -ENODEV;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_num = ubi->ubi_num;
			break;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return ubi_num;
}

/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t ret;
	struct ubi_device *ubi;

	/*
	 * The below code looks weird, but it actually makes sense. We get the
	 * UBI device reference from the contained 'struct ubi_device'. But it
	 * is unclear if the device was removed or not yet. Indeed, if the
	 * device was removed before we increased its reference count,
	 * 'ubi_get_device()' will return -ENODEV and we fail.
	 *
	 * Remember, 'struct ubi_device' is freed in the release function, so
	 * we still can use 'ubi->ubi_num'.
	 */
	ubi = container_of(dev, struct ubi_device, dev);
	ubi = ubi_get_device(ubi->ubi_num);
	if (!ubi)
		return -ENODEV;

	if (attr == &dev_eraseblock_size)
		ret = sprintf(buf, "%d\n", ubi->leb_size);
	else if (attr == &dev_avail_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->avail_pebs);
	else if (attr == &dev_total_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->good_peb_count);
	else if (attr == &dev_volumes_count)
		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
	else if (attr == &dev_max_ec)
		ret = sprintf(buf, "%d\n", ubi->max_ec);
	else if (attr == &dev_reserved_for_bad)
		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
	else if (attr == &dev_bad_peb_count)
		ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
	else if (attr == &dev_max_vol_count)
		ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
	else if (attr == &dev_min_io_size)
		ret = sprintf(buf, "%d\n", ubi->min_io_size);
	else if (attr == &dev_bgt_enabled)
		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
	else if (attr == &dev_mtd_num)
		ret = sprintf(buf, "%d\n", ubi->mtd->index);
	else
		ret = -EINVAL;

	ubi_put_device(ubi);
	return ret;
}

static void dev_release(struct device *dev)
{
	struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);

	kfree(ubi);
}

/**
 * ubi_sysfs_init - initialize sysfs for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
{
	int err;

	ubi->dev.release = dev_release;
	ubi->dev.devt = ubi->cdev.dev;
	ubi->dev.class = ubi_class;
	dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
	err = device_register(&ubi->dev);
	if (err)
		return err;

	*ref = 1;
	err = device_create_file(&ubi->dev, &dev_eraseblock_size);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_volumes_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_max_ec);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_bad_peb_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_max_vol_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_min_io_size);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_bgt_enabled);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_mtd_num);
	return err;
}

/**
 * ubi_sysfs_close - close sysfs for an UBI device.
 * @ubi: UBI device description object
 */
static void ubi_sysfs_close(struct ubi_device *ubi)
{
	device_remove_file(&ubi->dev, &dev_mtd_num);
	device_remove_file(&ubi->dev, &dev_bgt_enabled);
	device_remove_file(&ubi->dev, &dev_min_io_size);
	device_remove_file(&ubi->dev, &dev_max_vol_count);
	device_remove_file(&ubi->dev, &dev_bad_peb_count);
	device_remove_file(&ubi->dev, &dev_reserved_for_bad);
	device_remove_file(&ubi->dev, &dev_max_ec);
	device_remove_file(&ubi->dev, &dev_volumes_count);
	device_remove_file(&ubi->dev, &dev_total_eraseblocks);
	device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
	device_remove_file(&ubi->dev, &dev_eraseblock_size);
	device_unregister(&ubi->dev);
}

/**
 * kill_volumes - destroy all user volumes.
 * @ubi: UBI device description object
 */
static void kill_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i])
			ubi_free_volume(ubi, ubi->volumes[i]);
}

/**
 * uif_init - initialize user interfaces for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken, otherwise set to %0
 *
 * This function initializes various user interfaces for an UBI device. If the
 * initialization fails at an early stage, this function frees all the
 * resources it allocated, returns an error, and @ref is set to %0. However,
 * if the initialization fails after the UBI device was registered in the
 * driver core subsystem, this function takes a reference to @ubi->dev, because
 * otherwise the release function ('dev_release()') would free whole @ubi
 * object. The @ref argument is set to %1 in this case. The caller has to put
 * this reference.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int uif_init(struct ubi_device *ubi, int *ref)
{
	int i, err;
	dev_t dev;

	*ref = 0;
	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);

	/*
	 * Major numbers for the UBI character devices are allocated
	 * dynamically. Major numbers of volume character devices are
	 * equivalent to ones of the corresponding UBI character device. Minor
	 * numbers of UBI character devices are 0, while minor numbers of
	 * volume character devices start from 1. Thus, we allocate one major
	 * number and ubi->vtbl_slots + 1 minor numbers.
	 */
	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
	if (err) {
		ubi_err("cannot register UBI character devices");
		return err;
	}

	ubi_assert(MINOR(dev) == 0);
	cdev_init(&ubi->cdev, &ubi_cdev_operations);
	dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
	ubi->cdev.owner = THIS_MODULE;

	err = cdev_add(&ubi->cdev, dev, 1);
	if (err) {
		ubi_err("cannot add character device");
		goto out_unreg;
	}

	err = ubi_sysfs_init(ubi, ref);
	if (err)
		goto out_sysfs;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i]) {
			err = ubi_add_volume(ubi, ubi->volumes[i]);
			if (err) {
				ubi_err("cannot add volume %d", i);
				goto out_volumes;
			}
		}

	return 0;

out_volumes:
	kill_volumes(ubi);
out_sysfs:
	if (*ref)
		get_device(&ubi->dev);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
out_unreg:
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
	ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
	return err;
}

/**
 * uif_close - close user interfaces for an UBI device.
 * @ubi: UBI device description object
 *
 * Note, since this function un-registers UBI volume device objects (@vol->dev),
 * the memory allocated for the volumes is freed as well (in the release
 * function).
 */
static void uif_close(struct ubi_device *ubi)
{
	kill_volumes(ubi);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}

/**
 * ubi_free_internal_volumes - free internal volumes.
 * @ubi: UBI device description object
 */
void ubi_free_internal_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = ubi->vtbl_slots;
	     i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
		kfree(ubi->volumes[i]->eba_tbl);
		kfree(ubi->volumes[i]);
	}
}

static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
{
	int limit, device_pebs;
	uint64_t device_size;

	if (!max_beb_per1024)
		return 0;

	/*
	 * Here we are using size of the entire flash chip and
	 * not just the MTD partition size because the maximum
	 * number of bad eraseblocks is a percentage of the
	 * whole device and bad eraseblocks are not fairly
	 * distributed over the flash chip. So the worst case
	 * is that all the bad eraseblocks of the chip are in
	 * the MTD partition we are attaching (ubi->mtd).
	 */
	device_size = mtd_get_device_size(ubi->mtd);
	device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
	limit = mult_frac(device_pebs, max_beb_per1024, 1024);

	/* Round it up */
	if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
		limit += 1;

	return limit;
}

/**
 * io_init - initialize I/O sub-system for a given UBI device.
 * @ubi: UBI device description object
 * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
 *
 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
 * assumed:
 * o EC header is always at offset zero - this cannot be changed;
 * o VID header starts just after the EC header at the closest address
 *   aligned to @io->hdrs_min_io_size;
 * o data starts just after the VID header at the closest address aligned to
 *   @io->min_io_size
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int io_init(struct ubi_device *ubi, int max_beb_per1024)
{
	dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
	dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));

	if (ubi->mtd->numeraseregions != 0) {
		/*
		 * Some flashes have several erase regions. Different regions
		 * may have different eraseblock size and other
		 * characteristics. It looks like mostly multi-region flashes
		 * have one "main" region and one or more small regions to
		 * store boot loader code or boot parameters or whatever. I
		 * guess we should just pick the largest region. But this is
		 * not implemented.
		 */
		ubi_err("multiple regions, not implemented");
		return -EINVAL;
	}

	if (ubi->vid_hdr_offset < 0)
		return -EINVAL;

	/*
	 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
	 * physical eraseblocks maximum.
	 */

	ubi->peb_size = ubi->mtd->erasesize;
	ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
	ubi->flash_size = ubi->mtd->size;

	if (mtd_can_have_bb(ubi->mtd)) {
		ubi->bad_allowed = 1;
		ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
	}

	if (ubi->mtd->type == MTD_NORFLASH) {
		ubi_assert(ubi->mtd->writesize == 1);
		ubi->nor_flash = 1;
	}

	ubi->min_io_size = ubi->mtd->writesize;
	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;

	/*
	 * Make sure minimal I/O unit is power of 2. Note, there is no
	 * fundamental reason for this assumption. It is just an optimization
	 * which allows us to avoid costly division operations.
	 */
	if (!is_power_of_2(ubi->min_io_size)) {
		ubi_err("min. I/O unit (%d) is not power of 2",
			ubi->min_io_size);
		return -EINVAL;
	}

	ubi_assert(ubi->hdrs_min_io_size > 0);
	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);

	ubi->max_write_size = ubi->mtd->writebufsize;
	/*
	 * Maximum write size has to be greater than or equal to the min. I/O
	 * size, and it must be a multiple of the min. I/O size.
	 */
700 if (ubi->max_write_size < ubi->min_io_size || 701 if (ubi->max_write_size < ubi->min_io_size ||
701 ubi->max_write_size % ubi->min_io_size || 702 ubi->max_write_size % ubi->min_io_size ||
702 !is_power_of_2(ubi->max_write_size)) { 703 !is_power_of_2(ubi->max_write_size)) {
703 ubi_err("bad write buffer size %d for %d min. I/O unit", 704 ubi_err("bad write buffer size %d for %d min. I/O unit",
704 ubi->max_write_size, ubi->min_io_size); 705 ubi->max_write_size, ubi->min_io_size);
705 return -EINVAL; 706 return -EINVAL;
706 } 707 }
707 708
708 /* Calculate default aligned sizes of EC and VID headers */ 709 /* Calculate default aligned sizes of EC and VID headers */
709 ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size); 710 ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
710 ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size); 711 ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
711 712
712 dbg_gen("min_io_size %d", ubi->min_io_size); 713 dbg_gen("min_io_size %d", ubi->min_io_size);
713 dbg_gen("max_write_size %d", ubi->max_write_size); 714 dbg_gen("max_write_size %d", ubi->max_write_size);
714 dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size); 715 dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
715 dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize); 716 dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
716 dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize); 717 dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
717 718
718 if (ubi->vid_hdr_offset == 0) 719 if (ubi->vid_hdr_offset == 0)
719 /* Default offset */ 720 /* Default offset */
720 ubi->vid_hdr_offset = ubi->vid_hdr_aloffset = 721 ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
721 ubi->ec_hdr_alsize; 722 ubi->ec_hdr_alsize;
722 else { 723 else {
723 ubi->vid_hdr_aloffset = ubi->vid_hdr_offset & 724 ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
724 ~(ubi->hdrs_min_io_size - 1); 725 ~(ubi->hdrs_min_io_size - 1);
725 ubi->vid_hdr_shift = ubi->vid_hdr_offset - 726 ubi->vid_hdr_shift = ubi->vid_hdr_offset -
726 ubi->vid_hdr_aloffset; 727 ubi->vid_hdr_aloffset;
727 } 728 }
728 729
729 /* Similar for the data offset */ 730 /* Similar for the data offset */
730 ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE; 731 ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
731 ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size); 732 ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
732 733
733 dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset); 734 dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset);
734 dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset); 735 dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
735 dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift); 736 dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift);
736 dbg_gen("leb_start %d", ubi->leb_start); 737 dbg_gen("leb_start %d", ubi->leb_start);
737 738
738 /* The shift must be aligned to 32-bit boundary */ 739 /* The shift must be aligned to 32-bit boundary */
739 if (ubi->vid_hdr_shift % 4) { 740 if (ubi->vid_hdr_shift % 4) {
740 ubi_err("unaligned VID header shift %d", 741 ubi_err("unaligned VID header shift %d",
741 ubi->vid_hdr_shift); 742 ubi->vid_hdr_shift);
742 return -EINVAL; 743 return -EINVAL;
743 } 744 }
744 745
745 /* Check sanity */ 746 /* Check sanity */
746 if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE || 747 if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
747 ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE || 748 ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
748 ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE || 749 ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
749 ubi->leb_start & (ubi->min_io_size - 1)) { 750 ubi->leb_start & (ubi->min_io_size - 1)) {
750 ubi_err("bad VID header (%d) or data offsets (%d)", 751 ubi_err("bad VID header (%d) or data offsets (%d)",
751 ubi->vid_hdr_offset, ubi->leb_start); 752 ubi->vid_hdr_offset, ubi->leb_start);
752 return -EINVAL; 753 return -EINVAL;
753 } 754 }
754 755
755 /* 756 /*
756 * Set maximum amount of physical erroneous eraseblocks to be 10%. 757 * Set maximum amount of physical erroneous eraseblocks to be 10%.
757 * Erroneous PEB are those which have read errors. 758 * Erroneous PEB are those which have read errors.
758 */ 759 */
759 ubi->max_erroneous = ubi->peb_count / 10; 760 ubi->max_erroneous = ubi->peb_count / 10;
760 if (ubi->max_erroneous < 16) 761 if (ubi->max_erroneous < 16)
761 ubi->max_erroneous = 16; 762 ubi->max_erroneous = 16;
762 dbg_gen("max_erroneous %d", ubi->max_erroneous); 763 dbg_gen("max_erroneous %d", ubi->max_erroneous);
763 764
764 /* 765 /*
765 * It may happen that EC and VID headers are situated in one minimal 766 * It may happen that EC and VID headers are situated in one minimal
766 * I/O unit. In this case we can only accept this UBI image in 767 * I/O unit. In this case we can only accept this UBI image in
767 * read-only mode. 768 * read-only mode.
768 */ 769 */
769 if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) { 770 if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
770 ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode"); 771 ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
771 ubi->ro_mode = 1; 772 ubi->ro_mode = 1;
772 } 773 }
773 774
774 ubi->leb_size = ubi->peb_size - ubi->leb_start; 775 ubi->leb_size = ubi->peb_size - ubi->leb_start;
775 776
776 if (!(ubi->mtd->flags & MTD_WRITEABLE)) { 777 if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
777 ubi_msg("MTD device %d is write-protected, attach in read-only mode", 778 ubi_msg("MTD device %d is write-protected, attach in read-only mode",
778 ubi->mtd->index); 779 ubi->mtd->index);
779 ubi->ro_mode = 1; 780 ubi->ro_mode = 1;
780 } 781 }
781 782
782 /* 783 /*
783 * Note, ideally, we have to initialize @ubi->bad_peb_count here. But 784 * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
784 * unfortunately, MTD does not provide this information. We should loop 785 * unfortunately, MTD does not provide this information. We should loop
785 * over all physical eraseblocks and invoke mtd->block_is_bad() for 786 * over all physical eraseblocks and invoke mtd->block_is_bad() for
786 * each physical eraseblock. So, we leave @ubi->bad_peb_count 787 * each physical eraseblock. So, we leave @ubi->bad_peb_count
787 * uninitialized so far. 788 * uninitialized so far.
788 */ 789 */
789 790
790 return 0; 791 return 0;
791 } 792 }
792 793
793 /** 794 /**
794 * autoresize - re-size the volume which has the "auto-resize" flag set. 795 * autoresize - re-size the volume which has the "auto-resize" flag set.
795 * @ubi: UBI device description object 796 * @ubi: UBI device description object
796 * @vol_id: ID of the volume to re-size 797 * @vol_id: ID of the volume to re-size
797 * 798 *
798 * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in 799 * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
799 * the volume table to the largest possible size. See comments in ubi-header.h 800 * the volume table to the largest possible size. See comments in ubi-header.h
800 * for more description of the flag. Returns zero in case of success and a 801 * for more description of the flag. Returns zero in case of success and a
801 * negative error code in case of failure. 802 * negative error code in case of failure.
802 */ 803 */
803 static int autoresize(struct ubi_device *ubi, int vol_id) 804 static int autoresize(struct ubi_device *ubi, int vol_id)
804 { 805 {
805 struct ubi_volume_desc desc; 806 struct ubi_volume_desc desc;
806 struct ubi_volume *vol = ubi->volumes[vol_id]; 807 struct ubi_volume *vol = ubi->volumes[vol_id];
807 int err, old_reserved_pebs = vol->reserved_pebs; 808 int err, old_reserved_pebs = vol->reserved_pebs;
808 809
809 if (ubi->ro_mode) { 810 if (ubi->ro_mode) {
810 ubi_warn("skip auto-resize because of R/O mode"); 811 ubi_warn("skip auto-resize because of R/O mode");
811 return 0; 812 return 0;
812 } 813 }
813 814
814 /* 815 /*
815 * Clear the auto-resize flag in the volume in-memory copy of the 816 * Clear the auto-resize flag in the volume in-memory copy of the
816 * volume table, and 'ubi_resize_volume()' will propagate this change 817 * volume table, and 'ubi_resize_volume()' will propagate this change
817 * to the flash. 818 * to the flash.
818 */ 819 */
819 ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG; 820 ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;
820 821
821 if (ubi->avail_pebs == 0) { 822 if (ubi->avail_pebs == 0) {
822 struct ubi_vtbl_record vtbl_rec; 823 struct ubi_vtbl_record vtbl_rec;
823 824
824 /* 825 /*
825 * No available PEBs to re-size the volume, clear the flag on 826 * No available PEBs to re-size the volume, clear the flag on
826 * flash and exit. 827 * flash and exit.
827 */ 828 */
828 vtbl_rec = ubi->vtbl[vol_id]; 829 vtbl_rec = ubi->vtbl[vol_id];
829 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); 830 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
830 if (err) 831 if (err)
831 ubi_err("cannot clean auto-resize flag for volume %d", 832 ubi_err("cannot clean auto-resize flag for volume %d",
832 vol_id); 833 vol_id);
833 } else { 834 } else {
834 desc.vol = vol; 835 desc.vol = vol;
835 err = ubi_resize_volume(&desc, 836 err = ubi_resize_volume(&desc,
836 old_reserved_pebs + ubi->avail_pebs); 837 old_reserved_pebs + ubi->avail_pebs);
837 if (err) 838 if (err)
838 ubi_err("cannot auto-resize volume %d", vol_id); 839 ubi_err("cannot auto-resize volume %d", vol_id);
839 } 840 }
840 841
841 if (err) 842 if (err)
842 return err; 843 return err;
843 844
844 ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id, 845 ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
845 vol->name, old_reserved_pebs, vol->reserved_pebs); 846 vol->name, old_reserved_pebs, vol->reserved_pebs);
846 return 0; 847 return 0;
847 } 848 }
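A worked example of the resize arithmetic above (illustrative numbers, not taken from this patch): with old_reserved_pebs == 100 and ubi->avail_pebs == 50, the call becomes

        err = ubi_resize_volume(&desc, 100 + 50);   /* grow to 150 LEBs */

so the auto-resize volume simply absorbs every remaining free PEB.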
848 849
849 /** 850 /**
850 * ubi_attach_mtd_dev - attach an MTD device. 851 * ubi_attach_mtd_dev - attach an MTD device.
851 * @mtd: MTD device description object 852 * @mtd: MTD device description object
852 * @ubi_num: number to assign to the new UBI device 853 * @ubi_num: number to assign to the new UBI device
853 * @vid_hdr_offset: VID header offset 854 * @vid_hdr_offset: VID header offset
854 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs 855 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
855 * 856 *
856 * This function attaches MTD device @mtd to UBI and assigns number @ubi_num 857 * This function attaches MTD device @mtd to UBI and assigns number @ubi_num
857 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in 858 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
858 * which case this function finds a vacant device number and assigns it 859 * which case this function finds a vacant device number and assigns it
859 * automatically. Returns the new UBI device number in case of success and a 860 * automatically. Returns the new UBI device number in case of success and a
860 * negative error code in case of failure. 861 * negative error code in case of failure.
861 * 862 *
862 * Note, invocations of this function have to be serialized by the 863 * Note, invocations of this function have to be serialized by the
863 * @ubi_devices_mutex. 864 * @ubi_devices_mutex.
864 */ 865 */
865 int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, 866 int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
866 int vid_hdr_offset, int max_beb_per1024) 867 int vid_hdr_offset, int max_beb_per1024)
867 { 868 {
868 struct ubi_device *ubi; 869 struct ubi_device *ubi;
869 int i, err, ref = 0; 870 int i, err, ref = 0;
870 871
871 if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT) 872 if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
872 return -EINVAL; 873 return -EINVAL;
873 874
874 if (!max_beb_per1024) 875 if (!max_beb_per1024)
875 max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT; 876 max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;
876 877
877 /* 878 /*
878 * Check if we already have the same MTD device attached. 879 * Check if we already have the same MTD device attached.
879 * 880 *
880 * Note, this function assumes that UBI device creations and deletions 881 * Note, this function assumes that UBI device creations and deletions
881 * are serialized, so it does not take the &ubi_devices_lock. 882 * are serialized, so it does not take the &ubi_devices_lock.
882 */ 883 */
883 for (i = 0; i < UBI_MAX_DEVICES; i++) { 884 for (i = 0; i < UBI_MAX_DEVICES; i++) {
884 ubi = ubi_devices[i]; 885 ubi = ubi_devices[i];
885 if (ubi && mtd->index == ubi->mtd->index) { 886 if (ubi && mtd->index == ubi->mtd->index) {
886 ubi_err("mtd%d is already attached to ubi%d", 887 ubi_err("mtd%d is already attached to ubi%d",
887 mtd->index, i); 888 mtd->index, i);
888 return -EEXIST; 889 return -EEXIST;
889 } 890 }
890 } 891 }
891 892
892 /* 893 /*
893 * Make sure this MTD device is not emulated on top of an UBI volume 894 * Make sure this MTD device is not emulated on top of an UBI volume
894 * already. Generally this recursion works, but it causes problems: for 895 * already. Generally this recursion works, but it causes problems: for
895 * example, the UBI module takes a reference to itself by attaching (and 896 * example, the UBI module takes a reference to itself by attaching (and
896 * thus, opening) the emulated MTD device, which makes it impossible to 897 * thus, opening) the emulated MTD device, which makes it impossible to
897 * unload the module. In general it makes 898 * unload the module. In general it makes
898 * no sense to attach emulated MTD devices, so we prohibit this. 899 * no sense to attach emulated MTD devices, so we prohibit this.
899 */ 900 */
900 if (mtd->type == MTD_UBIVOLUME) { 901 if (mtd->type == MTD_UBIVOLUME) {
901 ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI", 902 ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
902 mtd->index); 903 mtd->index);
903 return -EINVAL; 904 return -EINVAL;
904 } 905 }
905 906
906 if (ubi_num == UBI_DEV_NUM_AUTO) { 907 if (ubi_num == UBI_DEV_NUM_AUTO) {
907 /* Search for an empty slot in the @ubi_devices array */ 908 /* Search for an empty slot in the @ubi_devices array */
908 for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) 909 for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
909 if (!ubi_devices[ubi_num]) 910 if (!ubi_devices[ubi_num])
910 break; 911 break;
911 if (ubi_num == UBI_MAX_DEVICES) { 912 if (ubi_num == UBI_MAX_DEVICES) {
912 ubi_err("only %d UBI devices may be created", 913 ubi_err("only %d UBI devices may be created",
913 UBI_MAX_DEVICES); 914 UBI_MAX_DEVICES);
914 return -ENFILE; 915 return -ENFILE;
915 } 916 }
916 } else { 917 } else {
917 if (ubi_num >= UBI_MAX_DEVICES) 918 if (ubi_num >= UBI_MAX_DEVICES)
918 return -EINVAL; 919 return -EINVAL;
919 920
920 /* Make sure ubi_num is not busy */ 921 /* Make sure ubi_num is not busy */
921 if (ubi_devices[ubi_num]) { 922 if (ubi_devices[ubi_num]) {
922 ubi_err("ubi%d already exists", ubi_num); 923 ubi_err("ubi%d already exists", ubi_num);
923 return -EEXIST; 924 return -EEXIST;
924 } 925 }
925 } 926 }
926 927
927 ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL); 928 ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
928 if (!ubi) 929 if (!ubi)
929 return -ENOMEM; 930 return -ENOMEM;
930 931
931 ubi->mtd = mtd; 932 ubi->mtd = mtd;
932 ubi->ubi_num = ubi_num; 933 ubi->ubi_num = ubi_num;
933 ubi->vid_hdr_offset = vid_hdr_offset; 934 ubi->vid_hdr_offset = vid_hdr_offset;
934 ubi->autoresize_vol_id = -1; 935 ubi->autoresize_vol_id = -1;
935 936
936 #ifdef CONFIG_MTD_UBI_FASTMAP 937 #ifdef CONFIG_MTD_UBI_FASTMAP
937 ubi->fm_pool.used = ubi->fm_pool.size = 0; 938 ubi->fm_pool.used = ubi->fm_pool.size = 0;
938 ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0; 939 ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;
939 940
940 /* 941 /*
941 * fm_pool.max_size is 5% of the total number of PEBs, clamped to lie 942 * fm_pool.max_size is 5% of the total number of PEBs, clamped to lie
942 * between UBI_FM_MIN_POOL_SIZE and UBI_FM_MAX_POOL_SIZE. 943 * between UBI_FM_MIN_POOL_SIZE and UBI_FM_MAX_POOL_SIZE.
943 */ 944 */
944 ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size, 945 ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
945 ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE); 946 ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
946 if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE) 947 if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
947 ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE; 948 ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
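        /*
         * Worked example (illustrative geometry, not from this patch): a
         * 256 MiB NAND with 128 KiB PEBs has 2048 PEBs, so the pool gets
         * (2048 / 100) * 5 = 100 slots, kept as-is whenever it already
         * lies between UBI_FM_MIN_POOL_SIZE and UBI_FM_MAX_POOL_SIZE.
         */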
948 949
949 ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE; 950 ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
950 ubi->fm_disabled = !fm_autoconvert; 951 ubi->fm_disabled = !fm_autoconvert;
951 952
952 if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) 953 if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
953 <= UBI_FM_MAX_START) { 954 <= UBI_FM_MAX_START) {
954 ubi_err("More than %i PEBs are needed for fastmap, sorry.", 955 ubi_err("More than %i PEBs are needed for fastmap, sorry.",
955 UBI_FM_MAX_START); 956 UBI_FM_MAX_START);
956 ubi->fm_disabled = 1; 957 ubi->fm_disabled = 1;
957 } 958 }
958 959
959 ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size); 960 ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size);
960 ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size); 961 ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
961 #else 962 #else
962 ubi->fm_disabled = 1; 963 ubi->fm_disabled = 1;
963 #endif 964 #endif
964 mutex_init(&ubi->buf_mutex); 965 mutex_init(&ubi->buf_mutex);
965 mutex_init(&ubi->ckvol_mutex); 966 mutex_init(&ubi->ckvol_mutex);
966 mutex_init(&ubi->device_mutex); 967 mutex_init(&ubi->device_mutex);
967 spin_lock_init(&ubi->volumes_lock); 968 spin_lock_init(&ubi->volumes_lock);
968 mutex_init(&ubi->fm_mutex); 969 mutex_init(&ubi->fm_mutex);
969 init_rwsem(&ubi->fm_sem); 970 init_rwsem(&ubi->fm_sem);
970 971
971 ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num); 972 ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
972 973
973 err = io_init(ubi, max_beb_per1024); 974 err = io_init(ubi, max_beb_per1024);
974 if (err) 975 if (err)
975 goto out_free; 976 goto out_free;
976 977
977 err = -ENOMEM; 978 err = -ENOMEM;
978 ubi->peb_buf = vmalloc(ubi->peb_size); 979 ubi->peb_buf = vmalloc(ubi->peb_size);
979 if (!ubi->peb_buf) 980 if (!ubi->peb_buf)
980 goto out_free; 981 goto out_free;
981 982
982 #ifdef CONFIG_MTD_UBI_FASTMAP 983 #ifdef CONFIG_MTD_UBI_FASTMAP
983 ubi->fm_size = ubi_calc_fm_size(ubi); 984 ubi->fm_size = ubi_calc_fm_size(ubi);
984 ubi->fm_buf = vzalloc(ubi->fm_size); 985 ubi->fm_buf = vzalloc(ubi->fm_size);
985 if (!ubi->fm_buf) 986 if (!ubi->fm_buf)
986 goto out_free; 987 goto out_free;
987 #endif 988 #endif
988 err = ubi_attach(ubi, 0); 989 err = ubi_attach(ubi, 0);
989 if (err) { 990 if (err) {
990 ubi_err("failed to attach mtd%d, error %d", mtd->index, err); 991 ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
991 goto out_free; 992 goto out_free;
992 } 993 }
993 994
994 if (ubi->autoresize_vol_id != -1) { 995 if (ubi->autoresize_vol_id != -1) {
995 err = autoresize(ubi, ubi->autoresize_vol_id); 996 err = autoresize(ubi, ubi->autoresize_vol_id);
996 if (err) 997 if (err)
997 goto out_detach; 998 goto out_detach;
998 } 999 }
999 1000
1000 err = uif_init(ubi, &ref); 1001 err = uif_init(ubi, &ref);
1001 if (err) 1002 if (err)
1002 goto out_detach; 1003 goto out_detach;
1003 1004
1004 err = ubi_debugfs_init_dev(ubi); 1005 err = ubi_debugfs_init_dev(ubi);
1005 if (err) 1006 if (err)
1006 goto out_uif; 1007 goto out_uif;
1007 1008
1008 ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name); 1009 ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
1009 if (IS_ERR(ubi->bgt_thread)) { 1010 if (IS_ERR(ubi->bgt_thread)) {
1010 err = PTR_ERR(ubi->bgt_thread); 1011 err = PTR_ERR(ubi->bgt_thread);
1011 ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, 1012 ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
1012 err); 1013 err);
1013 goto out_debugfs; 1014 goto out_debugfs;
1014 } 1015 }
1015 1016
1016 ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d", 1017 ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
1017 mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num); 1018 mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
1018 ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes", 1019 ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
1019 ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size); 1020 ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
1020 ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d", 1021 ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d",
1021 ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size); 1022 ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
1022 ubi_msg("VID header offset: %d (aligned %d), data offset: %d", 1023 ubi_msg("VID header offset: %d (aligned %d), data offset: %d",
1023 ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start); 1024 ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
1024 ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d", 1025 ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
1025 ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count); 1026 ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
1026 ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d", 1027 ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d",
1027 ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT, 1028 ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
1028 ubi->vtbl_slots); 1029 ubi->vtbl_slots);
1029 ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u", 1030 ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
1030 ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD, 1031 ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
1031 ubi->image_seq); 1032 ubi->image_seq);
1032 ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d", 1033 ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
1033 ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs); 1034 ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
1034 1035
1035 /* 1036 /*
1036 * The below lock makes sure we do not race with 'ubi_thread()' which 1037 * The below lock makes sure we do not race with 'ubi_thread()' which
1037 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up. 1038 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
1038 */ 1039 */
1039 spin_lock(&ubi->wl_lock); 1040 spin_lock(&ubi->wl_lock);
1040 ubi->thread_enabled = 1; 1041 ubi->thread_enabled = 1;
1041 wake_up_process(ubi->bgt_thread); 1042 wake_up_process(ubi->bgt_thread);
1042 spin_unlock(&ubi->wl_lock); 1043 spin_unlock(&ubi->wl_lock);
1043 1044
1044 ubi_devices[ubi_num] = ubi; 1045 ubi_devices[ubi_num] = ubi;
1045 ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL); 1046 ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
1046 return ubi_num; 1047 return ubi_num;
1047 1048
1048 out_debugfs: 1049 out_debugfs:
1049 ubi_debugfs_exit_dev(ubi); 1050 ubi_debugfs_exit_dev(ubi);
1050 out_uif: 1051 out_uif:
1051 get_device(&ubi->dev); 1052 get_device(&ubi->dev);
1052 ubi_assert(ref); 1053 ubi_assert(ref);
1053 uif_close(ubi); 1054 uif_close(ubi);
1054 out_detach: 1055 out_detach:
1055 ubi_wl_close(ubi); 1056 ubi_wl_close(ubi);
1056 ubi_free_internal_volumes(ubi); 1057 ubi_free_internal_volumes(ubi);
1057 vfree(ubi->vtbl); 1058 vfree(ubi->vtbl);
1058 out_free: 1059 out_free:
1059 vfree(ubi->peb_buf); 1060 vfree(ubi->peb_buf);
1060 vfree(ubi->fm_buf); 1061 vfree(ubi->fm_buf);
1061 if (ref) 1062 if (ref)
1062 put_device(&ubi->dev); 1063 put_device(&ubi->dev);
1063 else 1064 else
1064 kfree(ubi); 1065 kfree(ubi);
1065 return err; 1066 return err;
1066 } 1067 }
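A minimal caller sketch for the attach path above, assuming an MTD device number that exists on the target system (the index 0 is hypothetical); this mirrors how ubi_init() below drives the same function:

        struct mtd_info *mtd = get_mtd_device(NULL, 0); /* mtd0, hypothetical */
        int ubi_num;

        if (IS_ERR(mtd))
                return PTR_ERR(mtd);

        mutex_lock(&ubi_devices_mutex);
        /* UBI_DEV_NUM_AUTO asks UBI to pick the first vacant ubiX slot */
        ubi_num = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0);
        mutex_unlock(&ubi_devices_mutex);
        if (ubi_num < 0)
                put_mtd_device(mtd);

Note the serialization against @ubi_devices_mutex that the function's own comment demands.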
1067 1068
1068 /** 1069 /**
1069 * ubi_detach_mtd_dev - detach an MTD device. 1070 * ubi_detach_mtd_dev - detach an MTD device.
1070 * @ubi_num: UBI device number to detach from 1071 * @ubi_num: UBI device number to detach from
1071 * @anyway: detach MTD even if device reference count is not zero 1072 * @anyway: detach MTD even if device reference count is not zero
1072 * 1073 *
1073 * This function destroys an UBI device number @ubi_num and detaches the 1074 * This function destroys an UBI device number @ubi_num and detaches the
1074 * underlying MTD device. Returns zero in case of success and %-EBUSY if the 1075 * underlying MTD device. Returns zero in case of success and %-EBUSY if the
1075 * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not 1076 * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
1076 * exist. 1077 * exist.
1077 * 1078 *
1078 * Note, invocations of this function have to be serialized by the 1079 * Note, invocations of this function have to be serialized by the
1079 * @ubi_devices_mutex. 1080 * @ubi_devices_mutex.
1080 */ 1081 */
1081 int ubi_detach_mtd_dev(int ubi_num, int anyway) 1082 int ubi_detach_mtd_dev(int ubi_num, int anyway)
1082 { 1083 {
1083 struct ubi_device *ubi; 1084 struct ubi_device *ubi;
1084 1085
1085 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) 1086 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
1086 return -EINVAL; 1087 return -EINVAL;
1087 1088
1088 ubi = ubi_get_device(ubi_num); 1089 ubi = ubi_get_device(ubi_num);
1089 if (!ubi) 1090 if (!ubi)
1090 return -EINVAL; 1091 return -EINVAL;
1091 1092
1092 spin_lock(&ubi_devices_lock); 1093 spin_lock(&ubi_devices_lock);
1093 put_device(&ubi->dev); 1094 put_device(&ubi->dev);
1094 ubi->ref_count -= 1; 1095 ubi->ref_count -= 1;
1095 if (ubi->ref_count) { 1096 if (ubi->ref_count) {
1096 if (!anyway) { 1097 if (!anyway) {
1097 spin_unlock(&ubi_devices_lock); 1098 spin_unlock(&ubi_devices_lock);
1098 return -EBUSY; 1099 return -EBUSY;
1099 } 1100 }
1100 /* This may only happen if there is a bug */ 1101 /* This may only happen if there is a bug */
1101 ubi_err("%s reference count %d, destroy anyway", 1102 ubi_err("%s reference count %d, destroy anyway",
1102 ubi->ubi_name, ubi->ref_count); 1103 ubi->ubi_name, ubi->ref_count);
1103 } 1104 }
1104 ubi_devices[ubi_num] = NULL; 1105 ubi_devices[ubi_num] = NULL;
1105 spin_unlock(&ubi_devices_lock); 1106 spin_unlock(&ubi_devices_lock);
1106 1107
1107 ubi_assert(ubi_num == ubi->ubi_num); 1108 ubi_assert(ubi_num == ubi->ubi_num);
1108 ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL); 1109 ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
1109 ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); 1110 ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
1110 #ifdef CONFIG_MTD_UBI_FASTMAP 1111 #ifdef CONFIG_MTD_UBI_FASTMAP
1111 /* If we don't write a new fastmap at detach time we lose all 1112 /* If we don't write a new fastmap at detach time we lose all
1112 * EC updates that have been made since the last written fastmap. */ 1113 * EC updates that have been made since the last written fastmap. */
1113 ubi_update_fastmap(ubi); 1114 ubi_update_fastmap(ubi);
1114 #endif 1115 #endif
1115 /* 1116 /*
1116 * Before freeing anything, we have to stop the background thread to 1117 * Before freeing anything, we have to stop the background thread to
1117 * prevent it from doing anything on this device while we are freeing. 1118 * prevent it from doing anything on this device while we are freeing.
1118 */ 1119 */
1119 if (ubi->bgt_thread) 1120 if (ubi->bgt_thread)
1120 kthread_stop(ubi->bgt_thread); 1121 kthread_stop(ubi->bgt_thread);
1121 1122
1122 /* 1123 /*
1123 * Get a reference to the device in order to prevent 'dev_release()' 1124 * Get a reference to the device in order to prevent 'dev_release()'
1124 * from freeing the @ubi object. 1125 * from freeing the @ubi object.
1125 */ 1126 */
1126 get_device(&ubi->dev); 1127 get_device(&ubi->dev);
1127 1128
1128 ubi_debugfs_exit_dev(ubi); 1129 ubi_debugfs_exit_dev(ubi);
1129 uif_close(ubi); 1130 uif_close(ubi);
1130 1131
1131 ubi_wl_close(ubi); 1132 ubi_wl_close(ubi);
1132 ubi_free_internal_volumes(ubi); 1133 ubi_free_internal_volumes(ubi);
1133 vfree(ubi->vtbl); 1134 vfree(ubi->vtbl);
1134 put_mtd_device(ubi->mtd); 1135 put_mtd_device(ubi->mtd);
1135 vfree(ubi->peb_buf); 1136 vfree(ubi->peb_buf);
1136 vfree(ubi->fm_buf); 1137 vfree(ubi->fm_buf);
1137 ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); 1138 ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
1138 put_device(&ubi->dev); 1139 put_device(&ubi->dev);
1139 return 0; 1140 return 0;
1140 } 1141 }
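The matching teardown, sketched the way a regular caller (rather than ubi_exit() below, which passes @anyway = 1) would use it:

        mutex_lock(&ubi_devices_mutex);
        err = ubi_detach_mtd_dev(ubi_num, 0);   /* 0: fail with -EBUSY if busy */
        mutex_unlock(&ubi_devices_mutex);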
1141 1142
1142 /** 1143 /**
1143 * open_mtd_by_chdev - open an MTD device by its character device node path. 1144 * open_mtd_by_chdev - open an MTD device by its character device node path.
1144 * @mtd_dev: MTD character device node path 1145 * @mtd_dev: MTD character device node path
1145 * 1146 *
1146 * This helper function opens an MTD device by its character device node path. 1147 * This helper function opens an MTD device by its character device node path.
1147 * Returns MTD device description object in case of success and a negative 1148 * Returns MTD device description object in case of success and a negative
1148 * error code in case of failure. 1149 * error code in case of failure.
1149 */ 1150 */
1150 static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev) 1151 static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
1151 { 1152 {
1152 int err, major, minor, mode; 1153 int err, major, minor, mode;
1153 struct path path; 1154 struct path path;
1154 1155
1155 /* Probably this is an MTD character device node path */ 1156 /* Probably this is an MTD character device node path */
1156 err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path); 1157 err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
1157 if (err) 1158 if (err)
1158 return ERR_PTR(err); 1159 return ERR_PTR(err);
1159 1160
1160 /* MTD device number is defined by the major / minor numbers */ 1161 /* MTD device number is defined by the major / minor numbers */
1161 major = imajor(path.dentry->d_inode); 1162 major = imajor(path.dentry->d_inode);
1162 minor = iminor(path.dentry->d_inode); 1163 minor = iminor(path.dentry->d_inode);
1163 mode = path.dentry->d_inode->i_mode; 1164 mode = path.dentry->d_inode->i_mode;
1164 path_put(&path); 1165 path_put(&path);
1165 if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode)) 1166 if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
1166 return ERR_PTR(-EINVAL); 1167 return ERR_PTR(-EINVAL);
1167 1168
1168 if (minor & 1) 1169 if (minor & 1)
1169 /* 1170 /*
1170 * We do not think support for the "/dev/mtdrX" devices is needed, 1171 * We do not think support for the "/dev/mtdrX" devices is needed,
1171 * so we do not support them, to avoid doing extra work. 1172 * so we do not support them, to avoid doing extra work.
1172 */ 1173 */
1173 return ERR_PTR(-EINVAL); 1174 return ERR_PTR(-EINVAL);
1174 1175
1175 return get_mtd_device(NULL, minor / 2); 1176 return get_mtd_device(NULL, minor / 2);
1176 } 1177 }
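The minor-number arithmetic relies on the classic MTD character-device layout: each mtdX owns the even minor 2*X and its read-only twin mtdrX the odd minor 2*X+1. For example, /dev/mtd1 has minor 2 and resolves to MTD device number 2 / 2 = 1, while /dev/mtdr1 (minor 3) is rejected by the minor & 1 test above.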
1177 1178
1178 /** 1179 /**
1179 * open_mtd_device - open MTD device by name, character device path, or number. 1180 * open_mtd_device - open MTD device by name, character device path, or number.
1180 * @mtd_dev: name, character device node path, or MTD device number 1181 * @mtd_dev: name, character device node path, or MTD device number
1181 * 1182 *
1182 * This function tries to open an MTD device described by the @mtd_dev string, 1183 * This function tries to open an MTD device described by the @mtd_dev string,
1183 * which is first treated as an ASCII MTD device number; if that fails, it 1184 * which is first treated as an ASCII MTD device number; if that fails, it
1184 * is treated as an MTD device name; and if that also fails, it is treated 1185 * is treated as an MTD device name; and if that also fails, it is treated
1185 * as an MTD character device node path. Returns an MTD device description object in 1186 * as an MTD character device node path. Returns an MTD device description object in
1186 * case of success and a negative error code in case of failure. 1187 * case of success and a negative error code in case of failure.
1187 */ 1188 */
1188 static struct mtd_info * __init open_mtd_device(const char *mtd_dev) 1189 static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
1189 { 1190 {
1190 struct mtd_info *mtd; 1191 struct mtd_info *mtd;
1191 int mtd_num; 1192 int mtd_num;
1192 char *endp; 1193 char *endp;
1193 1194
1194 mtd_num = simple_strtoul(mtd_dev, &endp, 0); 1195 mtd_num = simple_strtoul(mtd_dev, &endp, 0);
1195 if (*endp != '\0' || mtd_dev == endp) { 1196 if (*endp != '\0' || mtd_dev == endp) {
1196 /* 1197 /*
1197 * This does not look like an ASCII integer, probably this is 1198 * This does not look like an ASCII integer, probably this is
1198 * an MTD device name. 1199 * an MTD device name.
1199 */ 1200 */
1200 mtd = get_mtd_device_nm(mtd_dev); 1201 mtd = get_mtd_device_nm(mtd_dev);
1201 if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV) 1202 if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
1202 /* Probably this is an MTD character device node path */ 1203 /* Probably this is an MTD character device node path */
1203 mtd = open_mtd_by_chdev(mtd_dev); 1204 mtd = open_mtd_by_chdev(mtd_dev);
1204 } else 1205 } else
1205 mtd = get_mtd_device(NULL, mtd_num); 1206 mtd = get_mtd_device(NULL, mtd_num);
1206 1207
1207 return mtd; 1208 return mtd;
1208 } 1209 }
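A sketch of the resolution order with hypothetical values: "4" goes straight to get_mtd_device(NULL, 4); "content" is tried as a name via get_mtd_device_nm("content"); and "/dev/mtd0" falls through to open_mtd_by_chdev() once the name lookup fails with -ENODEV.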
1209 1210
1210 static int __init ubi_init(void) 1211 static int __init ubi_init(void)
1211 { 1212 {
1212 int err, i, k; 1213 int err, i, k;
1213 1214
1214 /* Ensure that EC and VID headers have correct size */ 1215 /* Ensure that EC and VID headers have correct size */
1215 BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64); 1216 BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
1216 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); 1217 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
1217 1218
1218 if (mtd_devs > UBI_MAX_DEVICES) { 1219 if (mtd_devs > UBI_MAX_DEVICES) {
1219 ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES); 1220 ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES);
1220 return -EINVAL; 1221 return -EINVAL;
1221 } 1222 }
1222 1223
1223 /* Create base sysfs directory and sysfs files */ 1224 /* Create base sysfs directory and sysfs files */
1224 ubi_class = class_create(THIS_MODULE, UBI_NAME_STR); 1225 ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
1225 if (IS_ERR(ubi_class)) { 1226 if (IS_ERR(ubi_class)) {
1226 err = PTR_ERR(ubi_class); 1227 err = PTR_ERR(ubi_class);
1227 ubi_err("cannot create UBI class"); 1228 ubi_err("cannot create UBI class");
1228 goto out; 1229 goto out;
1229 } 1230 }
1230 1231
1231 err = class_create_file(ubi_class, &ubi_version); 1232 err = class_create_file(ubi_class, &ubi_version);
1232 if (err) { 1233 if (err) {
1233 ubi_err("cannot create sysfs file"); 1234 ubi_err("cannot create sysfs file");
1234 goto out_class; 1235 goto out_class;
1235 } 1236 }
1236 1237
1237 err = misc_register(&ubi_ctrl_cdev); 1238 err = misc_register(&ubi_ctrl_cdev);
1238 if (err) { 1239 if (err) {
1239 ubi_err("cannot register device"); 1240 ubi_err("cannot register device");
1240 goto out_version; 1241 goto out_version;
1241 } 1242 }
1242 1243
1243 ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab", 1244 ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
1244 sizeof(struct ubi_wl_entry), 1245 sizeof(struct ubi_wl_entry),
1245 0, 0, NULL); 1246 0, 0, NULL);
1246 if (!ubi_wl_entry_slab) 1247 if (!ubi_wl_entry_slab)
1247 goto out_dev_unreg; 1248 goto out_dev_unreg;
1248 1249
1249 err = ubi_debugfs_init(); 1250 err = ubi_debugfs_init();
1250 if (err) 1251 if (err)
1251 goto out_slab; 1252 goto out_slab;
1252 1253
1253 1254
1254 /* Attach MTD devices */ 1255 /* Attach MTD devices */
1255 for (i = 0; i < mtd_devs; i++) { 1256 for (i = 0; i < mtd_devs; i++) {
1256 struct mtd_dev_param *p = &mtd_dev_param[i]; 1257 struct mtd_dev_param *p = &mtd_dev_param[i];
1257 struct mtd_info *mtd; 1258 struct mtd_info *mtd;
1258 1259
1259 cond_resched(); 1260 cond_resched();
1260 1261
1261 mtd = open_mtd_device(p->name); 1262 mtd = open_mtd_device(p->name);
1262 if (IS_ERR(mtd)) { 1263 if (IS_ERR(mtd)) {
1263 err = PTR_ERR(mtd); 1264 err = PTR_ERR(mtd);
1264 goto out_detach; 1265 ubi_err("cannot open mtd %s, error %d", p->name, err);
1266 /* See the comment below regarding ubi_is_module(). */
1267 if (ubi_is_module())
1268 goto out_detach;
1269 continue;
1265 } 1270 }
1266 1271
1267 mutex_lock(&ubi_devices_mutex); 1272 mutex_lock(&ubi_devices_mutex);
1268 err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 1273 err = ubi_attach_mtd_dev(mtd, p->ubi_num,
1269 p->vid_hdr_offs, p->max_beb_per1024); 1274 p->vid_hdr_offs, p->max_beb_per1024);
1270 mutex_unlock(&ubi_devices_mutex); 1275 mutex_unlock(&ubi_devices_mutex);
1271 if (err < 0) { 1276 if (err < 0) {
1272 ubi_err("cannot attach mtd%d", mtd->index); 1277 ubi_err("cannot attach mtd%d", mtd->index);
1273 put_mtd_device(mtd); 1278 put_mtd_device(mtd);
1274 1279
1275 /* 1280 /*
1276 * Originally UBI stopped initializing on any error. 1281 * Originally UBI stopped initializing on any error.
1277 * However, later on it was found out that this 1282 * However, later on it was found out that this
1278 * behavior is not very good when UBI is compiled into 1283 * behavior is not very good when UBI is compiled into
1279 * the kernel and the MTD devices to attach are passed 1284 * the kernel and the MTD devices to attach are passed
1280 * through the command line. Indeed, UBI failure 1285 * through the command line. Indeed, UBI failure
1281 * stopped whole boot sequence. 1286 * stopped whole boot sequence.
1282 * 1287 *
1283 * To fix this, we changed the behavior for the 1288 * To fix this, we changed the behavior for the
1284 * non-module case, but preserved the old behavior for 1289 * non-module case, but preserved the old behavior for
1285 * the module case, just for compatibility. This is a 1290 * the module case, just for compatibility. This is a
1286 * little inconsistent, though. 1291 * little inconsistent, though.
1287 */ 1292 */
1288 if (ubi_is_module()) 1293 if (ubi_is_module())
1289 goto out_detach; 1294 goto out_detach;
1290 } 1295 }
1291 } 1296 }
1292 1297
1293 return 0; 1298 return 0;
1294 1299
1295 out_detach: 1300 out_detach:
1296 for (k = 0; k < i; k++) 1301 for (k = 0; k < i; k++)
1297 if (ubi_devices[k]) { 1302 if (ubi_devices[k]) {
1298 mutex_lock(&ubi_devices_mutex); 1303 mutex_lock(&ubi_devices_mutex);
1299 ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1); 1304 ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
1300 mutex_unlock(&ubi_devices_mutex); 1305 mutex_unlock(&ubi_devices_mutex);
1301 } 1306 }
1302 ubi_debugfs_exit(); 1307 ubi_debugfs_exit();
1303 out_slab: 1308 out_slab:
1304 kmem_cache_destroy(ubi_wl_entry_slab); 1309 kmem_cache_destroy(ubi_wl_entry_slab);
1305 out_dev_unreg: 1310 out_dev_unreg:
1306 misc_deregister(&ubi_ctrl_cdev); 1311 misc_deregister(&ubi_ctrl_cdev);
1307 out_version: 1312 out_version:
1308 class_remove_file(ubi_class, &ubi_version); 1313 class_remove_file(ubi_class, &ubi_version);
1309 out_class: 1314 out_class:
1310 class_destroy(ubi_class); 1315 class_destroy(ubi_class);
1311 out: 1316 out:
1312 ubi_err("UBI error: cannot initialize UBI, error %d", err); 1317 ubi_err("cannot initialize UBI, error %d", err);
1313 return err; 1318 return err;
1314 } 1319 }
1315 late_initcall(ubi_init); 1320 late_initcall(ubi_init);
1316 1321
1317 static void __exit ubi_exit(void) 1322 static void __exit ubi_exit(void)
1318 { 1323 {
1319 int i; 1324 int i;
1320 1325
1321 for (i = 0; i < UBI_MAX_DEVICES; i++) 1326 for (i = 0; i < UBI_MAX_DEVICES; i++)
1322 if (ubi_devices[i]) { 1327 if (ubi_devices[i]) {
1323 mutex_lock(&ubi_devices_mutex); 1328 mutex_lock(&ubi_devices_mutex);
1324 ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1); 1329 ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
1325 mutex_unlock(&ubi_devices_mutex); 1330 mutex_unlock(&ubi_devices_mutex);
1326 } 1331 }
1327 ubi_debugfs_exit(); 1332 ubi_debugfs_exit();
1328 kmem_cache_destroy(ubi_wl_entry_slab); 1333 kmem_cache_destroy(ubi_wl_entry_slab);
1329 misc_deregister(&ubi_ctrl_cdev); 1334 misc_deregister(&ubi_ctrl_cdev);
1330 class_remove_file(ubi_class, &ubi_version); 1335 class_remove_file(ubi_class, &ubi_version);
1331 class_destroy(ubi_class); 1336 class_destroy(ubi_class);
1332 } 1337 }
1333 module_exit(ubi_exit); 1338 module_exit(ubi_exit);
1334 1339
1335 /** 1340 /**
1336 * bytes_str_to_int - convert a number of bytes string into an integer. 1341 * bytes_str_to_int - convert a number of bytes string into an integer.
1337 * @str: the string to convert 1342 * @str: the string to convert
1338 * 1343 *
1339 * This function returns the resulting positive integer in case of success and a 1344 * This function returns the resulting positive integer in case of success and a
1340 * negative error code in case of failure. 1345 * negative error code in case of failure.
1341 */ 1346 */
1342 static int __init bytes_str_to_int(const char *str) 1347 static int __init bytes_str_to_int(const char *str)
1343 { 1348 {
1344 char *endp; 1349 char *endp;
1345 unsigned long result; 1350 unsigned long result;
1346 1351
1347 result = simple_strtoul(str, &endp, 0); 1352 result = simple_strtoul(str, &endp, 0);
1348 if (str == endp || result >= INT_MAX) { 1353 if (str == endp || result >= INT_MAX) {
1349 ubi_err("UBI error: incorrect bytes count: \"%s\"\n", str); 1354 ubi_err("incorrect bytes count: \"%s\"\n", str);
1350 return -EINVAL; 1355 return -EINVAL;
1351 } 1356 }
1352 1357
1353 switch (*endp) { 1358 switch (*endp) {
1354 case 'G': 1359 case 'G':
1355 result *= 1024; 1360 result *= 1024;
1356 case 'M': 1361 case 'M':
1357 result *= 1024; 1362 result *= 1024;
1358 case 'K': 1363 case 'K':
1359 result *= 1024; 1364 result *= 1024;
1360 if (endp[1] == 'i' && endp[2] == 'B') 1365 if (endp[1] == 'i' && endp[2] == 'B')
1361 endp += 2; 1366 endp += 2;
1362 case '\0': 1367 case '\0':
1363 break; 1368 break;
1364 default: 1369 default:
1365 ubi_err("UBI error: incorrect bytes count: \"%s\"\n", str); 1370 ubi_err("incorrect bytes count: \"%s\"\n", str);
1366 return -EINVAL; 1371 return -EINVAL;
1367 } 1372 }
1368 1373
1369 return result; 1374 return result;
1370 } 1375 }
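Note the deliberate switch fall-through above: for "4MiB" the 'M' case multiplies by 1024 and then falls into the 'K' case for a second multiplication. A few worked inputs (illustrative):

        bytes_str_to_int("2048")  returns 2048
        bytes_str_to_int("512K")  returns 524288
        bytes_str_to_int("4MiB")  returns 4194304
        bytes_str_to_int("4x")    returns -EINVAL (hits the default case)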
1371 1376
1372 /** 1377 /**
1373 * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter. 1378 * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
1374 * @val: the parameter value to parse 1379 * @val: the parameter value to parse
1375 * @kp: not used 1380 * @kp: not used
1376 * 1381 *
1377 * This function returns zero in case of success and a negative error code in 1382 * This function returns zero in case of success and a negative error code in
1378 * case of error. 1383 * case of error.
1379 */ 1384 */
1380 static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp) 1385 static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
1381 { 1386 {
1382 int i, len; 1387 int i, len;
1383 struct mtd_dev_param *p; 1388 struct mtd_dev_param *p;
1384 char buf[MTD_PARAM_LEN_MAX]; 1389 char buf[MTD_PARAM_LEN_MAX];
1385 char *pbuf = &buf[0]; 1390 char *pbuf = &buf[0];
1386 char *tokens[MTD_PARAM_MAX_COUNT]; 1391 char *tokens[MTD_PARAM_MAX_COUNT], *token;
1387 1392
1388 if (!val) 1393 if (!val)
1389 return -EINVAL; 1394 return -EINVAL;
1390 1395
1391 if (mtd_devs == UBI_MAX_DEVICES) { 1396 if (mtd_devs == UBI_MAX_DEVICES) {
1392 ubi_err("UBI error: too many parameters, max. is %d\n", 1397 ubi_err("too many parameters, max. is %d\n",
1393 UBI_MAX_DEVICES); 1398 UBI_MAX_DEVICES);
1394 return -EINVAL; 1399 return -EINVAL;
1395 } 1400 }
1396 1401
1397 len = strnlen(val, MTD_PARAM_LEN_MAX); 1402 len = strnlen(val, MTD_PARAM_LEN_MAX);
1398 if (len == MTD_PARAM_LEN_MAX) { 1403 if (len == MTD_PARAM_LEN_MAX) {
1399 ubi_err("UBI error: parameter \"%s\" is too long, max. is %d\n", 1404 ubi_err("parameter \"%s\" is too long, max. is %d\n",
1400 val, MTD_PARAM_LEN_MAX); 1405 val, MTD_PARAM_LEN_MAX);
1401 return -EINVAL; 1406 return -EINVAL;
1402 } 1407 }
1403 1408
1404 if (len == 0) { 1409 if (len == 0) {
1405 pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n"); 1410 pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
1406 return 0; 1411 return 0;
1407 } 1412 }
1408 1413
1409 strcpy(buf, val); 1414 strcpy(buf, val);
1410 1415
1411 /* Get rid of the final newline */ 1416 /* Get rid of the final newline */
1412 if (buf[len - 1] == '\n') 1417 if (buf[len - 1] == '\n')
1413 buf[len - 1] = '\0'; 1418 buf[len - 1] = '\0';
1414 1419
1415 for (i = 0; i < MTD_PARAM_MAX_COUNT; i++) 1420 for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
1416 tokens[i] = strsep(&pbuf, ","); 1421 tokens[i] = strsep(&pbuf, ",");
1417 1422
1418 if (pbuf) { 1423 if (pbuf) {
1419 ubi_err("UBI error: too many arguments at \"%s\"\n", val); 1424 ubi_err("too many arguments at \"%s\"\n", val);
1420 return -EINVAL; 1425 return -EINVAL;
1421 } 1426 }
1422 1427
1423 p = &mtd_dev_param[mtd_devs]; 1428 p = &mtd_dev_param[mtd_devs];
1424 strcpy(&p->name[0], tokens[0]); 1429 strcpy(&p->name[0], tokens[0]);
1425 1430
1426 if (tokens[1]) 1431 token = tokens[1];
1427 p->vid_hdr_offs = bytes_str_to_int(tokens[1]); 1432 if (token) {
1433 p->vid_hdr_offs = bytes_str_to_int(token);
1428 1434
1429 if (p->vid_hdr_offs < 0) 1435 if (p->vid_hdr_offs < 0)
1430 return p->vid_hdr_offs; 1436 return p->vid_hdr_offs;
1437 }
1431 1438
1432 if (tokens[2]) { 1439 token = tokens[2];
1433 int err = kstrtoint(tokens[2], 10, &p->max_beb_per1024); 1440 if (token) {
1441 int err = kstrtoint(token, 10, &p->max_beb_per1024);
1434 1442
1435 if (err) { 1443 if (err) {
1436 ubi_err("UBI error: bad value for max_beb_per1024 parameter: %s", 1444 ubi_err("bad value for max_beb_per1024 parameter: %s",
1437 tokens[2]); 1445 token);
1438 return -EINVAL; 1446 return -EINVAL;
1439 } 1447 }
1440 } 1448 }
1441 1449
1450 token = tokens[3];
1451 if (token) {
1452 int err = kstrtoint(token, 10, &p->ubi_num);
1453
1454 if (err) {
1455 ubi_err("bad value for ubi_num parameter: %s", token);
1456 return -EINVAL;
1457 }
1458 } else
1459 p->ubi_num = UBI_DEV_NUM_AUTO;
1460
1442 mtd_devs += 1; 1461 mtd_devs += 1;
1443 return 0; 1462 return 0;
1444 } 1463 }
1445 1464
1446 module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000); 1465 module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
1447 MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024]].\n" 1466 MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024[,ubi_num]]].\n"
1448 "Multiple \"mtd\" parameters may be specified.\n" 1467 "Multiple \"mtd\" parameters may be specified.\n"
1449 "MTD devices may be specified by their number, name, or path to the MTD character device node.\n" 1468 "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
1450 "Optional \"vid_hdr_offs\" parameter specifies UBI VID header position to be used by UBI. (default value if 0)\n" 1469 "Optional \"vid_hdr_offs\" parameter specifies UBI VID header position to be used by UBI. (default value if 0)\n"
1451 "Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value (" 1470 "Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value ("
1452 __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n" 1471 __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
1472 "Optional \"ubi_num\" parameter specifies UBI device number which have to be assigned to the newly created UBI device (assigned automatically by default)\n"
1453 "\n" 1473 "\n"
1454 "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n" 1474 "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
1455 "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n" 1475 "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
1456 "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n" 1476 "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
1477 "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5 and using default values for the other fields.\n"
1457 "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device)."); 1478 "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
1458 #ifdef CONFIG_MTD_UBI_FASTMAP 1479 #ifdef CONFIG_MTD_UBI_FASTMAP
1459 module_param(fm_autoconvert, bool, 0644); 1480 module_param(fm_autoconvert, bool, 0644);
1460 MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap."); 1481 MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
1461 #endif 1482 #endif
1462 MODULE_VERSION(__stringify(UBI_VERSION)); 1483 MODULE_VERSION(__stringify(UBI_VERSION));
1463 MODULE_DESCRIPTION("UBI - Unsorted Block Images"); 1484 MODULE_DESCRIPTION("UBI - Unsorted Block Images");
1464 MODULE_AUTHOR("Artem Bityutskiy"); 1485 MODULE_AUTHOR("Artem Bityutskiy");
1465 MODULE_LICENSE("GPL"); 1486 MODULE_LICENSE("GPL");
1466 1487
drivers/mtd/ubi/fastmap.c
1 /* 1 /*
2 * Copyright (c) 2012 Linutronix GmbH 2 * Copyright (c) 2012 Linutronix GmbH
3 * Author: Richard Weinberger <richard@nod.at> 3 * Author: Richard Weinberger <richard@nod.at>
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2. 7 * the Free Software Foundation; version 2.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, 9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details. 12 * the GNU General Public License for more details.
13 * 13 *
14 */ 14 */
15 15
16 #include <linux/crc32.h> 16 #include <linux/crc32.h>
17 #include "ubi.h" 17 #include "ubi.h"
18 18
19 /** 19 /**
20 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device. 20 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
21 * @ubi: UBI device description object 21 * @ubi: UBI device description object
22 */ 22 */
23 size_t ubi_calc_fm_size(struct ubi_device *ubi) 23 size_t ubi_calc_fm_size(struct ubi_device *ubi)
24 { 24 {
25 size_t size; 25 size_t size;
26 26
27 size = sizeof(struct ubi_fm_hdr) + \ 27 size = sizeof(struct ubi_fm_hdr) + \
28 sizeof(struct ubi_fm_scan_pool) + \ 28 sizeof(struct ubi_fm_scan_pool) + \
29 sizeof(struct ubi_fm_scan_pool) + \ 29 sizeof(struct ubi_fm_scan_pool) + \
30 (ubi->peb_count * sizeof(struct ubi_fm_ec)) + \ 30 (ubi->peb_count * sizeof(struct ubi_fm_ec)) + \
31 (sizeof(struct ubi_fm_eba) + \ 31 (sizeof(struct ubi_fm_eba) + \
32 (ubi->peb_count * sizeof(__be32))) + \ 32 (ubi->peb_count * sizeof(__be32))) + \
33 sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES; 33 sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
34 return roundup(size, ubi->leb_size); 34 return roundup(size, ubi->leb_size);
35 } 35 }
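Schematically, the computation above is

        size    = sizeof(fm_hdr) + 2 * sizeof(fm_scan_pool)
                + peb_count * (sizeof(fm_ec) + sizeof(__be32))
                + sizeof(fm_eba)
                + UBI_MAX_VOLUMES * sizeof(fm_volhdr)
        fm_size = roundup(size, leb_size)

i.e. it grows linearly with the PEB count and is then rounded up to a whole number of LEBs, so on common NAND geometries the fastmap occupies only a few LEBs.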
36 36
37 37
38 /** 38 /**
39 * new_fm_vhdr - allocate a new volume header for fastmap usage. 39 * new_fm_vhdr - allocate a new volume header for fastmap usage.
40 * @ubi: UBI device description object 40 * @ubi: UBI device description object
41 * @vol_id: the VID of the new header 41 * @vol_id: the VID of the new header
42 * 42 *
43 * Returns a new struct ubi_vid_hdr on success. 43 * Returns a new struct ubi_vid_hdr on success.
44 * NULL indicates out of memory. 44 * NULL indicates out of memory.
45 */ 45 */
46 static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id) 46 static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
47 { 47 {
48 struct ubi_vid_hdr *new; 48 struct ubi_vid_hdr *new;
49 49
50 new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); 50 new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
51 if (!new) 51 if (!new)
52 goto out; 52 goto out;
53 53
54 new->vol_type = UBI_VID_DYNAMIC; 54 new->vol_type = UBI_VID_DYNAMIC;
55 new->vol_id = cpu_to_be32(vol_id); 55 new->vol_id = cpu_to_be32(vol_id);
56 56
57 /* UBI implementations without fastmap support have to delete the 57 /* UBI implementations without fastmap support have to delete the
58 * fastmap. 58 * fastmap.
59 */ 59 */
60 new->compat = UBI_COMPAT_DELETE; 60 new->compat = UBI_COMPAT_DELETE;
61 61
62 out: 62 out:
63 return new; 63 return new;
64 } 64 }
65 65
66 /** 66 /**
67 * add_aeb - create and add an attach erase block to a given list. 67 * add_aeb - create and add an attach erase block to a given list.
68 * @ai: UBI attach info object 68 * @ai: UBI attach info object
69 * @list: the target list 69 * @list: the target list
70 * @pnum: PEB number of the new attach erase block 70 * @pnum: PEB number of the new attach erase block
71 * @ec: erase counter of the new PEB 71 * @ec: erase counter of the new PEB
72 * @scrub: scrub this PEB after attaching 72 * @scrub: scrub this PEB after attaching
73 * 73 *
74 * Returns 0 on success, < 0 indicates an internal error. 74 * Returns 0 on success, < 0 indicates an internal error.
75 */ 75 */
76 static int add_aeb(struct ubi_attach_info *ai, struct list_head *list, 76 static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
77 int pnum, int ec, int scrub) 77 int pnum, int ec, int scrub)
78 { 78 {
79 struct ubi_ainf_peb *aeb; 79 struct ubi_ainf_peb *aeb;
80 80
81 aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); 81 aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
82 if (!aeb) 82 if (!aeb)
83 return -ENOMEM; 83 return -ENOMEM;
84 84
85 aeb->pnum = pnum; 85 aeb->pnum = pnum;
86 aeb->ec = ec; 86 aeb->ec = ec;
87 aeb->lnum = -1; 87 aeb->lnum = -1;
88 aeb->scrub = scrub; 88 aeb->scrub = scrub;
89 aeb->copy_flag = aeb->sqnum = 0; 89 aeb->copy_flag = aeb->sqnum = 0;
90 90
91 ai->ec_sum += aeb->ec; 91 ai->ec_sum += aeb->ec;
92 ai->ec_count++; 92 ai->ec_count++;
93 93
94 if (ai->max_ec < aeb->ec) 94 if (ai->max_ec < aeb->ec)
95 ai->max_ec = aeb->ec; 95 ai->max_ec = aeb->ec;
96 96
97 if (ai->min_ec > aeb->ec) 97 if (ai->min_ec > aeb->ec)
98 ai->min_ec = aeb->ec; 98 ai->min_ec = aeb->ec;
99 99
100 list_add_tail(&aeb->u.list, list); 100 list_add_tail(&aeb->u.list, list);
101 101
102 return 0; 102 return 0;
103 } 103 }
104 104
105 /** 105 /**
106 * add_vol - create and add a new volume to ubi_attach_info. 106 * add_vol - create and add a new volume to ubi_attach_info.
107 * @ai: ubi_attach_info object 107 * @ai: ubi_attach_info object
108 * @vol_id: VID of the new volume 108 * @vol_id: VID of the new volume
109 * @used_ebs: number of used EBs 110 * @used_ebs: number of used EBs
110 * @data_pad: data padding value of the new volume 110 * @data_pad: data padding value of the new volume
111 * @vol_type: volume type 111 * @vol_type: volume type
112 * @last_eb_bytes: number of bytes in the last LEB 112 * @last_eb_bytes: number of bytes in the last LEB
113 * 113 *
114 * Returns the new struct ubi_ainf_volume on success. 114 * Returns the new struct ubi_ainf_volume on success.
115 * NULL indicates an error. 115 * NULL indicates an error.
116 */ 116 */
117 static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id, 117 static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
118 int used_ebs, int data_pad, u8 vol_type, 118 int used_ebs, int data_pad, u8 vol_type,
119 int last_eb_bytes) 119 int last_eb_bytes)
120 { 120 {
121 struct ubi_ainf_volume *av; 121 struct ubi_ainf_volume *av;
122 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL; 122 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
123 123
124 while (*p) { 124 while (*p) {
125 parent = *p; 125 parent = *p;
126 av = rb_entry(parent, struct ubi_ainf_volume, rb); 126 av = rb_entry(parent, struct ubi_ainf_volume, rb);
127 127
128 if (vol_id > av->vol_id) 128 if (vol_id > av->vol_id)
129 p = &(*p)->rb_left; 129 p = &(*p)->rb_left;
130 else if (vol_id < av->vol_id) 130 else if (vol_id < av->vol_id)
131 p = &(*p)->rb_right; 131 p = &(*p)->rb_right;
132 } 132 }
133 133
134 av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL); 134 av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
135 if (!av) 135 if (!av)
136 goto out; 136 goto out;
137 137
138 av->highest_lnum = av->leb_count = 0; 138 av->highest_lnum = av->leb_count = 0;
139 av->vol_id = vol_id; 139 av->vol_id = vol_id;
140 av->used_ebs = used_ebs; 140 av->used_ebs = used_ebs;
141 av->data_pad = data_pad; 141 av->data_pad = data_pad;
142 av->last_data_size = last_eb_bytes; 142 av->last_data_size = last_eb_bytes;
143 av->compat = 0; 143 av->compat = 0;
144 av->vol_type = vol_type; 144 av->vol_type = vol_type;
145 av->root = RB_ROOT; 145 av->root = RB_ROOT;
146 146
147 dbg_bld("found volume (ID %i)", vol_id); 147 dbg_bld("found volume (ID %i)", vol_id);
148 148
149 rb_link_node(&av->rb, parent, p); 149 rb_link_node(&av->rb, parent, p);
150 rb_insert_color(&av->rb, &ai->volumes); 150 rb_insert_color(&av->rb, &ai->volumes);
151 151
152 out: 152 out:
153 return av; 153 return av;
154 } 154 }
155 155
156 /** 156 /**
157 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it 157 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
158 * from its original list. 158 * from its original list.
159 * @ai: ubi_attach_info object 159 * @ai: ubi_attach_info object
160 * @aeb: the to be assigned SEB 160 * @aeb: the to be assigned SEB
161 * @av: target scan volume 161 * @av: target scan volume
162 */ 162 */
163 static void assign_aeb_to_av(struct ubi_attach_info *ai, 163 static void assign_aeb_to_av(struct ubi_attach_info *ai,
164 struct ubi_ainf_peb *aeb, 164 struct ubi_ainf_peb *aeb,
165 struct ubi_ainf_volume *av) 165 struct ubi_ainf_volume *av)
166 { 166 {
167 struct ubi_ainf_peb *tmp_aeb; 167 struct ubi_ainf_peb *tmp_aeb;
168 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL; 168 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
169 169
170 p = &av->root.rb_node; 170 p = &av->root.rb_node;
171 while (*p) { 171 while (*p) {
172 parent = *p; 172 parent = *p;
173 173
174 tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb); 174 tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
175 if (aeb->lnum != tmp_aeb->lnum) { 175 if (aeb->lnum != tmp_aeb->lnum) {
176 if (aeb->lnum < tmp_aeb->lnum) 176 if (aeb->lnum < tmp_aeb->lnum)
177 p = &(*p)->rb_left; 177 p = &(*p)->rb_left;
178 else 178 else
179 p = &(*p)->rb_right; 179 p = &(*p)->rb_right;
180 180
181 continue; 181 continue;
182 } else 182 } else
183 break; 183 break;
184 } 184 }
185 185
186 list_del(&aeb->u.list); 186 list_del(&aeb->u.list);
187 av->leb_count++; 187 av->leb_count++;
188 188
189 rb_link_node(&aeb->u.rb, parent, p); 189 rb_link_node(&aeb->u.rb, parent, p);
190 rb_insert_color(&aeb->u.rb, &av->root); 190 rb_insert_color(&aeb->u.rb, &av->root);
191 } 191 }
192 192
193 /** 193 /**
194 * update_vol - inserts or updates a LEB which was found in a pool. 194 * update_vol - inserts or updates a LEB which was found in a pool.
195 * @ubi: the UBI device object 195 * @ubi: the UBI device object
196 * @ai: attach info object 196 * @ai: attach info object
197 * @av: the volume this LEB belongs to 197 * @av: the volume this LEB belongs to
198 * @new_vh: the volume header derived from new_aeb 198 * @new_vh: the volume header derived from new_aeb
199 * @new_aeb: the AEB to be examined 199 * @new_aeb: the AEB to be examined
200 * 200 *
201 * Returns 0 on success, < 0 indicates an internal error. 201 * Returns 0 on success, < 0 indicates an internal error.
202 */ 202 */
203 static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai, 203 static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
204 struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh, 204 struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
205 struct ubi_ainf_peb *new_aeb) 205 struct ubi_ainf_peb *new_aeb)
206 { 206 {
207 struct rb_node **p = &av->root.rb_node, *parent = NULL; 207 struct rb_node **p = &av->root.rb_node, *parent = NULL;
208 struct ubi_ainf_peb *aeb, *victim; 208 struct ubi_ainf_peb *aeb, *victim;
209 int cmp_res; 209 int cmp_res;
210 210
211 while (*p) { 211 while (*p) {
212 parent = *p; 212 parent = *p;
213 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb); 213 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
214 214
215 if (be32_to_cpu(new_vh->lnum) != aeb->lnum) { 215 if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
216 if (be32_to_cpu(new_vh->lnum) < aeb->lnum) 216 if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
217 p = &(*p)->rb_left; 217 p = &(*p)->rb_left;
218 else 218 else
219 p = &(*p)->rb_right; 219 p = &(*p)->rb_right;
220 220
221 continue; 221 continue;
222 } 222 }
223 223
224 /* This case can happen if the fastmap gets written 224 /* This case can happen if the fastmap gets written
225 * because of a volume change (creation, deletion, ..). 225 * because of a volume change (creation, deletion, ..).
226 * Then a PEB can be within the persistent EBA and the pool. 226 * Then a PEB can be within the persistent EBA and the pool.
227 */ 227 */
228 if (aeb->pnum == new_aeb->pnum) { 228 if (aeb->pnum == new_aeb->pnum) {
229 ubi_assert(aeb->lnum == new_aeb->lnum); 229 ubi_assert(aeb->lnum == new_aeb->lnum);
230 kmem_cache_free(ai->aeb_slab_cache, new_aeb); 230 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
231 231
232 return 0; 232 return 0;
233 } 233 }
234 234
235 cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh); 235 cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
236 if (cmp_res < 0) 236 if (cmp_res < 0)
237 return cmp_res; 237 return cmp_res;
238 238
239 /* new_aeb is newer */ 239 /* new_aeb is newer */
240 if (cmp_res & 1) { 240 if (cmp_res & 1) {
241 victim = kmem_cache_alloc(ai->aeb_slab_cache, 241 victim = kmem_cache_alloc(ai->aeb_slab_cache,
242 GFP_KERNEL); 242 GFP_KERNEL);
243 if (!victim) 243 if (!victim)
244 return -ENOMEM; 244 return -ENOMEM;
245 245
246 victim->ec = aeb->ec; 246 victim->ec = aeb->ec;
247 victim->pnum = aeb->pnum; 247 victim->pnum = aeb->pnum;
248 list_add_tail(&victim->u.list, &ai->erase); 248 list_add_tail(&victim->u.list, &ai->erase);
249 249
250 if (av->highest_lnum == be32_to_cpu(new_vh->lnum)) 250 if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
251 av->last_data_size = \ 251 av->last_data_size = \
252 be32_to_cpu(new_vh->data_size); 252 be32_to_cpu(new_vh->data_size);
253 253
254 dbg_bld("vol %i: AEB %i's PEB %i is the newer", 254 dbg_bld("vol %i: AEB %i's PEB %i is the newer",
255 av->vol_id, aeb->lnum, new_aeb->pnum); 255 av->vol_id, aeb->lnum, new_aeb->pnum);
256 256
257 aeb->ec = new_aeb->ec; 257 aeb->ec = new_aeb->ec;
258 aeb->pnum = new_aeb->pnum; 258 aeb->pnum = new_aeb->pnum;
259 aeb->copy_flag = new_vh->copy_flag; 259 aeb->copy_flag = new_vh->copy_flag;
260 aeb->scrub = new_aeb->scrub; 260 aeb->scrub = new_aeb->scrub;
261 kmem_cache_free(ai->aeb_slab_cache, new_aeb); 261 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
262 262
263 /* new_aeb is older */ 263 /* new_aeb is older */
264 } else { 264 } else {
265 dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it", 265 dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
266 av->vol_id, aeb->lnum, new_aeb->pnum); 266 av->vol_id, aeb->lnum, new_aeb->pnum);
267 list_add_tail(&new_aeb->u.list, &ai->erase); 267 list_add_tail(&new_aeb->u.list, &ai->erase);
268 } 268 }
269 269
270 return 0; 270 return 0;
271 } 271 }
272 /* This LEB is new, let's add it to the volume */ 272 /* This LEB is new, let's add it to the volume */
273 273
274 if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) { 274 if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
275 av->highest_lnum = be32_to_cpu(new_vh->lnum); 275 av->highest_lnum = be32_to_cpu(new_vh->lnum);
276 av->last_data_size = be32_to_cpu(new_vh->data_size); 276 av->last_data_size = be32_to_cpu(new_vh->data_size);
277 } 277 }
278 278
279 if (av->vol_type == UBI_STATIC_VOLUME) 279 if (av->vol_type == UBI_STATIC_VOLUME)
280 av->used_ebs = be32_to_cpu(new_vh->used_ebs); 280 av->used_ebs = be32_to_cpu(new_vh->used_ebs);
281 281
282 av->leb_count++; 282 av->leb_count++;
283 283
284 rb_link_node(&new_aeb->u.rb, parent, p); 284 rb_link_node(&new_aeb->u.rb, parent, p);
285 rb_insert_color(&new_aeb->u.rb, &av->root); 285 rb_insert_color(&new_aeb->u.rb, &av->root);
286 286
287 return 0; 287 return 0;
288 } 288 }
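For reference, ubi_compare_lebs() encodes its verdict as a bitmask: bit 0 set means the second PEB (new_aeb here) holds the newer copy, and bit 1 flags bit-flips detected in the newer copy; the logic above only needs bit 0.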
289 289
290 /** 290 /**
291 * process_pool_aeb - we found a non-empty PEB in a pool. 291 * process_pool_aeb - we found a non-empty PEB in a pool.
292 * @ubi: UBI device object 292 * @ubi: UBI device object
293 * @ai: attach info object 293 * @ai: attach info object
294 * @new_vh: the volume header derived from new_aeb 294 * @new_vh: the volume header derived from new_aeb
295 * @new_aeb: the AEB to be examined 295 * @new_aeb: the AEB to be examined
296 * 296 *
297 * Returns 0 on success, < 0 indicates an internal error. 297 * Returns 0 on success, < 0 indicates an internal error.
298 */ 298 */
299 static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai, 299 static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
300 struct ubi_vid_hdr *new_vh, 300 struct ubi_vid_hdr *new_vh,
301 struct ubi_ainf_peb *new_aeb) 301 struct ubi_ainf_peb *new_aeb)
302 { 302 {
303 struct ubi_ainf_volume *av, *tmp_av = NULL; 303 struct ubi_ainf_volume *av, *tmp_av = NULL;
304 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL; 304 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
305 int found = 0; 305 int found = 0;
306 306
307 if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID || 307 if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
308 be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) { 308 be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
309 kmem_cache_free(ai->aeb_slab_cache, new_aeb); 309 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
310 310
311 return 0; 311 return 0;
312 } 312 }
313 313
314 /* Find the volume this AEB belongs to */ 314 /* Find the volume this AEB belongs to */
315 while (*p) { 315 while (*p) {
316 parent = *p; 316 parent = *p;
317 tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb); 317 tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
318 318
319 if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id) 319 if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
320 p = &(*p)->rb_left; 320 p = &(*p)->rb_left;
321 else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id) 321 else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
322 p = &(*p)->rb_right; 322 p = &(*p)->rb_right;
323 else { 323 else {
324 found = 1; 324 found = 1;
325 break; 325 break;
326 } 326 }
327 } 327 }
328 328
329 if (found) 329 if (found)
330 av = tmp_av; 330 av = tmp_av;
331 else { 331 else {
332 ubi_err("orphaned volume in fastmap pool!"); 332 ubi_err("orphaned volume in fastmap pool!");
333 return UBI_BAD_FASTMAP; 333 return UBI_BAD_FASTMAP;
334 } 334 }
335 335
336 ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id); 336 ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
337 337
338 return update_vol(ubi, ai, av, new_vh, new_aeb); 338 return update_vol(ubi, ai, av, new_vh, new_aeb);
339 } 339 }
340 340
341 /** 341 /**
342 * unmap_peb - unmap a PEB. 342 * unmap_peb - unmap a PEB.
343 * @ai: UBI attach info object 343 * @ai: UBI attach info object
344 * @pnum: the PEB to be unmapped 344 * @pnum: the PEB to be unmapped
345 * 345 *
346 * If fastmap detects a free PEB in the pool, it has to check whether 346 * If fastmap detects a free PEB in the pool, it has to check whether
347 * this PEB has been unmapped after writing the fastmap. 347 * this PEB has been unmapped after writing the fastmap.
348 */ 348 */
349 static void unmap_peb(struct ubi_attach_info *ai, int pnum) 349 static void unmap_peb(struct ubi_attach_info *ai, int pnum)
350 { 350 {
351 struct ubi_ainf_volume *av; 351 struct ubi_ainf_volume *av;
352 struct rb_node *node, *node2; 352 struct rb_node *node, *node2;
353 struct ubi_ainf_peb *aeb; 353 struct ubi_ainf_peb *aeb;
354 354
355 for (node = rb_first(&ai->volumes); node; node = rb_next(node)) { 355 for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
356 av = rb_entry(node, struct ubi_ainf_volume, rb); 356 av = rb_entry(node, struct ubi_ainf_volume, rb);
357 357
358 for (node2 = rb_first(&av->root); node2; 358 for (node2 = rb_first(&av->root); node2;
359 node2 = rb_next(node2)) { 359 node2 = rb_next(node2)) {
360 aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb); 360 aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
361 if (aeb->pnum == pnum) { 361 if (aeb->pnum == pnum) {
362 rb_erase(&aeb->u.rb, &av->root); 362 rb_erase(&aeb->u.rb, &av->root);
363 kmem_cache_free(ai->aeb_slab_cache, aeb); 363 kmem_cache_free(ai->aeb_slab_cache, aeb);
364 return; 364 return;
365 } 365 }
366 } 366 }
367 } 367 }
368 } 368 }
369 369
370 /** 370 /**
371 * scan_pool - scans a pool for changed (no longer empty) PEBs. 371 * scan_pool - scans a pool for changed (no longer empty) PEBs.
372 * @ubi: UBI device object 372 * @ubi: UBI device object
373 * @ai: attach info object 373 * @ai: attach info object
374 * @pebs: an array of all PEB numbers in the pool to be scanned 374 * @pebs: an array of all PEB numbers in the pool to be scanned
375 * @pool_size: size of the pool (number of entries in @pebs) 375 * @pool_size: size of the pool (number of entries in @pebs)
376 * @max_sqnum: pointer to the maximal sequence number 376 * @max_sqnum: pointer to the maximal sequence number
377 * @eba_orphans: list of PEBs which need to be scanned 377 * @eba_orphans: list of PEBs which need to be scanned
378 * @free: list of PEBs which are most likely free (and go into @ai->free) 378 * @free: list of PEBs which are most likely free (and go into @ai->free)
379 * 379 *
380 * Returns 0 on success; if the pool is unusable, UBI_BAD_FASTMAP is returned. 380 * Returns 0 on success; if the pool is unusable, UBI_BAD_FASTMAP is returned.
381 * < 0 indicates an internal error. 381 * < 0 indicates an internal error.
382 */ 382 */
383 static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, 383 static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
384 int *pebs, int pool_size, unsigned long long *max_sqnum, 384 int *pebs, int pool_size, unsigned long long *max_sqnum,
385 struct list_head *eba_orphans, struct list_head *free) 385 struct list_head *eba_orphans, struct list_head *free)
386 { 386 {
387 struct ubi_vid_hdr *vh; 387 struct ubi_vid_hdr *vh;
388 struct ubi_ec_hdr *ech; 388 struct ubi_ec_hdr *ech;
389 struct ubi_ainf_peb *new_aeb, *tmp_aeb; 389 struct ubi_ainf_peb *new_aeb, *tmp_aeb;
390 int i, pnum, err, found_orphan, ret = 0; 390 int i, pnum, err, found_orphan, ret = 0;
391 391
392 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); 392 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
393 if (!ech) 393 if (!ech)
394 return -ENOMEM; 394 return -ENOMEM;
395 395
396 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); 396 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
397 if (!vh) { 397 if (!vh) {
398 kfree(ech); 398 kfree(ech);
399 return -ENOMEM; 399 return -ENOMEM;
400 } 400 }
401 401
402 dbg_bld("scanning fastmap pool: size = %i", pool_size); 402 dbg_bld("scanning fastmap pool: size = %i", pool_size);
403 403
404 /* 404 /*
405 * Now scan all PEBs in the pool to find changes which have been made 405 * Now scan all PEBs in the pool to find changes which have been made
406 * after the creation of the fastmap 406 * after the creation of the fastmap
407 */ 407 */
408 for (i = 0; i < pool_size; i++) { 408 for (i = 0; i < pool_size; i++) {
409 int scrub = 0; 409 int scrub = 0;
410 410
411 pnum = be32_to_cpu(pebs[i]); 411 pnum = be32_to_cpu(pebs[i]);
412 412
413 if (ubi_io_is_bad(ubi, pnum)) { 413 if (ubi_io_is_bad(ubi, pnum)) {
414 ubi_err("bad PEB in fastmap pool!"); 414 ubi_err("bad PEB in fastmap pool!");
415 ret = UBI_BAD_FASTMAP; 415 ret = UBI_BAD_FASTMAP;
416 goto out; 416 goto out;
417 } 417 }
418 418
419 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); 419 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
420 if (err && err != UBI_IO_BITFLIPS) { 420 if (err && err != UBI_IO_BITFLIPS) {
421 ubi_err("unable to read EC header! PEB:%i err:%i", 421 ubi_err("unable to read EC header! PEB:%i err:%i",
422 pnum, err); 422 pnum, err);
423 ret = err > 0 ? UBI_BAD_FASTMAP : err; 423 ret = err > 0 ? UBI_BAD_FASTMAP : err;
424 goto out; 424 goto out;
425 } else if (err == UBI_IO_BITFLIPS) 425 } else if (err == UBI_IO_BITFLIPS)
426 scrub = 1; 426 scrub = 1;
427 427
428 if (be32_to_cpu(ech->image_seq) != ubi->image_seq) { 428 if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
429 ubi_err("bad image seq: 0x%x, expected: 0x%x", 429 ubi_err("bad image seq: 0x%x, expected: 0x%x",
430 be32_to_cpu(ech->image_seq), ubi->image_seq); 430 be32_to_cpu(ech->image_seq), ubi->image_seq);
431 ret = UBI_BAD_FASTMAP; 431 ret = UBI_BAD_FASTMAP;
432 goto out; 432 goto out;
433 } 433 }
434 434
435 err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); 435 err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
436 if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) { 436 if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
437 unsigned long long ec = be64_to_cpu(ech->ec); 437 unsigned long long ec = be64_to_cpu(ech->ec);
438 unmap_peb(ai, pnum); 438 unmap_peb(ai, pnum);
439 dbg_bld("Adding PEB to free: %i", pnum); 439 dbg_bld("Adding PEB to free: %i", pnum);
440 if (err == UBI_IO_FF_BITFLIPS) 440 if (err == UBI_IO_FF_BITFLIPS)
441 add_aeb(ai, free, pnum, ec, 1); 441 add_aeb(ai, free, pnum, ec, 1);
442 else 442 else
443 add_aeb(ai, free, pnum, ec, 0); 443 add_aeb(ai, free, pnum, ec, 0);
444 continue; 444 continue;
445 } else if (err == 0 || err == UBI_IO_BITFLIPS) { 445 } else if (err == 0 || err == UBI_IO_BITFLIPS) {
446 dbg_bld("Found non empty PEB:%i in pool", pnum); 446 dbg_bld("Found non empty PEB:%i in pool", pnum);
447 447
448 if (err == UBI_IO_BITFLIPS) 448 if (err == UBI_IO_BITFLIPS)
449 scrub = 1; 449 scrub = 1;
450 450
451 found_orphan = 0; 451 found_orphan = 0;
452 list_for_each_entry(tmp_aeb, eba_orphans, u.list) { 452 list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
453 if (tmp_aeb->pnum == pnum) { 453 if (tmp_aeb->pnum == pnum) {
454 found_orphan = 1; 454 found_orphan = 1;
455 break; 455 break;
456 } 456 }
457 } 457 }
458 if (found_orphan) { 458 if (found_orphan) {
459 list_del(&tmp_aeb->u.list); 459 list_del(&tmp_aeb->u.list);
460 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); 460 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
461 } 461 }
462 462
463 new_aeb = kmem_cache_alloc(ai->aeb_slab_cache, 463 new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
464 GFP_KERNEL); 464 GFP_KERNEL);
465 if (!new_aeb) { 465 if (!new_aeb) {
466 ret = -ENOMEM; 466 ret = -ENOMEM;
467 goto out; 467 goto out;
468 } 468 }
469 469
470 new_aeb->ec = be64_to_cpu(ech->ec); 470 new_aeb->ec = be64_to_cpu(ech->ec);
471 new_aeb->pnum = pnum; 471 new_aeb->pnum = pnum;
472 new_aeb->lnum = be32_to_cpu(vh->lnum); 472 new_aeb->lnum = be32_to_cpu(vh->lnum);
473 new_aeb->sqnum = be64_to_cpu(vh->sqnum); 473 new_aeb->sqnum = be64_to_cpu(vh->sqnum);
474 new_aeb->copy_flag = vh->copy_flag; 474 new_aeb->copy_flag = vh->copy_flag;
475 new_aeb->scrub = scrub; 475 new_aeb->scrub = scrub;
476 476
477 if (*max_sqnum < new_aeb->sqnum) 477 if (*max_sqnum < new_aeb->sqnum)
478 *max_sqnum = new_aeb->sqnum; 478 *max_sqnum = new_aeb->sqnum;
479 479
480 err = process_pool_aeb(ubi, ai, vh, new_aeb); 480 err = process_pool_aeb(ubi, ai, vh, new_aeb);
481 if (err) { 481 if (err) {
482 ret = err > 0 ? UBI_BAD_FASTMAP : err; 482 ret = err > 0 ? UBI_BAD_FASTMAP : err;
483 goto out; 483 goto out;
484 } 484 }
485 } else { 485 } else {
486 /* We are paranoid and fall back to scanning mode */ 486 /* We are paranoid and fall back to scanning mode */
487 ubi_err("fastmap pool PEBs contains damaged PEBs!"); 487 ubi_err("fastmap pool PEBs contains damaged PEBs!");
488 ret = err > 0 ? UBI_BAD_FASTMAP : err; 488 ret = err > 0 ? UBI_BAD_FASTMAP : err;
489 goto out; 489 goto out;
490 } 490 }
491 491
492 } 492 }
493 493
494 out: 494 out:
495 ubi_free_vid_hdr(ubi, vh); 495 ubi_free_vid_hdr(ubi, vh);
496 kfree(ech); 496 kfree(ech);
497 return ret; 497 return ret;
498 } 498 }
499 499
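scan_pool() folds the two error domains used throughout this file into a single return value: positive codes such as UBI_BAD_FASTMAP mean "the fastmap is unusable, fall back to a full scan", while negative values are ordinary kernel errnos that must be passed through untouched. A standalone sketch of the recurring `ret = err > 0 ? UBI_BAD_FASTMAP : err;` pattern (the constant's value here is a placeholder, not the real one):

	#include <stdio.h>

	/* Mirrors the convention above: 0 = success, positive values are
	 * UBI status codes, negative values are kernel errnos. The real
	 * UBI_BAD_FASTMAP lives in the UBI headers; 1 is a placeholder. */
	#define UBI_BAD_FASTMAP	1

	/* Collapse any positive status from a helper into UBI_BAD_FASTMAP
	 * (forcing the caller back to scanning) while passing genuine
	 * errnos through untouched. */
	static int classify(int err)
	{
		if (!err)
			return 0;
		return err > 0 ? UBI_BAD_FASTMAP : err;
	}

	int main(void)
	{
		/* prints: 0 1 -12 */
		printf("%d %d %d\n", classify(0), classify(5), classify(-12));
		return 0;
	}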
500 /** 500 /**
501 * count_fastmap_pebs - Counts the PEBs found by fastmap. 501 * count_fastmap_pebs - Counts the PEBs found by fastmap.
502 * @ai: The UBI attach info object 502 * @ai: The UBI attach info object
503 */ 503 */
504 static int count_fastmap_pebs(struct ubi_attach_info *ai) 504 static int count_fastmap_pebs(struct ubi_attach_info *ai)
505 { 505 {
506 struct ubi_ainf_peb *aeb; 506 struct ubi_ainf_peb *aeb;
507 struct ubi_ainf_volume *av; 507 struct ubi_ainf_volume *av;
508 struct rb_node *rb1, *rb2; 508 struct rb_node *rb1, *rb2;
509 int n = 0; 509 int n = 0;
510 510
511 list_for_each_entry(aeb, &ai->erase, u.list) 511 list_for_each_entry(aeb, &ai->erase, u.list)
512 n++; 512 n++;
513 513
514 list_for_each_entry(aeb, &ai->free, u.list) 514 list_for_each_entry(aeb, &ai->free, u.list)
515 n++; 515 n++;
516 516
517 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) 517 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
518 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) 518 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
519 n++; 519 n++;
520 520
521 return n; 521 return n;
522 } 522 }
523 523
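count_fastmap_pebs() exists to feed the sanity check at the end of ubi_attach_fastmap() below: every PEB on the device must be either bad, occupied by the fastmap itself, or present in one of the attach lists and trees. The arithmetic in isolation, with made-up numbers:

	#include <stdio.h>
	#include <assert.h>

	int main(void)
	{
		int peb_count = 1024;	/* PEBs on the MTD device */
		int bad_peb_count = 3;	/* known bad blocks */
		int fm_used_blocks = 2;	/* PEBs holding the fastmap */
		int counted = 1019;	/* what count_fastmap_pebs() found */

		/* the WARN_ON() below fires when this does not hold */
		assert(counted == peb_count - bad_peb_count - fm_used_blocks);
		printf("fastmap accounting is consistent\n");
		return 0;
	}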
524 /** 524 /**
525 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap. 525 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
526 * @ubi: UBI device object 526 * @ubi: UBI device object
527 * @ai: UBI attach info object 527 * @ai: UBI attach info object
528 * @fm: the fastmap to be attached 528 * @fm: the fastmap to be attached
529 * 529 *
530 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable. 530 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
531 * < 0 indicates an internal error. 531 * < 0 indicates an internal error.
532 */ 532 */
533 static int ubi_attach_fastmap(struct ubi_device *ubi, 533 static int ubi_attach_fastmap(struct ubi_device *ubi,
534 struct ubi_attach_info *ai, 534 struct ubi_attach_info *ai,
535 struct ubi_fastmap_layout *fm) 535 struct ubi_fastmap_layout *fm)
536 { 536 {
537 struct list_head used, eba_orphans, free; 537 struct list_head used, eba_orphans, free;
538 struct ubi_ainf_volume *av; 538 struct ubi_ainf_volume *av;
539 struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb; 539 struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
540 struct ubi_ec_hdr *ech; 540 struct ubi_ec_hdr *ech;
541 struct ubi_fm_sb *fmsb; 541 struct ubi_fm_sb *fmsb;
542 struct ubi_fm_hdr *fmhdr; 542 struct ubi_fm_hdr *fmhdr;
543 struct ubi_fm_scan_pool *fmpl1, *fmpl2; 543 struct ubi_fm_scan_pool *fmpl1, *fmpl2;
544 struct ubi_fm_ec *fmec; 544 struct ubi_fm_ec *fmec;
545 struct ubi_fm_volhdr *fmvhdr; 545 struct ubi_fm_volhdr *fmvhdr;
546 struct ubi_fm_eba *fm_eba; 546 struct ubi_fm_eba *fm_eba;
547 int ret, i, j, pool_size, wl_pool_size; 547 int ret, i, j, pool_size, wl_pool_size;
548 size_t fm_pos = 0, fm_size = ubi->fm_size; 548 size_t fm_pos = 0, fm_size = ubi->fm_size;
549 unsigned long long max_sqnum = 0; 549 unsigned long long max_sqnum = 0;
550 void *fm_raw = ubi->fm_buf; 550 void *fm_raw = ubi->fm_buf;
551 551
552 INIT_LIST_HEAD(&used); 552 INIT_LIST_HEAD(&used);
553 INIT_LIST_HEAD(&free); 553 INIT_LIST_HEAD(&free);
554 INIT_LIST_HEAD(&eba_orphans); 554 INIT_LIST_HEAD(&eba_orphans);
555 INIT_LIST_HEAD(&ai->corr); 555 INIT_LIST_HEAD(&ai->corr);
556 INIT_LIST_HEAD(&ai->free); 556 INIT_LIST_HEAD(&ai->free);
557 INIT_LIST_HEAD(&ai->erase); 557 INIT_LIST_HEAD(&ai->erase);
558 INIT_LIST_HEAD(&ai->alien); 558 INIT_LIST_HEAD(&ai->alien);
559 ai->volumes = RB_ROOT; 559 ai->volumes = RB_ROOT;
560 ai->min_ec = UBI_MAX_ERASECOUNTER; 560 ai->min_ec = UBI_MAX_ERASECOUNTER;
561 561
562 ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab", 562 ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
563 sizeof(struct ubi_ainf_peb), 563 sizeof(struct ubi_ainf_peb),
564 0, 0, NULL); 564 0, 0, NULL);
565 if (!ai->aeb_slab_cache) { 565 if (!ai->aeb_slab_cache) {
566 ret = -ENOMEM; 566 ret = -ENOMEM;
567 goto fail; 567 goto fail;
568 } 568 }
569 569
570 fmsb = (struct ubi_fm_sb *)(fm_raw); 570 fmsb = (struct ubi_fm_sb *)(fm_raw);
571 ai->max_sqnum = fmsb->sqnum; 571 ai->max_sqnum = fmsb->sqnum;
572 fm_pos += sizeof(struct ubi_fm_sb); 572 fm_pos += sizeof(struct ubi_fm_sb);
573 if (fm_pos >= fm_size) 573 if (fm_pos >= fm_size)
574 goto fail_bad; 574 goto fail_bad;
575 575
576 fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos); 576 fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
577 fm_pos += sizeof(*fmhdr); 577 fm_pos += sizeof(*fmhdr);
578 if (fm_pos >= fm_size) 578 if (fm_pos >= fm_size)
579 goto fail_bad; 579 goto fail_bad;
580 580
581 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) { 581 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
582 ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x", 582 ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
583 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC); 583 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
584 goto fail_bad; 584 goto fail_bad;
585 } 585 }
586 586
587 fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos); 587 fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
588 fm_pos += sizeof(*fmpl1); 588 fm_pos += sizeof(*fmpl1);
589 if (fm_pos >= fm_size) 589 if (fm_pos >= fm_size)
590 goto fail_bad; 590 goto fail_bad;
591 if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) { 591 if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
592 ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x", 592 ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
593 be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC); 593 be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
594 goto fail_bad; 594 goto fail_bad;
595 } 595 }
596 596
597 fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos); 597 fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
598 fm_pos += sizeof(*fmpl2); 598 fm_pos += sizeof(*fmpl2);
599 if (fm_pos >= fm_size) 599 if (fm_pos >= fm_size)
600 goto fail_bad; 600 goto fail_bad;
601 if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) { 601 if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
602 ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x", 602 ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
603 be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC); 603 be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
604 goto fail_bad; 604 goto fail_bad;
605 } 605 }
606 606
607 pool_size = be16_to_cpu(fmpl1->size); 607 pool_size = be16_to_cpu(fmpl1->size);
608 wl_pool_size = be16_to_cpu(fmpl2->size); 608 wl_pool_size = be16_to_cpu(fmpl2->size);
609 fm->max_pool_size = be16_to_cpu(fmpl1->max_size); 609 fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
610 fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size); 610 fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
611 611
612 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) { 612 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
613 ubi_err("bad pool size: %i", pool_size); 613 ubi_err("bad pool size: %i", pool_size);
614 goto fail_bad; 614 goto fail_bad;
615 } 615 }
616 616
617 if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) { 617 if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
618 ubi_err("bad WL pool size: %i", wl_pool_size); 618 ubi_err("bad WL pool size: %i", wl_pool_size);
619 goto fail_bad; 619 goto fail_bad;
620 } 620 }
621 621
622 622
623 if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE || 623 if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
624 fm->max_pool_size < 0) { 624 fm->max_pool_size < 0) {
625 ubi_err("bad maximal pool size: %i", fm->max_pool_size); 625 ubi_err("bad maximal pool size: %i", fm->max_pool_size);
626 goto fail_bad; 626 goto fail_bad;
627 } 627 }
628 628
629 if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE || 629 if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
630 fm->max_wl_pool_size < 0) { 630 fm->max_wl_pool_size < 0) {
631 ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size); 631 ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
632 goto fail_bad; 632 goto fail_bad;
633 } 633 }
634 634
635 /* read EC values from free list */ 635 /* read EC values from free list */
636 for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) { 636 for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
637 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); 637 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
638 fm_pos += sizeof(*fmec); 638 fm_pos += sizeof(*fmec);
639 if (fm_pos >= fm_size) 639 if (fm_pos >= fm_size)
640 goto fail_bad; 640 goto fail_bad;
641 641
642 add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum), 642 add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
643 be32_to_cpu(fmec->ec), 0); 643 be32_to_cpu(fmec->ec), 0);
644 } 644 }
645 645
646 /* read EC values from used list */ 646 /* read EC values from used list */
647 for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) { 647 for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
648 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); 648 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
649 fm_pos += sizeof(*fmec); 649 fm_pos += sizeof(*fmec);
650 if (fm_pos >= fm_size) 650 if (fm_pos >= fm_size)
651 goto fail_bad; 651 goto fail_bad;
652 652
653 add_aeb(ai, &used, be32_to_cpu(fmec->pnum), 653 add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
654 be32_to_cpu(fmec->ec), 0); 654 be32_to_cpu(fmec->ec), 0);
655 } 655 }
656 656
657 /* read EC values from scrub list */ 657 /* read EC values from scrub list */
658 for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) { 658 for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
659 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); 659 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
660 fm_pos += sizeof(*fmec); 660 fm_pos += sizeof(*fmec);
661 if (fm_pos >= fm_size) 661 if (fm_pos >= fm_size)
662 goto fail_bad; 662 goto fail_bad;
663 663
664 add_aeb(ai, &used, be32_to_cpu(fmec->pnum), 664 add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
665 be32_to_cpu(fmec->ec), 1); 665 be32_to_cpu(fmec->ec), 1);
666 } 666 }
667 667
668 /* read EC values from erase list */ 668 /* read EC values from erase list */
669 for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) { 669 for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
670 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); 670 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
671 fm_pos += sizeof(*fmec); 671 fm_pos += sizeof(*fmec);
672 if (fm_pos >= fm_size) 672 if (fm_pos >= fm_size)
673 goto fail_bad; 673 goto fail_bad;
674 674
675 add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum), 675 add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
676 be32_to_cpu(fmec->ec), 1); 676 be32_to_cpu(fmec->ec), 1);
677 } 677 }
678 678
679 ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count); 679 ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
680 ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count); 680 ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
681 681
682 /* Iterate over all volumes and read their EBA table */ 682 /* Iterate over all volumes and read their EBA table */
683 for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) { 683 for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
684 fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos); 684 fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
685 fm_pos += sizeof(*fmvhdr); 685 fm_pos += sizeof(*fmvhdr);
686 if (fm_pos >= fm_size) 686 if (fm_pos >= fm_size)
687 goto fail_bad; 687 goto fail_bad;
688 688
689 if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) { 689 if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
690 ubi_err("bad fastmap vol header magic: 0x%x, " \ 690 ubi_err("bad fastmap vol header magic: 0x%x, " \
691 "expected: 0x%x", 691 "expected: 0x%x",
692 be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC); 692 be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
693 goto fail_bad; 693 goto fail_bad;
694 } 694 }
695 695
696 av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id), 696 av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
697 be32_to_cpu(fmvhdr->used_ebs), 697 be32_to_cpu(fmvhdr->used_ebs),
698 be32_to_cpu(fmvhdr->data_pad), 698 be32_to_cpu(fmvhdr->data_pad),
699 fmvhdr->vol_type, 699 fmvhdr->vol_type,
700 be32_to_cpu(fmvhdr->last_eb_bytes)); 700 be32_to_cpu(fmvhdr->last_eb_bytes));
701 701
702 if (!av) 702 if (!av)
703 goto fail_bad; 703 goto fail_bad;
704 704
705 ai->vols_found++; 705 ai->vols_found++;
706 if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id)) 706 if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
707 ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id); 707 ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
708 708
709 fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos); 709 fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
710 fm_pos += sizeof(*fm_eba); 710 fm_pos += sizeof(*fm_eba);
711 fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs)); 711 fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
712 if (fm_pos >= fm_size) 712 if (fm_pos >= fm_size)
713 goto fail_bad; 713 goto fail_bad;
714 714
715 if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) { 715 if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
716 ubi_err("bad fastmap EBA header magic: 0x%x, " \ 716 ubi_err("bad fastmap EBA header magic: 0x%x, " \
717 "expected: 0x%x", 717 "expected: 0x%x",
718 be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC); 718 be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
719 goto fail_bad; 719 goto fail_bad;
720 } 720 }
721 721
722 for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) { 722 for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
723 int pnum = be32_to_cpu(fm_eba->pnum[j]); 723 int pnum = be32_to_cpu(fm_eba->pnum[j]);
724 724
725 if (pnum < 0) 725 if (pnum < 0)
726 continue; 726 continue;
727 727
728 aeb = NULL; 728 aeb = NULL;
729 list_for_each_entry(tmp_aeb, &used, u.list) { 729 list_for_each_entry(tmp_aeb, &used, u.list) {
730 if (tmp_aeb->pnum == pnum) 730 if (tmp_aeb->pnum == pnum) {
731 aeb = tmp_aeb; 731 aeb = tmp_aeb;
732 break;
733 }
732 } 734 }
733 735
734 /* This can happen if a PEB is already in an EBA known 736 /* This can happen if a PEB is already in an EBA known
735 * by this fastmap but the PEB itself is not in the used 737 * by this fastmap but the PEB itself is not in the used
736 * list. 738 * list.
737 * In this case the PEB can be within the fastmap pool 739 * In this case the PEB can be within the fastmap pool
738 * or while writing the fastmap it was in the protection 740 * or while writing the fastmap it was in the protection
739 * queue. 741 * queue.
740 */ 742 */
741 if (!aeb) { 743 if (!aeb) {
742 aeb = kmem_cache_alloc(ai->aeb_slab_cache, 744 aeb = kmem_cache_alloc(ai->aeb_slab_cache,
743 GFP_KERNEL); 745 GFP_KERNEL);
744 if (!aeb) { 746 if (!aeb) {
745 ret = -ENOMEM; 747 ret = -ENOMEM;
746 748
747 goto fail; 749 goto fail;
748 } 750 }
749 751
750 aeb->lnum = j; 752 aeb->lnum = j;
751 aeb->pnum = be32_to_cpu(fm_eba->pnum[j]); 753 aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
752 aeb->ec = -1; 754 aeb->ec = -1;
753 aeb->scrub = aeb->copy_flag = aeb->sqnum = 0; 755 aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
754 list_add_tail(&aeb->u.list, &eba_orphans); 756 list_add_tail(&aeb->u.list, &eba_orphans);
755 continue; 757 continue;
756 } 758 }
757 759
758 aeb->lnum = j; 760 aeb->lnum = j;
759 761
760 if (av->highest_lnum <= aeb->lnum) 762 if (av->highest_lnum <= aeb->lnum)
761 av->highest_lnum = aeb->lnum; 763 av->highest_lnum = aeb->lnum;
762 764
763 assign_aeb_to_av(ai, aeb, av); 765 assign_aeb_to_av(ai, aeb, av);
764 766
765 dbg_bld("inserting PEB:%i (LEB %i) to vol %i", 767 dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
766 aeb->pnum, aeb->lnum, av->vol_id); 768 aeb->pnum, aeb->lnum, av->vol_id);
767 } 769 }
768 770
769 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); 771 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
770 if (!ech) { 772 if (!ech) {
771 ret = -ENOMEM; 773 ret = -ENOMEM;
772 goto fail; 774 goto fail;
773 } 775 }
774 776
775 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, 777 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
776 u.list) { 778 u.list) {
777 int err; 779 int err;
778 780
779 if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) { 781 if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
780 ubi_err("bad PEB in fastmap EBA orphan list"); 782 ubi_err("bad PEB in fastmap EBA orphan list");
781 ret = UBI_BAD_FASTMAP; 783 ret = UBI_BAD_FASTMAP;
782 kfree(ech); 784 kfree(ech);
783 goto fail; 785 goto fail;
784 } 786 }
785 787
786 err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0); 788 err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
787 if (err && err != UBI_IO_BITFLIPS) { 789 if (err && err != UBI_IO_BITFLIPS) {
788 ubi_err("unable to read EC header! PEB:%i " \ 790 ubi_err("unable to read EC header! PEB:%i " \
789 "err:%i", tmp_aeb->pnum, err); 791 "err:%i", tmp_aeb->pnum, err);
790 ret = err > 0 ? UBI_BAD_FASTMAP : err; 792 ret = err > 0 ? UBI_BAD_FASTMAP : err;
791 kfree(ech); 793 kfree(ech);
792 794
793 goto fail; 795 goto fail;
794 } else if (err == UBI_IO_BITFLIPS) 796 } else if (err == UBI_IO_BITFLIPS)
795 tmp_aeb->scrub = 1; 797 tmp_aeb->scrub = 1;
796 798
797 tmp_aeb->ec = be64_to_cpu(ech->ec); 799 tmp_aeb->ec = be64_to_cpu(ech->ec);
798 assign_aeb_to_av(ai, tmp_aeb, av); 800 assign_aeb_to_av(ai, tmp_aeb, av);
799 } 801 }
800 802
801 kfree(ech); 803 kfree(ech);
802 } 804 }
803 805
804 ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum, 806 ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
805 &eba_orphans, &free); 807 &eba_orphans, &free);
806 if (ret) 808 if (ret)
807 goto fail; 809 goto fail;
808 810
809 ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum, 811 ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
810 &eba_orphans, &free); 812 &eba_orphans, &free);
811 if (ret) 813 if (ret)
812 goto fail; 814 goto fail;
813 815
814 if (max_sqnum > ai->max_sqnum) 816 if (max_sqnum > ai->max_sqnum)
815 ai->max_sqnum = max_sqnum; 817 ai->max_sqnum = max_sqnum;
816 818
817 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) 819 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
818 list_move_tail(&tmp_aeb->u.list, &ai->free); 820 list_move_tail(&tmp_aeb->u.list, &ai->free);
819 821
820 /* 822 /*
821 * If fastmap is leaking PEBs (must not happen), raise a 823 * If fastmap is leaking PEBs (must not happen), raise a
822 * fat warning and fall back to scanning mode. 824 * fat warning and fall back to scanning mode.
823 * We do this here because in ubi_wl_init() it's too late 825 * We do this here because in ubi_wl_init() it's too late
824 * and we cannot fall back to scanning. 826 * and we cannot fall back to scanning.
825 */ 827 */
826 if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count - 828 if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
827 ai->bad_peb_count - fm->used_blocks)) 829 ai->bad_peb_count - fm->used_blocks))
828 goto fail_bad; 830 goto fail_bad;
829 831
830 return 0; 832 return 0;
831 833
832 fail_bad: 834 fail_bad:
833 ret = UBI_BAD_FASTMAP; 835 ret = UBI_BAD_FASTMAP;
834 fail: 836 fail:
835 return ret; 837 return ret;
836 } 838 }
837 839
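ubi_attach_fastmap() parses one flat buffer (fm_raw) with a cursor, refusing to touch anything past fm_size before each structure it reads. A self-contained sketch of that discipline, using two hypothetical record types in place of the real fastmap structures (the kernel variant advances fm_pos first and then compares against fm_size; checking before the copy, as here, is the same idea in a bounds-safe order):

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	/* Hypothetical records standing in for ubi_fm_sb, ubi_fm_hdr, ... */
	struct rec_a { uint32_t magic; uint32_t count; };
	struct rec_b { uint32_t magic; };

	/* Walk the buffer with a cursor; every read is bounds-checked. */
	static int parse(const void *buf, size_t size)
	{
		size_t pos = 0;
		struct rec_a a;
		struct rec_b b;

		if (pos + sizeof(a) > size)
			return -1;
		memcpy(&a, (const char *)buf + pos, sizeof(a));
		pos += sizeof(a);

		if (pos + sizeof(b) > size)
			return -1;
		memcpy(&b, (const char *)buf + pos, sizeof(b));
		pos += sizeof(b);

		return 0;
	}

	int main(void)
	{
		char buf[64] = { 0 };

		printf("full buffer: %d\n", parse(buf, sizeof(buf)));	/* 0 */
		printf("truncated:   %d\n", parse(buf, 6));		/* -1 */
		return 0;
	}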
838 /** 840 /**
839 * ubi_scan_fastmap - scan the fastmap. 841 * ubi_scan_fastmap - scan the fastmap.
840 * @ubi: UBI device object 842 * @ubi: UBI device object
841 * @ai: UBI attach info to be filled 843 * @ai: UBI attach info to be filled
842 * @fm_anchor: The fastmap starts at this PEB 844 * @fm_anchor: The fastmap starts at this PEB
843 * 845 *
844 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found, 846 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
845 * UBI_BAD_FASTMAP if one was found but is not usable. 847 * UBI_BAD_FASTMAP if one was found but is not usable.
846 * < 0 indicates an internal error. 848 * < 0 indicates an internal error.
847 */ 849 */
848 int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, 850 int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
849 int fm_anchor) 851 int fm_anchor)
850 { 852 {
851 struct ubi_fm_sb *fmsb, *fmsb2; 853 struct ubi_fm_sb *fmsb, *fmsb2;
852 struct ubi_vid_hdr *vh; 854 struct ubi_vid_hdr *vh;
853 struct ubi_ec_hdr *ech; 855 struct ubi_ec_hdr *ech;
854 struct ubi_fastmap_layout *fm; 856 struct ubi_fastmap_layout *fm;
855 int i, used_blocks, pnum, ret = 0; 857 int i, used_blocks, pnum, ret = 0;
856 size_t fm_size; 858 size_t fm_size;
857 u32 crc, tmp_crc; 859 u32 crc, tmp_crc;
858 unsigned long long sqnum = 0; 860 unsigned long long sqnum = 0;
859 861
860 mutex_lock(&ubi->fm_mutex); 862 mutex_lock(&ubi->fm_mutex);
861 memset(ubi->fm_buf, 0, ubi->fm_size); 863 memset(ubi->fm_buf, 0, ubi->fm_size);
862 864
863 fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL); 865 fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
864 if (!fmsb) { 866 if (!fmsb) {
865 ret = -ENOMEM; 867 ret = -ENOMEM;
866 goto out; 868 goto out;
867 } 869 }
868 870
869 fm = kzalloc(sizeof(*fm), GFP_KERNEL); 871 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
870 if (!fm) { 872 if (!fm) {
871 ret = -ENOMEM; 873 ret = -ENOMEM;
872 kfree(fmsb); 874 kfree(fmsb);
873 goto out; 875 goto out;
874 } 876 }
875 877
876 ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb)); 878 ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
877 if (ret && ret != UBI_IO_BITFLIPS) 879 if (ret && ret != UBI_IO_BITFLIPS)
878 goto free_fm_sb; 880 goto free_fm_sb;
879 else if (ret == UBI_IO_BITFLIPS) 881 else if (ret == UBI_IO_BITFLIPS)
880 fm->to_be_tortured[0] = 1; 882 fm->to_be_tortured[0] = 1;
881 883
882 if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) { 884 if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
883 ubi_err("bad super block magic: 0x%x, expected: 0x%x", 885 ubi_err("bad super block magic: 0x%x, expected: 0x%x",
884 be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC); 886 be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
885 ret = UBI_BAD_FASTMAP; 887 ret = UBI_BAD_FASTMAP;
886 goto free_fm_sb; 888 goto free_fm_sb;
887 } 889 }
888 890
889 if (fmsb->version != UBI_FM_FMT_VERSION) { 891 if (fmsb->version != UBI_FM_FMT_VERSION) {
890 ubi_err("bad fastmap version: %i, expected: %i", 892 ubi_err("bad fastmap version: %i, expected: %i",
891 fmsb->version, UBI_FM_FMT_VERSION); 893 fmsb->version, UBI_FM_FMT_VERSION);
892 ret = UBI_BAD_FASTMAP; 894 ret = UBI_BAD_FASTMAP;
893 goto free_fm_sb; 895 goto free_fm_sb;
894 } 896 }
895 897
896 used_blocks = be32_to_cpu(fmsb->used_blocks); 898 used_blocks = be32_to_cpu(fmsb->used_blocks);
897 if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) { 899 if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
898 ubi_err("number of fastmap blocks is invalid: %i", used_blocks); 900 ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
899 ret = UBI_BAD_FASTMAP; 901 ret = UBI_BAD_FASTMAP;
900 goto free_fm_sb; 902 goto free_fm_sb;
901 } 903 }
902 904
903 fm_size = ubi->leb_size * used_blocks; 905 fm_size = ubi->leb_size * used_blocks;
904 if (fm_size != ubi->fm_size) { 906 if (fm_size != ubi->fm_size) {
905 ubi_err("bad fastmap size: %zi, expected: %zi", fm_size, 907 ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
906 ubi->fm_size); 908 ubi->fm_size);
907 ret = UBI_BAD_FASTMAP; 909 ret = UBI_BAD_FASTMAP;
908 goto free_fm_sb; 910 goto free_fm_sb;
909 } 911 }
910 912
911 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); 913 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
912 if (!ech) { 914 if (!ech) {
913 ret = -ENOMEM; 915 ret = -ENOMEM;
914 goto free_fm_sb; 916 goto free_fm_sb;
915 } 917 }
916 918
917 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); 919 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
918 if (!vh) { 920 if (!vh) {
919 ret = -ENOMEM; 921 ret = -ENOMEM;
920 goto free_hdr; 922 goto free_hdr;
921 } 923 }
922 924
923 for (i = 0; i < used_blocks; i++) { 925 for (i = 0; i < used_blocks; i++) {
924 pnum = be32_to_cpu(fmsb->block_loc[i]); 926 pnum = be32_to_cpu(fmsb->block_loc[i]);
925 927
926 if (ubi_io_is_bad(ubi, pnum)) { 928 if (ubi_io_is_bad(ubi, pnum)) {
927 ret = UBI_BAD_FASTMAP; 929 ret = UBI_BAD_FASTMAP;
928 goto free_hdr; 930 goto free_hdr;
929 } 931 }
930 932
931 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); 933 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
932 if (ret && ret != UBI_IO_BITFLIPS) { 934 if (ret && ret != UBI_IO_BITFLIPS) {
933 ubi_err("unable to read fastmap block# %i EC (PEB: %i)", 935 ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
934 i, pnum); 936 i, pnum);
935 if (ret > 0) 937 if (ret > 0)
936 ret = UBI_BAD_FASTMAP; 938 ret = UBI_BAD_FASTMAP;
937 goto free_hdr; 939 goto free_hdr;
938 } else if (ret == UBI_IO_BITFLIPS) 940 } else if (ret == UBI_IO_BITFLIPS)
939 fm->to_be_tortured[i] = 1; 941 fm->to_be_tortured[i] = 1;
940 942
941 if (!ubi->image_seq) 943 if (!ubi->image_seq)
942 ubi->image_seq = be32_to_cpu(ech->image_seq); 944 ubi->image_seq = be32_to_cpu(ech->image_seq);
943 945
944 if (be32_to_cpu(ech->image_seq) != ubi->image_seq) { 946 if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
945 ret = UBI_BAD_FASTMAP; 947 ret = UBI_BAD_FASTMAP;
946 goto free_hdr; 948 goto free_hdr;
947 } 949 }
948 950
949 ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); 951 ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
950 if (ret && ret != UBI_IO_BITFLIPS) { 952 if (ret && ret != UBI_IO_BITFLIPS) {
951 ubi_err("unable to read fastmap block# %i (PEB: %i)", 953 ubi_err("unable to read fastmap block# %i (PEB: %i)",
952 i, pnum); 954 i, pnum);
953 goto free_hdr; 955 goto free_hdr;
954 } 956 }
955 957
956 if (i == 0) { 958 if (i == 0) {
957 if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) { 959 if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
958 ubi_err("bad fastmap anchor vol_id: 0x%x," \ 960 ubi_err("bad fastmap anchor vol_id: 0x%x," \
959 " expected: 0x%x", 961 " expected: 0x%x",
960 be32_to_cpu(vh->vol_id), 962 be32_to_cpu(vh->vol_id),
961 UBI_FM_SB_VOLUME_ID); 963 UBI_FM_SB_VOLUME_ID);
962 ret = UBI_BAD_FASTMAP; 964 ret = UBI_BAD_FASTMAP;
963 goto free_hdr; 965 goto free_hdr;
964 } 966 }
965 } else { 967 } else {
966 if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) { 968 if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
967 ubi_err("bad fastmap data vol_id: 0x%x," \ 969 ubi_err("bad fastmap data vol_id: 0x%x," \
968 " expected: 0x%x", 970 " expected: 0x%x",
969 be32_to_cpu(vh->vol_id), 971 be32_to_cpu(vh->vol_id),
970 UBI_FM_DATA_VOLUME_ID); 972 UBI_FM_DATA_VOLUME_ID);
971 ret = UBI_BAD_FASTMAP; 973 ret = UBI_BAD_FASTMAP;
972 goto free_hdr; 974 goto free_hdr;
973 } 975 }
974 } 976 }
975 977
976 if (sqnum < be64_to_cpu(vh->sqnum)) 978 if (sqnum < be64_to_cpu(vh->sqnum))
977 sqnum = be64_to_cpu(vh->sqnum); 979 sqnum = be64_to_cpu(vh->sqnum);
978 980
979 ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum, 981 ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
980 ubi->leb_start, ubi->leb_size); 982 ubi->leb_start, ubi->leb_size);
981 if (ret && ret != UBI_IO_BITFLIPS) { 983 if (ret && ret != UBI_IO_BITFLIPS) {
982 ubi_err("unable to read fastmap block# %i (PEB: %i, " \ 984 ubi_err("unable to read fastmap block# %i (PEB: %i, " \
983 "err: %i)", i, pnum, ret); 985 "err: %i)", i, pnum, ret);
984 goto free_hdr; 986 goto free_hdr;
985 } 987 }
986 } 988 }
987 989
988 kfree(fmsb); 990 kfree(fmsb);
989 fmsb = NULL; 991 fmsb = NULL;
990 992
991 fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf); 993 fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
992 tmp_crc = be32_to_cpu(fmsb2->data_crc); 994 tmp_crc = be32_to_cpu(fmsb2->data_crc);
993 fmsb2->data_crc = 0; 995 fmsb2->data_crc = 0;
994 crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size); 996 crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
995 if (crc != tmp_crc) { 997 if (crc != tmp_crc) {
996 ubi_err("fastmap data CRC is invalid"); 998 ubi_err("fastmap data CRC is invalid");
997 ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc); 999 ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
998 ret = UBI_BAD_FASTMAP; 1000 ret = UBI_BAD_FASTMAP;
999 goto free_hdr; 1001 goto free_hdr;
1000 } 1002 }
1001 1003
1002 fmsb2->sqnum = sqnum; 1004 fmsb2->sqnum = sqnum;
1003 1005
1004 fm->used_blocks = used_blocks; 1006 fm->used_blocks = used_blocks;
1005 1007
1006 ret = ubi_attach_fastmap(ubi, ai, fm); 1008 ret = ubi_attach_fastmap(ubi, ai, fm);
1007 if (ret) { 1009 if (ret) {
1008 if (ret > 0) 1010 if (ret > 0)
1009 ret = UBI_BAD_FASTMAP; 1011 ret = UBI_BAD_FASTMAP;
1010 goto free_hdr; 1012 goto free_hdr;
1011 } 1013 }
1012 1014
1013 for (i = 0; i < used_blocks; i++) { 1015 for (i = 0; i < used_blocks; i++) {
1014 struct ubi_wl_entry *e; 1016 struct ubi_wl_entry *e;
1015 1017
1016 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); 1018 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1017 if (!e) { 1019 if (!e) {
1018 while (i--) 1020 while (i--)
1019 kfree(fm->e[i]); 1021 kfree(fm->e[i]);
1020 1022
1021 ret = -ENOMEM; 1023 ret = -ENOMEM;
1022 goto free_hdr; 1024 goto free_hdr;
1023 } 1025 }
1024 1026
1025 e->pnum = be32_to_cpu(fmsb2->block_loc[i]); 1027 e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1026 e->ec = be32_to_cpu(fmsb2->block_ec[i]); 1028 e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1027 fm->e[i] = e; 1029 fm->e[i] = e;
1028 } 1030 }
1029 1031
1030 ubi->fm = fm; 1032 ubi->fm = fm;
1031 ubi->fm_pool.max_size = ubi->fm->max_pool_size; 1033 ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1032 ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size; 1034 ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1033 ubi_msg("attached by fastmap"); 1035 ubi_msg("attached by fastmap");
1034 ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size); 1036 ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
1035 ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size); 1037 ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
1036 ubi->fm_disabled = 0; 1038 ubi->fm_disabled = 0;
1037 1039
1038 ubi_free_vid_hdr(ubi, vh); 1040 ubi_free_vid_hdr(ubi, vh);
1039 kfree(ech); 1041 kfree(ech);
1040 out: 1042 out:
1041 mutex_unlock(&ubi->fm_mutex); 1043 mutex_unlock(&ubi->fm_mutex);
1042 if (ret == UBI_BAD_FASTMAP) 1044 if (ret == UBI_BAD_FASTMAP)
1043 ubi_err("Attach by fastmap failed, doing a full scan!"); 1045 ubi_err("Attach by fastmap failed, doing a full scan!");
1044 return ret; 1046 return ret;
1045 1047
1046 free_hdr: 1048 free_hdr:
1047 ubi_free_vid_hdr(ubi, vh); 1049 ubi_free_vid_hdr(ubi, vh);
1048 kfree(ech); 1050 kfree(ech);
1049 free_fm_sb: 1051 free_fm_sb:
1050 kfree(fmsb); 1052 kfree(fmsb);
1051 kfree(fm); 1053 kfree(fm);
1052 goto out; 1054 goto out;
1053 } 1055 }
1054 1056
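The CRC check in ubi_scan_fastmap() has one subtlety: the checksum is stored inside the region it protects, so the reader must zero the stored field before recomputing, which is exactly what happens with fmsb2->data_crc above. A self-contained demonstration of the pattern; the bitwise routine below merely mimics the kernel's crc32() (same polynomial, no final inversion), and the struct layout is made up:

	#include <stdio.h>
	#include <stdint.h>

	/* Minimal reflected CRC32; slow but enough for the demo. */
	static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
	{
		while (len--) {
			crc ^= *p++;
			for (int i = 0; i < 8; i++)
				crc = (crc >> 1) ^ (0xedb88320 & -(crc & 1));
		}
		return crc;
	}

	/* The CRC field lives inside the checksummed region. */
	struct blob {
		uint32_t data_crc;
		uint8_t payload[32];
	};

	int main(void)
	{
		struct blob b = { 0, "fastmap payload" };
		uint32_t stored;

		/* writer: compute with the field zeroed, then store */
		b.data_crc = 0;
		b.data_crc = crc32_le(~0u, (const uint8_t *)&b, sizeof(b));

		/* reader: save, zero, recompute, compare */
		stored = b.data_crc;
		b.data_crc = 0;
		if (crc32_le(~0u, (const uint8_t *)&b, sizeof(b)) == stored)
			printf("CRC ok\n");
		return 0;
	}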
1055 /** 1057 /**
1056 * ubi_write_fastmap - writes a fastmap. 1058 * ubi_write_fastmap - writes a fastmap.
1057 * @ubi: UBI device object 1059 * @ubi: UBI device object
1058 * @new_fm: the fastmap to be written 1060 * @new_fm: the fastmap to be written
1059 * 1061 *
1060 * Returns 0 on success; < 0 indicates an internal error. 1062 * Returns 0 on success; < 0 indicates an internal error.
1061 */ 1063 */
1062 static int ubi_write_fastmap(struct ubi_device *ubi, 1064 static int ubi_write_fastmap(struct ubi_device *ubi,
1063 struct ubi_fastmap_layout *new_fm) 1065 struct ubi_fastmap_layout *new_fm)
1064 { 1066 {
1065 size_t fm_pos = 0; 1067 size_t fm_pos = 0;
1066 void *fm_raw; 1068 void *fm_raw;
1067 struct ubi_fm_sb *fmsb; 1069 struct ubi_fm_sb *fmsb;
1068 struct ubi_fm_hdr *fmh; 1070 struct ubi_fm_hdr *fmh;
1069 struct ubi_fm_scan_pool *fmpl1, *fmpl2; 1071 struct ubi_fm_scan_pool *fmpl1, *fmpl2;
1070 struct ubi_fm_ec *fec; 1072 struct ubi_fm_ec *fec;
1071 struct ubi_fm_volhdr *fvh; 1073 struct ubi_fm_volhdr *fvh;
1072 struct ubi_fm_eba *feba; 1074 struct ubi_fm_eba *feba;
1073 struct rb_node *node; 1075 struct rb_node *node;
1074 struct ubi_wl_entry *wl_e; 1076 struct ubi_wl_entry *wl_e;
1075 struct ubi_volume *vol; 1077 struct ubi_volume *vol;
1076 struct ubi_vid_hdr *avhdr, *dvhdr; 1078 struct ubi_vid_hdr *avhdr, *dvhdr;
1077 struct ubi_work *ubi_wrk; 1079 struct ubi_work *ubi_wrk;
1078 int ret, i, j, free_peb_count, used_peb_count, vol_count; 1080 int ret, i, j, free_peb_count, used_peb_count, vol_count;
1079 int scrub_peb_count, erase_peb_count; 1081 int scrub_peb_count, erase_peb_count;
1080 1082
1081 fm_raw = ubi->fm_buf; 1083 fm_raw = ubi->fm_buf;
1082 memset(ubi->fm_buf, 0, ubi->fm_size); 1084 memset(ubi->fm_buf, 0, ubi->fm_size);
1083 1085
1084 avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID); 1086 avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1085 if (!avhdr) { 1087 if (!avhdr) {
1086 ret = -ENOMEM; 1088 ret = -ENOMEM;
1087 goto out; 1089 goto out;
1088 } 1090 }
1089 1091
1090 dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID); 1092 dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
1091 if (!dvhdr) { 1093 if (!dvhdr) {
1092 ret = -ENOMEM; 1094 ret = -ENOMEM;
1093 goto out_kfree; 1095 goto out_kfree;
1094 } 1096 }
1095 1097
1096 spin_lock(&ubi->volumes_lock); 1098 spin_lock(&ubi->volumes_lock);
1097 spin_lock(&ubi->wl_lock); 1099 spin_lock(&ubi->wl_lock);
1098 1100
1099 fmsb = (struct ubi_fm_sb *)fm_raw; 1101 fmsb = (struct ubi_fm_sb *)fm_raw;
1100 fm_pos += sizeof(*fmsb); 1102 fm_pos += sizeof(*fmsb);
1101 ubi_assert(fm_pos <= ubi->fm_size); 1103 ubi_assert(fm_pos <= ubi->fm_size);
1102 1104
1103 fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos); 1105 fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1104 fm_pos += sizeof(*fmh); 1106 fm_pos += sizeof(*fmh);
1105 ubi_assert(fm_pos <= ubi->fm_size); 1107 ubi_assert(fm_pos <= ubi->fm_size);
1106 1108
1107 fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC); 1109 fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1108 fmsb->version = UBI_FM_FMT_VERSION; 1110 fmsb->version = UBI_FM_FMT_VERSION;
1109 fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks); 1111 fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1110 /* the max sqnum will be filled in while *reading* the fastmap */ 1112 /* the max sqnum will be filled in while *reading* the fastmap */
1111 fmsb->sqnum = 0; 1113 fmsb->sqnum = 0;
1112 1114
1113 fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC); 1115 fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1114 free_peb_count = 0; 1116 free_peb_count = 0;
1115 used_peb_count = 0; 1117 used_peb_count = 0;
1116 scrub_peb_count = 0; 1118 scrub_peb_count = 0;
1117 erase_peb_count = 0; 1119 erase_peb_count = 0;
1118 vol_count = 0; 1120 vol_count = 0;
1119 1121
1120 fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos); 1122 fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1121 fm_pos += sizeof(*fmpl1); 1123 fm_pos += sizeof(*fmpl1);
1122 fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC); 1124 fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1123 fmpl1->size = cpu_to_be16(ubi->fm_pool.size); 1125 fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
1124 fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size); 1126 fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1125 1127
1126 for (i = 0; i < ubi->fm_pool.size; i++) 1128 for (i = 0; i < ubi->fm_pool.size; i++)
1127 fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]); 1129 fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1128 1130
1129 fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos); 1131 fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1130 fm_pos += sizeof(*fmpl2); 1132 fm_pos += sizeof(*fmpl2);
1131 fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC); 1133 fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1132 fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size); 1134 fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
1133 fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size); 1135 fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1134 1136
1135 for (i = 0; i < ubi->fm_wl_pool.size; i++) 1137 for (i = 0; i < ubi->fm_wl_pool.size; i++)
1136 fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]); 1138 fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1137 1139
1138 for (node = rb_first(&ubi->free); node; node = rb_next(node)) { 1140 for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
1139 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb); 1141 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1140 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos); 1142 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1141 1143
1142 fec->pnum = cpu_to_be32(wl_e->pnum); 1144 fec->pnum = cpu_to_be32(wl_e->pnum);
1143 fec->ec = cpu_to_be32(wl_e->ec); 1145 fec->ec = cpu_to_be32(wl_e->ec);
1144 1146
1145 free_peb_count++; 1147 free_peb_count++;
1146 fm_pos += sizeof(*fec); 1148 fm_pos += sizeof(*fec);
1147 ubi_assert(fm_pos <= ubi->fm_size); 1149 ubi_assert(fm_pos <= ubi->fm_size);
1148 } 1150 }
1149 fmh->free_peb_count = cpu_to_be32(free_peb_count); 1151 fmh->free_peb_count = cpu_to_be32(free_peb_count);
1150 1152
1151 for (node = rb_first(&ubi->used); node; node = rb_next(node)) { 1153 for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
1152 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb); 1154 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1153 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos); 1155 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1154 1156
1155 fec->pnum = cpu_to_be32(wl_e->pnum); 1157 fec->pnum = cpu_to_be32(wl_e->pnum);
1156 fec->ec = cpu_to_be32(wl_e->ec); 1158 fec->ec = cpu_to_be32(wl_e->ec);
1157 1159
1158 used_peb_count++; 1160 used_peb_count++;
1159 fm_pos += sizeof(*fec); 1161 fm_pos += sizeof(*fec);
1160 ubi_assert(fm_pos <= ubi->fm_size); 1162 ubi_assert(fm_pos <= ubi->fm_size);
1161 } 1163 }
1162 fmh->used_peb_count = cpu_to_be32(used_peb_count); 1164 fmh->used_peb_count = cpu_to_be32(used_peb_count);
1163 1165
1164 for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) { 1166 for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
1165 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb); 1167 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1166 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos); 1168 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1167 1169
1168 fec->pnum = cpu_to_be32(wl_e->pnum); 1170 fec->pnum = cpu_to_be32(wl_e->pnum);
1169 fec->ec = cpu_to_be32(wl_e->ec); 1171 fec->ec = cpu_to_be32(wl_e->ec);
1170 1172
1171 scrub_peb_count++; 1173 scrub_peb_count++;
1172 fm_pos += sizeof(*fec); 1174 fm_pos += sizeof(*fec);
1173 ubi_assert(fm_pos <= ubi->fm_size); 1175 ubi_assert(fm_pos <= ubi->fm_size);
1174 } 1176 }
1175 fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count); 1177 fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
1176 1178
1177 1179
1178 list_for_each_entry(ubi_wrk, &ubi->works, list) { 1180 list_for_each_entry(ubi_wrk, &ubi->works, list) {
1179 if (ubi_is_erase_work(ubi_wrk)) { 1181 if (ubi_is_erase_work(ubi_wrk)) {
1180 wl_e = ubi_wrk->e; 1182 wl_e = ubi_wrk->e;
1181 ubi_assert(wl_e); 1183 ubi_assert(wl_e);
1182 1184
1183 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos); 1185 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1184 1186
1185 fec->pnum = cpu_to_be32(wl_e->pnum); 1187 fec->pnum = cpu_to_be32(wl_e->pnum);
1186 fec->ec = cpu_to_be32(wl_e->ec); 1188 fec->ec = cpu_to_be32(wl_e->ec);
1187 1189
1188 erase_peb_count++; 1190 erase_peb_count++;
1189 fm_pos += sizeof(*fec); 1191 fm_pos += sizeof(*fec);
1190 ubi_assert(fm_pos <= ubi->fm_size); 1192 ubi_assert(fm_pos <= ubi->fm_size);
1191 } 1193 }
1192 } 1194 }
1193 fmh->erase_peb_count = cpu_to_be32(erase_peb_count); 1195 fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1194 1196
1195 for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) { 1197 for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1196 vol = ubi->volumes[i]; 1198 vol = ubi->volumes[i];
1197 1199
1198 if (!vol) 1200 if (!vol)
1199 continue; 1201 continue;
1200 1202
1201 vol_count++; 1203 vol_count++;
1202 1204
1203 fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos); 1205 fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1204 fm_pos += sizeof(*fvh); 1206 fm_pos += sizeof(*fvh);
1205 ubi_assert(fm_pos <= ubi->fm_size); 1207 ubi_assert(fm_pos <= ubi->fm_size);
1206 1208
1207 fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC); 1209 fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1208 fvh->vol_id = cpu_to_be32(vol->vol_id); 1210 fvh->vol_id = cpu_to_be32(vol->vol_id);
1209 fvh->vol_type = vol->vol_type; 1211 fvh->vol_type = vol->vol_type;
1210 fvh->used_ebs = cpu_to_be32(vol->used_ebs); 1212 fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1211 fvh->data_pad = cpu_to_be32(vol->data_pad); 1213 fvh->data_pad = cpu_to_be32(vol->data_pad);
1212 fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes); 1214 fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1213 1215
1214 ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME || 1216 ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1215 vol->vol_type == UBI_STATIC_VOLUME); 1217 vol->vol_type == UBI_STATIC_VOLUME);
1216 1218
1217 feba = (struct ubi_fm_eba *)(fm_raw + fm_pos); 1219 feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1218 fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs); 1220 fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1219 ubi_assert(fm_pos <= ubi->fm_size); 1221 ubi_assert(fm_pos <= ubi->fm_size);
1220 1222
1221 for (j = 0; j < vol->reserved_pebs; j++) 1223 for (j = 0; j < vol->reserved_pebs; j++)
1222 feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]); 1224 feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
1223 1225
1224 feba->reserved_pebs = cpu_to_be32(j); 1226 feba->reserved_pebs = cpu_to_be32(j);
1225 feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC); 1227 feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1226 } 1228 }
1227 fmh->vol_count = cpu_to_be32(vol_count); 1229 fmh->vol_count = cpu_to_be32(vol_count);
1228 fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count); 1230 fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1229 1231
1230 avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 1232 avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1231 avhdr->lnum = 0; 1233 avhdr->lnum = 0;
1232 1234
1233 spin_unlock(&ubi->wl_lock); 1235 spin_unlock(&ubi->wl_lock);
1234 spin_unlock(&ubi->volumes_lock); 1236 spin_unlock(&ubi->volumes_lock);
1235 1237
1236 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum); 1238 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1237 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr); 1239 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
1238 if (ret) { 1240 if (ret) {
1239 ubi_err("unable to write vid_hdr to fastmap SB!"); 1241 ubi_err("unable to write vid_hdr to fastmap SB!");
1240 goto out_kfree; 1242 goto out_kfree;
1241 } 1243 }
1242 1244
1243 for (i = 0; i < new_fm->used_blocks; i++) { 1245 for (i = 0; i < new_fm->used_blocks; i++) {
1244 fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum); 1246 fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1245 fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec); 1247 fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1246 } 1248 }
1247 1249
1248 fmsb->data_crc = 0; 1250 fmsb->data_crc = 0;
1249 fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw, 1251 fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1250 ubi->fm_size)); 1252 ubi->fm_size));
1251 1253
1252 for (i = 1; i < new_fm->used_blocks; i++) { 1254 for (i = 1; i < new_fm->used_blocks; i++) {
1253 dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 1255 dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1254 dvhdr->lnum = cpu_to_be32(i); 1256 dvhdr->lnum = cpu_to_be32(i);
1255 dbg_bld("writing fastmap data to PEB %i sqnum %llu", 1257 dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1256 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum)); 1258 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1257 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr); 1259 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
1258 if (ret) { 1260 if (ret) {
1259 ubi_err("unable to write vid_hdr to PEB %i!", 1261 ubi_err("unable to write vid_hdr to PEB %i!",
1260 new_fm->e[i]->pnum); 1262 new_fm->e[i]->pnum);
1261 goto out_kfree; 1263 goto out_kfree;
1262 } 1264 }
1263 } 1265 }
1264 1266
1265 for (i = 0; i < new_fm->used_blocks; i++) { 1267 for (i = 0; i < new_fm->used_blocks; i++) {
1266 ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size), 1268 ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
1267 new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size); 1269 new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
1268 if (ret) { 1270 if (ret) {
1269 ubi_err("unable to write fastmap to PEB %i!", 1271 ubi_err("unable to write fastmap to PEB %i!",
1270 new_fm->e[i]->pnum); 1272 new_fm->e[i]->pnum);
1271 goto out_kfree; 1273 goto out_kfree;
1272 } 1274 }
1273 } 1275 }
1274 1276
1275 ubi_assert(new_fm); 1277 ubi_assert(new_fm);
1276 ubi->fm = new_fm; 1278 ubi->fm = new_fm;
1277 1279
1278 dbg_bld("fastmap written!"); 1280 dbg_bld("fastmap written!");
1279 1281
1280 out_kfree: 1282 out_kfree:
1281 ubi_free_vid_hdr(ubi, avhdr); 1283 ubi_free_vid_hdr(ubi, avhdr);
1282 ubi_free_vid_hdr(ubi, dvhdr); 1284 ubi_free_vid_hdr(ubi, dvhdr);
1283 out: 1285 out:
1284 return ret; 1286 return ret;
1285 } 1287 }
1286 1288
1287 /** 1289 /**
1288 * erase_block - Manually erase a PEB. 1290 * erase_block - Manually erase a PEB.
1289 * @ubi: UBI device object 1291 * @ubi: UBI device object
1290 * @pnum: PEB to be erased 1292 * @pnum: PEB to be erased
1291 * 1293 *
1292 * Returns the new EC value on success; < 0 indicates an internal error. 1294 * Returns the new EC value on success; < 0 indicates an internal error.
1293 */ 1295 */
1294 static int erase_block(struct ubi_device *ubi, int pnum) 1296 static int erase_block(struct ubi_device *ubi, int pnum)
1295 { 1297 {
1296 int ret; 1298 int ret;
1297 struct ubi_ec_hdr *ec_hdr; 1299 struct ubi_ec_hdr *ec_hdr;
1298 long long ec; 1300 long long ec;
1299 1301
1300 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); 1302 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1301 if (!ec_hdr) 1303 if (!ec_hdr)
1302 return -ENOMEM; 1304 return -ENOMEM;
1303 1305
1304 ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0); 1306 ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1305 if (ret < 0) 1307 if (ret < 0)
1306 goto out; 1308 goto out;
1307 else if (ret && ret != UBI_IO_BITFLIPS) { 1309 else if (ret && ret != UBI_IO_BITFLIPS) {
1308 ret = -EINVAL; 1310 ret = -EINVAL;
1309 goto out; 1311 goto out;
1310 } 1312 }
1311 1313
1312 ret = ubi_io_sync_erase(ubi, pnum, 0); 1314 ret = ubi_io_sync_erase(ubi, pnum, 0);
1313 if (ret < 0) 1315 if (ret < 0)
1314 goto out; 1316 goto out;
1315 1317
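        /*
         * On success, ubi_io_sync_erase() returns the number of erasures
         * performed, which is added to the erase counter read above.
         */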
        ec = be64_to_cpu(ec_hdr->ec);
        ec += ret;
        if (ec > UBI_MAX_ERASECOUNTER) {
                ret = -EINVAL;
                goto out;
        }

        ec_hdr->ec = cpu_to_be64(ec);
        ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
        if (ret < 0)
                goto out;

        ret = ec;
out:
        kfree(ec_hdr);
        return ret;
}

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 * @fm: the fastmap to be destroyed
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi,
                              struct ubi_fastmap_layout *fm)
{
        int ret, i;
        struct ubi_vid_hdr *vh;

        ret = erase_block(ubi, fm->e[0]->pnum);
        if (ret < 0)
                return ret;

        vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
        if (!vh)
                return -ENOMEM;

        /* deleting the current fastmap SB is not enough, an old SB may exist,
         * so create a (corrupted) SB such that fastmap will find it and fall
         * back to scanning mode in any case */
        vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
        ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);

        for (i = 0; i < fm->used_blocks; i++)
                ubi_wl_put_fm_peb(ubi, fm->e[i], i, fm->to_be_tortured[i]);

        return ret;
}

/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
        int ret, i;
        struct ubi_fastmap_layout *new_fm, *old_fm;
        struct ubi_wl_entry *tmp_e;

        mutex_lock(&ubi->fm_mutex);

        ubi_refill_pools(ubi);

        if (ubi->ro_mode || ubi->fm_disabled) {
                mutex_unlock(&ubi->fm_mutex);
                return 0;
        }

        ret = ubi_ensure_anchor_pebs(ubi);
        if (ret) {
                mutex_unlock(&ubi->fm_mutex);
                return ret;
        }

        new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
        if (!new_fm) {
                mutex_unlock(&ubi->fm_mutex);
                return -ENOMEM;
        }

        new_fm->used_blocks = ubi->fm_size / ubi->leb_size;

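        /*
         * new_fm->e[0] will hold the anchor PEB of the new fastmap; the
         * remaining entries hold the PEBs carrying the fastmap data.
         */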
        for (i = 0; i < new_fm->used_blocks; i++) {
                new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                if (!new_fm->e[i]) {
                        while (i--)
                                kfree(new_fm->e[i]);

                        kfree(new_fm);
                        mutex_unlock(&ubi->fm_mutex);
                        return -ENOMEM;
                }
        }

        old_fm = ubi->fm;
        ubi->fm = NULL;

        if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
                ubi_err("fastmap too large");
                ret = -ENOSPC;
                goto err;
        }

        for (i = 1; i < new_fm->used_blocks; i++) {
                spin_lock(&ubi->wl_lock);
                tmp_e = ubi_wl_get_fm_peb(ubi, 0);
                spin_unlock(&ubi->wl_lock);

                if (!tmp_e && !old_fm) {
                        int j;
                        ubi_err("could not get any free erase block");

                        for (j = 1; j < i; j++)
                                ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);

                        ret = -ENOSPC;
                        goto err;
                } else if (!tmp_e && old_fm) {
                        ret = erase_block(ubi, old_fm->e[i]->pnum);
                        if (ret < 0) {
                                int j;

                                for (j = 1; j < i; j++)
                                        ubi_wl_put_fm_peb(ubi, new_fm->e[j],
                                                          j, 0);

                                ubi_err("could not erase old fastmap PEB");
                                goto err;
                        }

                        new_fm->e[i]->pnum = old_fm->e[i]->pnum;
                        new_fm->e[i]->ec = old_fm->e[i]->ec;
                } else {
                        new_fm->e[i]->pnum = tmp_e->pnum;
                        new_fm->e[i]->ec = tmp_e->ec;

                        if (old_fm)
                                ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
                                                  old_fm->to_be_tortured[i]);
                }
        }

        spin_lock(&ubi->wl_lock);
        tmp_e = ubi_wl_get_fm_peb(ubi, 1);
        spin_unlock(&ubi->wl_lock);

        if (old_fm) {
                /* no fresh anchor PEB was found, reuse the old one */
                if (!tmp_e) {
                        ret = erase_block(ubi, old_fm->e[0]->pnum);
                        if (ret < 0) {
                                int i;
                                ubi_err("could not erase old anchor PEB");

                                for (i = 1; i < new_fm->used_blocks; i++)
                                        ubi_wl_put_fm_peb(ubi, new_fm->e[i],
                                                          i, 0);
                                goto err;
                        }

                        new_fm->e[0]->pnum = old_fm->e[0]->pnum;
                        new_fm->e[0]->ec = ret;
                } else {
                        /* we've got a new anchor PEB, return the old one */
                        ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
                                          old_fm->to_be_tortured[0]);

                        new_fm->e[0]->pnum = tmp_e->pnum;
                        new_fm->e[0]->ec = tmp_e->ec;
                }
        } else {
                if (!tmp_e) {
                        int i;
                        ubi_err("could not find any anchor PEB");

                        for (i = 1; i < new_fm->used_blocks; i++)
                                ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);

                        ret = -ENOSPC;
                        goto err;
                }

                new_fm->e[0]->pnum = tmp_e->pnum;
                new_fm->e[0]->ec = tmp_e->ec;
        }

        down_write(&ubi->work_sem);
        down_write(&ubi->fm_sem);
        ret = ubi_write_fastmap(ubi, new_fm);
        up_write(&ubi->fm_sem);
        up_write(&ubi->work_sem);

        if (ret)
                goto err;

out_unlock:
        mutex_unlock(&ubi->fm_mutex);
        kfree(old_fm);
        return ret;

err:
        kfree(new_fm);

        ubi_warn("Unable to write new fastmap, err=%i", ret);

        ret = 0;
        if (old_fm) {
                ret = invalidate_fastmap(ubi, old_fm);
                if (ret < 0)
                        ubi_err("Unable to invalidate current fastmap!");
                else if (ret)
                        ret = 0;
        }
        goto out_unlock;
}

include/uapi/mtd/ubi-user.h
/*
 * Copyright © International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

#ifndef __UBI_USER_H__
#define __UBI_USER_H__

#include <linux/types.h>

/*
 * UBI device creation (the same as MTD device attachment)
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * MTD devices may be attached using the %UBI_IOCATT ioctl command of the UBI
 * control device. The caller has to properly fill and pass a
 * &struct ubi_attach_req object - UBI will attach the MTD device specified in
 * the request and return the newly created UBI device number as the ioctl
 * return value.
 *
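 * As an illustrative sketch (not part of the original header; the control
 * device path and MTD number are assumptions), an attach call from user
 * space might look like:
 *
 * struct ubi_attach_req req = { .ubi_num = UBI_DEV_NUM_AUTO, .mtd_num = 0 };
 * int ctrl_fd = open("/dev/ubi_ctrl", O_RDONLY);
 * int ubi_num = ioctl(ctrl_fd, UBI_IOCATT, &req);
 *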
 * UBI device deletion (the same as MTD device detachment)
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * An UBI device may be deleted with the %UBI_IOCDET ioctl command of the UBI
 * control device.
 *
 * UBI volume creation
 * ~~~~~~~~~~~~~~~~~~~
 *
 * UBI volumes are created via the %UBI_IOCMKVOL ioctl command of the UBI
 * character device. A &struct ubi_mkvol_req object has to be properly filled
 * and a pointer to it has to be passed to the ioctl.
 *
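 * For illustration (the size, name, and open UBI device fd are assumptions),
 * creating a 1 MiB dynamic volume might look like:
 *
 * struct ubi_mkvol_req req = { .vol_id = UBI_VOL_NUM_AUTO, .alignment = 1,
 *                              .bytes = 1024 * 1024,
 *                              .vol_type = UBI_DYNAMIC_VOLUME };
 * req.name_len = strlen("example");
 * strcpy(req.name, "example");
 * ioctl(ubi_fd, UBI_IOCMKVOL, &req);
 *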
 * UBI volume deletion
 * ~~~~~~~~~~~~~~~~~~~
 *
 * To delete a volume, the %UBI_IOCRMVOL ioctl command of the UBI character
 * device should be used. A pointer to the 32-bit volume ID has to be passed
 * to the ioctl.
 *
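 * E.g., assuming @ubi_fd is an open UBI character device and volume %0
 * exists (illustration only):
 *
 * __s32 vol_id = 0;
 * ioctl(ubi_fd, UBI_IOCRMVOL, &vol_id);
 *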
 * UBI volume re-size
 * ~~~~~~~~~~~~~~~~~~
 *
 * To re-size a volume, the %UBI_IOCRSVOL ioctl command of the UBI character
 * device should be used. A &struct ubi_rsvol_req object has to be properly
 * filled and a pointer to it has to be passed to the ioctl.
 *
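 * E.g., growing volume %0 to 2 MiB (illustrative values; @ubi_fd is an
 * assumed open UBI character device):
 *
 * struct ubi_rsvol_req req = { .bytes = 2 * 1024 * 1024, .vol_id = 0 };
 * ioctl(ubi_fd, UBI_IOCRSVOL, &req);
 *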
 * UBI volumes re-name
 * ~~~~~~~~~~~~~~~~~~~
 *
 * To re-name several volumes atomically at one go, the %UBI_IOCRNVOL command
 * of the UBI character device should be used. A &struct ubi_rnvol_req object
 * has to be properly filled and a pointer to it has to be passed to the ioctl.
 *
 * UBI volume update
 * ~~~~~~~~~~~~~~~~~
 *
 * Volume update should be done via the %UBI_IOCVOLUP ioctl command of the
 * corresponding UBI volume character device. A pointer to a 64-bit update
 * size should be passed to the ioctl. After this, UBI expects the user to
 * write this number of bytes to the volume character device. The update is
 * finished when the claimed number of bytes is passed. So, the volume update
 * sequence is something like:
 *
 * fd = open("/dev/my_volume", O_RDWR);
 * ioctl(fd, UBI_IOCVOLUP, &image_size);
 * write(fd, buf, image_size);
 * close(fd);
 *
 * Logical eraseblock erase
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * To erase a logical eraseblock, the %UBI_IOCEBER ioctl command of the
 * corresponding UBI volume character device should be used. This command
 * unmaps the requested logical eraseblock, makes sure the corresponding
 * physical eraseblock is successfully erased, and returns.
 *
 * Atomic logical eraseblock change
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * An atomic logical eraseblock change operation is invoked using the
 * %UBI_IOCEBCH ioctl command of the corresponding UBI volume character
 * device. A pointer to a &struct ubi_leb_change_req object has to be passed
 * to the ioctl. Then the user is expected to write the requested number of
 * bytes (similarly to what should be done in case of the "volume update"
 * ioctl).
 *
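 * A sketch, with the LEB number and length invented for the example and
 * @vol_fd assumed to be an open UBI volume character device:
 *
 * struct ubi_leb_change_req req = { .lnum = 5, .bytes = len, .dtype = 3 };
 * ioctl(vol_fd, UBI_IOCEBCH, &req);
 * write(vol_fd, buf, len);
 *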
 * Logical eraseblock map
 * ~~~~~~~~~~~~~~~~~~~~~~
 *
 * To map a logical eraseblock to a physical eraseblock, the %UBI_IOCEBMAP
 * ioctl command should be used. A pointer to a &struct ubi_map_req object is
 * expected to be passed. The ioctl maps the requested logical eraseblock to
 * a physical eraseblock and returns. Only non-mapped logical eraseblocks can
 * be mapped. If the logical eraseblock specified in the request is already
 * mapped to a physical eraseblock, the ioctl fails and returns an error.
 *
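 * E.g., mapping LEB %5 (illustrative values; @vol_fd assumed as above):
 *
 * struct ubi_map_req req = { .lnum = 5, .dtype = 3 };
 * ioctl(vol_fd, UBI_IOCEBMAP, &req);
 *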
 * Logical eraseblock unmap
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * To unmap a logical eraseblock from a physical eraseblock, the
 * %UBI_IOCEBUNMAP ioctl command should be used. The ioctl unmaps the logical
 * eraseblock, schedules the corresponding physical eraseblock for erasure,
 * and returns. Unlike the "LEB erase" command, it does not wait for the
 * physical eraseblock to be erased. Note, the side effect of this is that if
 * an unclean reboot happens after the unmap ioctl returns, you may find the
 * LEB mapped again to the same physical eraseblock after UBI is run again.
 *
 * Check if logical eraseblock is mapped
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * To check if a logical eraseblock is mapped to a physical eraseblock, the
 * %UBI_IOCEBISMAP ioctl command should be used. It returns %0 if the LEB is
 * not mapped, and %1 if it is mapped.
 *
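 * E.g. (illustrative LEB number; @vol_fd assumed as above):
 *
 * __s32 lnum = 5;
 * int mapped = ioctl(vol_fd, UBI_IOCEBISMAP, &lnum);
 *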
 * Set an UBI volume property
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * To set an UBI volume property, the %UBI_IOCSETVOLPROP ioctl command should
 * be used. A pointer to a &struct ubi_set_vol_prop_req object is expected to
 * be passed. The object describes which property should be set, and to which
 * value it should be set.
 */

/*
 * When a new UBI volume or UBI device is created, users may either specify
 * the volume/device number they want to create or let UBI automatically
 * assign the number using these constants.
 */
#define UBI_VOL_NUM_AUTO (-1)
#define UBI_DEV_NUM_AUTO (-1)

/* Maximum volume name length */
#define UBI_MAX_VOLUME_NAME 127

/* ioctl commands of UBI character devices */

#define UBI_IOC_MAGIC 'o'

/* Create an UBI volume */
#define UBI_IOCMKVOL _IOW(UBI_IOC_MAGIC, 0, struct ubi_mkvol_req)
/* Remove an UBI volume */
#define UBI_IOCRMVOL _IOW(UBI_IOC_MAGIC, 1, __s32)
/* Re-size an UBI volume */
#define UBI_IOCRSVOL _IOW(UBI_IOC_MAGIC, 2, struct ubi_rsvol_req)
/* Re-name volumes */
#define UBI_IOCRNVOL _IOW(UBI_IOC_MAGIC, 3, struct ubi_rnvol_req)

/* ioctl commands of the UBI control character device */

#define UBI_CTRL_IOC_MAGIC 'o'

/* Attach an MTD device */
#define UBI_IOCATT _IOW(UBI_CTRL_IOC_MAGIC, 64, struct ubi_attach_req)
/* Detach an MTD device */
#define UBI_IOCDET _IOW(UBI_CTRL_IOC_MAGIC, 65, __s32)

/* ioctl commands of UBI volume character devices */

#define UBI_VOL_IOC_MAGIC 'O'

/* Start UBI volume update
 * Note: This actually takes a pointer (__s64*), but we can't change
 * that without breaking the ABI on 32bit systems
 */
#define UBI_IOCVOLUP _IOW(UBI_VOL_IOC_MAGIC, 0, __s64)
/* LEB erasure command, used for debugging, disabled by default */
#define UBI_IOCEBER _IOW(UBI_VOL_IOC_MAGIC, 1, __s32)
/* Atomic LEB change command */
#define UBI_IOCEBCH _IOW(UBI_VOL_IOC_MAGIC, 2, __s32)
/* Map LEB command */
#define UBI_IOCEBMAP _IOW(UBI_VOL_IOC_MAGIC, 3, struct ubi_map_req)
/* Unmap LEB command */
#define UBI_IOCEBUNMAP _IOW(UBI_VOL_IOC_MAGIC, 4, __s32)
/* Check if LEB is mapped command */
#define UBI_IOCEBISMAP _IOR(UBI_VOL_IOC_MAGIC, 5, __s32)
/* Set an UBI volume property */
#define UBI_IOCSETVOLPROP _IOW(UBI_VOL_IOC_MAGIC, 6, \
                               struct ubi_set_vol_prop_req)

/* Maximum MTD device name length supported by UBI */
#define MAX_UBI_MTD_NAME_LEN 127

/* Maximum number of UBI volumes that can be re-named at one go */
#define UBI_MAX_RNVOL 32

/*
 * UBI volume type constants.
 *
 * @UBI_DYNAMIC_VOLUME: dynamic volume
 * @UBI_STATIC_VOLUME: static volume
 */
enum {
        UBI_DYNAMIC_VOLUME = 3,
        UBI_STATIC_VOLUME = 4,
};

/*
 * UBI set volume property ioctl constants.
 *
 * @UBI_VOL_PROP_DIRECT_WRITE: allow (any non-zero value) or disallow (value 0)
 *                             user to directly write and erase individual
 *                             eraseblocks on dynamic volumes
 */
enum {
        UBI_VOL_PROP_DIRECT_WRITE = 1,
};

/**
 * struct ubi_attach_req - attach MTD device request.
 * @ubi_num: UBI device number to create
 * @mtd_num: MTD device number to attach
 * @vid_hdr_offset: VID header offset (use defaults if %0)
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 * @padding: reserved for future, not used, has to be zeroed
 *
 * This data structure is used to specify the MTD device UBI has to attach and
 * the parameters it has to use. The number which should be assigned to the
 * new UBI device is passed in @ubi_num. UBI may automatically assign the
 * number if @UBI_DEV_NUM_AUTO is passed. In this case, the device number is
 * returned in @ubi_num.
 *
 * Most applications should pass %0 in @vid_hdr_offset to make UBI use the
 * default offset of the VID header within physical eraseblocks. The default
 * offset is the next min. I/O unit after the EC header. For example, it will
 * be offset 512 in case of a 512-byte page NAND flash with no sub-page
 * support. Or it will be 512 in case of a 2KiB page NAND flash with 4
 * 512-byte sub-pages.
 *
 * But in rare cases, if this optimizes things, the VID header may be placed
 * at a different offset. For example, the boot-loader might do things faster
 * if the VID header sits at the end of the first 2KiB NAND page with 4
 * sub-pages. As the boot-loader would not normally need to read EC headers
 * (unless it needs UBI in RW mode), it might be faster to calculate ECC.
 * This is a weird example, but a real-life one. So, in this example,
 * @vid_hdr_offset would be 2KiB - 64 bytes = 1984. Note that this position
 * is not even 512-byte aligned, which is OK, as UBI is clever enough to
 * realize this is the 4th sub-page of the first page and add the needed
 * padding.
 *
 * The @max_beb_per1024 is the maximum number of bad PEBs UBI expects on the
 * UBI device per 1024 eraseblocks. This value is often given in another form
 * in the NAND datasheet (min NVB, i.e. the minimal number of valid blocks).
 * The maximum expected bad eraseblocks per 1024 is then:
 *    1024 * (1 - MinNVB / MaxNVB)
 * which gives 20 for most NAND devices. This limit is used to derive the
 * number of eraseblocks UBI reserves for handling new bad blocks. If the
 * device has more bad eraseblocks than this limit, UBI does not reserve any
 * physical eraseblocks for new bad eraseblocks, but attempts to use available
 * eraseblocks (if any). The accepted range is 0-768. If 0 is given, the
 * default kernel value of %CONFIG_MTD_UBI_BEB_LIMIT will be used.
 */
struct ubi_attach_req {
        __s32 ubi_num;
        __s32 mtd_num;
        __s32 vid_hdr_offset;
        __s16 max_beb_per1024;
        __s8 padding[10];
};

/**
 * struct ubi_mkvol_req - volume description data structure used in
 * volume creation requests.
 * @vol_id: volume number
 * @alignment: volume alignment
 * @bytes: volume size in bytes
 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
 * @padding1: reserved for future, not used, has to be zeroed
 * @name_len: volume name length
 * @padding2: reserved for future, not used, has to be zeroed
 * @name: volume name
 *
 * This structure is used by user-space programs when creating new volumes.
 * The @bytes field is only necessary when creating static volumes.
 *
 * The @alignment field specifies the required alignment of the volume
 * logical eraseblock. This means that the size of logical eraseblocks will
 * be aligned to this number, i.e.,
 *	(UBI device logical eraseblock size) mod (@alignment) = 0.
 *
 * To put it differently, the logical eraseblock of this volume may be
 * slightly shortened in order to make it properly aligned. The alignment has
 * to be a multiple of the flash minimal input/output unit, or %1 to utilize
 * the entire available space of logical eraseblocks.
 *
 * The @alignment field may be useful, for example, when one wants to
 * maintain a block device on top of an UBI volume. In this case, it is
 * desirable to fit an integer number of blocks in logical eraseblocks of
 * this UBI volume. With alignment it is possible to update this volume using
 * plain UBI volume image BLOBs, without caring about how to properly align
 * them.
 */
struct ubi_mkvol_req {
        __s32 vol_id;
        __s32 alignment;
        __s64 bytes;
        __s8 vol_type;
        __s8 padding1;
        __s16 name_len;
        __s8 padding2[4];
        char name[UBI_MAX_VOLUME_NAME + 1];
} __packed;

/**
 * struct ubi_rsvol_req - a data structure used in volume re-size requests.
 * @vol_id: ID of the volume to re-size
 * @bytes: new size of the volume in bytes
 *
 * Re-sizing is possible for both dynamic and static volumes. But while
 * dynamic volumes may be re-sized arbitrarily, static volumes cannot be made
 * smaller than the number of bytes they bear. To arbitrarily shrink a static
 * volume, it must be wiped out first (by means of a volume update operation
 * with zero bytes).
 */
struct ubi_rsvol_req {
        __s64 bytes;
        __s32 vol_id;
} __packed;

/**
 * struct ubi_rnvol_req - volumes re-name request.
 * @count: count of volumes to re-name
 * @padding1: reserved for future, not used, has to be zeroed
 * @vol_id: ID of the volume to re-name
 * @name_len: name length
 * @padding2: reserved for future, not used, has to be zeroed
 * @name: new volume name
 *
 * UBI allows re-naming up to %32 volumes at one go. The count of volumes to
 * re-name is specified in the @count field. The IDs of the volumes to
 * re-name and the new names are specified in the @vol_id and @name fields.
 *
 * The UBI volume re-name operation is atomic, which means that should a
 * power cut happen, the volumes will have either the old names or the new
 * names. So one possible use-case of this command is atomic upgrade. Indeed,
 * to upgrade, say, volumes A and B one may create temporary volumes %A1 and
 * %B1 with the new contents, then atomically re-name A1->A and B1->B, in
 * which case the old %A and %B will be removed.
 *
 * If it is not desirable to remove old A and B, the re-name request has to
 * contain 4 entries: A1->A, A->A1, B1->B, B->B1, in which case old A1 and B1
 * become A and B, and old A and B become A1 and B1.
 *
 * It is also OK to request: A1->A, A1->X, B1->B, B->Y, in which case old A1
 * and B1 become A and B, and old A and B become X and Y.
 *
 * In other words, in case of re-naming into an existing volume name, the
 * existing volume is removed, unless it is re-named as well in the same
 * re-name request.
 */
struct ubi_rnvol_req {
        __s32 count;
        __s8 padding1[12];
        struct {
                __s32 vol_id;
                __s16 name_len;
                __s8 padding2[2];
                char name[UBI_MAX_VOLUME_NAME + 1];
        } ents[UBI_MAX_RNVOL];
} __packed;

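/*
 * Example (illustrative, not part of the original header): atomically give
 * volume %1 the name "kernel"; the volume ID, name, and @ubi_fd are
 * assumptions:
 *
 *	struct ubi_rnvol_req req = { .count = 1 };
 *	req.ents[0].vol_id = 1;
 *	req.ents[0].name_len = strlen("kernel");
 *	strcpy(req.ents[0].name, "kernel");
 *	ioctl(ubi_fd, UBI_IOCRNVOL, &req);
 */
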
/**
 * struct ubi_leb_change_req - a data structure used in atomic LEB change
 * requests.
 * @lnum: logical eraseblock number to change
 * @bytes: how many bytes will be written to the logical eraseblock
 * @dtype: pass "3" for better compatibility with old kernels
 * @padding: reserved for future, not used, has to be zeroed
 *
 * The @dtype field used to inform UBI about what kind of data will be written
 * to the LEB: long term (value 1), short term (value 2), unknown (value 3).
 * UBI tried to pick a PEB with lower erase counter for short term data and a
 * PEB with higher erase counter for long term data. But this was not really
 * used because users usually do not know this and could easily mislead UBI. We
 * removed this feature in May 2012. UBI currently just ignores the @dtype
 * field. But for better compatibility with older kernels it is recommended to
 * set @dtype to 3 (unknown).
 */
struct ubi_leb_change_req {
        __s32 lnum;
        __s32 bytes;
        __s8 dtype; /* obsolete, do not use! */
        __s8 padding[7];
} __packed;

/**
 * struct ubi_map_req - a data structure used in map LEB requests.
 * @lnum: logical eraseblock number to map
 * @dtype: pass "3" for better compatibility with old kernels
 * @padding: reserved for future, not used, has to be zeroed
 */
struct ubi_map_req {
        __s32 lnum;
        __s8 dtype; /* obsolete, do not use! */
        __s8 padding[3];
} __packed;

/**
 * struct ubi_set_vol_prop_req - a data structure used to set an UBI volume
 * property.
 * @property: property to set (%UBI_VOL_PROP_DIRECT_WRITE)
 * @padding: reserved for future, not used, has to be zeroed
 * @value: value to set
 */
struct ubi_set_vol_prop_req {
        __u8 property;
        __u8 padding[7];
        __u64 value;
} __packed;

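/*
 * Example (illustrative): enable direct user writes on a volume, assuming
 * @vol_fd is an open UBI volume character device:
 *
 *	struct ubi_set_vol_prop_req req = {
 *		.property = UBI_VOL_PROP_DIRECT_WRITE,
 *		.value = 1,
 *	};
 *	ioctl(vol_fd, UBI_IOCSETVOLPROP, &req);
 */
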
#endif /* __UBI_USER_H__ */