Commit 080909503664641432cc8adf2ee2084775fd992a

Authored by Linus Torvalds

Merge tag 'mmc-fixes-for-3.6-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc

Pull MMC fixes from Chris Ball:
 - a firmware bug on several Samsung MoviNAND eMMC models causes
   permanent corruption on the device when secure erase and secure trim
   requests are made, so we disable those requests on these eMMC devices
   (a sketch of the gating check follows this list).
 - atmel-mci: fix a hang with some SD cards by waiting for not-busy flag.
 - dw_mmc: low-power mode breaks SDIO interrupts; fix PIO error handling;
   fix handling of error interrupts.
 - mxs-mmc: fix deadlocks; fix compile error due to dma.h arch change.
 - omap: fix broken PIO mode causing memory corruption.
 - sdhci-esdhc: fix card detection.
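
For reference, the MoviNAND change follows the usual MMC quirk pattern:
affected parts are tagged by CID at detection time, and the secure
erase/trim capability check reports no support when the quirk is set, so
mmc_blk_issue_secdiscard_rq() (shown below) bails out with -EOPNOTSUPP
instead of issuing the corrupting commands. A minimal sketch of that
shape; the fixup entry and quirk name are paraphrased from the fix, not
quoted from it:

    /* Tag an affected Samsung eMMC part via a CID-matched fixup entry. */
    MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
              MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),

    /* Gate the capability check on the quirk: claim secure erase/trim
     * support only if the card advertises it AND is not quirked. */
    int mmc_can_secure_erase_trim(struct mmc_card *card)
    {
            return (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
                   !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN);
    }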

* tag 'mmc-fixes-for-3.6-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc:
  mmc: omap: fix broken PIO mode
  mmc: card: Skip secure erase on MoviNAND; causes unrecoverable corruption.
  mmc: dw_mmc: Disable low power mode if SDIO interrupts are used
  mmc: dw_mmc: fix error handling in PIO mode
  mmc: dw_mmc: correct mishandling error interrupt
  mmc: dw_mmc: amend using error interrupt status
  mmc: atmel-mci: not busy flag has also to be used for read operations
  mmc: sdhci-esdhc: break out early if clock is 0
  mmc: mxs-mmc: fix deadlock caused by recursion loop
  mmc: mxs-mmc: fix deadlock in SDIO IRQ case
  mmc: bfin_sdh: fix dma_desc_array build error

Showing 8 changed files

drivers/mmc/card/block.c
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author: Andrew Christian
 *         28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/uaccess.h>

#include "queue.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

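/*
 * CMD38 argument values for iNAND devices: when MMC_QUIRK_INAND_CMD38 is
 * set, the intended erase variant is written to EXT_CSD byte 113 before
 * CMD38 is issued (see the discard and secdiscard paths below).
 */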
#define INAND_CMD38_ARG_EXT_CSD         113
#define INAND_CMD38_ARG_ERASE           0x00
#define INAND_CMD38_ARG_TRIM            0x01
#define INAND_CMD38_ARG_SECERASE        0x80
#define INAND_CMD38_ARG_SECTRIM1        0x81
#define INAND_CMD38_ARG_SECTRIM2        0x88

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
        spinlock_t      lock;
        struct gendisk  *disk;
        struct mmc_queue queue;
        struct list_head part;

        unsigned int    flags;
#define MMC_BLK_CMD23   (1 << 0)        /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR  (1 << 1)        /* MMC Reliable write support */

        unsigned int    usage;
        unsigned int    read_only;
        unsigned int    part_type;
        unsigned int    name_idx;
        unsigned int    reset_done;
#define MMC_BLK_READ            BIT(0)
#define MMC_BLK_WRITE           BIT(1)
#define MMC_BLK_DISCARD         BIT(2)
#define MMC_BLK_SECDISCARD      BIT(3)

        /*
         * Only set in main mmc_blk_data associated
         * with mmc_card with mmc_set_drvdata, and keeps
         * track of the current selected device partition.
         */
        unsigned int    part_curr;
        struct device_attribute force_ro;
        struct device_attribute power_ro_lock;
        int     area_type;
};

static DEFINE_MUTEX(open_lock);

enum mmc_blk_status {
        MMC_BLK_SUCCESS = 0,
        MMC_BLK_PARTIAL,
        MMC_BLK_CMD_ERR,
        MMC_BLK_RETRY,
        MMC_BLK_ABORT,
        MMC_BLK_DATA_ERR,
        MMC_BLK_ECC_ERR,
        MMC_BLK_NOMEDIUM,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");

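/*
 * mmc_blk_get()/mmc_blk_put() reference-count the per-slot data via
 * md->usage under open_lock; the queue and disk are torn down on the
 * final put.
 */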
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
        struct mmc_blk_data *md;

        mutex_lock(&open_lock);
        md = disk->private_data;
        if (md && md->usage == 0)
                md = NULL;
        if (md)
                md->usage++;
        mutex_unlock(&open_lock);

        return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
        int devmaj = MAJOR(disk_devt(disk));
        int devidx = MINOR(disk_devt(disk)) / perdev_minors;

        if (!devmaj)
                devidx = disk->first_minor / perdev_minors;
        return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
        mutex_lock(&open_lock);
        md->usage--;
        if (md->usage == 0) {
                int devidx = mmc_get_devidx(md->disk);
                blk_cleanup_queue(md->queue.queue);

                __clear_bit(devidx, dev_use);

                put_disk(md->disk);
                kfree(md);
        }
        mutex_unlock(&open_lock);
}

static ssize_t power_ro_lock_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int ret;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        struct mmc_card *card = md->queue.card;
        int locked = 0;

        if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
                locked = 2;
        else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
                locked = 1;

        ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

        return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        int ret;
        struct mmc_blk_data *md, *part_md;
        struct mmc_card *card;
        unsigned long set;

        if (kstrtoul(buf, 0, &set))
                return -EINVAL;

        if (set != 1)
                return count;

        md = mmc_blk_get(dev_to_disk(dev));
        card = md->queue.card;

        mmc_claim_host(card->host);

        ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
                         card->ext_csd.boot_ro_lock |
                         EXT_CSD_BOOT_WP_B_PWR_WP_EN,
                         card->ext_csd.part_time);
        if (ret)
                pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
        else
                card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;

        mmc_release_host(card->host);

        if (!ret) {
                pr_info("%s: Locking boot partition ro until next power on\n",
                        md->disk->disk_name);
                set_disk_ro(md->disk, 1);

                list_for_each_entry(part_md, &md->part, part)
                        if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
                                pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
                                set_disk_ro(part_md->disk, 1);
                        }
        }

        mmc_blk_put(md);
        return count;
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        int ret;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

        ret = snprintf(buf, PAGE_SIZE, "%d",
                       get_disk_ro(dev_to_disk(dev)) ^
                       md->read_only);
        mmc_blk_put(md);
        return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        int ret;
        char *end;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        unsigned long set = simple_strtoul(buf, &end, 0);
        if (end == buf) {
                ret = -EINVAL;
                goto out;
        }

        set_disk_ro(dev_to_disk(dev), set || md->read_only);
        ret = count;
out:
        mmc_blk_put(md);
        return ret;
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
        int ret = -ENXIO;

        mutex_lock(&block_mutex);
        if (md) {
                if (md->usage == 2)
                        check_disk_change(bdev);
                ret = 0;

                if ((mode & FMODE_WRITE) && md->read_only) {
                        mmc_blk_put(md);
                        ret = -EROFS;
                }
        }
        mutex_unlock(&block_mutex);

        return ret;
}

static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
        struct mmc_blk_data *md = disk->private_data;

        mutex_lock(&block_mutex);
        mmc_blk_put(md);
        mutex_unlock(&block_mutex);
        return 0;
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
        geo->heads = 4;
        geo->sectors = 16;
        return 0;
}

struct mmc_blk_ioc_data {
        struct mmc_ioc_cmd ic;
        unsigned char *buf;
        u64 buf_bytes;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
        struct mmc_ioc_cmd __user *user)
{
        struct mmc_blk_ioc_data *idata;
        int err;

        idata = kzalloc(sizeof(*idata), GFP_KERNEL);
        if (!idata) {
                err = -ENOMEM;
                goto out;
        }

        if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
                err = -EFAULT;
                goto idata_err;
        }

        idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
        if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
                err = -EOVERFLOW;
                goto idata_err;
        }

        if (!idata->buf_bytes)
                return idata;

        idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
        if (!idata->buf) {
                err = -ENOMEM;
                goto idata_err;
        }

        if (copy_from_user(idata->buf, (void __user *)(unsigned long)
                                        idata->ic.data_ptr, idata->buf_bytes)) {
                err = -EFAULT;
                goto copy_err;
        }

        return idata;

copy_err:
        kfree(idata->buf);
idata_err:
        kfree(idata);
out:
        return ERR_PTR(err);
}

static int mmc_blk_ioctl_cmd(struct block_device *bdev,
        struct mmc_ioc_cmd __user *ic_ptr)
{
        struct mmc_blk_ioc_data *idata;
        struct mmc_blk_data *md;
        struct mmc_card *card;
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        struct mmc_request mrq = {NULL};
        struct scatterlist sg;
        int err;

        /*
         * The caller must have CAP_SYS_RAWIO, and must be calling this on the
         * whole block device, not on a partition.  This prevents overspray
         * between sibling partitions.
         */
        if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
                return -EPERM;

        idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
        if (IS_ERR(idata))
                return PTR_ERR(idata);

        md = mmc_blk_get(bdev->bd_disk);
        if (!md) {
                err = -EINVAL;
                goto cmd_err;
        }

        card = md->queue.card;
        if (IS_ERR(card)) {
                err = PTR_ERR(card);
                goto cmd_done;
        }

        cmd.opcode = idata->ic.opcode;
        cmd.arg = idata->ic.arg;
        cmd.flags = idata->ic.flags;

        if (idata->buf_bytes) {
                data.sg = &sg;
                data.sg_len = 1;
                data.blksz = idata->ic.blksz;
                data.blocks = idata->ic.blocks;

                sg_init_one(data.sg, idata->buf, idata->buf_bytes);

                if (idata->ic.write_flag)
                        data.flags = MMC_DATA_WRITE;
                else
                        data.flags = MMC_DATA_READ;

                /* data.flags must already be set before doing this. */
                mmc_set_data_timeout(&data, card);

                /* Allow overriding the timeout_ns for empirical tuning. */
                if (idata->ic.data_timeout_ns)
                        data.timeout_ns = idata->ic.data_timeout_ns;

                if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
                        /*
                         * Pretend this is a data transfer and rely on the
                         * host driver to compute timeout.  When all host
                         * drivers support cmd.cmd_timeout for R1B, this
                         * can be changed to:
                         *
                         *     mrq.data = NULL;
                         *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
                         */
                        data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
                }

                mrq.data = &data;
        }

        mrq.cmd = &cmd;

        mmc_claim_host(card->host);

        if (idata->ic.is_acmd) {
                err = mmc_app_cmd(card->host, card);
                if (err)
                        goto cmd_rel_host;
        }

        mmc_wait_for_req(card->host, &mrq);

        if (cmd.error) {
                dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
                                                __func__, cmd.error);
                err = cmd.error;
                goto cmd_rel_host;
        }
        if (data.error) {
                dev_err(mmc_dev(card->host), "%s: data error %d\n",
                                                __func__, data.error);
                err = data.error;
                goto cmd_rel_host;
        }

        /*
         * According to the SD specs, some commands require a delay after
         * issuing the command.
         */
        if (idata->ic.postsleep_min_us)
                usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

        if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
                err = -EFAULT;
                goto cmd_rel_host;
        }

        if (!idata->ic.write_flag) {
                if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
                                                idata->buf, idata->buf_bytes)) {
                        err = -EFAULT;
                        goto cmd_rel_host;
                }
        }

cmd_rel_host:
        mmc_release_host(card->host);

cmd_done:
        mmc_blk_put(md);
cmd_err:
        kfree(idata->buf);
        kfree(idata);
        return err;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
        unsigned int cmd, unsigned long arg)
{
        int ret = -EINVAL;
        if (cmd == MMC_IOC_CMD)
                ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
        return ret;
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
        unsigned int cmd, unsigned long arg)
{
        return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
        .open                   = mmc_blk_open,
        .release                = mmc_blk_release,
        .getgeo                 = mmc_blk_getgeo,
        .owner                  = THIS_MODULE,
        .ioctl                  = mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl           = mmc_blk_compat_ioctl,
#endif
};

static inline int mmc_blk_part_switch(struct mmc_card *card,
                                      struct mmc_blk_data *md)
{
        int ret;
        struct mmc_blk_data *main_md = mmc_get_drvdata(card);

        if (main_md->part_curr == md->part_type)
                return 0;

        if (mmc_card_mmc(card)) {
                u8 part_config = card->ext_csd.part_config;

                part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
                part_config |= md->part_type;

                ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_PART_CONFIG, part_config,
                                 card->ext_csd.part_time);
                if (ret)
                        return ret;

                card->ext_csd.part_config = part_config;
        }

        main_md->part_curr = md->part_type;
        return 0;
}

static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
        int err;
        u32 result;
        __be32 *blocks;

        struct mmc_request mrq = {NULL};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};

        struct scatterlist sg;

        cmd.opcode = MMC_APP_CMD;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err)
                return (u32)-1;
        if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
                return (u32)-1;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = 4;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;
        mmc_set_data_timeout(&data, card);

        mrq.cmd = &cmd;
        mrq.data = &data;

        blocks = kmalloc(4, GFP_KERNEL);
        if (!blocks)
                return (u32)-1;

        sg_init_one(&sg, blocks, 4);

        mmc_wait_for_req(card->host, &mrq);

        result = ntohl(*blocks);
        kfree(blocks);

        if (cmd.error || data.error)
                result = (u32)-1;

        return result;
}

static int send_stop(struct mmc_card *card, u32 *status)
{
        struct mmc_command cmd = {0};
        int err;

        cmd.opcode = MMC_STOP_TRANSMISSION;
        cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, 5);
        if (err == 0)
                *status = cmd.resp[0];
        return err;
}

static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
        struct mmc_command cmd = {0};
        int err;

        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, retries);
        if (err == 0)
                *status = cmd.resp[0];
        return err;
}

#define ERR_NOMEDIUM    3
#define ERR_RETRY       2
#define ERR_ABORT       1
#define ERR_CONTINUE    0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
        bool status_valid, u32 status)
{
        switch (error) {
        case -EILSEQ:
                /* response crc error, retry the r/w cmd */
                pr_err("%s: %s sending %s command, card status %#x\n",
                        req->rq_disk->disk_name, "response CRC error",
                        name, status);
                return ERR_RETRY;

        case -ETIMEDOUT:
                pr_err("%s: %s sending %s command, card status %#x\n",
                        req->rq_disk->disk_name, "timed out", name, status);

                /* If the status cmd initially failed, retry the r/w cmd */
                if (!status_valid)
                        return ERR_RETRY;

                /*
                 * If it was a r/w cmd crc error, or illegal command
                 * (eg, issued in wrong state) then retry - we should
                 * have corrected the state problem above.
                 */
                if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
                        return ERR_RETRY;

                /* Otherwise abort the command */
                return ERR_ABORT;

        default:
                /* We don't understand the error code the driver gave us */
                pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
                       req->rq_disk->disk_name, error, status);
                return ERR_ABORT;
        }
}

/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
        struct mmc_blk_request *brq, int *ecc_err)
{
        bool prev_cmd_status_valid = true;
        u32 status, stop_status = 0;
        int err, retry;

        if (mmc_card_removed(card))
                return ERR_NOMEDIUM;

        /*
         * Try to get card status which indicates both the card state
         * and why there was no response.  If the first attempt fails,
         * we can't be sure the returned status is for the r/w command.
         */
        for (retry = 2; retry >= 0; retry--) {
                err = get_card_status(card, &status, 0);
                if (!err)
                        break;

                prev_cmd_status_valid = false;
                pr_err("%s: error %d sending status command, %sing\n",
                       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
        }

        /* We couldn't get a response from the card.  Give up. */
        if (err) {
                /* Check if the card is removed */
                if (mmc_detect_card_removed(card->host))
                        return ERR_NOMEDIUM;
                return ERR_ABORT;
        }

        /* Flag ECC errors */
        if ((status & R1_CARD_ECC_FAILED) ||
            (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
            (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
                *ecc_err = 1;

        /*
         * Check the current card state.  If it is in some data transfer
         * mode, tell it to stop (and hopefully transition back to TRAN.)
         */
        if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
            R1_CURRENT_STATE(status) == R1_STATE_RCV) {
                err = send_stop(card, &stop_status);
                if (err)
                        pr_err("%s: error %d sending stop command\n",
                               req->rq_disk->disk_name, err);

                /*
                 * If the stop cmd also timed out, the card is probably
                 * not present, so abort.  Other errors are bad news too.
                 */
                if (err)
                        return ERR_ABORT;
                if (stop_status & R1_CARD_ECC_FAILED)
                        *ecc_err = 1;
        }

        /* Check for set block count errors */
        if (brq->sbc.error)
                return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
                                prev_cmd_status_valid, status);

        /* Check for r/w command errors */
        if (brq->cmd.error)
                return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
                                prev_cmd_status_valid, status);

        /* Data errors */
        if (!brq->stop.error)
                return ERR_CONTINUE;

        /* Now for stop errors.  These aren't fatal to the transfer. */
        pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
               req->rq_disk->disk_name, brq->stop.error,
               brq->cmd.resp[0], status);

        /*
         * Substitute in our own stop status as this will give the error
         * state which happened during the execution of the r/w command.
         */
        if (stop_status) {
                brq->stop.resp[0] = stop_status;
                brq->stop.error = 0;
        }
        return ERR_CONTINUE;
}

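/*
 * md->reset_done tracks which request types have already triggered a host
 * reset, so each type retries at most once until mmc_blk_reset_success()
 * clears the bit after a subsequent successful request.
 */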
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
                         int type)
{
        int err;

        if (md->reset_done & type)
                return -EEXIST;

        md->reset_done |= type;
        err = mmc_hw_reset(host);
        /* Ensure we switch back to the correct partition */
        if (err != -EOPNOTSUPP) {
                struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
                int part_err;

                main_md->part_curr = main_md->part_type;
                part_err = mmc_blk_part_switch(host->card, md);
                if (part_err) {
                        /*
                         * We have failed to get back into the correct
                         * partition, so we need to abort the whole request.
                         */
                        return -ENODEV;
                }
        }
        return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
        md->reset_done &= ~type;
}

static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0, type = MMC_BLK_DISCARD;

        if (!mmc_can_erase(card)) {
                err = -EOPNOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_discard(card))
                arg = MMC_DISCARD_ARG;
        else if (mmc_can_trim(card))
                arg = MMC_TRIM_ARG;
        else
                arg = MMC_ERASE_ARG;
retry:
        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 INAND_CMD38_ARG_EXT_CSD,
                                 arg == MMC_TRIM_ARG ?
                                 INAND_CMD38_ARG_TRIM :
                                 INAND_CMD38_ARG_ERASE,
                                 0);
                if (err)
                        goto out;
        }
        err = mmc_erase(card, from, nr, arg);
out:
        if (err == -EIO && !mmc_blk_reset(md, card->host, type))
                goto retry;
        if (!err)
                mmc_blk_reset_success(md, type);
        blk_end_request(req, err, blk_rq_bytes(req));

        return err ? 0 : 1;
}

static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
                                       struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg, trim_arg, erase_arg;
        int err = 0, type = MMC_BLK_SECDISCARD;

        if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
                err = -EOPNOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        /* The sanitize operation is supported only in eMMC v4.5 and later */
        if (mmc_can_sanitize(card)) {
                erase_arg = MMC_ERASE_ARG;
                trim_arg = MMC_TRIM_ARG;
        } else {
                erase_arg = MMC_SECURE_ERASE_ARG;
                trim_arg = MMC_SECURE_TRIM1_ARG;
        }

        if (mmc_erase_group_aligned(card, from, nr))
                arg = erase_arg;
        else if (mmc_can_trim(card))
                arg = trim_arg;
        else {
                err = -EINVAL;
                goto out;
        }
retry:
        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 INAND_CMD38_ARG_EXT_CSD,
                                 arg == MMC_SECURE_TRIM1_ARG ?
                                 INAND_CMD38_ARG_SECTRIM1 :
                                 INAND_CMD38_ARG_SECERASE,
                                 0);
                if (err)
                        goto out_retry;
        }

        err = mmc_erase(card, from, nr, arg);
        if (err == -EIO)
                goto out_retry;
        if (err)
                goto out;

        if (arg == MMC_SECURE_TRIM1_ARG) {
                if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         INAND_CMD38_ARG_EXT_CSD,
                                         INAND_CMD38_ARG_SECTRIM2,
                                         0);
                        if (err)
                                goto out_retry;
                }

                err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
                if (err == -EIO)
                        goto out_retry;
                if (err)
                        goto out;
        }

        if (mmc_can_sanitize(card))
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_SANITIZE_START, 1, 0);
out_retry:
        if (err && !mmc_blk_reset(md, card->host, type))
                goto retry;
        if (!err)
                mmc_blk_reset_success(md, type);
out:
        blk_end_request(req, err, blk_rq_bytes(req));

        return err ? 0 : 1;
}

static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        int ret = 0;

        ret = mmc_flush_cache(card);
        if (ret)
                ret = -EIO;

        blk_end_request_all(req, ret);

        return ret ? 0 : 1;
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
                                    struct mmc_card *card,
                                    struct request *req)
{
        if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
                /* Legacy mode imposes restrictions on transfers. */
                if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
                        brq->data.blocks = 1;

                if (brq->data.blocks > card->ext_csd.rel_sectors)
                        brq->data.blocks = card->ext_csd.rel_sectors;
                else if (brq->data.blocks < card->ext_csd.rel_sectors)
                        brq->data.blocks = 1;
        }
}

#define CMD_ERRORS                                                      \
        (R1_OUT_OF_RANGE |      /* Command argument out of range */     \
         R1_ADDRESS_ERROR |     /* Misaligned address */                \
         R1_BLOCK_LEN_ERROR |   /* Transferred block length incorrect */\
         R1_WP_VIOLATION |      /* Tried to write to protected block */ \
         R1_CC_ERROR |          /* Card controller error */             \
         R1_ERROR)              /* General/unknown error */

static int mmc_blk_err_check(struct mmc_card *card,
                             struct mmc_async_req *areq)
{
        struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
                                                    mmc_active);
        struct mmc_blk_request *brq = &mq_mrq->brq;
        struct request *req = mq_mrq->req;
        int ecc_err = 0;

        /*
         * sbc.error indicates a problem with the set block count
         * command.  No data will have been transferred.
         *
         * cmd.error indicates a problem with the r/w command.  No
         * data will have been transferred.
         *
         * stop.error indicates a problem with the stop command.  Data
         * may have been transferred, or may still be transferring.
         */
        if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
            brq->data.error) {
                switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
                case ERR_RETRY:
                        return MMC_BLK_RETRY;
                case ERR_ABORT:
                        return MMC_BLK_ABORT;
                case ERR_NOMEDIUM:
                        return MMC_BLK_NOMEDIUM;
                case ERR_CONTINUE:
                        break;
                }
        }

        /*
         * Check for errors relating to the execution of the
         * initial command - such as address errors.  No data
         * has been transferred.
         */
        if (brq->cmd.resp[0] & CMD_ERRORS) {
                pr_err("%s: r/w command failed, status = %#x\n",
                       req->rq_disk->disk_name, brq->cmd.resp[0]);
                return MMC_BLK_ABORT;
        }

1030 /* 1030 /*
1031 * Everything else is either success, or a data error of some 1031 * Everything else is either success, or a data error of some
1032 * kind. If it was a write, we may have transitioned to 1032 * kind. If it was a write, we may have transitioned to
1033 * program mode, and we have to wait for that to complete. 1033 * program mode, and we have to wait for that to complete.
1034 */ 1034 */
1035 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { 1035 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
1036 u32 status; 1036 u32 status;
1037 do { 1037 do {
1038 int err = get_card_status(card, &status, 5); 1038 int err = get_card_status(card, &status, 5);
1039 if (err) { 1039 if (err) {
1040 pr_err("%s: error %d requesting status\n", 1040 pr_err("%s: error %d requesting status\n",
1041 req->rq_disk->disk_name, err); 1041 req->rq_disk->disk_name, err);
1042 return MMC_BLK_CMD_ERR; 1042 return MMC_BLK_CMD_ERR;
1043 } 1043 }
1044 /* 1044 /*
1045 * Some cards mishandle the status bits, 1045 * Some cards mishandle the status bits,
1046 * so make sure to check both the busy 1046 * so make sure to check both the busy
1047 * indication and the card state. 1047 * indication and the card state.
1048 */ 1048 */
1049 } while (!(status & R1_READY_FOR_DATA) || 1049 } while (!(status & R1_READY_FOR_DATA) ||
1050 (R1_CURRENT_STATE(status) == R1_STATE_PRG)); 1050 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
1051 } 1051 }
1052 1052
1053 if (brq->data.error) { 1053 if (brq->data.error) {
1054 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n", 1054 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1055 req->rq_disk->disk_name, brq->data.error, 1055 req->rq_disk->disk_name, brq->data.error,
1056 (unsigned)blk_rq_pos(req), 1056 (unsigned)blk_rq_pos(req),
1057 (unsigned)blk_rq_sectors(req), 1057 (unsigned)blk_rq_sectors(req),
1058 brq->cmd.resp[0], brq->stop.resp[0]); 1058 brq->cmd.resp[0], brq->stop.resp[0]);
1059 1059
1060 if (rq_data_dir(req) == READ) { 1060 if (rq_data_dir(req) == READ) {
1061 if (ecc_err) 1061 if (ecc_err)
1062 return MMC_BLK_ECC_ERR; 1062 return MMC_BLK_ECC_ERR;
1063 return MMC_BLK_DATA_ERR; 1063 return MMC_BLK_DATA_ERR;
1064 } else { 1064 } else {
1065 return MMC_BLK_CMD_ERR; 1065 return MMC_BLK_CMD_ERR;
1066 } 1066 }
1067 } 1067 }
1068 1068
1069 if (!brq->data.bytes_xfered) 1069 if (!brq->data.bytes_xfered)
1070 return MMC_BLK_RETRY; 1070 return MMC_BLK_RETRY;
1071 1071
1072 if (blk_rq_bytes(req) != brq->data.bytes_xfered) 1072 if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1073 return MMC_BLK_PARTIAL; 1073 return MMC_BLK_PARTIAL;
1074 1074
1075 return MMC_BLK_SUCCESS; 1075 return MMC_BLK_SUCCESS;
1076 } 1076 }
1077 1077
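The busy-wait above polls the card's status until it reports ready and has left the programming state; both conditions are checked because, as the comment notes, some cards mishandle one of the two. A minimal standalone sketch of that predicate, using the R1 bit layout (these values match the kernel's linux/mmc/mmc.h definitions):

    #include <stdbool.h>
    #include <stdint.h>

    #define R1_READY_FOR_DATA    (1u << 8)              /* buffer empty, card ready */
    #define R1_CURRENT_STATE(x)  (((x) & 0x1E00) >> 9)  /* bits 12:9 */
    #define R1_STATE_PRG         7                      /* programming state */

    /* True once a write has fully completed inside the card. */
    static bool write_finished(uint32_t status)
    {
            return (status & R1_READY_FOR_DATA) &&
                   R1_CURRENT_STATE(status) != R1_STATE_PRG;
    }

The driver loops on get_card_status() until this holds, bailing out with MMC_BLK_CMD_ERR if the status query itself fails.
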
1078 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, 1078 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1079 struct mmc_card *card, 1079 struct mmc_card *card,
1080 int disable_multi, 1080 int disable_multi,
1081 struct mmc_queue *mq) 1081 struct mmc_queue *mq)
1082 { 1082 {
1083 u32 readcmd, writecmd; 1083 u32 readcmd, writecmd;
1084 struct mmc_blk_request *brq = &mqrq->brq; 1084 struct mmc_blk_request *brq = &mqrq->brq;
1085 struct request *req = mqrq->req; 1085 struct request *req = mqrq->req;
1086 struct mmc_blk_data *md = mq->data; 1086 struct mmc_blk_data *md = mq->data;
1087 bool do_data_tag; 1087 bool do_data_tag;
1088 1088
1089 /* 1089 /*
1090 * Reliable writes are used to implement Forced Unit Access and 1090 * Reliable writes are used to implement Forced Unit Access and
1091 * REQ_META accesses, and are supported only on MMCs. 1091 * REQ_META accesses, and are supported only on MMCs.
1092 * 1092 *
1093 * XXX: this really needs a good explanation of why REQ_META 1093 * XXX: this really needs a good explanation of why REQ_META
1094 * is treated specially. 1094 * is treated specially.
1095 */ 1095 */
1096 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || 1096 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
1097 (req->cmd_flags & REQ_META)) && 1097 (req->cmd_flags & REQ_META)) &&
1098 (rq_data_dir(req) == WRITE) && 1098 (rq_data_dir(req) == WRITE) &&
1099 (md->flags & MMC_BLK_REL_WR); 1099 (md->flags & MMC_BLK_REL_WR);
1100 1100
1101 memset(brq, 0, sizeof(struct mmc_blk_request)); 1101 memset(brq, 0, sizeof(struct mmc_blk_request));
1102 brq->mrq.cmd = &brq->cmd; 1102 brq->mrq.cmd = &brq->cmd;
1103 brq->mrq.data = &brq->data; 1103 brq->mrq.data = &brq->data;
1104 1104
1105 brq->cmd.arg = blk_rq_pos(req); 1105 brq->cmd.arg = blk_rq_pos(req);
1106 if (!mmc_card_blockaddr(card)) 1106 if (!mmc_card_blockaddr(card))
1107 brq->cmd.arg <<= 9; 1107 brq->cmd.arg <<= 9;
1108 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 1108 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1109 brq->data.blksz = 512; 1109 brq->data.blksz = 512;
1110 brq->stop.opcode = MMC_STOP_TRANSMISSION; 1110 brq->stop.opcode = MMC_STOP_TRANSMISSION;
1111 brq->stop.arg = 0; 1111 brq->stop.arg = 0;
1112 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 1112 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1113 brq->data.blocks = blk_rq_sectors(req); 1113 brq->data.blocks = blk_rq_sectors(req);
1114 1114
1115 /* 1115 /*
1116 * The block layer doesn't support all sector count 1116 * The block layer doesn't support all sector count
1117 * restrictions, so we need to be prepared for requests 1117 * restrictions, so we need to be prepared for requests
1118 * that are too big. 1118 * that are too big.
1119 */ 1119 */
1120 if (brq->data.blocks > card->host->max_blk_count) 1120 if (brq->data.blocks > card->host->max_blk_count)
1121 brq->data.blocks = card->host->max_blk_count; 1121 brq->data.blocks = card->host->max_blk_count;
1122 1122
1123 if (brq->data.blocks > 1) { 1123 if (brq->data.blocks > 1) {
1124 /* 1124 /*
1125 * After a read error, we redo the request one sector 1125 * After a read error, we redo the request one sector
1126 * at a time in order to accurately determine which 1126 * at a time in order to accurately determine which
1127 * sectors can be read successfully. 1127 * sectors can be read successfully.
1128 */ 1128 */
1129 if (disable_multi) 1129 if (disable_multi)
1130 brq->data.blocks = 1; 1130 brq->data.blocks = 1;
1131 1131
1132 /* Some controllers can't do multiblock reads due to hw bugs */ 1132 /* Some controllers can't do multiblock reads due to hw bugs */
1133 if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ && 1133 if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
1134 rq_data_dir(req) == READ) 1134 rq_data_dir(req) == READ)
1135 brq->data.blocks = 1; 1135 brq->data.blocks = 1;
1136 } 1136 }
1137 1137
1138 if (brq->data.blocks > 1 || do_rel_wr) { 1138 if (brq->data.blocks > 1 || do_rel_wr) {
1139 /* SPI multiblock writes terminate using a special 1139 /* SPI multiblock writes terminate using a special
1140 * token, not a STOP_TRANSMISSION request. 1140 * token, not a STOP_TRANSMISSION request.
1141 */ 1141 */
1142 if (!mmc_host_is_spi(card->host) || 1142 if (!mmc_host_is_spi(card->host) ||
1143 rq_data_dir(req) == READ) 1143 rq_data_dir(req) == READ)
1144 brq->mrq.stop = &brq->stop; 1144 brq->mrq.stop = &brq->stop;
1145 readcmd = MMC_READ_MULTIPLE_BLOCK; 1145 readcmd = MMC_READ_MULTIPLE_BLOCK;
1146 writecmd = MMC_WRITE_MULTIPLE_BLOCK; 1146 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1147 } else { 1147 } else {
1148 brq->mrq.stop = NULL; 1148 brq->mrq.stop = NULL;
1149 readcmd = MMC_READ_SINGLE_BLOCK; 1149 readcmd = MMC_READ_SINGLE_BLOCK;
1150 writecmd = MMC_WRITE_BLOCK; 1150 writecmd = MMC_WRITE_BLOCK;
1151 } 1151 }
1152 if (rq_data_dir(req) == READ) { 1152 if (rq_data_dir(req) == READ) {
1153 brq->cmd.opcode = readcmd; 1153 brq->cmd.opcode = readcmd;
1154 brq->data.flags |= MMC_DATA_READ; 1154 brq->data.flags |= MMC_DATA_READ;
1155 } else { 1155 } else {
1156 brq->cmd.opcode = writecmd; 1156 brq->cmd.opcode = writecmd;
1157 brq->data.flags |= MMC_DATA_WRITE; 1157 brq->data.flags |= MMC_DATA_WRITE;
1158 } 1158 }
1159 1159
1160 if (do_rel_wr) 1160 if (do_rel_wr)
1161 mmc_apply_rel_rw(brq, card, req); 1161 mmc_apply_rel_rw(brq, card, req);
1162 1162
1163 /* 1163 /*
1164 * The data tag is used only when writing meta data, to speed 1164 * The data tag is used only when writing meta data, to speed
1165 * up the write and any subsequent reads of that meta data. 1165 * up the write and any subsequent reads of that meta data.
1166 */ 1166 */
1167 do_data_tag = (card->ext_csd.data_tag_unit_size) && 1167 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1168 (req->cmd_flags & REQ_META) && 1168 (req->cmd_flags & REQ_META) &&
1169 (rq_data_dir(req) == WRITE) && 1169 (rq_data_dir(req) == WRITE) &&
1170 ((brq->data.blocks * brq->data.blksz) >= 1170 ((brq->data.blocks * brq->data.blksz) >=
1171 card->ext_csd.data_tag_unit_size); 1171 card->ext_csd.data_tag_unit_size);
1172 1172
1173 /* 1173 /*
1174 * Pre-defined multi-block transfers are preferable to 1174 * Pre-defined multi-block transfers are preferable to
1175 * open-ended ones (and necessary for reliable writes). 1175 * open-ended ones (and necessary for reliable writes).
1176 * However, it is not sufficient to just send CMD23, 1176 * However, it is not sufficient to just send CMD23,
1177 * and avoid the final CMD12, as on an error condition 1177 * and avoid the final CMD12, as on an error condition
1178 * CMD12 (stop) needs to be sent anyway. This, coupled 1178 * CMD12 (stop) needs to be sent anyway. This, coupled
1179 * with Auto-CMD23 enhancements provided by some 1179 * with Auto-CMD23 enhancements provided by some
1180 * hosts, means that the complexity of dealing 1180 * hosts, means that the complexity of dealing
1181 * with this is best left to the host. If CMD23 is 1181 * with this is best left to the host. If CMD23 is
1182 * supported by card and host, we'll fill sbc in and let 1182 * supported by card and host, we'll fill sbc in and let
1183 * the host deal with handling it correctly. This means 1183 * the host deal with handling it correctly. This means
1184 * that for hosts that don't expose MMC_CAP_CMD23, no 1184 * that for hosts that don't expose MMC_CAP_CMD23, no
1185 * change of behavior will be observed. 1185 * change of behavior will be observed.
1186 * 1186 *
1187 * N.B.: Some MMC cards experience performance degradation. 1187 * N.B.: Some MMC cards experience performance degradation.
1188 * We'll avoid using CMD23-bounded multiblock writes for 1188 * We'll avoid using CMD23-bounded multiblock writes for
1189 * these, while retaining features like reliable writes. 1189 * these, while retaining features like reliable writes.
1190 */ 1190 */
1191 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && 1191 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
1192 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) || 1192 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
1193 do_data_tag)) { 1193 do_data_tag)) {
1194 brq->sbc.opcode = MMC_SET_BLOCK_COUNT; 1194 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1195 brq->sbc.arg = brq->data.blocks | 1195 brq->sbc.arg = brq->data.blocks |
1196 (do_rel_wr ? (1 << 31) : 0) | 1196 (do_rel_wr ? (1 << 31) : 0) |
1197 (do_data_tag ? (1 << 29) : 0); 1197 (do_data_tag ? (1 << 29) : 0);
1198 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; 1198 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1199 brq->mrq.sbc = &brq->sbc; 1199 brq->mrq.sbc = &brq->sbc;
1200 } 1200 }
1201 1201
1202 mmc_set_data_timeout(&brq->data, card); 1202 mmc_set_data_timeout(&brq->data, card);
1203 1203
1204 brq->data.sg = mqrq->sg; 1204 brq->data.sg = mqrq->sg;
1205 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); 1205 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1206 1206
1207 /* 1207 /*
1208 * Adjust the sg list so it is the same size as the 1208 * Adjust the sg list so it is the same size as the
1209 * request. 1209 * request.
1210 */ 1210 */
1211 if (brq->data.blocks != blk_rq_sectors(req)) { 1211 if (brq->data.blocks != blk_rq_sectors(req)) {
1212 int i, data_size = brq->data.blocks << 9; 1212 int i, data_size = brq->data.blocks << 9;
1213 struct scatterlist *sg; 1213 struct scatterlist *sg;
1214 1214
1215 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) { 1215 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1216 data_size -= sg->length; 1216 data_size -= sg->length;
1217 if (data_size <= 0) { 1217 if (data_size <= 0) {
1218 sg->length += data_size; 1218 sg->length += data_size;
1219 i++; 1219 i++;
1220 break; 1220 break;
1221 } 1221 }
1222 } 1222 }
1223 brq->data.sg_len = i; 1223 brq->data.sg_len = i;
1224 } 1224 }
1225 1225
1226 mqrq->mmc_active.mrq = &brq->mrq; 1226 mqrq->mmc_active.mrq = &brq->mrq;
1227 mqrq->mmc_active.err_check = mmc_blk_err_check; 1227 mqrq->mmc_active.err_check = mmc_blk_err_check;
1228 1228
1229 mmc_queue_bounce_pre(mqrq); 1229 mmc_queue_bounce_pre(mqrq);
1230 } 1230 }
1231 1231
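When CMD23 is used, the sbc.arg built above packs three fields into one 32-bit word: the block count in the low bits, bit 31 to request a reliable write, and bit 29 to tag the data. A standalone sketch of the packing, with the bit positions taken from the assignment above:

    #include <stdint.h>

    /* Build a SET_BLOCK_COUNT (CMD23) argument as mmc_blk_rw_rq_prep() does. */
    static uint32_t cmd23_arg(uint32_t blocks, int reliable_write, int data_tag)
    {
            uint32_t arg = blocks;                /* low bits: block count */

            if (reliable_write)
                    arg |= 1u << 31;              /* reliable write request */
            if (data_tag)
                    arg |= 1u << 29;              /* mark payload as tagged data */
            return arg;
    }

For example, cmd23_arg(8, 1, 0) yields 0x80000008: a reliable write of eight blocks.
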
1232 static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, 1232 static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1233 struct mmc_blk_request *brq, struct request *req, 1233 struct mmc_blk_request *brq, struct request *req,
1234 int ret) 1234 int ret)
1235 { 1235 {
1236 /* 1236 /*
1237 * If this is an SD card and we're writing, we can first 1237 * If this is an SD card and we're writing, we can first
1238 * mark the known good sectors as ok. 1238 * mark the known good sectors as ok.
1239 * 1239 *
1240 * If the card is not SD, we can still acknowledge the sectors 1240 * If the card is not SD, we can still acknowledge the sectors
1241 * reported written by the controller (which might be fewer than 1241 * reported written by the controller (which might be fewer than
1242 * the real number of written sectors, but never more). 1242 * the real number of written sectors, but never more).
1243 */ 1243 */
1244 if (mmc_card_sd(card)) { 1244 if (mmc_card_sd(card)) {
1245 u32 blocks; 1245 u32 blocks;
1246 1246
1247 blocks = mmc_sd_num_wr_blocks(card); 1247 blocks = mmc_sd_num_wr_blocks(card);
1248 if (blocks != (u32)-1) { 1248 if (blocks != (u32)-1) {
1249 ret = blk_end_request(req, 0, blocks << 9); 1249 ret = blk_end_request(req, 0, blocks << 9);
1250 } 1250 }
1251 } else { 1251 } else {
1252 ret = blk_end_request(req, 0, brq->data.bytes_xfered); 1252 ret = blk_end_request(req, 0, brq->data.bytes_xfered);
1253 } 1253 }
1254 return ret; 1254 return ret;
1255 } 1255 }
1256 1256
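mmc_blk_cmd_err() above only acknowledges sectors it can trust: on SD it asks the card itself how many blocks were written (mmc_sd_num_wr_blocks(), which returns (u32)-1 on failure), while otherwise it uses the controller's byte count, which may under- but never over-report. A hedged sketch of that accounting; query_written_blocks() is a stand-in for the SD query:

    #include <stdint.h>

    uint32_t query_written_blocks(void);  /* stand-in for mmc_sd_num_wr_blocks() */

    /* Bytes that may safely be completed back to the block layer. */
    static uint64_t safe_bytes_written(int is_sd, uint64_t controller_bytes)
    {
            if (is_sd) {
                    uint32_t blocks = query_written_blocks();

                    /* (u32)-1 means the query failed: acknowledge nothing. */
                    if (blocks == (uint32_t)-1)
                            return 0;
                    return (uint64_t)blocks << 9;  /* 512-byte blocks */
            }
            return controller_bytes;               /* never an over-report */
    }
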
1257 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) 1257 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
1258 { 1258 {
1259 struct mmc_blk_data *md = mq->data; 1259 struct mmc_blk_data *md = mq->data;
1260 struct mmc_card *card = md->queue.card; 1260 struct mmc_card *card = md->queue.card;
1261 struct mmc_blk_request *brq = &mq->mqrq_cur->brq; 1261 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
1262 int ret = 1, disable_multi = 0, retry = 0, type; 1262 int ret = 1, disable_multi = 0, retry = 0, type;
1263 enum mmc_blk_status status; 1263 enum mmc_blk_status status;
1264 struct mmc_queue_req *mq_rq; 1264 struct mmc_queue_req *mq_rq;
1265 struct request *req = rqc; 1265 struct request *req = rqc;
1266 struct mmc_async_req *areq; 1266 struct mmc_async_req *areq;
1267 1267
1268 if (!rqc && !mq->mqrq_prev->req) 1268 if (!rqc && !mq->mqrq_prev->req)
1269 return 0; 1269 return 0;
1270 1270
1271 do { 1271 do {
1272 if (rqc) { 1272 if (rqc) {
1273 /* 1273 /*
1274 * When 4KB native sectors are enabled, reads and writes 1274 * When 4KB native sectors are enabled, reads and writes
1275 * must be multiples of 8 blocks 1275 * must be multiples of 8 blocks
1276 */ 1276 */
1277 if ((brq->data.blocks & 0x07) && 1277 if ((brq->data.blocks & 0x07) &&
1278 (card->ext_csd.data_sector_size == 4096)) { 1278 (card->ext_csd.data_sector_size == 4096)) {
1279 pr_err("%s: Transfer size is not 4KB sector size aligned\n", 1279 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
1280 req->rq_disk->disk_name); 1280 req->rq_disk->disk_name);
1281 goto cmd_abort; 1281 goto cmd_abort;
1282 } 1282 }
1283 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); 1283 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1284 areq = &mq->mqrq_cur->mmc_active; 1284 areq = &mq->mqrq_cur->mmc_active;
1285 } else 1285 } else
1286 areq = NULL; 1286 areq = NULL;
1287 areq = mmc_start_req(card->host, areq, (int *) &status); 1287 areq = mmc_start_req(card->host, areq, (int *) &status);
1288 if (!areq) 1288 if (!areq)
1289 return 0; 1289 return 0;
1290 1290
1291 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); 1291 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
1292 brq = &mq_rq->brq; 1292 brq = &mq_rq->brq;
1293 req = mq_rq->req; 1293 req = mq_rq->req;
1294 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; 1294 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
1295 mmc_queue_bounce_post(mq_rq); 1295 mmc_queue_bounce_post(mq_rq);
1296 1296
1297 switch (status) { 1297 switch (status) {
1298 case MMC_BLK_SUCCESS: 1298 case MMC_BLK_SUCCESS:
1299 case MMC_BLK_PARTIAL: 1299 case MMC_BLK_PARTIAL:
1300 /* 1300 /*
1301 * A block was successfully transferred. 1301 * A block was successfully transferred.
1302 */ 1302 */
1303 mmc_blk_reset_success(md, type); 1303 mmc_blk_reset_success(md, type);
1304 ret = blk_end_request(req, 0, 1304 ret = blk_end_request(req, 0,
1305 brq->data.bytes_xfered); 1305 brq->data.bytes_xfered);
1306 /* 1306 /*
1307 * If the blk_end_request function returns non-zero even 1307 * If the blk_end_request function returns non-zero even
1308 * though all data has been transferred and no errors 1308 * though all data has been transferred and no errors
1309 * were returned by the host controller, it's a bug. 1309 * were returned by the host controller, it's a bug.
1310 */ 1310 */
1311 if (status == MMC_BLK_SUCCESS && ret) { 1311 if (status == MMC_BLK_SUCCESS && ret) {
1312 pr_err("%s BUG rq_tot %d d_xfer %d\n", 1312 pr_err("%s BUG rq_tot %d d_xfer %d\n",
1313 __func__, blk_rq_bytes(req), 1313 __func__, blk_rq_bytes(req),
1314 brq->data.bytes_xfered); 1314 brq->data.bytes_xfered);
1315 rqc = NULL; 1315 rqc = NULL;
1316 goto cmd_abort; 1316 goto cmd_abort;
1317 } 1317 }
1318 break; 1318 break;
1319 case MMC_BLK_CMD_ERR: 1319 case MMC_BLK_CMD_ERR:
1320 ret = mmc_blk_cmd_err(md, card, brq, req, ret); 1320 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
1321 if (!mmc_blk_reset(md, card->host, type)) 1321 if (!mmc_blk_reset(md, card->host, type))
1322 break; 1322 break;
1323 goto cmd_abort; 1323 goto cmd_abort;
1324 case MMC_BLK_RETRY: 1324 case MMC_BLK_RETRY:
1325 if (retry++ < 5) 1325 if (retry++ < 5)
1326 break; 1326 break;
1327 /* Fall through */ 1327 /* Fall through */
1328 case MMC_BLK_ABORT: 1328 case MMC_BLK_ABORT:
1329 if (!mmc_blk_reset(md, card->host, type)) 1329 if (!mmc_blk_reset(md, card->host, type))
1330 break; 1330 break;
1331 goto cmd_abort; 1331 goto cmd_abort;
1332 case MMC_BLK_DATA_ERR: { 1332 case MMC_BLK_DATA_ERR: {
1333 int err; 1333 int err;
1334 1334
1335 err = mmc_blk_reset(md, card->host, type); 1335 err = mmc_blk_reset(md, card->host, type);
1336 if (!err) 1336 if (!err)
1337 break; 1337 break;
1338 if (err == -ENODEV) 1338 if (err == -ENODEV)
1339 goto cmd_abort; 1339 goto cmd_abort;
1340 /* Fall through */ 1340 /* Fall through */
1341 } 1341 }
1342 case MMC_BLK_ECC_ERR: 1342 case MMC_BLK_ECC_ERR:
1343 if (brq->data.blocks > 1) { 1343 if (brq->data.blocks > 1) {
1344 /* Redo read one sector at a time */ 1344 /* Redo read one sector at a time */
1345 pr_warning("%s: retrying using single block read\n", 1345 pr_warning("%s: retrying using single block read\n",
1346 req->rq_disk->disk_name); 1346 req->rq_disk->disk_name);
1347 disable_multi = 1; 1347 disable_multi = 1;
1348 break; 1348 break;
1349 } 1349 }
1350 /* 1350 /*
1351 * After an error, we redo I/O one sector at a 1351 * After an error, we redo I/O one sector at a
1352 * time, so we only reach here after trying to 1352 * time, so we only reach here after trying to
1353 * read a single sector. 1353 * read a single sector.
1354 */ 1354 */
1355 ret = blk_end_request(req, -EIO, 1355 ret = blk_end_request(req, -EIO,
1356 brq->data.blksz); 1356 brq->data.blksz);
1357 if (!ret) 1357 if (!ret)
1358 goto start_new_req; 1358 goto start_new_req;
1359 break; 1359 break;
1360 case MMC_BLK_NOMEDIUM: 1360 case MMC_BLK_NOMEDIUM:
1361 goto cmd_abort; 1361 goto cmd_abort;
1362 } 1362 }
1363 1363
1364 if (ret) { 1364 if (ret) {
1365 /* 1365 /*
1366 * In case of an incomplete request, 1366 * In case of an incomplete request,
1367 * prepare it again and resend. 1367 * prepare it again and resend.
1368 */ 1368 */
1369 mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq); 1369 mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
1370 mmc_start_req(card->host, &mq_rq->mmc_active, NULL); 1370 mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
1371 } 1371 }
1372 } while (ret); 1372 } while (ret);
1373 1373
1374 return 1; 1374 return 1;
1375 1375
1376 cmd_abort: 1376 cmd_abort:
1377 if (mmc_card_removed(card)) 1377 if (mmc_card_removed(card))
1378 req->cmd_flags |= REQ_QUIET; 1378 req->cmd_flags |= REQ_QUIET;
1379 while (ret) 1379 while (ret)
1380 ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); 1380 ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
1381 1381
1382 start_new_req: 1382 start_new_req:
1383 if (rqc) { 1383 if (rqc) {
1384 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); 1384 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1385 mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL); 1385 mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
1386 } 1386 }
1387 1387
1388 return 0; 1388 return 0;
1389 } 1389 }
1390 1390
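The switch above is effectively a retry policy: success and partial completions are acknowledged, transient failures are resent up to five times, multi-block data/ECC errors fall back to single-block transfers, and everything else aborts. A compressed, standalone restatement of just that policy (the enums mirror the driver's status codes; the real code also attempts mmc_blk_reset() recovery and restarts requests, which this omits):

    enum blk_status { BLK_SUCCESS, BLK_PARTIAL, BLK_RETRY,
                      BLK_DATA_ERR, BLK_ECC_ERR, BLK_ABORT, BLK_NOMEDIUM };
    enum action { COMPLETE, RESEND, GO_SINGLE_BLOCK, GIVE_UP };

    static enum action rw_error_policy(enum blk_status status,
                                       int *retries, int multi_block)
    {
            switch (status) {
            case BLK_SUCCESS:
            case BLK_PARTIAL:
                    return COMPLETE;
            case BLK_RETRY:
                    return (*retries)++ < 5 ? RESEND : GIVE_UP;
            case BLK_DATA_ERR:
            case BLK_ECC_ERR:
                    /* Redo one sector at a time to isolate bad sectors. */
                    return multi_block ? GO_SINGLE_BLOCK : GIVE_UP;
            default:
                    return GIVE_UP;
            }
    }
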
1391 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) 1391 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1392 { 1392 {
1393 int ret; 1393 int ret;
1394 struct mmc_blk_data *md = mq->data; 1394 struct mmc_blk_data *md = mq->data;
1395 struct mmc_card *card = md->queue.card; 1395 struct mmc_card *card = md->queue.card;
1396 1396
1397 if (req && !mq->mqrq_prev->req) 1397 if (req && !mq->mqrq_prev->req)
1398 /* claim host only for the first request */ 1398 /* claim host only for the first request */
1399 mmc_claim_host(card->host); 1399 mmc_claim_host(card->host);
1400 1400
1401 ret = mmc_blk_part_switch(card, md); 1401 ret = mmc_blk_part_switch(card, md);
1402 if (ret) { 1402 if (ret) {
1403 if (req) { 1403 if (req) {
1404 blk_end_request_all(req, -EIO); 1404 blk_end_request_all(req, -EIO);
1405 } 1405 }
1406 ret = 0; 1406 ret = 0;
1407 goto out; 1407 goto out;
1408 } 1408 }
1409 1409
1410 if (req && req->cmd_flags & REQ_DISCARD) { 1410 if (req && req->cmd_flags & REQ_DISCARD) {
1411 /* complete ongoing async transfer before issuing discard */ 1411 /* complete ongoing async transfer before issuing discard */
1412 if (card->host->areq) 1412 if (card->host->areq)
1413 mmc_blk_issue_rw_rq(mq, NULL); 1413 mmc_blk_issue_rw_rq(mq, NULL);
1414 if (req->cmd_flags & REQ_SECURE) 1414 if (req->cmd_flags & REQ_SECURE &&
1415 !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
1415 ret = mmc_blk_issue_secdiscard_rq(mq, req); 1416 ret = mmc_blk_issue_secdiscard_rq(mq, req);
1416 else 1417 else
1417 ret = mmc_blk_issue_discard_rq(mq, req); 1418 ret = mmc_blk_issue_discard_rq(mq, req);
1418 } else if (req && req->cmd_flags & REQ_FLUSH) { 1419 } else if (req && req->cmd_flags & REQ_FLUSH) {
1419 /* complete ongoing async transfer before issuing flush */ 1420 /* complete ongoing async transfer before issuing flush */
1420 if (card->host->areq) 1421 if (card->host->areq)
1421 mmc_blk_issue_rw_rq(mq, NULL); 1422 mmc_blk_issue_rw_rq(mq, NULL);
1422 ret = mmc_blk_issue_flush(mq, req); 1423 ret = mmc_blk_issue_flush(mq, req);
1423 } else { 1424 } else {
1424 ret = mmc_blk_issue_rw_rq(mq, req); 1425 ret = mmc_blk_issue_rw_rq(mq, req);
1425 } 1426 }
1426 1427
1427 out: 1428 out:
1428 if (!req) 1429 if (!req)
1429 /* release host only when there are no more requests */ 1430 /* release host only when there are no more requests */
1430 mmc_release_host(card->host); 1431 mmc_release_host(card->host);
1431 return ret; 1432 return ret;
1432 } 1433 }
1433 1434
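The two-line change in mmc_blk_issue_rq() above is the core of the MoviNAND fix: a secure discard is routed to the secure-erase path only when the card does not carry MMC_QUIRK_SEC_ERASE_TRIM_BROKEN; on affected cards the request silently degrades to a normal discard rather than risking corruption. Isolated as a predicate:

    #include <stdbool.h>

    /* Mirrors the quirk check added above: take the secure-erase path
     * only if requested AND the card's firmware is not known-broken. */
    static bool use_secure_discard(bool req_secure, bool quirk_sec_broken)
    {
            return req_secure && !quirk_sec_broken;
    }
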
1434 static inline int mmc_blk_readonly(struct mmc_card *card) 1435 static inline int mmc_blk_readonly(struct mmc_card *card)
1435 { 1436 {
1436 return mmc_card_readonly(card) || 1437 return mmc_card_readonly(card) ||
1437 !(card->csd.cmdclass & CCC_BLOCK_WRITE); 1438 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
1438 } 1439 }
1439 1440
1440 static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, 1441 static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
1441 struct device *parent, 1442 struct device *parent,
1442 sector_t size, 1443 sector_t size,
1443 bool default_ro, 1444 bool default_ro,
1444 const char *subname, 1445 const char *subname,
1445 int area_type) 1446 int area_type)
1446 { 1447 {
1447 struct mmc_blk_data *md; 1448 struct mmc_blk_data *md;
1448 int devidx, ret; 1449 int devidx, ret;
1449 1450
1450 devidx = find_first_zero_bit(dev_use, max_devices); 1451 devidx = find_first_zero_bit(dev_use, max_devices);
1451 if (devidx >= max_devices) 1452 if (devidx >= max_devices)
1452 return ERR_PTR(-ENOSPC); 1453 return ERR_PTR(-ENOSPC);
1453 __set_bit(devidx, dev_use); 1454 __set_bit(devidx, dev_use);
1454 1455
1455 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL); 1456 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
1456 if (!md) { 1457 if (!md) {
1457 ret = -ENOMEM; 1458 ret = -ENOMEM;
1458 goto out; 1459 goto out;
1459 } 1460 }
1460 1461
1461 /* 1462 /*
1462 * !subname implies we are creating main mmc_blk_data that will be 1463 * !subname implies we are creating main mmc_blk_data that will be
1463 * associated with mmc_card with mmc_set_drvdata. Due to device 1464 * associated with mmc_card with mmc_set_drvdata. Due to device
1464 * partitions, devidx will not coincide with a per-physical card 1465 * partitions, devidx will not coincide with a per-physical card
1465 * index anymore so we keep track of a name index. 1466 * index anymore so we keep track of a name index.
1466 */ 1467 */
1467 if (!subname) { 1468 if (!subname) {
1468 md->name_idx = find_first_zero_bit(name_use, max_devices); 1469 md->name_idx = find_first_zero_bit(name_use, max_devices);
1469 __set_bit(md->name_idx, name_use); 1470 __set_bit(md->name_idx, name_use);
1470 } else 1471 } else
1471 md->name_idx = ((struct mmc_blk_data *) 1472 md->name_idx = ((struct mmc_blk_data *)
1472 dev_to_disk(parent)->private_data)->name_idx; 1473 dev_to_disk(parent)->private_data)->name_idx;
1473 1474
1474 md->area_type = area_type; 1475 md->area_type = area_type;
1475 1476
1476 /* 1477 /*
1477 * Set the read-only status based on the supported commands 1478 * Set the read-only status based on the supported commands
1478 * and the write protect switch. 1479 * and the write protect switch.
1479 */ 1480 */
1480 md->read_only = mmc_blk_readonly(card); 1481 md->read_only = mmc_blk_readonly(card);
1481 1482
1482 md->disk = alloc_disk(perdev_minors); 1483 md->disk = alloc_disk(perdev_minors);
1483 if (md->disk == NULL) { 1484 if (md->disk == NULL) {
1484 ret = -ENOMEM; 1485 ret = -ENOMEM;
1485 goto err_kfree; 1486 goto err_kfree;
1486 } 1487 }
1487 1488
1488 spin_lock_init(&md->lock); 1489 spin_lock_init(&md->lock);
1489 INIT_LIST_HEAD(&md->part); 1490 INIT_LIST_HEAD(&md->part);
1490 md->usage = 1; 1491 md->usage = 1;
1491 1492
1492 ret = mmc_init_queue(&md->queue, card, &md->lock, subname); 1493 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
1493 if (ret) 1494 if (ret)
1494 goto err_putdisk; 1495 goto err_putdisk;
1495 1496
1496 md->queue.issue_fn = mmc_blk_issue_rq; 1497 md->queue.issue_fn = mmc_blk_issue_rq;
1497 md->queue.data = md; 1498 md->queue.data = md;
1498 1499
1499 md->disk->major = MMC_BLOCK_MAJOR; 1500 md->disk->major = MMC_BLOCK_MAJOR;
1500 md->disk->first_minor = devidx * perdev_minors; 1501 md->disk->first_minor = devidx * perdev_minors;
1501 md->disk->fops = &mmc_bdops; 1502 md->disk->fops = &mmc_bdops;
1502 md->disk->private_data = md; 1503 md->disk->private_data = md;
1503 md->disk->queue = md->queue.queue; 1504 md->disk->queue = md->queue.queue;
1504 md->disk->driverfs_dev = parent; 1505 md->disk->driverfs_dev = parent;
1505 set_disk_ro(md->disk, md->read_only || default_ro); 1506 set_disk_ro(md->disk, md->read_only || default_ro);
1506 1507
1507 /* 1508 /*
1508 * As discussed on lkml, GENHD_FL_REMOVABLE should: 1509 * As discussed on lkml, GENHD_FL_REMOVABLE should:
1509 * 1510 *
1510 * - be set for removable media with permanent block devices 1511 * - be set for removable media with permanent block devices
1511 * - be unset for removable block devices with permanent media 1512 * - be unset for removable block devices with permanent media
1512 * 1513 *
1513 * Since MMC block devices clearly fall under the second 1514 * Since MMC block devices clearly fall under the second
1514 * case, we do not set GENHD_FL_REMOVABLE. Userspace 1515 * case, we do not set GENHD_FL_REMOVABLE. Userspace
1515 * should use the block device creation/destruction hotplug 1516 * should use the block device creation/destruction hotplug
1516 * messages to tell when the card is present. 1517 * messages to tell when the card is present.
1517 */ 1518 */
1518 1519
1519 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), 1520 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
1520 "mmcblk%d%s", md->name_idx, subname ? subname : ""); 1521 "mmcblk%d%s", md->name_idx, subname ? subname : "");
1521 1522
1522 if (mmc_card_mmc(card)) 1523 if (mmc_card_mmc(card))
1523 blk_queue_logical_block_size(md->queue.queue, 1524 blk_queue_logical_block_size(md->queue.queue,
1524 card->ext_csd.data_sector_size); 1525 card->ext_csd.data_sector_size);
1525 else 1526 else
1526 blk_queue_logical_block_size(md->queue.queue, 512); 1527 blk_queue_logical_block_size(md->queue.queue, 512);
1527 1528
1528 set_capacity(md->disk, size); 1529 set_capacity(md->disk, size);
1529 1530
1530 if (mmc_host_cmd23(card->host)) { 1531 if (mmc_host_cmd23(card->host)) {
1531 if (mmc_card_mmc(card) || 1532 if (mmc_card_mmc(card) ||
1532 (mmc_card_sd(card) && 1533 (mmc_card_sd(card) &&
1533 card->scr.cmds & SD_SCR_CMD23_SUPPORT)) 1534 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
1534 md->flags |= MMC_BLK_CMD23; 1535 md->flags |= MMC_BLK_CMD23;
1535 } 1536 }
1536 1537
1537 if (mmc_card_mmc(card) && 1538 if (mmc_card_mmc(card) &&
1538 md->flags & MMC_BLK_CMD23 && 1539 md->flags & MMC_BLK_CMD23 &&
1539 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || 1540 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
1540 card->ext_csd.rel_sectors)) { 1541 card->ext_csd.rel_sectors)) {
1541 md->flags |= MMC_BLK_REL_WR; 1542 md->flags |= MMC_BLK_REL_WR;
1542 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA); 1543 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
1543 } 1544 }
1544 1545
1545 return md; 1546 return md;
1546 1547
1547 err_putdisk: 1548 err_putdisk:
1548 put_disk(md->disk); 1549 put_disk(md->disk);
1549 err_kfree: 1550 err_kfree:
1550 kfree(md); 1551 kfree(md);
1551 out: 1552 out:
1552 return ERR_PTR(ret); 1553 return ERR_PTR(ret);
1553 } 1554 }
1554 1555
1555 static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) 1556 static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
1556 { 1557 {
1557 sector_t size; 1558 sector_t size;
1558 struct mmc_blk_data *md; 1559 struct mmc_blk_data *md;
1559 1560
1560 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { 1561 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
1561 /* 1562 /*
1562 * The EXT_CSD sector count is in number of 512 byte 1563 * The EXT_CSD sector count is in number of 512 byte
1563 * sectors. 1564 * sectors.
1564 */ 1565 */
1565 size = card->ext_csd.sectors; 1566 size = card->ext_csd.sectors;
1566 } else { 1567 } else {
1567 /* 1568 /*
1568 * The CSD capacity field is in units of read_blkbits. 1569 * The CSD capacity field is in units of read_blkbits.
1569 * set_capacity takes units of 512 bytes. 1570 * set_capacity takes units of 512 bytes.
1570 */ 1571 */
1571 size = card->csd.capacity << (card->csd.read_blkbits - 9); 1572 size = card->csd.capacity << (card->csd.read_blkbits - 9);
1572 } 1573 }
1573 1574
1574 md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL, 1575 md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
1575 MMC_BLK_DATA_AREA_MAIN); 1576 MMC_BLK_DATA_AREA_MAIN);
1576 return md; 1577 return md;
1577 } 1578 }
1578 1579
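mmc_blk_alloc() above converts capacity to the 512-byte sectors expected by set_capacity(): block-addressed eMMC reports EXT_CSD sectors directly, while byte-addressed cards report CSD capacity in 2^read_blkbits-byte units, hence the left shift by (read_blkbits - 9). A small worked sketch:

    #include <stdint.h>

    /* CSD capacity (in 2^read_blkbits-byte blocks) -> 512-byte sectors. */
    static uint64_t csd_capacity_to_sectors(uint64_t capacity,
                                            unsigned int read_blkbits)
    {
            return capacity << (read_blkbits - 9);
    }

    /* e.g. 1000000 blocks of 1024 bytes (read_blkbits = 10):
     * csd_capacity_to_sectors(1000000, 10) == 2000000 sectors. */
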
1579 static int mmc_blk_alloc_part(struct mmc_card *card, 1580 static int mmc_blk_alloc_part(struct mmc_card *card,
1580 struct mmc_blk_data *md, 1581 struct mmc_blk_data *md,
1581 unsigned int part_type, 1582 unsigned int part_type,
1582 sector_t size, 1583 sector_t size,
1583 bool default_ro, 1584 bool default_ro,
1584 const char *subname, 1585 const char *subname,
1585 int area_type) 1586 int area_type)
1586 { 1587 {
1587 char cap_str[10]; 1588 char cap_str[10];
1588 struct mmc_blk_data *part_md; 1589 struct mmc_blk_data *part_md;
1589 1590
1590 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, 1591 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
1591 subname, area_type); 1592 subname, area_type);
1592 if (IS_ERR(part_md)) 1593 if (IS_ERR(part_md))
1593 return PTR_ERR(part_md); 1594 return PTR_ERR(part_md);
1594 part_md->part_type = part_type; 1595 part_md->part_type = part_type;
1595 list_add(&part_md->part, &md->part); 1596 list_add(&part_md->part, &md->part);
1596 1597
1597 string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2, 1598 string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
1598 cap_str, sizeof(cap_str)); 1599 cap_str, sizeof(cap_str));
1599 pr_info("%s: %s %s partition %u %s\n", 1600 pr_info("%s: %s %s partition %u %s\n",
1600 part_md->disk->disk_name, mmc_card_id(card), 1601 part_md->disk->disk_name, mmc_card_id(card),
1601 mmc_card_name(card), part_md->part_type, cap_str); 1602 mmc_card_name(card), part_md->part_type, cap_str);
1602 return 0; 1603 return 0;
1603 } 1604 }
1604 1605
1605 /* MMC Physical partitions consist of two boot partitions and 1606 /* MMC Physical partitions consist of two boot partitions and
1606 * up to four general purpose partitions. 1607 * up to four general purpose partitions.
1607 * For each partition enabled in EXT_CSD a block device will be allocated 1608 * For each partition enabled in EXT_CSD a block device will be allocated
1608 * to provide access to the partition. 1609 * to provide access to the partition.
1609 */ 1610 */
1610 1611
1611 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) 1612 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
1612 { 1613 {
1613 int idx, ret = 0; 1614 int idx, ret = 0;
1614 1615
1615 if (!mmc_card_mmc(card)) 1616 if (!mmc_card_mmc(card))
1616 return 0; 1617 return 0;
1617 1618
1618 for (idx = 0; idx < card->nr_parts; idx++) { 1619 for (idx = 0; idx < card->nr_parts; idx++) {
1619 if (card->part[idx].size) { 1620 if (card->part[idx].size) {
1620 ret = mmc_blk_alloc_part(card, md, 1621 ret = mmc_blk_alloc_part(card, md,
1621 card->part[idx].part_cfg, 1622 card->part[idx].part_cfg,
1622 card->part[idx].size >> 9, 1623 card->part[idx].size >> 9,
1623 card->part[idx].force_ro, 1624 card->part[idx].force_ro,
1624 card->part[idx].name, 1625 card->part[idx].name,
1625 card->part[idx].area_type); 1626 card->part[idx].area_type);
1626 if (ret) 1627 if (ret)
1627 return ret; 1628 return ret;
1628 } 1629 }
1629 } 1630 }
1630 1631
1631 return ret; 1632 return ret;
1632 } 1633 }
1633 1634
1634 static void mmc_blk_remove_req(struct mmc_blk_data *md) 1635 static void mmc_blk_remove_req(struct mmc_blk_data *md)
1635 { 1636 {
1636 struct mmc_card *card; 1637 struct mmc_card *card;
1637 1638
1638 if (md) { 1639 if (md) {
1639 card = md->queue.card; 1640 card = md->queue.card;
1640 if (md->disk->flags & GENHD_FL_UP) { 1641 if (md->disk->flags & GENHD_FL_UP) {
1641 device_remove_file(disk_to_dev(md->disk), &md->force_ro); 1642 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
1642 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && 1643 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
1643 card->ext_csd.boot_ro_lockable) 1644 card->ext_csd.boot_ro_lockable)
1644 device_remove_file(disk_to_dev(md->disk), 1645 device_remove_file(disk_to_dev(md->disk),
1645 &md->power_ro_lock); 1646 &md->power_ro_lock);
1646 1647
1647 /* Stop new requests from getting into the queue */ 1648 /* Stop new requests from getting into the queue */
1648 del_gendisk(md->disk); 1649 del_gendisk(md->disk);
1649 } 1650 }
1650 1651
1651 /* Then flush out any already in there */ 1652 /* Then flush out any already in there */
1652 mmc_cleanup_queue(&md->queue); 1653 mmc_cleanup_queue(&md->queue);
1653 mmc_blk_put(md); 1654 mmc_blk_put(md);
1654 } 1655 }
1655 } 1656 }
1656 1657
1657 static void mmc_blk_remove_parts(struct mmc_card *card, 1658 static void mmc_blk_remove_parts(struct mmc_card *card,
1658 struct mmc_blk_data *md) 1659 struct mmc_blk_data *md)
1659 { 1660 {
1660 struct list_head *pos, *q; 1661 struct list_head *pos, *q;
1661 struct mmc_blk_data *part_md; 1662 struct mmc_blk_data *part_md;
1662 1663
1663 __clear_bit(md->name_idx, name_use); 1664 __clear_bit(md->name_idx, name_use);
1664 list_for_each_safe(pos, q, &md->part) { 1665 list_for_each_safe(pos, q, &md->part) {
1665 part_md = list_entry(pos, struct mmc_blk_data, part); 1666 part_md = list_entry(pos, struct mmc_blk_data, part);
1666 list_del(pos); 1667 list_del(pos);
1667 mmc_blk_remove_req(part_md); 1668 mmc_blk_remove_req(part_md);
1668 } 1669 }
1669 } 1670 }
1670 1671
1671 static int mmc_add_disk(struct mmc_blk_data *md) 1672 static int mmc_add_disk(struct mmc_blk_data *md)
1672 { 1673 {
1673 int ret; 1674 int ret;
1674 struct mmc_card *card = md->queue.card; 1675 struct mmc_card *card = md->queue.card;
1675 1676
1676 add_disk(md->disk); 1677 add_disk(md->disk);
1677 md->force_ro.show = force_ro_show; 1678 md->force_ro.show = force_ro_show;
1678 md->force_ro.store = force_ro_store; 1679 md->force_ro.store = force_ro_store;
1679 sysfs_attr_init(&md->force_ro.attr); 1680 sysfs_attr_init(&md->force_ro.attr);
1680 md->force_ro.attr.name = "force_ro"; 1681 md->force_ro.attr.name = "force_ro";
1681 md->force_ro.attr.mode = S_IRUGO | S_IWUSR; 1682 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
1682 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); 1683 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
1683 if (ret) 1684 if (ret)
1684 goto force_ro_fail; 1685 goto force_ro_fail;
1685 1686
1686 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && 1687 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
1687 card->ext_csd.boot_ro_lockable) { 1688 card->ext_csd.boot_ro_lockable) {
1688 umode_t mode; 1689 umode_t mode;
1689 1690
1690 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS) 1691 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
1691 mode = S_IRUGO; 1692 mode = S_IRUGO;
1692 else 1693 else
1693 mode = S_IRUGO | S_IWUSR; 1694 mode = S_IRUGO | S_IWUSR;
1694 1695
1695 md->power_ro_lock.show = power_ro_lock_show; 1696 md->power_ro_lock.show = power_ro_lock_show;
1696 md->power_ro_lock.store = power_ro_lock_store; 1697 md->power_ro_lock.store = power_ro_lock_store;
1697 sysfs_attr_init(&md->power_ro_lock.attr); 1698 sysfs_attr_init(&md->power_ro_lock.attr);
1698 md->power_ro_lock.attr.mode = mode; 1699 md->power_ro_lock.attr.mode = mode;
1699 md->power_ro_lock.attr.name = 1700 md->power_ro_lock.attr.name =
1700 "ro_lock_until_next_power_on"; 1701 "ro_lock_until_next_power_on";
1701 ret = device_create_file(disk_to_dev(md->disk), 1702 ret = device_create_file(disk_to_dev(md->disk),
1702 &md->power_ro_lock); 1703 &md->power_ro_lock);
1703 if (ret) 1704 if (ret)
1704 goto power_ro_lock_fail; 1705 goto power_ro_lock_fail;
1705 } 1706 }
1706 return ret; 1707 return ret;
1707 1708
1708 power_ro_lock_fail: 1709 power_ro_lock_fail:
1709 device_remove_file(disk_to_dev(md->disk), &md->force_ro); 1710 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
1710 force_ro_fail: 1711 force_ro_fail:
1711 del_gendisk(md->disk); 1712 del_gendisk(md->disk);
1712 1713
1713 return ret; 1714 return ret;
1714 } 1715 }
1715 1716
1716 #define CID_MANFID_SANDISK 0x2 1717 #define CID_MANFID_SANDISK 0x2
1717 #define CID_MANFID_TOSHIBA 0x11 1718 #define CID_MANFID_TOSHIBA 0x11
1718 #define CID_MANFID_MICRON 0x13 1719 #define CID_MANFID_MICRON 0x13
1720 #define CID_MANFID_SAMSUNG 0x15
1719 1721
1720 static const struct mmc_fixup blk_fixups[] = 1722 static const struct mmc_fixup blk_fixups[] =
1721 { 1723 {
1722 MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk, 1724 MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
1723 MMC_QUIRK_INAND_CMD38), 1725 MMC_QUIRK_INAND_CMD38),
1724 MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk, 1726 MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
1725 MMC_QUIRK_INAND_CMD38), 1727 MMC_QUIRK_INAND_CMD38),
1726 MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk, 1728 MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
1727 MMC_QUIRK_INAND_CMD38), 1729 MMC_QUIRK_INAND_CMD38),
1728 MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk, 1730 MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
1729 MMC_QUIRK_INAND_CMD38), 1731 MMC_QUIRK_INAND_CMD38),
1730 MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk, 1732 MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
1731 MMC_QUIRK_INAND_CMD38), 1733 MMC_QUIRK_INAND_CMD38),
1732 1734
1733 /* 1735 /*
1734 * Some MMC cards experience performance degradation with CMD23 1736 * Some MMC cards experience performance degradation with CMD23
1735 * instead of CMD12-bounded multiblock transfers. For now we'll 1737 * instead of CMD12-bounded multiblock transfers. For now we'll
1736 * blacklist what's bad... 1738 * blacklist what's bad...
1737 * - Certain Toshiba cards. 1739 * - Certain Toshiba cards.
1738 * 1740 *
1739 * N.B. This doesn't affect SD cards. 1741 * N.B. This doesn't affect SD cards.
1740 */ 1742 */
1741 MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, 1743 MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
1742 MMC_QUIRK_BLK_NO_CMD23), 1744 MMC_QUIRK_BLK_NO_CMD23),
1743 MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, 1745 MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
1744 MMC_QUIRK_BLK_NO_CMD23), 1746 MMC_QUIRK_BLK_NO_CMD23),
1745 MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, 1747 MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
1746 MMC_QUIRK_BLK_NO_CMD23), 1748 MMC_QUIRK_BLK_NO_CMD23),
1747 1749
1748 /* 1750 /*
1749 * Some Micron MMC cards need a longer data read timeout than 1751 * Some Micron MMC cards need a longer data read timeout than
1750 * indicated in CSD. 1752 * indicated in CSD.
1751 */ 1753 */
1752 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, 1754 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
1753 MMC_QUIRK_LONG_READ_TIME), 1755 MMC_QUIRK_LONG_READ_TIME),
1756
1757 /*
1758 * On these Samsung MoviNAND parts, performing secure erase or
1759 * secure trim can result in unrecoverable corruption due to a
1760 * firmware bug.
1761 */
1762 MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1763 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1764 MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1765 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1766 MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1767 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1768 MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1769 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1770 MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1771 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1772 MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1773 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1774 MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1775 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1776 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1777 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1754 1778
1755 END_FIXUP 1779 END_FIXUP
1756 }; 1780 };
1757 1781
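Each MMC_FIXUP entry above names a CID product string, a manufacturer ID, and an OEM ID (CID_OEMID_ANY is a wildcard), plus a hook such as add_quirk_mmc that sets quirk bits on matching cards; mmc_fixup_device() walks this table at probe time. A simplified standalone sketch of that matching, with fields abbreviated from the real struct mmc_fixup (which also supports wildcard names and revision ranges, and invokes a per-entry hook rather than returning bits):

    #include <string.h>

    #define OEMID_ANY ((unsigned short)-1)

    struct fixup {                        /* simplified from struct mmc_fixup */
            const char *name;             /* CID product name */
            unsigned int manfid;          /* CID manufacturer ID */
            unsigned short oemid;         /* CID OEM ID, or OEMID_ANY */
            unsigned int quirk;           /* quirk bit(s) to apply */
    };

    /* Collect the quirk bits every matching table entry would apply. */
    static unsigned int quirks_for_card(const struct fixup *table, size_t n,
                                        const char *name, unsigned int manfid,
                                        unsigned short oemid)
    {
            unsigned int quirks = 0;
            size_t i;

            for (i = 0; i < n; i++)
                    if (!strcmp(table[i].name, name) &&
                        table[i].manfid == manfid &&
                        (table[i].oemid == OEMID_ANY ||
                         table[i].oemid == oemid))
                            quirks |= table[i].quirk;
            return quirks;
    }
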
1758 static int mmc_blk_probe(struct mmc_card *card) 1782 static int mmc_blk_probe(struct mmc_card *card)
1759 { 1783 {
1760 struct mmc_blk_data *md, *part_md; 1784 struct mmc_blk_data *md, *part_md;
1761 char cap_str[10]; 1785 char cap_str[10];
1762 1786
1763 /* 1787 /*
1764 * Check that the card supports the command class(es) we need. 1788 * Check that the card supports the command class(es) we need.
1765 */ 1789 */
1766 if (!(card->csd.cmdclass & CCC_BLOCK_READ)) 1790 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
1767 return -ENODEV; 1791 return -ENODEV;
1768 1792
1769 md = mmc_blk_alloc(card); 1793 md = mmc_blk_alloc(card);
1770 if (IS_ERR(md)) 1794 if (IS_ERR(md))
1771 return PTR_ERR(md); 1795 return PTR_ERR(md);
1772 1796
1773 string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2, 1797 string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
1774 cap_str, sizeof(cap_str)); 1798 cap_str, sizeof(cap_str));
1775 pr_info("%s: %s %s %s %s\n", 1799 pr_info("%s: %s %s %s %s\n",
1776 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), 1800 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
1777 cap_str, md->read_only ? "(ro)" : ""); 1801 cap_str, md->read_only ? "(ro)" : "");
1778 1802
1779 if (mmc_blk_alloc_parts(card, md)) 1803 if (mmc_blk_alloc_parts(card, md))
1780 goto out; 1804 goto out;
1781 1805
1782 mmc_set_drvdata(card, md); 1806 mmc_set_drvdata(card, md);
1783 mmc_fixup_device(card, blk_fixups); 1807 mmc_fixup_device(card, blk_fixups);
1784 1808
1785 if (mmc_add_disk(md)) 1809 if (mmc_add_disk(md))
1786 goto out; 1810 goto out;
1787 1811
1788 list_for_each_entry(part_md, &md->part, part) { 1812 list_for_each_entry(part_md, &md->part, part) {
1789 if (mmc_add_disk(part_md)) 1813 if (mmc_add_disk(part_md))
1790 goto out; 1814 goto out;
1791 } 1815 }
1792 return 0; 1816 return 0;
1793 1817
1794 out: 1818 out:
1795 mmc_blk_remove_parts(card, md); 1819 mmc_blk_remove_parts(card, md);
1796 mmc_blk_remove_req(md); 1820 mmc_blk_remove_req(md);
1797 return 0; 1821 return 0;
1798 } 1822 }
1799 1823
1800 static void mmc_blk_remove(struct mmc_card *card) 1824 static void mmc_blk_remove(struct mmc_card *card)
1801 { 1825 {
1802 struct mmc_blk_data *md = mmc_get_drvdata(card); 1826 struct mmc_blk_data *md = mmc_get_drvdata(card);
1803 1827
1804 mmc_blk_remove_parts(card, md); 1828 mmc_blk_remove_parts(card, md);
1805 mmc_claim_host(card->host); 1829 mmc_claim_host(card->host);
1806 mmc_blk_part_switch(card, md); 1830 mmc_blk_part_switch(card, md);
1807 mmc_release_host(card->host); 1831 mmc_release_host(card->host);
1808 mmc_blk_remove_req(md); 1832 mmc_blk_remove_req(md);
1809 mmc_set_drvdata(card, NULL); 1833 mmc_set_drvdata(card, NULL);
1810 } 1834 }
1811 1835
1812 #ifdef CONFIG_PM 1836 #ifdef CONFIG_PM
1813 static int mmc_blk_suspend(struct mmc_card *card) 1837 static int mmc_blk_suspend(struct mmc_card *card)
1814 { 1838 {
1815 struct mmc_blk_data *part_md; 1839 struct mmc_blk_data *part_md;
1816 struct mmc_blk_data *md = mmc_get_drvdata(card); 1840 struct mmc_blk_data *md = mmc_get_drvdata(card);
1817 1841
1818 if (md) { 1842 if (md) {
1819 mmc_queue_suspend(&md->queue); 1843 mmc_queue_suspend(&md->queue);
1820 list_for_each_entry(part_md, &md->part, part) { 1844 list_for_each_entry(part_md, &md->part, part) {
1821 mmc_queue_suspend(&part_md->queue); 1845 mmc_queue_suspend(&part_md->queue);
1822 } 1846 }
1823 } 1847 }
1824 return 0; 1848 return 0;
1825 } 1849 }
1826 1850
1827 static int mmc_blk_resume(struct mmc_card *card) 1851 static int mmc_blk_resume(struct mmc_card *card)
1828 { 1852 {
1829 struct mmc_blk_data *part_md; 1853 struct mmc_blk_data *part_md;
1830 struct mmc_blk_data *md = mmc_get_drvdata(card); 1854 struct mmc_blk_data *md = mmc_get_drvdata(card);
1831 1855
1832 if (md) { 1856 if (md) {
1833 /* 1857 /*
1834 * Resume involves the card going into idle state, 1858 * Resume involves the card going into idle state,
1835 * so current partition is always the main one. 1859 * so current partition is always the main one.
1836 */ 1860 */
1837 md->part_curr = md->part_type; 1861 md->part_curr = md->part_type;
1838 mmc_queue_resume(&md->queue); 1862 mmc_queue_resume(&md->queue);
1839 list_for_each_entry(part_md, &md->part, part) { 1863 list_for_each_entry(part_md, &md->part, part) {
1840 mmc_queue_resume(&part_md->queue); 1864 mmc_queue_resume(&part_md->queue);
1841 } 1865 }
1842 } 1866 }
1843 return 0; 1867 return 0;
1844 } 1868 }
1845 #else 1869 #else
1846 #define mmc_blk_suspend NULL 1870 #define mmc_blk_suspend NULL
1847 #define mmc_blk_resume NULL 1871 #define mmc_blk_resume NULL
1848 #endif 1872 #endif
1849 1873
1850 static struct mmc_driver mmc_driver = { 1874 static struct mmc_driver mmc_driver = {
1851 .drv = { 1875 .drv = {
1852 .name = "mmcblk", 1876 .name = "mmcblk",
1853 }, 1877 },
1854 .probe = mmc_blk_probe, 1878 .probe = mmc_blk_probe,
1855 .remove = mmc_blk_remove, 1879 .remove = mmc_blk_remove,
1856 .suspend = mmc_blk_suspend, 1880 .suspend = mmc_blk_suspend,
1857 .resume = mmc_blk_resume, 1881 .resume = mmc_blk_resume,
1858 }; 1882 };
1859 1883
1860 static int __init mmc_blk_init(void) 1884 static int __init mmc_blk_init(void)
1861 { 1885 {
1862 int res; 1886 int res;
1863 1887
1864 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) 1888 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
1865 pr_info("mmcblk: using %d minors per device\n", perdev_minors); 1889 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
1866 1890
1867 max_devices = 256 / perdev_minors; 1891 max_devices = 256 / perdev_minors;
1868 1892
1869 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); 1893 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
1870 if (res) 1894 if (res)
1871 goto out; 1895 goto out;
1872 1896
1873 res = mmc_register_driver(&mmc_driver); 1897 res = mmc_register_driver(&mmc_driver);
1874 if (res) 1898 if (res)
1875 goto out2; 1899 goto out2;
1876 1900
1877 return 0; 1901 return 0;
1878 out2: 1902 out2:
1879 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); 1903 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
1880 out: 1904 out:
1881 return res; 1905 return res;
1882 } 1906 }
1883 1907
1884 static void __exit mmc_blk_exit(void) 1908 static void __exit mmc_blk_exit(void)
1885 { 1909 {
1886 mmc_unregister_driver(&mmc_driver); 1910 mmc_unregister_driver(&mmc_driver);
1887 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); 1911 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
1888 } 1912 }
1889 1913
1890 module_init(mmc_blk_init); 1914 module_init(mmc_blk_init);
1891 module_exit(mmc_blk_exit); 1915 module_exit(mmc_blk_exit);
1892 1916
1893 MODULE_LICENSE("GPL"); 1917 MODULE_LICENSE("GPL");
1894 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver"); 1918 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
1895 1919
1896 1920
drivers/mmc/host/atmel-mci.c
1 /* 1 /*
2 * Atmel MultiMedia Card Interface driver 2 * Atmel MultiMedia Card Interface driver
3 * 3 *
4 * Copyright (C) 2004-2008 Atmel Corporation 4 * Copyright (C) 2004-2008 Atmel Corporation
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 #include <linux/blkdev.h> 10 #include <linux/blkdev.h>
11 #include <linux/clk.h> 11 #include <linux/clk.h>
12 #include <linux/debugfs.h> 12 #include <linux/debugfs.h>
13 #include <linux/device.h> 13 #include <linux/device.h>
14 #include <linux/dmaengine.h> 14 #include <linux/dmaengine.h>
15 #include <linux/dma-mapping.h> 15 #include <linux/dma-mapping.h>
16 #include <linux/err.h> 16 #include <linux/err.h>
17 #include <linux/gpio.h> 17 #include <linux/gpio.h>
18 #include <linux/init.h> 18 #include <linux/init.h>
19 #include <linux/interrupt.h> 19 #include <linux/interrupt.h>
20 #include <linux/ioport.h> 20 #include <linux/ioport.h>
21 #include <linux/module.h> 21 #include <linux/module.h>
22 #include <linux/platform_device.h> 22 #include <linux/platform_device.h>
23 #include <linux/scatterlist.h> 23 #include <linux/scatterlist.h>
24 #include <linux/seq_file.h> 24 #include <linux/seq_file.h>
25 #include <linux/slab.h> 25 #include <linux/slab.h>
26 #include <linux/stat.h> 26 #include <linux/stat.h>
27 #include <linux/types.h> 27 #include <linux/types.h>
28 28
29 #include <linux/mmc/host.h> 29 #include <linux/mmc/host.h>
30 #include <linux/mmc/sdio.h> 30 #include <linux/mmc/sdio.h>
31 31
32 #include <mach/atmel-mci.h> 32 #include <mach/atmel-mci.h>
33 #include <linux/atmel-mci.h> 33 #include <linux/atmel-mci.h>
34 #include <linux/atmel_pdc.h> 34 #include <linux/atmel_pdc.h>
35 35
36 #include <asm/io.h> 36 #include <asm/io.h>
37 #include <asm/unaligned.h> 37 #include <asm/unaligned.h>
38 38
39 #include <mach/cpu.h> 39 #include <mach/cpu.h>
40 #include <mach/board.h> 40 #include <mach/board.h>
41 41
42 #include "atmel-mci-regs.h" 42 #include "atmel-mci-regs.h"
43 43
44 #define ATMCI_DATA_ERROR_FLAGS (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE) 44 #define ATMCI_DATA_ERROR_FLAGS (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
45 #define ATMCI_DMA_THRESHOLD 16 45 #define ATMCI_DMA_THRESHOLD 16
46 46
47 enum { 47 enum {
48 EVENT_CMD_RDY = 0, 48 EVENT_CMD_RDY = 0,
49 EVENT_XFER_COMPLETE, 49 EVENT_XFER_COMPLETE,
50 EVENT_NOTBUSY, 50 EVENT_NOTBUSY,
51 EVENT_DATA_ERROR, 51 EVENT_DATA_ERROR,
52 }; 52 };
53 53
54 enum atmel_mci_state { 54 enum atmel_mci_state {
55 STATE_IDLE = 0, 55 STATE_IDLE = 0,
56 STATE_SENDING_CMD, 56 STATE_SENDING_CMD,
57 STATE_DATA_XFER, 57 STATE_DATA_XFER,
58 STATE_WAITING_NOTBUSY, 58 STATE_WAITING_NOTBUSY,
59 STATE_SENDING_STOP, 59 STATE_SENDING_STOP,
60 STATE_END_REQUEST, 60 STATE_END_REQUEST,
61 }; 61 };
62 62
63 enum atmci_xfer_dir { 63 enum atmci_xfer_dir {
64 XFER_RECEIVE = 0, 64 XFER_RECEIVE = 0,
65 XFER_TRANSMIT, 65 XFER_TRANSMIT,
66 }; 66 };
67 67
68 enum atmci_pdc_buf { 68 enum atmci_pdc_buf {
69 PDC_FIRST_BUF = 0, 69 PDC_FIRST_BUF = 0,
70 PDC_SECOND_BUF, 70 PDC_SECOND_BUF,
71 }; 71 };
72 72
73 struct atmel_mci_caps { 73 struct atmel_mci_caps {
74 bool has_dma; 74 bool has_dma;
75 bool has_pdc; 75 bool has_pdc;
76 bool has_cfg_reg; 76 bool has_cfg_reg;
77 bool has_cstor_reg; 77 bool has_cstor_reg;
78 bool has_highspeed; 78 bool has_highspeed;
79 bool has_rwproof; 79 bool has_rwproof;
80 bool has_odd_clk_div; 80 bool has_odd_clk_div;
81 bool has_bad_data_ordering; 81 bool has_bad_data_ordering;
82 bool need_reset_after_xfer; 82 bool need_reset_after_xfer;
83 bool need_blksz_mul_4; 83 bool need_blksz_mul_4;
84 bool need_notbusy_for_read_ops;
84 }; 85 };
85 86
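The need_notbusy_for_read_ops flag added to the capabilities above lets the state machine wait for the controller's not-busy flag after read operations as well as writes, on controller revisions that require it. An illustrative gate (this is a sketch of the idea, not the driver's actual helper):

    #include <stdbool.h>

    /* Illustrative: whether to enter STATE_WAITING_NOTBUSY after a
     * data transfer, given the new capability bit. */
    static bool must_wait_notbusy(bool is_write, bool need_notbusy_for_read_ops)
    {
            return is_write || need_notbusy_for_read_ops;
    }
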
86 struct atmel_mci_dma { 87 struct atmel_mci_dma {
87 struct dma_chan *chan; 88 struct dma_chan *chan;
88 struct dma_async_tx_descriptor *data_desc; 89 struct dma_async_tx_descriptor *data_desc;
89 }; 90 };
90 91
/**
 * struct atmel_mci - MMC controller state shared between all slots
 * @lock: Spinlock protecting the queue and associated data.
 * @regs: Pointer to MMIO registers.
 * @sg: Scatterlist entry currently being processed by PIO or PDC code.
 * @pio_offset: Offset into the current scatterlist entry.
 * @buffer: Buffer used if we don't have the r/w proof capability. We
 *	don't have the time to switch pdc buffers so we have to use only
 *	one buffer for the full transaction.
 * @buf_size: size of the buffer.
 * @phys_buf_addr: buffer address needed for pdc.
 * @cur_slot: The slot which is currently using the controller.
 * @mrq: The request currently being processed on @cur_slot,
 *	or NULL if the controller is idle.
 * @cmd: The command currently being sent to the card, or NULL.
 * @data: The data currently being transferred, or NULL if no data
 *	transfer is in progress.
 * @data_size: just data->blocks * data->blksz.
 * @dma: DMA client state.
 * @data_chan: DMA channel being used for the current data transfer.
 * @cmd_status: Snapshot of SR taken upon completion of the current
 *	command. Only valid when EVENT_CMD_COMPLETE is pending.
 * @data_status: Snapshot of SR taken upon completion of the current
 *	data transfer. Only valid when EVENT_DATA_COMPLETE or
 *	EVENT_DATA_ERROR is pending.
 * @stop_cmdr: Value to be loaded into CMDR when the stop command is
 *	to be sent.
 * @tasklet: Tasklet running the request state machine.
 * @pending_events: Bitmask of events flagged by the interrupt handler
 *	to be processed by the tasklet.
 * @completed_events: Bitmask of events which the state machine has
 *	processed.
 * @state: Tasklet state.
 * @queue: List of slots waiting for access to the controller.
 * @need_clock_update: Update the clock rate before the next request.
 * @need_reset: Reset controller before next request.
 * @timer: Software timeout timer, compensating for the data timeout
 *	error flag, which cannot always rise in hardware.
 * @mode_reg: Value of the MR register.
 * @cfg_reg: Value of the CFG register.
 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
 *	rate and timeout calculations.
 * @mapbase: Physical address of the MMIO registers.
 * @mck: The peripheral bus clock hooked up to the MMC controller.
 * @pdev: Platform device associated with the MMC controller.
 * @slot: Slots sharing this MMC controller.
 * @caps: MCI capabilities depending on MCI version.
 * @prepare_data: function to setup MCI before data transfer which
 *	depends on MCI capabilities.
 * @submit_data: function to start data transfer which depends on MCI
 *	capabilities.
 * @stop_transfer: function to stop data transfer which depends on MCI
 *	capabilities.
 *
 * Locking
 * =======
 *
 * @lock is a softirq-safe spinlock protecting @queue as well as
 * @cur_slot, @mrq and @state. These must always be updated
 * at the same time while holding @lock.
 *
 * @lock also protects mode_reg and need_clock_update since these are
 * used to synchronize mode register updates with the queue
 * processing.
 *
 * The @mrq field of struct atmel_mci_slot is also protected by @lock,
 * and must always be written at the same time as the slot is added to
 * @queue.
 *
 * @pending_events and @completed_events are accessed using atomic bit
 * operations, so they don't need any locking.
 *
 * None of the fields touched by the interrupt handler need any
 * locking. However, ordering is important: Before EVENT_DATA_ERROR or
 * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
 * interrupts must be disabled and @data_status updated with a
 * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
 * CMDRDY interrupt must be disabled and @cmd_status updated with a
 * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
 * bytes_xfered field of @data must be written. This is ensured by
 * using barriers.
 */
struct atmel_mci {
	spinlock_t		lock;
	void __iomem		*regs;

	struct scatterlist	*sg;
	unsigned int		pio_offset;
	unsigned int		*buffer;
	unsigned int		buf_size;
	dma_addr_t		buf_phys_addr;

	struct atmel_mci_slot	*cur_slot;
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;
	unsigned int		data_size;

	struct atmel_mci_dma	dma;
	struct dma_chan		*data_chan;
	struct dma_slave_config	dma_conf;

	u32			cmd_status;
	u32			data_status;
	u32			stop_cmdr;

	struct tasklet_struct	tasklet;
	unsigned long		pending_events;
	unsigned long		completed_events;
	enum atmel_mci_state	state;
	struct list_head	queue;

	bool			need_clock_update;
	bool			need_reset;
	struct timer_list	timer;
	u32			mode_reg;
	u32			cfg_reg;
	unsigned long		bus_hz;
	unsigned long		mapbase;
	struct clk		*mck;
	struct platform_device	*pdev;

	struct atmel_mci_slot	*slot[ATMCI_MAX_NR_SLOTS];

	struct atmel_mci_caps	caps;

	u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
	void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
	void (*stop_transfer)(struct atmel_mci *host);
};

/**
 * struct atmel_mci_slot - MMC slot state
 * @mmc: The mmc_host representing this slot.
 * @host: The MMC controller this slot is using.
 * @sdc_reg: Value of SDCR to be written before using this slot.
 * @sdio_irq: SDIO irq mask for this slot.
 * @mrq: mmc_request currently being processed or waiting to be
 *	processed, or NULL when the slot is idle.
 * @queue_node: List node for placing this node in the @queue list of
 *	&struct atmel_mci.
 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
 * @flags: Random state bits associated with the slot.
 * @detect_pin: GPIO pin used for card detection, or negative if not
 *	available.
 * @wp_pin: GPIO pin used for card write protect sensing, or negative
 *	if not available.
 * @detect_is_active_high: The state of the detect pin when it is active.
 * @detect_timer: Timer used for debouncing @detect_pin interrupts.
 */
struct atmel_mci_slot {
	struct mmc_host		*mmc;
	struct atmel_mci	*host;

	u32			sdc_reg;
	u32			sdio_irq;

	struct mmc_request	*mrq;
	struct list_head	queue_node;

	unsigned int		clock;
	unsigned long		flags;
#define ATMCI_CARD_PRESENT	0
#define ATMCI_CARD_NEED_INIT	1
#define ATMCI_SHUTDOWN		2
#define ATMCI_SUSPENDED		3

	int			detect_pin;
	int			wp_pin;
	bool			detect_is_active_high;

	struct timer_list	detect_timer;
};

#define atmci_test_and_clear_pending(host, event)		\
	test_and_clear_bit(event, &host->pending_events)
#define atmci_set_completed(host, event)			\
	set_bit(event, &host->completed_events)
#define atmci_set_pending(host, event)				\
	set_bit(event, &host->pending_events)
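/*
 * Illustrative sketch (not part of the driver) of the event handshake
 * the helpers above implement, following the ordering rules documented
 * in struct atmel_mci: snapshot SR, then publish the pending bit. The
 * example_* function names are hypothetical.
 */
#if 0
static void example_irq_handler(struct atmel_mci *host, u32 status)
{
	host->cmd_status = status;	/* snapshot SR first... */
	smp_wmb();			/* ...ordered before the pending bit */
	atmci_set_pending(host, EVENT_CMD_RDY);
	tasklet_schedule(&host->tasklet);
}

static void example_tasklet_step(struct atmel_mci *host)
{
	/* The tasklet consumes each bit exactly once, no lock needed */
	if (atmci_test_and_clear_pending(host, EVENT_CMD_RDY))
		atmci_set_completed(host, EVENT_CMD_RDY);
}
#endif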

/*
 * The debugfs stuff below is mostly optimized away when
 * CONFIG_DEBUG_FS is not set.
 */
static int atmci_req_show(struct seq_file *s, void *v)
{
	struct atmel_mci_slot	*slot = s->private;
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_command	*stop;
	struct mmc_data		*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				cmd->opcode, cmd->arg, cmd->flags,
				cmd->resp[0], cmd->resp[1], cmd->resp[2],
				cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				data->bytes_xfered, data->blocks,
				data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				stop->opcode, stop->arg, stop->flags,
				stop->resp[0], stop->resp[1], stop->resp[2],
				stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int atmci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, atmci_req_show, inode->i_private);
}

static const struct file_operations atmci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= atmci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void atmci_show_status_reg(struct seq_file *s,
		const char *regname, u32 value)
{
	static const char	*sr_bit[] = {
		[0]	= "CMDRDY",
		[1]	= "RXRDY",
		[2]	= "TXRDY",
		[3]	= "BLKE",
		[4]	= "DTIP",
		[5]	= "NOTBUSY",
		[6]	= "ENDRX",
		[7]	= "ENDTX",
		[8]	= "SDIOIRQA",
		[9]	= "SDIOIRQB",
		[12]	= "SDIOWAIT",
		[14]	= "RXBUFF",
		[15]	= "TXBUFE",
		[16]	= "RINDE",
		[17]	= "RDIRE",
		[18]	= "RCRCE",
		[19]	= "RENDE",
		[20]	= "RTOE",
		[21]	= "DCRCE",
		[22]	= "DTOE",
		[23]	= "CSTOE",
		[24]	= "BLKOVRE",
		[25]	= "DMADONE",
		[26]	= "FIFOEMPTY",
		[27]	= "XFRDONE",
		[30]	= "OVRE",
		[31]	= "UNRE",
	};
	unsigned int		i;

	seq_printf(s, "%s:\t0x%08x", regname, value);
	for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
		if (value & (1 << i)) {
			if (sr_bit[i])
				seq_printf(s, " %s", sr_bit[i]);
			else
				seq_puts(s, " UNKNOWN");
		}
	}
	seq_putc(s, '\n');
}
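/*
 * Illustrative example: a status value of 0x00000021 has bits 0 and 5
 * set, so the function above would print "SR:	0x00000021 CMDRDY NOTBUSY".
 */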

static int atmci_regs_show(struct seq_file *s, void *v)
{
	struct atmel_mci	*host = s->private;
	u32			*buf;

	buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * Grab a more or less consistent snapshot. Note that we're
	 * not disabling interrupts, so IMR and SR may not be
	 * consistent.
	 */
	spin_lock_bh(&host->lock);
	clk_enable(host->mck);
	memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
	clk_disable(host->mck);
	spin_unlock_bh(&host->lock);

	seq_printf(s, "MR:\t0x%08x%s%s ",
			buf[ATMCI_MR / 4],
			buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
			buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "");
	if (host->caps.has_odd_clk_div)
		seq_printf(s, "{CLKDIV,CLKODD}=%u\n",
				((buf[ATMCI_MR / 4] & 0xff) << 1)
				| ((buf[ATMCI_MR / 4] >> 16) & 1));
	else
		seq_printf(s, "CLKDIV=%u\n",
				(buf[ATMCI_MR / 4] & 0xff));
	seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
	seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
	seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
	seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
			buf[ATMCI_BLKR / 4],
			buf[ATMCI_BLKR / 4] & 0xffff,
			(buf[ATMCI_BLKR / 4] >> 16) & 0xffff);
	if (host->caps.has_cstor_reg)
		seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);

	/* Don't read RSPR and RDR; it will consume the data there */

	atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
	atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);

	if (host->caps.has_dma) {
		u32 val;

		val = buf[ATMCI_DMA / 4];
		seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
				val, val & 3,
				((val >> 4) & 3) ?
					1 << (((val >> 4) & 3) + 1) : 1,
				val & ATMCI_DMAEN ? " DMAEN" : "");
	}
	if (host->caps.has_cfg_reg) {
		u32 val;

		val = buf[ATMCI_CFG / 4];
		seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
				val,
				val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
				val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
				val & ATMCI_CFG_HSMODE ? " HSMODE" : "",
				val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
	}

	kfree(buf);

	return 0;
}

static int atmci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, atmci_regs_show, inode->i_private);
}

static const struct file_operations atmci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= atmci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void atmci_init_debugfs(struct atmel_mci_slot *slot)
{
	struct mmc_host		*mmc = slot->mmc;
	struct atmel_mci	*host = slot->host;
	struct dentry		*root;
	struct dentry		*node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
			&atmci_regs_fops);
	if (IS_ERR(node))
		return;
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
			(u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
			(u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}

static inline unsigned int atmci_get_version(struct atmel_mci *host)
{
	return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
}

static void atmci_timeout_timer(unsigned long data)
{
	struct atmel_mci *host;

	host = (struct atmel_mci *)data;

	dev_dbg(&host->pdev->dev, "software timeout\n");

	if (host->mrq->cmd->data) {
		host->mrq->cmd->data->error = -ETIMEDOUT;
		host->data = NULL;
	} else {
		host->mrq->cmd->error = -ETIMEDOUT;
		host->cmd = NULL;
	}
	host->need_reset = 1;
	host->state = STATE_END_REQUEST;
	smp_wmb();
	tasklet_schedule(&host->tasklet);
}

static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
					unsigned int ns)
{
	/*
	 * It is easier here to use us instead of ns for the timeout,
	 * as it prevents overflows during the calculation.
	 */
	unsigned int us = DIV_ROUND_UP(ns, 1000);

	/* Maximum clock frequency is host->bus_hz/2 */
	return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
}

static void atmci_set_timeout(struct atmel_mci *host,
		struct atmel_mci_slot *slot, struct mmc_data *data)
{
	static unsigned	dtomul_to_shift[] = {
		0, 4, 7, 8, 10, 12, 16, 20
	};
	unsigned	timeout;
	unsigned	dtocyc;
	unsigned	dtomul;

	timeout = atmci_ns_to_clocks(host, data->timeout_ns)
		+ data->timeout_clks;

	for (dtomul = 0; dtomul < 8; dtomul++) {
		unsigned shift = dtomul_to_shift[dtomul];
		dtocyc = (timeout + (1 << shift) - 1) >> shift;
		if (dtocyc < 15)
			break;
	}

	if (dtomul >= 8) {
		dtomul = 7;
		dtocyc = 15;
	}

	dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
			dtocyc << dtomul_to_shift[dtomul]);
	atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc)));
}
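/*
 * Worked example (illustrative, assuming timeout_clks == 0): with
 * bus_hz = 50 MHz and timeout_ns = 100 ms, atmci_ns_to_clocks() gives
 * 100000 us * DIV_ROUND_UP(50000000, 2000000) = 100000 * 25 = 2500000
 * clocks. The loop above then picks the smallest multiplier whose shift
 * brings DTOCYC below 15: 2500000 >> 20 rounds up to 3, so DTOMUL = 7
 * and DTOCYC = 3, i.e. a programmed timeout of 3 << 20 = 3145728 clocks.
 */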

/*
 * Return mask with command flags to be enabled for this command.
 */
static u32 atmci_prepare_command(struct mmc_host *mmc,
				 struct mmc_command *cmd)
{
	struct mmc_data	*data;
	u32		cmdr;

	cmd->error = -EINPROGRESS;

	cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			cmdr |= ATMCI_CMDR_RSPTYP_136BIT;
		else
			cmdr |= ATMCI_CMDR_RSPTYP_48BIT;
	}

	/*
	 * This should really be MAXLAT_5 for CMD2 and ACMD41, but
	 * it's too difficult to determine whether this is an ACMD or
	 * not. Better make it 64.
	 */
	cmdr |= ATMCI_CMDR_MAXLAT_64CYC;

	if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= ATMCI_CMDR_OPDCMD;

	data = cmd->data;
	if (data) {
		cmdr |= ATMCI_CMDR_START_XFER;

		if (cmd->opcode == SD_IO_RW_EXTENDED) {
			cmdr |= ATMCI_CMDR_SDIO_BLOCK;
		} else {
			if (data->flags & MMC_DATA_STREAM)
				cmdr |= ATMCI_CMDR_STREAM;
			else if (data->blocks > 1)
				cmdr |= ATMCI_CMDR_MULTI_BLOCK;
			else
				cmdr |= ATMCI_CMDR_BLOCK;
		}

		if (data->flags & MMC_DATA_READ)
			cmdr |= ATMCI_CMDR_TRDIR_READ;
	}

	return cmdr;
}
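/*
 * Illustrative example: for a single-block read such as CMD17 with a
 * 48-bit R1 response on a push-pull bus, the mask built above is
 *
 *	ATMCI_CMDR_CMDNB(17) | ATMCI_CMDR_RSPTYP_48BIT |
 *	ATMCI_CMDR_MAXLAT_64CYC | ATMCI_CMDR_START_XFER |
 *	ATMCI_CMDR_BLOCK | ATMCI_CMDR_TRDIR_READ
 */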

static void atmci_send_command(struct atmel_mci *host,
		struct mmc_command *cmd, u32 cmd_flags)
{
	WARN_ON(host->cmd);
	host->cmd = cmd;

	dev_vdbg(&host->pdev->dev,
			"start command: ARGR=0x%08x CMDR=0x%08x\n",
			cmd->arg, cmd_flags);

	atmci_writel(host, ATMCI_ARGR, cmd->arg);
	atmci_writel(host, ATMCI_CMDR, cmd_flags);
}

static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
{
	dev_dbg(&host->pdev->dev, "send stop command\n");
	atmci_send_command(host, data->stop, host->stop_cmdr);
	atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
}

/*
 * Configure the given PDC buffer, taking care of alignment issues.
 * Update host->data_size and host->sg.
 */
static void atmci_pdc_set_single_buf(struct atmel_mci *host,
	enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
{
	u32 pointer_reg, counter_reg;
	unsigned int buf_size;

	if (dir == XFER_RECEIVE) {
		pointer_reg = ATMEL_PDC_RPR;
		counter_reg = ATMEL_PDC_RCR;
	} else {
		pointer_reg = ATMEL_PDC_TPR;
		counter_reg = ATMEL_PDC_TCR;
	}

	if (buf_nb == PDC_SECOND_BUF) {
		pointer_reg += ATMEL_PDC_SCND_BUF_OFF;
		counter_reg += ATMEL_PDC_SCND_BUF_OFF;
	}

	if (!host->caps.has_rwproof) {
		buf_size = host->buf_size;
		atmci_writel(host, pointer_reg, host->buf_phys_addr);
	} else {
		buf_size = sg_dma_len(host->sg);
		atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
	}

	if (host->data_size <= buf_size) {
		if (host->data_size & 0x3) {
			/* If size is not a multiple of 4, transfer bytes */
			atmci_writel(host, counter_reg, host->data_size);
			atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
		} else {
			/* Else transfer 32-bit words */
			atmci_writel(host, counter_reg, host->data_size / 4);
		}
		host->data_size = 0;
	} else {
		/* We assume the size of a page is 32-bit aligned */
		atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
		host->data_size -= sg_dma_len(host->sg);
		if (host->data_size)
			host->sg = sg_next(host->sg);
	}
}

/*
 * Configure PDC buffers according to the data size, i.e. configure one or
 * two buffers. Don't use this function if you want to configure only the
 * second buffer. In that case, use atmci_pdc_set_single_buf.
 */
static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
{
	atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
	if (host->data_size)
		atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
}
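/*
 * Illustrative example (assumptions noted): with rwproof available and a
 * 12 KiB read split over three 4 KiB scatterlist entries, the call above
 * loads sg[0] into RPR/RCR (4096 / 4 words) and sg[1] into the second
 * buffer registers (RPR/RCR + ATMEL_PDC_SCND_BUF_OFF); the remaining
 * entry is presumably reloaded later, one buffer at a time, from the
 * ENDRX/ENDTX interrupt path as the PDC drains the programmed buffers.
 */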

/*
 * Unmap sg lists, called when transfer is finished.
 */
static void atmci_pdc_cleanup(struct atmel_mci *host)
{
	struct mmc_data		*data = host->data;

	if (data)
		dma_unmap_sg(&host->pdev->dev,
				data->sg, data->sg_len,
				((data->flags & MMC_DATA_WRITE)
				 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
}

/*
 * Disable PDC transfers. Update pending flags to EVENT_XFER_COMPLETE after
 * having received ATMCI_TXBUFE or ATMCI_RXBUFF interrupt. Enable ATMCI_NOTBUSY
 * interrupt needed for both transfer directions.
 */
static void atmci_pdc_complete(struct atmel_mci *host)
{
	int transfer_size = host->data->blocks * host->data->blksz;
	int i;

	atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

	if ((!host->caps.has_rwproof)
	    && (host->data->flags & MMC_DATA_READ)) {
		if (host->caps.has_bad_data_ordering)
			for (i = 0; i < transfer_size; i++)
				host->buffer[i] = swab32(host->buffer[i]);
		sg_copy_from_buffer(host->data->sg, host->data->sg_len,
				host->buffer, transfer_size);
	}

	atmci_pdc_cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point trying
	 * to send the stop command or waiting for NBUSY in this case.
	 */
	if (host->data) {
		dev_dbg(&host->pdev->dev,
			"(%s) set pending xfer complete\n", __func__);
		atmci_set_pending(host, EVENT_XFER_COMPLETE);
		tasklet_schedule(&host->tasklet);
	}
}
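/*
 * Illustrative note: on hosts with has_bad_data_ordering, the swab32()
 * pass above reverses the byte order of each word, so a word fetched
 * from the bounce buffer as 0x11223344 is stored back as 0x44332211
 * before being copied into the scatterlist.
 */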

static void atmci_dma_cleanup(struct atmel_mci *host)
{
	struct mmc_data		*data = host->data;

	if (data)
		dma_unmap_sg(host->dma.chan->device->dev,
				data->sg, data->sg_len,
				((data->flags & MMC_DATA_WRITE)
				 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
}

/*
 * This function is called by the DMA driver from tasklet context.
 */
static void atmci_dma_complete(void *arg)
{
	struct atmel_mci	*host = arg;
	struct mmc_data		*data = host->data;

	dev_vdbg(&host->pdev->dev, "DMA complete\n");

	if (host->caps.has_dma)
		/* Disable DMA hardware handshaking on MCI */
		atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);

	atmci_dma_cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point trying
	 * to send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		dev_dbg(&host->pdev->dev,
			"(%s) set pending xfer complete\n", __func__);
		atmci_set_pending(host, EVENT_XFER_COMPLETE);
		tasklet_schedule(&host->tasklet);

		/*
		 * Regardless of what the documentation says, we have
		 * to wait for NOTBUSY even after block read
		 * operations.
		 *
		 * When the DMA transfer is complete, the controller
		 * may still be reading the CRC from the card, i.e.
		 * the data transfer is still in progress and we
		 * haven't seen all the potential error bits yet.
		 *
		 * The interrupt handler will schedule a different
		 * tasklet to finish things up when the data transfer
		 * is completely done.
		 *
		 * We may not complete the mmc request here anyway
		 * because the mmc layer may call back and cause us to
		 * violate the "don't submit new operations from the
		 * completion callback" rule of the dma engine
		 * framework.
		 */
		atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
	}
}

/*
 * Returns a mask of interrupt flags to be enabled after the whole
 * request has been prepared.
 */
static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
{
	u32 iflags;

	data->error = -EINPROGRESS;

	host->sg = data->sg;
	host->data = data;
	host->data_chan = NULL;

	iflags = ATMCI_DATA_ERROR_FLAGS;

	/*
	 * Errata: MMC data write operation with less than 12
	 * bytes is impossible.
	 *
	 * Errata: MCI Transmit Data Register (TDR) FIFO
	 * corruption when length is not multiple of 4.
	 */
	if (data->blocks * data->blksz < 12
			|| (data->blocks * data->blksz) & 3)
		host->need_reset = true;

	host->pio_offset = 0;
	if (data->flags & MMC_DATA_READ)
		iflags |= ATMCI_RXRDY;
	else
		iflags |= ATMCI_TXRDY;

	return iflags;
}

/*
 * Set interrupt flags and set block length into the MCI mode register even
 * if this value is also accessible in the MCI block register. It seems to be
 * necessary before the High Speed MCI version. It also maps sg and configures
 * PDC registers.
 */
static u32
atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
{
	u32 iflags, tmp;
	unsigned int sg_len;
	enum dma_data_direction dir;
	int i;

	data->error = -EINPROGRESS;

	host->data = data;
	host->sg = data->sg;
	iflags = ATMCI_DATA_ERROR_FLAGS;

	/* Enable pdc mode */
	atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
	} else {
		dir = DMA_TO_DEVICE;
		iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
	}

	/* Set BLKLEN */
	tmp = atmci_readl(host, ATMCI_MR);
	tmp &= 0x0000ffff;
	tmp |= ATMCI_BLKLEN(data->blksz);
	atmci_writel(host, ATMCI_MR, tmp);

	/* Configure PDC */
	host->data_size = data->blocks * data->blksz;
	sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);

	if ((!host->caps.has_rwproof)
	    && (host->data->flags & MMC_DATA_WRITE)) {
		sg_copy_to_buffer(host->data->sg, host->data->sg_len,
				host->buffer, host->data_size);
		if (host->caps.has_bad_data_ordering)
			for (i = 0; i < host->data_size; i++)
				host->buffer[i] = swab32(host->buffer[i]);
	}

	if (host->data_size)
		atmci_pdc_set_both_buf(host,
			((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));

	return iflags;
}

static u32
atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
	struct dma_chan			*chan;
	struct dma_async_tx_descriptor	*desc;
	struct scatterlist		*sg;
	unsigned int			i;
	enum dma_data_direction		direction;
	enum dma_transfer_direction	slave_dirn;
	unsigned int			sglen;
	u32				maxburst;
	u32				iflags;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	iflags = ATMCI_DATA_ERROR_FLAGS;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
		return atmci_prepare_data(host, data);
	if (data->blksz & 3)
		return atmci_prepare_data(host, data);

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return atmci_prepare_data(host, data);
	}

	/* If we don't have a channel, we can't do DMA */
	chan = host->dma.chan;
	if (chan)
		host->data_chan = chan;

	if (!chan)
		return -ENODEV;

	if (data->flags & MMC_DATA_READ) {
		direction = DMA_FROM_DEVICE;
		host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
		maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
	} else {
		direction = DMA_TO_DEVICE;
		host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
		maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
	}

	atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | ATMCI_DMAEN);

	sglen = dma_map_sg(chan->device->dev, data->sg,
			data->sg_len, direction);

	dmaengine_slave_config(chan, &host->dma_conf);
	desc = dmaengine_prep_slave_sg(chan,
			data->sg, sglen, slave_dirn,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	host->dma.data_desc = desc;
	desc->callback = atmci_dma_complete;
	desc->callback_param = host;

	return iflags;
unmap_exit:
	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
	return -ENOMEM;
}
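/*
 * Illustrative note on the fallback checks above: a 12-byte transfer
 * (below the 16-byte ATMCI_DMA_THRESHOLD), a block size that is not a
 * multiple of 4, or a scatterlist entry starting at, say, a 2-byte
 * offset all cause the request to be routed to the PIO path via
 * atmci_prepare_data() instead of the dmaengine path.
 */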

static void
atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
{
	return;
}

/*
 * Start PDC according to transfer direction.
 */
static void
atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ)
		atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
	else
		atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
}

static void
atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
	struct dma_chan			*chan = host->data_chan;
	struct dma_async_tx_descriptor	*desc = host->dma.data_desc;

	if (chan) {
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}
}

static void atmci_stop_transfer(struct atmel_mci *host)
{
	dev_dbg(&host->pdev->dev,
		"(%s) set pending xfer complete\n", __func__);
	atmci_set_pending(host, EVENT_XFER_COMPLETE);
	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}

/*
 * Stop data transfer because error(s) occurred.
 */
static void atmci_stop_transfer_pdc(struct atmel_mci *host)
{
	atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
}

static void atmci_stop_transfer_dma(struct atmel_mci *host)
{
	struct dma_chan *chan = host->data_chan;

	if (chan) {
		dmaengine_terminate_all(chan);
		atmci_dma_cleanup(host);
	} else {
		/* Data transfer was stopped by the interrupt handler */
		dev_dbg(&host->pdev->dev,
			"(%s) set pending xfer complete\n", __func__);
		atmci_set_pending(host, EVENT_XFER_COMPLETE);
		atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
	}
}

/*
 * Start a request: prepare data if needed, prepare the command and activate
 * interrupts.
 */
static void atmci_start_request(struct atmel_mci *host,
		struct atmel_mci_slot *slot)
{
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;
	u32			iflags;
	u32			cmdflags;

	mrq = slot->mrq;
	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;

	dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);

	if (host->need_reset || host->caps.need_reset_after_xfer) {
		iflags = atmci_readl(host, ATMCI_IMR);
		iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
		atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
		atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
		atmci_writel(host, ATMCI_MR, host->mode_reg);
		if (host->caps.has_cfg_reg)
			atmci_writel(host, ATMCI_CFG, host->cfg_reg);
		atmci_writel(host, ATMCI_IER, iflags);
		host->need_reset = false;
	}
	atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);

	iflags = atmci_readl(host, ATMCI_IMR);
	if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
		dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
				iflags);

	if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
		/* Send init sequence (74 clock cycles) */
		atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT);
		while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY))
			cpu_relax();
	}
	iflags = 0;
	data = mrq->data;
	if (data) {
		atmci_set_timeout(host, slot, data);

		/* Must set block count/size before sending command */
		atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks)
				| ATMCI_BLKLEN(data->blksz));
		dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
			ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz));

		iflags |= host->prepare_data(host, data);
	}

	iflags |= ATMCI_CMDRDY;
	cmd = mrq->cmd;
	cmdflags = atmci_prepare_command(slot->mmc, cmd);
	atmci_send_command(host, cmd, cmdflags);

	if (data)
		host->submit_data(host, data);

	if (mrq->stop) {
		host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
		host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
		if (!(data->flags & MMC_DATA_WRITE))
1121 host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ; 1122 host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
1122 if (data->flags & MMC_DATA_STREAM) 1123 if (data->flags & MMC_DATA_STREAM)
1123 host->stop_cmdr |= ATMCI_CMDR_STREAM; 1124 host->stop_cmdr |= ATMCI_CMDR_STREAM;
1124 else 1125 else
1125 host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK; 1126 host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
1126 } 1127 }
1127 1128
1128 /* 1129 /*
1129 * We could have enabled interrupts earlier, but I suspect 1130 * We could have enabled interrupts earlier, but I suspect
1130 * that would open up a nice can of interesting race 1131 * that would open up a nice can of interesting race
1131 * conditions (e.g. command and data complete, but stop not 1132 * conditions (e.g. command and data complete, but stop not
1132 * prepared yet.) 1133 * prepared yet.)
1133 */ 1134 */
1134 atmci_writel(host, ATMCI_IER, iflags); 1135 atmci_writel(host, ATMCI_IER, iflags);
1135 1136
1136 mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000)); 1137 mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
1137 } 1138 }
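
/* Editorial sketch: the programming order enforced by atmci_start_request()
 * above, reconstructed from the code (not an authoritative datasheet
 * sequence):
 *   1. optional SWRST + MR/CFG restore when need_reset (or the
 *      need_reset_after_xfer erratum) is set, preserving SDIO IRQ mask bits
 *   2. slot selection via SDCR
 *   3. optional 74-clock init sequence for a freshly powered card
 *   4. BLKR (block count/size) written before CMDR, presumably because the
 *      controller latches the transfer geometry when the command is issued
 *   5. IER written last, only after the stop command has been prepared, to
 *      avoid racing the completion handlers
 */
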
1138 1139
1139 static void atmci_queue_request(struct atmel_mci *host, 1140 static void atmci_queue_request(struct atmel_mci *host,
1140 struct atmel_mci_slot *slot, struct mmc_request *mrq) 1141 struct atmel_mci_slot *slot, struct mmc_request *mrq)
1141 { 1142 {
1142 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", 1143 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1143 host->state); 1144 host->state);
1144 1145
1145 spin_lock_bh(&host->lock); 1146 spin_lock_bh(&host->lock);
1146 slot->mrq = mrq; 1147 slot->mrq = mrq;
1147 if (host->state == STATE_IDLE) { 1148 if (host->state == STATE_IDLE) {
1148 host->state = STATE_SENDING_CMD; 1149 host->state = STATE_SENDING_CMD;
1149 atmci_start_request(host, slot); 1150 atmci_start_request(host, slot);
1150 } else { 1151 } else {
1151 dev_dbg(&host->pdev->dev, "queue request\n"); 1152 dev_dbg(&host->pdev->dev, "queue request\n");
1152 list_add_tail(&slot->queue_node, &host->queue); 1153 list_add_tail(&slot->queue_node, &host->queue);
1153 } 1154 }
1154 spin_unlock_bh(&host->lock); 1155 spin_unlock_bh(&host->lock);
1155 } 1156 }
1156 1157
1157 static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq) 1158 static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1158 { 1159 {
1159 struct atmel_mci_slot *slot = mmc_priv(mmc); 1160 struct atmel_mci_slot *slot = mmc_priv(mmc);
1160 struct atmel_mci *host = slot->host; 1161 struct atmel_mci *host = slot->host;
1161 struct mmc_data *data; 1162 struct mmc_data *data;
1162 1163
1163 WARN_ON(slot->mrq); 1164 WARN_ON(slot->mrq);
1164 dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode); 1165 dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
1165 1166
1166 /* 1167 /*
1167 * We may "know" the card is gone even though there's still an 1168 * We may "know" the card is gone even though there's still an
1168 * electrical connection. If so, we really need to communicate 1169 * electrical connection. If so, we really need to communicate
1169 * this to the MMC core since there won't be any more 1170 * this to the MMC core since there won't be any more
1170 * interrupts as the card is completely removed. Otherwise, 1171 * interrupts as the card is completely removed. Otherwise,
1171 * the MMC core might believe the card is still there even 1172 * the MMC core might believe the card is still there even
1172 * though the card was just removed very slowly. 1173 * though the card was just removed very slowly.
1173 */ 1174 */
1174 if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) { 1175 if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
1175 mrq->cmd->error = -ENOMEDIUM; 1176 mrq->cmd->error = -ENOMEDIUM;
1176 mmc_request_done(mmc, mrq); 1177 mmc_request_done(mmc, mrq);
1177 return; 1178 return;
1178 } 1179 }
1179 1180
1180 /* We don't support multiple blocks of weird lengths. */ 1181 /* We don't support multiple blocks of weird lengths. */
1181 data = mrq->data; 1182 data = mrq->data;
1182 if (data && data->blocks > 1 && data->blksz & 3) { 1183 if (data && data->blocks > 1 && data->blksz & 3) {
1183 mrq->cmd->error = -EINVAL; 1184 mrq->cmd->error = -EINVAL;
1184 mmc_request_done(mmc, mrq); 1185 mmc_request_done(mmc, mrq);
1185 } 1186 }
1186 1187
1187 atmci_queue_request(host, slot, mrq); 1188 atmci_queue_request(host, slot, mrq);
1188 } 1189 }
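
/* Editorial sketch, not part of this commit: the weird-block-length check
 * above completes the request with -EINVAL but appears to fall through to
 * atmci_queue_request(). The conventional guard-clause shape returns right
 * after mmc_request_done():
 */
	if (data && data->blocks > 1 && data->blksz & 3) {
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;		/* don't queue a request we just completed */
	}
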
1189 1190
1190 static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1191 static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1191 { 1192 {
1192 struct atmel_mci_slot *slot = mmc_priv(mmc); 1193 struct atmel_mci_slot *slot = mmc_priv(mmc);
1193 struct atmel_mci *host = slot->host; 1194 struct atmel_mci *host = slot->host;
1194 unsigned int i; 1195 unsigned int i;
1195 1196
1196 slot->sdc_reg &= ~ATMCI_SDCBUS_MASK; 1197 slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
1197 switch (ios->bus_width) { 1198 switch (ios->bus_width) {
1198 case MMC_BUS_WIDTH_1: 1199 case MMC_BUS_WIDTH_1:
1199 slot->sdc_reg |= ATMCI_SDCBUS_1BIT; 1200 slot->sdc_reg |= ATMCI_SDCBUS_1BIT;
1200 break; 1201 break;
1201 case MMC_BUS_WIDTH_4: 1202 case MMC_BUS_WIDTH_4:
1202 slot->sdc_reg |= ATMCI_SDCBUS_4BIT; 1203 slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
1203 break; 1204 break;
1204 } 1205 }
1205 1206
1206 if (ios->clock) { 1207 if (ios->clock) {
1207 unsigned int clock_min = ~0U; 1208 unsigned int clock_min = ~0U;
1208 u32 clkdiv; 1209 u32 clkdiv;
1209 1210
1210 spin_lock_bh(&host->lock); 1211 spin_lock_bh(&host->lock);
1211 if (!host->mode_reg) { 1212 if (!host->mode_reg) {
1212 clk_enable(host->mck); 1213 clk_enable(host->mck);
1213 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); 1214 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1214 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); 1215 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1215 if (host->caps.has_cfg_reg) 1216 if (host->caps.has_cfg_reg)
1216 atmci_writel(host, ATMCI_CFG, host->cfg_reg); 1217 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1217 } 1218 }
1218 1219
1219 /* 1220 /*
1220 * Use a mirror of ios->clock to prevent a race with the mmc 1221 * Use a mirror of ios->clock to prevent a race with the mmc
1221 * core ios update when finding the minimum. 1222 * core ios update when finding the minimum.
1222 */ 1223 */
1223 slot->clock = ios->clock; 1224 slot->clock = ios->clock;
1224 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { 1225 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1225 if (host->slot[i] && host->slot[i]->clock 1226 if (host->slot[i] && host->slot[i]->clock
1226 && host->slot[i]->clock < clock_min) 1227 && host->slot[i]->clock < clock_min)
1227 clock_min = host->slot[i]->clock; 1228 clock_min = host->slot[i]->clock;
1228 } 1229 }
1229 1230
1230 /* Calculate clock divider */ 1231 /* Calculate clock divider */
1231 if (host->caps.has_odd_clk_div) { 1232 if (host->caps.has_odd_clk_div) {
1232 clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2; 1233 clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
1233 if (clkdiv > 511) { 1234 if (clkdiv > 511) {
1234 dev_warn(&mmc->class_dev, 1235 dev_warn(&mmc->class_dev,
1235 "clock %u too slow; using %lu\n", 1236 "clock %u too slow; using %lu\n",
1236 clock_min, host->bus_hz / (511 + 2)); 1237 clock_min, host->bus_hz / (511 + 2));
1237 clkdiv = 511; 1238 clkdiv = 511;
1238 } 1239 }
1239 host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1) 1240 host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1)
1240 | ATMCI_MR_CLKODD(clkdiv & 1); 1241 | ATMCI_MR_CLKODD(clkdiv & 1);
1241 } else { 1242 } else {
1242 clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1; 1243 clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
1243 if (clkdiv > 255) { 1244 if (clkdiv > 255) {
1244 dev_warn(&mmc->class_dev, 1245 dev_warn(&mmc->class_dev,
1245 "clock %u too slow; using %lu\n", 1246 "clock %u too slow; using %lu\n",
1246 clock_min, host->bus_hz / (2 * 256)); 1247 clock_min, host->bus_hz / (2 * 256));
1247 clkdiv = 255; 1248 clkdiv = 255;
1248 } 1249 }
1249 host->mode_reg = ATMCI_MR_CLKDIV(clkdiv); 1250 host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
1250 } 1251 }
1251 1252
1252 /* 1253 /*
1253 * WRPROOF and RDPROOF prevent overruns/underruns by 1254 * WRPROOF and RDPROOF prevent overruns/underruns by
1254 * stopping the clock when the FIFO is full/empty. 1255 * stopping the clock when the FIFO is full/empty.
1255 * This state is not expected to last for long. 1256 * This state is not expected to last for long.
1256 */ 1257 */
1257 if (host->caps.has_rwproof) 1258 if (host->caps.has_rwproof)
1258 host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF); 1259 host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);
1259 1260
1260 if (host->caps.has_cfg_reg) { 1261 if (host->caps.has_cfg_reg) {
1261 /* set up High Speed mode according to the card's capabilities */ 1262 /* set up High Speed mode according to the card's capabilities */
1262 if (ios->timing == MMC_TIMING_SD_HS) 1263 if (ios->timing == MMC_TIMING_SD_HS)
1263 host->cfg_reg |= ATMCI_CFG_HSMODE; 1264 host->cfg_reg |= ATMCI_CFG_HSMODE;
1264 else 1265 else
1265 host->cfg_reg &= ~ATMCI_CFG_HSMODE; 1266 host->cfg_reg &= ~ATMCI_CFG_HSMODE;
1266 } 1267 }
1267 1268
1268 if (list_empty(&host->queue)) { 1269 if (list_empty(&host->queue)) {
1269 atmci_writel(host, ATMCI_MR, host->mode_reg); 1270 atmci_writel(host, ATMCI_MR, host->mode_reg);
1270 if (host->caps.has_cfg_reg) 1271 if (host->caps.has_cfg_reg)
1271 atmci_writel(host, ATMCI_CFG, host->cfg_reg); 1272 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1272 } else { 1273 } else {
1273 host->need_clock_update = true; 1274 host->need_clock_update = true;
1274 } 1275 }
1275 1276
1276 spin_unlock_bh(&host->lock); 1277 spin_unlock_bh(&host->lock);
1277 } else { 1278 } else {
1278 bool any_slot_active = false; 1279 bool any_slot_active = false;
1279 1280
1280 spin_lock_bh(&host->lock); 1281 spin_lock_bh(&host->lock);
1281 slot->clock = 0; 1282 slot->clock = 0;
1282 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { 1283 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1283 if (host->slot[i] && host->slot[i]->clock) { 1284 if (host->slot[i] && host->slot[i]->clock) {
1284 any_slot_active = true; 1285 any_slot_active = true;
1285 break; 1286 break;
1286 } 1287 }
1287 } 1288 }
1288 if (!any_slot_active) { 1289 if (!any_slot_active) {
1289 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS); 1290 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
1290 if (host->mode_reg) { 1291 if (host->mode_reg) {
1291 atmci_readl(host, ATMCI_MR); 1292 atmci_readl(host, ATMCI_MR);
1292 clk_disable(host->mck); 1293 clk_disable(host->mck);
1293 } 1294 }
1294 host->mode_reg = 0; 1295 host->mode_reg = 0;
1295 } 1296 }
1296 spin_unlock_bh(&host->lock); 1297 spin_unlock_bh(&host->lock);
1297 } 1298 }
1298 1299
1299 switch (ios->power_mode) { 1300 switch (ios->power_mode) {
1300 case MMC_POWER_UP: 1301 case MMC_POWER_UP:
1301 set_bit(ATMCI_CARD_NEED_INIT, &slot->flags); 1302 set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
1302 break; 1303 break;
1303 default: 1304 default:
1304 /* 1305 /*
1305 * TODO: None of the currently available AVR32-based 1306 * TODO: None of the currently available AVR32-based
1306 * boards allow MMC power to be turned off. Implement 1307 * boards allow MMC power to be turned off. Implement
1307 * power control when this can be tested properly. 1308 * power control when this can be tested properly.
1308 * 1309 *
1309 * We also need to hook this into the clock management 1310 * We also need to hook this into the clock management
1310 * somehow so that newly inserted cards aren't 1311 * somehow so that newly inserted cards aren't
1311 * subjected to a fast clock before we have a chance 1312 * subjected to a fast clock before we have a chance
1312 * to figure out what the maximum rate is. Currently, 1313 * to figure out what the maximum rate is. Currently,
1313 * there's no way to avoid this, and there never will 1314 * there's no way to avoid this, and there never will
1314 * be for boards that don't support power control. 1315 * be for boards that don't support power control.
1315 */ 1316 */
1316 break; 1317 break;
1317 } 1318 }
1318 } 1319 }
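
/* Editorial sketch of the bus frequency implied by the divider math in
 * atmci_set_ios() above. atmci_clk_rate_sketch() is a hypothetical helper,
 * and the hardware formulas are inferred from the DIV_ROUND_UP()
 * expressions, not taken from a datasheet. */
static unsigned long atmci_clk_rate_sketch(unsigned long bus_hz,
		u32 clkdiv, bool has_odd_clk_div)
{
	if (has_odd_clk_div)
		/* f = bus_hz / (clkdiv + 2); e.g. 100 MHz, clkdiv = 2 -> 25 MHz */
		return bus_hz / (clkdiv + 2);
	/* f = bus_hz / (2 * (clkdiv + 1)); e.g. 100 MHz, clkdiv = 0 -> 50 MHz */
	return bus_hz / (2 * (clkdiv + 1));
}
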
1319 1320
1320 static int atmci_get_ro(struct mmc_host *mmc) 1321 static int atmci_get_ro(struct mmc_host *mmc)
1321 { 1322 {
1322 int read_only = -ENOSYS; 1323 int read_only = -ENOSYS;
1323 struct atmel_mci_slot *slot = mmc_priv(mmc); 1324 struct atmel_mci_slot *slot = mmc_priv(mmc);
1324 1325
1325 if (gpio_is_valid(slot->wp_pin)) { 1326 if (gpio_is_valid(slot->wp_pin)) {
1326 read_only = gpio_get_value(slot->wp_pin); 1327 read_only = gpio_get_value(slot->wp_pin);
1327 dev_dbg(&mmc->class_dev, "card is %s\n", 1328 dev_dbg(&mmc->class_dev, "card is %s\n",
1328 read_only ? "read-only" : "read-write"); 1329 read_only ? "read-only" : "read-write");
1329 } 1330 }
1330 1331
1331 return read_only; 1332 return read_only;
1332 } 1333 }
1333 1334
1334 static int atmci_get_cd(struct mmc_host *mmc) 1335 static int atmci_get_cd(struct mmc_host *mmc)
1335 { 1336 {
1336 int present = -ENOSYS; 1337 int present = -ENOSYS;
1337 struct atmel_mci_slot *slot = mmc_priv(mmc); 1338 struct atmel_mci_slot *slot = mmc_priv(mmc);
1338 1339
1339 if (gpio_is_valid(slot->detect_pin)) { 1340 if (gpio_is_valid(slot->detect_pin)) {
1340 present = !(gpio_get_value(slot->detect_pin) ^ 1341 present = !(gpio_get_value(slot->detect_pin) ^
1341 slot->detect_is_active_high); 1342 slot->detect_is_active_high);
1342 dev_dbg(&mmc->class_dev, "card is %spresent\n", 1343 dev_dbg(&mmc->class_dev, "card is %spresent\n",
1343 present ? "" : "not "); 1344 present ? "" : "not ");
1344 } 1345 }
1345 1346
1346 return present; 1347 return present;
1347 } 1348 }
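
/* Editorial sketch: truth table for the XOR in atmci_get_cd() above,
 * present = !(gpio_get_value(pin) ^ detect_is_active_high):
 *
 *   gpio value   detect_is_active_high   present
 *        0                 0                1
 *        1                 0                0
 *        0                 1                0
 *        1                 1                1
 */
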
1348 1349
1349 static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable) 1350 static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1350 { 1351 {
1351 struct atmel_mci_slot *slot = mmc_priv(mmc); 1352 struct atmel_mci_slot *slot = mmc_priv(mmc);
1352 struct atmel_mci *host = slot->host; 1353 struct atmel_mci *host = slot->host;
1353 1354
1354 if (enable) 1355 if (enable)
1355 atmci_writel(host, ATMCI_IER, slot->sdio_irq); 1356 atmci_writel(host, ATMCI_IER, slot->sdio_irq);
1356 else 1357 else
1357 atmci_writel(host, ATMCI_IDR, slot->sdio_irq); 1358 atmci_writel(host, ATMCI_IDR, slot->sdio_irq);
1358 } 1359 }
1359 1360
1360 static const struct mmc_host_ops atmci_ops = { 1361 static const struct mmc_host_ops atmci_ops = {
1361 .request = atmci_request, 1362 .request = atmci_request,
1362 .set_ios = atmci_set_ios, 1363 .set_ios = atmci_set_ios,
1363 .get_ro = atmci_get_ro, 1364 .get_ro = atmci_get_ro,
1364 .get_cd = atmci_get_cd, 1365 .get_cd = atmci_get_cd,
1365 .enable_sdio_irq = atmci_enable_sdio_irq, 1366 .enable_sdio_irq = atmci_enable_sdio_irq,
1366 }; 1367 };
1367 1368
1368 /* Called with host->lock held */ 1369 /* Called with host->lock held */
1369 static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq) 1370 static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
1370 __releases(&host->lock) 1371 __releases(&host->lock)
1371 __acquires(&host->lock) 1372 __acquires(&host->lock)
1372 { 1373 {
1373 struct atmel_mci_slot *slot = NULL; 1374 struct atmel_mci_slot *slot = NULL;
1374 struct mmc_host *prev_mmc = host->cur_slot->mmc; 1375 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1375 1376
1376 WARN_ON(host->cmd || host->data); 1377 WARN_ON(host->cmd || host->data);
1377 1378
1378 /* 1379 /*
1379 * Update the MMC clock rate if necessary. This may be 1380 * Update the MMC clock rate if necessary. This may be
1380 * necessary if set_ios() is called when a different slot is 1381 * necessary if set_ios() is called when a different slot is
1381 * busy transferring data. 1382 * busy transferring data.
1382 */ 1383 */
1383 if (host->need_clock_update) { 1384 if (host->need_clock_update) {
1384 atmci_writel(host, ATMCI_MR, host->mode_reg); 1385 atmci_writel(host, ATMCI_MR, host->mode_reg);
1385 if (host->caps.has_cfg_reg) 1386 if (host->caps.has_cfg_reg)
1386 atmci_writel(host, ATMCI_CFG, host->cfg_reg); 1387 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1387 } 1388 }
1388 1389
1389 host->cur_slot->mrq = NULL; 1390 host->cur_slot->mrq = NULL;
1390 host->mrq = NULL; 1391 host->mrq = NULL;
1391 if (!list_empty(&host->queue)) { 1392 if (!list_empty(&host->queue)) {
1392 slot = list_entry(host->queue.next, 1393 slot = list_entry(host->queue.next,
1393 struct atmel_mci_slot, queue_node); 1394 struct atmel_mci_slot, queue_node);
1394 list_del(&slot->queue_node); 1395 list_del(&slot->queue_node);
1395 dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n", 1396 dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
1396 mmc_hostname(slot->mmc)); 1397 mmc_hostname(slot->mmc));
1397 host->state = STATE_SENDING_CMD; 1398 host->state = STATE_SENDING_CMD;
1398 atmci_start_request(host, slot); 1399 atmci_start_request(host, slot);
1399 } else { 1400 } else {
1400 dev_vdbg(&host->pdev->dev, "list empty\n"); 1401 dev_vdbg(&host->pdev->dev, "list empty\n");
1401 host->state = STATE_IDLE; 1402 host->state = STATE_IDLE;
1402 } 1403 }
1403 1404
1404 del_timer(&host->timer); 1405 del_timer(&host->timer);
1405 1406
1406 spin_unlock(&host->lock); 1407 spin_unlock(&host->lock);
1407 mmc_request_done(prev_mmc, mrq); 1408 mmc_request_done(prev_mmc, mrq);
1408 spin_lock(&host->lock); 1409 spin_lock(&host->lock);
1409 } 1410 }
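
/* Editorial sketch: why atmci_request_end() drops host->lock around
 * mmc_request_done(). The core's completion path may submit the next
 * request synchronously, re-entering atmci_request() and hence
 * atmci_queue_request(), which takes the same lock. The
 * __releases()/__acquires() annotations document this dance for sparse.
 */
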
1410 1411
1411 static void atmci_command_complete(struct atmel_mci *host, 1412 static void atmci_command_complete(struct atmel_mci *host,
1412 struct mmc_command *cmd) 1413 struct mmc_command *cmd)
1413 { 1414 {
1414 u32 status = host->cmd_status; 1415 u32 status = host->cmd_status;
1415 1416
1416 /* Read the response from the card (up to 16 bytes) */ 1417 /* Read the response from the card (up to 16 bytes) */
1417 cmd->resp[0] = atmci_readl(host, ATMCI_RSPR); 1418 cmd->resp[0] = atmci_readl(host, ATMCI_RSPR);
1418 cmd->resp[1] = atmci_readl(host, ATMCI_RSPR); 1419 cmd->resp[1] = atmci_readl(host, ATMCI_RSPR);
1419 cmd->resp[2] = atmci_readl(host, ATMCI_RSPR); 1420 cmd->resp[2] = atmci_readl(host, ATMCI_RSPR);
1420 cmd->resp[3] = atmci_readl(host, ATMCI_RSPR); 1421 cmd->resp[3] = atmci_readl(host, ATMCI_RSPR);
1421 1422
1422 if (status & ATMCI_RTOE) 1423 if (status & ATMCI_RTOE)
1423 cmd->error = -ETIMEDOUT; 1424 cmd->error = -ETIMEDOUT;
1424 else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE)) 1425 else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE))
1425 cmd->error = -EILSEQ; 1426 cmd->error = -EILSEQ;
1426 else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE)) 1427 else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
1427 cmd->error = -EIO; 1428 cmd->error = -EIO;
1428 else if (host->mrq->data && (host->mrq->data->blksz & 3)) { 1429 else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
1429 if (host->caps.need_blksz_mul_4) { 1430 if (host->caps.need_blksz_mul_4) {
1430 cmd->error = -EINVAL; 1431 cmd->error = -EINVAL;
1431 host->need_reset = 1; 1432 host->need_reset = 1;
1432 } 1433 }
1433 } else 1434 } else
1434 cmd->error = 0; 1435 cmd->error = 0;
1435 } 1436 }
1436 1437
1437 static void atmci_detect_change(unsigned long data) 1438 static void atmci_detect_change(unsigned long data)
1438 { 1439 {
1439 struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data; 1440 struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data;
1440 bool present; 1441 bool present;
1441 bool present_old; 1442 bool present_old;
1442 1443
1443 /* 1444 /*
1444 * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before 1445 * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before
1445 * freeing the interrupt. We must not re-enable the interrupt 1446 * freeing the interrupt. We must not re-enable the interrupt
1446 * if it has been freed, and if we're shutting down, it 1447 * if it has been freed, and if we're shutting down, it
1447 * doesn't really matter whether the card is present or not. 1448 * doesn't really matter whether the card is present or not.
1448 */ 1449 */
1449 smp_rmb(); 1450 smp_rmb();
1450 if (test_bit(ATMCI_SHUTDOWN, &slot->flags)) 1451 if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
1451 return; 1452 return;
1452 1453
1453 enable_irq(gpio_to_irq(slot->detect_pin)); 1454 enable_irq(gpio_to_irq(slot->detect_pin));
1454 present = !(gpio_get_value(slot->detect_pin) ^ 1455 present = !(gpio_get_value(slot->detect_pin) ^
1455 slot->detect_is_active_high); 1456 slot->detect_is_active_high);
1456 present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags); 1457 present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
1457 1458
1458 dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n", 1459 dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
1459 present, present_old); 1460 present, present_old);
1460 1461
1461 if (present != present_old) { 1462 if (present != present_old) {
1462 struct atmel_mci *host = slot->host; 1463 struct atmel_mci *host = slot->host;
1463 struct mmc_request *mrq; 1464 struct mmc_request *mrq;
1464 1465
1465 dev_dbg(&slot->mmc->class_dev, "card %s\n", 1466 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1466 present ? "inserted" : "removed"); 1467 present ? "inserted" : "removed");
1467 1468
1468 spin_lock(&host->lock); 1469 spin_lock(&host->lock);
1469 1470
1470 if (!present) 1471 if (!present)
1471 clear_bit(ATMCI_CARD_PRESENT, &slot->flags); 1472 clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
1472 else 1473 else
1473 set_bit(ATMCI_CARD_PRESENT, &slot->flags); 1474 set_bit(ATMCI_CARD_PRESENT, &slot->flags);
1474 1475
1475 /* Clean up queue if present */ 1476 /* Clean up queue if present */
1476 mrq = slot->mrq; 1477 mrq = slot->mrq;
1477 if (mrq) { 1478 if (mrq) {
1478 if (mrq == host->mrq) { 1479 if (mrq == host->mrq) {
1479 /* 1480 /*
1480 * Reset controller to terminate any ongoing 1481 * Reset controller to terminate any ongoing
1481 * commands or data transfers. 1482 * commands or data transfers.
1482 */ 1483 */
1483 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); 1484 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1484 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); 1485 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1485 atmci_writel(host, ATMCI_MR, host->mode_reg); 1486 atmci_writel(host, ATMCI_MR, host->mode_reg);
1486 if (host->caps.has_cfg_reg) 1487 if (host->caps.has_cfg_reg)
1487 atmci_writel(host, ATMCI_CFG, host->cfg_reg); 1488 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1488 1489
1489 host->data = NULL; 1490 host->data = NULL;
1490 host->cmd = NULL; 1491 host->cmd = NULL;
1491 1492
1492 switch (host->state) { 1493 switch (host->state) {
1493 case STATE_IDLE: 1494 case STATE_IDLE:
1494 break; 1495 break;
1495 case STATE_SENDING_CMD: 1496 case STATE_SENDING_CMD:
1496 mrq->cmd->error = -ENOMEDIUM; 1497 mrq->cmd->error = -ENOMEDIUM;
1497 if (mrq->data) 1498 if (mrq->data)
1498 host->stop_transfer(host); 1499 host->stop_transfer(host);
1499 break; 1500 break;
1500 case STATE_DATA_XFER: 1501 case STATE_DATA_XFER:
1501 mrq->data->error = -ENOMEDIUM; 1502 mrq->data->error = -ENOMEDIUM;
1502 host->stop_transfer(host); 1503 host->stop_transfer(host);
1503 break; 1504 break;
1504 case STATE_WAITING_NOTBUSY: 1505 case STATE_WAITING_NOTBUSY:
1505 mrq->data->error = -ENOMEDIUM; 1506 mrq->data->error = -ENOMEDIUM;
1506 break; 1507 break;
1507 case STATE_SENDING_STOP: 1508 case STATE_SENDING_STOP:
1508 mrq->stop->error = -ENOMEDIUM; 1509 mrq->stop->error = -ENOMEDIUM;
1509 break; 1510 break;
1510 case STATE_END_REQUEST: 1511 case STATE_END_REQUEST:
1511 break; 1512 break;
1512 } 1513 }
1513 1514
1514 atmci_request_end(host, mrq); 1515 atmci_request_end(host, mrq);
1515 } else { 1516 } else {
1516 list_del(&slot->queue_node); 1517 list_del(&slot->queue_node);
1517 mrq->cmd->error = -ENOMEDIUM; 1518 mrq->cmd->error = -ENOMEDIUM;
1518 if (mrq->data) 1519 if (mrq->data)
1519 mrq->data->error = -ENOMEDIUM; 1520 mrq->data->error = -ENOMEDIUM;
1520 if (mrq->stop) 1521 if (mrq->stop)
1521 mrq->stop->error = -ENOMEDIUM; 1522 mrq->stop->error = -ENOMEDIUM;
1522 1523
1523 spin_unlock(&host->lock); 1524 spin_unlock(&host->lock);
1524 mmc_request_done(slot->mmc, mrq); 1525 mmc_request_done(slot->mmc, mrq);
1525 spin_lock(&host->lock); 1526 spin_lock(&host->lock);
1526 } 1527 }
1527 } 1528 }
1528 spin_unlock(&host->lock); 1529 spin_unlock(&host->lock);
1529 1530
1530 mmc_detect_change(slot->mmc, 0); 1531 mmc_detect_change(slot->mmc, 0);
1531 } 1532 }
1532 } 1533 }
1533 1534
1534 static void atmci_tasklet_func(unsigned long priv) 1535 static void atmci_tasklet_func(unsigned long priv)
1535 { 1536 {
1536 struct atmel_mci *host = (struct atmel_mci *)priv; 1537 struct atmel_mci *host = (struct atmel_mci *)priv;
1537 struct mmc_request *mrq = host->mrq; 1538 struct mmc_request *mrq = host->mrq;
1538 struct mmc_data *data = host->data; 1539 struct mmc_data *data = host->data;
1539 enum atmel_mci_state state = host->state; 1540 enum atmel_mci_state state = host->state;
1540 enum atmel_mci_state prev_state; 1541 enum atmel_mci_state prev_state;
1541 u32 status; 1542 u32 status;
1542 1543
1543 spin_lock(&host->lock); 1544 spin_lock(&host->lock);
1544 1545
1545 state = host->state; 1546 state = host->state;
1546 1547
1547 dev_vdbg(&host->pdev->dev, 1548 dev_vdbg(&host->pdev->dev,
1548 "tasklet: state %u pending/completed/mask %lx/%lx/%x\n", 1549 "tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
1549 state, host->pending_events, host->completed_events, 1550 state, host->pending_events, host->completed_events,
1550 atmci_readl(host, ATMCI_IMR)); 1551 atmci_readl(host, ATMCI_IMR));
1551 1552
1552 do { 1553 do {
1553 prev_state = state; 1554 prev_state = state;
1554 dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state); 1555 dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
1555 1556
1556 switch (state) { 1557 switch (state) {
1557 case STATE_IDLE: 1558 case STATE_IDLE:
1558 break; 1559 break;
1559 1560
1560 case STATE_SENDING_CMD: 1561 case STATE_SENDING_CMD:
1561 /* 1562 /*
1562 * The command has been sent; we are waiting for command 1563 * The command has been sent; we are waiting for command
1563 * ready. Then three next states are possible: 1564 * ready. Then three next states are possible:
1564 * END_REQUEST by default, WAITING_NOTBUSY if the 1565 * END_REQUEST by default, WAITING_NOTBUSY if the
1565 * command needs it, or DATA_XFER if there is data. 1566 * command needs it, or DATA_XFER if there is data.
1566 */ 1567 */
1567 dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n"); 1568 dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
1568 if (!atmci_test_and_clear_pending(host, 1569 if (!atmci_test_and_clear_pending(host,
1569 EVENT_CMD_RDY)) 1570 EVENT_CMD_RDY))
1570 break; 1571 break;
1571 1572
1572 dev_dbg(&host->pdev->dev, "set completed cmd ready\n"); 1573 dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
1573 host->cmd = NULL; 1574 host->cmd = NULL;
1574 atmci_set_completed(host, EVENT_CMD_RDY); 1575 atmci_set_completed(host, EVENT_CMD_RDY);
1575 atmci_command_complete(host, mrq->cmd); 1576 atmci_command_complete(host, mrq->cmd);
1576 if (mrq->data) { 1577 if (mrq->data) {
1577 dev_dbg(&host->pdev->dev, 1578 dev_dbg(&host->pdev->dev,
1578 "command with data transfer"); 1579 "command with data transfer");
1579 /* 1580 /*
1580 * If there is a command error, don't start 1581 * If there is a command error, don't start
1581 * the data transfer. 1582 * the data transfer.
1582 */ 1583 */
1583 if (mrq->cmd->error) { 1584 if (mrq->cmd->error) {
1584 host->stop_transfer(host); 1585 host->stop_transfer(host);
1585 host->data = NULL; 1586 host->data = NULL;
1586 atmci_writel(host, ATMCI_IDR, 1587 atmci_writel(host, ATMCI_IDR,
1587 ATMCI_TXRDY | ATMCI_RXRDY 1588 ATMCI_TXRDY | ATMCI_RXRDY
1588 | ATMCI_DATA_ERROR_FLAGS); 1589 | ATMCI_DATA_ERROR_FLAGS);
1589 state = STATE_END_REQUEST; 1590 state = STATE_END_REQUEST;
1590 } else 1591 } else
1591 state = STATE_DATA_XFER; 1592 state = STATE_DATA_XFER;
1592 } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) { 1593 } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
1593 dev_dbg(&host->pdev->dev, 1594 dev_dbg(&host->pdev->dev,
1594 "command response need waiting notbusy"); 1595 "command response need waiting notbusy");
1595 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); 1596 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1596 state = STATE_WAITING_NOTBUSY; 1597 state = STATE_WAITING_NOTBUSY;
1597 } else 1598 } else
1598 state = STATE_END_REQUEST; 1599 state = STATE_END_REQUEST;
1599 1600
1600 break; 1601 break;
1601 1602
1602 case STATE_DATA_XFER: 1603 case STATE_DATA_XFER:
1603 if (atmci_test_and_clear_pending(host, 1604 if (atmci_test_and_clear_pending(host,
1604 EVENT_DATA_ERROR)) { 1605 EVENT_DATA_ERROR)) {
1605 dev_dbg(&host->pdev->dev, "set completed data error\n"); 1606 dev_dbg(&host->pdev->dev, "set completed data error\n");
1606 atmci_set_completed(host, EVENT_DATA_ERROR); 1607 atmci_set_completed(host, EVENT_DATA_ERROR);
1607 state = STATE_END_REQUEST; 1608 state = STATE_END_REQUEST;
1608 break; 1609 break;
1609 } 1610 }
1610 1611
1611 /* 1612 /*
1612 * A data transfer is in progress. The event expected 1613 * A data transfer is in progress. The event expected
1613 * to move to the next state depends on the data transfer 1614 * to move to the next state depends on the data transfer
1614 * type (PDC or DMA). Once the transfer is done we can move 1615 * type (PDC or DMA). Once the transfer is done we can move
1615 * to the next step, which is WAITING_NOTBUSY in the write 1616 * to the next step, which is WAITING_NOTBUSY in the write
1616 * case and directly SENDING_STOP in the read case. 1617 * case and directly SENDING_STOP in the read case.
1617 */ 1618 */
1618 dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n"); 1619 dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
1619 if (!atmci_test_and_clear_pending(host, 1620 if (!atmci_test_and_clear_pending(host,
1620 EVENT_XFER_COMPLETE)) 1621 EVENT_XFER_COMPLETE))
1621 break; 1622 break;
1622 1623
1623 dev_dbg(&host->pdev->dev, 1624 dev_dbg(&host->pdev->dev,
1624 "(%s) set completed xfer complete\n", 1625 "(%s) set completed xfer complete\n",
1625 __func__); 1626 __func__);
1626 atmci_set_completed(host, EVENT_XFER_COMPLETE); 1627 atmci_set_completed(host, EVENT_XFER_COMPLETE);
1627 1628
1628 if (host->data->flags & MMC_DATA_WRITE) { 1629 if (host->caps.need_notbusy_for_read_ops ||
1630 (host->data->flags & MMC_DATA_WRITE)) {
1629 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); 1631 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1630 state = STATE_WAITING_NOTBUSY; 1632 state = STATE_WAITING_NOTBUSY;
1631 } else if (host->mrq->stop) { 1633 } else if (host->mrq->stop) {
1632 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY); 1634 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
1633 atmci_send_stop_cmd(host, data); 1635 atmci_send_stop_cmd(host, data);
1634 state = STATE_SENDING_STOP; 1636 state = STATE_SENDING_STOP;
1635 } else { 1637 } else {
1636 host->data = NULL; 1638 host->data = NULL;
1637 data->bytes_xfered = data->blocks * data->blksz; 1639 data->bytes_xfered = data->blocks * data->blksz;
1638 data->error = 0; 1640 data->error = 0;
1639 state = STATE_END_REQUEST; 1641 state = STATE_END_REQUEST;
1640 } 1642 }
1641 break; 1643 break;
1642 1644
1643 case STATE_WAITING_NOTBUSY: 1645 case STATE_WAITING_NOTBUSY:
1644 /* 1646 /*
1645 * We can be in this state for two reasons: a command 1647 * We can be in this state for two reasons: a command
1646 * that requires waiting for the not busy signal (stop 1648 * that requires waiting for the not busy signal (stop
1647 * command included) or a write operation. In the latter 1649 * command included) or a write operation. In the latter
1648 * case, we need to send a stop command. 1650 * case, we need to send a stop command.
1649 */ 1651 */
1650 dev_dbg(&host->pdev->dev, "FSM: not busy?\n"); 1652 dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
1651 if (!atmci_test_and_clear_pending(host, 1653 if (!atmci_test_and_clear_pending(host,
1652 EVENT_NOTBUSY)) 1654 EVENT_NOTBUSY))
1653 break; 1655 break;
1654 1656
1655 dev_dbg(&host->pdev->dev, "set completed not busy\n"); 1657 dev_dbg(&host->pdev->dev, "set completed not busy\n");
1656 atmci_set_completed(host, EVENT_NOTBUSY); 1658 atmci_set_completed(host, EVENT_NOTBUSY);
1657 1659
1658 if (host->data) { 1660 if (host->data) {
1659 /* 1661 /*
1660 * For some commands such as CMD53, even if 1662 * For some commands such as CMD53, even if
1661 * there is data transfer, there is no stop 1663 * there is data transfer, there is no stop
1662 * command to send. 1664 * command to send.
1663 */ 1665 */
1664 if (host->mrq->stop) { 1666 if (host->mrq->stop) {
1665 atmci_writel(host, ATMCI_IER, 1667 atmci_writel(host, ATMCI_IER,
1666 ATMCI_CMDRDY); 1668 ATMCI_CMDRDY);
1667 atmci_send_stop_cmd(host, data); 1669 atmci_send_stop_cmd(host, data);
1668 state = STATE_SENDING_STOP; 1670 state = STATE_SENDING_STOP;
1669 } else { 1671 } else {
1670 host->data = NULL; 1672 host->data = NULL;
1671 data->bytes_xfered = data->blocks 1673 data->bytes_xfered = data->blocks
1672 * data->blksz; 1674 * data->blksz;
1673 data->error = 0; 1675 data->error = 0;
1674 state = STATE_END_REQUEST; 1676 state = STATE_END_REQUEST;
1675 } 1677 }
1676 } else 1678 } else
1677 state = STATE_END_REQUEST; 1679 state = STATE_END_REQUEST;
1678 break; 1680 break;
1679 1681
1680 case STATE_SENDING_STOP: 1682 case STATE_SENDING_STOP:
1681 /* 1683 /*
1682 * In this state, it is important to set host->data to 1684 * In this state, it is important to set host->data to
1683 * NULL (which is tested in the waiting notbusy state) 1685 * NULL (which is tested in the waiting notbusy state)
1684 * in order to go to the end request state instead of 1686 * in order to go to the end request state instead of
1685 * sending stop again. 1687 * sending stop again.
1686 */ 1688 */
1687 dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n"); 1689 dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
1688 if (!atmci_test_and_clear_pending(host, 1690 if (!atmci_test_and_clear_pending(host,
1689 EVENT_CMD_RDY)) 1691 EVENT_CMD_RDY))
1690 break; 1692 break;
1691 1693
1692 dev_dbg(&host->pdev->dev, "FSM: cmd ready\n"); 1694 dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
1693 host->cmd = NULL; 1695 host->cmd = NULL;
1694 data->bytes_xfered = data->blocks * data->blksz; 1696 data->bytes_xfered = data->blocks * data->blksz;
1695 data->error = 0; 1697 data->error = 0;
1696 atmci_command_complete(host, mrq->stop); 1698 atmci_command_complete(host, mrq->stop);
1697 if (mrq->stop->error) { 1699 if (mrq->stop->error) {
1698 host->stop_transfer(host); 1700 host->stop_transfer(host);
1699 atmci_writel(host, ATMCI_IDR, 1701 atmci_writel(host, ATMCI_IDR,
1700 ATMCI_TXRDY | ATMCI_RXRDY 1702 ATMCI_TXRDY | ATMCI_RXRDY
1701 | ATMCI_DATA_ERROR_FLAGS); 1703 | ATMCI_DATA_ERROR_FLAGS);
1702 state = STATE_END_REQUEST; 1704 state = STATE_END_REQUEST;
1703 } else { 1705 } else {
1704 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); 1706 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1705 state = STATE_WAITING_NOTBUSY; 1707 state = STATE_WAITING_NOTBUSY;
1706 } 1708 }
1707 host->data = NULL; 1709 host->data = NULL;
1708 break; 1710 break;
1709 1711
1710 case STATE_END_REQUEST: 1712 case STATE_END_REQUEST:
1711 atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY 1713 atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
1712 | ATMCI_DATA_ERROR_FLAGS); 1714 | ATMCI_DATA_ERROR_FLAGS);
1713 status = host->data_status; 1715 status = host->data_status;
1714 if (unlikely(status)) { 1716 if (unlikely(status)) {
1715 host->stop_transfer(host); 1717 host->stop_transfer(host);
1716 host->data = NULL; 1718 host->data = NULL;
1717 if (status & ATMCI_DTOE) { 1719 if (status & ATMCI_DTOE) {
1718 data->error = -ETIMEDOUT; 1720 data->error = -ETIMEDOUT;
1719 } else if (status & ATMCI_DCRCE) { 1721 } else if (status & ATMCI_DCRCE) {
1720 data->error = -EILSEQ; 1722 data->error = -EILSEQ;
1721 } else { 1723 } else {
1722 data->error = -EIO; 1724 data->error = -EIO;
1723 } 1725 }
1724 } 1726 }
1725 1727
1726 atmci_request_end(host, host->mrq); 1728 atmci_request_end(host, host->mrq);
1727 state = STATE_IDLE; 1729 state = STATE_IDLE;
1728 break; 1730 break;
1729 } 1731 }
1730 } while (state != prev_state); 1732 } while (state != prev_state);
1731 1733
1732 host->state = state; 1734 host->state = state;
1733 1735
1734 spin_unlock(&host->lock); 1736 spin_unlock(&host->lock);
1735 } 1737 }
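
/* Editorial sketch: the transitions driven by the tasklet above,
 * reconstructed from the switch statement:
 *
 *   SENDING_CMD --EVENT_CMD_RDY--> DATA_XFER        (data, no cmd error)
 *                              |-> WAITING_NOTBUSY  (no data, R1b busy)
 *                              `-> END_REQUEST      (cmd error or no data)
 *   DATA_XFER --EVENT_DATA_ERROR--> END_REQUEST
 *             --EVENT_XFER_COMPLETE--> WAITING_NOTBUSY (write, or read on
 *                              |                     hosts needing notbusy)
 *                              |-> SENDING_STOP     (stop command pending)
 *                              `-> END_REQUEST
 *   WAITING_NOTBUSY --EVENT_NOTBUSY--> SENDING_STOP | END_REQUEST
 *   SENDING_STOP --EVENT_CMD_RDY--> WAITING_NOTBUSY | END_REQUEST (stop err)
 *   END_REQUEST ------------------> IDLE
 */
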
1736 1738
1737 static void atmci_read_data_pio(struct atmel_mci *host) 1739 static void atmci_read_data_pio(struct atmel_mci *host)
1738 { 1740 {
1739 struct scatterlist *sg = host->sg; 1741 struct scatterlist *sg = host->sg;
1740 void *buf = sg_virt(sg); 1742 void *buf = sg_virt(sg);
1741 unsigned int offset = host->pio_offset; 1743 unsigned int offset = host->pio_offset;
1742 struct mmc_data *data = host->data; 1744 struct mmc_data *data = host->data;
1743 u32 value; 1745 u32 value;
1744 u32 status; 1746 u32 status;
1745 unsigned int nbytes = 0; 1747 unsigned int nbytes = 0;
1746 1748
1747 do { 1749 do {
1748 value = atmci_readl(host, ATMCI_RDR); 1750 value = atmci_readl(host, ATMCI_RDR);
1749 if (likely(offset + 4 <= sg->length)) { 1751 if (likely(offset + 4 <= sg->length)) {
1750 put_unaligned(value, (u32 *)(buf + offset)); 1752 put_unaligned(value, (u32 *)(buf + offset));
1751 1753
1752 offset += 4; 1754 offset += 4;
1753 nbytes += 4; 1755 nbytes += 4;
1754 1756
1755 if (offset == sg->length) { 1757 if (offset == sg->length) {
1756 flush_dcache_page(sg_page(sg)); 1758 flush_dcache_page(sg_page(sg));
1757 host->sg = sg = sg_next(sg); 1759 host->sg = sg = sg_next(sg);
1758 if (!sg) 1760 if (!sg)
1759 goto done; 1761 goto done;
1760 1762
1761 offset = 0; 1763 offset = 0;
1762 buf = sg_virt(sg); 1764 buf = sg_virt(sg);
1763 } 1765 }
1764 } else { 1766 } else {
1765 unsigned int remaining = sg->length - offset; 1767 unsigned int remaining = sg->length - offset;
1766 memcpy(buf + offset, &value, remaining); 1768 memcpy(buf + offset, &value, remaining);
1767 nbytes += remaining; 1769 nbytes += remaining;
1768 1770
1769 flush_dcache_page(sg_page(sg)); 1771 flush_dcache_page(sg_page(sg));
1770 host->sg = sg = sg_next(sg); 1772 host->sg = sg = sg_next(sg);
1771 if (!sg) 1773 if (!sg)
1772 goto done; 1774 goto done;
1773 1775
1774 offset = 4 - remaining; 1776 offset = 4 - remaining;
1775 buf = sg_virt(sg); 1777 buf = sg_virt(sg);
1776 memcpy(buf, (u8 *)&value + remaining, offset); 1778 memcpy(buf, (u8 *)&value + remaining, offset);
1777 nbytes += offset; 1779 nbytes += offset;
1778 } 1780 }
1779 1781
1780 status = atmci_readl(host, ATMCI_SR); 1782 status = atmci_readl(host, ATMCI_SR);
1781 if (status & ATMCI_DATA_ERROR_FLAGS) { 1783 if (status & ATMCI_DATA_ERROR_FLAGS) {
1782 atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY 1784 atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY
1783 | ATMCI_DATA_ERROR_FLAGS)); 1785 | ATMCI_DATA_ERROR_FLAGS));
1784 host->data_status = status; 1786 host->data_status = status;
1785 data->bytes_xfered += nbytes; 1787 data->bytes_xfered += nbytes;
1786 return; 1788 return;
1787 } 1789 }
1788 } while (status & ATMCI_RXRDY); 1790 } while (status & ATMCI_RXRDY);
1789 1791
1790 host->pio_offset = offset; 1792 host->pio_offset = offset;
1791 data->bytes_xfered += nbytes; 1793 data->bytes_xfered += nbytes;
1792 1794
1793 return; 1795 return;
1794 1796
1795 done: 1797 done:
1796 atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY); 1798 atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY);
1797 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); 1799 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1798 data->bytes_xfered += nbytes; 1800 data->bytes_xfered += nbytes;
1799 smp_wmb(); 1801 smp_wmb();
1800 atmci_set_pending(host, EVENT_XFER_COMPLETE); 1802 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1801 } 1803 }
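
/* Editorial sketch of the word split handled by atmci_read_data_pio()
 * above when a 32-bit FIFO word straddles a scatterlist boundary;
 * split_word_sketch() is a hypothetical standalone helper, not driver
 * API: 'remaining' bytes finish the current segment and the other
 * 4 - remaining bytes seed the next one. */
static void split_word_sketch(u32 value, u8 *cur_tail,
			      unsigned int remaining, u8 *next_head)
{
	memcpy(cur_tail, &value, remaining);
	memcpy(next_head, (u8 *)&value + remaining, 4 - remaining);
}
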
1802 1804
1803 static void atmci_write_data_pio(struct atmel_mci *host) 1805 static void atmci_write_data_pio(struct atmel_mci *host)
1804 { 1806 {
1805 struct scatterlist *sg = host->sg; 1807 struct scatterlist *sg = host->sg;
1806 void *buf = sg_virt(sg); 1808 void *buf = sg_virt(sg);
1807 unsigned int offset = host->pio_offset; 1809 unsigned int offset = host->pio_offset;
1808 struct mmc_data *data = host->data; 1810 struct mmc_data *data = host->data;
1809 u32 value; 1811 u32 value;
1810 u32 status; 1812 u32 status;
1811 unsigned int nbytes = 0; 1813 unsigned int nbytes = 0;
1812 1814
1813 do { 1815 do {
1814 if (likely(offset + 4 <= sg->length)) { 1816 if (likely(offset + 4 <= sg->length)) {
1815 value = get_unaligned((u32 *)(buf + offset)); 1817 value = get_unaligned((u32 *)(buf + offset));
1816 atmci_writel(host, ATMCI_TDR, value); 1818 atmci_writel(host, ATMCI_TDR, value);
1817 1819
1818 offset += 4; 1820 offset += 4;
1819 nbytes += 4; 1821 nbytes += 4;
1820 if (offset == sg->length) { 1822 if (offset == sg->length) {
1821 host->sg = sg = sg_next(sg); 1823 host->sg = sg = sg_next(sg);
1822 if (!sg) 1824 if (!sg)
1823 goto done; 1825 goto done;
1824 1826
1825 offset = 0; 1827 offset = 0;
1826 buf = sg_virt(sg); 1828 buf = sg_virt(sg);
1827 } 1829 }
1828 } else { 1830 } else {
1829 unsigned int remaining = sg->length - offset; 1831 unsigned int remaining = sg->length - offset;
1830 1832
1831 value = 0; 1833 value = 0;
1832 memcpy(&value, buf + offset, remaining); 1834 memcpy(&value, buf + offset, remaining);
1833 nbytes += remaining; 1835 nbytes += remaining;
1834 1836
1835 host->sg = sg = sg_next(sg); 1837 host->sg = sg = sg_next(sg);
1836 if (!sg) { 1838 if (!sg) {
1837 atmci_writel(host, ATMCI_TDR, value); 1839 atmci_writel(host, ATMCI_TDR, value);
1838 goto done; 1840 goto done;
1839 } 1841 }
1840 1842
1841 offset = 4 - remaining; 1843 offset = 4 - remaining;
1842 buf = sg_virt(sg); 1844 buf = sg_virt(sg);
1843 memcpy((u8 *)&value + remaining, buf, offset); 1845 memcpy((u8 *)&value + remaining, buf, offset);
1844 atmci_writel(host, ATMCI_TDR, value); 1846 atmci_writel(host, ATMCI_TDR, value);
1845 nbytes += offset; 1847 nbytes += offset;
1846 } 1848 }
1847 1849
1848 status = atmci_readl(host, ATMCI_SR); 1850 status = atmci_readl(host, ATMCI_SR);
1849 if (status & ATMCI_DATA_ERROR_FLAGS) { 1851 if (status & ATMCI_DATA_ERROR_FLAGS) {
1850 atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY 1852 atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY
1851 | ATMCI_DATA_ERROR_FLAGS)); 1853 | ATMCI_DATA_ERROR_FLAGS));
1852 host->data_status = status; 1854 host->data_status = status;
1853 data->bytes_xfered += nbytes; 1855 data->bytes_xfered += nbytes;
1854 return; 1856 return;
1855 } 1857 }
1856 } while (status & ATMCI_TXRDY); 1858 } while (status & ATMCI_TXRDY);
1857 1859
1858 host->pio_offset = offset; 1860 host->pio_offset = offset;
1859 data->bytes_xfered += nbytes; 1861 data->bytes_xfered += nbytes;
1860 1862
1861 return; 1863 return;
1862 1864
1863 done: 1865 done:
1864 atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY); 1866 atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY);
1865 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); 1867 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1866 data->bytes_xfered += nbytes; 1868 data->bytes_xfered += nbytes;
1867 smp_wmb(); 1869 smp_wmb();
1868 atmci_set_pending(host, EVENT_XFER_COMPLETE); 1870 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1869 } 1871 }
1870 1872
1871 static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status) 1873 static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
1872 { 1874 {
1873 int i; 1875 int i;
1874 1876
1875 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { 1877 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1876 struct atmel_mci_slot *slot = host->slot[i]; 1878 struct atmel_mci_slot *slot = host->slot[i];
1877 if (slot && (status & slot->sdio_irq)) { 1879 if (slot && (status & slot->sdio_irq)) {
1878 mmc_signal_sdio_irq(slot->mmc); 1880 mmc_signal_sdio_irq(slot->mmc);
1879 } 1881 }
1880 } 1882 }
1881 } 1883 }
1882 1884
1883 1885
1884 static irqreturn_t atmci_interrupt(int irq, void *dev_id) 1886 static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1885 { 1887 {
1886 struct atmel_mci *host = dev_id; 1888 struct atmel_mci *host = dev_id;
1887 u32 status, mask, pending; 1889 u32 status, mask, pending;
1888 unsigned int pass_count = 0; 1890 unsigned int pass_count = 0;
1889 1891
1890 do { 1892 do {
1891 status = atmci_readl(host, ATMCI_SR); 1893 status = atmci_readl(host, ATMCI_SR);
1892 mask = atmci_readl(host, ATMCI_IMR); 1894 mask = atmci_readl(host, ATMCI_IMR);
1893 pending = status & mask; 1895 pending = status & mask;
1894 if (!pending) 1896 if (!pending)
1895 break; 1897 break;
1896 1898
1897 if (pending & ATMCI_DATA_ERROR_FLAGS) { 1899 if (pending & ATMCI_DATA_ERROR_FLAGS) {
1898 dev_dbg(&host->pdev->dev, "IRQ: data error\n"); 1900 dev_dbg(&host->pdev->dev, "IRQ: data error\n");
1899 atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS 1901 atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
1900 | ATMCI_RXRDY | ATMCI_TXRDY 1902 | ATMCI_RXRDY | ATMCI_TXRDY
1901 | ATMCI_ENDRX | ATMCI_ENDTX 1903 | ATMCI_ENDRX | ATMCI_ENDTX
1902 | ATMCI_RXBUFF | ATMCI_TXBUFE); 1904 | ATMCI_RXBUFF | ATMCI_TXBUFE);
1903 1905
1904 host->data_status = status; 1906 host->data_status = status;
1905 dev_dbg(&host->pdev->dev, "set pending data error\n"); 1907 dev_dbg(&host->pdev->dev, "set pending data error\n");
1906 smp_wmb(); 1908 smp_wmb();
1907 atmci_set_pending(host, EVENT_DATA_ERROR); 1909 atmci_set_pending(host, EVENT_DATA_ERROR);
1908 tasklet_schedule(&host->tasklet); 1910 tasklet_schedule(&host->tasklet);
1909 } 1911 }
1910 1912
1911 if (pending & ATMCI_TXBUFE) { 1913 if (pending & ATMCI_TXBUFE) {
1912 dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n"); 1914 dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
1913 atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE); 1915 atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
1914 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX); 1916 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
1915 /* 1917 /*
1916 * We can receive this interrupt before having configured 1918 * We can receive this interrupt before having configured
1917 * the second PDC buffer, so we need to reconfigure the 1919 * the second PDC buffer, so we need to reconfigure the
1918 * first and second buffers again 1920 * first and second buffers again
1919 */ 1921 */
1920 if (host->data_size) { 1922 if (host->data_size) {
1921 atmci_pdc_set_both_buf(host, XFER_TRANSMIT); 1923 atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
1922 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX); 1924 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
1923 atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE); 1925 atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
1924 } else { 1926 } else {
1925 atmci_pdc_complete(host); 1927 atmci_pdc_complete(host);
1926 } 1928 }
1927 } else if (pending & ATMCI_ENDTX) { 1929 } else if (pending & ATMCI_ENDTX) {
1928 dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n"); 1930 dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
1929 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX); 1931 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
1930 1932
1931 if (host->data_size) { 1933 if (host->data_size) {
1932 atmci_pdc_set_single_buf(host, 1934 atmci_pdc_set_single_buf(host,
1933 XFER_TRANSMIT, PDC_SECOND_BUF); 1935 XFER_TRANSMIT, PDC_SECOND_BUF);
1934 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX); 1936 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
1935 } 1937 }
1936 } 1938 }
1937 1939
1938 if (pending & ATMCI_RXBUFF) { 1940 if (pending & ATMCI_RXBUFF) {
1939 dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n"); 1941 dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
1940 atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF); 1942 atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
1941 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX); 1943 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
1942 /* 1944 /*
1943 * We can receive this interrupt before having configured 1945 * We can receive this interrupt before having configured
1944 * the second PDC buffer, so we need to reconfigure the 1946 * the second PDC buffer, so we need to reconfigure the
1945 * first and second buffers again 1947 * first and second buffers again
1946 */ 1948 */
1947 if (host->data_size) { 1949 if (host->data_size) {
1948 atmci_pdc_set_both_buf(host, XFER_RECEIVE); 1950 atmci_pdc_set_both_buf(host, XFER_RECEIVE);
1949 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX); 1951 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
1950 atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF); 1952 atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
1951 } else { 1953 } else {
1952 atmci_pdc_complete(host); 1954 atmci_pdc_complete(host);
1953 } 1955 }
1954 } else if (pending & ATMCI_ENDRX) { 1956 } else if (pending & ATMCI_ENDRX) {
1955 dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n"); 1957 dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
1956 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX); 1958 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
1957 1959
1958 if (host->data_size) { 1960 if (host->data_size) {
1959 atmci_pdc_set_single_buf(host, 1961 atmci_pdc_set_single_buf(host,
1960 XFER_RECEIVE, PDC_SECOND_BUF); 1962 XFER_RECEIVE, PDC_SECOND_BUF);
1961 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX); 1963 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
1962 } 1964 }
1963 } 1965 }
1964 1966
1965 /* 1967 /*
1966 * Early MCI IPs, mainly the ones with a PDC, have some 1968 * Early MCI IPs, mainly the ones with a PDC, have some
1967 * issues with the notbusy signal: you can't get it after 1969 * issues with the notbusy signal: you can't get it after
1968 * a data transmission if you have not sent a stop command. 1970 * a data transmission if you have not sent a stop command.
1969 * The appropriate workaround is to use the BLKE signal. 1971 * The appropriate workaround is to use the BLKE signal.
1970 */ 1972 */
1971 if (pending & ATMCI_BLKE) { 1973 if (pending & ATMCI_BLKE) {
1972 dev_dbg(&host->pdev->dev, "IRQ: blke\n"); 1974 dev_dbg(&host->pdev->dev, "IRQ: blke\n");
1973 atmci_writel(host, ATMCI_IDR, ATMCI_BLKE); 1975 atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
1974 smp_wmb(); 1976 smp_wmb();
1975 dev_dbg(&host->pdev->dev, "set pending notbusy\n"); 1977 dev_dbg(&host->pdev->dev, "set pending notbusy\n");
1976 atmci_set_pending(host, EVENT_NOTBUSY); 1978 atmci_set_pending(host, EVENT_NOTBUSY);
1977 tasklet_schedule(&host->tasklet); 1979 tasklet_schedule(&host->tasklet);
1978 } 1980 }
1979 1981
1980 if (pending & ATMCI_NOTBUSY) { 1982 if (pending & ATMCI_NOTBUSY) {
1981 dev_dbg(&host->pdev->dev, "IRQ: not_busy\n"); 1983 dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
1982 atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY); 1984 atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
1983 smp_wmb(); 1985 smp_wmb();
1984 dev_dbg(&host->pdev->dev, "set pending notbusy\n"); 1986 dev_dbg(&host->pdev->dev, "set pending notbusy\n");
1985 atmci_set_pending(host, EVENT_NOTBUSY); 1987 atmci_set_pending(host, EVENT_NOTBUSY);
1986 tasklet_schedule(&host->tasklet); 1988 tasklet_schedule(&host->tasklet);
1987 } 1989 }
1988 1990
1989 if (pending & ATMCI_RXRDY) 1991 if (pending & ATMCI_RXRDY)
1990 atmci_read_data_pio(host); 1992 atmci_read_data_pio(host);
1991 if (pending & ATMCI_TXRDY) 1993 if (pending & ATMCI_TXRDY)
1992 atmci_write_data_pio(host); 1994 atmci_write_data_pio(host);
1993 1995
1994 if (pending & ATMCI_CMDRDY) { 1996 if (pending & ATMCI_CMDRDY) {
1995 dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n"); 1997 dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
1996 atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY); 1998 atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
1997 host->cmd_status = status; 1999 host->cmd_status = status;
1998 smp_wmb(); 2000 smp_wmb();
1999 dev_dbg(&host->pdev->dev, "set pending cmd rdy\n"); 2001 dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
2000 atmci_set_pending(host, EVENT_CMD_RDY); 2002 atmci_set_pending(host, EVENT_CMD_RDY);
2001 tasklet_schedule(&host->tasklet); 2003 tasklet_schedule(&host->tasklet);
2002 } 2004 }
2003 2005
2004 if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB)) 2006 if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
2005 atmci_sdio_interrupt(host, status); 2007 atmci_sdio_interrupt(host, status);
2006 2008
2007 } while (pass_count++ < 5); 2009 } while (pass_count++ < 5);
2008 2010
2009 return pass_count ? IRQ_HANDLED : IRQ_NONE; 2011 return pass_count ? IRQ_HANDLED : IRQ_NONE;
2010 } 2012 }
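
/* Editorial sketch: atmci_interrupt() above follows a bounded-drain
 * pattern. Status is re-read and handled for at most five passes so the
 * hard IRQ cannot livelock, and the heavy lifting (the state machine) is
 * deferred to the tasklet via tasklet_schedule(). Note that pass_count
 * stays 0 only when the very first pass finds nothing pending, so
 * IRQ_NONE is returned exactly for spurious interrupts.
 */
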
2011 2013
2012 static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id) 2014 static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
2013 { 2015 {
2014 struct atmel_mci_slot *slot = dev_id; 2016 struct atmel_mci_slot *slot = dev_id;
2015 2017
2016 /* 2018 /*
2017 * Disable interrupts until the pin has stabilized, then check 2019 * Disable interrupts until the pin has stabilized, then check
2018 * the state. Use mod_timer() since we may be in the 2020 * the state. Use mod_timer() since we may be in the
2019 * middle of the timer routine when this interrupt triggers. 2021 * middle of the timer routine when this interrupt triggers.
2020 */ 2022 */
2021 disable_irq_nosync(irq); 2023 disable_irq_nosync(irq);
2022 mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20)); 2024 mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
2023 2025
2024 return IRQ_HANDLED; 2026 return IRQ_HANDLED;
2025 } 2027 }
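
/* Editorial sketch: the card-detect debounce sequence implied above:
 *   1. A GPIO edge fires; disable_irq_nosync() masks further edges.
 *   2. detect_timer expires ~20 ms later; atmci_detect_change()
 *      re-enables the IRQ and compares the settled pin state with
 *      ATMCI_CARD_PRESENT before notifying the core.
 *   3. mod_timer() (rather than add_timer()) is used because the edge
 *      may arrive while the timer callback is already running.
 */
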
2026 2028
2027 static int __init atmci_init_slot(struct atmel_mci *host, 2029 static int __init atmci_init_slot(struct atmel_mci *host,
2028 struct mci_slot_pdata *slot_data, unsigned int id, 2030 struct mci_slot_pdata *slot_data, unsigned int id,
2029 u32 sdc_reg, u32 sdio_irq) 2031 u32 sdc_reg, u32 sdio_irq)
2030 { 2032 {
2031 struct mmc_host *mmc; 2033 struct mmc_host *mmc;
2032 struct atmel_mci_slot *slot; 2034 struct atmel_mci_slot *slot;
2033 2035
2034 mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev); 2036 mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
2035 if (!mmc) 2037 if (!mmc)
2036 return -ENOMEM; 2038 return -ENOMEM;
2037 2039
2038 slot = mmc_priv(mmc); 2040 slot = mmc_priv(mmc);
2039 slot->mmc = mmc; 2041 slot->mmc = mmc;
2040 slot->host = host; 2042 slot->host = host;
2041 slot->detect_pin = slot_data->detect_pin; 2043 slot->detect_pin = slot_data->detect_pin;
2042 slot->wp_pin = slot_data->wp_pin; 2044 slot->wp_pin = slot_data->wp_pin;
2043 slot->detect_is_active_high = slot_data->detect_is_active_high; 2045 slot->detect_is_active_high = slot_data->detect_is_active_high;
2044 slot->sdc_reg = sdc_reg; 2046 slot->sdc_reg = sdc_reg;
2045 slot->sdio_irq = sdio_irq; 2047 slot->sdio_irq = sdio_irq;
2046 2048
2047 mmc->ops = &atmci_ops; 2049 mmc->ops = &atmci_ops;
2048 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); 2050 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
2049 mmc->f_max = host->bus_hz / 2; 2051 mmc->f_max = host->bus_hz / 2;
2050 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 2052 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2051 if (sdio_irq) 2053 if (sdio_irq)
2052 mmc->caps |= MMC_CAP_SDIO_IRQ; 2054 mmc->caps |= MMC_CAP_SDIO_IRQ;
2053 if (host->caps.has_highspeed) 2055 if (host->caps.has_highspeed)
2054 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 2056 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
2055 /* 2057 /*
2056 * Without the read/write proof capability, it is strongly suggested to 2058 * Without the read/write proof capability, it is strongly suggested to
2057 * use only one data line, to prevent the FIFO underruns and overruns 2059 * use only one data line, to prevent the FIFO underruns and overruns
2058 * that would corrupt data. 2060 * that would corrupt data.
2059 */ 2061 */
2060 if ((slot_data->bus_width >= 4) && host->caps.has_rwproof) 2062 if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
2061 mmc->caps |= MMC_CAP_4_BIT_DATA; 2063 mmc->caps |= MMC_CAP_4_BIT_DATA;
2062 2064
2063 if (atmci_get_version(host) < 0x200) { 2065 if (atmci_get_version(host) < 0x200) {
2064 mmc->max_segs = 256; 2066 mmc->max_segs = 256;
2065 mmc->max_blk_size = 4095; 2067 mmc->max_blk_size = 4095;
2066 mmc->max_blk_count = 256; 2068 mmc->max_blk_count = 256;
2067 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 2069 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2068 mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs; 2070 mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
2069 } else { 2071 } else {
2070 mmc->max_segs = 64; 2072 mmc->max_segs = 64;
2071 mmc->max_req_size = 32768 * 512; 2073 mmc->max_req_size = 32768 * 512;
2072 mmc->max_blk_size = 32768; 2074 mmc->max_blk_size = 32768;
2073 mmc->max_blk_count = 512; 2075 mmc->max_blk_count = 512;
2074 } 2076 }
2075 2077
2076 /* Assume card is present initially */ 2078 /* Assume card is present initially */
2077 set_bit(ATMCI_CARD_PRESENT, &slot->flags); 2079 set_bit(ATMCI_CARD_PRESENT, &slot->flags);
2078 if (gpio_is_valid(slot->detect_pin)) { 2080 if (gpio_is_valid(slot->detect_pin)) {
2079 if (gpio_request(slot->detect_pin, "mmc_detect")) { 2081 if (gpio_request(slot->detect_pin, "mmc_detect")) {
2080 dev_dbg(&mmc->class_dev, "no detect pin available\n"); 2082 dev_dbg(&mmc->class_dev, "no detect pin available\n");
2081 slot->detect_pin = -EBUSY; 2083 slot->detect_pin = -EBUSY;
2082 } else if (gpio_get_value(slot->detect_pin) ^ 2084 } else if (gpio_get_value(slot->detect_pin) ^
2083 slot->detect_is_active_high) { 2085 slot->detect_is_active_high) {
2084 clear_bit(ATMCI_CARD_PRESENT, &slot->flags); 2086 clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
2085 } 2087 }
2086 } 2088 }
2087 2089
2088 if (!gpio_is_valid(slot->detect_pin)) 2090 if (!gpio_is_valid(slot->detect_pin))
2089 mmc->caps |= MMC_CAP_NEEDS_POLL; 2091 mmc->caps |= MMC_CAP_NEEDS_POLL;
2090 2092
2091 if (gpio_is_valid(slot->wp_pin)) { 2093 if (gpio_is_valid(slot->wp_pin)) {
2092 if (gpio_request(slot->wp_pin, "mmc_wp")) { 2094 if (gpio_request(slot->wp_pin, "mmc_wp")) {
2093 dev_dbg(&mmc->class_dev, "no WP pin available\n"); 2095 dev_dbg(&mmc->class_dev, "no WP pin available\n");
2094 slot->wp_pin = -EBUSY; 2096 slot->wp_pin = -EBUSY;
2095 } 2097 }
2096 } 2098 }
2097 2099
2098 host->slot[id] = slot; 2100 host->slot[id] = slot;
2099 mmc_add_host(mmc); 2101 mmc_add_host(mmc);
2100 2102
2101 if (gpio_is_valid(slot->detect_pin)) { 2103 if (gpio_is_valid(slot->detect_pin)) {
2102 int ret; 2104 int ret;
2103 2105
2104 setup_timer(&slot->detect_timer, atmci_detect_change, 2106 setup_timer(&slot->detect_timer, atmci_detect_change,
2105 (unsigned long)slot); 2107 (unsigned long)slot);
2106 2108
2107 ret = request_irq(gpio_to_irq(slot->detect_pin), 2109 ret = request_irq(gpio_to_irq(slot->detect_pin),
2108 atmci_detect_interrupt, 2110 atmci_detect_interrupt,
2109 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, 2111 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
2110 "mmc-detect", slot); 2112 "mmc-detect", slot);
2111 if (ret) { 2113 if (ret) {
2112 dev_dbg(&mmc->class_dev, 2114 dev_dbg(&mmc->class_dev,
2113 "could not request IRQ %d for detect pin\n", 2115 "could not request IRQ %d for detect pin\n",
2114 gpio_to_irq(slot->detect_pin)); 2116 gpio_to_irq(slot->detect_pin));
2115 gpio_free(slot->detect_pin); 2117 gpio_free(slot->detect_pin);
2116 slot->detect_pin = -EBUSY; 2118 slot->detect_pin = -EBUSY;
2117 } 2119 }
2118 } 2120 }
2119 2121
2120 atmci_init_debugfs(slot); 2122 atmci_init_debugfs(slot);
2121 2123
2122 return 0; 2124 return 0;
2123 } 2125 }
2124 2126
2125 static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot, 2127 static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
2126 unsigned int id) 2128 unsigned int id)
2127 { 2129 {
2128 /* Debugfs stuff is cleaned up by mmc core */ 2130 /* Debugfs stuff is cleaned up by mmc core */
2129 2131
2130 set_bit(ATMCI_SHUTDOWN, &slot->flags); 2132 set_bit(ATMCI_SHUTDOWN, &slot->flags);
2131 smp_wmb(); 2133 smp_wmb();
2132 2134
2133 mmc_remove_host(slot->mmc); 2135 mmc_remove_host(slot->mmc);
2134 2136
2135 if (gpio_is_valid(slot->detect_pin)) { 2137 if (gpio_is_valid(slot->detect_pin)) {
2136 int pin = slot->detect_pin; 2138 int pin = slot->detect_pin;
2137 2139
2138 free_irq(gpio_to_irq(pin), slot); 2140 free_irq(gpio_to_irq(pin), slot);
2139 del_timer_sync(&slot->detect_timer); 2141 del_timer_sync(&slot->detect_timer);
2140 gpio_free(pin); 2142 gpio_free(pin);
2141 } 2143 }
2142 if (gpio_is_valid(slot->wp_pin)) 2144 if (gpio_is_valid(slot->wp_pin))
2143 gpio_free(slot->wp_pin); 2145 gpio_free(slot->wp_pin);
2144 2146
2145 slot->host->slot[id] = NULL; 2147 slot->host->slot[id] = NULL;
2146 mmc_free_host(slot->mmc); 2148 mmc_free_host(slot->mmc);
2147 } 2149 }
2148 2150
2149 static bool atmci_filter(struct dma_chan *chan, void *slave) 2151 static bool atmci_filter(struct dma_chan *chan, void *slave)
2150 { 2152 {
2151 struct mci_dma_data *sl = slave; 2153 struct mci_dma_data *sl = slave;
2152 2154
2153 if (sl && find_slave_dev(sl) == chan->device->dev) { 2155 if (sl && find_slave_dev(sl) == chan->device->dev) {
2154 chan->private = slave_data_ptr(sl); 2156 chan->private = slave_data_ptr(sl);
2155 return true; 2157 return true;
2156 } else { 2158 } else {
2157 return false; 2159 return false;
2158 } 2160 }
2159 } 2161 }
2160 2162
2161 static bool atmci_configure_dma(struct atmel_mci *host) 2163 static bool atmci_configure_dma(struct atmel_mci *host)
2162 { 2164 {
2163 struct mci_platform_data *pdata; 2165 struct mci_platform_data *pdata;
2164 2166
2165 if (host == NULL) 2167 if (host == NULL)
2166 return false; 2168 return false;
2167 2169
2168 pdata = host->pdev->dev.platform_data; 2170 pdata = host->pdev->dev.platform_data;
2169 2171
2170 if (pdata && find_slave_dev(pdata->dma_slave)) { 2172 if (pdata && find_slave_dev(pdata->dma_slave)) {
2171 dma_cap_mask_t mask; 2173 dma_cap_mask_t mask;
2172 2174
2173 /* Try to grab a DMA channel */ 2175 /* Try to grab a DMA channel */
2174 dma_cap_zero(mask); 2176 dma_cap_zero(mask);
2175 dma_cap_set(DMA_SLAVE, mask); 2177 dma_cap_set(DMA_SLAVE, mask);
2176 host->dma.chan = 2178 host->dma.chan =
2177 dma_request_channel(mask, atmci_filter, pdata->dma_slave); 2179 dma_request_channel(mask, atmci_filter, pdata->dma_slave);
2178 } 2180 }
2179 if (!host->dma.chan) { 2181 if (!host->dma.chan) {
2180 dev_warn(&host->pdev->dev, "no DMA channel available\n"); 2182 dev_warn(&host->pdev->dev, "no DMA channel available\n");
2181 return false; 2183 return false;
2182 } else { 2184 } else {
2183 dev_info(&host->pdev->dev, 2185 dev_info(&host->pdev->dev,
2184 "using %s for DMA transfers\n", 2186 "using %s for DMA transfers\n",
2185 dma_chan_name(host->dma.chan)); 2187 dma_chan_name(host->dma.chan));
2186 2188
2187 host->dma_conf.src_addr = host->mapbase + ATMCI_RDR; 2189 host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
2188 host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 2190 host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2189 host->dma_conf.src_maxburst = 1; 2191 host->dma_conf.src_maxburst = 1;
2190 host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR; 2192 host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
2191 host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 2193 host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2192 host->dma_conf.dst_maxburst = 1; 2194 host->dma_conf.dst_maxburst = 1;
2193 host->dma_conf.device_fc = false; 2195 host->dma_conf.device_fc = false;
2194 return true; 2196 return true;
2195 } 2197 }
2196 } 2198 }
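
atmci_filter() and atmci_configure_dma() together follow the standard
dmaengine channel-request pattern: build a capability mask, then let
dma_request_channel() offer candidate channels to the filter until one returns
true; the filter stashes controller-specific configuration in chan->private
for the DMA driver to pick up. Condensed to a sketch (the slave-data names are
assumptions):

	static bool my_filter(struct dma_chan *chan, void *param)
	{
		struct my_slave_data *sl = param;	/* hypothetical */

		if (sl && sl->dma_dev == chan->device->dev) {
			chan->private = sl;	/* config for the DMA driver */
			return true;		/* accept this channel */
		}
		return false;			/* keep searching */
	}

	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, my_filter, pdata->dma_slave);
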
2197 2199
2198 /* 2200 /*
2199 * The HSMCI (High Speed MCI) module is not fully compatible with the MCI 2201 * The HSMCI (High Speed MCI) module is not fully compatible with the MCI
2200 * module: HSMCI provides DMA support and a new config register, but no 2202 * module: HSMCI provides DMA support and a new config register, but no
2201 * longer supports PDC. 2203 * longer supports PDC.
2202 */ 2204 */
2203 static void __init atmci_get_cap(struct atmel_mci *host) 2205 static void __init atmci_get_cap(struct atmel_mci *host)
2204 { 2206 {
2205 unsigned int version; 2207 unsigned int version;
2206 2208
2207 version = atmci_get_version(host); 2209 version = atmci_get_version(host);
2208 dev_info(&host->pdev->dev, 2210 dev_info(&host->pdev->dev,
2209 "version: 0x%x\n", version); 2211 "version: 0x%x\n", version);
2210 2212
2211 host->caps.has_dma = 0; 2213 host->caps.has_dma = 0;
2212 host->caps.has_pdc = 1; 2214 host->caps.has_pdc = 1;
2213 host->caps.has_cfg_reg = 0; 2215 host->caps.has_cfg_reg = 0;
2214 host->caps.has_cstor_reg = 0; 2216 host->caps.has_cstor_reg = 0;
2215 host->caps.has_highspeed = 0; 2217 host->caps.has_highspeed = 0;
2216 host->caps.has_rwproof = 0; 2218 host->caps.has_rwproof = 0;
2217 host->caps.has_odd_clk_div = 0; 2219 host->caps.has_odd_clk_div = 0;
2218 host->caps.has_bad_data_ordering = 1; 2220 host->caps.has_bad_data_ordering = 1;
2219 host->caps.need_reset_after_xfer = 1; 2221 host->caps.need_reset_after_xfer = 1;
2220 host->caps.need_blksz_mul_4 = 1; 2222 host->caps.need_blksz_mul_4 = 1;
2223 host->caps.need_notbusy_for_read_ops = 0;
2221 2224
2222 /* keep only major version number */ 2225 /* keep only major version number */
2223 switch (version & 0xf00) { 2226 switch (version & 0xf00) {
2224 case 0x500: 2227 case 0x500:
2225 host->caps.has_odd_clk_div = 1; 2228 host->caps.has_odd_clk_div = 1;
2226 case 0x400: 2229 case 0x400:
2227 case 0x300: 2230 case 0x300:
2228 #ifdef CONFIG_AT_HDMAC 2231 #ifdef CONFIG_AT_HDMAC
2229 host->caps.has_dma = 1; 2232 host->caps.has_dma = 1;
2230 #else 2233 #else
2231 dev_info(&host->pdev->dev, 2234 dev_info(&host->pdev->dev,
2232 "has dma capability but dma engine is not selected, then use pio\n"); 2235 "has dma capability but dma engine is not selected, then use pio\n");
2233 #endif 2236 #endif
2234 host->caps.has_pdc = 0; 2237 host->caps.has_pdc = 0;
2235 host->caps.has_cfg_reg = 1; 2238 host->caps.has_cfg_reg = 1;
2236 host->caps.has_cstor_reg = 1; 2239 host->caps.has_cstor_reg = 1;
2237 host->caps.has_highspeed = 1; 2240 host->caps.has_highspeed = 1;
2238 case 0x200: 2241 case 0x200:
2239 host->caps.has_rwproof = 1; 2242 host->caps.has_rwproof = 1;
2240 host->caps.need_blksz_mul_4 = 0; 2243 host->caps.need_blksz_mul_4 = 0;
2244 host->caps.need_notbusy_for_read_ops = 1;
2241 case 0x100: 2245 case 0x100:
2242 host->caps.has_bad_data_ordering = 0; 2246 host->caps.has_bad_data_ordering = 0;
2243 host->caps.need_reset_after_xfer = 0; 2247 host->caps.need_reset_after_xfer = 0;
2244 case 0x0: 2248 case 0x0:
2245 break; 2249 break;
2246 default: 2250 default:
2247 host->caps.has_pdc = 0; 2251 host->caps.has_pdc = 0;
2248 dev_warn(&host->pdev->dev, 2252 dev_warn(&host->pdev->dev,
2249 "Unmanaged mci version, set minimum capabilities\n"); 2253 "Unmanaged mci version, set minimum capabilities\n");
2250 break; 2254 break;
2251 } 2255 }
2252 } 2256 }
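
Note that the switch in atmci_get_cap() relies on deliberate case
fall-through: each case enables the capabilities introduced by that IP
revision and then falls into the cases for older revisions, so a 0x500
controller accumulates every capability down to 0x100. The idiom, reduced to
its shape:

	switch (version & 0xf00) {
	case 0x500:
		caps->has_odd_clk_div = 1;
		/* fall through: 0x500 also has everything below */
	case 0x400:
	case 0x300:
		caps->has_dma = 1;
		/* fall through */
	case 0x200:
		caps->has_rwproof = 1;
		/* fall through */
	case 0x100:
		caps->has_bad_data_ordering = 0;
	case 0x0:
		break;
	}
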
2253 2257
2254 static int __init atmci_probe(struct platform_device *pdev) 2258 static int __init atmci_probe(struct platform_device *pdev)
2255 { 2259 {
2256 struct mci_platform_data *pdata; 2260 struct mci_platform_data *pdata;
2257 struct atmel_mci *host; 2261 struct atmel_mci *host;
2258 struct resource *regs; 2262 struct resource *regs;
2259 unsigned int nr_slots; 2263 unsigned int nr_slots;
2260 int irq; 2264 int irq;
2261 int ret; 2265 int ret;
2262 2266
2263 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2267 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2264 if (!regs) 2268 if (!regs)
2265 return -ENXIO; 2269 return -ENXIO;
2266 pdata = pdev->dev.platform_data; 2270 pdata = pdev->dev.platform_data;
2267 if (!pdata) 2271 if (!pdata)
2268 return -ENXIO; 2272 return -ENXIO;
2269 irq = platform_get_irq(pdev, 0); 2273 irq = platform_get_irq(pdev, 0);
2270 if (irq < 0) 2274 if (irq < 0)
2271 return irq; 2275 return irq;
2272 2276
2273 host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL); 2277 host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL);
2274 if (!host) 2278 if (!host)
2275 return -ENOMEM; 2279 return -ENOMEM;
2276 2280
2277 host->pdev = pdev; 2281 host->pdev = pdev;
2278 spin_lock_init(&host->lock); 2282 spin_lock_init(&host->lock);
2279 INIT_LIST_HEAD(&host->queue); 2283 INIT_LIST_HEAD(&host->queue);
2280 2284
2281 host->mck = clk_get(&pdev->dev, "mci_clk"); 2285 host->mck = clk_get(&pdev->dev, "mci_clk");
2282 if (IS_ERR(host->mck)) { 2286 if (IS_ERR(host->mck)) {
2283 ret = PTR_ERR(host->mck); 2287 ret = PTR_ERR(host->mck);
2284 goto err_clk_get; 2288 goto err_clk_get;
2285 } 2289 }
2286 2290
2287 ret = -ENOMEM; 2291 ret = -ENOMEM;
2288 host->regs = ioremap(regs->start, resource_size(regs)); 2292 host->regs = ioremap(regs->start, resource_size(regs));
2289 if (!host->regs) 2293 if (!host->regs)
2290 goto err_ioremap; 2294 goto err_ioremap;
2291 2295
2292 clk_enable(host->mck); 2296 clk_enable(host->mck);
2293 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); 2297 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
2294 host->bus_hz = clk_get_rate(host->mck); 2298 host->bus_hz = clk_get_rate(host->mck);
2295 clk_disable(host->mck); 2299 clk_disable(host->mck);
2296 2300
2297 host->mapbase = regs->start; 2301 host->mapbase = regs->start;
2298 2302
2299 tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host); 2303 tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);
2300 2304
2301 ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host); 2305 ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
2302 if (ret) 2306 if (ret)
2303 goto err_request_irq; 2307 goto err_request_irq;
2304 2308
2305 /* Get MCI capabilities and set operations according to it */ 2309 /* Get MCI capabilities and set operations according to it */
2306 atmci_get_cap(host); 2310 atmci_get_cap(host);
2307 if (host->caps.has_dma && atmci_configure_dma(host)) { 2311 if (host->caps.has_dma && atmci_configure_dma(host)) {
2308 host->prepare_data = &atmci_prepare_data_dma; 2312 host->prepare_data = &atmci_prepare_data_dma;
2309 host->submit_data = &atmci_submit_data_dma; 2313 host->submit_data = &atmci_submit_data_dma;
2310 host->stop_transfer = &atmci_stop_transfer_dma; 2314 host->stop_transfer = &atmci_stop_transfer_dma;
2311 } else if (host->caps.has_pdc) { 2315 } else if (host->caps.has_pdc) {
2312 dev_info(&pdev->dev, "using PDC\n"); 2316 dev_info(&pdev->dev, "using PDC\n");
2313 host->prepare_data = &atmci_prepare_data_pdc; 2317 host->prepare_data = &atmci_prepare_data_pdc;
2314 host->submit_data = &atmci_submit_data_pdc; 2318 host->submit_data = &atmci_submit_data_pdc;
2315 host->stop_transfer = &atmci_stop_transfer_pdc; 2319 host->stop_transfer = &atmci_stop_transfer_pdc;
2316 } else { 2320 } else {
2317 dev_info(&pdev->dev, "using PIO\n"); 2321 dev_info(&pdev->dev, "using PIO\n");
2318 host->prepare_data = &atmci_prepare_data; 2322 host->prepare_data = &atmci_prepare_data;
2319 host->submit_data = &atmci_submit_data; 2323 host->submit_data = &atmci_submit_data;
2320 host->stop_transfer = &atmci_stop_transfer; 2324 host->stop_transfer = &atmci_stop_transfer;
2321 } 2325 }
2322 2326
2323 platform_set_drvdata(pdev, host); 2327 platform_set_drvdata(pdev, host);
2324 2328
2325 setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host); 2329 setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
2326 2330
2327 /* We need at least one slot to succeed */ 2331 /* We need at least one slot to succeed */
2328 nr_slots = 0; 2332 nr_slots = 0;
2329 ret = -ENODEV; 2333 ret = -ENODEV;
2330 if (pdata->slot[0].bus_width) { 2334 if (pdata->slot[0].bus_width) {
2331 ret = atmci_init_slot(host, &pdata->slot[0], 2335 ret = atmci_init_slot(host, &pdata->slot[0],
2332 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA); 2336 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
2333 if (!ret) { 2337 if (!ret) {
2334 nr_slots++; 2338 nr_slots++;
2335 host->buf_size = host->slot[0]->mmc->max_req_size; 2339 host->buf_size = host->slot[0]->mmc->max_req_size;
2336 } 2340 }
2337 } 2341 }
2338 if (pdata->slot[1].bus_width) { 2342 if (pdata->slot[1].bus_width) {
2339 ret = atmci_init_slot(host, &pdata->slot[1], 2343 ret = atmci_init_slot(host, &pdata->slot[1],
2340 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB); 2344 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
2341 if (!ret) { 2345 if (!ret) {
2342 nr_slots++; 2346 nr_slots++;
2343 if (host->slot[1]->mmc->max_req_size > host->buf_size) 2347 if (host->slot[1]->mmc->max_req_size > host->buf_size)
2344 host->buf_size = 2348 host->buf_size =
2345 host->slot[1]->mmc->max_req_size; 2349 host->slot[1]->mmc->max_req_size;
2346 } 2350 }
2347 } 2351 }
2348 2352
2349 if (!nr_slots) { 2353 if (!nr_slots) {
2350 dev_err(&pdev->dev, "init failed: no slot defined\n"); 2354 dev_err(&pdev->dev, "init failed: no slot defined\n");
2351 goto err_init_slot; 2355 goto err_init_slot;
2352 } 2356 }
2353 2357
2354 if (!host->caps.has_rwproof) { 2358 if (!host->caps.has_rwproof) {
2355 host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size, 2359 host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
2356 &host->buf_phys_addr, 2360 &host->buf_phys_addr,
2357 GFP_KERNEL); 2361 GFP_KERNEL);
2358 if (!host->buffer) { 2362 if (!host->buffer) {
2359 ret = -ENOMEM; 2363 ret = -ENOMEM;
2360 dev_err(&pdev->dev, "buffer allocation failed\n"); 2364 dev_err(&pdev->dev, "buffer allocation failed\n");
2361 goto err_init_slot; 2365 goto err_init_slot;
2362 } 2366 }
2363 } 2367 }
2364 2368
2365 dev_info(&pdev->dev, 2369 dev_info(&pdev->dev,
2366 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", 2370 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
2367 host->mapbase, irq, nr_slots); 2371 host->mapbase, irq, nr_slots);
2368 2372
2369 return 0; 2373 return 0;
2370 2374
2371 err_init_slot: 2375 err_init_slot:
2372 if (host->dma.chan) 2376 if (host->dma.chan)
2373 dma_release_channel(host->dma.chan); 2377 dma_release_channel(host->dma.chan);
2374 free_irq(irq, host); 2378 free_irq(irq, host);
2375 err_request_irq: 2379 err_request_irq:
2376 iounmap(host->regs); 2380 iounmap(host->regs);
2377 err_ioremap: 2381 err_ioremap:
2378 clk_put(host->mck); 2382 clk_put(host->mck);
2379 err_clk_get: 2383 err_clk_get:
2380 kfree(host); 2384 kfree(host);
2381 return ret; 2385 return ret;
2382 } 2386 }
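
The tail of atmci_probe() is the kernel's idiomatic goto-based error ladder:
each failing step jumps to a label that releases exactly the resources
acquired so far, in reverse order. Reduced to a skeleton with placeholder
step names:

	static int example_probe(struct platform_device *pdev)
	{
		int ret;

		ret = acquire_a(pdev);		/* e.g. clk_get() */
		if (ret)
			goto err_a;
		ret = acquire_b(pdev);		/* e.g. ioremap() */
		if (ret)
			goto err_b;
		return 0;

	err_b:
		release_a(pdev);	/* undo only what succeeded */
	err_a:
		return ret;
	}
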
2383 2387
2384 static int __exit atmci_remove(struct platform_device *pdev) 2388 static int __exit atmci_remove(struct platform_device *pdev)
2385 { 2389 {
2386 struct atmel_mci *host = platform_get_drvdata(pdev); 2390 struct atmel_mci *host = platform_get_drvdata(pdev);
2387 unsigned int i; 2391 unsigned int i;
2388 2392
2389 platform_set_drvdata(pdev, NULL); 2393 platform_set_drvdata(pdev, NULL);
2390 2394
2391 if (host->buffer) 2395 if (host->buffer)
2392 dma_free_coherent(&pdev->dev, host->buf_size, 2396 dma_free_coherent(&pdev->dev, host->buf_size,
2393 host->buffer, host->buf_phys_addr); 2397 host->buffer, host->buf_phys_addr);
2394 2398
2395 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { 2399 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2396 if (host->slot[i]) 2400 if (host->slot[i])
2397 atmci_cleanup_slot(host->slot[i], i); 2401 atmci_cleanup_slot(host->slot[i], i);
2398 } 2402 }
2399 2403
2400 clk_enable(host->mck); 2404 clk_enable(host->mck);
2401 atmci_writel(host, ATMCI_IDR, ~0UL); 2405 atmci_writel(host, ATMCI_IDR, ~0UL);
2402 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS); 2406 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
2403 atmci_readl(host, ATMCI_SR); 2407 atmci_readl(host, ATMCI_SR);
2404 clk_disable(host->mck); 2408 clk_disable(host->mck);
2405 2409
2406 #ifdef CONFIG_MMC_ATMELMCI_DMA 2410 #ifdef CONFIG_MMC_ATMELMCI_DMA
2407 if (host->dma.chan) 2411 if (host->dma.chan)
2408 dma_release_channel(host->dma.chan); 2412 dma_release_channel(host->dma.chan);
2409 #endif 2413 #endif
2410 2414
2411 free_irq(platform_get_irq(pdev, 0), host); 2415 free_irq(platform_get_irq(pdev, 0), host);
2412 iounmap(host->regs); 2416 iounmap(host->regs);
2413 2417
2414 clk_put(host->mck); 2418 clk_put(host->mck);
2415 kfree(host); 2419 kfree(host);
2416 2420
2417 return 0; 2421 return 0;
2418 } 2422 }
2419 2423
2420 #ifdef CONFIG_PM 2424 #ifdef CONFIG_PM
2421 static int atmci_suspend(struct device *dev) 2425 static int atmci_suspend(struct device *dev)
2422 { 2426 {
2423 struct atmel_mci *host = dev_get_drvdata(dev); 2427 struct atmel_mci *host = dev_get_drvdata(dev);
2424 int i; 2428 int i;
2425 2429
2426 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { 2430 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2427 struct atmel_mci_slot *slot = host->slot[i]; 2431 struct atmel_mci_slot *slot = host->slot[i];
2428 int ret; 2432 int ret;
2429 2433
2430 if (!slot) 2434 if (!slot)
2431 continue; 2435 continue;
2432 ret = mmc_suspend_host(slot->mmc); 2436 ret = mmc_suspend_host(slot->mmc);
2433 if (ret < 0) { 2437 if (ret < 0) {
2434 while (--i >= 0) { 2438 while (--i >= 0) {
2435 slot = host->slot[i]; 2439 slot = host->slot[i];
2436 if (slot 2440 if (slot
2437 && test_bit(ATMCI_SUSPENDED, &slot->flags)) { 2441 && test_bit(ATMCI_SUSPENDED, &slot->flags)) {
2438 mmc_resume_host(host->slot[i]->mmc); 2442 mmc_resume_host(host->slot[i]->mmc);
2439 clear_bit(ATMCI_SUSPENDED, &slot->flags); 2443 clear_bit(ATMCI_SUSPENDED, &slot->flags);
2440 } 2444 }
2441 } 2445 }
2442 return ret; 2446 return ret;
2443 } else { 2447 } else {
2444 set_bit(ATMCI_SUSPENDED, &slot->flags); 2448 set_bit(ATMCI_SUSPENDED, &slot->flags);
2445 } 2449 }
2446 } 2450 }
2447 2451
2448 return 0; 2452 return 0;
2449 } 2453 }
2450 2454
2451 static int atmci_resume(struct device *dev) 2455 static int atmci_resume(struct device *dev)
2452 { 2456 {
2453 struct atmel_mci *host = dev_get_drvdata(dev); 2457 struct atmel_mci *host = dev_get_drvdata(dev);
2454 int i; 2458 int i;
2455 int ret = 0; 2459 int ret = 0;
2456 2460
2457 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { 2461 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2458 struct atmel_mci_slot *slot = host->slot[i]; 2462 struct atmel_mci_slot *slot = host->slot[i];
2459 int err; 2463 int err;
2460 2464
2461 slot = host->slot[i]; 2465 slot = host->slot[i];
2462 if (!slot) 2466 if (!slot)
2463 continue; 2467 continue;
2464 if (!test_bit(ATMCI_SUSPENDED, &slot->flags)) 2468 if (!test_bit(ATMCI_SUSPENDED, &slot->flags))
2465 continue; 2469 continue;
2466 err = mmc_resume_host(slot->mmc); 2470 err = mmc_resume_host(slot->mmc);
2467 if (err < 0) 2471 if (err < 0)
2468 ret = err; 2472 ret = err;
2469 else 2473 else
2470 clear_bit(ATMCI_SUSPENDED, &slot->flags); 2474 clear_bit(ATMCI_SUSPENDED, &slot->flags);
2471 } 2475 }
2472 2476
2473 return ret; 2477 return ret;
2474 } 2478 }
2475 static SIMPLE_DEV_PM_OPS(atmci_pm, atmci_suspend, atmci_resume); 2479 static SIMPLE_DEV_PM_OPS(atmci_pm, atmci_suspend, atmci_resume);
2476 #define ATMCI_PM_OPS (&atmci_pm) 2480 #define ATMCI_PM_OPS (&atmci_pm)
2477 #else 2481 #else
2478 #define ATMCI_PM_OPS NULL 2482 #define ATMCI_PM_OPS NULL
2479 #endif 2483 #endif
2480 2484
2481 static struct platform_driver atmci_driver = { 2485 static struct platform_driver atmci_driver = {
2482 .remove = __exit_p(atmci_remove), 2486 .remove = __exit_p(atmci_remove),
2483 .driver = { 2487 .driver = {
2484 .name = "atmel_mci", 2488 .name = "atmel_mci",
2485 .pm = ATMCI_PM_OPS, 2489 .pm = ATMCI_PM_OPS,
2486 }, 2490 },
2487 }; 2491 };
2488 2492
2489 static int __init atmci_init(void) 2493 static int __init atmci_init(void)
2490 { 2494 {
2491 return platform_driver_probe(&atmci_driver, atmci_probe); 2495 return platform_driver_probe(&atmci_driver, atmci_probe);
2492 } 2496 }
2493 2497
2494 static void __exit atmci_exit(void) 2498 static void __exit atmci_exit(void)
2495 { 2499 {
2496 platform_driver_unregister(&atmci_driver); 2500 platform_driver_unregister(&atmci_driver);
2497 } 2501 }
2498 2502
2499 late_initcall(atmci_init); /* try to load after dma driver when built-in */ 2503 late_initcall(atmci_init); /* try to load after dma driver when built-in */
2500 module_exit(atmci_exit); 2504 module_exit(atmci_exit);
2501 2505
2502 MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver"); 2506 MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
2503 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 2507 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
2504 MODULE_LICENSE("GPL v2"); 2508 MODULE_LICENSE("GPL v2");
2505 2509
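The registration above deliberately uses platform_driver_probe() instead of
platform_driver_register(): atmci_probe() is __init and is discarded after
boot, so no .probe member appears in atmci_driver and a device cannot be
bound later (hence also the __exit_p() wrapper around the remove routine).
The ordinary hot-bindable form would look roughly like this (illustrative
names):

	static struct platform_driver example_driver = {
		.probe = example_probe,		/* must not be __init */
		.remove = __devexit_p(example_remove),
		.driver = {
			.name = "example",
		},
	};

	/* Expands to the usual registration/unregistration boilerplate. */
	module_platform_driver(example_driver);
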
drivers/mmc/host/bfin_sdh.c
1 /* 1 /*
2 * bfin_sdh.c - Analog Devices Blackfin SDH Controller 2 * bfin_sdh.c - Analog Devices Blackfin SDH Controller
3 * 3 *
4 * Copyright (C) 2007-2009 Analog Devices Inc. 4 * Copyright (C) 2007-2009 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
9 #define DRIVER_NAME "bfin-sdh" 9 #define DRIVER_NAME "bfin-sdh"
10 10
11 #include <linux/module.h> 11 #include <linux/module.h>
12 #include <linux/init.h> 12 #include <linux/init.h>
13 #include <linux/ioport.h> 13 #include <linux/ioport.h>
14 #include <linux/platform_device.h> 14 #include <linux/platform_device.h>
15 #include <linux/delay.h> 15 #include <linux/delay.h>
16 #include <linux/interrupt.h> 16 #include <linux/interrupt.h>
17 #include <linux/dma-mapping.h> 17 #include <linux/dma-mapping.h>
18 #include <linux/mmc/host.h> 18 #include <linux/mmc/host.h>
19 #include <linux/proc_fs.h> 19 #include <linux/proc_fs.h>
20 #include <linux/gfp.h> 20 #include <linux/gfp.h>
21 21
22 #include <asm/cacheflush.h> 22 #include <asm/cacheflush.h>
23 #include <asm/dma.h> 23 #include <asm/dma.h>
24 #include <asm/portmux.h> 24 #include <asm/portmux.h>
25 #include <asm/bfin_sdh.h> 25 #include <asm/bfin_sdh.h>
26 26
27 #if defined(CONFIG_BF51x) 27 #if defined(CONFIG_BF51x)
28 #define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL 28 #define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL
29 #define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL 29 #define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL
30 #define bfin_read_SDH_CLK_CTL bfin_read_RSI_CLK_CTL 30 #define bfin_read_SDH_CLK_CTL bfin_read_RSI_CLK_CTL
31 #define bfin_write_SDH_CLK_CTL bfin_write_RSI_CLK_CTL 31 #define bfin_write_SDH_CLK_CTL bfin_write_RSI_CLK_CTL
32 #define bfin_write_SDH_ARGUMENT bfin_write_RSI_ARGUMENT 32 #define bfin_write_SDH_ARGUMENT bfin_write_RSI_ARGUMENT
33 #define bfin_write_SDH_COMMAND bfin_write_RSI_COMMAND 33 #define bfin_write_SDH_COMMAND bfin_write_RSI_COMMAND
34 #define bfin_write_SDH_DATA_TIMER bfin_write_RSI_DATA_TIMER 34 #define bfin_write_SDH_DATA_TIMER bfin_write_RSI_DATA_TIMER
35 #define bfin_read_SDH_RESPONSE0 bfin_read_RSI_RESPONSE0 35 #define bfin_read_SDH_RESPONSE0 bfin_read_RSI_RESPONSE0
36 #define bfin_read_SDH_RESPONSE1 bfin_read_RSI_RESPONSE1 36 #define bfin_read_SDH_RESPONSE1 bfin_read_RSI_RESPONSE1
37 #define bfin_read_SDH_RESPONSE2 bfin_read_RSI_RESPONSE2 37 #define bfin_read_SDH_RESPONSE2 bfin_read_RSI_RESPONSE2
38 #define bfin_read_SDH_RESPONSE3 bfin_read_RSI_RESPONSE3 38 #define bfin_read_SDH_RESPONSE3 bfin_read_RSI_RESPONSE3
39 #define bfin_write_SDH_DATA_LGTH bfin_write_RSI_DATA_LGTH 39 #define bfin_write_SDH_DATA_LGTH bfin_write_RSI_DATA_LGTH
40 #define bfin_read_SDH_DATA_CTL bfin_read_RSI_DATA_CTL 40 #define bfin_read_SDH_DATA_CTL bfin_read_RSI_DATA_CTL
41 #define bfin_write_SDH_DATA_CTL bfin_write_RSI_DATA_CTL 41 #define bfin_write_SDH_DATA_CTL bfin_write_RSI_DATA_CTL
42 #define bfin_read_SDH_DATA_CNT bfin_read_RSI_DATA_CNT 42 #define bfin_read_SDH_DATA_CNT bfin_read_RSI_DATA_CNT
43 #define bfin_write_SDH_STATUS_CLR bfin_write_RSI_STATUS_CLR 43 #define bfin_write_SDH_STATUS_CLR bfin_write_RSI_STATUS_CLR
44 #define bfin_read_SDH_E_STATUS bfin_read_RSI_E_STATUS 44 #define bfin_read_SDH_E_STATUS bfin_read_RSI_E_STATUS
45 #define bfin_write_SDH_E_STATUS bfin_write_RSI_E_STATUS 45 #define bfin_write_SDH_E_STATUS bfin_write_RSI_E_STATUS
46 #define bfin_read_SDH_STATUS bfin_read_RSI_STATUS 46 #define bfin_read_SDH_STATUS bfin_read_RSI_STATUS
47 #define bfin_write_SDH_MASK0 bfin_write_RSI_MASK0 47 #define bfin_write_SDH_MASK0 bfin_write_RSI_MASK0
48 #define bfin_read_SDH_CFG bfin_read_RSI_CFG 48 #define bfin_read_SDH_CFG bfin_read_RSI_CFG
49 #define bfin_write_SDH_CFG bfin_write_RSI_CFG 49 #define bfin_write_SDH_CFG bfin_write_RSI_CFG
50 #endif 50 #endif
51 51
52 struct dma_desc_array {
53 unsigned long start_addr;
54 unsigned short cfg;
55 unsigned short x_count;
56 short x_modify;
57 } __packed;
58
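
Note that this hunk removes the driver-local definition of struct
dma_desc_array (old lines 52-58 have no counterpart in the new column). The
driver keeps using the type for host->sg_cpu, so the definition is evidently
now provided by the architecture headers this file already pulls in via
<asm/dma.h>, and the duplicate local copy had to go to avoid a redefinition
build error.
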
59 struct sdh_host { 52 struct sdh_host {
60 struct mmc_host *mmc; 53 struct mmc_host *mmc;
61 spinlock_t lock; 54 spinlock_t lock;
62 struct resource *res; 55 struct resource *res;
63 void __iomem *base; 56 void __iomem *base;
64 int irq; 57 int irq;
65 int stat_irq; 58 int stat_irq;
66 int dma_ch; 59 int dma_ch;
67 int dma_dir; 60 int dma_dir;
68 struct dma_desc_array *sg_cpu; 61 struct dma_desc_array *sg_cpu;
69 dma_addr_t sg_dma; 62 dma_addr_t sg_dma;
70 int dma_len; 63 int dma_len;
71 64
72 unsigned int imask; 65 unsigned int imask;
73 unsigned int power_mode; 66 unsigned int power_mode;
74 unsigned int clk_div; 67 unsigned int clk_div;
75 68
76 struct mmc_request *mrq; 69 struct mmc_request *mrq;
77 struct mmc_command *cmd; 70 struct mmc_command *cmd;
78 struct mmc_data *data; 71 struct mmc_data *data;
79 }; 72 };
80 73
81 static struct bfin_sd_host *get_sdh_data(struct platform_device *pdev) 74 static struct bfin_sd_host *get_sdh_data(struct platform_device *pdev)
82 { 75 {
83 return pdev->dev.platform_data; 76 return pdev->dev.platform_data;
84 } 77 }
85 78
86 static void sdh_stop_clock(struct sdh_host *host) 79 static void sdh_stop_clock(struct sdh_host *host)
87 { 80 {
88 bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() & ~CLK_E); 81 bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() & ~CLK_E);
89 SSYNC(); 82 SSYNC();
90 } 83 }
91 84
92 static void sdh_enable_stat_irq(struct sdh_host *host, unsigned int mask) 85 static void sdh_enable_stat_irq(struct sdh_host *host, unsigned int mask)
93 { 86 {
94 unsigned long flags; 87 unsigned long flags;
95 88
96 spin_lock_irqsave(&host->lock, flags); 89 spin_lock_irqsave(&host->lock, flags);
97 host->imask |= mask; 90 host->imask |= mask;
98 bfin_write_SDH_MASK0(mask); 91 bfin_write_SDH_MASK0(mask);
99 SSYNC(); 92 SSYNC();
100 spin_unlock_irqrestore(&host->lock, flags); 93 spin_unlock_irqrestore(&host->lock, flags);
101 } 94 }
102 95
103 static void sdh_disable_stat_irq(struct sdh_host *host, unsigned int mask) 96 static void sdh_disable_stat_irq(struct sdh_host *host, unsigned int mask)
104 { 97 {
105 unsigned long flags; 98 unsigned long flags;
106 99
107 spin_lock_irqsave(&host->lock, flags); 100 spin_lock_irqsave(&host->lock, flags);
108 host->imask &= ~mask; 101 host->imask &= ~mask;
109 bfin_write_SDH_MASK0(host->imask); 102 bfin_write_SDH_MASK0(host->imask);
110 SSYNC(); 103 SSYNC();
111 spin_unlock_irqrestore(&host->lock, flags); 104 spin_unlock_irqrestore(&host->lock, flags);
112 } 105 }
113 106
114 static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) 107 static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
115 { 108 {
116 unsigned int length; 109 unsigned int length;
117 unsigned int data_ctl; 110 unsigned int data_ctl;
118 unsigned int dma_cfg; 111 unsigned int dma_cfg;
119 unsigned int cycle_ns, timeout; 112 unsigned int cycle_ns, timeout;
120 113
121 dev_dbg(mmc_dev(host->mmc), "%s enter flags: 0x%x\n", __func__, data->flags); 114 dev_dbg(mmc_dev(host->mmc), "%s enter flags: 0x%x\n", __func__, data->flags);
122 host->data = data; 115 host->data = data;
123 data_ctl = 0; 116 data_ctl = 0;
124 dma_cfg = 0; 117 dma_cfg = 0;
125 118
126 length = data->blksz * data->blocks; 119 length = data->blksz * data->blocks;
127 bfin_write_SDH_DATA_LGTH(length); 120 bfin_write_SDH_DATA_LGTH(length);
128 121
129 if (data->flags & MMC_DATA_STREAM) 122 if (data->flags & MMC_DATA_STREAM)
130 data_ctl |= DTX_MODE; 123 data_ctl |= DTX_MODE;
131 124
132 if (data->flags & MMC_DATA_READ) 125 if (data->flags & MMC_DATA_READ)
133 data_ctl |= DTX_DIR; 126 data_ctl |= DTX_DIR;
134 /* Only supports power-of-2 block size */ 127 /* Only supports power-of-2 block size */
135 if (data->blksz & (data->blksz - 1)) 128 if (data->blksz & (data->blksz - 1))
136 return -EINVAL; 129 return -EINVAL;
137 data_ctl |= ((ffs(data->blksz) - 1) << 4); 130 data_ctl |= ((ffs(data->blksz) - 1) << 4);
138 131
139 bfin_write_SDH_DATA_CTL(data_ctl); 132 bfin_write_SDH_DATA_CTL(data_ctl);
140 /* the time of a host clock period in ns */ 133 /* the time of a host clock period in ns */
141 cycle_ns = 1000000000 / (get_sclk() / (2 * (host->clk_div + 1))); 134 cycle_ns = 1000000000 / (get_sclk() / (2 * (host->clk_div + 1)));
142 timeout = data->timeout_ns / cycle_ns; 135 timeout = data->timeout_ns / cycle_ns;
143 timeout += data->timeout_clks; 136 timeout += data->timeout_clks;
144 bfin_write_SDH_DATA_TIMER(timeout); 137 bfin_write_SDH_DATA_TIMER(timeout);
145 SSYNC(); 138 SSYNC();
146 139
147 if (data->flags & MMC_DATA_READ) { 140 if (data->flags & MMC_DATA_READ) {
148 host->dma_dir = DMA_FROM_DEVICE; 141 host->dma_dir = DMA_FROM_DEVICE;
149 dma_cfg |= WNR; 142 dma_cfg |= WNR;
150 } else 143 } else
151 host->dma_dir = DMA_TO_DEVICE; 144 host->dma_dir = DMA_TO_DEVICE;
152 145
153 sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END)); 146 sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END));
154 host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); 147 host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir);
155 #if defined(CONFIG_BF54x) 148 #if defined(CONFIG_BF54x)
156 dma_cfg |= DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_32 | DMAEN; 149 dma_cfg |= DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_32 | DMAEN;
157 { 150 {
158 struct scatterlist *sg; 151 struct scatterlist *sg;
159 int i; 152 int i;
160 for_each_sg(data->sg, sg, host->dma_len, i) { 153 for_each_sg(data->sg, sg, host->dma_len, i) {
161 host->sg_cpu[i].start_addr = sg_dma_address(sg); 154 host->sg_cpu[i].start_addr = sg_dma_address(sg);
162 host->sg_cpu[i].cfg = dma_cfg; 155 host->sg_cpu[i].cfg = dma_cfg;
163 host->sg_cpu[i].x_count = sg_dma_len(sg) / 4; 156 host->sg_cpu[i].x_count = sg_dma_len(sg) / 4;
164 host->sg_cpu[i].x_modify = 4; 157 host->sg_cpu[i].x_modify = 4;
165 dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, " 158 dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, "
166 "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n", 159 "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n",
167 i, host->sg_cpu[i].start_addr, 160 i, host->sg_cpu[i].start_addr,
168 host->sg_cpu[i].cfg, host->sg_cpu[i].x_count, 161 host->sg_cpu[i].cfg, host->sg_cpu[i].x_count,
169 host->sg_cpu[i].x_modify); 162 host->sg_cpu[i].x_modify);
170 } 163 }
171 } 164 }
172 flush_dcache_range((unsigned int)host->sg_cpu, 165 flush_dcache_range((unsigned int)host->sg_cpu,
173 (unsigned int)host->sg_cpu + 166 (unsigned int)host->sg_cpu +
174 host->dma_len * sizeof(struct dma_desc_array)); 167 host->dma_len * sizeof(struct dma_desc_array));
175 /* Set the last descriptor to stop mode */ 168 /* Set the last descriptor to stop mode */
176 host->sg_cpu[host->dma_len - 1].cfg &= ~(DMAFLOW | NDSIZE); 169 host->sg_cpu[host->dma_len - 1].cfg &= ~(DMAFLOW | NDSIZE);
177 host->sg_cpu[host->dma_len - 1].cfg |= DI_EN; 170 host->sg_cpu[host->dma_len - 1].cfg |= DI_EN;
178 171
179 set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma); 172 set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma);
180 set_dma_x_count(host->dma_ch, 0); 173 set_dma_x_count(host->dma_ch, 0);
181 set_dma_x_modify(host->dma_ch, 0); 174 set_dma_x_modify(host->dma_ch, 0);
182 set_dma_config(host->dma_ch, dma_cfg); 175 set_dma_config(host->dma_ch, dma_cfg);
183 #elif defined(CONFIG_BF51x) 176 #elif defined(CONFIG_BF51x)
184 /* RSI DMA doesn't work in array mode */ 177 /* RSI DMA doesn't work in array mode */
185 dma_cfg |= WDSIZE_32 | DMAEN; 178 dma_cfg |= WDSIZE_32 | DMAEN;
186 set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0])); 179 set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0]));
187 set_dma_x_count(host->dma_ch, length / 4); 180 set_dma_x_count(host->dma_ch, length / 4);
188 set_dma_x_modify(host->dma_ch, 4); 181 set_dma_x_modify(host->dma_ch, 4);
189 set_dma_config(host->dma_ch, dma_cfg); 182 set_dma_config(host->dma_ch, dma_cfg);
190 #endif 183 #endif
191 bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E); 184 bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E);
192 185
193 SSYNC(); 186 SSYNC();
194 187
195 dev_dbg(mmc_dev(host->mmc), "%s exit\n", __func__); 188 dev_dbg(mmc_dev(host->mmc), "%s exit\n", __func__);
196 return 0; 189 return 0;
197 } 190 }
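
Two computations in sdh_setup_data() are worth unpacking: the block size is
validated with the classic power-of-two test, blksz & (blksz - 1), and encoded
as log2(blksz) via ffs(); the timeout is converted from nanoseconds to
controller clock cycles using the divided SCLK. With illustrative numbers (not
taken from any real board):

	/* blksz = 512:  512 & 511 == 0, so it is a power of two;
	 *               ffs(512) - 1 == 9, so data_ctl |= 9 << 4.
	 *
	 * get_sclk() = 100 MHz, host->clk_div = 1:
	 *   cycle_ns = 1000000000 / (100000000 / (2 * (1 + 1))) = 40 ns
	 *   timeout  = data->timeout_ns / 40 + data->timeout_clks
	 */
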
198 191
199 static void sdh_start_cmd(struct sdh_host *host, struct mmc_command *cmd) 192 static void sdh_start_cmd(struct sdh_host *host, struct mmc_command *cmd)
200 { 193 {
201 unsigned int sdh_cmd; 194 unsigned int sdh_cmd;
202 unsigned int stat_mask; 195 unsigned int stat_mask;
203 196
204 dev_dbg(mmc_dev(host->mmc), "%s enter cmd: 0x%p\n", __func__, cmd); 197 dev_dbg(mmc_dev(host->mmc), "%s enter cmd: 0x%p\n", __func__, cmd);
205 WARN_ON(host->cmd != NULL); 198 WARN_ON(host->cmd != NULL);
206 host->cmd = cmd; 199 host->cmd = cmd;
207 200
208 sdh_cmd = 0; 201 sdh_cmd = 0;
209 stat_mask = 0; 202 stat_mask = 0;
210 203
211 sdh_cmd |= cmd->opcode; 204 sdh_cmd |= cmd->opcode;
212 205
213 if (cmd->flags & MMC_RSP_PRESENT) { 206 if (cmd->flags & MMC_RSP_PRESENT) {
214 sdh_cmd |= CMD_RSP; 207 sdh_cmd |= CMD_RSP;
215 stat_mask |= CMD_RESP_END; 208 stat_mask |= CMD_RESP_END;
216 } else { 209 } else {
217 stat_mask |= CMD_SENT; 210 stat_mask |= CMD_SENT;
218 } 211 }
219 212
220 if (cmd->flags & MMC_RSP_136) 213 if (cmd->flags & MMC_RSP_136)
221 sdh_cmd |= CMD_L_RSP; 214 sdh_cmd |= CMD_L_RSP;
222 215
223 stat_mask |= CMD_CRC_FAIL | CMD_TIME_OUT; 216 stat_mask |= CMD_CRC_FAIL | CMD_TIME_OUT;
224 217
225 sdh_enable_stat_irq(host, stat_mask); 218 sdh_enable_stat_irq(host, stat_mask);
226 219
227 bfin_write_SDH_ARGUMENT(cmd->arg); 220 bfin_write_SDH_ARGUMENT(cmd->arg);
228 bfin_write_SDH_COMMAND(sdh_cmd | CMD_E); 221 bfin_write_SDH_COMMAND(sdh_cmd | CMD_E);
229 bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() | CLK_E); 222 bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() | CLK_E);
230 SSYNC(); 223 SSYNC();
231 } 224 }
232 225
233 static void sdh_finish_request(struct sdh_host *host, struct mmc_request *mrq) 226 static void sdh_finish_request(struct sdh_host *host, struct mmc_request *mrq)
234 { 227 {
235 dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__); 228 dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__);
236 host->mrq = NULL; 229 host->mrq = NULL;
237 host->cmd = NULL; 230 host->cmd = NULL;
238 host->data = NULL; 231 host->data = NULL;
239 mmc_request_done(host->mmc, mrq); 232 mmc_request_done(host->mmc, mrq);
240 } 233 }
241 234
242 static int sdh_cmd_done(struct sdh_host *host, unsigned int stat) 235 static int sdh_cmd_done(struct sdh_host *host, unsigned int stat)
243 { 236 {
244 struct mmc_command *cmd = host->cmd; 237 struct mmc_command *cmd = host->cmd;
245 int ret = 0; 238 int ret = 0;
246 239
247 dev_dbg(mmc_dev(host->mmc), "%s enter cmd: %p\n", __func__, cmd); 240 dev_dbg(mmc_dev(host->mmc), "%s enter cmd: %p\n", __func__, cmd);
248 if (!cmd) 241 if (!cmd)
249 return 0; 242 return 0;
250 243
251 host->cmd = NULL; 244 host->cmd = NULL;
252 245
253 if (cmd->flags & MMC_RSP_PRESENT) { 246 if (cmd->flags & MMC_RSP_PRESENT) {
254 cmd->resp[0] = bfin_read_SDH_RESPONSE0(); 247 cmd->resp[0] = bfin_read_SDH_RESPONSE0();
255 if (cmd->flags & MMC_RSP_136) { 248 if (cmd->flags & MMC_RSP_136) {
256 cmd->resp[1] = bfin_read_SDH_RESPONSE1(); 249 cmd->resp[1] = bfin_read_SDH_RESPONSE1();
257 cmd->resp[2] = bfin_read_SDH_RESPONSE2(); 250 cmd->resp[2] = bfin_read_SDH_RESPONSE2();
258 cmd->resp[3] = bfin_read_SDH_RESPONSE3(); 251 cmd->resp[3] = bfin_read_SDH_RESPONSE3();
259 } 252 }
260 } 253 }
261 if (stat & CMD_TIME_OUT) 254 if (stat & CMD_TIME_OUT)
262 cmd->error = -ETIMEDOUT; 255 cmd->error = -ETIMEDOUT;
263 else if (stat & CMD_CRC_FAIL && cmd->flags & MMC_RSP_CRC) 256 else if (stat & CMD_CRC_FAIL && cmd->flags & MMC_RSP_CRC)
264 cmd->error = -EILSEQ; 257 cmd->error = -EILSEQ;
265 258
266 sdh_disable_stat_irq(host, (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL)); 259 sdh_disable_stat_irq(host, (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL));
267 260
268 if (host->data && !cmd->error) { 261 if (host->data && !cmd->error) {
269 if (host->data->flags & MMC_DATA_WRITE) { 262 if (host->data->flags & MMC_DATA_WRITE) {
270 ret = sdh_setup_data(host, host->data); 263 ret = sdh_setup_data(host, host->data);
271 if (ret) 264 if (ret)
272 return 0; 265 return 0;
273 } 266 }
274 267
275 sdh_enable_stat_irq(host, DAT_END | RX_OVERRUN | TX_UNDERRUN | DAT_TIME_OUT); 268 sdh_enable_stat_irq(host, DAT_END | RX_OVERRUN | TX_UNDERRUN | DAT_TIME_OUT);
276 } else 269 } else
277 sdh_finish_request(host, host->mrq); 270 sdh_finish_request(host, host->mrq);
278 271
279 return 1; 272 return 1;
280 } 273 }
281 274
282 static int sdh_data_done(struct sdh_host *host, unsigned int stat) 275 static int sdh_data_done(struct sdh_host *host, unsigned int stat)
283 { 276 {
284 struct mmc_data *data = host->data; 277 struct mmc_data *data = host->data;
285 278
286 dev_dbg(mmc_dev(host->mmc), "%s enter stat: 0x%x\n", __func__, stat); 279 dev_dbg(mmc_dev(host->mmc), "%s enter stat: 0x%x\n", __func__, stat);
287 if (!data) 280 if (!data)
288 return 0; 281 return 0;
289 282
290 disable_dma(host->dma_ch); 283 disable_dma(host->dma_ch);
291 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 284 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
292 host->dma_dir); 285 host->dma_dir);
293 286
294 if (stat & DAT_TIME_OUT) 287 if (stat & DAT_TIME_OUT)
295 data->error = -ETIMEDOUT; 288 data->error = -ETIMEDOUT;
296 else if (stat & DAT_CRC_FAIL) 289 else if (stat & DAT_CRC_FAIL)
297 data->error = -EILSEQ; 290 data->error = -EILSEQ;
298 else if (stat & (RX_OVERRUN | TX_UNDERRUN)) 291 else if (stat & (RX_OVERRUN | TX_UNDERRUN))
299 data->error = -EIO; 292 data->error = -EIO;
300 293
301 if (!data->error) 294 if (!data->error)
302 data->bytes_xfered = data->blocks * data->blksz; 295 data->bytes_xfered = data->blocks * data->blksz;
303 else 296 else
304 data->bytes_xfered = 0; 297 data->bytes_xfered = 0;
305 298
306 sdh_disable_stat_irq(host, DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN); 299 sdh_disable_stat_irq(host, DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN);
307 bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \ 300 bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \
308 DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN); 301 DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN);
309 bfin_write_SDH_DATA_CTL(0); 302 bfin_write_SDH_DATA_CTL(0);
310 SSYNC(); 303 SSYNC();
311 304
312 host->data = NULL; 305 host->data = NULL;
313 if (host->mrq->stop) { 306 if (host->mrq->stop) {
314 sdh_stop_clock(host); 307 sdh_stop_clock(host);
315 sdh_start_cmd(host, host->mrq->stop); 308 sdh_start_cmd(host, host->mrq->stop);
316 } else { 309 } else {
317 sdh_finish_request(host, host->mrq); 310 sdh_finish_request(host, host->mrq);
318 } 311 }
319 312
320 return 1; 313 return 1;
321 } 314 }
322 315
323 static void sdh_request(struct mmc_host *mmc, struct mmc_request *mrq) 316 static void sdh_request(struct mmc_host *mmc, struct mmc_request *mrq)
324 { 317 {
325 struct sdh_host *host = mmc_priv(mmc); 318 struct sdh_host *host = mmc_priv(mmc);
326 int ret = 0; 319 int ret = 0;
327 320
328 dev_dbg(mmc_dev(host->mmc), "%s enter, mrq:%p, cmd:%p\n", __func__, mrq, mrq->cmd); 321 dev_dbg(mmc_dev(host->mmc), "%s enter, mrq:%p, cmd:%p\n", __func__, mrq, mrq->cmd);
329 WARN_ON(host->mrq != NULL); 322 WARN_ON(host->mrq != NULL);
330 323
331 host->mrq = mrq; 324 host->mrq = mrq;
332 host->data = mrq->data; 325 host->data = mrq->data;
333 326
334 if (mrq->data && mrq->data->flags & MMC_DATA_READ) { 327 if (mrq->data && mrq->data->flags & MMC_DATA_READ) {
335 ret = sdh_setup_data(host, mrq->data); 328 ret = sdh_setup_data(host, mrq->data);
336 if (ret) 329 if (ret)
337 return; 330 return;
338 } 331 }
339 332
340 sdh_start_cmd(host, mrq->cmd); 333 sdh_start_cmd(host, mrq->cmd);
341 } 334 }
342 335
343 static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 336 static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
344 { 337 {
345 struct sdh_host *host; 338 struct sdh_host *host;
346 unsigned long flags; 339 unsigned long flags;
347 u16 clk_ctl = 0; 340 u16 clk_ctl = 0;
348 u16 pwr_ctl = 0; 341 u16 pwr_ctl = 0;
349 u16 cfg; 342 u16 cfg;
350 host = mmc_priv(mmc); 343 host = mmc_priv(mmc);
351 344
352 spin_lock_irqsave(&host->lock, flags); 345 spin_lock_irqsave(&host->lock, flags);
353 if (ios->clock) { 346 if (ios->clock) {
354 unsigned long sys_clk, ios_clk; 347 unsigned long sys_clk, ios_clk;
355 unsigned char clk_div; 348 unsigned char clk_div;
356 ios_clk = 2 * ios->clock; 349 ios_clk = 2 * ios->clock;
357 sys_clk = get_sclk(); 350 sys_clk = get_sclk();
358 clk_div = sys_clk / ios_clk; 351 clk_div = sys_clk / ios_clk;
359 if (sys_clk % ios_clk == 0) 352 if (sys_clk % ios_clk == 0)
360 clk_div -= 1; 353 clk_div -= 1;
361 clk_div = min_t(unsigned char, clk_div, 0xFF); 354 clk_div = min_t(unsigned char, clk_div, 0xFF);
362 clk_ctl |= clk_div; 355 clk_ctl |= clk_div;
363 clk_ctl |= CLK_E; 356 clk_ctl |= CLK_E;
364 host->clk_div = clk_div; 357 host->clk_div = clk_div;
365 } else 358 } else
366 sdh_stop_clock(host); 359 sdh_stop_clock(host);
367 360
368 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) 361 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
369 #ifdef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND 362 #ifdef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
370 pwr_ctl |= ROD_CTL; 363 pwr_ctl |= ROD_CTL;
371 #else 364 #else
372 pwr_ctl |= SD_CMD_OD | ROD_CTL; 365 pwr_ctl |= SD_CMD_OD | ROD_CTL;
373 #endif 366 #endif
374 367
375 if (ios->bus_width == MMC_BUS_WIDTH_4) { 368 if (ios->bus_width == MMC_BUS_WIDTH_4) {
376 cfg = bfin_read_SDH_CFG(); 369 cfg = bfin_read_SDH_CFG();
377 cfg &= ~PD_SDDAT3; 370 cfg &= ~PD_SDDAT3;
378 cfg |= PUP_SDDAT3; 371 cfg |= PUP_SDDAT3;
379 /* Enable 4 bit SDIO */ 372 /* Enable 4 bit SDIO */
380 cfg |= (SD4E | MWE); 373 cfg |= (SD4E | MWE);
381 bfin_write_SDH_CFG(cfg); 374 bfin_write_SDH_CFG(cfg);
382 clk_ctl |= WIDE_BUS; 375 clk_ctl |= WIDE_BUS;
383 } else { 376 } else {
384 cfg = bfin_read_SDH_CFG(); 377 cfg = bfin_read_SDH_CFG();
385 cfg |= MWE; 378 cfg |= MWE;
386 bfin_write_SDH_CFG(cfg); 379 bfin_write_SDH_CFG(cfg);
387 } 380 }
388 381
389 bfin_write_SDH_CLK_CTL(clk_ctl); 382 bfin_write_SDH_CLK_CTL(clk_ctl);
390 383
391 host->power_mode = ios->power_mode; 384 host->power_mode = ios->power_mode;
392 if (ios->power_mode == MMC_POWER_ON) 385 if (ios->power_mode == MMC_POWER_ON)
393 pwr_ctl |= PWR_ON; 386 pwr_ctl |= PWR_ON;
394 387
395 bfin_write_SDH_PWR_CTL(pwr_ctl); 388 bfin_write_SDH_PWR_CTL(pwr_ctl);
396 SSYNC(); 389 SSYNC();
397 390
398 spin_unlock_irqrestore(&host->lock, flags); 391 spin_unlock_irqrestore(&host->lock, flags);
399 392
400 dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n", 393 dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n",
401 host->clk_div, 394 host->clk_div,
402 host->clk_div ? get_sclk() / (2 * (host->clk_div + 1)) : 0, 395 host->clk_div ? get_sclk() / (2 * (host->clk_div + 1)) : 0,
403 ios->clock); 396 ios->clock);
404 } 397 }
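
The divider arithmetic in sdh_set_ios() follows from the SDH clock relation
card_clk = SCLK / (2 * (clk_div + 1)). The code computes SCLK / (2 * ios->clock)
and subtracts one only when the division is exact, so the resulting rate never
exceeds the requested one. A worked example with an assumed 100 MHz SCLK:

	/* ios->clock = 25 MHz: 100 / (2 * 25) = 2, exact  -> clk_div = 1;
	 *   actual = 100 MHz / (2 * (1 + 1)) = 25 MHz (exact match)
	 * ios->clock = 20 MHz: 100 / (2 * 20) = 2, rem 20 -> clk_div = 2;
	 *   actual = 100 MHz / (2 * (2 + 1)) = 16.7 MHz (never above 20)
	 */
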
405 398
406 static const struct mmc_host_ops sdh_ops = { 399 static const struct mmc_host_ops sdh_ops = {
407 .request = sdh_request, 400 .request = sdh_request,
408 .set_ios = sdh_set_ios, 401 .set_ios = sdh_set_ios,
409 }; 402 };
410 403
411 static irqreturn_t sdh_dma_irq(int irq, void *devid) 404 static irqreturn_t sdh_dma_irq(int irq, void *devid)
412 { 405 {
413 struct sdh_host *host = devid; 406 struct sdh_host *host = devid;
414 407
415 dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04x\n", __func__, 408 dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04x\n", __func__,
416 get_dma_curr_irqstat(host->dma_ch)); 409 get_dma_curr_irqstat(host->dma_ch));
417 clear_dma_irqstat(host->dma_ch); 410 clear_dma_irqstat(host->dma_ch);
418 SSYNC(); 411 SSYNC();
419 412
420 return IRQ_HANDLED; 413 return IRQ_HANDLED;
421 } 414 }
422 415
423 static irqreturn_t sdh_stat_irq(int irq, void *devid) 416 static irqreturn_t sdh_stat_irq(int irq, void *devid)
424 { 417 {
425 struct sdh_host *host = devid; 418 struct sdh_host *host = devid;
426 unsigned int status; 419 unsigned int status;
427 int handled = 0; 420 int handled = 0;
428 421
429 dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__); 422 dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__);
430 status = bfin_read_SDH_E_STATUS(); 423 status = bfin_read_SDH_E_STATUS();
431 if (status & SD_CARD_DET) { 424 if (status & SD_CARD_DET) {
432 mmc_detect_change(host->mmc, 0); 425 mmc_detect_change(host->mmc, 0);
433 bfin_write_SDH_E_STATUS(SD_CARD_DET); 426 bfin_write_SDH_E_STATUS(SD_CARD_DET);
434 } 427 }
435 status = bfin_read_SDH_STATUS(); 428 status = bfin_read_SDH_STATUS();
436 if (status & (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL)) { 429 if (status & (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL)) {
437 handled |= sdh_cmd_done(host, status); 430 handled |= sdh_cmd_done(host, status);
438 bfin_write_SDH_STATUS_CLR(CMD_SENT_STAT | CMD_RESP_END_STAT | \ 431 bfin_write_SDH_STATUS_CLR(CMD_SENT_STAT | CMD_RESP_END_STAT | \
439 CMD_TIMEOUT_STAT | CMD_CRC_FAIL_STAT); 432 CMD_TIMEOUT_STAT | CMD_CRC_FAIL_STAT);
440 SSYNC(); 433 SSYNC();
441 } 434 }
442 435
443 status = bfin_read_SDH_STATUS(); 436 status = bfin_read_SDH_STATUS();
444 if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN)) 437 if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN))
445 handled |= sdh_data_done(host, status); 438 handled |= sdh_data_done(host, status);
446 439
447 dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__); 440 dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__);
448 441
449 return IRQ_RETVAL(handled); 442 return IRQ_RETVAL(handled);
450 } 443 }
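
sdh_stat_irq() accumulates a handled flag across the command and data paths
and converts it with IRQ_RETVAL(), which maps any nonzero value to IRQ_HANDLED
and zero to IRQ_NONE, i.e. it is equivalent to:

	return handled ? IRQ_HANDLED : IRQ_NONE;
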
451 444
452 static int __devinit sdh_probe(struct platform_device *pdev) 445 static int __devinit sdh_probe(struct platform_device *pdev)
453 { 446 {
454 struct mmc_host *mmc; 447 struct mmc_host *mmc;
455 struct sdh_host *host; 448 struct sdh_host *host;
456 struct bfin_sd_host *drv_data = get_sdh_data(pdev); 449 struct bfin_sd_host *drv_data = get_sdh_data(pdev);
457 int ret; 450 int ret;
458 451
459 if (!drv_data) { 452 if (!drv_data) {
460 dev_err(&pdev->dev, "missing platform driver data\n"); 453 dev_err(&pdev->dev, "missing platform driver data\n");
461 ret = -EINVAL; 454 ret = -EINVAL;
462 goto out; 455 goto out;
463 } 456 }
464 457
465 mmc = mmc_alloc_host(sizeof(struct sdh_host), &pdev->dev); 458 mmc = mmc_alloc_host(sizeof(struct sdh_host), &pdev->dev);
466 if (!mmc) { 459 if (!mmc) {
467 ret = -ENOMEM; 460 ret = -ENOMEM;
468 goto out; 461 goto out;
469 } 462 }
470 463
471 mmc->ops = &sdh_ops; 464 mmc->ops = &sdh_ops;
472 mmc->max_segs = 32; 465 mmc->max_segs = 32;
473 mmc->max_seg_size = 1 << 16; 466 mmc->max_seg_size = 1 << 16;
474 mmc->max_blk_size = 1 << 11; 467 mmc->max_blk_size = 1 << 11;
475 mmc->max_blk_count = 1 << 11; 468 mmc->max_blk_count = 1 << 11;
476 mmc->max_req_size = PAGE_SIZE; 469 mmc->max_req_size = PAGE_SIZE;
477 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 470 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
478 mmc->f_max = get_sclk(); 471 mmc->f_max = get_sclk();
479 mmc->f_min = mmc->f_max >> 9; 472 mmc->f_min = mmc->f_max >> 9;
480 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL; 473 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL;
481 host = mmc_priv(mmc); 474 host = mmc_priv(mmc);
482 host->mmc = mmc; 475 host->mmc = mmc;
483 476
484 spin_lock_init(&host->lock); 477 spin_lock_init(&host->lock);
485 host->irq = drv_data->irq_int0; 478 host->irq = drv_data->irq_int0;
486 host->dma_ch = drv_data->dma_chan; 479 host->dma_ch = drv_data->dma_chan;
487 480
488 ret = request_dma(host->dma_ch, DRIVER_NAME "DMA"); 481 ret = request_dma(host->dma_ch, DRIVER_NAME "DMA");
489 if (ret) { 482 if (ret) {
490 dev_err(&pdev->dev, "unable to request DMA channel\n"); 483 dev_err(&pdev->dev, "unable to request DMA channel\n");
491 goto out1; 484 goto out1;
492 } 485 }
493 486
494 ret = set_dma_callback(host->dma_ch, sdh_dma_irq, host); 487 ret = set_dma_callback(host->dma_ch, sdh_dma_irq, host);
495 if (ret) { 488 if (ret) {
496 dev_err(&pdev->dev, "unable to request DMA irq\n"); 489 dev_err(&pdev->dev, "unable to request DMA irq\n");
497 goto out2; 490 goto out2;
498 } 491 }
499 492
500 host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL); 493 host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
501 if (host->sg_cpu == NULL) { 494 if (host->sg_cpu == NULL) {
502 ret = -ENOMEM; 495 ret = -ENOMEM;
503 goto out2; 496 goto out2;
504 } 497 }
505 498
506 platform_set_drvdata(pdev, mmc); 499 platform_set_drvdata(pdev, mmc);
507 mmc_add_host(mmc); 500 mmc_add_host(mmc);
508 501
509 ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host); 502 ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host);
510 if (ret) { 503 if (ret) {
511 dev_err(&pdev->dev, "unable to request status irq\n"); 504 dev_err(&pdev->dev, "unable to request status irq\n");
512 goto out3; 505 goto out3;
513 } 506 }
514 507
515 ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME); 508 ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME);
516 if (ret) { 509 if (ret) {
517 dev_err(&pdev->dev, "unable to request peripheral pins\n"); 510 dev_err(&pdev->dev, "unable to request peripheral pins\n");
518 goto out4; 511 goto out4;
519 } 512 }
520 #if defined(CONFIG_BF54x) 513 #if defined(CONFIG_BF54x)
521 /* Secure Digital Host shares DMA with Nand controller */ 514 /* Secure Digital Host shares DMA with Nand controller */
522 bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); 515 bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1);
523 #endif 516 #endif
524 517
525 bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); 518 bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN);
526 SSYNC(); 519 SSYNC();
527 520
528 /* Disable the card-insert detection pin; MMC_CAP_NEEDS_POLL is set, so 521 /* Disable the card-insert detection pin; MMC_CAP_NEEDS_POLL is set, so
529 * the mmc stack will do the detection. 522 * the mmc stack will do the detection.
530 */ 523 */
531 bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); 524 bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3));
532 SSYNC(); 525 SSYNC();
533 526
534 return 0; 527 return 0;
535 528
536 out4: 529 out4:
537 free_irq(host->irq, host); 530 free_irq(host->irq, host);
538 out3: 531 out3:
539 mmc_remove_host(mmc); 532 mmc_remove_host(mmc);
540 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); 533 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
541 out2: 534 out2:
542 free_dma(host->dma_ch); 535 free_dma(host->dma_ch);
543 out1: 536 out1:
544 mmc_free_host(mmc); 537 mmc_free_host(mmc);
545 out: 538 out:
546 return ret; 539 return ret;
547 } 540 }
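A note on the error path above: probe acquires resources in order (DMA channel, DMA callback, coherent buffer, host registration, IRQ, pins) and the goto ladder unwinds them in reverse, so a failure at any step releases exactly what was acquired before it. A minimal, self-contained sketch of the idiom (acquire_a/acquire_b/release_a are hypothetical stand-ins, not driver functions):

/* Hypothetical stand-ins; in sdh_probe() these are request_dma(),
 * set_dma_callback(), dma_alloc_coherent(), request_irq(), etc. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static void release_a(void) { }

static int example_probe(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto out;

	ret = acquire_b();
	if (ret)
		goto err_a;	/* undo only what already succeeded */

	return 0;		/* success: resources stay acquired */

err_a:
	release_a();		/* release in reverse order of acquisition */
out:
	return ret;
}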
548 541
549 static int __devexit sdh_remove(struct platform_device *pdev) 542 static int __devexit sdh_remove(struct platform_device *pdev)
550 { 543 {
551 struct mmc_host *mmc = platform_get_drvdata(pdev); 544 struct mmc_host *mmc = platform_get_drvdata(pdev);
552 545
553 platform_set_drvdata(pdev, NULL); 546 platform_set_drvdata(pdev, NULL);
554 547
555 if (mmc) { 548 if (mmc) {
556 struct sdh_host *host = mmc_priv(mmc); 549 struct sdh_host *host = mmc_priv(mmc);
557 550
558 mmc_remove_host(mmc); 551 mmc_remove_host(mmc);
559 552
560 sdh_stop_clock(host); 553 sdh_stop_clock(host);
561 free_irq(host->irq, host); 554 free_irq(host->irq, host);
562 free_dma(host->dma_ch); 555 free_dma(host->dma_ch);
563 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); 556 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
564 557
565 mmc_free_host(mmc); 558 mmc_free_host(mmc);
566 } 559 }
567 560
568 return 0; 561 return 0;
569 } 562 }
570 563
571 #ifdef CONFIG_PM 564 #ifdef CONFIG_PM
572 static int sdh_suspend(struct platform_device *dev, pm_message_t state) 565 static int sdh_suspend(struct platform_device *dev, pm_message_t state)
573 { 566 {
574 struct mmc_host *mmc = platform_get_drvdata(dev); 567 struct mmc_host *mmc = platform_get_drvdata(dev);
575 struct bfin_sd_host *drv_data = get_sdh_data(dev); 568 struct bfin_sd_host *drv_data = get_sdh_data(dev);
576 int ret = 0; 569 int ret = 0;
577 570
578 if (mmc) 571 if (mmc)
579 ret = mmc_suspend_host(mmc); 572 ret = mmc_suspend_host(mmc);
580 573
581 bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON); 574 bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON);
582 peripheral_free_list(drv_data->pin_req); 575 peripheral_free_list(drv_data->pin_req);
583 576
584 return ret; 577 return ret;
585 } 578 }
586 579
587 static int sdh_resume(struct platform_device *dev) 580 static int sdh_resume(struct platform_device *dev)
588 { 581 {
589 struct mmc_host *mmc = platform_get_drvdata(dev); 582 struct mmc_host *mmc = platform_get_drvdata(dev);
590 struct bfin_sd_host *drv_data = get_sdh_data(dev); 583 struct bfin_sd_host *drv_data = get_sdh_data(dev);
591 int ret = 0; 584 int ret = 0;
592 585
593 ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME); 586 ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME);
594 if (ret) { 587 if (ret) {
595 dev_err(&dev->dev, "unable to request peripheral pins\n"); 588 dev_err(&dev->dev, "unable to request peripheral pins\n");
596 return ret; 589 return ret;
597 } 590 }
598 591
599 bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() | PWR_ON); 592 bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() | PWR_ON);
600 #if defined(CONFIG_BF54x) 593 #if defined(CONFIG_BF54x)
601 /* Secure Digital Host shares DMA with NAND controller */ 594 /* Secure Digital Host shares DMA with NAND controller */
602 bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); 595 bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1);
603 #endif 596 #endif
604 bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); 597 bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN);
605 SSYNC(); 598 SSYNC();
606 599
607 bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); 600 bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3));
608 SSYNC(); 601 SSYNC();
609 602
610 if (mmc) 603 if (mmc)
611 ret = mmc_resume_host(mmc); 604 ret = mmc_resume_host(mmc);
612 605
613 return ret; 606 return ret;
614 } 607 }
615 #else 608 #else
616 # define sdh_suspend NULL 609 # define sdh_suspend NULL
617 # define sdh_resume NULL 610 # define sdh_resume NULL
618 #endif 611 #endif
619 612
620 static struct platform_driver sdh_driver = { 613 static struct platform_driver sdh_driver = {
621 .probe = sdh_probe, 614 .probe = sdh_probe,
622 .remove = __devexit_p(sdh_remove), 615 .remove = __devexit_p(sdh_remove),
623 .suspend = sdh_suspend, 616 .suspend = sdh_suspend,
624 .resume = sdh_resume, 617 .resume = sdh_resume,
625 .driver = { 618 .driver = {
626 .name = DRIVER_NAME, 619 .name = DRIVER_NAME,
627 }, 620 },
628 }; 621 };
629 622
630 module_platform_driver(sdh_driver); 623 module_platform_driver(sdh_driver);
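For reference, module_platform_driver() replaces the registration boilerplate; it expands to roughly the following (see include/linux/platform_device.h):

static int __init sdh_driver_init(void)
{
	return platform_driver_register(&sdh_driver);
}
module_init(sdh_driver_init);

static void __exit sdh_driver_exit(void)
{
	platform_driver_unregister(&sdh_driver);
}
module_exit(sdh_driver_exit);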
631 624
632 MODULE_DESCRIPTION("Blackfin Secure Digital Host Driver"); 625 MODULE_DESCRIPTION("Blackfin Secure Digital Host Driver");
633 MODULE_AUTHOR("Cliff Cai, Roy Huang"); 626 MODULE_AUTHOR("Cliff Cai, Roy Huang");
634 MODULE_LICENSE("GPL"); 627 MODULE_LICENSE("GPL");
635 628
drivers/mmc/host/dw_mmc.c
1 /* 1 /*
2 * Synopsys DesignWare Multimedia Card Interface driver 2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx) 3 * (Based on NXP driver for lpc 31xx)
4 * 4 *
5 * Copyright (C) 2009 NXP Semiconductors 5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd. 6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or 10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version. 11 * (at your option) any later version.
12 */ 12 */
13 13
14 #include <linux/blkdev.h> 14 #include <linux/blkdev.h>
15 #include <linux/clk.h> 15 #include <linux/clk.h>
16 #include <linux/debugfs.h> 16 #include <linux/debugfs.h>
17 #include <linux/device.h> 17 #include <linux/device.h>
18 #include <linux/dma-mapping.h> 18 #include <linux/dma-mapping.h>
19 #include <linux/err.h> 19 #include <linux/err.h>
20 #include <linux/init.h> 20 #include <linux/init.h>
21 #include <linux/interrupt.h> 21 #include <linux/interrupt.h>
22 #include <linux/ioport.h> 22 #include <linux/ioport.h>
23 #include <linux/module.h> 23 #include <linux/module.h>
24 #include <linux/platform_device.h> 24 #include <linux/platform_device.h>
25 #include <linux/seq_file.h> 25 #include <linux/seq_file.h>
26 #include <linux/slab.h> 26 #include <linux/slab.h>
27 #include <linux/stat.h> 27 #include <linux/stat.h>
28 #include <linux/delay.h> 28 #include <linux/delay.h>
29 #include <linux/irq.h> 29 #include <linux/irq.h>
30 #include <linux/mmc/host.h> 30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h> 31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/dw_mmc.h> 32 #include <linux/mmc/dw_mmc.h>
33 #include <linux/bitops.h> 33 #include <linux/bitops.h>
34 #include <linux/regulator/consumer.h> 34 #include <linux/regulator/consumer.h>
35 #include <linux/workqueue.h> 35 #include <linux/workqueue.h>
36 36
37 #include "dw_mmc.h" 37 #include "dw_mmc.h"
38 38
39 /* Common flag combinations */ 39 /* Common flag combinations */
40 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \ 40 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \
41 SDMMC_INT_HTO | SDMMC_INT_SBE | \ 41 SDMMC_INT_HTO | SDMMC_INT_SBE | \
42 SDMMC_INT_EBE) 42 SDMMC_INT_EBE)
43 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \ 43 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
44 SDMMC_INT_RESP_ERR) 44 SDMMC_INT_RESP_ERR)
45 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \ 45 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
46 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE) 46 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
47 #define DW_MCI_SEND_STATUS 1 47 #define DW_MCI_SEND_STATUS 1
48 #define DW_MCI_RECV_STATUS 2 48 #define DW_MCI_RECV_STATUS 2
49 #define DW_MCI_DMA_THRESHOLD 16 49 #define DW_MCI_DMA_THRESHOLD 16
50 50
51 #ifdef CONFIG_MMC_DW_IDMAC 51 #ifdef CONFIG_MMC_DW_IDMAC
52 struct idmac_desc { 52 struct idmac_desc {
53 u32 des0; /* Control Descriptor */ 53 u32 des0; /* Control Descriptor */
54 #define IDMAC_DES0_DIC BIT(1) 54 #define IDMAC_DES0_DIC BIT(1)
55 #define IDMAC_DES0_LD BIT(2) 55 #define IDMAC_DES0_LD BIT(2)
56 #define IDMAC_DES0_FD BIT(3) 56 #define IDMAC_DES0_FD BIT(3)
57 #define IDMAC_DES0_CH BIT(4) 57 #define IDMAC_DES0_CH BIT(4)
58 #define IDMAC_DES0_ER BIT(5) 58 #define IDMAC_DES0_ER BIT(5)
59 #define IDMAC_DES0_CES BIT(30) 59 #define IDMAC_DES0_CES BIT(30)
60 #define IDMAC_DES0_OWN BIT(31) 60 #define IDMAC_DES0_OWN BIT(31)
61 61
62 u32 des1; /* Buffer sizes */ 62 u32 des1; /* Buffer sizes */
63 #define IDMAC_SET_BUFFER1_SIZE(d, s) \ 63 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
64 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff)) 64 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
65 65
66 u32 des2; /* buffer 1 physical address */ 66 u32 des2; /* buffer 1 physical address */
67 67
68 u32 des3; /* buffer 2 physical address */ 68 u32 des3; /* buffer 2 physical address */
69 }; 69 };
70 #endif /* CONFIG_MMC_DW_IDMAC */ 70 #endif /* CONFIG_MMC_DW_IDMAC */
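des1 packs two 13-bit buffer sizes: bits [12:0] hold the buffer 1 size, and bits [25:13] (mask 0x03ffe000) hold the buffer 2 size, which IDMAC_SET_BUFFER1_SIZE() preserves. A quick illustration with assumed values:

struct idmac_desc d = { .des1 = 0 };

IDMAC_SET_BUFFER1_SIZE(&d, 4096);	/* d.des1 == 0x00001000 */
IDMAC_SET_BUFFER1_SIZE(&d, 512);	/* buffer 2 bits kept: d.des1 == 0x00000200 */

The 13-bit field also caps a single descriptor at 8191 bytes of buffer 1 data.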
71 71
72 /** 72 /**
73 * struct dw_mci_slot - MMC slot state 73 * struct dw_mci_slot - MMC slot state
74 * @mmc: The mmc_host representing this slot. 74 * @mmc: The mmc_host representing this slot.
75 * @host: The MMC controller this slot is using. 75 * @host: The MMC controller this slot is using.
76 * @ctype: Card type for this slot. 76 * @ctype: Card type for this slot.
77 * @mrq: mmc_request currently being processed or waiting to be 77 * @mrq: mmc_request currently being processed or waiting to be
78 * processed, or NULL when the slot is idle. 78 * processed, or NULL when the slot is idle.
79 * @queue_node: List node for placing this node in the @queue list of 79 * @queue_node: List node for placing this node in the @queue list of
80 * &struct dw_mci. 80 * &struct dw_mci.
81 * @clock: Clock rate configured by set_ios(). Protected by host->lock. 81 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
82 * @flags: Random state bits associated with the slot. 82 * @flags: Random state bits associated with the slot.
83 * @id: Number of this slot. 83 * @id: Number of this slot.
84 * @last_detect_state: Most recently observed card detect state. 84 * @last_detect_state: Most recently observed card detect state.
85 */ 85 */
86 struct dw_mci_slot { 86 struct dw_mci_slot {
87 struct mmc_host *mmc; 87 struct mmc_host *mmc;
88 struct dw_mci *host; 88 struct dw_mci *host;
89 89
90 u32 ctype; 90 u32 ctype;
91 91
92 struct mmc_request *mrq; 92 struct mmc_request *mrq;
93 struct list_head queue_node; 93 struct list_head queue_node;
94 94
95 unsigned int clock; 95 unsigned int clock;
96 unsigned long flags; 96 unsigned long flags;
97 #define DW_MMC_CARD_PRESENT 0 97 #define DW_MMC_CARD_PRESENT 0
98 #define DW_MMC_CARD_NEED_INIT 1 98 #define DW_MMC_CARD_NEED_INIT 1
99 int id; 99 int id;
100 int last_detect_state; 100 int last_detect_state;
101 }; 101 };
102 102
103 #if defined(CONFIG_DEBUG_FS) 103 #if defined(CONFIG_DEBUG_FS)
104 static int dw_mci_req_show(struct seq_file *s, void *v) 104 static int dw_mci_req_show(struct seq_file *s, void *v)
105 { 105 {
106 struct dw_mci_slot *slot = s->private; 106 struct dw_mci_slot *slot = s->private;
107 struct mmc_request *mrq; 107 struct mmc_request *mrq;
108 struct mmc_command *cmd; 108 struct mmc_command *cmd;
109 struct mmc_command *stop; 109 struct mmc_command *stop;
110 struct mmc_data *data; 110 struct mmc_data *data;
111 111
112 /* Make sure we get a consistent snapshot */ 112 /* Make sure we get a consistent snapshot */
113 spin_lock_bh(&slot->host->lock); 113 spin_lock_bh(&slot->host->lock);
114 mrq = slot->mrq; 114 mrq = slot->mrq;
115 115
116 if (mrq) { 116 if (mrq) {
117 cmd = mrq->cmd; 117 cmd = mrq->cmd;
118 data = mrq->data; 118 data = mrq->data;
119 stop = mrq->stop; 119 stop = mrq->stop;
120 120
121 if (cmd) 121 if (cmd)
122 seq_printf(s, 122 seq_printf(s,
123 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 123 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
124 cmd->opcode, cmd->arg, cmd->flags, 124 cmd->opcode, cmd->arg, cmd->flags,
125 cmd->resp[0], cmd->resp[1], cmd->resp[2], 125 cmd->resp[0], cmd->resp[1], cmd->resp[2],
126 cmd->resp[3], cmd->error); 126 cmd->resp[3], cmd->error);
127 if (data) 127 if (data)
128 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", 128 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
129 data->bytes_xfered, data->blocks, 129 data->bytes_xfered, data->blocks,
130 data->blksz, data->flags, data->error); 130 data->blksz, data->flags, data->error);
131 if (stop) 131 if (stop)
132 seq_printf(s, 132 seq_printf(s,
133 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 133 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
134 stop->opcode, stop->arg, stop->flags, 134 stop->opcode, stop->arg, stop->flags,
135 stop->resp[0], stop->resp[1], stop->resp[2], 135 stop->resp[0], stop->resp[1], stop->resp[2],
136 stop->resp[3], stop->error); 136 stop->resp[3], stop->error);
137 } 137 }
138 138
139 spin_unlock_bh(&slot->host->lock); 139 spin_unlock_bh(&slot->host->lock);
140 140
141 return 0; 141 return 0;
142 } 142 }
143 143
144 static int dw_mci_req_open(struct inode *inode, struct file *file) 144 static int dw_mci_req_open(struct inode *inode, struct file *file)
145 { 145 {
146 return single_open(file, dw_mci_req_show, inode->i_private); 146 return single_open(file, dw_mci_req_show, inode->i_private);
147 } 147 }
148 148
149 static const struct file_operations dw_mci_req_fops = { 149 static const struct file_operations dw_mci_req_fops = {
150 .owner = THIS_MODULE, 150 .owner = THIS_MODULE,
151 .open = dw_mci_req_open, 151 .open = dw_mci_req_open,
152 .read = seq_read, 152 .read = seq_read,
153 .llseek = seq_lseek, 153 .llseek = seq_lseek,
154 .release = single_release, 154 .release = single_release,
155 }; 155 };
156 156
157 static int dw_mci_regs_show(struct seq_file *s, void *v) 157 static int dw_mci_regs_show(struct seq_file *s, void *v)
158 { 158 {
	struct dw_mci *host = s->private;	/* debugfs private data is the host */

159 seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS)); 159 seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
160 seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS)); 160 seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
161 seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD)); 161 seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
162 seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL)); 162 seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
163 seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK)); 163 seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
164 seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); 164 seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
165 165
166 return 0; 166 return 0;
167 } 167 }
168 168
169 static int dw_mci_regs_open(struct inode *inode, struct file *file) 169 static int dw_mci_regs_open(struct inode *inode, struct file *file)
170 { 170 {
171 return single_open(file, dw_mci_regs_show, inode->i_private); 171 return single_open(file, dw_mci_regs_show, inode->i_private);
172 } 172 }
173 173
174 static const struct file_operations dw_mci_regs_fops = { 174 static const struct file_operations dw_mci_regs_fops = {
175 .owner = THIS_MODULE, 175 .owner = THIS_MODULE,
176 .open = dw_mci_regs_open, 176 .open = dw_mci_regs_open,
177 .read = seq_read, 177 .read = seq_read,
178 .llseek = seq_lseek, 178 .llseek = seq_lseek,
179 .release = single_release, 179 .release = single_release,
180 }; 180 };
181 181
182 static void dw_mci_init_debugfs(struct dw_mci_slot *slot) 182 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
183 { 183 {
184 struct mmc_host *mmc = slot->mmc; 184 struct mmc_host *mmc = slot->mmc;
185 struct dw_mci *host = slot->host; 185 struct dw_mci *host = slot->host;
186 struct dentry *root; 186 struct dentry *root;
187 struct dentry *node; 187 struct dentry *node;
188 188
189 root = mmc->debugfs_root; 189 root = mmc->debugfs_root;
190 if (!root) 190 if (!root)
191 return; 191 return;
192 192
193 node = debugfs_create_file("regs", S_IRUSR, root, host, 193 node = debugfs_create_file("regs", S_IRUSR, root, host,
194 &dw_mci_regs_fops); 194 &dw_mci_regs_fops);
195 if (!node) 195 if (!node)
196 goto err; 196 goto err;
197 197
198 node = debugfs_create_file("req", S_IRUSR, root, slot, 198 node = debugfs_create_file("req", S_IRUSR, root, slot,
199 &dw_mci_req_fops); 199 &dw_mci_req_fops);
200 if (!node) 200 if (!node)
201 goto err; 201 goto err;
202 202
203 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state); 203 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
204 if (!node) 204 if (!node)
205 goto err; 205 goto err;
206 206
207 node = debugfs_create_x32("pending_events", S_IRUSR, root, 207 node = debugfs_create_x32("pending_events", S_IRUSR, root,
208 (u32 *)&host->pending_events); 208 (u32 *)&host->pending_events);
209 if (!node) 209 if (!node)
210 goto err; 210 goto err;
211 211
212 node = debugfs_create_x32("completed_events", S_IRUSR, root, 212 node = debugfs_create_x32("completed_events", S_IRUSR, root,
213 (u32 *)&host->completed_events); 213 (u32 *)&host->completed_events);
214 if (!node) 214 if (!node)
215 goto err; 215 goto err;
216 216
217 return; 217 return;
218 218
219 err: 219 err:
220 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); 220 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
221 } 221 }
222 #endif /* defined(CONFIG_DEBUG_FS) */ 222 #endif /* defined(CONFIG_DEBUG_FS) */
223 223
224 static void dw_mci_set_timeout(struct dw_mci *host) 224 static void dw_mci_set_timeout(struct dw_mci *host)
225 { 225 {
226 /* timeout (maximum) */ 226 /* timeout (maximum) */
227 mci_writel(host, TMOUT, 0xffffffff); 227 mci_writel(host, TMOUT, 0xffffffff);
228 } 228 }
229 229
230 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) 230 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
231 { 231 {
232 struct mmc_data *data; 232 struct mmc_data *data;
233 u32 cmdr; 233 u32 cmdr;
234 cmd->error = -EINPROGRESS; 234 cmd->error = -EINPROGRESS;
235 235
236 cmdr = cmd->opcode; 236 cmdr = cmd->opcode;
237 237
238 if (cmdr == MMC_STOP_TRANSMISSION) 238 if (cmdr == MMC_STOP_TRANSMISSION)
239 cmdr |= SDMMC_CMD_STOP; 239 cmdr |= SDMMC_CMD_STOP;
240 else 240 else
241 cmdr |= SDMMC_CMD_PRV_DAT_WAIT; 241 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
242 242
243 if (cmd->flags & MMC_RSP_PRESENT) { 243 if (cmd->flags & MMC_RSP_PRESENT) {
244 /* We expect a response, so set this bit */ 244 /* We expect a response, so set this bit */
245 cmdr |= SDMMC_CMD_RESP_EXP; 245 cmdr |= SDMMC_CMD_RESP_EXP;
246 if (cmd->flags & MMC_RSP_136) 246 if (cmd->flags & MMC_RSP_136)
247 cmdr |= SDMMC_CMD_RESP_LONG; 247 cmdr |= SDMMC_CMD_RESP_LONG;
248 } 248 }
249 249
250 if (cmd->flags & MMC_RSP_CRC) 250 if (cmd->flags & MMC_RSP_CRC)
251 cmdr |= SDMMC_CMD_RESP_CRC; 251 cmdr |= SDMMC_CMD_RESP_CRC;
252 252
253 data = cmd->data; 253 data = cmd->data;
254 if (data) { 254 if (data) {
255 cmdr |= SDMMC_CMD_DAT_EXP; 255 cmdr |= SDMMC_CMD_DAT_EXP;
256 if (data->flags & MMC_DATA_STREAM) 256 if (data->flags & MMC_DATA_STREAM)
257 cmdr |= SDMMC_CMD_STRM_MODE; 257 cmdr |= SDMMC_CMD_STRM_MODE;
258 if (data->flags & MMC_DATA_WRITE) 258 if (data->flags & MMC_DATA_WRITE)
259 cmdr |= SDMMC_CMD_DAT_WR; 259 cmdr |= SDMMC_CMD_DAT_WR;
260 } 260 }
261 261
262 return cmdr; 262 return cmdr;
263 } 263 }
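A worked example of the translation above: a stop command (MMC_STOP_TRANSMISSION, opcode 12) carrying an R1b response, whose flags include MMC_RSP_PRESENT and MMC_RSP_CRC, with no data attached, comes out as:

/* cmdr for CMD12 with an R1b response (no data): */
cmdr = MMC_STOP_TRANSMISSION	/* opcode in the low bits */
     | SDMMC_CMD_STOP		/* taken instead of PRV_DAT_WAIT */
     | SDMMC_CMD_RESP_EXP	/* response expected */
     | SDMMC_CMD_RESP_CRC;	/* check the response CRC */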
264 264
265 static void dw_mci_start_command(struct dw_mci *host, 265 static void dw_mci_start_command(struct dw_mci *host,
266 struct mmc_command *cmd, u32 cmd_flags) 266 struct mmc_command *cmd, u32 cmd_flags)
267 { 267 {
268 host->cmd = cmd; 268 host->cmd = cmd;
269 dev_vdbg(&host->dev, 269 dev_vdbg(&host->dev,
270 "start command: ARGR=0x%08x CMDR=0x%08x\n", 270 "start command: ARGR=0x%08x CMDR=0x%08x\n",
271 cmd->arg, cmd_flags); 271 cmd->arg, cmd_flags);
272 272
273 mci_writel(host, CMDARG, cmd->arg); 273 mci_writel(host, CMDARG, cmd->arg);
274 wmb(); 274 wmb();
275 275
276 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); 276 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
277 } 277 }
278 278
279 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data) 279 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
280 { 280 {
281 dw_mci_start_command(host, data->stop, host->stop_cmdr); 281 dw_mci_start_command(host, data->stop, host->stop_cmdr);
282 } 282 }
283 283
284 /* DMA interface functions */ 284 /* DMA interface functions */
285 static void dw_mci_stop_dma(struct dw_mci *host) 285 static void dw_mci_stop_dma(struct dw_mci *host)
286 { 286 {
287 if (host->using_dma) { 287 if (host->using_dma) {
288 host->dma_ops->stop(host); 288 host->dma_ops->stop(host);
289 host->dma_ops->cleanup(host); 289 host->dma_ops->cleanup(host);
290 } else { 290 } else {
291 /* Data transfer was stopped by the interrupt handler */ 291 /* Data transfer was stopped by the interrupt handler */
292 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 292 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
293 } 293 }
294 } 294 }
295 295
296 static int dw_mci_get_dma_dir(struct mmc_data *data) 296 static int dw_mci_get_dma_dir(struct mmc_data *data)
297 { 297 {
298 if (data->flags & MMC_DATA_WRITE) 298 if (data->flags & MMC_DATA_WRITE)
299 return DMA_TO_DEVICE; 299 return DMA_TO_DEVICE;
300 else 300 else
301 return DMA_FROM_DEVICE; 301 return DMA_FROM_DEVICE;
302 } 302 }
303 303
304 #ifdef CONFIG_MMC_DW_IDMAC 304 #ifdef CONFIG_MMC_DW_IDMAC
305 static void dw_mci_dma_cleanup(struct dw_mci *host) 305 static void dw_mci_dma_cleanup(struct dw_mci *host)
306 { 306 {
307 struct mmc_data *data = host->data; 307 struct mmc_data *data = host->data;
308 308
309 if (data) 309 if (data)
310 if (!data->host_cookie) 310 if (!data->host_cookie)
311 dma_unmap_sg(&host->dev, 311 dma_unmap_sg(&host->dev,
312 data->sg, 312 data->sg,
313 data->sg_len, 313 data->sg_len,
314 dw_mci_get_dma_dir(data)); 314 dw_mci_get_dma_dir(data));
315 } 315 }
316 316
317 static void dw_mci_idmac_stop_dma(struct dw_mci *host) 317 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
318 { 318 {
319 u32 temp; 319 u32 temp;
320 320
321 /* Disable and reset the IDMAC interface */ 321 /* Disable and reset the IDMAC interface */
322 temp = mci_readl(host, CTRL); 322 temp = mci_readl(host, CTRL);
323 temp &= ~SDMMC_CTRL_USE_IDMAC; 323 temp &= ~SDMMC_CTRL_USE_IDMAC;
324 temp |= SDMMC_CTRL_DMA_RESET; 324 temp |= SDMMC_CTRL_DMA_RESET;
325 mci_writel(host, CTRL, temp); 325 mci_writel(host, CTRL, temp);
326 326
327 /* Stop the IDMAC running */ 327 /* Stop the IDMAC running */
328 temp = mci_readl(host, BMOD); 328 temp = mci_readl(host, BMOD);
329 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); 329 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
330 mci_writel(host, BMOD, temp); 330 mci_writel(host, BMOD, temp);
331 } 331 }
332 332
333 static void dw_mci_idmac_complete_dma(struct dw_mci *host) 333 static void dw_mci_idmac_complete_dma(struct dw_mci *host)
334 { 334 {
335 struct mmc_data *data = host->data; 335 struct mmc_data *data = host->data;
336 336
337 dev_vdbg(&host->dev, "DMA complete\n"); 337 dev_vdbg(&host->dev, "DMA complete\n");
338 338
339 host->dma_ops->cleanup(host); 339 host->dma_ops->cleanup(host);
340 340
341 /* 341 /*
342 * If the card was removed, data will be NULL. No point in trying to 342 * If the card was removed, data will be NULL. No point in trying to
343 * send the stop command or waiting for NBUSY in this case. 343 * send the stop command or waiting for NBUSY in this case.
344 */ 344 */
345 if (data) { 345 if (data) {
346 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 346 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
347 tasklet_schedule(&host->tasklet); 347 tasklet_schedule(&host->tasklet);
348 } 348 }
349 } 349 }
350 350
351 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data, 351 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
352 unsigned int sg_len) 352 unsigned int sg_len)
353 { 353 {
354 int i; 354 int i;
355 struct idmac_desc *desc = host->sg_cpu; 355 struct idmac_desc *desc = host->sg_cpu;
356 356
357 for (i = 0; i < sg_len; i++, desc++) { 357 for (i = 0; i < sg_len; i++, desc++) {
358 unsigned int length = sg_dma_len(&data->sg[i]); 358 unsigned int length = sg_dma_len(&data->sg[i]);
359 u32 mem_addr = sg_dma_address(&data->sg[i]); 359 u32 mem_addr = sg_dma_address(&data->sg[i]);
360 360
361 /* Set the OWN bit and disable interrupts for this descriptor */ 361 /* Set the OWN bit and disable interrupts for this descriptor */
362 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH; 362 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
363 363
364 /* Buffer length */ 364 /* Buffer length */
365 IDMAC_SET_BUFFER1_SIZE(desc, length); 365 IDMAC_SET_BUFFER1_SIZE(desc, length);
366 366
367 /* Physical address to DMA to/from */ 367 /* Physical address to DMA to/from */
368 desc->des2 = mem_addr; 368 desc->des2 = mem_addr;
369 } 369 }
370 370
371 /* Set first descriptor */ 371 /* Set first descriptor */
372 desc = host->sg_cpu; 372 desc = host->sg_cpu;
373 desc->des0 |= IDMAC_DES0_FD; 373 desc->des0 |= IDMAC_DES0_FD;
374 374
375 /* Set last descriptor */ 375 /* Set last descriptor */
376 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc); 376 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
377 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC); 377 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
378 desc->des0 |= IDMAC_DES0_LD; 378 desc->des0 |= IDMAC_DES0_LD;
379 379
380 wmb(); 380 wmb();
381 } 381 }
382 382
383 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) 383 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
384 { 384 {
385 u32 temp; 385 u32 temp;
386 386
387 dw_mci_translate_sglist(host, host->data, sg_len); 387 dw_mci_translate_sglist(host, host->data, sg_len);
388 388
389 /* Select IDMAC interface */ 389 /* Select IDMAC interface */
390 temp = mci_readl(host, CTRL); 390 temp = mci_readl(host, CTRL);
391 temp |= SDMMC_CTRL_USE_IDMAC; 391 temp |= SDMMC_CTRL_USE_IDMAC;
392 mci_writel(host, CTRL, temp); 392 mci_writel(host, CTRL, temp);
393 393
394 wmb(); 394 wmb();
395 395
396 /* Enable the IDMAC */ 396 /* Enable the IDMAC */
397 temp = mci_readl(host, BMOD); 397 temp = mci_readl(host, BMOD);
398 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; 398 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
399 mci_writel(host, BMOD, temp); 399 mci_writel(host, BMOD, temp);
400 400
401 /* Start it running */ 401 /* Start it running */
402 mci_writel(host, PLDMND, 1); 402 mci_writel(host, PLDMND, 1);
403 } 403 }
404 404
405 static int dw_mci_idmac_init(struct dw_mci *host) 405 static int dw_mci_idmac_init(struct dw_mci *host)
406 { 406 {
407 struct idmac_desc *p; 407 struct idmac_desc *p;
408 int i, dma_support; 408 int i, dma_support;
409 409
410 /* Number of descriptors in the ring buffer */ 410 /* Number of descriptors in the ring buffer */
411 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); 411 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
412 412
413 /* Check if Hardware Configuration Register has support for DMA */ 413 /* Check if Hardware Configuration Register has support for DMA */
414 dma_support = (mci_readl(host, HCON) >> 16) & 0x3; 414 dma_support = (mci_readl(host, HCON) >> 16) & 0x3;
415 415
416 if (!dma_support || dma_support > 2) { 416 if (!dma_support || dma_support > 2) {
417 dev_err(&host->dev, 417 dev_err(&host->dev,
418 "Host Controller does not support IDMA Tx.\n"); 418 "Host Controller does not support IDMA Tx.\n");
419 host->dma_ops = NULL; 419 host->dma_ops = NULL;
420 return -ENODEV; 420 return -ENODEV;
421 } 421 }
422 422
423 dev_info(&host->dev, "Using internal DMA controller.\n"); 423 dev_info(&host->dev, "Using internal DMA controller.\n");
424 424
425 /* Forward link the descriptor list */ 425 /* Forward link the descriptor list */
426 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) 426 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
427 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1)); 427 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
428 428
429 /* Set the last descriptor as the end-of-ring descriptor */ 429 /* Set the last descriptor as the end-of-ring descriptor */
430 p->des3 = host->sg_dma; 430 p->des3 = host->sg_dma;
431 p->des0 = IDMAC_DES0_ER; 431 p->des0 = IDMAC_DES0_ER;
432 432
433 mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET); 433 mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);
434 434
435 /* Mask out interrupts - get Tx & Rx complete only */ 435 /* Mask out interrupts - get Tx & Rx complete only */
436 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | 436 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
437 SDMMC_IDMAC_INT_TI); 437 SDMMC_IDMAC_INT_TI);
438 438
439 /* Set the descriptor base address */ 439 /* Set the descriptor base address */
440 mci_writel(host, DBADDR, host->sg_dma); 440 mci_writel(host, DBADDR, host->sg_dma);
441 return 0; 441 return 0;
442 } 442 }
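Assuming 4 KiB pages and the 16-byte idmac_desc above, the ring set up here holds PAGE_SIZE / sizeof(struct idmac_desc) = 256 descriptors: des3 of descriptor i carries the bus address of descriptor i + 1, and the final descriptor's des3 points back to host->sg_dma with IDMAC_DES0_ER set, closing the ring.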
443 443
444 static struct dw_mci_dma_ops dw_mci_idmac_ops = { 444 static struct dw_mci_dma_ops dw_mci_idmac_ops = {
445 .init = dw_mci_idmac_init, 445 .init = dw_mci_idmac_init,
446 .start = dw_mci_idmac_start_dma, 446 .start = dw_mci_idmac_start_dma,
447 .stop = dw_mci_idmac_stop_dma, 447 .stop = dw_mci_idmac_stop_dma,
448 .complete = dw_mci_idmac_complete_dma, 448 .complete = dw_mci_idmac_complete_dma,
449 .cleanup = dw_mci_dma_cleanup, 449 .cleanup = dw_mci_dma_cleanup,
450 }; 450 };
451 #endif /* CONFIG_MMC_DW_IDMAC */ 451 #endif /* CONFIG_MMC_DW_IDMAC */
452 452
453 static int dw_mci_pre_dma_transfer(struct dw_mci *host, 453 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
454 struct mmc_data *data, 454 struct mmc_data *data,
455 bool next) 455 bool next)
456 { 456 {
457 struct scatterlist *sg; 457 struct scatterlist *sg;
458 unsigned int i, sg_len; 458 unsigned int i, sg_len;
459 459
460 if (!next && data->host_cookie) 460 if (!next && data->host_cookie)
461 return data->host_cookie; 461 return data->host_cookie;
462 462
463 /* 463 /*
464 * We don't do DMA on "complex" transfers, i.e. with 464 * We don't do DMA on "complex" transfers, i.e. with
465 * non-word-aligned buffers or lengths. Also, we don't bother 465 * non-word-aligned buffers or lengths. Also, we don't bother
466 * with all the DMA setup overhead for short transfers. 466 * with all the DMA setup overhead for short transfers.
467 */ 467 */
468 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) 468 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
469 return -EINVAL; 469 return -EINVAL;
470 470
471 if (data->blksz & 3) 471 if (data->blksz & 3)
472 return -EINVAL; 472 return -EINVAL;
473 473
474 for_each_sg(data->sg, sg, data->sg_len, i) { 474 for_each_sg(data->sg, sg, data->sg_len, i) {
475 if (sg->offset & 3 || sg->length & 3) 475 if (sg->offset & 3 || sg->length & 3)
476 return -EINVAL; 476 return -EINVAL;
477 } 477 }
478 478
479 sg_len = dma_map_sg(&host->dev, 479 sg_len = dma_map_sg(&host->dev,
480 data->sg, 480 data->sg,
481 data->sg_len, 481 data->sg_len,
482 dw_mci_get_dma_dir(data)); 482 dw_mci_get_dma_dir(data));
483 if (sg_len == 0) 483 if (sg_len == 0)
484 return -EINVAL; 484 return -EINVAL;
485 485
486 if (next) 486 if (next)
487 data->host_cookie = sg_len; 487 data->host_cookie = sg_len;
488 488
489 return sg_len; 489 return sg_len;
490 } 490 }
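Restating the eligibility rules above: a request uses DMA only if the total transfer is at least DW_MCI_DMA_THRESHOLD (16) bytes, the block size is a multiple of 4, and every scatterlist entry is word-aligned in both offset and length. For example, 8 blocks of 512 bytes in page-aligned buffers qualify, while a 6-byte transfer (or one whose sg entry starts at offset 2) returns -EINVAL and falls back to PIO.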
491 491
492 static void dw_mci_pre_req(struct mmc_host *mmc, 492 static void dw_mci_pre_req(struct mmc_host *mmc,
493 struct mmc_request *mrq, 493 struct mmc_request *mrq,
494 bool is_first_req) 494 bool is_first_req)
495 { 495 {
496 struct dw_mci_slot *slot = mmc_priv(mmc); 496 struct dw_mci_slot *slot = mmc_priv(mmc);
497 struct mmc_data *data = mrq->data; 497 struct mmc_data *data = mrq->data;
498 498
499 if (!slot->host->use_dma || !data) 499 if (!slot->host->use_dma || !data)
500 return; 500 return;
501 501
502 if (data->host_cookie) { 502 if (data->host_cookie) {
503 data->host_cookie = 0; 503 data->host_cookie = 0;
504 return; 504 return;
505 } 505 }
506 506
507 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0) 507 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
508 data->host_cookie = 0; 508 data->host_cookie = 0;
509 } 509 }
510 510
511 static void dw_mci_post_req(struct mmc_host *mmc, 511 static void dw_mci_post_req(struct mmc_host *mmc,
512 struct mmc_request *mrq, 512 struct mmc_request *mrq,
513 int err) 513 int err)
514 { 514 {
515 struct dw_mci_slot *slot = mmc_priv(mmc); 515 struct dw_mci_slot *slot = mmc_priv(mmc);
516 struct mmc_data *data = mrq->data; 516 struct mmc_data *data = mrq->data;
517 517
518 if (!slot->host->use_dma || !data) 518 if (!slot->host->use_dma || !data)
519 return; 519 return;
520 520
521 if (data->host_cookie) 521 if (data->host_cookie)
522 dma_unmap_sg(&slot->host->dev, 522 dma_unmap_sg(&slot->host->dev,
523 data->sg, 523 data->sg,
524 data->sg_len, 524 data->sg_len,
525 dw_mci_get_dma_dir(data)); 525 dw_mci_get_dma_dir(data));
526 data->host_cookie = 0; 526 data->host_cookie = 0;
527 } 527 }
528 528
529 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) 529 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
530 { 530 {
531 int sg_len; 531 int sg_len;
532 u32 temp; 532 u32 temp;
533 533
534 host->using_dma = 0; 534 host->using_dma = 0;
535 535
536 /* If we don't have a channel, we can't do DMA */ 536 /* If we don't have a channel, we can't do DMA */
537 if (!host->use_dma) 537 if (!host->use_dma)
538 return -ENODEV; 538 return -ENODEV;
539 539
540 sg_len = dw_mci_pre_dma_transfer(host, data, 0); 540 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
541 if (sg_len < 0) { 541 if (sg_len < 0) {
542 host->dma_ops->stop(host); 542 host->dma_ops->stop(host);
543 return sg_len; 543 return sg_len;
544 } 544 }
545 545
546 host->using_dma = 1; 546 host->using_dma = 1;
547 547
548 dev_vdbg(&host->dev, 548 dev_vdbg(&host->dev,
549 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", 549 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
550 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, 550 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
551 sg_len); 551 sg_len);
552 552
553 /* Enable the DMA interface */ 553 /* Enable the DMA interface */
554 temp = mci_readl(host, CTRL); 554 temp = mci_readl(host, CTRL);
555 temp |= SDMMC_CTRL_DMA_ENABLE; 555 temp |= SDMMC_CTRL_DMA_ENABLE;
556 mci_writel(host, CTRL, temp); 556 mci_writel(host, CTRL, temp);
557 557
558 /* Disable RX/TX IRQs, let DMA handle it */ 558 /* Disable RX/TX IRQs, let DMA handle it */
559 temp = mci_readl(host, INTMASK); 559 temp = mci_readl(host, INTMASK);
560 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR); 560 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
561 mci_writel(host, INTMASK, temp); 561 mci_writel(host, INTMASK, temp);
562 562
563 host->dma_ops->start(host, sg_len); 563 host->dma_ops->start(host, sg_len);
564 564
565 return 0; 565 return 0;
566 } 566 }
567 567
568 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) 568 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
569 { 569 {
570 u32 temp; 570 u32 temp;
571 571
572 data->error = -EINPROGRESS; 572 data->error = -EINPROGRESS;
573 573
574 WARN_ON(host->data); 574 WARN_ON(host->data);
575 host->sg = NULL; 575 host->sg = NULL;
576 host->data = data; 576 host->data = data;
577 577
578 if (data->flags & MMC_DATA_READ) 578 if (data->flags & MMC_DATA_READ)
579 host->dir_status = DW_MCI_RECV_STATUS; 579 host->dir_status = DW_MCI_RECV_STATUS;
580 else 580 else
581 host->dir_status = DW_MCI_SEND_STATUS; 581 host->dir_status = DW_MCI_SEND_STATUS;
582 582
583 if (dw_mci_submit_data_dma(host, data)) { 583 if (dw_mci_submit_data_dma(host, data)) {
584 int flags = SG_MITER_ATOMIC; 584 int flags = SG_MITER_ATOMIC;
585 if (host->data->flags & MMC_DATA_READ) 585 if (host->data->flags & MMC_DATA_READ)
586 flags |= SG_MITER_TO_SG; 586 flags |= SG_MITER_TO_SG;
587 else 587 else
588 flags |= SG_MITER_FROM_SG; 588 flags |= SG_MITER_FROM_SG;
589 589
590 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 590 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
591 host->sg = data->sg; 591 host->sg = data->sg;
592 host->part_buf_start = 0; 592 host->part_buf_start = 0;
593 host->part_buf_count = 0; 593 host->part_buf_count = 0;
594 594
595 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR); 595 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
596 temp = mci_readl(host, INTMASK); 596 temp = mci_readl(host, INTMASK);
597 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; 597 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
598 mci_writel(host, INTMASK, temp); 598 mci_writel(host, INTMASK, temp);
599 599
600 temp = mci_readl(host, CTRL); 600 temp = mci_readl(host, CTRL);
601 temp &= ~SDMMC_CTRL_DMA_ENABLE; 601 temp &= ~SDMMC_CTRL_DMA_ENABLE;
602 mci_writel(host, CTRL, temp); 602 mci_writel(host, CTRL, temp);
603 } 603 }
604 } 604 }
605 605
606 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg) 606 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
607 { 607 {
608 struct dw_mci *host = slot->host; 608 struct dw_mci *host = slot->host;
609 unsigned long timeout = jiffies + msecs_to_jiffies(500); 609 unsigned long timeout = jiffies + msecs_to_jiffies(500);
610 unsigned int cmd_status = 0; 610 unsigned int cmd_status = 0;
611 611
612 mci_writel(host, CMDARG, arg); 612 mci_writel(host, CMDARG, arg);
613 wmb(); 613 wmb();
614 mci_writel(host, CMD, SDMMC_CMD_START | cmd); 614 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
615 615
616 while (time_before(jiffies, timeout)) { 616 while (time_before(jiffies, timeout)) {
617 cmd_status = mci_readl(host, CMD); 617 cmd_status = mci_readl(host, CMD);
618 if (!(cmd_status & SDMMC_CMD_START)) 618 if (!(cmd_status & SDMMC_CMD_START))
619 return; 619 return;
620 } 620 }
621 dev_err(&slot->mmc->class_dev, 621 dev_err(&slot->mmc->class_dev,
622 "Timeout sending command (cmd %#x arg %#x status %#x)\n", 622 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
623 cmd, arg, cmd_status); 623 cmd, arg, cmd_status);
624 } 624 }
625 625
626 static void dw_mci_setup_bus(struct dw_mci_slot *slot) 626 static void dw_mci_setup_bus(struct dw_mci_slot *slot)
627 { 627 {
628 struct dw_mci *host = slot->host; 628 struct dw_mci *host = slot->host;
629 u32 div; 629 u32 div;
630 u32 clk_en_a;
630 631
631 if (slot->clock != host->current_speed) { 632 if (slot->clock != host->current_speed) {
632 div = host->bus_hz / slot->clock; 633 div = host->bus_hz / slot->clock;
633 if (host->bus_hz % slot->clock && host->bus_hz > slot->clock) 634 if (host->bus_hz % slot->clock && host->bus_hz > slot->clock)
634 /* 635 /*
635 * move the + 1 after the divide to prevent 636 * move the + 1 after the divide to prevent
636 * over-clocking the card. 637 * over-clocking the card.
637 */ 638 */
638 div += 1; 639 div += 1;
639 640
640 div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0; 641 div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0;
641 642
642 dev_info(&slot->mmc->class_dev, 643 dev_info(&slot->mmc->class_dev,
643 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ" 644 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
644 " div = %d)\n", slot->id, host->bus_hz, slot->clock, 645 " div = %d)\n", slot->id, host->bus_hz, slot->clock,
645 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div); 646 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
646 647
647 /* disable clock */ 648 /* disable clock */
648 mci_writel(host, CLKENA, 0); 649 mci_writel(host, CLKENA, 0);
649 mci_writel(host, CLKSRC, 0); 650 mci_writel(host, CLKSRC, 0);
650 651
651 /* inform CIU */ 652 /* inform CIU */
652 mci_send_cmd(slot, 653 mci_send_cmd(slot,
653 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); 654 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
654 655
655 /* set clock to desired speed */ 656 /* set clock to desired speed */
656 mci_writel(host, CLKDIV, div); 657 mci_writel(host, CLKDIV, div);
657 658
658 /* inform CIU */ 659 /* inform CIU */
659 mci_send_cmd(slot, 660 mci_send_cmd(slot,
660 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); 661 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
661 662
662 /* enable clock */ 663 /* enable clock; only low power if no SDIO */
663 mci_writel(host, CLKENA, ((SDMMC_CLKEN_ENABLE | 664 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
664 SDMMC_CLKEN_LOW_PWR) << slot->id)); 665 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
666 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
667 mci_writel(host, CLKENA, clk_en_a);
665 668
666 /* inform CIU */ 669 /* inform CIU */
667 mci_send_cmd(slot, 670 mci_send_cmd(slot,
668 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); 671 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
669 672
670 host->current_speed = slot->clock; 673 host->current_speed = slot->clock;
671 } 674 }
672 675
673 /* Set the current slot bus width */ 676 /* Set the current slot bus width */
674 mci_writel(host, CTYPE, (slot->ctype << slot->id)); 677 mci_writel(host, CTYPE, (slot->ctype << slot->id));
675 } 678 }
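Worked numbers for the divider math above, assuming bus_hz = 100 MHz and a requested slot->clock of 400 kHz:

div = 100000000 / 400000;	/* 250; remainder 0, so no + 1 */
div = DIV_ROUND_UP(div, 2);	/* CLKDIV value = 125 */
/* actual card clock = (bus_hz / div) >> 1 = (100 MHz / 125) / 2 = 400 kHz */

When bus_hz == slot->clock, div stays 0 and the card clock runs at bus_hz undivided.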
676 679
677 static void __dw_mci_start_request(struct dw_mci *host, 680 static void __dw_mci_start_request(struct dw_mci *host,
678 struct dw_mci_slot *slot, 681 struct dw_mci_slot *slot,
679 struct mmc_command *cmd) 682 struct mmc_command *cmd)
680 { 683 {
681 struct mmc_request *mrq; 684 struct mmc_request *mrq;
682 struct mmc_data *data; 685 struct mmc_data *data;
683 u32 cmdflags; 686 u32 cmdflags;
684 687
685 mrq = slot->mrq; 688 mrq = slot->mrq;
686 if (host->pdata->select_slot) 689 if (host->pdata->select_slot)
687 host->pdata->select_slot(slot->id); 690 host->pdata->select_slot(slot->id);
688 691
689 /* Slot specific timing and width adjustment */ 692 /* Slot specific timing and width adjustment */
690 dw_mci_setup_bus(slot); 693 dw_mci_setup_bus(slot);
691 694
692 host->cur_slot = slot; 695 host->cur_slot = slot;
693 host->mrq = mrq; 696 host->mrq = mrq;
694 697
695 host->pending_events = 0; 698 host->pending_events = 0;
696 host->completed_events = 0; 699 host->completed_events = 0;
697 host->data_status = 0; 700 host->data_status = 0;
698 701
699 data = cmd->data; 702 data = cmd->data;
700 if (data) { 703 if (data) {
701 dw_mci_set_timeout(host); 704 dw_mci_set_timeout(host);
702 mci_writel(host, BYTCNT, data->blksz*data->blocks); 705 mci_writel(host, BYTCNT, data->blksz*data->blocks);
703 mci_writel(host, BLKSIZ, data->blksz); 706 mci_writel(host, BLKSIZ, data->blksz);
704 } 707 }
705 708
706 cmdflags = dw_mci_prepare_command(slot->mmc, cmd); 709 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
707 710
708 /* this is the first command, send the initialization clock */ 711 /* this is the first command, send the initialization clock */
709 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags)) 712 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
710 cmdflags |= SDMMC_CMD_INIT; 713 cmdflags |= SDMMC_CMD_INIT;
711 714
712 if (data) { 715 if (data) {
713 dw_mci_submit_data(host, data); 716 dw_mci_submit_data(host, data);
714 wmb(); 717 wmb();
715 } 718 }
716 719
717 dw_mci_start_command(host, cmd, cmdflags); 720 dw_mci_start_command(host, cmd, cmdflags);
718 721
719 if (mrq->stop) 722 if (mrq->stop)
720 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); 723 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
721 } 724 }
722 725
723 static void dw_mci_start_request(struct dw_mci *host, 726 static void dw_mci_start_request(struct dw_mci *host,
724 struct dw_mci_slot *slot) 727 struct dw_mci_slot *slot)
725 { 728 {
726 struct mmc_request *mrq = slot->mrq; 729 struct mmc_request *mrq = slot->mrq;
727 struct mmc_command *cmd; 730 struct mmc_command *cmd;
728 731
729 cmd = mrq->sbc ? mrq->sbc : mrq->cmd; 732 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
730 __dw_mci_start_request(host, slot, cmd); 733 __dw_mci_start_request(host, slot, cmd);
731 } 734 }
732 735
733 /* must be called with host->lock held */ 736 /* must be called with host->lock held */
734 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, 737 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
735 struct mmc_request *mrq) 738 struct mmc_request *mrq)
736 { 739 {
737 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", 740 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
738 host->state); 741 host->state);
739 742
740 slot->mrq = mrq; 743 slot->mrq = mrq;
741 744
742 if (host->state == STATE_IDLE) { 745 if (host->state == STATE_IDLE) {
743 host->state = STATE_SENDING_CMD; 746 host->state = STATE_SENDING_CMD;
744 dw_mci_start_request(host, slot); 747 dw_mci_start_request(host, slot);
745 } else { 748 } else {
746 list_add_tail(&slot->queue_node, &host->queue); 749 list_add_tail(&slot->queue_node, &host->queue);
747 } 750 }
748 } 751 }
749 752
750 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) 753 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
751 { 754 {
752 struct dw_mci_slot *slot = mmc_priv(mmc); 755 struct dw_mci_slot *slot = mmc_priv(mmc);
753 struct dw_mci *host = slot->host; 756 struct dw_mci *host = slot->host;
754 757
755 WARN_ON(slot->mrq); 758 WARN_ON(slot->mrq);
756 759
757 /* 760 /*
758 * The check for card presence and queueing of the request must be 761 * The check for card presence and queueing of the request must be
759 * atomic, otherwise the card could be removed in between and the 762 * atomic, otherwise the card could be removed in between and the
760 * request wouldn't fail until another card was inserted. 763 * request wouldn't fail until another card was inserted.
761 */ 764 */
762 spin_lock_bh(&host->lock); 765 spin_lock_bh(&host->lock);
763 766
764 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) { 767 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
765 spin_unlock_bh(&host->lock); 768 spin_unlock_bh(&host->lock);
766 mrq->cmd->error = -ENOMEDIUM; 769 mrq->cmd->error = -ENOMEDIUM;
767 mmc_request_done(mmc, mrq); 770 mmc_request_done(mmc, mrq);
768 return; 771 return;
769 } 772 }
770 773
771 dw_mci_queue_request(host, slot, mrq); 774 dw_mci_queue_request(host, slot, mrq);
772 775
773 spin_unlock_bh(&host->lock); 776 spin_unlock_bh(&host->lock);
774 } 777 }
775 778
776 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 779 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
777 { 780 {
778 struct dw_mci_slot *slot = mmc_priv(mmc); 781 struct dw_mci_slot *slot = mmc_priv(mmc);
779 u32 regs; 782 u32 regs;
780 783
781 /* set default 1 bit mode */ 784 /* set default 1 bit mode */
782 slot->ctype = SDMMC_CTYPE_1BIT; 785 slot->ctype = SDMMC_CTYPE_1BIT;
783 786
784 switch (ios->bus_width) { 787 switch (ios->bus_width) {
785 case MMC_BUS_WIDTH_1: 788 case MMC_BUS_WIDTH_1:
786 slot->ctype = SDMMC_CTYPE_1BIT; 789 slot->ctype = SDMMC_CTYPE_1BIT;
787 break; 790 break;
788 case MMC_BUS_WIDTH_4: 791 case MMC_BUS_WIDTH_4:
789 slot->ctype = SDMMC_CTYPE_4BIT; 792 slot->ctype = SDMMC_CTYPE_4BIT;
790 break; 793 break;
791 case MMC_BUS_WIDTH_8: 794 case MMC_BUS_WIDTH_8:
792 slot->ctype = SDMMC_CTYPE_8BIT; 795 slot->ctype = SDMMC_CTYPE_8BIT;
793 break; 796 break;
794 } 797 }
795 798
796 regs = mci_readl(slot->host, UHS_REG); 799 regs = mci_readl(slot->host, UHS_REG);
797 800
798 /* DDR mode set */ 801 /* DDR mode set */
799 if (ios->timing == MMC_TIMING_UHS_DDR50) 802 if (ios->timing == MMC_TIMING_UHS_DDR50)
800 regs |= (0x1 << slot->id) << 16; 803 regs |= (0x1 << slot->id) << 16;
801 else 804 else
802 regs &= ~((0x1 << slot->id) << 16); 805 regs &= ~((0x1 << slot->id) << 16);
803 806
804 mci_writel(slot->host, UHS_REG, regs); 807 mci_writel(slot->host, UHS_REG, regs);
805 808
806 if (ios->clock) { 809 if (ios->clock) {
807 /* 810 /*
808 * Use mirror of ios->clock to prevent race with mmc 811 * Use mirror of ios->clock to prevent race with mmc
809 * core ios update when finding the minimum. 812 * core ios update when finding the minimum.
810 */ 813 */
811 slot->clock = ios->clock; 814 slot->clock = ios->clock;
812 } 815 }
813 816
814 switch (ios->power_mode) { 817 switch (ios->power_mode) {
815 case MMC_POWER_UP: 818 case MMC_POWER_UP:
816 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); 819 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
817 break; 820 break;
818 default: 821 default:
819 break; 822 break;
820 } 823 }
821 } 824 }
822 825
823 static int dw_mci_get_ro(struct mmc_host *mmc) 826 static int dw_mci_get_ro(struct mmc_host *mmc)
824 { 827 {
825 int read_only; 828 int read_only;
826 struct dw_mci_slot *slot = mmc_priv(mmc); 829 struct dw_mci_slot *slot = mmc_priv(mmc);
827 struct dw_mci_board *brd = slot->host->pdata; 830 struct dw_mci_board *brd = slot->host->pdata;
828 831
829 /* Use platform get_ro function, else try on board write protect */ 832 /* Use platform get_ro function, else try on board write protect */
830 if (brd->get_ro) 833 if (brd->get_ro)
831 read_only = brd->get_ro(slot->id); 834 read_only = brd->get_ro(slot->id);
832 else 835 else
833 read_only = 836 read_only =
834 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0; 837 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
835 838
836 dev_dbg(&mmc->class_dev, "card is %s\n", 839 dev_dbg(&mmc->class_dev, "card is %s\n",
837 read_only ? "read-only" : "read-write"); 840 read_only ? "read-only" : "read-write");
838 841
839 return read_only; 842 return read_only;
840 } 843 }
841 844
842 static int dw_mci_get_cd(struct mmc_host *mmc) 845 static int dw_mci_get_cd(struct mmc_host *mmc)
843 { 846 {
844 int present; 847 int present;
845 struct dw_mci_slot *slot = mmc_priv(mmc); 848 struct dw_mci_slot *slot = mmc_priv(mmc);
846 struct dw_mci_board *brd = slot->host->pdata; 849 struct dw_mci_board *brd = slot->host->pdata;
847 850
848 /* Use platform get_cd function, else try onboard card detect */ 851 /* Use platform get_cd function, else try onboard card detect */
849 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) 852 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
850 present = 1; 853 present = 1;
851 else if (brd->get_cd) 854 else if (brd->get_cd)
852 present = !brd->get_cd(slot->id); 855 present = !brd->get_cd(slot->id);
853 else 856 else
854 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) 857 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
855 == 0 ? 1 : 0; 858 == 0 ? 1 : 0;
856 859
857 if (present) 860 if (present)
858 dev_dbg(&mmc->class_dev, "card is present\n"); 861 dev_dbg(&mmc->class_dev, "card is present\n");
859 else 862 else
860 dev_dbg(&mmc->class_dev, "card is not present\n"); 863 dev_dbg(&mmc->class_dev, "card is not present\n");
861 864
862 return present; 865 return present;
863 } 866 }
864 867
868 /*
869 * Disable low power mode.
870 *
871 * Low power mode stops the card clock when the bus is idle. According to
872 * the description of the CLKENA register, low power mode must be disabled
873 * for SDIO cards if SDIO interrupts are to work.
874 *
875 * This function is fast if low power mode is already disabled.
876 */
877 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
878 {
879 struct dw_mci *host = slot->host;
880 u32 clk_en_a;
881 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
882
883 clk_en_a = mci_readl(host, CLKENA);
884
885 if (clk_en_a & clken_low_pwr) {
886 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
887 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
888 SDMMC_CMD_PRV_DAT_WAIT, 0);
889 }
890 }
891
865 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) 892 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
866 { 893 {
867 struct dw_mci_slot *slot = mmc_priv(mmc); 894 struct dw_mci_slot *slot = mmc_priv(mmc);
868 struct dw_mci *host = slot->host; 895 struct dw_mci *host = slot->host;
869 u32 int_mask; 896 u32 int_mask;
870 897
871 /* Enable/disable Slot Specific SDIO interrupt */ 898 /* Enable/disable Slot Specific SDIO interrupt */
872 int_mask = mci_readl(host, INTMASK); 899 int_mask = mci_readl(host, INTMASK);
873 if (enb) { 900 if (enb) {
901 /*
902 * Turn off low power mode if it was enabled. This is a bit of
903 * a heavy operation and we disable / enable IRQs a lot, so
904 * we'll leave low power mode disabled and it will get
905 * re-enabled again in dw_mci_setup_bus().
906 */
907 dw_mci_disable_low_power(slot);
908
874 mci_writel(host, INTMASK, 909 mci_writel(host, INTMASK,
875 (int_mask | SDMMC_INT_SDIO(slot->id))); 910 (int_mask | SDMMC_INT_SDIO(slot->id)));
876 } else { 911 } else {
877 mci_writel(host, INTMASK, 912 mci_writel(host, INTMASK,
878 (int_mask & ~SDMMC_INT_SDIO(slot->id))); 913 (int_mask & ~SDMMC_INT_SDIO(slot->id)));
879 } 914 }
880 } 915 }
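Note how this pairs with the CLKENA change in dw_mci_setup_bus() above: once SDMMC_INT_SDIO is set in INTMASK here, setup_bus no longer ORs in SDMMC_CLKEN_LOW_PWR, so the card clock keeps running while the bus is idle and the controller can still detect SDIO interrupts signalled on DAT1.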
881 916
882 static const struct mmc_host_ops dw_mci_ops = { 917 static const struct mmc_host_ops dw_mci_ops = {
883 .request = dw_mci_request, 918 .request = dw_mci_request,
884 .pre_req = dw_mci_pre_req, 919 .pre_req = dw_mci_pre_req,
885 .post_req = dw_mci_post_req, 920 .post_req = dw_mci_post_req,
886 .set_ios = dw_mci_set_ios, 921 .set_ios = dw_mci_set_ios,
887 .get_ro = dw_mci_get_ro, 922 .get_ro = dw_mci_get_ro,
888 .get_cd = dw_mci_get_cd, 923 .get_cd = dw_mci_get_cd,
889 .enable_sdio_irq = dw_mci_enable_sdio_irq, 924 .enable_sdio_irq = dw_mci_enable_sdio_irq,
890 }; 925 };
891 926
892 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) 927 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
893 __releases(&host->lock) 928 __releases(&host->lock)
894 __acquires(&host->lock) 929 __acquires(&host->lock)
895 { 930 {
896 struct dw_mci_slot *slot; 931 struct dw_mci_slot *slot;
897 struct mmc_host *prev_mmc = host->cur_slot->mmc; 932 struct mmc_host *prev_mmc = host->cur_slot->mmc;
898 933
899 WARN_ON(host->cmd || host->data); 934 WARN_ON(host->cmd || host->data);
900 935
901 host->cur_slot->mrq = NULL; 936 host->cur_slot->mrq = NULL;
902 host->mrq = NULL; 937 host->mrq = NULL;
903 if (!list_empty(&host->queue)) { 938 if (!list_empty(&host->queue)) {
904 slot = list_entry(host->queue.next, 939 slot = list_entry(host->queue.next,
905 struct dw_mci_slot, queue_node); 940 struct dw_mci_slot, queue_node);
906 list_del(&slot->queue_node); 941 list_del(&slot->queue_node);
907 dev_vdbg(&host->dev, "list not empty: %s is next\n", 942 dev_vdbg(&host->dev, "list not empty: %s is next\n",
908 mmc_hostname(slot->mmc)); 943 mmc_hostname(slot->mmc));
909 host->state = STATE_SENDING_CMD; 944 host->state = STATE_SENDING_CMD;
910 dw_mci_start_request(host, slot); 945 dw_mci_start_request(host, slot);
911 } else { 946 } else {
912 dev_vdbg(&host->dev, "list empty\n"); 947 dev_vdbg(&host->dev, "list empty\n");
913 host->state = STATE_IDLE; 948 host->state = STATE_IDLE;
914 } 949 }
915 950
916 spin_unlock(&host->lock); 951 spin_unlock(&host->lock);
917 mmc_request_done(prev_mmc, mrq); 952 mmc_request_done(prev_mmc, mrq);
918 spin_lock(&host->lock); 953 spin_lock(&host->lock);
919 } 954 }
920 955
921 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) 956 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
922 { 957 {
923 u32 status = host->cmd_status; 958 u32 status = host->cmd_status;
924 959
925 host->cmd_status = 0; 960 host->cmd_status = 0;
926 961
927 /* Read the response from the card (up to 16 bytes) */ 962 /* Read the response from the card (up to 16 bytes) */
928 if (cmd->flags & MMC_RSP_PRESENT) { 963 if (cmd->flags & MMC_RSP_PRESENT) {
929 if (cmd->flags & MMC_RSP_136) { 964 if (cmd->flags & MMC_RSP_136) {
930 cmd->resp[3] = mci_readl(host, RESP0); 965 cmd->resp[3] = mci_readl(host, RESP0);
931 cmd->resp[2] = mci_readl(host, RESP1); 966 cmd->resp[2] = mci_readl(host, RESP1);
932 cmd->resp[1] = mci_readl(host, RESP2); 967 cmd->resp[1] = mci_readl(host, RESP2);
933 cmd->resp[0] = mci_readl(host, RESP3); 968 cmd->resp[0] = mci_readl(host, RESP3);
934 } else { 969 } else {
935 cmd->resp[0] = mci_readl(host, RESP0); 970 cmd->resp[0] = mci_readl(host, RESP0);
936 cmd->resp[1] = 0; 971 cmd->resp[1] = 0;
937 cmd->resp[2] = 0; 972 cmd->resp[2] = 0;
938 cmd->resp[3] = 0; 973 cmd->resp[3] = 0;
939 } 974 }
940 } 975 }
941 976
942 if (status & SDMMC_INT_RTO) 977 if (status & SDMMC_INT_RTO)
943 cmd->error = -ETIMEDOUT; 978 cmd->error = -ETIMEDOUT;
944 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)) 979 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
945 cmd->error = -EILSEQ; 980 cmd->error = -EILSEQ;
946 else if (status & SDMMC_INT_RESP_ERR) 981 else if (status & SDMMC_INT_RESP_ERR)
947 cmd->error = -EIO; 982 cmd->error = -EIO;
948 else 983 else
949 cmd->error = 0; 984 cmd->error = 0;
950 985
951 if (cmd->error) { 986 if (cmd->error) {
952 /* newer ip versions need a delay between retries */ 987 /* newer ip versions need a delay between retries */
953 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY) 988 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
954 mdelay(20); 989 mdelay(20);
955 990
956 if (cmd->data) { 991 if (cmd->data) {
957 dw_mci_stop_dma(host); 992 dw_mci_stop_dma(host);
958 host->data = NULL; 993 host->data = NULL;
959 } 994 }
960 } 995 }
961 } 996 }
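
    The reversed indexing when reading a 136-bit (R2) response above is
    intentional: the controller returns the least significant 32 bits in
    RESP0, while the MMC core expects cmd->resp[0] to hold the most
    significant word, so the four registers are copied in reverse order.
    Short responses occupy resp[0] alone and the remaining words are
    cleared.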
962 997
963 static void dw_mci_tasklet_func(unsigned long priv) 998 static void dw_mci_tasklet_func(unsigned long priv)
964 { 999 {
965 struct dw_mci *host = (struct dw_mci *)priv; 1000 struct dw_mci *host = (struct dw_mci *)priv;
966 struct mmc_data *data; 1001 struct mmc_data *data;
967 struct mmc_command *cmd; 1002 struct mmc_command *cmd;
968 enum dw_mci_state state; 1003 enum dw_mci_state state;
969 enum dw_mci_state prev_state; 1004 enum dw_mci_state prev_state;
970 u32 status, ctrl; 1005 u32 status, ctrl;
971 1006
972 spin_lock(&host->lock); 1007 spin_lock(&host->lock);
973 1008
974 state = host->state; 1009 state = host->state;
975 data = host->data; 1010 data = host->data;
976 1011
977 do { 1012 do {
978 prev_state = state; 1013 prev_state = state;
979 1014
980 switch (state) { 1015 switch (state) {
981 case STATE_IDLE: 1016 case STATE_IDLE:
982 break; 1017 break;
983 1018
984 case STATE_SENDING_CMD: 1019 case STATE_SENDING_CMD:
985 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, 1020 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
986 &host->pending_events)) 1021 &host->pending_events))
987 break; 1022 break;
988 1023
989 cmd = host->cmd; 1024 cmd = host->cmd;
990 host->cmd = NULL; 1025 host->cmd = NULL;
991 set_bit(EVENT_CMD_COMPLETE, &host->completed_events); 1026 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
992 dw_mci_command_complete(host, cmd); 1027 dw_mci_command_complete(host, cmd);
993 if (cmd == host->mrq->sbc && !cmd->error) { 1028 if (cmd == host->mrq->sbc && !cmd->error) {
994 prev_state = state = STATE_SENDING_CMD; 1029 prev_state = state = STATE_SENDING_CMD;
995 __dw_mci_start_request(host, host->cur_slot, 1030 __dw_mci_start_request(host, host->cur_slot,
996 host->mrq->cmd); 1031 host->mrq->cmd);
997 goto unlock; 1032 goto unlock;
998 } 1033 }
999 1034
1000 if (!host->mrq->data || cmd->error) { 1035 if (!host->mrq->data || cmd->error) {
1001 dw_mci_request_end(host, host->mrq); 1036 dw_mci_request_end(host, host->mrq);
1002 goto unlock; 1037 goto unlock;
1003 } 1038 }
1004 1039
1005 prev_state = state = STATE_SENDING_DATA; 1040 prev_state = state = STATE_SENDING_DATA;
1006 /* fall through */ 1041 /* fall through */
1007 1042
1008 case STATE_SENDING_DATA: 1043 case STATE_SENDING_DATA:
1009 if (test_and_clear_bit(EVENT_DATA_ERROR, 1044 if (test_and_clear_bit(EVENT_DATA_ERROR,
1010 &host->pending_events)) { 1045 &host->pending_events)) {
1011 dw_mci_stop_dma(host); 1046 dw_mci_stop_dma(host);
1012 if (data->stop) 1047 if (data->stop)
1013 send_stop_cmd(host, data); 1048 send_stop_cmd(host, data);
1014 state = STATE_DATA_ERROR; 1049 state = STATE_DATA_ERROR;
1015 break; 1050 break;
1016 } 1051 }
1017 1052
1018 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 1053 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1019 &host->pending_events)) 1054 &host->pending_events))
1020 break; 1055 break;
1021 1056
1022 set_bit(EVENT_XFER_COMPLETE, &host->completed_events); 1057 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1023 prev_state = state = STATE_DATA_BUSY; 1058 prev_state = state = STATE_DATA_BUSY;
1024 /* fall through */ 1059 /* fall through */
1025 1060
1026 case STATE_DATA_BUSY: 1061 case STATE_DATA_BUSY:
1027 if (!test_and_clear_bit(EVENT_DATA_COMPLETE, 1062 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1028 &host->pending_events)) 1063 &host->pending_events))
1029 break; 1064 break;
1030 1065
1031 host->data = NULL; 1066 host->data = NULL;
1032 set_bit(EVENT_DATA_COMPLETE, &host->completed_events); 1067 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1033 status = host->data_status; 1068 status = host->data_status;
1034 1069
1035 if (status & DW_MCI_DATA_ERROR_FLAGS) { 1070 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1036 if (status & SDMMC_INT_DTO) { 1071 if (status & SDMMC_INT_DTO) {
1037 data->error = -ETIMEDOUT; 1072 data->error = -ETIMEDOUT;
1038 } else if (status & SDMMC_INT_DCRC) { 1073 } else if (status & SDMMC_INT_DCRC) {
1039 data->error = -EILSEQ; 1074 data->error = -EILSEQ;
1040 } else if (status & SDMMC_INT_EBE && 1075 } else if (status & SDMMC_INT_EBE &&
1041 host->dir_status == 1076 host->dir_status ==
1042 DW_MCI_SEND_STATUS) { 1077 DW_MCI_SEND_STATUS) {
1043 /* 1078 /*
1044 * No data CRC status was returned. 1079 * No data CRC status was returned.
1045 * The number of bytes transferred will 1080 * The number of bytes transferred will
1046 * be exaggerated in PIO mode. 1081 * be exaggerated in PIO mode.
1047 */ 1082 */
1048 data->bytes_xfered = 0; 1083 data->bytes_xfered = 0;
1049 data->error = -ETIMEDOUT; 1084 data->error = -ETIMEDOUT;
1050 } else { 1085 } else {
1051 dev_err(&host->dev, 1086 dev_err(&host->dev,
1052 "data FIFO error " 1087 "data FIFO error "
1053 "(status=%08x)\n", 1088 "(status=%08x)\n",
1054 status); 1089 status);
1055 data->error = -EIO; 1090 data->error = -EIO;
1056 } 1091 }
1057 /* 1092 /*
1058 * After an error, there may be data lingering 1093 * After an error, there may be data lingering
1059 * in the FIFO, so reset it - doing so 1094 * in the FIFO, so reset it - doing so
1060 * generates a block interrupt, hence setting 1095 * generates a block interrupt, hence setting
1061 * the scatter-gather pointer to NULL. 1096 * the scatter-gather pointer to NULL.
1062 */ 1097 */
1063 sg_miter_stop(&host->sg_miter); 1098 sg_miter_stop(&host->sg_miter);
1064 host->sg = NULL; 1099 host->sg = NULL;
1065 ctrl = mci_readl(host, CTRL); 1100 ctrl = mci_readl(host, CTRL);
1066 ctrl |= SDMMC_CTRL_FIFO_RESET; 1101 ctrl |= SDMMC_CTRL_FIFO_RESET;
1067 mci_writel(host, CTRL, ctrl); 1102 mci_writel(host, CTRL, ctrl);
1068 } else { 1103 } else {
1069 data->bytes_xfered = data->blocks * data->blksz; 1104 data->bytes_xfered = data->blocks * data->blksz;
1070 data->error = 0; 1105 data->error = 0;
1071 } 1106 }
1072 1107
1073 if (!data->stop) { 1108 if (!data->stop) {
1074 dw_mci_request_end(host, host->mrq); 1109 dw_mci_request_end(host, host->mrq);
1075 goto unlock; 1110 goto unlock;
1076 } 1111 }
1077 1112
1078 if (host->mrq->sbc && !data->error) { 1113 if (host->mrq->sbc && !data->error) {
1079 data->stop->error = 0; 1114 data->stop->error = 0;
1080 dw_mci_request_end(host, host->mrq); 1115 dw_mci_request_end(host, host->mrq);
1081 goto unlock; 1116 goto unlock;
1082 } 1117 }
1083 1118
1084 prev_state = state = STATE_SENDING_STOP; 1119 prev_state = state = STATE_SENDING_STOP;
1085 if (!data->error) 1120 if (!data->error)
1086 send_stop_cmd(host, data); 1121 send_stop_cmd(host, data);
1087 /* fall through */ 1122 /* fall through */
1088 1123
1089 case STATE_SENDING_STOP: 1124 case STATE_SENDING_STOP:
1090 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, 1125 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1091 &host->pending_events)) 1126 &host->pending_events))
1092 break; 1127 break;
1093 1128
1094 host->cmd = NULL; 1129 host->cmd = NULL;
1095 dw_mci_command_complete(host, host->mrq->stop); 1130 dw_mci_command_complete(host, host->mrq->stop);
1096 dw_mci_request_end(host, host->mrq); 1131 dw_mci_request_end(host, host->mrq);
1097 goto unlock; 1132 goto unlock;
1098 1133
1099 case STATE_DATA_ERROR: 1134 case STATE_DATA_ERROR:
1100 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 1135 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1101 &host->pending_events)) 1136 &host->pending_events))
1102 break; 1137 break;
1103 1138
1104 state = STATE_DATA_BUSY; 1139 state = STATE_DATA_BUSY;
1105 break; 1140 break;
1106 } 1141 }
1107 } while (state != prev_state); 1142 } while (state != prev_state);
1108 1143
1109 host->state = state; 1144 host->state = state;
1110 unlock: 1145 unlock:
1111 spin_unlock(&host->lock); 1146 spin_unlock(&host->lock);
1112 1147
1113 } 1148 }
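
    dw_mci_tasklet_func() is the driver's bottom half: the interrupt
    handler only latches bits in host->pending_events, and this tasklet
    consumes them with test_and_clear_bit() while walking the request
    state machine (SENDING_CMD -> SENDING_DATA -> DATA_BUSY ->
    SENDING_STOP), using the switch fall-throughs to advance several
    states in one pass. The do/while keeps iterating until a pass makes no
    transition, i.e. until progress is blocked on the next interrupt. A
    runnable toy version of that fixed-point loop, reduced to a single
    transition:

        #include <stdio.h>

        enum state { STATE_IDLE, STATE_SENDING_CMD, STATE_SENDING_DATA };

        int main(void)
        {
                enum state st = STATE_SENDING_CMD, prev;
                int cmd_done = 1;       /* pretend the IRQ already fired */

                do {
                        prev = st;
                        switch (st) {
                        case STATE_SENDING_CMD:
                                if (!cmd_done)
                                        break;  /* wait for the interrupt */
                                cmd_done = 0;   /* consume the event */
                                st = STATE_SENDING_DATA;
                                break;
                        default:
                                break;
                        }
                } while (st != prev);   /* stop once a pass changes nothing */

                printf("settled in state %d\n", st);
                return 0;
        }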
1114 1149
1115 /* push final bytes to part_buf, only use during push */ 1150 /* push final bytes to part_buf, only use during push */
1116 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) 1151 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1117 { 1152 {
1118 memcpy((void *)&host->part_buf, buf, cnt); 1153 memcpy((void *)&host->part_buf, buf, cnt);
1119 host->part_buf_count = cnt; 1154 host->part_buf_count = cnt;
1120 } 1155 }
1121 1156
1122 /* append bytes to part_buf, only use during push */ 1157 /* append bytes to part_buf, only use during push */
1123 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) 1158 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1124 { 1159 {
1125 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); 1160 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1126 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt); 1161 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1127 host->part_buf_count += cnt; 1162 host->part_buf_count += cnt;
1128 return cnt; 1163 return cnt;
1129 } 1164 }
1130 1165
1131 /* pull first bytes from part_buf, only use during pull */ 1166 /* pull first bytes from part_buf, only use during pull */
1132 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) 1167 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1133 { 1168 {
1134 cnt = min(cnt, (int)host->part_buf_count); 1169 cnt = min(cnt, (int)host->part_buf_count);
1135 if (cnt) { 1170 if (cnt) {
1136 memcpy(buf, (void *)&host->part_buf + host->part_buf_start, 1171 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1137 cnt); 1172 cnt);
1138 host->part_buf_count -= cnt; 1173 host->part_buf_count -= cnt;
1139 host->part_buf_start += cnt; 1174 host->part_buf_start += cnt;
1140 } 1175 }
1141 return cnt; 1176 return cnt;
1142 } 1177 }
1143 1178
1144 /* pull final bytes from the part_buf, assuming it's just been filled */ 1179 /* pull final bytes from the part_buf, assuming it's just been filled */
1145 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) 1180 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1146 { 1181 {
1147 memcpy(buf, &host->part_buf, cnt); 1182 memcpy(buf, &host->part_buf, cnt);
1148 host->part_buf_start = cnt; 1183 host->part_buf_start = cnt;
1149 host->part_buf_count = (1 << host->data_shift) - cnt; 1184 host->part_buf_count = (1 << host->data_shift) - cnt;
1150 } 1185 }
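
    These four helpers manage host->part_buf, the staging area that lets
    the PIO code honour the FIFO's access width: the FIFO must be read and
    written in whole host words (1 << host->data_shift bytes), but a
    scatter-gather entry can end mid-word, so leftover bytes are parked in
    part_buf and completed or drained by the next entry. A standalone
    sketch of the push side, assuming a fixed 4-byte word:

        #include <stdio.h>
        #include <string.h>

        static unsigned char part_buf[4];
        static int part_buf_count;

        /* stage up to (word size - staged) bytes, as in
         * dw_mci_push_part_bytes() */
        static int push_part_bytes(const void *buf, int cnt)
        {
                int n = 4 - part_buf_count;
                if (cnt < n)
                        n = cnt;
                memcpy(part_buf + part_buf_count, buf, n);
                part_buf_count += n;
                return n;
        }

        int main(void)
        {
                const unsigned char a[] = { 1, 2, 3 }, b[] = { 4, 5 };

                push_part_bytes(a, sizeof(a));  /* 3 bytes staged */
                push_part_bytes(b, sizeof(b));  /* 1 more byte completes the word */
                printf("staged %d bytes: %02x %02x %02x %02x\n",
                       part_buf_count, part_buf[0], part_buf[1],
                       part_buf[2], part_buf[3]);
                return 0;
        }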
1151 1186
1152 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) 1187 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1153 { 1188 {
1154 /* try and push anything in the part_buf */ 1189 /* try and push anything in the part_buf */
1155 if (unlikely(host->part_buf_count)) { 1190 if (unlikely(host->part_buf_count)) {
1156 int len = dw_mci_push_part_bytes(host, buf, cnt); 1191 int len = dw_mci_push_part_bytes(host, buf, cnt);
1157 buf += len; 1192 buf += len;
1158 cnt -= len; 1193 cnt -= len;
1159 if (!sg_next(host->sg) || host->part_buf_count == 2) { 1194 if (!sg_next(host->sg) || host->part_buf_count == 2) {
1160 mci_writew(host, DATA(host->data_offset), 1195 mci_writew(host, DATA(host->data_offset),
1161 host->part_buf16); 1196 host->part_buf16);
1162 host->part_buf_count = 0; 1197 host->part_buf_count = 0;
1163 } 1198 }
1164 } 1199 }
1165 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1200 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1166 if (unlikely((unsigned long)buf & 0x1)) { 1201 if (unlikely((unsigned long)buf & 0x1)) {
1167 while (cnt >= 2) { 1202 while (cnt >= 2) {
1168 u16 aligned_buf[64]; 1203 u16 aligned_buf[64];
1169 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 1204 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1170 int items = len >> 1; 1205 int items = len >> 1;
1171 int i; 1206 int i;
1172 /* memcpy from input buffer into aligned buffer */ 1207 /* memcpy from input buffer into aligned buffer */
1173 memcpy(aligned_buf, buf, len); 1208 memcpy(aligned_buf, buf, len);
1174 buf += len; 1209 buf += len;
1175 cnt -= len; 1210 cnt -= len;
1176 /* push data from aligned buffer into fifo */ 1211 /* push data from aligned buffer into fifo */
1177 for (i = 0; i < items; ++i) 1212 for (i = 0; i < items; ++i)
1178 mci_writew(host, DATA(host->data_offset), 1213 mci_writew(host, DATA(host->data_offset),
1179 aligned_buf[i]); 1214 aligned_buf[i]);
1180 } 1215 }
1181 } else 1216 } else
1182 #endif 1217 #endif
1183 { 1218 {
1184 u16 *pdata = buf; 1219 u16 *pdata = buf;
1185 for (; cnt >= 2; cnt -= 2) 1220 for (; cnt >= 2; cnt -= 2)
1186 mci_writew(host, DATA(host->data_offset), *pdata++); 1221 mci_writew(host, DATA(host->data_offset), *pdata++);
1187 buf = pdata; 1222 buf = pdata;
1188 } 1223 }
1189 /* put anything remaining in the part_buf */ 1224 /* put anything remaining in the part_buf */
1190 if (cnt) { 1225 if (cnt) {
1191 dw_mci_set_part_bytes(host, buf, cnt); 1226 dw_mci_set_part_bytes(host, buf, cnt);
1192 if (!sg_next(host->sg)) 1227 if (!sg_next(host->sg))
1193 mci_writew(host, DATA(host->data_offset), 1228 mci_writew(host, DATA(host->data_offset),
1194 host->part_buf16); 1229 host->part_buf16);
1195 } 1230 }
1196 } 1231 }
1197 1232
1198 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) 1233 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1199 { 1234 {
1200 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1235 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1201 if (unlikely((unsigned long)buf & 0x1)) { 1236 if (unlikely((unsigned long)buf & 0x1)) {
1202 while (cnt >= 2) { 1237 while (cnt >= 2) {
1203 /* pull data from fifo into aligned buffer */ 1238 /* pull data from fifo into aligned buffer */
1204 u16 aligned_buf[64]; 1239 u16 aligned_buf[64];
1205 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 1240 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1206 int items = len >> 1; 1241 int items = len >> 1;
1207 int i; 1242 int i;
1208 for (i = 0; i < items; ++i) 1243 for (i = 0; i < items; ++i)
1209 aligned_buf[i] = mci_readw(host, 1244 aligned_buf[i] = mci_readw(host,
1210 DATA(host->data_offset)); 1245 DATA(host->data_offset));
1211 /* memcpy from aligned buffer into output buffer */ 1246 /* memcpy from aligned buffer into output buffer */
1212 memcpy(buf, aligned_buf, len); 1247 memcpy(buf, aligned_buf, len);
1213 buf += len; 1248 buf += len;
1214 cnt -= len; 1249 cnt -= len;
1215 } 1250 }
1216 } else 1251 } else
1217 #endif 1252 #endif
1218 { 1253 {
1219 u16 *pdata = buf; 1254 u16 *pdata = buf;
1220 for (; cnt >= 2; cnt -= 2) 1255 for (; cnt >= 2; cnt -= 2)
1221 *pdata++ = mci_readw(host, DATA(host->data_offset)); 1256 *pdata++ = mci_readw(host, DATA(host->data_offset));
1222 buf = pdata; 1257 buf = pdata;
1223 } 1258 }
1224 if (cnt) { 1259 if (cnt) {
1225 host->part_buf16 = mci_readw(host, DATA(host->data_offset)); 1260 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1226 dw_mci_pull_final_bytes(host, buf, cnt); 1261 dw_mci_pull_final_bytes(host, buf, cnt);
1227 } 1262 }
1228 } 1263 }
1229 1264
1230 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) 1265 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1231 { 1266 {
1232 /* try and push anything in the part_buf */ 1267 /* try and push anything in the part_buf */
1233 if (unlikely(host->part_buf_count)) { 1268 if (unlikely(host->part_buf_count)) {
1234 int len = dw_mci_push_part_bytes(host, buf, cnt); 1269 int len = dw_mci_push_part_bytes(host, buf, cnt);
1235 buf += len; 1270 buf += len;
1236 cnt -= len; 1271 cnt -= len;
1237 if (!sg_next(host->sg) || host->part_buf_count == 4) { 1272 if (!sg_next(host->sg) || host->part_buf_count == 4) {
1238 mci_writel(host, DATA(host->data_offset), 1273 mci_writel(host, DATA(host->data_offset),
1239 host->part_buf32); 1274 host->part_buf32);
1240 host->part_buf_count = 0; 1275 host->part_buf_count = 0;
1241 } 1276 }
1242 } 1277 }
1243 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1278 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1244 if (unlikely((unsigned long)buf & 0x3)) { 1279 if (unlikely((unsigned long)buf & 0x3)) {
1245 while (cnt >= 4) { 1280 while (cnt >= 4) {
1246 u32 aligned_buf[32]; 1281 u32 aligned_buf[32];
1247 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 1282 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1248 int items = len >> 2; 1283 int items = len >> 2;
1249 int i; 1284 int i;
1250 /* memcpy from input buffer into aligned buffer */ 1285 /* memcpy from input buffer into aligned buffer */
1251 memcpy(aligned_buf, buf, len); 1286 memcpy(aligned_buf, buf, len);
1252 buf += len; 1287 buf += len;
1253 cnt -= len; 1288 cnt -= len;
1254 /* push data from aligned buffer into fifo */ 1289 /* push data from aligned buffer into fifo */
1255 for (i = 0; i < items; ++i) 1290 for (i = 0; i < items; ++i)
1256 mci_writel(host, DATA(host->data_offset), 1291 mci_writel(host, DATA(host->data_offset),
1257 aligned_buf[i]); 1292 aligned_buf[i]);
1258 } 1293 }
1259 } else 1294 } else
1260 #endif 1295 #endif
1261 { 1296 {
1262 u32 *pdata = buf; 1297 u32 *pdata = buf;
1263 for (; cnt >= 4; cnt -= 4) 1298 for (; cnt >= 4; cnt -= 4)
1264 mci_writel(host, DATA(host->data_offset), *pdata++); 1299 mci_writel(host, DATA(host->data_offset), *pdata++);
1265 buf = pdata; 1300 buf = pdata;
1266 } 1301 }
1267 /* put anything remaining in the part_buf */ 1302 /* put anything remaining in the part_buf */
1268 if (cnt) { 1303 if (cnt) {
1269 dw_mci_set_part_bytes(host, buf, cnt); 1304 dw_mci_set_part_bytes(host, buf, cnt);
1270 if (!sg_next(host->sg)) 1305 if (!sg_next(host->sg))
1271 mci_writel(host, DATA(host->data_offset), 1306 mci_writel(host, DATA(host->data_offset),
1272 host->part_buf32); 1307 host->part_buf32);
1273 } 1308 }
1274 } 1309 }
1275 1310
1276 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) 1311 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1277 { 1312 {
1278 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1313 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1279 if (unlikely((unsigned long)buf & 0x3)) { 1314 if (unlikely((unsigned long)buf & 0x3)) {
1280 while (cnt >= 4) { 1315 while (cnt >= 4) {
1281 /* pull data from fifo into aligned buffer */ 1316 /* pull data from fifo into aligned buffer */
1282 u32 aligned_buf[32]; 1317 u32 aligned_buf[32];
1283 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 1318 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1284 int items = len >> 2; 1319 int items = len >> 2;
1285 int i; 1320 int i;
1286 for (i = 0; i < items; ++i) 1321 for (i = 0; i < items; ++i)
1287 aligned_buf[i] = mci_readl(host, 1322 aligned_buf[i] = mci_readl(host,
1288 DATA(host->data_offset)); 1323 DATA(host->data_offset));
1289 /* memcpy from aligned buffer into output buffer */ 1324 /* memcpy from aligned buffer into output buffer */
1290 memcpy(buf, aligned_buf, len); 1325 memcpy(buf, aligned_buf, len);
1291 buf += len; 1326 buf += len;
1292 cnt -= len; 1327 cnt -= len;
1293 } 1328 }
1294 } else 1329 } else
1295 #endif 1330 #endif
1296 { 1331 {
1297 u32 *pdata = buf; 1332 u32 *pdata = buf;
1298 for (; cnt >= 4; cnt -= 4) 1333 for (; cnt >= 4; cnt -= 4)
1299 *pdata++ = mci_readl(host, DATA(host->data_offset)); 1334 *pdata++ = mci_readl(host, DATA(host->data_offset));
1300 buf = pdata; 1335 buf = pdata;
1301 } 1336 }
1302 if (cnt) { 1337 if (cnt) {
1303 host->part_buf32 = mci_readl(host, DATA(host->data_offset)); 1338 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1304 dw_mci_pull_final_bytes(host, buf, cnt); 1339 dw_mci_pull_final_bytes(host, buf, cnt);
1305 } 1340 }
1306 } 1341 }
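
    All of the push/pull 16/32/64 variants share the same unaligned-access
    strategy: on architectures without
    CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, FIFO words are bounced through
    a stack-local aligned array and moved with memcpy(), which carries no
    alignment requirement; otherwise the caller's buffer is accessed
    directly through a typed pointer. A minimal sketch of the bounce on
    the pull (read) side, with a constant standing in for the FIFO
    register read:

        #include <stdio.h>
        #include <string.h>
        #include <stdint.h>

        static uint32_t fifo_word = 0xdeadbeef; /* stand-in for mci_readl() */

        int main(void)
        {
                unsigned char raw[8];
                unsigned char *dst = raw + 1;   /* deliberately misaligned */
                uint32_t aligned_buf[1];

                aligned_buf[0] = fifo_word;     /* aligned 32-bit read */
                memcpy(dst, aligned_buf, sizeof(aligned_buf));
                printf("%02x %02x %02x %02x\n",
                       dst[0], dst[1], dst[2], dst[3]);
                return 0;
        }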
1307 1342
1308 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) 1343 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1309 { 1344 {
1310 /* try and push anything in the part_buf */ 1345 /* try and push anything in the part_buf */
1311 if (unlikely(host->part_buf_count)) { 1346 if (unlikely(host->part_buf_count)) {
1312 int len = dw_mci_push_part_bytes(host, buf, cnt); 1347 int len = dw_mci_push_part_bytes(host, buf, cnt);
1313 buf += len; 1348 buf += len;
1314 cnt -= len; 1349 cnt -= len;
1315 if (!sg_next(host->sg) || host->part_buf_count == 8) { 1350 if (!sg_next(host->sg) || host->part_buf_count == 8) {
1316 mci_writew(host, DATA(host->data_offset), 1351 mci_writew(host, DATA(host->data_offset),
1317 host->part_buf); 1352 host->part_buf);
1318 host->part_buf_count = 0; 1353 host->part_buf_count = 0;
1319 } 1354 }
1320 } 1355 }
1321 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1356 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1322 if (unlikely((unsigned long)buf & 0x7)) { 1357 if (unlikely((unsigned long)buf & 0x7)) {
1323 while (cnt >= 8) { 1358 while (cnt >= 8) {
1324 u64 aligned_buf[16]; 1359 u64 aligned_buf[16];
1325 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 1360 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1326 int items = len >> 3; 1361 int items = len >> 3;
1327 int i; 1362 int i;
1328 /* memcpy from input buffer into aligned buffer */ 1363 /* memcpy from input buffer into aligned buffer */
1329 memcpy(aligned_buf, buf, len); 1364 memcpy(aligned_buf, buf, len);
1330 buf += len; 1365 buf += len;
1331 cnt -= len; 1366 cnt -= len;
1332 /* push data from aligned buffer into fifo */ 1367 /* push data from aligned buffer into fifo */
1333 for (i = 0; i < items; ++i) 1368 for (i = 0; i < items; ++i)
1334 mci_writeq(host, DATA(host->data_offset), 1369 mci_writeq(host, DATA(host->data_offset),
1335 aligned_buf[i]); 1370 aligned_buf[i]);
1336 } 1371 }
1337 } else 1372 } else
1338 #endif 1373 #endif
1339 { 1374 {
1340 u64 *pdata = buf; 1375 u64 *pdata = buf;
1341 for (; cnt >= 8; cnt -= 8) 1376 for (; cnt >= 8; cnt -= 8)
1342 mci_writeq(host, DATA(host->data_offset), *pdata++); 1377 mci_writeq(host, DATA(host->data_offset), *pdata++);
1343 buf = pdata; 1378 buf = pdata;
1344 } 1379 }
1345 /* put anything remaining in the part_buf */ 1380 /* put anything remaining in the part_buf */
1346 if (cnt) { 1381 if (cnt) {
1347 dw_mci_set_part_bytes(host, buf, cnt); 1382 dw_mci_set_part_bytes(host, buf, cnt);
1348 if (!sg_next(host->sg)) 1383 if (!sg_next(host->sg))
1349 mci_writeq(host, DATA(host->data_offset), 1384 mci_writeq(host, DATA(host->data_offset),
1350 host->part_buf); 1385 host->part_buf);
1351 } 1386 }
1352 } 1387 }
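
    One oddity worth noting in dw_mci_push_data64() above: the early flush
    of a completed partial word uses mci_writew() (a 16-bit write) on the
    64-bit host->part_buf, while the tail flush in the same function uses
    mci_writeq() and the 16/32-bit variants use the matching-width write
    in both places. That looks like a latent width mismatch rather than
    something this series touches; it would only matter when a scatter
    list entry ends mid-word on a 64-bit FIFO.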
1353 1388
1354 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) 1389 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1355 { 1390 {
1356 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1391 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1357 if (unlikely((unsigned long)buf & 0x7)) { 1392 if (unlikely((unsigned long)buf & 0x7)) {
1358 while (cnt >= 8) { 1393 while (cnt >= 8) {
1359 /* pull data from fifo into aligned buffer */ 1394 /* pull data from fifo into aligned buffer */
1360 u64 aligned_buf[16]; 1395 u64 aligned_buf[16];
1361 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 1396 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1362 int items = len >> 3; 1397 int items = len >> 3;
1363 int i; 1398 int i;
1364 for (i = 0; i < items; ++i) 1399 for (i = 0; i < items; ++i)
1365 aligned_buf[i] = mci_readq(host, 1400 aligned_buf[i] = mci_readq(host,
1366 DATA(host->data_offset)); 1401 DATA(host->data_offset));
1367 /* memcpy from aligned buffer into output buffer */ 1402 /* memcpy from aligned buffer into output buffer */
1368 memcpy(buf, aligned_buf, len); 1403 memcpy(buf, aligned_buf, len);
1369 buf += len; 1404 buf += len;
1370 cnt -= len; 1405 cnt -= len;
1371 } 1406 }
1372 } else 1407 } else
1373 #endif 1408 #endif
1374 { 1409 {
1375 u64 *pdata = buf; 1410 u64 *pdata = buf;
1376 for (; cnt >= 8; cnt -= 8) 1411 for (; cnt >= 8; cnt -= 8)
1377 *pdata++ = mci_readq(host, DATA(host->data_offset)); 1412 *pdata++ = mci_readq(host, DATA(host->data_offset));
1378 buf = pdata; 1413 buf = pdata;
1379 } 1414 }
1380 if (cnt) { 1415 if (cnt) {
1381 host->part_buf = mci_readq(host, DATA(host->data_offset)); 1416 host->part_buf = mci_readq(host, DATA(host->data_offset));
1382 dw_mci_pull_final_bytes(host, buf, cnt); 1417 dw_mci_pull_final_bytes(host, buf, cnt);
1383 } 1418 }
1384 } 1419 }
1385 1420
1386 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) 1421 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1387 { 1422 {
1388 int len; 1423 int len;
1389 1424
1390 /* get remaining partial bytes */ 1425 /* get remaining partial bytes */
1391 len = dw_mci_pull_part_bytes(host, buf, cnt); 1426 len = dw_mci_pull_part_bytes(host, buf, cnt);
1392 if (unlikely(len == cnt)) 1427 if (unlikely(len == cnt))
1393 return; 1428 return;
1394 buf += len; 1429 buf += len;
1395 cnt -= len; 1430 cnt -= len;
1396 1431
1397 /* get the rest of the data */ 1432 /* get the rest of the data */
1398 host->pull_data(host, buf, cnt); 1433 host->pull_data(host, buf, cnt);
1399 } 1434 }
1400 1435
1401 static void dw_mci_read_data_pio(struct dw_mci *host) 1436 static void dw_mci_read_data_pio(struct dw_mci *host)
1402 { 1437 {
1403 struct sg_mapping_iter *sg_miter = &host->sg_miter; 1438 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1404 void *buf; 1439 void *buf;
1405 unsigned int offset; 1440 unsigned int offset;
1406 struct mmc_data *data = host->data; 1441 struct mmc_data *data = host->data;
1407 int shift = host->data_shift; 1442 int shift = host->data_shift;
1408 u32 status; 1443 u32 status;
1409 unsigned int nbytes = 0, len; 1444 unsigned int nbytes = 0, len;
1410 unsigned int remain, fcnt; 1445 unsigned int remain, fcnt;
1411 1446
1412 do { 1447 do {
1413 if (!sg_miter_next(sg_miter)) 1448 if (!sg_miter_next(sg_miter))
1414 goto done; 1449 goto done;
1415 1450
1416 host->sg = sg_miter->__sg; 1451 host->sg = sg_miter->__sg;
1417 buf = sg_miter->addr; 1452 buf = sg_miter->addr;
1418 remain = sg_miter->length; 1453 remain = sg_miter->length;
1419 offset = 0; 1454 offset = 0;
1420 1455
1421 do { 1456 do {
1422 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) 1457 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1423 << shift) + host->part_buf_count; 1458 << shift) + host->part_buf_count;
1424 len = min(remain, fcnt); 1459 len = min(remain, fcnt);
1425 if (!len) 1460 if (!len)
1426 break; 1461 break;
1427 dw_mci_pull_data(host, (void *)(buf + offset), len); 1462 dw_mci_pull_data(host, (void *)(buf + offset), len);
1428 offset += len; 1463 offset += len;
1429 nbytes += len; 1464 nbytes += len;
1430 remain -= len; 1465 remain -= len;
1431 } while (remain); 1466 } while (remain);
1432 sg_miter->consumed = offset;
1433 1467
1468 sg_miter->consumed = offset;
1434 status = mci_readl(host, MINTSTS); 1469 status = mci_readl(host, MINTSTS);
1435 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 1470 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1436 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1437 host->data_status = status;
1438 data->bytes_xfered += nbytes;
1439 sg_miter_stop(sg_miter);
1440 host->sg = NULL;
1441 smp_wmb();
1442
1443 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1444
1445 tasklet_schedule(&host->tasklet);
1446 return;
1447 }
1448 } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ 1471 } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
1449 data->bytes_xfered += nbytes; 1472 data->bytes_xfered += nbytes;
1450 1473
1451 if (!remain) { 1474 if (!remain) {
1452 if (!sg_miter_next(sg_miter)) 1475 if (!sg_miter_next(sg_miter))
1453 goto done; 1476 goto done;
1454 sg_miter->consumed = 0; 1477 sg_miter->consumed = 0;
1455 } 1478 }
1456 sg_miter_stop(sg_miter); 1479 sg_miter_stop(sg_miter);
1457 return; 1480 return;
1458 1481
1459 done: 1482 done:
1460 data->bytes_xfered += nbytes; 1483 data->bytes_xfered += nbytes;
1461 sg_miter_stop(sg_miter); 1484 sg_miter_stop(sg_miter);
1462 host->sg = NULL; 1485 host->sg = NULL;
1463 smp_wmb(); 1486 smp_wmb();
1464 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 1487 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1465 } 1488 }
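
    dw_mci_read_data_pio() drains exactly as many bytes as are actually
    available: SDMMC_GET_FCNT(STATUS) reports the FIFO fill level in host
    words, which is scaled to bytes with data_shift and topped up with
    whatever is already staged in part_buf. A worked example of that
    computation:

        #include <stdio.h>

        int main(void)
        {
                unsigned fifo_fill = 5; /* SDMMC_GET_FCNT(STATUS): words in FIFO */
                int shift = 2;          /* host->data_shift for a 32-bit FIFO */
                unsigned part_buf_count = 3;
                unsigned fcnt = (fifo_fill << shift) + part_buf_count;

                printf("readable bytes: %u\n", fcnt);   /* 5*4 + 3 = 23 */
                return 0;
        }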
1466 1489
1467 static void dw_mci_write_data_pio(struct dw_mci *host) 1490 static void dw_mci_write_data_pio(struct dw_mci *host)
1468 { 1491 {
1469 struct sg_mapping_iter *sg_miter = &host->sg_miter; 1492 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1470 void *buf; 1493 void *buf;
1471 unsigned int offset; 1494 unsigned int offset;
1472 struct mmc_data *data = host->data; 1495 struct mmc_data *data = host->data;
1473 int shift = host->data_shift; 1496 int shift = host->data_shift;
1474 u32 status; 1497 u32 status;
1475 unsigned int nbytes = 0, len; 1498 unsigned int nbytes = 0, len;
1476 unsigned int fifo_depth = host->fifo_depth; 1499 unsigned int fifo_depth = host->fifo_depth;
1477 unsigned int remain, fcnt; 1500 unsigned int remain, fcnt;
1478 1501
1479 do { 1502 do {
1480 if (!sg_miter_next(sg_miter)) 1503 if (!sg_miter_next(sg_miter))
1481 goto done; 1504 goto done;
1482 1505
1483 host->sg = sg_miter->__sg; 1506 host->sg = sg_miter->__sg;
1484 buf = sg_miter->addr; 1507 buf = sg_miter->addr;
1485 remain = sg_miter->length; 1508 remain = sg_miter->length;
1486 offset = 0; 1509 offset = 0;
1487 1510
1488 do { 1511 do {
1489 fcnt = ((fifo_depth - 1512 fcnt = ((fifo_depth -
1490 SDMMC_GET_FCNT(mci_readl(host, STATUS))) 1513 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1491 << shift) - host->part_buf_count; 1514 << shift) - host->part_buf_count;
1492 len = min(remain, fcnt); 1515 len = min(remain, fcnt);
1493 if (!len) 1516 if (!len)
1494 break; 1517 break;
1495 host->push_data(host, (void *)(buf + offset), len); 1518 host->push_data(host, (void *)(buf + offset), len);
1496 offset += len; 1519 offset += len;
1497 nbytes += len; 1520 nbytes += len;
1498 remain -= len; 1521 remain -= len;
1499 } while (remain); 1522 } while (remain);
1500 sg_miter->consumed = offset;
1501 1523
1524 sg_miter->consumed = offset;
1502 status = mci_readl(host, MINTSTS); 1525 status = mci_readl(host, MINTSTS);
1503 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 1526 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1504 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1505 host->data_status = status;
1506 data->bytes_xfered += nbytes;
1507 sg_miter_stop(sg_miter);
1508 host->sg = NULL;
1509
1510 smp_wmb();
1511
1512 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1513
1514 tasklet_schedule(&host->tasklet);
1515 return;
1516 }
1517 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ 1527 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1518 data->bytes_xfered += nbytes; 1528 data->bytes_xfered += nbytes;
1519 1529
1520 if (!remain) { 1530 if (!remain) {
1521 if (!sg_miter_next(sg_miter)) 1531 if (!sg_miter_next(sg_miter))
1522 goto done; 1532 goto done;
1523 sg_miter->consumed = 0; 1533 sg_miter->consumed = 0;
1524 } 1534 }
1525 sg_miter_stop(sg_miter); 1535 sg_miter_stop(sg_miter);
1526 return; 1536 return;
1527 1537
1528 done: 1538 done:
1529 data->bytes_xfered += nbytes; 1539 data->bytes_xfered += nbytes;
1530 sg_miter_stop(sg_miter); 1540 sg_miter_stop(sg_miter);
1531 host->sg = NULL; 1541 host->sg = NULL;
1532 smp_wmb(); 1542 smp_wmb();
1533 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 1543 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1534 } 1544 }
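
    The write path mirrors the read path, except that the usable byte
    count is the FIFO's free space, ((fifo_depth - fill) << shift), minus
    the bytes already staged in part_buf. Also note what both PIO hunks
    above delete: the per-iteration DW_MCI_DATA_ERROR_FLAGS checks are
    gone, because with this series the interrupt handler itself latches
    data errors and schedules the tasklet (see dw_mci_interrupt() below),
    so the PIO loops no longer need to duplicate that error handling.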
1535 1545
1536 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) 1546 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1537 { 1547 {
1538 if (!host->cmd_status) 1548 if (!host->cmd_status)
1539 host->cmd_status = status; 1549 host->cmd_status = status;
1540 1550
1541 smp_wmb(); 1551 smp_wmb();
1542 1552
1543 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 1553 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1544 tasklet_schedule(&host->tasklet); 1554 tasklet_schedule(&host->tasklet);
1545 } 1555 }
1546 1556
1547 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) 1557 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1548 { 1558 {
1549 struct dw_mci *host = dev_id; 1559 struct dw_mci *host = dev_id;
1550 u32 status, pending; 1560 u32 pending;
1551 unsigned int pass_count = 0; 1561 unsigned int pass_count = 0;
1552 int i; 1562 int i;
1553 1563
1554 do { 1564 do {
1555 status = mci_readl(host, RINTSTS);
1556 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 1565 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1557 1566
1558 /* 1567 /*
1559 * DTO fix - version 2.10a and below, and only if internal DMA 1568 * DTO fix - version 2.10a and below, and only if internal DMA
1560 * is configured. 1569 * is configured.
1561 */ 1570 */
1562 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) { 1571 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1563 if (!pending && 1572 if (!pending &&
1564 ((mci_readl(host, STATUS) >> 17) & 0x1fff)) 1573 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1565 pending |= SDMMC_INT_DATA_OVER; 1574 pending |= SDMMC_INT_DATA_OVER;
1566 } 1575 }
1567 1576
1568 if (!pending) 1577 if (!pending)
1569 break; 1578 break;
1570 1579
1571 if (pending & DW_MCI_CMD_ERROR_FLAGS) { 1580 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1572 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); 1581 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
1573 host->cmd_status = status; 1582 host->cmd_status = pending;
1574 smp_wmb(); 1583 smp_wmb();
1575 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 1584 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1576 } 1585 }
1577 1586
1578 if (pending & DW_MCI_DATA_ERROR_FLAGS) { 1587 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1579 /* if there is an error report DATA_ERROR */ 1588 /* if there is an error report DATA_ERROR */
1580 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); 1589 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
1581 host->data_status = status; 1590 host->data_status = pending;
1582 smp_wmb(); 1591 smp_wmb();
1583 set_bit(EVENT_DATA_ERROR, &host->pending_events); 1592 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1584 if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC | 1593 tasklet_schedule(&host->tasklet);
1585 SDMMC_INT_SBE | SDMMC_INT_EBE)))
1586 tasklet_schedule(&host->tasklet);
1587 } 1594 }
1588 1595
1589 if (pending & SDMMC_INT_DATA_OVER) { 1596 if (pending & SDMMC_INT_DATA_OVER) {
1590 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); 1597 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1591 if (!host->data_status) 1598 if (!host->data_status)
1592 host->data_status = status; 1599 host->data_status = pending;
1593 smp_wmb(); 1600 smp_wmb();
1594 if (host->dir_status == DW_MCI_RECV_STATUS) { 1601 if (host->dir_status == DW_MCI_RECV_STATUS) {
1595 if (host->sg != NULL) 1602 if (host->sg != NULL)
1596 dw_mci_read_data_pio(host); 1603 dw_mci_read_data_pio(host);
1597 } 1604 }
1598 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 1605 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1599 tasklet_schedule(&host->tasklet); 1606 tasklet_schedule(&host->tasklet);
1600 } 1607 }
1601 1608
1602 if (pending & SDMMC_INT_RXDR) { 1609 if (pending & SDMMC_INT_RXDR) {
1603 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 1610 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1604 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) 1611 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
1605 dw_mci_read_data_pio(host); 1612 dw_mci_read_data_pio(host);
1606 } 1613 }
1607 1614
1608 if (pending & SDMMC_INT_TXDR) { 1615 if (pending & SDMMC_INT_TXDR) {
1609 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 1616 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1610 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) 1617 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
1611 dw_mci_write_data_pio(host); 1618 dw_mci_write_data_pio(host);
1612 } 1619 }
1613 1620
1614 if (pending & SDMMC_INT_CMD_DONE) { 1621 if (pending & SDMMC_INT_CMD_DONE) {
1615 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); 1622 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
1616 dw_mci_cmd_interrupt(host, status); 1623 dw_mci_cmd_interrupt(host, pending);
1617 } 1624 }
1618 1625
1619 if (pending & SDMMC_INT_CD) { 1626 if (pending & SDMMC_INT_CD) {
1620 mci_writel(host, RINTSTS, SDMMC_INT_CD); 1627 mci_writel(host, RINTSTS, SDMMC_INT_CD);
1621 queue_work(host->card_workqueue, &host->card_work); 1628 queue_work(host->card_workqueue, &host->card_work);
1622 } 1629 }
1623 1630
1624 /* Handle SDIO Interrupts */ 1631 /* Handle SDIO Interrupts */
1625 for (i = 0; i < host->num_slots; i++) { 1632 for (i = 0; i < host->num_slots; i++) {
1626 struct dw_mci_slot *slot = host->slot[i]; 1633 struct dw_mci_slot *slot = host->slot[i];
1627 if (pending & SDMMC_INT_SDIO(i)) { 1634 if (pending & SDMMC_INT_SDIO(i)) {
1628 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i)); 1635 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1629 mmc_signal_sdio_irq(slot->mmc); 1636 mmc_signal_sdio_irq(slot->mmc);
1630 } 1637 }
1631 } 1638 }
1632 1639
1633 } while (pass_count++ < 5); 1640 } while (pass_count++ < 5);
1634 1641
1635 #ifdef CONFIG_MMC_DW_IDMAC 1642 #ifdef CONFIG_MMC_DW_IDMAC
1636 /* Handle DMA interrupts */ 1643 /* Handle DMA interrupts */
1637 pending = mci_readl(host, IDSTS); 1644 pending = mci_readl(host, IDSTS);
1638 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 1645 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1639 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); 1646 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1640 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); 1647 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
1641 host->dma_ops->complete(host); 1648 host->dma_ops->complete(host);
1642 } 1649 }
1643 #endif 1650 #endif
1644 1651
1645 return IRQ_HANDLED; 1652 return IRQ_HANDLED;
1646 } 1653 }
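
    The interrupt handler is where most of this diff's substance lands.
    The old code read both RINTSTS (raw status) and MINTSTS (masked
    status), made its decisions on the masked value but saved the raw one
    into host->cmd_status/host->data_status; since MINTSTS is simply
    RINTSTS filtered through INTMASK, recording pending instead ensures a
    masked-off source can never leak into the saved status. The other
    change is that a data error now schedules the tasklet unconditionally,
    instead of only for error bits outside the DTO/DCRC/SBE/EBE set. An
    illustrative check of the raw/masked relationship:

        #include <stdio.h>

        int main(void)
        {
                unsigned rintsts = 0x8008;      /* raw: two sources fired */
                unsigned intmask = 0x0008;      /* ...but only one enabled */
                unsigned mintsts = rintsts & intmask;   /* what MINTSTS shows */

                printf("raw %#06x -> masked %#06x\n", rintsts, mintsts);
                return 0;
        }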
1647 1654
1648 static void dw_mci_work_routine_card(struct work_struct *work) 1655 static void dw_mci_work_routine_card(struct work_struct *work)
1649 { 1656 {
1650 struct dw_mci *host = container_of(work, struct dw_mci, card_work); 1657 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
1651 int i; 1658 int i;
1652 1659
1653 for (i = 0; i < host->num_slots; i++) { 1660 for (i = 0; i < host->num_slots; i++) {
1654 struct dw_mci_slot *slot = host->slot[i]; 1661 struct dw_mci_slot *slot = host->slot[i];
1655 struct mmc_host *mmc = slot->mmc; 1662 struct mmc_host *mmc = slot->mmc;
1656 struct mmc_request *mrq; 1663 struct mmc_request *mrq;
1657 int present; 1664 int present;
1658 u32 ctrl; 1665 u32 ctrl;
1659 1666
1660 present = dw_mci_get_cd(mmc); 1667 present = dw_mci_get_cd(mmc);
1661 while (present != slot->last_detect_state) { 1668 while (present != slot->last_detect_state) {
1662 dev_dbg(&slot->mmc->class_dev, "card %s\n", 1669 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1663 present ? "inserted" : "removed"); 1670 present ? "inserted" : "removed");
1664 1671
1665 /* Power up slot (before spin_lock, may sleep) */ 1672 /* Power up slot (before spin_lock, may sleep) */
1666 if (present != 0 && host->pdata->setpower) 1673 if (present != 0 && host->pdata->setpower)
1667 host->pdata->setpower(slot->id, mmc->ocr_avail); 1674 host->pdata->setpower(slot->id, mmc->ocr_avail);
1668 1675
1669 spin_lock_bh(&host->lock); 1676 spin_lock_bh(&host->lock);
1670 1677
1671 /* Card change detected */ 1678 /* Card change detected */
1672 slot->last_detect_state = present; 1679 slot->last_detect_state = present;
1673 1680
1674 /* Mark card as present if applicable */ 1681 /* Mark card as present if applicable */
1675 if (present != 0) 1682 if (present != 0)
1676 set_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1683 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1677 1684
1678 /* Clean up queue if present */ 1685 /* Clean up queue if present */
1679 mrq = slot->mrq; 1686 mrq = slot->mrq;
1680 if (mrq) { 1687 if (mrq) {
1681 if (mrq == host->mrq) { 1688 if (mrq == host->mrq) {
1682 host->data = NULL; 1689 host->data = NULL;
1683 host->cmd = NULL; 1690 host->cmd = NULL;
1684 1691
1685 switch (host->state) { 1692 switch (host->state) {
1686 case STATE_IDLE: 1693 case STATE_IDLE:
1687 break; 1694 break;
1688 case STATE_SENDING_CMD: 1695 case STATE_SENDING_CMD:
1689 mrq->cmd->error = -ENOMEDIUM; 1696 mrq->cmd->error = -ENOMEDIUM;
1690 if (!mrq->data) 1697 if (!mrq->data)
1691 break; 1698 break;
1692 /* fall through */ 1699 /* fall through */
1693 case STATE_SENDING_DATA: 1700 case STATE_SENDING_DATA:
1694 mrq->data->error = -ENOMEDIUM; 1701 mrq->data->error = -ENOMEDIUM;
1695 dw_mci_stop_dma(host); 1702 dw_mci_stop_dma(host);
1696 break; 1703 break;
1697 case STATE_DATA_BUSY: 1704 case STATE_DATA_BUSY:
1698 case STATE_DATA_ERROR: 1705 case STATE_DATA_ERROR:
1699 if (mrq->data->error == -EINPROGRESS) 1706 if (mrq->data->error == -EINPROGRESS)
1700 mrq->data->error = -ENOMEDIUM; 1707 mrq->data->error = -ENOMEDIUM;
1701 if (!mrq->stop) 1708 if (!mrq->stop)
1702 break; 1709 break;
1703 /* fall through */ 1710 /* fall through */
1704 case STATE_SENDING_STOP: 1711 case STATE_SENDING_STOP:
1705 mrq->stop->error = -ENOMEDIUM; 1712 mrq->stop->error = -ENOMEDIUM;
1706 break; 1713 break;
1707 } 1714 }
1708 1715
1709 dw_mci_request_end(host, mrq); 1716 dw_mci_request_end(host, mrq);
1710 } else { 1717 } else {
1711 list_del(&slot->queue_node); 1718 list_del(&slot->queue_node);
1712 mrq->cmd->error = -ENOMEDIUM; 1719 mrq->cmd->error = -ENOMEDIUM;
1713 if (mrq->data) 1720 if (mrq->data)
1714 mrq->data->error = -ENOMEDIUM; 1721 mrq->data->error = -ENOMEDIUM;
1715 if (mrq->stop) 1722 if (mrq->stop)
1716 mrq->stop->error = -ENOMEDIUM; 1723 mrq->stop->error = -ENOMEDIUM;
1717 1724
1718 spin_unlock(&host->lock); 1725 spin_unlock(&host->lock);
1719 mmc_request_done(slot->mmc, mrq); 1726 mmc_request_done(slot->mmc, mrq);
1720 spin_lock(&host->lock); 1727 spin_lock(&host->lock);
1721 } 1728 }
1722 } 1729 }
1723 1730
1724 /* Power down slot */ 1731 /* Power down slot */
1725 if (present == 0) { 1732 if (present == 0) {
1726 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1733 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1727 1734
1728 /* 1735 /*
1729 * Clear down the FIFO - doing so generates a 1736 * Clear down the FIFO - doing so generates a
1730 * block interrupt, hence setting the 1737 * block interrupt, hence setting the
1731 * scatter-gather pointer to NULL. 1738 * scatter-gather pointer to NULL.
1732 */ 1739 */
1733 sg_miter_stop(&host->sg_miter); 1740 sg_miter_stop(&host->sg_miter);
1734 host->sg = NULL; 1741 host->sg = NULL;
1735 1742
1736 ctrl = mci_readl(host, CTRL); 1743 ctrl = mci_readl(host, CTRL);
1737 ctrl |= SDMMC_CTRL_FIFO_RESET; 1744 ctrl |= SDMMC_CTRL_FIFO_RESET;
1738 mci_writel(host, CTRL, ctrl); 1745 mci_writel(host, CTRL, ctrl);
1739 1746
1740 #ifdef CONFIG_MMC_DW_IDMAC 1747 #ifdef CONFIG_MMC_DW_IDMAC
1741 ctrl = mci_readl(host, BMOD); 1748 ctrl = mci_readl(host, BMOD);
1742 /* Software reset of DMA */ 1749 /* Software reset of DMA */
1743 ctrl |= SDMMC_IDMAC_SWRESET; 1750 ctrl |= SDMMC_IDMAC_SWRESET;
1744 mci_writel(host, BMOD, ctrl); 1751 mci_writel(host, BMOD, ctrl);
1745 #endif 1752 #endif
1746 1753
1747 } 1754 }
1748 1755
1749 spin_unlock_bh(&host->lock); 1756 spin_unlock_bh(&host->lock);
1750 1757
1751 /* Power down slot (after spin_unlock, may sleep) */ 1758 /* Power down slot (after spin_unlock, may sleep) */
1752 if (present == 0 && host->pdata->setpower) 1759 if (present == 0 && host->pdata->setpower)
1753 host->pdata->setpower(slot->id, 0); 1760 host->pdata->setpower(slot->id, 0);
1754 1761
1755 present = dw_mci_get_cd(mmc); 1762 present = dw_mci_get_cd(mmc);
1756 } 1763 }
1757 1764
1758 mmc_detect_change(slot->mmc, 1765 mmc_detect_change(slot->mmc,
1759 msecs_to_jiffies(host->pdata->detect_delay_ms)); 1766 msecs_to_jiffies(host->pdata->detect_delay_ms));
1760 } 1767 }
1761 } 1768 }
1762 1769
1763 static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) 1770 static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1764 { 1771 {
1765 struct mmc_host *mmc; 1772 struct mmc_host *mmc;
1766 struct dw_mci_slot *slot; 1773 struct dw_mci_slot *slot;
1767 1774
1768 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->dev); 1775 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->dev);
1769 if (!mmc) 1776 if (!mmc)
1770 return -ENOMEM; 1777 return -ENOMEM;
1771 1778
1772 slot = mmc_priv(mmc); 1779 slot = mmc_priv(mmc);
1773 slot->id = id; 1780 slot->id = id;
1774 slot->mmc = mmc; 1781 slot->mmc = mmc;
1775 slot->host = host; 1782 slot->host = host;
1776 1783
1777 mmc->ops = &dw_mci_ops; 1784 mmc->ops = &dw_mci_ops;
1778 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510); 1785 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
1779 mmc->f_max = host->bus_hz; 1786 mmc->f_max = host->bus_hz;
1780 1787
1781 if (host->pdata->get_ocr) 1788 if (host->pdata->get_ocr)
1782 mmc->ocr_avail = host->pdata->get_ocr(id); 1789 mmc->ocr_avail = host->pdata->get_ocr(id);
1783 else 1790 else
1784 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1791 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1785 1792
1786 /* 1793 /*
1787 * Start with slot power disabled, it will be enabled when a card 1794 * Start with slot power disabled, it will be enabled when a card
1788 * is detected. 1795 * is detected.
1789 */ 1796 */
1790 if (host->pdata->setpower) 1797 if (host->pdata->setpower)
1791 host->pdata->setpower(id, 0); 1798 host->pdata->setpower(id, 0);
1792 1799
1793 if (host->pdata->caps) 1800 if (host->pdata->caps)
1794 mmc->caps = host->pdata->caps; 1801 mmc->caps = host->pdata->caps;
1795 1802
1796 if (host->pdata->caps2) 1803 if (host->pdata->caps2)
1797 mmc->caps2 = host->pdata->caps2; 1804 mmc->caps2 = host->pdata->caps2;
1798 1805
1799 if (host->pdata->get_bus_wd) 1806 if (host->pdata->get_bus_wd)
1800 if (host->pdata->get_bus_wd(slot->id) >= 4) 1807 if (host->pdata->get_bus_wd(slot->id) >= 4)
1801 mmc->caps |= MMC_CAP_4_BIT_DATA; 1808 mmc->caps |= MMC_CAP_4_BIT_DATA;
1802 1809
1803 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) 1810 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
1804 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 1811 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
1805 1812
1806 if (mmc->caps2 & MMC_CAP2_POWEROFF_NOTIFY) 1813 if (mmc->caps2 & MMC_CAP2_POWEROFF_NOTIFY)
1807 mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT; 1814 mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
1808 else 1815 else
1809 mmc->power_notify_type = MMC_HOST_PW_NOTIFY_NONE; 1816 mmc->power_notify_type = MMC_HOST_PW_NOTIFY_NONE;
1810 1817
1811 if (host->pdata->blk_settings) { 1818 if (host->pdata->blk_settings) {
1812 mmc->max_segs = host->pdata->blk_settings->max_segs; 1819 mmc->max_segs = host->pdata->blk_settings->max_segs;
1813 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size; 1820 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
1814 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count; 1821 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
1815 mmc->max_req_size = host->pdata->blk_settings->max_req_size; 1822 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
1816 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size; 1823 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
1817 } else { 1824 } else {
1818 /* Useful defaults if platform data is unset. */ 1825 /* Useful defaults if platform data is unset. */
1819 #ifdef CONFIG_MMC_DW_IDMAC 1826 #ifdef CONFIG_MMC_DW_IDMAC
1820 mmc->max_segs = host->ring_size; 1827 mmc->max_segs = host->ring_size;
1821 mmc->max_blk_size = 65536; 1828 mmc->max_blk_size = 65536;
1822 mmc->max_blk_count = host->ring_size; 1829 mmc->max_blk_count = host->ring_size;
1823 mmc->max_seg_size = 0x1000; 1830 mmc->max_seg_size = 0x1000;
1824 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count; 1831 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
1825 #else 1832 #else
1826 mmc->max_segs = 64; 1833 mmc->max_segs = 64;
1827 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */ 1834 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
1828 mmc->max_blk_count = 512; 1835 mmc->max_blk_count = 512;
1829 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1836 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1830 mmc->max_seg_size = mmc->max_req_size; 1837 mmc->max_seg_size = mmc->max_req_size;
1831 #endif /* CONFIG_MMC_DW_IDMAC */ 1838 #endif /* CONFIG_MMC_DW_IDMAC */
1832 } 1839 }
1833 1840
1834 host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); 1841 host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
1835 if (IS_ERR(host->vmmc)) { 1842 if (IS_ERR(host->vmmc)) {
1836 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc)); 1843 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
1837 host->vmmc = NULL; 1844 host->vmmc = NULL;
1838 } else 1845 } else
1839 regulator_enable(host->vmmc); 1846 regulator_enable(host->vmmc);
1840 1847
1841 if (dw_mci_get_cd(mmc)) 1848 if (dw_mci_get_cd(mmc))
1842 set_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1849 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1843 else 1850 else
1844 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1851 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1845 1852
1846 host->slot[id] = slot; 1853 host->slot[id] = slot;
1847 mmc_add_host(mmc); 1854 mmc_add_host(mmc);
1848 1855
1849 #if defined(CONFIG_DEBUG_FS) 1856 #if defined(CONFIG_DEBUG_FS)
1850 dw_mci_init_debugfs(slot); 1857 dw_mci_init_debugfs(slot);
1851 #endif 1858 #endif
1852 1859
1853 /* Card initially undetected */ 1860 /* Card initially undetected */
1854 slot->last_detect_state = 0; 1861 slot->last_detect_state = 0;
1855 1862
1856 /* 1863 /*
1857 * Card may have been plugged in prior to boot so we 1864 * Card may have been plugged in prior to boot so we
1858 * need to run the detect tasklet 1865 * need to run the detect tasklet
1859 */ 1866 */
1860 queue_work(host->card_workqueue, &host->card_work); 1867 queue_work(host->card_workqueue, &host->card_work);
1861 1868
1862 return 0; 1869 return 0;
1863 } 1870 }
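
    The f_min chosen in dw_mci_init_slot() comes from the controller's
    clock divider: CLKDIV is an 8-bit field and a value of n divides the
    bus clock by 2*n, so the slowest achievable card clock is
    bus_hz / (2 * 255) = bus_hz / 510, rounded up so the advertised
    minimum is never below what the divider can reach. As a quick check:

        #include <stdio.h>

        #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

        int main(void)
        {
                unsigned bus_hz = 100000000;    /* 100 MHz example */

                printf("f_min = %u Hz\n", DIV_ROUND_UP(bus_hz, 510));
                return 0;
        }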
1864 1871
1865 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id) 1872 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
1866 { 1873 {
1867 /* Shutdown detect IRQ */ 1874 /* Shutdown detect IRQ */
1868 if (slot->host->pdata->exit) 1875 if (slot->host->pdata->exit)
1869 slot->host->pdata->exit(id); 1876 slot->host->pdata->exit(id);
1870 1877
1871 /* Debugfs stuff is cleaned up by mmc core */ 1878 /* Debugfs stuff is cleaned up by mmc core */
1872 mmc_remove_host(slot->mmc); 1879 mmc_remove_host(slot->mmc);
1873 slot->host->slot[id] = NULL; 1880 slot->host->slot[id] = NULL;
1874 mmc_free_host(slot->mmc); 1881 mmc_free_host(slot->mmc);
1875 } 1882 }
1876 1883
1877 static void dw_mci_init_dma(struct dw_mci *host) 1884 static void dw_mci_init_dma(struct dw_mci *host)
1878 { 1885 {
1879 /* Alloc memory for sg translation */ 1886 /* Alloc memory for sg translation */
1880 host->sg_cpu = dma_alloc_coherent(&host->dev, PAGE_SIZE, 1887 host->sg_cpu = dma_alloc_coherent(&host->dev, PAGE_SIZE,
1881 &host->sg_dma, GFP_KERNEL); 1888 &host->sg_dma, GFP_KERNEL);
1882 if (!host->sg_cpu) { 1889 if (!host->sg_cpu) {
1883 dev_err(&host->dev, "%s: could not alloc DMA memory\n", 1890 dev_err(&host->dev, "%s: could not alloc DMA memory\n",
1884 __func__); 1891 __func__);
1885 goto no_dma; 1892 goto no_dma;
1886 } 1893 }
1887 1894
1888 /* Determine which DMA interface to use */ 1895 /* Determine which DMA interface to use */
1889 #ifdef CONFIG_MMC_DW_IDMAC 1896 #ifdef CONFIG_MMC_DW_IDMAC
1890 host->dma_ops = &dw_mci_idmac_ops; 1897 host->dma_ops = &dw_mci_idmac_ops;
1891 #endif 1898 #endif
1892 1899
1893 if (!host->dma_ops) 1900 if (!host->dma_ops)
1894 goto no_dma; 1901 goto no_dma;
1895 1902
1896 if (host->dma_ops->init && host->dma_ops->start && 1903 if (host->dma_ops->init && host->dma_ops->start &&
1897 host->dma_ops->stop && host->dma_ops->cleanup) { 1904 host->dma_ops->stop && host->dma_ops->cleanup) {
1898 if (host->dma_ops->init(host)) { 1905 if (host->dma_ops->init(host)) {
1899 dev_err(&host->dev, "%s: Unable to initialize " 1906 dev_err(&host->dev, "%s: Unable to initialize "
1900 "DMA Controller.\n", __func__); 1907 "DMA Controller.\n", __func__);
1901 goto no_dma; 1908 goto no_dma;
1902 } 1909 }
1903 } else { 1910 } else {
1904 dev_err(&host->dev, "DMA initialization not found.\n"); 1911 dev_err(&host->dev, "DMA initialization not found.\n");
1905 goto no_dma; 1912 goto no_dma;
1906 } 1913 }
1907 1914
1908 host->use_dma = 1; 1915 host->use_dma = 1;
1909 return; 1916 return;
1910 1917
1911 no_dma: 1918 no_dma:
1912 dev_info(&host->dev, "Using PIO mode.\n"); 1919 dev_info(&host->dev, "Using PIO mode.\n");
1913 host->use_dma = 0; 1920 host->use_dma = 0;
1914 return; 1921 return;
1915 } 1922 }
1916 1923
1917 static bool mci_wait_reset(struct device *dev, struct dw_mci *host) 1924 static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
1918 { 1925 {
1919 unsigned long timeout = jiffies + msecs_to_jiffies(500); 1926 unsigned long timeout = jiffies + msecs_to_jiffies(500);
1920 unsigned int ctrl; 1927 unsigned int ctrl;
1921 1928
1922 mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | 1929 mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
1923 SDMMC_CTRL_DMA_RESET)); 1930 SDMMC_CTRL_DMA_RESET));
1924 1931
1925 /* wait till resets clear */ 1932 /* wait till resets clear */
1926 do { 1933 do {
1927 ctrl = mci_readl(host, CTRL); 1934 ctrl = mci_readl(host, CTRL);
1928 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | 1935 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
1929 SDMMC_CTRL_DMA_RESET))) 1936 SDMMC_CTRL_DMA_RESET)))
1930 return true; 1937 return true;
1931 } while (time_before(jiffies, timeout)); 1938 } while (time_before(jiffies, timeout));
1932 1939
1933 dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl); 1940 dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
1934 1941
1935 return false; 1942 return false;
1936 } 1943 }
1937 1944
1938 int dw_mci_probe(struct dw_mci *host) 1945 int dw_mci_probe(struct dw_mci *host)
1939 { 1946 {
1940 int width, i, ret = 0; 1947 int width, i, ret = 0;
1941 u32 fifo_size; 1948 u32 fifo_size;
1942 1949
1943 if (!host->pdata || !host->pdata->init) { 1950 if (!host->pdata || !host->pdata->init) {
1944 dev_err(&host->dev, 1951 dev_err(&host->dev,
1945 "Platform data must supply init function\n"); 1952 "Platform data must supply init function\n");
1946 return -ENODEV; 1953 return -ENODEV;
1947 } 1954 }
1948 1955
1949 if (!host->pdata->select_slot && host->pdata->num_slots > 1) { 1956 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
1950 dev_err(&host->dev, 1957 dev_err(&host->dev,
1951 "Platform data must supply select_slot function\n"); 1958 "Platform data must supply select_slot function\n");
1952 return -ENODEV; 1959 return -ENODEV;
1953 } 1960 }
1954 1961
1955 if (!host->pdata->bus_hz) { 1962 if (!host->pdata->bus_hz) {
1956 dev_err(&host->dev, 1963 dev_err(&host->dev,
1957 "Platform data must supply bus speed\n"); 1964 "Platform data must supply bus speed\n");
1958 return -ENODEV; 1965 return -ENODEV;
1959 } 1966 }
1960 1967
1961 host->bus_hz = host->pdata->bus_hz; 1968 host->bus_hz = host->pdata->bus_hz;
1962 host->quirks = host->pdata->quirks; 1969 host->quirks = host->pdata->quirks;
1963 1970
1964 spin_lock_init(&host->lock); 1971 spin_lock_init(&host->lock);
1965 INIT_LIST_HEAD(&host->queue); 1972 INIT_LIST_HEAD(&host->queue);
1966 1973
1967 /* 1974 /*
1968 * Get the host data width - this assumes that HCON has been set with 1975 * Get the host data width - this assumes that HCON has been set with
1969 * the correct values. 1976 * the correct values.
1970 */ 1977 */
1971 i = (mci_readl(host, HCON) >> 7) & 0x7; 1978 i = (mci_readl(host, HCON) >> 7) & 0x7;
1972 if (!i) { 1979 if (!i) {
1973 host->push_data = dw_mci_push_data16; 1980 host->push_data = dw_mci_push_data16;
1974 host->pull_data = dw_mci_pull_data16; 1981 host->pull_data = dw_mci_pull_data16;
1975 width = 16; 1982 width = 16;
1976 host->data_shift = 1; 1983 host->data_shift = 1;
1977 } else if (i == 2) { 1984 } else if (i == 2) {
1978 host->push_data = dw_mci_push_data64; 1985 host->push_data = dw_mci_push_data64;
1979 host->pull_data = dw_mci_pull_data64; 1986 host->pull_data = dw_mci_pull_data64;
1980 width = 64; 1987 width = 64;
1981 host->data_shift = 3; 1988 host->data_shift = 3;
1982 } else { 1989 } else {
1983 /* Check for a reserved value and warn if one is seen */ 1990 /* Check for a reserved value and warn if one is seen */
1984 WARN((i != 1), 1991 WARN((i != 1),
1985 "HCON reports a reserved host data width!\n" 1992 "HCON reports a reserved host data width!\n"
1986 "Defaulting to 32-bit access.\n"); 1993 "Defaulting to 32-bit access.\n");
1987 host->push_data = dw_mci_push_data32; 1994 host->push_data = dw_mci_push_data32;
1988 host->pull_data = dw_mci_pull_data32; 1995 host->pull_data = dw_mci_pull_data32;
1989 width = 32; 1996 width = 32;
1990 host->data_shift = 2; 1997 host->data_shift = 2;
1991 } 1998 }
1992 1999
1993 /* Reset all blocks */ 2000 /* Reset all blocks */
1994 if (!mci_wait_reset(&host->dev, host)) 2001 if (!mci_wait_reset(&host->dev, host))
1995 return -ENODEV; 2002 return -ENODEV;
1996 2003
1997 host->dma_ops = host->pdata->dma_ops; 2004 host->dma_ops = host->pdata->dma_ops;
1998 dw_mci_init_dma(host); 2005 dw_mci_init_dma(host);
1999 2006
2000 /* Clear the interrupts for the host controller */ 2007 /* Clear the interrupts for the host controller */
2001 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2008 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2002 mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */ 2009 mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
2003 2010
2004 /* Put in max timeout */ 2011 /* Put in max timeout */
2005 mci_writel(host, TMOUT, 0xFFFFFFFF); 2012 mci_writel(host, TMOUT, 0xFFFFFFFF);
2006 2013
2007 /* 2014 /*
2008 * FIFO threshold settings: RxMark = fifo_size / 2 - 1, 2015 * FIFO threshold settings: RxMark = fifo_size / 2 - 1,
2009 * TxMark = fifo_size / 2, DMA size = 8 2016 * TxMark = fifo_size / 2, DMA size = 8
2010 */ 2017 */
2011 if (!host->pdata->fifo_depth) { 2018 if (!host->pdata->fifo_depth) {
2012 /* 2019 /*
2013 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may 2020 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2014 * have been overwritten by the bootloader, just like we're 2021 * have been overwritten by the bootloader, just like we're
2015 * about to do, so if you know the value for your hardware, you 2022 * about to do, so if you know the value for your hardware, you
2016 * should put it in the platform data. 2023 * should put it in the platform data.
2017 */ 2024 */
2018 fifo_size = mci_readl(host, FIFOTH); 2025 fifo_size = mci_readl(host, FIFOTH);
2019 fifo_size = 1 + ((fifo_size >> 16) & 0xfff); 2026 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2020 } else { 2027 } else {
2021 fifo_size = host->pdata->fifo_depth; 2028 fifo_size = host->pdata->fifo_depth;
2022 } 2029 }
2023 host->fifo_depth = fifo_size; 2030 host->fifo_depth = fifo_size;
2024 host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) | 2031 host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
2025 ((fifo_size/2) << 0)); 2032 ((fifo_size/2) << 0));
2026 mci_writel(host, FIFOTH, host->fifoth_val); 2033 mci_writel(host, FIFOTH, host->fifoth_val);
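	/*
	 * FIFOTH layout (assumed from the DesignWare databook, not from
	 * anything in this tree): bits 30:28 hold the DMA multiple-
	 * transaction size (0x2 selects bursts of 8), bits 27:16 hold
	 * RX_WMark and bits 11:0 hold TX_WMark, matching the shifts above.
	 */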
2027 2034
2028 /* disable clock to CIU */ 2035 /* disable clock to CIU */
2029 mci_writel(host, CLKENA, 0); 2036 mci_writel(host, CLKENA, 0);
2030 mci_writel(host, CLKSRC, 0); 2037 mci_writel(host, CLKSRC, 0);
2031 2038
2032 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); 2039 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2033 host->card_workqueue = alloc_workqueue("dw-mci-card", 2040 host->card_workqueue = alloc_workqueue("dw-mci-card",
2034 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1); 2041 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
2035 if (!host->card_workqueue) 2042 if (!host->card_workqueue)
2036 goto err_dmaunmap; 2043 goto err_dmaunmap;
2037 INIT_WORK(&host->card_work, dw_mci_work_routine_card); 2044 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2038 ret = request_irq(host->irq, dw_mci_interrupt, host->irq_flags, "dw-mci", host); 2045 ret = request_irq(host->irq, dw_mci_interrupt, host->irq_flags, "dw-mci", host);
2039 if (ret) 2046 if (ret)
2040 goto err_workqueue; 2047 goto err_workqueue;
2041 2048
2042 if (host->pdata->num_slots) 2049 if (host->pdata->num_slots)
2043 host->num_slots = host->pdata->num_slots; 2050 host->num_slots = host->pdata->num_slots;
2044 else 2051 else
2045 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1; 2052 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2046 2053
2047 /* We need at least one slot to succeed */ 2054 /* We need at least one slot to succeed */
2048 for (i = 0; i < host->num_slots; i++) { 2055 for (i = 0; i < host->num_slots; i++) {
2049 ret = dw_mci_init_slot(host, i); 2056 ret = dw_mci_init_slot(host, i);
2050 if (ret) { 2057 if (ret) {
2051 ret = -ENODEV; 2058 ret = -ENODEV;
2052 goto err_init_slot; 2059 goto err_init_slot;
2053 } 2060 }
2054 } 2061 }
2055 2062
2056 /* 2063 /*
2057 * In the 2.40a spec the data offset changed. 2064 * In the 2.40a spec the data offset changed.
2058 * Check the version ID and set the offset for the DATA register. 2065 * Check the version ID and set the offset for the DATA register.
2059 */ 2066 */
2060 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 2067 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2061 dev_info(&host->dev, "Version ID is %04x\n", host->verid); 2068 dev_info(&host->dev, "Version ID is %04x\n", host->verid);
2062 2069
2063 if (host->verid < DW_MMC_240A) 2070 if (host->verid < DW_MMC_240A)
2064 host->data_offset = DATA_OFFSET; 2071 host->data_offset = DATA_OFFSET;
2065 else 2072 else
2066 host->data_offset = DATA_240A_OFFSET; 2073 host->data_offset = DATA_240A_OFFSET;
2067 2074
2068 /* 2075 /*
2069 * Enable interrupts for command done, data over, data empty, card detect, 2076 * Enable interrupts for command done, data over, data empty, card detect,
2070 * receive ready, and errors such as transmit/receive timeout and CRC error 2077 * receive ready, and errors such as transmit/receive timeout and CRC error
2071 */ 2078 */
2072 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2079 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2073 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 2080 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2074 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 2081 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2075 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); 2082 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2076 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ 2083 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2077 2084
2078 dev_info(&host->dev, "DW MMC controller at irq %d, " 2085 dev_info(&host->dev, "DW MMC controller at irq %d, "
2079 "%d bit host data width, " 2086 "%d bit host data width, "
2080 "%u deep fifo\n", 2087 "%u deep fifo\n",
2081 host->irq, width, fifo_size); 2088 host->irq, width, fifo_size);
2082 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) 2089 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2083 dev_info(&host->dev, "Internal DMAC interrupt fix enabled.\n"); 2090 dev_info(&host->dev, "Internal DMAC interrupt fix enabled.\n");
2084 2091
2085 return 0; 2092 return 0;
2086 2093
2087 err_init_slot: 2094 err_init_slot:
2088 /* De-init any initialized slots */ 2095 /* De-init any initialized slots */
2089 while (i > 0) { 2096 while (i > 0) {
2090 if (host->slot[i]) 2097 if (host->slot[i])
2091 dw_mci_cleanup_slot(host->slot[i], i); 2098 dw_mci_cleanup_slot(host->slot[i], i);
2092 i--; 2099 i--;
2093 } 2100 }
2094 free_irq(host->irq, host); 2101 free_irq(host->irq, host);
2095 2102
2096 err_workqueue: 2103 err_workqueue:
2097 destroy_workqueue(host->card_workqueue); 2104 destroy_workqueue(host->card_workqueue);
2098 2105
2099 err_dmaunmap: 2106 err_dmaunmap:
2100 if (host->use_dma && host->dma_ops->exit) 2107 if (host->use_dma && host->dma_ops->exit)
2101 host->dma_ops->exit(host); 2108 host->dma_ops->exit(host);
2102 dma_free_coherent(&host->dev, PAGE_SIZE, 2109 dma_free_coherent(&host->dev, PAGE_SIZE,
2103 host->sg_cpu, host->sg_dma); 2110 host->sg_cpu, host->sg_dma);
2104 2111
2105 if (host->vmmc) { 2112 if (host->vmmc) {
2106 regulator_disable(host->vmmc); 2113 regulator_disable(host->vmmc);
2107 regulator_put(host->vmmc); 2114 regulator_put(host->vmmc);
2108 } 2115 }
2109 return ret; 2116 return ret;
2110 } 2117 }
2111 EXPORT_SYMBOL(dw_mci_probe); 2118 EXPORT_SYMBOL(dw_mci_probe);
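The error path above unwinds in reverse order of setup: slots, then the IRQ, then the workqueue, then the DMA resources and the regulator. One caveat, judging only from the code shown: the goto err_dmaunmap taken when alloc_workqueue() fails leaves ret at its initial 0, so that failure would be reported as success; an error code (e.g. -ENOMEM) would need to be set before the jump. A minimal sketch of the goto-unwind shape, with hypothetical resources a and b:

        #include <errno.h>
        #include <stdlib.h>

        /* Two-step setup illustrating the goto-unwind idiom used above. */
        static int demo_probe(void)
        {
                void *a, *b;
                int ret;

                a = malloc(16);
                if (!a)
                        return -ENOMEM;         /* nothing to unwind yet */

                b = malloc(16);
                if (!b) {
                        ret = -ENOMEM;          /* set ret before every goto */
                        goto err_free_a;
                }

                free(b);
                free(a);
                return 0;

        err_free_a:
                free(a);
                return ret;
        }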
2112 2119
2113 void dw_mci_remove(struct dw_mci *host) 2120 void dw_mci_remove(struct dw_mci *host)
2114 { 2121 {
2115 int i; 2122 int i;
2116 2123
2117 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2124 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2118 mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */ 2125 mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
2119 2126
2120 for (i = 0; i < host->num_slots; i++) { 2127 for (i = 0; i < host->num_slots; i++) {
2121 dev_dbg(&host->dev, "remove slot %d\n", i); 2128 dev_dbg(&host->dev, "remove slot %d\n", i);
2122 if (host->slot[i]) 2129 if (host->slot[i])
2123 dw_mci_cleanup_slot(host->slot[i], i); 2130 dw_mci_cleanup_slot(host->slot[i], i);
2124 } 2131 }
2125 2132
2126 /* disable clock to CIU */ 2133 /* disable clock to CIU */
2127 mci_writel(host, CLKENA, 0); 2134 mci_writel(host, CLKENA, 0);
2128 mci_writel(host, CLKSRC, 0); 2135 mci_writel(host, CLKSRC, 0);
2129 2136
2130 free_irq(host->irq, host); 2137 free_irq(host->irq, host);
2131 destroy_workqueue(host->card_workqueue); 2138 destroy_workqueue(host->card_workqueue);
2132 dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); 2139 dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
2133 2140
2134 if (host->use_dma && host->dma_ops->exit) 2141 if (host->use_dma && host->dma_ops->exit)
2135 host->dma_ops->exit(host); 2142 host->dma_ops->exit(host);
2136 2143
2137 if (host->vmmc) { 2144 if (host->vmmc) {
2138 regulator_disable(host->vmmc); 2145 regulator_disable(host->vmmc);
2139 regulator_put(host->vmmc); 2146 regulator_put(host->vmmc);
2140 } 2147 }
2141 2148
2142 } 2149 }
2143 EXPORT_SYMBOL(dw_mci_remove); 2150 EXPORT_SYMBOL(dw_mci_remove);
2144 2151
2145 2152
2146 2153
2147 #ifdef CONFIG_PM_SLEEP 2154 #ifdef CONFIG_PM_SLEEP
2148 /* 2155 /*
2149 * TODO: we should probably disable the clock to the card in the suspend path. 2156 * TODO: we should probably disable the clock to the card in the suspend path.
2150 */ 2157 */
2151 int dw_mci_suspend(struct dw_mci *host) 2158 int dw_mci_suspend(struct dw_mci *host)
2152 { 2159 {
2153 int i, ret = 0; 2160 int i, ret = 0;
2154 2161
2155 for (i = 0; i < host->num_slots; i++) { 2162 for (i = 0; i < host->num_slots; i++) {
2156 struct dw_mci_slot *slot = host->slot[i]; 2163 struct dw_mci_slot *slot = host->slot[i];
2157 if (!slot) 2164 if (!slot)
2158 continue; 2165 continue;
2159 ret = mmc_suspend_host(slot->mmc); 2166 ret = mmc_suspend_host(slot->mmc);
2160 if (ret < 0) { 2167 if (ret < 0) {
2161 while (--i >= 0) { 2168 while (--i >= 0) {
2162 slot = host->slot[i]; 2169 slot = host->slot[i];
2163 if (slot) 2170 if (slot)
2164 mmc_resume_host(host->slot[i]->mmc); 2171 mmc_resume_host(host->slot[i]->mmc);
2165 } 2172 }
2166 return ret; 2173 return ret;
2167 } 2174 }
2168 } 2175 }
2169 2176
2170 if (host->vmmc) 2177 if (host->vmmc)
2171 regulator_disable(host->vmmc); 2178 regulator_disable(host->vmmc);
2172 2179
2173 return 0; 2180 return 0;
2174 } 2181 }
2175 EXPORT_SYMBOL(dw_mci_suspend); 2182 EXPORT_SYMBOL(dw_mci_suspend);
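Note the rollback in the suspend loop above: if mmc_suspend_host() fails for slot i, the slots 0..i-1 that were already suspended are resumed again before the error is returned, so a failed suspend never leaves the controller with a mix of suspended and running slots.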
2176 2183
2177 int dw_mci_resume(struct dw_mci *host) 2184 int dw_mci_resume(struct dw_mci *host)
2178 { 2185 {
2179 int i, ret; 2186 int i, ret;
2180 2187
2181 if (host->vmmc) 2188 if (host->vmmc)
2182 regulator_enable(host->vmmc); 2189 regulator_enable(host->vmmc);
2183 2190
2184 if (!mci_wait_reset(&host->dev, host)) { 2191 if (!mci_wait_reset(&host->dev, host)) {
2185 ret = -ENODEV; 2192 ret = -ENODEV;
2186 return ret; 2193 return ret;
2187 } 2194 }
2188 2195
2189 if (host->use_dma && host->dma_ops->init) 2196 if (host->use_dma && host->dma_ops->init)
2190 host->dma_ops->init(host); 2197 host->dma_ops->init(host);
2191 2198
2192 /* Restore the saved FIFOTH register value */ 2199 /* Restore the saved FIFOTH register value */
2193 mci_writel(host, FIFOTH, host->fifoth_val); 2200 mci_writel(host, FIFOTH, host->fifoth_val);
2194 2201
2195 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2202 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2196 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 2203 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2197 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 2204 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2198 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); 2205 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2199 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 2206 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2200 2207
2201 for (i = 0; i < host->num_slots; i++) { 2208 for (i = 0; i < host->num_slots; i++) {
drivers/mmc/host/mxs-mmc.c
1 /* 1 /*
2 * Portions copyright (C) 2003 Russell King, PXA MMCI Driver 2 * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
3 * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver 3 * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
4 * 4 *
5 * Copyright 2008 Embedded Alley Solutions, Inc. 5 * Copyright 2008 Embedded Alley Solutions, Inc.
6 * Copyright 2009-2011 Freescale Semiconductor, Inc. 6 * Copyright 2009-2011 Freescale Semiconductor, Inc.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or 10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version. 11 * (at your option) any later version.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, 13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License along 18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc., 19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 */ 21 */
22 22
23 #include <linux/kernel.h> 23 #include <linux/kernel.h>
24 #include <linux/init.h> 24 #include <linux/init.h>
25 #include <linux/ioport.h> 25 #include <linux/ioport.h>
26 #include <linux/of.h> 26 #include <linux/of.h>
27 #include <linux/of_device.h> 27 #include <linux/of_device.h>
28 #include <linux/of_gpio.h> 28 #include <linux/of_gpio.h>
29 #include <linux/platform_device.h> 29 #include <linux/platform_device.h>
30 #include <linux/delay.h> 30 #include <linux/delay.h>
31 #include <linux/interrupt.h> 31 #include <linux/interrupt.h>
32 #include <linux/dma-mapping.h> 32 #include <linux/dma-mapping.h>
33 #include <linux/dmaengine.h> 33 #include <linux/dmaengine.h>
34 #include <linux/highmem.h> 34 #include <linux/highmem.h>
35 #include <linux/clk.h> 35 #include <linux/clk.h>
36 #include <linux/err.h> 36 #include <linux/err.h>
37 #include <linux/completion.h> 37 #include <linux/completion.h>
38 #include <linux/mmc/host.h> 38 #include <linux/mmc/host.h>
39 #include <linux/mmc/mmc.h> 39 #include <linux/mmc/mmc.h>
40 #include <linux/mmc/sdio.h> 40 #include <linux/mmc/sdio.h>
41 #include <linux/gpio.h> 41 #include <linux/gpio.h>
42 #include <linux/regulator/consumer.h> 42 #include <linux/regulator/consumer.h>
43 #include <linux/module.h> 43 #include <linux/module.h>
44 #include <linux/fsl/mxs-dma.h> 44 #include <linux/fsl/mxs-dma.h>
45 #include <linux/pinctrl/consumer.h> 45 #include <linux/pinctrl/consumer.h>
46 #include <linux/stmp_device.h> 46 #include <linux/stmp_device.h>
47 #include <linux/mmc/mxs-mmc.h> 47 #include <linux/mmc/mxs-mmc.h>
48 48
49 #define DRIVER_NAME "mxs-mmc" 49 #define DRIVER_NAME "mxs-mmc"
50 50
51 /* card detect polling timeout */ 51 /* card detect polling timeout */
52 #define MXS_MMC_DETECT_TIMEOUT (HZ/2) 52 #define MXS_MMC_DETECT_TIMEOUT (HZ/2)
53 53
54 #define ssp_is_old(host) ((host)->devid == IMX23_MMC) 54 #define ssp_is_old(host) ((host)->devid == IMX23_MMC)
55 55
56 /* SSP registers */ 56 /* SSP registers */
57 #define HW_SSP_CTRL0 0x000 57 #define HW_SSP_CTRL0 0x000
58 #define BM_SSP_CTRL0_RUN (1 << 29) 58 #define BM_SSP_CTRL0_RUN (1 << 29)
59 #define BM_SSP_CTRL0_SDIO_IRQ_CHECK (1 << 28) 59 #define BM_SSP_CTRL0_SDIO_IRQ_CHECK (1 << 28)
60 #define BM_SSP_CTRL0_IGNORE_CRC (1 << 26) 60 #define BM_SSP_CTRL0_IGNORE_CRC (1 << 26)
61 #define BM_SSP_CTRL0_READ (1 << 25) 61 #define BM_SSP_CTRL0_READ (1 << 25)
62 #define BM_SSP_CTRL0_DATA_XFER (1 << 24) 62 #define BM_SSP_CTRL0_DATA_XFER (1 << 24)
63 #define BP_SSP_CTRL0_BUS_WIDTH (22) 63 #define BP_SSP_CTRL0_BUS_WIDTH (22)
64 #define BM_SSP_CTRL0_BUS_WIDTH (0x3 << 22) 64 #define BM_SSP_CTRL0_BUS_WIDTH (0x3 << 22)
65 #define BM_SSP_CTRL0_WAIT_FOR_IRQ (1 << 21) 65 #define BM_SSP_CTRL0_WAIT_FOR_IRQ (1 << 21)
66 #define BM_SSP_CTRL0_LONG_RESP (1 << 19) 66 #define BM_SSP_CTRL0_LONG_RESP (1 << 19)
67 #define BM_SSP_CTRL0_GET_RESP (1 << 17) 67 #define BM_SSP_CTRL0_GET_RESP (1 << 17)
68 #define BM_SSP_CTRL0_ENABLE (1 << 16) 68 #define BM_SSP_CTRL0_ENABLE (1 << 16)
69 #define BP_SSP_CTRL0_XFER_COUNT (0) 69 #define BP_SSP_CTRL0_XFER_COUNT (0)
70 #define BM_SSP_CTRL0_XFER_COUNT (0xffff) 70 #define BM_SSP_CTRL0_XFER_COUNT (0xffff)
71 #define HW_SSP_CMD0 0x010 71 #define HW_SSP_CMD0 0x010
72 #define BM_SSP_CMD0_DBL_DATA_RATE_EN (1 << 25) 72 #define BM_SSP_CMD0_DBL_DATA_RATE_EN (1 << 25)
73 #define BM_SSP_CMD0_SLOW_CLKING_EN (1 << 22) 73 #define BM_SSP_CMD0_SLOW_CLKING_EN (1 << 22)
74 #define BM_SSP_CMD0_CONT_CLKING_EN (1 << 21) 74 #define BM_SSP_CMD0_CONT_CLKING_EN (1 << 21)
75 #define BM_SSP_CMD0_APPEND_8CYC (1 << 20) 75 #define BM_SSP_CMD0_APPEND_8CYC (1 << 20)
76 #define BP_SSP_CMD0_BLOCK_SIZE (16) 76 #define BP_SSP_CMD0_BLOCK_SIZE (16)
77 #define BM_SSP_CMD0_BLOCK_SIZE (0xf << 16) 77 #define BM_SSP_CMD0_BLOCK_SIZE (0xf << 16)
78 #define BP_SSP_CMD0_BLOCK_COUNT (8) 78 #define BP_SSP_CMD0_BLOCK_COUNT (8)
79 #define BM_SSP_CMD0_BLOCK_COUNT (0xff << 8) 79 #define BM_SSP_CMD0_BLOCK_COUNT (0xff << 8)
80 #define BP_SSP_CMD0_CMD (0) 80 #define BP_SSP_CMD0_CMD (0)
81 #define BM_SSP_CMD0_CMD (0xff) 81 #define BM_SSP_CMD0_CMD (0xff)
82 #define HW_SSP_CMD1 0x020 82 #define HW_SSP_CMD1 0x020
83 #define HW_SSP_XFER_SIZE 0x030 83 #define HW_SSP_XFER_SIZE 0x030
84 #define HW_SSP_BLOCK_SIZE 0x040 84 #define HW_SSP_BLOCK_SIZE 0x040
85 #define BP_SSP_BLOCK_SIZE_BLOCK_COUNT (4) 85 #define BP_SSP_BLOCK_SIZE_BLOCK_COUNT (4)
86 #define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4) 86 #define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4)
87 #define BP_SSP_BLOCK_SIZE_BLOCK_SIZE (0) 87 #define BP_SSP_BLOCK_SIZE_BLOCK_SIZE (0)
88 #define BM_SSP_BLOCK_SIZE_BLOCK_SIZE (0xf) 88 #define BM_SSP_BLOCK_SIZE_BLOCK_SIZE (0xf)
89 #define HW_SSP_TIMING(h) (ssp_is_old(h) ? 0x050 : 0x070) 89 #define HW_SSP_TIMING(h) (ssp_is_old(h) ? 0x050 : 0x070)
90 #define BP_SSP_TIMING_TIMEOUT (16) 90 #define BP_SSP_TIMING_TIMEOUT (16)
91 #define BM_SSP_TIMING_TIMEOUT (0xffff << 16) 91 #define BM_SSP_TIMING_TIMEOUT (0xffff << 16)
92 #define BP_SSP_TIMING_CLOCK_DIVIDE (8) 92 #define BP_SSP_TIMING_CLOCK_DIVIDE (8)
93 #define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8) 93 #define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8)
94 #define BP_SSP_TIMING_CLOCK_RATE (0) 94 #define BP_SSP_TIMING_CLOCK_RATE (0)
95 #define BM_SSP_TIMING_CLOCK_RATE (0xff) 95 #define BM_SSP_TIMING_CLOCK_RATE (0xff)
96 #define HW_SSP_CTRL1(h) (ssp_is_old(h) ? 0x060 : 0x080) 96 #define HW_SSP_CTRL1(h) (ssp_is_old(h) ? 0x060 : 0x080)
97 #define BM_SSP_CTRL1_SDIO_IRQ (1 << 31) 97 #define BM_SSP_CTRL1_SDIO_IRQ (1 << 31)
98 #define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30) 98 #define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30)
99 #define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29) 99 #define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29)
100 #define BM_SSP_CTRL1_RESP_ERR_IRQ_EN (1 << 28) 100 #define BM_SSP_CTRL1_RESP_ERR_IRQ_EN (1 << 28)
101 #define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ (1 << 27) 101 #define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ (1 << 27)
102 #define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN (1 << 26) 102 #define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN (1 << 26)
103 #define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ (1 << 25) 103 #define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ (1 << 25)
104 #define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN (1 << 24) 104 #define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN (1 << 24)
105 #define BM_SSP_CTRL1_DATA_CRC_IRQ (1 << 23) 105 #define BM_SSP_CTRL1_DATA_CRC_IRQ (1 << 23)
106 #define BM_SSP_CTRL1_DATA_CRC_IRQ_EN (1 << 22) 106 #define BM_SSP_CTRL1_DATA_CRC_IRQ_EN (1 << 22)
107 #define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ (1 << 21) 107 #define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ (1 << 21)
108 #define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ_EN (1 << 20) 108 #define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ_EN (1 << 20)
109 #define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ (1 << 17) 109 #define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ (1 << 17)
110 #define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN (1 << 16) 110 #define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN (1 << 16)
111 #define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ (1 << 15) 111 #define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ (1 << 15)
112 #define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ_EN (1 << 14) 112 #define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ_EN (1 << 14)
113 #define BM_SSP_CTRL1_DMA_ENABLE (1 << 13) 113 #define BM_SSP_CTRL1_DMA_ENABLE (1 << 13)
114 #define BM_SSP_CTRL1_POLARITY (1 << 9) 114 #define BM_SSP_CTRL1_POLARITY (1 << 9)
115 #define BP_SSP_CTRL1_WORD_LENGTH (4) 115 #define BP_SSP_CTRL1_WORD_LENGTH (4)
116 #define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4) 116 #define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4)
117 #define BP_SSP_CTRL1_SSP_MODE (0) 117 #define BP_SSP_CTRL1_SSP_MODE (0)
118 #define BM_SSP_CTRL1_SSP_MODE (0xf) 118 #define BM_SSP_CTRL1_SSP_MODE (0xf)
119 #define HW_SSP_SDRESP0(h) (ssp_is_old(h) ? 0x080 : 0x0a0) 119 #define HW_SSP_SDRESP0(h) (ssp_is_old(h) ? 0x080 : 0x0a0)
120 #define HW_SSP_SDRESP1(h) (ssp_is_old(h) ? 0x090 : 0x0b0) 120 #define HW_SSP_SDRESP1(h) (ssp_is_old(h) ? 0x090 : 0x0b0)
121 #define HW_SSP_SDRESP2(h) (ssp_is_old(h) ? 0x0a0 : 0x0c0) 121 #define HW_SSP_SDRESP2(h) (ssp_is_old(h) ? 0x0a0 : 0x0c0)
122 #define HW_SSP_SDRESP3(h) (ssp_is_old(h) ? 0x0b0 : 0x0d0) 122 #define HW_SSP_SDRESP3(h) (ssp_is_old(h) ? 0x0b0 : 0x0d0)
123 #define HW_SSP_STATUS(h) (ssp_is_old(h) ? 0x0c0 : 0x100) 123 #define HW_SSP_STATUS(h) (ssp_is_old(h) ? 0x0c0 : 0x100)
124 #define BM_SSP_STATUS_CARD_DETECT (1 << 28) 124 #define BM_SSP_STATUS_CARD_DETECT (1 << 28)
125 #define BM_SSP_STATUS_SDIO_IRQ (1 << 17) 125 #define BM_SSP_STATUS_SDIO_IRQ (1 << 17)
126 126
127 #define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field) 127 #define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field)
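BF_SSP relies on token pasting: the field name is expanded against the matching BP_SSP_* (bit position) and BM_SSP_* (bit mask) pairs defined above, so the value is shifted into place and clamped to the field's width. A short sketch of the expansion, using definitions from this header:

        /* BF_SSP(0x7, CTRL1_WORD_LENGTH)
         *   => ((0x7 << BP_SSP_CTRL1_WORD_LENGTH) & BM_SSP_CTRL1_WORD_LENGTH)
         *   => ((0x7 << 4) & (0xf << 4)) == 0x70
         */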
128 128
129 #define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \ 129 #define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \
130 BM_SSP_CTRL1_RESP_ERR_IRQ | \ 130 BM_SSP_CTRL1_RESP_ERR_IRQ | \
131 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \ 131 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \
132 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \ 132 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \
133 BM_SSP_CTRL1_DATA_CRC_IRQ | \ 133 BM_SSP_CTRL1_DATA_CRC_IRQ | \
134 BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \ 134 BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \
135 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \ 135 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \
136 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ) 136 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)
137 137
138 #define SSP_PIO_NUM 3 138 #define SSP_PIO_NUM 3
139 139
140 enum mxs_mmc_id { 140 enum mxs_mmc_id {
141 IMX23_MMC, 141 IMX23_MMC,
142 IMX28_MMC, 142 IMX28_MMC,
143 }; 143 };
144 144
145 struct mxs_mmc_host { 145 struct mxs_mmc_host {
146 struct mmc_host *mmc; 146 struct mmc_host *mmc;
147 struct mmc_request *mrq; 147 struct mmc_request *mrq;
148 struct mmc_command *cmd; 148 struct mmc_command *cmd;
149 struct mmc_data *data; 149 struct mmc_data *data;
150 150
151 void __iomem *base; 151 void __iomem *base;
152 int dma_channel; 152 int dma_channel;
153 struct clk *clk; 153 struct clk *clk;
154 unsigned int clk_rate; 154 unsigned int clk_rate;
155 155
156 struct dma_chan *dmach; 156 struct dma_chan *dmach;
157 struct mxs_dma_data dma_data; 157 struct mxs_dma_data dma_data;
158 unsigned int dma_dir; 158 unsigned int dma_dir;
159 enum dma_transfer_direction slave_dirn; 159 enum dma_transfer_direction slave_dirn;
160 u32 ssp_pio_words[SSP_PIO_NUM]; 160 u32 ssp_pio_words[SSP_PIO_NUM];
161 161
162 enum mxs_mmc_id devid; 162 enum mxs_mmc_id devid;
163 unsigned char bus_width; 163 unsigned char bus_width;
164 spinlock_t lock; 164 spinlock_t lock;
165 int sdio_irq_en; 165 int sdio_irq_en;
166 int wp_gpio; 166 int wp_gpio;
167 bool wp_inverted; 167 bool wp_inverted;
168 }; 168 };
169 169
170 static int mxs_mmc_get_ro(struct mmc_host *mmc) 170 static int mxs_mmc_get_ro(struct mmc_host *mmc)
171 { 171 {
172 struct mxs_mmc_host *host = mmc_priv(mmc); 172 struct mxs_mmc_host *host = mmc_priv(mmc);
173 int ret; 173 int ret;
174 174
175 if (!gpio_is_valid(host->wp_gpio)) 175 if (!gpio_is_valid(host->wp_gpio))
176 return -EINVAL; 176 return -EINVAL;
177 177
178 ret = gpio_get_value(host->wp_gpio); 178 ret = gpio_get_value(host->wp_gpio);
179 179
180 if (host->wp_inverted) 180 if (host->wp_inverted)
181 ret = !ret; 181 ret = !ret;
182 182
183 return ret; 183 return ret;
184 } 184 }
185 185
186 static int mxs_mmc_get_cd(struct mmc_host *mmc) 186 static int mxs_mmc_get_cd(struct mmc_host *mmc)
187 { 187 {
188 struct mxs_mmc_host *host = mmc_priv(mmc); 188 struct mxs_mmc_host *host = mmc_priv(mmc);
189 189
190 return !(readl(host->base + HW_SSP_STATUS(host)) & 190 return !(readl(host->base + HW_SSP_STATUS(host)) &
191 BM_SSP_STATUS_CARD_DETECT); 191 BM_SSP_STATUS_CARD_DETECT);
192 } 192 }
193 193
194 static void mxs_mmc_reset(struct mxs_mmc_host *host) 194 static void mxs_mmc_reset(struct mxs_mmc_host *host)
195 { 195 {
196 u32 ctrl0, ctrl1; 196 u32 ctrl0, ctrl1;
197 197
198 stmp_reset_block(host->base); 198 stmp_reset_block(host->base);
199 199
200 ctrl0 = BM_SSP_CTRL0_IGNORE_CRC; 200 ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
201 ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) | 201 ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
202 BF_SSP(0x7, CTRL1_WORD_LENGTH) | 202 BF_SSP(0x7, CTRL1_WORD_LENGTH) |
203 BM_SSP_CTRL1_DMA_ENABLE | 203 BM_SSP_CTRL1_DMA_ENABLE |
204 BM_SSP_CTRL1_POLARITY | 204 BM_SSP_CTRL1_POLARITY |
205 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN | 205 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
206 BM_SSP_CTRL1_DATA_CRC_IRQ_EN | 206 BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
207 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN | 207 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
208 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN | 208 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
209 BM_SSP_CTRL1_RESP_ERR_IRQ_EN; 209 BM_SSP_CTRL1_RESP_ERR_IRQ_EN;
210 210
211 writel(BF_SSP(0xffff, TIMING_TIMEOUT) | 211 writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
212 BF_SSP(2, TIMING_CLOCK_DIVIDE) | 212 BF_SSP(2, TIMING_CLOCK_DIVIDE) |
213 BF_SSP(0, TIMING_CLOCK_RATE), 213 BF_SSP(0, TIMING_CLOCK_RATE),
214 host->base + HW_SSP_TIMING(host)); 214 host->base + HW_SSP_TIMING(host));
215 215
216 if (host->sdio_irq_en) { 216 if (host->sdio_irq_en) {
217 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; 217 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
218 ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN; 218 ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
219 } 219 }
220 220
221 writel(ctrl0, host->base + HW_SSP_CTRL0); 221 writel(ctrl0, host->base + HW_SSP_CTRL0);
222 writel(ctrl1, host->base + HW_SSP_CTRL1(host)); 222 writel(ctrl1, host->base + HW_SSP_CTRL1(host));
223 } 223 }
224 224
225 static void mxs_mmc_start_cmd(struct mxs_mmc_host *host, 225 static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
226 struct mmc_command *cmd); 226 struct mmc_command *cmd);
227 227
228 static void mxs_mmc_request_done(struct mxs_mmc_host *host) 228 static void mxs_mmc_request_done(struct mxs_mmc_host *host)
229 { 229 {
230 struct mmc_command *cmd = host->cmd; 230 struct mmc_command *cmd = host->cmd;
231 struct mmc_data *data = host->data; 231 struct mmc_data *data = host->data;
232 struct mmc_request *mrq = host->mrq; 232 struct mmc_request *mrq = host->mrq;
233 233
234 if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) { 234 if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
235 if (mmc_resp_type(cmd) & MMC_RSP_136) { 235 if (mmc_resp_type(cmd) & MMC_RSP_136) {
236 cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0(host)); 236 cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0(host));
237 cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1(host)); 237 cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1(host));
238 cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2(host)); 238 cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2(host));
239 cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3(host)); 239 cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3(host));
240 } else { 240 } else {
241 cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0(host)); 241 cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0(host));
242 } 242 }
243 } 243 }
244 244
245 if (data) { 245 if (data) {
246 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 246 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
247 data->sg_len, host->dma_dir); 247 data->sg_len, host->dma_dir);
248 /* 248 /*
249 * If there was an error on any block, we mark all 249 * If there was an error on any block, we mark all
250 * data blocks as being in error. 250 * data blocks as being in error.
251 */ 251 */
252 if (!data->error) 252 if (!data->error)
253 data->bytes_xfered = data->blocks * data->blksz; 253 data->bytes_xfered = data->blocks * data->blksz;
254 else 254 else
255 data->bytes_xfered = 0; 255 data->bytes_xfered = 0;
256 256
257 host->data = NULL; 257 host->data = NULL;
258 if (mrq->stop) { 258 if (mrq->stop) {
259 mxs_mmc_start_cmd(host, mrq->stop); 259 mxs_mmc_start_cmd(host, mrq->stop);
260 return; 260 return;
261 } 261 }
262 } 262 }
263 263
264 host->mrq = NULL; 264 host->mrq = NULL;
265 mmc_request_done(host->mmc, mrq); 265 mmc_request_done(host->mmc, mrq);
266 } 266 }
267 267
268 static void mxs_mmc_dma_irq_callback(void *param) 268 static void mxs_mmc_dma_irq_callback(void *param)
269 { 269 {
270 struct mxs_mmc_host *host = param; 270 struct mxs_mmc_host *host = param;
271 271
272 mxs_mmc_request_done(host); 272 mxs_mmc_request_done(host);
273 } 273 }
274 274
275 static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id) 275 static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
276 { 276 {
277 struct mxs_mmc_host *host = dev_id; 277 struct mxs_mmc_host *host = dev_id;
278 struct mmc_command *cmd = host->cmd; 278 struct mmc_command *cmd = host->cmd;
279 struct mmc_data *data = host->data; 279 struct mmc_data *data = host->data;
280 u32 stat; 280 u32 stat;
281 281
282 spin_lock(&host->lock); 282 spin_lock(&host->lock);
283 283
284 stat = readl(host->base + HW_SSP_CTRL1(host)); 284 stat = readl(host->base + HW_SSP_CTRL1(host));
285 writel(stat & MXS_MMC_IRQ_BITS, 285 writel(stat & MXS_MMC_IRQ_BITS,
286 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR); 286 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
287 287
288 spin_unlock(&host->lock);
289
288 if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) 290 if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
289 mmc_signal_sdio_irq(host->mmc); 291 mmc_signal_sdio_irq(host->mmc);
290 292
291 spin_unlock(&host->lock);
292
293 if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ) 293 if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
294 cmd->error = -ETIMEDOUT; 294 cmd->error = -ETIMEDOUT;
295 else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ) 295 else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
296 cmd->error = -EIO; 296 cmd->error = -EIO;
297 297
298 if (data) { 298 if (data) {
299 if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | 299 if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
300 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ)) 300 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
301 data->error = -ETIMEDOUT; 301 data->error = -ETIMEDOUT;
302 else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ) 302 else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
303 data->error = -EILSEQ; 303 data->error = -EILSEQ;
304 else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | 304 else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
305 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)) 305 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
306 data->error = -EIO; 306 data->error = -EIO;
307 } 307 }
308 308
309 return IRQ_HANDLED; 309 return IRQ_HANDLED;
310 } 310 }
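This hunk is the heart of the recursion-deadlock fix: in this kernel, mmc_signal_sdio_irq() first calls back into the host's enable_sdio_irq() op to mask the interrupt, and mxs_mmc_enable_sdio_irq() takes host->lock. Signaling while the handler still held the lock therefore spun on a lock the CPU already owned; the new ordering acknowledges the status under the lock and signals only after dropping it. Schematically (a sketch, not code from this tree; read_and_ack_status() is a hypothetical stand-in):

        /* old ordering: deadlocks */
        spin_lock(&host->lock);
        mmc_signal_sdio_irq(mmc);       /* -> mxs_mmc_enable_sdio_irq()
                                           -> spin_lock_irqsave(&host->lock) */
        spin_unlock(&host->lock);

        /* new ordering: never call back into the MMC core under the lock */
        spin_lock(&host->lock);
        stat = read_and_ack_status(host);
        spin_unlock(&host->lock);
        if (stat & BM_SSP_CTRL1_SDIO_IRQ)
                mmc_signal_sdio_irq(mmc);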
311 311
312 static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( 312 static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
313 struct mxs_mmc_host *host, unsigned long flags) 313 struct mxs_mmc_host *host, unsigned long flags)
314 { 314 {
315 struct dma_async_tx_descriptor *desc; 315 struct dma_async_tx_descriptor *desc;
316 struct mmc_data *data = host->data; 316 struct mmc_data *data = host->data;
317 struct scatterlist *sgl; 317 struct scatterlist *sgl;
318 unsigned int sg_len; 318 unsigned int sg_len;
319 319
320 if (data) { 320 if (data) {
321 /* data */ 321 /* data */
322 dma_map_sg(mmc_dev(host->mmc), data->sg, 322 dma_map_sg(mmc_dev(host->mmc), data->sg,
323 data->sg_len, host->dma_dir); 323 data->sg_len, host->dma_dir);
324 sgl = data->sg; 324 sgl = data->sg;
325 sg_len = data->sg_len; 325 sg_len = data->sg_len;
326 } else { 326 } else {
327 /* pio */ 327 /* pio */
328 sgl = (struct scatterlist *) host->ssp_pio_words; 328 sgl = (struct scatterlist *) host->ssp_pio_words;
329 sg_len = SSP_PIO_NUM; 329 sg_len = SSP_PIO_NUM;
330 } 330 }
331 331
332 desc = dmaengine_prep_slave_sg(host->dmach, 332 desc = dmaengine_prep_slave_sg(host->dmach,
333 sgl, sg_len, host->slave_dirn, flags); 333 sgl, sg_len, host->slave_dirn, flags);
334 if (desc) { 334 if (desc) {
335 desc->callback = mxs_mmc_dma_irq_callback; 335 desc->callback = mxs_mmc_dma_irq_callback;
336 desc->callback_param = host; 336 desc->callback_param = host;
337 } else { 337 } else {
338 if (data) 338 if (data)
339 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 339 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
340 data->sg_len, host->dma_dir); 340 data->sg_len, host->dma_dir);
341 } 341 }
342 342
343 return desc; 343 return desc;
344 } 344 }
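mxs_mmc_prep_dma() does double duty. With data present it maps and submits a real scatterlist; without data it casts the three ssp_pio_words to a struct scatterlist pointer. That cast is safe only because of a driver-specific contract with mxs-dma (assumed from that driver, not shown here): when the transfer direction is DMA_TRANS_NONE, mxs-dma reinterprets the sgl argument as an array of u32 PIO words to be written into the device's registers (here CTRL0, CMD0 and CMD1), with sg_len (SSP_PIO_NUM == 3) counting the words. This is not generic dmaengine behaviour and would break with any other DMA provider.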
345 345
346 static void mxs_mmc_bc(struct mxs_mmc_host *host) 346 static void mxs_mmc_bc(struct mxs_mmc_host *host)
347 { 347 {
348 struct mmc_command *cmd = host->cmd; 348 struct mmc_command *cmd = host->cmd;
349 struct dma_async_tx_descriptor *desc; 349 struct dma_async_tx_descriptor *desc;
350 u32 ctrl0, cmd0, cmd1; 350 u32 ctrl0, cmd0, cmd1;
351 351
352 ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC; 352 ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
353 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC; 353 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
354 cmd1 = cmd->arg; 354 cmd1 = cmd->arg;
355 355
356 if (host->sdio_irq_en) { 356 if (host->sdio_irq_en) {
357 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; 357 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
358 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; 358 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
359 } 359 }
360 360
361 host->ssp_pio_words[0] = ctrl0; 361 host->ssp_pio_words[0] = ctrl0;
362 host->ssp_pio_words[1] = cmd0; 362 host->ssp_pio_words[1] = cmd0;
363 host->ssp_pio_words[2] = cmd1; 363 host->ssp_pio_words[2] = cmd1;
364 host->dma_dir = DMA_NONE; 364 host->dma_dir = DMA_NONE;
365 host->slave_dirn = DMA_TRANS_NONE; 365 host->slave_dirn = DMA_TRANS_NONE;
366 desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK); 366 desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
367 if (!desc) 367 if (!desc)
368 goto out; 368 goto out;
369 369
370 dmaengine_submit(desc); 370 dmaengine_submit(desc);
371 dma_async_issue_pending(host->dmach); 371 dma_async_issue_pending(host->dmach);
372 return; 372 return;
373 373
374 out: 374 out:
375 dev_warn(mmc_dev(host->mmc), 375 dev_warn(mmc_dev(host->mmc),
376 "%s: failed to prep dma\n", __func__); 376 "%s: failed to prep dma\n", __func__);
377 } 377 }
378 378
379 static void mxs_mmc_ac(struct mxs_mmc_host *host) 379 static void mxs_mmc_ac(struct mxs_mmc_host *host)
380 { 380 {
381 struct mmc_command *cmd = host->cmd; 381 struct mmc_command *cmd = host->cmd;
382 struct dma_async_tx_descriptor *desc; 382 struct dma_async_tx_descriptor *desc;
383 u32 ignore_crc, get_resp, long_resp; 383 u32 ignore_crc, get_resp, long_resp;
384 u32 ctrl0, cmd0, cmd1; 384 u32 ctrl0, cmd0, cmd1;
385 385
386 ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ? 386 ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
387 0 : BM_SSP_CTRL0_IGNORE_CRC; 387 0 : BM_SSP_CTRL0_IGNORE_CRC;
388 get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ? 388 get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
389 BM_SSP_CTRL0_GET_RESP : 0; 389 BM_SSP_CTRL0_GET_RESP : 0;
390 long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ? 390 long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
391 BM_SSP_CTRL0_LONG_RESP : 0; 391 BM_SSP_CTRL0_LONG_RESP : 0;
392 392
393 ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp; 393 ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
394 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); 394 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
395 cmd1 = cmd->arg; 395 cmd1 = cmd->arg;
396 396
397 if (host->sdio_irq_en) { 397 if (host->sdio_irq_en) {
398 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; 398 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
399 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; 399 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
400 } 400 }
401 401
402 host->ssp_pio_words[0] = ctrl0; 402 host->ssp_pio_words[0] = ctrl0;
403 host->ssp_pio_words[1] = cmd0; 403 host->ssp_pio_words[1] = cmd0;
404 host->ssp_pio_words[2] = cmd1; 404 host->ssp_pio_words[2] = cmd1;
405 host->dma_dir = DMA_NONE; 405 host->dma_dir = DMA_NONE;
406 host->slave_dirn = DMA_TRANS_NONE; 406 host->slave_dirn = DMA_TRANS_NONE;
407 desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK); 407 desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
408 if (!desc) 408 if (!desc)
409 goto out; 409 goto out;
410 410
411 dmaengine_submit(desc); 411 dmaengine_submit(desc);
412 dma_async_issue_pending(host->dmach); 412 dma_async_issue_pending(host->dmach);
413 return; 413 return;
414 414
415 out: 415 out:
416 dev_warn(mmc_dev(host->mmc), 416 dev_warn(mmc_dev(host->mmc),
417 "%s: failed to prep dma\n", __func__); 417 "%s: failed to prep dma\n", __func__);
418 } 418 }
419 419
420 static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns) 420 static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
421 { 421 {
422 const unsigned int ssp_timeout_mul = 4096; 422 const unsigned int ssp_timeout_mul = 4096;
423 /* 423 /*
424 * Calculate ticks in ms, since tick counts computed 424 * Calculate ticks in ms, since tick counts computed
425 * directly from ns might overflow 425 * directly from ns might overflow
426 */ 426 */
427 const unsigned int clock_per_ms = clock_rate / 1000; 427 const unsigned int clock_per_ms = clock_rate / 1000;
428 const unsigned int ms = ns / 1000; 428 const unsigned int ms = ns / 1000;
429 const unsigned int ticks = ms * clock_per_ms; 429 const unsigned int ticks = ms * clock_per_ms;
430 const unsigned int ssp_ticks = ticks / ssp_timeout_mul; 430 const unsigned int ssp_ticks = ticks / ssp_timeout_mul;
431 431
432 WARN_ON(ssp_ticks == 0); 432 WARN_ON(ssp_ticks == 0);
433 return ssp_ticks; 433 return ssp_ticks;
434 } 434 }
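A worked example, following the code as written: with clock_rate = 96,000,000 (96 MHz) and ns = 100,000, clock_per_ms = 96,000, ms = 100, ticks = 9,600,000 and ssp_ticks = 9,600,000 / 4096 = 2343, which fits the unsigned short return type. Note that ns / 1000 strictly yields microseconds rather than milliseconds, so the intermediate names overstate the unit and the resulting timeout errs on the long side; the WARN_ON only catches the count collapsing to zero.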
435 435
436 static void mxs_mmc_adtc(struct mxs_mmc_host *host) 436 static void mxs_mmc_adtc(struct mxs_mmc_host *host)
437 { 437 {
438 struct mmc_command *cmd = host->cmd; 438 struct mmc_command *cmd = host->cmd;
439 struct mmc_data *data = cmd->data; 439 struct mmc_data *data = cmd->data;
440 struct dma_async_tx_descriptor *desc; 440 struct dma_async_tx_descriptor *desc;
441 struct scatterlist *sgl = data->sg, *sg; 441 struct scatterlist *sgl = data->sg, *sg;
442 unsigned int sg_len = data->sg_len; 442 unsigned int sg_len = data->sg_len;
443 int i; 443 int i;
444 444
445 unsigned short dma_data_dir, timeout; 445 unsigned short dma_data_dir, timeout;
446 enum dma_transfer_direction slave_dirn; 446 enum dma_transfer_direction slave_dirn;
447 unsigned int data_size = 0, log2_blksz; 447 unsigned int data_size = 0, log2_blksz;
448 unsigned int blocks = data->blocks; 448 unsigned int blocks = data->blocks;
449 449
450 u32 ignore_crc, get_resp, long_resp, read; 450 u32 ignore_crc, get_resp, long_resp, read;
451 u32 ctrl0, cmd0, cmd1, val; 451 u32 ctrl0, cmd0, cmd1, val;
452 452
453 ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ? 453 ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
454 0 : BM_SSP_CTRL0_IGNORE_CRC; 454 0 : BM_SSP_CTRL0_IGNORE_CRC;
455 get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ? 455 get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
456 BM_SSP_CTRL0_GET_RESP : 0; 456 BM_SSP_CTRL0_GET_RESP : 0;
457 long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ? 457 long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
458 BM_SSP_CTRL0_LONG_RESP : 0; 458 BM_SSP_CTRL0_LONG_RESP : 0;
459 459
460 if (data->flags & MMC_DATA_WRITE) { 460 if (data->flags & MMC_DATA_WRITE) {
461 dma_data_dir = DMA_TO_DEVICE; 461 dma_data_dir = DMA_TO_DEVICE;
462 slave_dirn = DMA_MEM_TO_DEV; 462 slave_dirn = DMA_MEM_TO_DEV;
463 read = 0; 463 read = 0;
464 } else { 464 } else {
465 dma_data_dir = DMA_FROM_DEVICE; 465 dma_data_dir = DMA_FROM_DEVICE;
466 slave_dirn = DMA_DEV_TO_MEM; 466 slave_dirn = DMA_DEV_TO_MEM;
467 read = BM_SSP_CTRL0_READ; 467 read = BM_SSP_CTRL0_READ;
468 } 468 }
469 469
470 ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) | 470 ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
471 ignore_crc | get_resp | long_resp | 471 ignore_crc | get_resp | long_resp |
472 BM_SSP_CTRL0_DATA_XFER | read | 472 BM_SSP_CTRL0_DATA_XFER | read |
473 BM_SSP_CTRL0_WAIT_FOR_IRQ | 473 BM_SSP_CTRL0_WAIT_FOR_IRQ |
474 BM_SSP_CTRL0_ENABLE; 474 BM_SSP_CTRL0_ENABLE;
475 475
476 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); 476 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
477 477
478 /* base-2 logarithm of the block size, as the register field expects */ 478 /* base-2 logarithm of the block size, as the register field expects */
479 log2_blksz = ilog2(data->blksz); 479 log2_blksz = ilog2(data->blksz);
480 480
481 /* 481 /*
482 * Handle the case where the total size of data->sg 482 * Handle the case where the total size of data->sg
483 * is not equal to blocks * blksz 483 * is not equal to blocks * blksz
484 */ 484 */
485 for_each_sg(sgl, sg, sg_len, i) 485 for_each_sg(sgl, sg, sg_len, i)
486 data_size += sg->length; 486 data_size += sg->length;
487 487
488 if (data_size != data->blocks * data->blksz) 488 if (data_size != data->blocks * data->blksz)
489 blocks = 1; 489 blocks = 1;
490 490
491 /* xfer count, block size and count need to be set differently */ 491 /* xfer count, block size and count need to be set differently */
492 if (ssp_is_old(host)) { 492 if (ssp_is_old(host)) {
493 ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT); 493 ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
494 cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) | 494 cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
495 BF_SSP(blocks - 1, CMD0_BLOCK_COUNT); 495 BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
496 } else { 496 } else {
497 writel(data_size, host->base + HW_SSP_XFER_SIZE); 497 writel(data_size, host->base + HW_SSP_XFER_SIZE);
498 writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) | 498 writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
499 BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT), 499 BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
500 host->base + HW_SSP_BLOCK_SIZE); 500 host->base + HW_SSP_BLOCK_SIZE);
501 } 501 }
502 502
503 if ((cmd->opcode == MMC_STOP_TRANSMISSION) || 503 if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
504 (cmd->opcode == SD_IO_RW_EXTENDED)) 504 (cmd->opcode == SD_IO_RW_EXTENDED))
505 cmd0 |= BM_SSP_CMD0_APPEND_8CYC; 505 cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
506 506
507 cmd1 = cmd->arg; 507 cmd1 = cmd->arg;
508 508
509 if (host->sdio_irq_en) { 509 if (host->sdio_irq_en) {
510 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; 510 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
511 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; 511 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
512 } 512 }
513 513
514 /* set the timeout count */ 514 /* set the timeout count */
515 timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns); 515 timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns);
516 val = readl(host->base + HW_SSP_TIMING(host)); 516 val = readl(host->base + HW_SSP_TIMING(host));
517 val &= ~(BM_SSP_TIMING_TIMEOUT); 517 val &= ~(BM_SSP_TIMING_TIMEOUT);
518 val |= BF_SSP(timeout, TIMING_TIMEOUT); 518 val |= BF_SSP(timeout, TIMING_TIMEOUT);
519 writel(val, host->base + HW_SSP_TIMING(host)); 519 writel(val, host->base + HW_SSP_TIMING(host));
520 520
521 /* pio */ 521 /* pio */
522 host->ssp_pio_words[0] = ctrl0; 522 host->ssp_pio_words[0] = ctrl0;
523 host->ssp_pio_words[1] = cmd0; 523 host->ssp_pio_words[1] = cmd0;
524 host->ssp_pio_words[2] = cmd1; 524 host->ssp_pio_words[2] = cmd1;
525 host->dma_dir = DMA_NONE; 525 host->dma_dir = DMA_NONE;
526 host->slave_dirn = DMA_TRANS_NONE; 526 host->slave_dirn = DMA_TRANS_NONE;
527 desc = mxs_mmc_prep_dma(host, 0); 527 desc = mxs_mmc_prep_dma(host, 0);
528 if (!desc) 528 if (!desc)
529 goto out; 529 goto out;
530 530
531 /* append data sg */ 531 /* append data sg */
532 WARN_ON(host->data != NULL); 532 WARN_ON(host->data != NULL);
533 host->data = data; 533 host->data = data;
534 host->dma_dir = dma_data_dir; 534 host->dma_dir = dma_data_dir;
535 host->slave_dirn = slave_dirn; 535 host->slave_dirn = slave_dirn;
536 desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 536 desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
537 if (!desc) 537 if (!desc)
538 goto out; 538 goto out;
539 539
540 dmaengine_submit(desc); 540 dmaengine_submit(desc);
541 dma_async_issue_pending(host->dmach); 541 dma_async_issue_pending(host->dmach);
542 return; 542 return;
543 out: 543 out:
544 dev_warn(mmc_dev(host->mmc), 544 dev_warn(mmc_dev(host->mmc),
545 "%s: failed to prep dma\n", __func__); 545 "%s: failed to prep dma\n", __func__);
546 } 546 }
547 547
548 static void mxs_mmc_start_cmd(struct mxs_mmc_host *host, 548 static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
549 struct mmc_command *cmd) 549 struct mmc_command *cmd)
550 { 550 {
551 host->cmd = cmd; 551 host->cmd = cmd;
552 552
553 switch (mmc_cmd_type(cmd)) { 553 switch (mmc_cmd_type(cmd)) {
554 case MMC_CMD_BC: 554 case MMC_CMD_BC:
555 mxs_mmc_bc(host); 555 mxs_mmc_bc(host);
556 break; 556 break;
557 case MMC_CMD_BCR: 557 case MMC_CMD_BCR:
558 mxs_mmc_ac(host); 558 mxs_mmc_ac(host);
559 break; 559 break;
560 case MMC_CMD_AC: 560 case MMC_CMD_AC:
561 mxs_mmc_ac(host); 561 mxs_mmc_ac(host);
562 break; 562 break;
563 case MMC_CMD_ADTC: 563 case MMC_CMD_ADTC:
564 mxs_mmc_adtc(host); 564 mxs_mmc_adtc(host);
565 break; 565 break;
566 default: 566 default:
567 dev_warn(mmc_dev(host->mmc), 567 dev_warn(mmc_dev(host->mmc),
568 "%s: unknown MMC command\n", __func__); 568 "%s: unknown MMC command\n", __func__);
569 break; 569 break;
570 } 570 }
571 } 571 }
572 572
573 static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) 573 static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
574 { 574 {
575 struct mxs_mmc_host *host = mmc_priv(mmc); 575 struct mxs_mmc_host *host = mmc_priv(mmc);
576 576
577 WARN_ON(host->mrq != NULL); 577 WARN_ON(host->mrq != NULL);
578 host->mrq = mrq; 578 host->mrq = mrq;
579 mxs_mmc_start_cmd(host, mrq->cmd); 579 mxs_mmc_start_cmd(host, mrq->cmd);
580 } 580 }
581 581
582 static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate) 582 static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
583 { 583 {
584 unsigned int ssp_clk, ssp_sck; 584 unsigned int ssp_clk, ssp_sck;
585 u32 clock_divide, clock_rate; 585 u32 clock_divide, clock_rate;
586 u32 val; 586 u32 val;
587 587
588 ssp_clk = clk_get_rate(host->clk); 588 ssp_clk = clk_get_rate(host->clk);
589 589
590 for (clock_divide = 2; clock_divide <= 254; clock_divide += 2) { 590 for (clock_divide = 2; clock_divide <= 254; clock_divide += 2) {
591 clock_rate = DIV_ROUND_UP(ssp_clk, rate * clock_divide); 591 clock_rate = DIV_ROUND_UP(ssp_clk, rate * clock_divide);
592 clock_rate = (clock_rate > 0) ? clock_rate - 1 : 0; 592 clock_rate = (clock_rate > 0) ? clock_rate - 1 : 0;
593 if (clock_rate <= 255) 593 if (clock_rate <= 255)
594 break; 594 break;
595 } 595 }
596 596
597 if (clock_divide > 254) { 597 if (clock_divide > 254) {
598 dev_err(mmc_dev(host->mmc), 598 dev_err(mmc_dev(host->mmc),
599 "%s: cannot set clock to %d\n", __func__, rate); 599 "%s: cannot set clock to %d\n", __func__, rate);
600 return; 600 return;
601 } 601 }
602 602
603 ssp_sck = ssp_clk / clock_divide / (1 + clock_rate); 603 ssp_sck = ssp_clk / clock_divide / (1 + clock_rate);
604 604
605 val = readl(host->base + HW_SSP_TIMING(host)); 605 val = readl(host->base + HW_SSP_TIMING(host));
606 val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE); 606 val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
607 val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE); 607 val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE);
608 val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE); 608 val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE);
609 writel(val, host->base + HW_SSP_TIMING(host)); 609 writel(val, host->base + HW_SSP_TIMING(host));
610 610
611 host->clk_rate = ssp_sck; 611 host->clk_rate = ssp_sck;
612 612
613 dev_dbg(mmc_dev(host->mmc), 613 dev_dbg(mmc_dev(host->mmc),
614 "%s: clock_divide %d, clock_rate %d, ssp_clk %d, rate_actual %d, rate_requested %d\n", 614 "%s: clock_divide %d, clock_rate %d, ssp_clk %d, rate_actual %d, rate_requested %d\n",
615 __func__, clock_divide, clock_rate, ssp_clk, ssp_sck, rate); 615 __func__, clock_divide, clock_rate, ssp_clk, ssp_sck, rate);
616 } 616 }
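A worked example of the divider search above, which implements SCK = ssp_clk / (clock_divide * (1 + clock_rate)): with ssp_clk = 96 MHz and a requested rate of 25 MHz, the first iteration (clock_divide = 2) gives clock_rate = DIV_ROUND_UP(96 MHz, 50 MHz) - 1 = 1, which fits the 8-bit field, so the loop stops and SCK = 96 MHz / 2 / 2 = 24 MHz. Rounding the intermediate quotient up keeps the actual clock at or below the requested rate.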
617 617
618 static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 618 static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
619 { 619 {
620 struct mxs_mmc_host *host = mmc_priv(mmc); 620 struct mxs_mmc_host *host = mmc_priv(mmc);
621 621
622 if (ios->bus_width == MMC_BUS_WIDTH_8) 622 if (ios->bus_width == MMC_BUS_WIDTH_8)
623 host->bus_width = 2; 623 host->bus_width = 2;
624 else if (ios->bus_width == MMC_BUS_WIDTH_4) 624 else if (ios->bus_width == MMC_BUS_WIDTH_4)
625 host->bus_width = 1; 625 host->bus_width = 1;
626 else 626 else
627 host->bus_width = 0; 627 host->bus_width = 0;
628 628
629 if (ios->clock) 629 if (ios->clock)
630 mxs_mmc_set_clk_rate(host, ios->clock); 630 mxs_mmc_set_clk_rate(host, ios->clock);
631 } 631 }
632 632
633 static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) 633 static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
634 { 634 {
635 struct mxs_mmc_host *host = mmc_priv(mmc); 635 struct mxs_mmc_host *host = mmc_priv(mmc);
636 unsigned long flags; 636 unsigned long flags;
637 637
638 spin_lock_irqsave(&host->lock, flags); 638 spin_lock_irqsave(&host->lock, flags);
639 639
640 host->sdio_irq_en = enable; 640 host->sdio_irq_en = enable;
641 641
642 if (enable) { 642 if (enable) {
643 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, 643 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
644 host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); 644 host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
645 writel(BM_SSP_CTRL1_SDIO_IRQ_EN, 645 writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
646 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET); 646 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET);
647
648 if (readl(host->base + HW_SSP_STATUS(host)) &
649 BM_SSP_STATUS_SDIO_IRQ)
650 mmc_signal_sdio_irq(host->mmc);
651
652 } else { 647 } else {
653 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, 648 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
654 host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); 649 host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
655 writel(BM_SSP_CTRL1_SDIO_IRQ_EN, 650 writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
656 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR); 651 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
657 } 652 }
658 653
659 spin_unlock_irqrestore(&host->lock, flags); 654 spin_unlock_irqrestore(&host->lock, flags);
655
656 if (enable && readl(host->base + HW_SSP_STATUS(host)) &
657 BM_SSP_STATUS_SDIO_IRQ)
658 mmc_signal_sdio_irq(host->mmc);
659
660 } 660 }
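This is the companion to the interrupt-handler fix above: mmc_signal_sdio_irq() must not be called with host->lock held, because it re-enters this very function through the host ops. The rewritten tail also re-reads HW_SSP_STATUS after the unlock, so an SDIO interrupt that was already asserted while SDIO interrupts were disabled is signalled immediately on enable rather than sitting unnoticed until the next edge.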
661 661
662 static const struct mmc_host_ops mxs_mmc_ops = { 662 static const struct mmc_host_ops mxs_mmc_ops = {
663 .request = mxs_mmc_request, 663 .request = mxs_mmc_request,
664 .get_ro = mxs_mmc_get_ro, 664 .get_ro = mxs_mmc_get_ro,
665 .get_cd = mxs_mmc_get_cd, 665 .get_cd = mxs_mmc_get_cd,
666 .set_ios = mxs_mmc_set_ios, 666 .set_ios = mxs_mmc_set_ios,
667 .enable_sdio_irq = mxs_mmc_enable_sdio_irq, 667 .enable_sdio_irq = mxs_mmc_enable_sdio_irq,
668 }; 668 };
669 669
670 static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param) 670 static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
671 { 671 {
672 struct mxs_mmc_host *host = param; 672 struct mxs_mmc_host *host = param;
673 673
674 if (!mxs_dma_is_apbh(chan)) 674 if (!mxs_dma_is_apbh(chan))
675 return false; 675 return false;
676 676
677 if (chan->chan_id != host->dma_channel) 677 if (chan->chan_id != host->dma_channel)
678 return false; 678 return false;
679 679
680 chan->private = &host->dma_data; 680 chan->private = &host->dma_data;
681 681
682 return true; 682 return true;
683 } 683 }
684 684
685 static struct platform_device_id mxs_mmc_ids[] = { 685 static struct platform_device_id mxs_mmc_ids[] = {
686 { 686 {
687 .name = "imx23-mmc", 687 .name = "imx23-mmc",
688 .driver_data = IMX23_MMC, 688 .driver_data = IMX23_MMC,
689 }, { 689 }, {
690 .name = "imx28-mmc", 690 .name = "imx28-mmc",
691 .driver_data = IMX28_MMC, 691 .driver_data = IMX28_MMC,
692 }, { 692 }, {
693 /* sentinel */ 693 /* sentinel */
694 } 694 }
695 }; 695 };
696 MODULE_DEVICE_TABLE(platform, mxs_mmc_ids); 696 MODULE_DEVICE_TABLE(platform, mxs_mmc_ids);
697 697
698 static const struct of_device_id mxs_mmc_dt_ids[] = { 698 static const struct of_device_id mxs_mmc_dt_ids[] = {
699 { .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_MMC, }, 699 { .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_MMC, },
700 { .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_MMC, }, 700 { .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_MMC, },
701 { /* sentinel */ } 701 { /* sentinel */ }
702 }; 702 };
703 MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids); 703 MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
704 704
705 static int mxs_mmc_probe(struct platform_device *pdev) 705 static int mxs_mmc_probe(struct platform_device *pdev)
706 { 706 {
707 const struct of_device_id *of_id = 707 const struct of_device_id *of_id =
708 of_match_device(mxs_mmc_dt_ids, &pdev->dev); 708 of_match_device(mxs_mmc_dt_ids, &pdev->dev);
709 struct device_node *np = pdev->dev.of_node; 709 struct device_node *np = pdev->dev.of_node;
710 struct mxs_mmc_host *host; 710 struct mxs_mmc_host *host;
711 struct mmc_host *mmc; 711 struct mmc_host *mmc;
712 struct resource *iores, *dmares; 712 struct resource *iores, *dmares;
713 struct mxs_mmc_platform_data *pdata; 713 struct mxs_mmc_platform_data *pdata;
714 struct pinctrl *pinctrl; 714 struct pinctrl *pinctrl;
715 int ret = 0, irq_err, irq_dma; 715 int ret = 0, irq_err, irq_dma;
716 dma_cap_mask_t mask; 716 dma_cap_mask_t mask;
717 struct regulator *reg_vmmc; 717 struct regulator *reg_vmmc;
718 enum of_gpio_flags flags; 718 enum of_gpio_flags flags;
719 719
720 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 720 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
721 dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); 721 dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
722 irq_err = platform_get_irq(pdev, 0); 722 irq_err = platform_get_irq(pdev, 0);
723 irq_dma = platform_get_irq(pdev, 1); 723 irq_dma = platform_get_irq(pdev, 1);
724 if (!iores || irq_err < 0 || irq_dma < 0) 724 if (!iores || irq_err < 0 || irq_dma < 0)
725 return -EINVAL; 725 return -EINVAL;
726 726
727 mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev); 727 mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
728 if (!mmc) 728 if (!mmc)
729 return -ENOMEM; 729 return -ENOMEM;
730 730
731 host = mmc_priv(mmc); 731 host = mmc_priv(mmc);
732 host->base = devm_request_and_ioremap(&pdev->dev, iores); 732 host->base = devm_request_and_ioremap(&pdev->dev, iores);
733 if (!host->base) { 733 if (!host->base) {
734 ret = -EADDRNOTAVAIL; 734 ret = -EADDRNOTAVAIL;
735 goto out_mmc_free; 735 goto out_mmc_free;
736 } 736 }
737 737
738 if (np) { 738 if (np) {
739 host->devid = (enum mxs_mmc_id) of_id->data; 739 host->devid = (enum mxs_mmc_id) of_id->data;
740 /* 740 /*
741 * TODO: This is a temporary solution and should be changed 741 * TODO: This is a temporary solution and should be changed
742 * to use generic DMA binding later when the helpers get in. 742 * to use generic DMA binding later when the helpers get in.
743 */ 743 */
744 ret = of_property_read_u32(np, "fsl,ssp-dma-channel", 744 ret = of_property_read_u32(np, "fsl,ssp-dma-channel",
745 &host->dma_channel); 745 &host->dma_channel);
746 if (ret) { 746 if (ret) {
747 dev_err(mmc_dev(host->mmc), 747 dev_err(mmc_dev(host->mmc),
748 "failed to get dma channel\n"); 748 "failed to get dma channel\n");
749 goto out_mmc_free; 749 goto out_mmc_free;
750 } 750 }
751 } else { 751 } else {
752 host->devid = pdev->id_entry->driver_data; 752 host->devid = pdev->id_entry->driver_data;
753 host->dma_channel = dmares->start; 753 host->dma_channel = dmares->start;
754 } 754 }
755 755
756 host->mmc = mmc; 756 host->mmc = mmc;
757 host->sdio_irq_en = 0; 757 host->sdio_irq_en = 0;
758 758
759 reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc"); 759 reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
760 if (!IS_ERR(reg_vmmc)) { 760 if (!IS_ERR(reg_vmmc)) {
761 ret = regulator_enable(reg_vmmc); 761 ret = regulator_enable(reg_vmmc);
762 if (ret) { 762 if (ret) {
763 dev_err(&pdev->dev, 763 dev_err(&pdev->dev,
764 "Failed to enable vmmc regulator: %d\n", ret); 764 "Failed to enable vmmc regulator: %d\n", ret);
765 goto out_mmc_free; 765 goto out_mmc_free;
766 } 766 }
767 } 767 }
768 768
769 pinctrl = devm_pinctrl_get_select_default(&pdev->dev); 769 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
770 if (IS_ERR(pinctrl)) { 770 if (IS_ERR(pinctrl)) {
771 ret = PTR_ERR(pinctrl); 771 ret = PTR_ERR(pinctrl);
772 goto out_mmc_free; 772 goto out_mmc_free;
773 } 773 }
774 774
775 host->clk = clk_get(&pdev->dev, NULL); 775 host->clk = clk_get(&pdev->dev, NULL);
776 if (IS_ERR(host->clk)) { 776 if (IS_ERR(host->clk)) {
777 ret = PTR_ERR(host->clk); 777 ret = PTR_ERR(host->clk);
778 goto out_mmc_free; 778 goto out_mmc_free;
779 } 779 }
780 clk_prepare_enable(host->clk); 780 clk_prepare_enable(host->clk);
781 781
782 mxs_mmc_reset(host); 782 mxs_mmc_reset(host);
783 783
784 dma_cap_zero(mask); 784 dma_cap_zero(mask);
785 dma_cap_set(DMA_SLAVE, mask); 785 dma_cap_set(DMA_SLAVE, mask);
786 host->dma_data.chan_irq = irq_dma; 786 host->dma_data.chan_irq = irq_dma;
787 host->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host); 787 host->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host);
788 if (!host->dmach) { 788 if (!host->dmach) {
789 dev_err(mmc_dev(host->mmc), 789 dev_err(mmc_dev(host->mmc),
790 "%s: failed to request dma\n", __func__); 790 "%s: failed to request dma\n", __func__);
791 goto out_clk_put; 791 goto out_clk_put;
792 } 792 }
793 793
794 /* set mmc core parameters */ 794 /* set mmc core parameters */
795 mmc->ops = &mxs_mmc_ops; 795 mmc->ops = &mxs_mmc_ops;
796 mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | 796 mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
797 MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL; 797 MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
798 798
799 pdata = mmc_dev(host->mmc)->platform_data; 799 pdata = mmc_dev(host->mmc)->platform_data;
800 if (!pdata) { 800 if (!pdata) {
801 u32 bus_width = 0; 801 u32 bus_width = 0;
802 of_property_read_u32(np, "bus-width", &bus_width); 802 of_property_read_u32(np, "bus-width", &bus_width);
803 if (bus_width == 4) 803 if (bus_width == 4)
804 mmc->caps |= MMC_CAP_4_BIT_DATA; 804 mmc->caps |= MMC_CAP_4_BIT_DATA;
805 else if (bus_width == 8) 805 else if (bus_width == 8)
806 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; 806 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
807 host->wp_gpio = of_get_named_gpio_flags(np, "wp-gpios", 0, 807 host->wp_gpio = of_get_named_gpio_flags(np, "wp-gpios", 0,
808 &flags); 808 &flags);
809 if (flags & OF_GPIO_ACTIVE_LOW) 809 if (flags & OF_GPIO_ACTIVE_LOW)
810 host->wp_inverted = 1; 810 host->wp_inverted = 1;
811 } else { 811 } else {
812 if (pdata->flags & SLOTF_8_BIT_CAPABLE) 812 if (pdata->flags & SLOTF_8_BIT_CAPABLE)
813 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; 813 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
814 if (pdata->flags & SLOTF_4_BIT_CAPABLE) 814 if (pdata->flags & SLOTF_4_BIT_CAPABLE)
815 mmc->caps |= MMC_CAP_4_BIT_DATA; 815 mmc->caps |= MMC_CAP_4_BIT_DATA;
816 host->wp_gpio = pdata->wp_gpio; 816 host->wp_gpio = pdata->wp_gpio;
817 } 817 }
818 818
819 mmc->f_min = 400000; 819 mmc->f_min = 400000;
820 mmc->f_max = 288000000; 820 mmc->f_max = 288000000;
821 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 821 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
822 822
823 mmc->max_segs = 52; 823 mmc->max_segs = 52;
824 mmc->max_blk_size = 1 << 0xf; 824 mmc->max_blk_size = 1 << 0xf;
825 mmc->max_blk_count = (ssp_is_old(host)) ? 0xff : 0xffffff; 825 mmc->max_blk_count = (ssp_is_old(host)) ? 0xff : 0xffffff;
826 mmc->max_req_size = (ssp_is_old(host)) ? 0xffff : 0xffffffff; 826 mmc->max_req_size = (ssp_is_old(host)) ? 0xffff : 0xffffffff;
827 mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev); 827 mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev);
828 828
829 platform_set_drvdata(pdev, mmc); 829 platform_set_drvdata(pdev, mmc);
830 830
831 ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0, 831 ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
832 DRIVER_NAME, host); 832 DRIVER_NAME, host);
833 if (ret) 833 if (ret)
834 goto out_free_dma; 834 goto out_free_dma;
835 835
836 spin_lock_init(&host->lock); 836 spin_lock_init(&host->lock);
837 837
838 ret = mmc_add_host(mmc); 838 ret = mmc_add_host(mmc);
839 if (ret) 839 if (ret)
840 goto out_free_dma; 840 goto out_free_dma;
841 841
842 dev_info(mmc_dev(host->mmc), "initialized\n"); 842 dev_info(mmc_dev(host->mmc), "initialized\n");
843 843
844 return 0; 844 return 0;
845 845
846 out_free_dma: 846 out_free_dma:
847 if (host->dmach) 847 if (host->dmach)
848 dma_release_channel(host->dmach); 848 dma_release_channel(host->dmach);
849 out_clk_put: 849 out_clk_put:
850 clk_disable_unprepare(host->clk); 850 clk_disable_unprepare(host->clk);
851 clk_put(host->clk); 851 clk_put(host->clk);
852 out_mmc_free: 852 out_mmc_free:
853 mmc_free_host(mmc); 853 mmc_free_host(mmc);
854 return ret; 854 return ret;
855 } 855 }
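The error path above is the usual reverse-order unwind: each label releases exactly what was acquired before the failing step, and the devm_-managed resources (the ioremap, regulator and IRQ) have no labels at all because the driver core releases them automatically when probe() fails. The idiom in outline, with placeholder names:

	ret = take_first();		/* e.g. mmc_alloc_host() */
	if (ret)
		return ret;
	ret = take_second();		/* e.g. clk_get() + clk_prepare_enable() */
	if (ret)
		goto undo_first;
	ret = take_third();		/* e.g. dma_request_channel() */
	if (ret)
		goto undo_second;
	return 0;

undo_second:
	release_second();
undo_first:
	release_first();
	return ret;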
856 856
857 static int mxs_mmc_remove(struct platform_device *pdev) 857 static int mxs_mmc_remove(struct platform_device *pdev)
858 { 858 {
859 struct mmc_host *mmc = platform_get_drvdata(pdev); 859 struct mmc_host *mmc = platform_get_drvdata(pdev);
860 struct mxs_mmc_host *host = mmc_priv(mmc); 860 struct mxs_mmc_host *host = mmc_priv(mmc);
861 861
862 mmc_remove_host(mmc); 862 mmc_remove_host(mmc);
863 863
864 platform_set_drvdata(pdev, NULL); 864 platform_set_drvdata(pdev, NULL);
865 865
866 if (host->dmach) 866 if (host->dmach)
867 dma_release_channel(host->dmach); 867 dma_release_channel(host->dmach);
868 868
869 clk_disable_unprepare(host->clk); 869 clk_disable_unprepare(host->clk);
870 clk_put(host->clk); 870 clk_put(host->clk);
871 871
872 mmc_free_host(mmc); 872 mmc_free_host(mmc);
873 873
874 return 0; 874 return 0;
875 } 875 }
876 876
877 #ifdef CONFIG_PM 877 #ifdef CONFIG_PM
878 static int mxs_mmc_suspend(struct device *dev) 878 static int mxs_mmc_suspend(struct device *dev)
879 { 879 {
880 struct mmc_host *mmc = dev_get_drvdata(dev); 880 struct mmc_host *mmc = dev_get_drvdata(dev);
881 struct mxs_mmc_host *host = mmc_priv(mmc); 881 struct mxs_mmc_host *host = mmc_priv(mmc);
882 int ret = 0; 882 int ret = 0;
883 883
884 ret = mmc_suspend_host(mmc); 884 ret = mmc_suspend_host(mmc);
885 885
886 clk_disable_unprepare(host->clk); 886 clk_disable_unprepare(host->clk);
887 887
888 return ret; 888 return ret;
889 } 889 }
890 890
891 static int mxs_mmc_resume(struct device *dev) 891 static int mxs_mmc_resume(struct device *dev)
892 { 892 {
893 struct mmc_host *mmc = dev_get_drvdata(dev); 893 struct mmc_host *mmc = dev_get_drvdata(dev);
894 struct mxs_mmc_host *host = mmc_priv(mmc); 894 struct mxs_mmc_host *host = mmc_priv(mmc);
895 int ret = 0; 895 int ret = 0;
896 896
897 clk_prepare_enable(host->clk); 897 clk_prepare_enable(host->clk);
898 898
899 ret = mmc_resume_host(mmc); 899 ret = mmc_resume_host(mmc);
900 900
901 return ret; 901 return ret;
902 } 902 }
903 903
904 static const struct dev_pm_ops mxs_mmc_pm_ops = { 904 static const struct dev_pm_ops mxs_mmc_pm_ops = {
905 .suspend = mxs_mmc_suspend, 905 .suspend = mxs_mmc_suspend,
906 .resume = mxs_mmc_resume, 906 .resume = mxs_mmc_resume,
907 }; 907 };
908 #endif 908 #endif
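A quick aside: this #ifdef must pair with the one around .pm in the platform_driver below, or a !CONFIG_PM build would reference an undefined mxs_mmc_pm_ops. A common alternative, sketched here rather than taken from this driver, is SIMPLE_DEV_PM_OPS, which expands to empty ops when power management is disabled (the callbacks would then usually be marked __maybe_unused instead of being wrapped in #ifdef):

	static SIMPLE_DEV_PM_OPS(mxs_mmc_pm_ops, mxs_mmc_suspend, mxs_mmc_resume);
	/* ...then .pm = &mxs_mmc_pm_ops can be set unconditionally */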
909 909
910 static struct platform_driver mxs_mmc_driver = { 910 static struct platform_driver mxs_mmc_driver = {
911 .probe = mxs_mmc_probe, 911 .probe = mxs_mmc_probe,
912 .remove = mxs_mmc_remove, 912 .remove = mxs_mmc_remove,
913 .id_table = mxs_mmc_ids, 913 .id_table = mxs_mmc_ids,
914 .driver = { 914 .driver = {
915 .name = DRIVER_NAME, 915 .name = DRIVER_NAME,
916 .owner = THIS_MODULE, 916 .owner = THIS_MODULE,
917 #ifdef CONFIG_PM 917 #ifdef CONFIG_PM
918 .pm = &mxs_mmc_pm_ops, 918 .pm = &mxs_mmc_pm_ops,
919 #endif 919 #endif
920 .of_match_table = mxs_mmc_dt_ids, 920 .of_match_table = mxs_mmc_dt_ids,
921 }, 921 },
922 }; 922 };
drivers/mmc/host/omap.c
1 /* 1 /*
2 * linux/drivers/mmc/host/omap.c 2 * linux/drivers/mmc/host/omap.c
3 * 3 *
4 * Copyright (C) 2004 Nokia Corporation 4 * Copyright (C) 2004 Nokia Corporation
5 * Written by Tuukka Tikkanen and Juha Yrjölä<juha.yrjola@nokia.com> 5 * Written by Tuukka Tikkanen and Juha Yrjölä<juha.yrjola@nokia.com>
6 * Misc hacks here and there by Tony Lindgren <tony@atomide.com> 6 * Misc hacks here and there by Tony Lindgren <tony@atomide.com>
7 * Other hacks (DMA, SD, etc) by David Brownell 7 * Other hacks (DMA, SD, etc) by David Brownell
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13 13
14 #include <linux/module.h> 14 #include <linux/module.h>
15 #include <linux/moduleparam.h> 15 #include <linux/moduleparam.h>
16 #include <linux/init.h> 16 #include <linux/init.h>
17 #include <linux/ioport.h> 17 #include <linux/ioport.h>
18 #include <linux/platform_device.h> 18 #include <linux/platform_device.h>
19 #include <linux/interrupt.h> 19 #include <linux/interrupt.h>
20 #include <linux/dmaengine.h> 20 #include <linux/dmaengine.h>
21 #include <linux/dma-mapping.h> 21 #include <linux/dma-mapping.h>
22 #include <linux/delay.h> 22 #include <linux/delay.h>
23 #include <linux/spinlock.h> 23 #include <linux/spinlock.h>
24 #include <linux/timer.h> 24 #include <linux/timer.h>
25 #include <linux/omap-dma.h> 25 #include <linux/omap-dma.h>
26 #include <linux/mmc/host.h> 26 #include <linux/mmc/host.h>
27 #include <linux/mmc/card.h> 27 #include <linux/mmc/card.h>
28 #include <linux/clk.h> 28 #include <linux/clk.h>
29 #include <linux/scatterlist.h> 29 #include <linux/scatterlist.h>
30 #include <linux/i2c/tps65010.h> 30 #include <linux/i2c/tps65010.h>
31 #include <linux/slab.h> 31 #include <linux/slab.h>
32 32
33 #include <asm/io.h> 33 #include <asm/io.h>
34 #include <asm/irq.h> 34 #include <asm/irq.h>
35 35
36 #include <plat/board.h> 36 #include <plat/board.h>
37 #include <plat/mmc.h> 37 #include <plat/mmc.h>
38 #include <asm/gpio.h> 38 #include <asm/gpio.h>
39 #include <plat/dma.h> 39 #include <plat/dma.h>
40 #include <plat/mux.h> 40 #include <plat/mux.h>
41 #include <plat/fpga.h> 41 #include <plat/fpga.h>
42 42
43 #define OMAP_MMC_REG_CMD 0x00 43 #define OMAP_MMC_REG_CMD 0x00
44 #define OMAP_MMC_REG_ARGL 0x01 44 #define OMAP_MMC_REG_ARGL 0x01
45 #define OMAP_MMC_REG_ARGH 0x02 45 #define OMAP_MMC_REG_ARGH 0x02
46 #define OMAP_MMC_REG_CON 0x03 46 #define OMAP_MMC_REG_CON 0x03
47 #define OMAP_MMC_REG_STAT 0x04 47 #define OMAP_MMC_REG_STAT 0x04
48 #define OMAP_MMC_REG_IE 0x05 48 #define OMAP_MMC_REG_IE 0x05
49 #define OMAP_MMC_REG_CTO 0x06 49 #define OMAP_MMC_REG_CTO 0x06
50 #define OMAP_MMC_REG_DTO 0x07 50 #define OMAP_MMC_REG_DTO 0x07
51 #define OMAP_MMC_REG_DATA 0x08 51 #define OMAP_MMC_REG_DATA 0x08
52 #define OMAP_MMC_REG_BLEN 0x09 52 #define OMAP_MMC_REG_BLEN 0x09
53 #define OMAP_MMC_REG_NBLK 0x0a 53 #define OMAP_MMC_REG_NBLK 0x0a
54 #define OMAP_MMC_REG_BUF 0x0b 54 #define OMAP_MMC_REG_BUF 0x0b
55 #define OMAP_MMC_REG_SDIO 0x0d 55 #define OMAP_MMC_REG_SDIO 0x0d
56 #define OMAP_MMC_REG_REV 0x0f 56 #define OMAP_MMC_REG_REV 0x0f
57 #define OMAP_MMC_REG_RSP0 0x10 57 #define OMAP_MMC_REG_RSP0 0x10
58 #define OMAP_MMC_REG_RSP1 0x11 58 #define OMAP_MMC_REG_RSP1 0x11
59 #define OMAP_MMC_REG_RSP2 0x12 59 #define OMAP_MMC_REG_RSP2 0x12
60 #define OMAP_MMC_REG_RSP3 0x13 60 #define OMAP_MMC_REG_RSP3 0x13
61 #define OMAP_MMC_REG_RSP4 0x14 61 #define OMAP_MMC_REG_RSP4 0x14
62 #define OMAP_MMC_REG_RSP5 0x15 62 #define OMAP_MMC_REG_RSP5 0x15
63 #define OMAP_MMC_REG_RSP6 0x16 63 #define OMAP_MMC_REG_RSP6 0x16
64 #define OMAP_MMC_REG_RSP7 0x17 64 #define OMAP_MMC_REG_RSP7 0x17
65 #define OMAP_MMC_REG_IOSR 0x18 65 #define OMAP_MMC_REG_IOSR 0x18
66 #define OMAP_MMC_REG_SYSC 0x19 66 #define OMAP_MMC_REG_SYSC 0x19
67 #define OMAP_MMC_REG_SYSS 0x1a 67 #define OMAP_MMC_REG_SYSS 0x1a
68 68
69 #define OMAP_MMC_STAT_CARD_ERR (1 << 14) 69 #define OMAP_MMC_STAT_CARD_ERR (1 << 14)
70 #define OMAP_MMC_STAT_CARD_IRQ (1 << 13) 70 #define OMAP_MMC_STAT_CARD_IRQ (1 << 13)
71 #define OMAP_MMC_STAT_OCR_BUSY (1 << 12) 71 #define OMAP_MMC_STAT_OCR_BUSY (1 << 12)
72 #define OMAP_MMC_STAT_A_EMPTY (1 << 11) 72 #define OMAP_MMC_STAT_A_EMPTY (1 << 11)
73 #define OMAP_MMC_STAT_A_FULL (1 << 10) 73 #define OMAP_MMC_STAT_A_FULL (1 << 10)
74 #define OMAP_MMC_STAT_CMD_CRC (1 << 8) 74 #define OMAP_MMC_STAT_CMD_CRC (1 << 8)
75 #define OMAP_MMC_STAT_CMD_TOUT (1 << 7) 75 #define OMAP_MMC_STAT_CMD_TOUT (1 << 7)
76 #define OMAP_MMC_STAT_DATA_CRC (1 << 6) 76 #define OMAP_MMC_STAT_DATA_CRC (1 << 6)
77 #define OMAP_MMC_STAT_DATA_TOUT (1 << 5) 77 #define OMAP_MMC_STAT_DATA_TOUT (1 << 5)
78 #define OMAP_MMC_STAT_END_BUSY (1 << 4) 78 #define OMAP_MMC_STAT_END_BUSY (1 << 4)
79 #define OMAP_MMC_STAT_END_OF_DATA (1 << 3) 79 #define OMAP_MMC_STAT_END_OF_DATA (1 << 3)
80 #define OMAP_MMC_STAT_CARD_BUSY (1 << 2) 80 #define OMAP_MMC_STAT_CARD_BUSY (1 << 2)
81 #define OMAP_MMC_STAT_END_OF_CMD (1 << 0) 81 #define OMAP_MMC_STAT_END_OF_CMD (1 << 0)
82 82
83 #define OMAP_MMC_REG(host, reg) (OMAP_MMC_REG_##reg << (host)->reg_shift) 83 #define OMAP_MMC_REG(host, reg) (OMAP_MMC_REG_##reg << (host)->reg_shift)
84 #define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg)) 84 #define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg))
85 #define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg)) 85 #define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg))
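These accessors scale the register index by host->reg_shift because the same register block is laid out with different strides across OMAP generations (probe sets a shift of 1 on OMAP7xx for 16-bit spacing, 2 elsewhere for 32-bit spacing). A worked example of the addressing:

	/* RSP6 has index 0x16: with reg_shift = 1 it sits at byte offset
	 * 0x16 << 1 = 0x2c, with reg_shift = 2 at 0x16 << 2 = 0x58.
	 * Only the stride changes; the index table above is shared. */
	unsigned int offset = OMAP_MMC_REG_RSP6 << host->reg_shift;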
86 86
87 /* 87 /*
88 * Command types 88 * Command types
89 */ 89 */
90 #define OMAP_MMC_CMDTYPE_BC 0 90 #define OMAP_MMC_CMDTYPE_BC 0
91 #define OMAP_MMC_CMDTYPE_BCR 1 91 #define OMAP_MMC_CMDTYPE_BCR 1
92 #define OMAP_MMC_CMDTYPE_AC 2 92 #define OMAP_MMC_CMDTYPE_AC 2
93 #define OMAP_MMC_CMDTYPE_ADTC 3 93 #define OMAP_MMC_CMDTYPE_ADTC 3
94 94
95 95
96 #define DRIVER_NAME "mmci-omap" 96 #define DRIVER_NAME "mmci-omap"
97 97
98 /* Specifies how often in millisecs to poll for card status changes 98 /* Specifies how often in millisecs to poll for card status changes
99 * when the cover switch is open */ 99 * when the cover switch is open */
100 #define OMAP_MMC_COVER_POLL_DELAY 500 100 #define OMAP_MMC_COVER_POLL_DELAY 500
101 101
102 struct mmc_omap_host; 102 struct mmc_omap_host;
103 103
104 struct mmc_omap_slot { 104 struct mmc_omap_slot {
105 int id; 105 int id;
106 unsigned int vdd; 106 unsigned int vdd;
107 u16 saved_con; 107 u16 saved_con;
108 u16 bus_mode; 108 u16 bus_mode;
109 unsigned int fclk_freq; 109 unsigned int fclk_freq;
110 unsigned powered:1; 110 unsigned powered:1;
111 111
112 struct tasklet_struct cover_tasklet; 112 struct tasklet_struct cover_tasklet;
113 struct timer_list cover_timer; 113 struct timer_list cover_timer;
114 unsigned cover_open; 114 unsigned cover_open;
115 115
116 struct mmc_request *mrq; 116 struct mmc_request *mrq;
117 struct mmc_omap_host *host; 117 struct mmc_omap_host *host;
118 struct mmc_host *mmc; 118 struct mmc_host *mmc;
119 struct omap_mmc_slot_data *pdata; 119 struct omap_mmc_slot_data *pdata;
120 }; 120 };
121 121
122 struct mmc_omap_host { 122 struct mmc_omap_host {
123 int initialized; 123 int initialized;
124 int suspended; 124 int suspended;
125 struct mmc_request * mrq; 125 struct mmc_request * mrq;
126 struct mmc_command * cmd; 126 struct mmc_command * cmd;
127 struct mmc_data * data; 127 struct mmc_data * data;
128 struct mmc_host * mmc; 128 struct mmc_host * mmc;
129 struct device * dev; 129 struct device * dev;
130 unsigned char id; /* 16xx chips have 2 MMC blocks */ 130 unsigned char id; /* 16xx chips have 2 MMC blocks */
131 struct clk * iclk; 131 struct clk * iclk;
132 struct clk * fclk; 132 struct clk * fclk;
133 struct dma_chan *dma_rx; 133 struct dma_chan *dma_rx;
134 u32 dma_rx_burst; 134 u32 dma_rx_burst;
135 struct dma_chan *dma_tx; 135 struct dma_chan *dma_tx;
136 u32 dma_tx_burst; 136 u32 dma_tx_burst;
137 struct resource *mem_res; 137 struct resource *mem_res;
138 void __iomem *virt_base; 138 void __iomem *virt_base;
139 unsigned int phys_base; 139 unsigned int phys_base;
140 int irq; 140 int irq;
141 unsigned char bus_mode; 141 unsigned char bus_mode;
142 unsigned char hw_bus_mode; 142 unsigned char hw_bus_mode;
143 unsigned int reg_shift; 143 unsigned int reg_shift;
144 144
145 struct work_struct cmd_abort_work; 145 struct work_struct cmd_abort_work;
146 unsigned abort:1; 146 unsigned abort:1;
147 struct timer_list cmd_abort_timer; 147 struct timer_list cmd_abort_timer;
148 148
149 struct work_struct slot_release_work; 149 struct work_struct slot_release_work;
150 struct mmc_omap_slot *next_slot; 150 struct mmc_omap_slot *next_slot;
151 struct work_struct send_stop_work; 151 struct work_struct send_stop_work;
152 struct mmc_data *stop_data; 152 struct mmc_data *stop_data;
153 153
154 unsigned int sg_len; 154 unsigned int sg_len;
155 int sg_idx; 155 int sg_idx;
156 u16 * buffer; 156 u16 * buffer;
157 u32 buffer_bytes_left; 157 u32 buffer_bytes_left;
158 u32 total_bytes_left; 158 u32 total_bytes_left;
159 159
160 unsigned use_dma:1; 160 unsigned use_dma:1;
161 unsigned brs_received:1, dma_done:1; 161 unsigned brs_received:1, dma_done:1;
162 unsigned dma_in_use:1; 162 unsigned dma_in_use:1;
163 spinlock_t dma_lock; 163 spinlock_t dma_lock;
164 164
165 struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS]; 165 struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS];
166 struct mmc_omap_slot *current_slot; 166 struct mmc_omap_slot *current_slot;
167 spinlock_t slot_lock; 167 spinlock_t slot_lock;
168 wait_queue_head_t slot_wq; 168 wait_queue_head_t slot_wq;
169 int nr_slots; 169 int nr_slots;
170 170
171 struct timer_list clk_timer; 171 struct timer_list clk_timer;
172 spinlock_t clk_lock; /* for changing enabled state */ 172 spinlock_t clk_lock; /* for changing enabled state */
173 unsigned int fclk_enabled:1; 173 unsigned int fclk_enabled:1;
174 struct workqueue_struct *mmc_omap_wq; 174 struct workqueue_struct *mmc_omap_wq;
175 175
176 struct omap_mmc_platform_data *pdata; 176 struct omap_mmc_platform_data *pdata;
177 }; 177 };
178 178
179 179
180 static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot) 180 static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
181 { 181 {
182 unsigned long tick_ns; 182 unsigned long tick_ns;
183 183
184 if (slot != NULL && slot->host->fclk_enabled && slot->fclk_freq > 0) { 184 if (slot != NULL && slot->host->fclk_enabled && slot->fclk_freq > 0) {
185 tick_ns = (1000000000 + slot->fclk_freq - 1) / slot->fclk_freq; 185 tick_ns = (1000000000 + slot->fclk_freq - 1) / slot->fclk_freq;
186 ndelay(8 * tick_ns); 186 ndelay(8 * tick_ns);
187 } 187 }
188 } 188 }
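The tick_ns expression is a ceiling division, one fclk period rounded up to whole nanoseconds, so the eight-cycle settling delay can only err long, never short. Worked numbers for illustration:

	/* At fclk_freq = 24 MHz:
	 *   tick_ns = (1000000000 + 24000000 - 1) / 24000000 = 42 ns
	 * (the exact period is 41.67 ns), giving ndelay(8 * 42) = ndelay(336). */
	tick_ns = (1000000000 + fclk_freq - 1) / fclk_freq;
	ndelay(8 * tick_ns);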
189 189
190 static void mmc_omap_fclk_enable(struct mmc_omap_host *host, unsigned int enable) 190 static void mmc_omap_fclk_enable(struct mmc_omap_host *host, unsigned int enable)
191 { 191 {
192 unsigned long flags; 192 unsigned long flags;
193 193
194 spin_lock_irqsave(&host->clk_lock, flags); 194 spin_lock_irqsave(&host->clk_lock, flags);
195 if (host->fclk_enabled != enable) { 195 if (host->fclk_enabled != enable) {
196 host->fclk_enabled = enable; 196 host->fclk_enabled = enable;
197 if (enable) 197 if (enable)
198 clk_enable(host->fclk); 198 clk_enable(host->fclk);
199 else 199 else
200 clk_disable(host->fclk); 200 clk_disable(host->fclk);
201 } 201 }
202 spin_unlock_irqrestore(&host->clk_lock, flags); 202 spin_unlock_irqrestore(&host->clk_lock, flags);
203 } 203 }
204 204
205 static void mmc_omap_select_slot(struct mmc_omap_slot *slot, int claimed) 205 static void mmc_omap_select_slot(struct mmc_omap_slot *slot, int claimed)
206 { 206 {
207 struct mmc_omap_host *host = slot->host; 207 struct mmc_omap_host *host = slot->host;
208 unsigned long flags; 208 unsigned long flags;
209 209
210 if (claimed) 210 if (claimed)
211 goto no_claim; 211 goto no_claim;
212 spin_lock_irqsave(&host->slot_lock, flags); 212 spin_lock_irqsave(&host->slot_lock, flags);
213 while (host->mmc != NULL) { 213 while (host->mmc != NULL) {
214 spin_unlock_irqrestore(&host->slot_lock, flags); 214 spin_unlock_irqrestore(&host->slot_lock, flags);
215 wait_event(host->slot_wq, host->mmc == NULL); 215 wait_event(host->slot_wq, host->mmc == NULL);
216 spin_lock_irqsave(&host->slot_lock, flags); 216 spin_lock_irqsave(&host->slot_lock, flags);
217 } 217 }
218 host->mmc = slot->mmc; 218 host->mmc = slot->mmc;
219 spin_unlock_irqrestore(&host->slot_lock, flags); 219 spin_unlock_irqrestore(&host->slot_lock, flags);
220 no_claim: 220 no_claim:
221 del_timer(&host->clk_timer); 221 del_timer(&host->clk_timer);
222 if (host->current_slot != slot || !claimed) 222 if (host->current_slot != slot || !claimed)
223 mmc_omap_fclk_offdelay(host->current_slot); 223 mmc_omap_fclk_offdelay(host->current_slot);
224 224
225 if (host->current_slot != slot) { 225 if (host->current_slot != slot) {
226 OMAP_MMC_WRITE(host, CON, slot->saved_con & 0xFC00); 226 OMAP_MMC_WRITE(host, CON, slot->saved_con & 0xFC00);
227 if (host->pdata->switch_slot != NULL) 227 if (host->pdata->switch_slot != NULL)
228 host->pdata->switch_slot(mmc_dev(slot->mmc), slot->id); 228 host->pdata->switch_slot(mmc_dev(slot->mmc), slot->id);
229 host->current_slot = slot; 229 host->current_slot = slot;
230 } 230 }
231 231
232 if (claimed) { 232 if (claimed) {
233 mmc_omap_fclk_enable(host, 1); 233 mmc_omap_fclk_enable(host, 1);
234 234
235 /* Doing the dummy read here seems to work around some bug 235 /* Doing the dummy read here seems to work around some bug
236 * at least in OMAP24xx silicon where the command would not 236 * at least in OMAP24xx silicon where the command would not
237 * start after writing the CMD register. Sigh. */ 237 * start after writing the CMD register. Sigh. */
238 OMAP_MMC_READ(host, CON); 238 OMAP_MMC_READ(host, CON);
239 239
240 OMAP_MMC_WRITE(host, CON, slot->saved_con); 240 OMAP_MMC_WRITE(host, CON, slot->saved_con);
241 } else 241 } else
242 mmc_omap_fclk_enable(host, 0); 242 mmc_omap_fclk_enable(host, 0);
243 } 243 }
244 244
245 static void mmc_omap_start_request(struct mmc_omap_host *host, 245 static void mmc_omap_start_request(struct mmc_omap_host *host,
246 struct mmc_request *req); 246 struct mmc_request *req);
247 247
248 static void mmc_omap_slot_release_work(struct work_struct *work) 248 static void mmc_omap_slot_release_work(struct work_struct *work)
249 { 249 {
250 struct mmc_omap_host *host = container_of(work, struct mmc_omap_host, 250 struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
251 slot_release_work); 251 slot_release_work);
252 struct mmc_omap_slot *next_slot = host->next_slot; 252 struct mmc_omap_slot *next_slot = host->next_slot;
253 struct mmc_request *rq; 253 struct mmc_request *rq;
254 254
255 host->next_slot = NULL; 255 host->next_slot = NULL;
256 mmc_omap_select_slot(next_slot, 1); 256 mmc_omap_select_slot(next_slot, 1);
257 257
258 rq = next_slot->mrq; 258 rq = next_slot->mrq;
259 next_slot->mrq = NULL; 259 next_slot->mrq = NULL;
260 mmc_omap_start_request(host, rq); 260 mmc_omap_start_request(host, rq);
261 } 261 }
262 262
263 static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled) 263 static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
264 { 264 {
265 struct mmc_omap_host *host = slot->host; 265 struct mmc_omap_host *host = slot->host;
266 unsigned long flags; 266 unsigned long flags;
267 int i; 267 int i;
268 268
269 BUG_ON(slot == NULL || host->mmc == NULL); 269 BUG_ON(slot == NULL || host->mmc == NULL);
270 270
271 if (clk_enabled) 271 if (clk_enabled)
272 /* Keeps clock running for at least 8 cycles on valid freq */ 272 /* Keeps clock running for at least 8 cycles on valid freq */
273 mod_timer(&host->clk_timer, jiffies + HZ/10); 273 mod_timer(&host->clk_timer, jiffies + HZ/10);
274 else { 274 else {
275 del_timer(&host->clk_timer); 275 del_timer(&host->clk_timer);
276 mmc_omap_fclk_offdelay(slot); 276 mmc_omap_fclk_offdelay(slot);
277 mmc_omap_fclk_enable(host, 0); 277 mmc_omap_fclk_enable(host, 0);
278 } 278 }
279 279
280 spin_lock_irqsave(&host->slot_lock, flags); 280 spin_lock_irqsave(&host->slot_lock, flags);
281 /* Check for any pending requests */ 281 /* Check for any pending requests */
282 for (i = 0; i < host->nr_slots; i++) { 282 for (i = 0; i < host->nr_slots; i++) {
283 struct mmc_omap_slot *new_slot; 283 struct mmc_omap_slot *new_slot;
284 284
285 if (host->slots[i] == NULL || host->slots[i]->mrq == NULL) 285 if (host->slots[i] == NULL || host->slots[i]->mrq == NULL)
286 continue; 286 continue;
287 287
288 BUG_ON(host->next_slot != NULL); 288 BUG_ON(host->next_slot != NULL);
289 new_slot = host->slots[i]; 289 new_slot = host->slots[i];
290 /* The current slot should not have a request in queue */ 290 /* The current slot should not have a request in queue */
291 BUG_ON(new_slot == host->current_slot); 291 BUG_ON(new_slot == host->current_slot);
292 292
293 host->next_slot = new_slot; 293 host->next_slot = new_slot;
294 host->mmc = new_slot->mmc; 294 host->mmc = new_slot->mmc;
295 spin_unlock_irqrestore(&host->slot_lock, flags); 295 spin_unlock_irqrestore(&host->slot_lock, flags);
296 queue_work(host->mmc_omap_wq, &host->slot_release_work); 296 queue_work(host->mmc_omap_wq, &host->slot_release_work);
297 return; 297 return;
298 } 298 }
299 299
300 host->mmc = NULL; 300 host->mmc = NULL;
301 wake_up(&host->slot_wq); 301 wake_up(&host->slot_wq);
302 spin_unlock_irqrestore(&host->slot_lock, flags); 302 spin_unlock_irqrestore(&host->slot_lock, flags);
303 } 303 }
304 304
305 static inline 305 static inline
306 int mmc_omap_cover_is_open(struct mmc_omap_slot *slot) 306 int mmc_omap_cover_is_open(struct mmc_omap_slot *slot)
307 { 307 {
308 if (slot->pdata->get_cover_state) 308 if (slot->pdata->get_cover_state)
309 return slot->pdata->get_cover_state(mmc_dev(slot->mmc), 309 return slot->pdata->get_cover_state(mmc_dev(slot->mmc),
310 slot->id); 310 slot->id);
311 return 0; 311 return 0;
312 } 312 }
313 313
314 static ssize_t 314 static ssize_t
315 mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr, 315 mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr,
316 char *buf) 316 char *buf)
317 { 317 {
318 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev); 318 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
319 struct mmc_omap_slot *slot = mmc_priv(mmc); 319 struct mmc_omap_slot *slot = mmc_priv(mmc);
320 320
321 return sprintf(buf, "%s\n", mmc_omap_cover_is_open(slot) ? "open" : 321 return sprintf(buf, "%s\n", mmc_omap_cover_is_open(slot) ? "open" :
322 "closed"); 322 "closed");
323 } 323 }
324 324
325 static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL); 325 static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
326 326
327 static ssize_t 327 static ssize_t
328 mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr, 328 mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr,
329 char *buf) 329 char *buf)
330 { 330 {
331 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev); 331 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
332 struct mmc_omap_slot *slot = mmc_priv(mmc); 332 struct mmc_omap_slot *slot = mmc_priv(mmc);
333 333
334 return sprintf(buf, "%s\n", slot->pdata->name); 334 return sprintf(buf, "%s\n", slot->pdata->name);
335 } 335 }
336 336
337 static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL); 337 static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL);
338 338
339 static void 339 static void
340 mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd) 340 mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
341 { 341 {
342 u32 cmdreg; 342 u32 cmdreg;
343 u32 resptype; 343 u32 resptype;
344 u32 cmdtype; 344 u32 cmdtype;
345 345
346 host->cmd = cmd; 346 host->cmd = cmd;
347 347
348 resptype = 0; 348 resptype = 0;
349 cmdtype = 0; 349 cmdtype = 0;
350 350
351 /* Our hardware needs to know exact type */ 351 /* Our hardware needs to know exact type */
352 switch (mmc_resp_type(cmd)) { 352 switch (mmc_resp_type(cmd)) {
353 case MMC_RSP_NONE: 353 case MMC_RSP_NONE:
354 break; 354 break;
355 case MMC_RSP_R1: 355 case MMC_RSP_R1:
356 case MMC_RSP_R1B: 356 case MMC_RSP_R1B:
357 /* resp 1, 1b, 6, 7 */ 357 /* resp 1, 1b, 6, 7 */
358 resptype = 1; 358 resptype = 1;
359 break; 359 break;
360 case MMC_RSP_R2: 360 case MMC_RSP_R2:
361 resptype = 2; 361 resptype = 2;
362 break; 362 break;
363 case MMC_RSP_R3: 363 case MMC_RSP_R3:
364 resptype = 3; 364 resptype = 3;
365 break; 365 break;
366 default: 366 default:
367 dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n", mmc_resp_type(cmd)); 367 dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n", mmc_resp_type(cmd));
368 break; 368 break;
369 } 369 }
370 370
371 if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) { 371 if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) {
372 cmdtype = OMAP_MMC_CMDTYPE_ADTC; 372 cmdtype = OMAP_MMC_CMDTYPE_ADTC;
373 } else if (mmc_cmd_type(cmd) == MMC_CMD_BC) { 373 } else if (mmc_cmd_type(cmd) == MMC_CMD_BC) {
374 cmdtype = OMAP_MMC_CMDTYPE_BC; 374 cmdtype = OMAP_MMC_CMDTYPE_BC;
375 } else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) { 375 } else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) {
376 cmdtype = OMAP_MMC_CMDTYPE_BCR; 376 cmdtype = OMAP_MMC_CMDTYPE_BCR;
377 } else { 377 } else {
378 cmdtype = OMAP_MMC_CMDTYPE_AC; 378 cmdtype = OMAP_MMC_CMDTYPE_AC;
379 } 379 }
380 380
381 cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12); 381 cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);
382 382
383 if (host->current_slot->bus_mode == MMC_BUSMODE_OPENDRAIN) 383 if (host->current_slot->bus_mode == MMC_BUSMODE_OPENDRAIN)
384 cmdreg |= 1 << 6; 384 cmdreg |= 1 << 6;
385 385
386 if (cmd->flags & MMC_RSP_BUSY) 386 if (cmd->flags & MMC_RSP_BUSY)
387 cmdreg |= 1 << 11; 387 cmdreg |= 1 << 11;
388 388
389 if (host->data && !(host->data->flags & MMC_DATA_WRITE)) 389 if (host->data && !(host->data->flags & MMC_DATA_WRITE))
390 cmdreg |= 1 << 15; 390 cmdreg |= 1 << 15;
391 391
392 mod_timer(&host->cmd_abort_timer, jiffies + HZ/2); 392 mod_timer(&host->cmd_abort_timer, jiffies + HZ/2);
393 393
394 OMAP_MMC_WRITE(host, CTO, 200); 394 OMAP_MMC_WRITE(host, CTO, 200);
395 OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff); 395 OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff);
396 OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16); 396 OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16);
397 OMAP_MMC_WRITE(host, IE, 397 OMAP_MMC_WRITE(host, IE,
398 OMAP_MMC_STAT_A_EMPTY | OMAP_MMC_STAT_A_FULL | 398 OMAP_MMC_STAT_A_EMPTY | OMAP_MMC_STAT_A_FULL |
399 OMAP_MMC_STAT_CMD_CRC | OMAP_MMC_STAT_CMD_TOUT | 399 OMAP_MMC_STAT_CMD_CRC | OMAP_MMC_STAT_CMD_TOUT |
400 OMAP_MMC_STAT_DATA_CRC | OMAP_MMC_STAT_DATA_TOUT | 400 OMAP_MMC_STAT_DATA_CRC | OMAP_MMC_STAT_DATA_TOUT |
401 OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR | 401 OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR |
402 OMAP_MMC_STAT_END_OF_DATA); 402 OMAP_MMC_STAT_END_OF_DATA);
403 OMAP_MMC_WRITE(host, CMD, cmdreg); 403 OMAP_MMC_WRITE(host, CMD, cmdreg);
404 } 404 }
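For orientation, the CMD word assembled above packs the opcode into bits 0-5 and the response and command types into bits 8-10 and 12-13, then ORs in open-drain (bit 6), busy-expected (bit 11) and data-read direction (bit 15). One worked value under exactly those rules:

	/* CMD17 (READ_SINGLE_BLOCK): opcode 17, R1 response (resptype 1),
	 * ADTC (cmdtype 3), and a read, so bit 15 is set:
	 *   17 | (1 << 8) | (3 << 12) | (1 << 15) = 0xb111 */
	cmdreg = 17 | (1 << 8) | (3 << 12) | (1 << 15);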
405 405
406 static void 406 static void
407 mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data, 407 mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
408 int abort) 408 int abort)
409 { 409 {
410 enum dma_data_direction dma_data_dir; 410 enum dma_data_direction dma_data_dir;
411 struct device *dev = mmc_dev(host->mmc); 411 struct device *dev = mmc_dev(host->mmc);
412 struct dma_chan *c; 412 struct dma_chan *c;
413 413
414 if (data->flags & MMC_DATA_WRITE) { 414 if (data->flags & MMC_DATA_WRITE) {
415 dma_data_dir = DMA_TO_DEVICE; 415 dma_data_dir = DMA_TO_DEVICE;
416 c = host->dma_tx; 416 c = host->dma_tx;
417 } else { 417 } else {
418 dma_data_dir = DMA_FROM_DEVICE; 418 dma_data_dir = DMA_FROM_DEVICE;
419 c = host->dma_rx; 419 c = host->dma_rx;
420 } 420 }
421 if (c) { 421 if (c) {
422 if (data->error) { 422 if (data->error) {
423 dmaengine_terminate_all(c); 423 dmaengine_terminate_all(c);
424 /* Claim nothing transferred on error... */ 424 /* Claim nothing transferred on error... */
425 data->bytes_xfered = 0; 425 data->bytes_xfered = 0;
426 } 426 }
427 dev = c->device->dev; 427 dev = c->device->dev;
428 } 428 }
429 dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir); 429 dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir);
430 } 430 }
431 431
432 static void mmc_omap_send_stop_work(struct work_struct *work) 432 static void mmc_omap_send_stop_work(struct work_struct *work)
433 { 433 {
434 struct mmc_omap_host *host = container_of(work, struct mmc_omap_host, 434 struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
435 send_stop_work); 435 send_stop_work);
436 struct mmc_omap_slot *slot = host->current_slot; 436 struct mmc_omap_slot *slot = host->current_slot;
437 struct mmc_data *data = host->stop_data; 437 struct mmc_data *data = host->stop_data;
438 unsigned long tick_ns; 438 unsigned long tick_ns;
439 439
440 tick_ns = (1000000000 + slot->fclk_freq - 1)/slot->fclk_freq; 440 tick_ns = (1000000000 + slot->fclk_freq - 1)/slot->fclk_freq;
441 ndelay(8*tick_ns); 441 ndelay(8*tick_ns);
442 442
443 mmc_omap_start_command(host, data->stop); 443 mmc_omap_start_command(host, data->stop);
444 } 444 }
445 445
446 static void 446 static void
447 mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data) 447 mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
448 { 448 {
449 if (host->dma_in_use) 449 if (host->dma_in_use)
450 mmc_omap_release_dma(host, data, data->error); 450 mmc_omap_release_dma(host, data, data->error);
451 451
452 host->data = NULL; 452 host->data = NULL;
453 host->sg_len = 0; 453 host->sg_len = 0;
454 454
455 /* NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing 455 /* NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
456 * dozens of requests until the card finishes writing data. 456 * dozens of requests until the card finishes writing data.
457 * It'd be cheaper to just wait till an EOFB interrupt arrives... 457 * It'd be cheaper to just wait till an EOFB interrupt arrives...
458 */ 458 */
459 459
460 if (!data->stop) { 460 if (!data->stop) {
461 struct mmc_host *mmc; 461 struct mmc_host *mmc;
462 462
463 host->mrq = NULL; 463 host->mrq = NULL;
464 mmc = host->mmc; 464 mmc = host->mmc;
465 mmc_omap_release_slot(host->current_slot, 1); 465 mmc_omap_release_slot(host->current_slot, 1);
466 mmc_request_done(mmc, data->mrq); 466 mmc_request_done(mmc, data->mrq);
467 return; 467 return;
468 } 468 }
469 469
470 host->stop_data = data; 470 host->stop_data = data;
471 queue_work(host->mmc_omap_wq, &host->send_stop_work); 471 queue_work(host->mmc_omap_wq, &host->send_stop_work);
472 } 472 }
473 473
474 static void 474 static void
475 mmc_omap_send_abort(struct mmc_omap_host *host, int maxloops) 475 mmc_omap_send_abort(struct mmc_omap_host *host, int maxloops)
476 { 476 {
477 struct mmc_omap_slot *slot = host->current_slot; 477 struct mmc_omap_slot *slot = host->current_slot;
478 unsigned int restarts, passes, timeout; 478 unsigned int restarts, passes, timeout;
479 u16 stat = 0; 479 u16 stat = 0;
480 480
481 /* Sending abort takes 80 clocks. Have some extra and round up */ 481 /* Sending abort takes 80 clocks. Have some extra and round up */
482 timeout = (120*1000000 + slot->fclk_freq - 1)/slot->fclk_freq; 482 timeout = (120*1000000 + slot->fclk_freq - 1)/slot->fclk_freq;
483 restarts = 0; 483 restarts = 0;
484 while (restarts < maxloops) { 484 while (restarts < maxloops) {
485 OMAP_MMC_WRITE(host, STAT, 0xFFFF); 485 OMAP_MMC_WRITE(host, STAT, 0xFFFF);
486 OMAP_MMC_WRITE(host, CMD, (3 << 12) | (1 << 7)); 486 OMAP_MMC_WRITE(host, CMD, (3 << 12) | (1 << 7));
487 487
488 passes = 0; 488 passes = 0;
489 while (passes < timeout) { 489 while (passes < timeout) {
490 stat = OMAP_MMC_READ(host, STAT); 490 stat = OMAP_MMC_READ(host, STAT);
491 if (stat & OMAP_MMC_STAT_END_OF_CMD) 491 if (stat & OMAP_MMC_STAT_END_OF_CMD)
492 goto out; 492 goto out;
493 udelay(1); 493 udelay(1);
494 passes++; 494 passes++;
495 } 495 }
496 496
497 restarts++; 497 restarts++;
498 } 498 }
499 out: 499 out:
500 OMAP_MMC_WRITE(host, STAT, stat); 500 OMAP_MMC_WRITE(host, STAT, stat);
501 } 501 }
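The timeout above turns a 120-cycle budget (the abort itself takes 80 clocks; the rest is headroom) into whole microseconds, which is why the inner loop pairs each STAT poll with udelay(1). Illustrative numbers:

	/* 120 cycles at fclk_freq Hz is 120e6 / fclk_freq microseconds,
	 * rounded up: at 24 MHz, (120000000 + 24000000 - 1) / 24000000 = 5,
	 * so each restart polls STAT for at most 5 us. */
	timeout = (120 * 1000000 + fclk_freq - 1) / fclk_freq;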
502 502
503 static void 503 static void
504 mmc_omap_abort_xfer(struct mmc_omap_host *host, struct mmc_data *data) 504 mmc_omap_abort_xfer(struct mmc_omap_host *host, struct mmc_data *data)
505 { 505 {
506 if (host->dma_in_use) 506 if (host->dma_in_use)
507 mmc_omap_release_dma(host, data, 1); 507 mmc_omap_release_dma(host, data, 1);
508 508
509 host->data = NULL; 509 host->data = NULL;
510 host->sg_len = 0; 510 host->sg_len = 0;
511 511
512 mmc_omap_send_abort(host, 10000); 512 mmc_omap_send_abort(host, 10000);
513 } 513 }
514 514
515 static void 515 static void
516 mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data) 516 mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
517 { 517 {
518 unsigned long flags; 518 unsigned long flags;
519 int done; 519 int done;
520 520
521 if (!host->dma_in_use) { 521 if (!host->dma_in_use) {
522 mmc_omap_xfer_done(host, data); 522 mmc_omap_xfer_done(host, data);
523 return; 523 return;
524 } 524 }
525 done = 0; 525 done = 0;
526 spin_lock_irqsave(&host->dma_lock, flags); 526 spin_lock_irqsave(&host->dma_lock, flags);
527 if (host->dma_done) 527 if (host->dma_done)
528 done = 1; 528 done = 1;
529 else 529 else
530 host->brs_received = 1; 530 host->brs_received = 1;
531 spin_unlock_irqrestore(&host->dma_lock, flags); 531 spin_unlock_irqrestore(&host->dma_lock, flags);
532 if (done) 532 if (done)
533 mmc_omap_xfer_done(host, data); 533 mmc_omap_xfer_done(host, data);
534 } 534 }
535 535
536 static void 536 static void
537 mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data) 537 mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
538 { 538 {
539 unsigned long flags; 539 unsigned long flags;
540 int done; 540 int done;
541 541
542 done = 0; 542 done = 0;
543 spin_lock_irqsave(&host->dma_lock, flags); 543 spin_lock_irqsave(&host->dma_lock, flags);
544 if (host->brs_received) 544 if (host->brs_received)
545 done = 1; 545 done = 1;
546 else 546 else
547 host->dma_done = 1; 547 host->dma_done = 1;
548 spin_unlock_irqrestore(&host->dma_lock, flags); 548 spin_unlock_irqrestore(&host->dma_lock, flags);
549 if (done) 549 if (done)
550 mmc_omap_xfer_done(host, data); 550 mmc_omap_xfer_done(host, data);
551 } 551 }
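mmc_omap_end_of_data() and mmc_omap_dma_done() above form a two-sided rendezvous: a DMA transfer only completes once both the controller's block-received status and the DMA callback have arrived, in either order, with dma_lock making the check-and-set atomic. The pattern reduced to its core, with generic flag names:

	/* Each arrival checks the peer's flag under the lock; whoever
	 * comes second performs the completion exactly once. */
	spin_lock_irqsave(&host->dma_lock, flags);
	if (peer_done)
		done = 1;		/* we are second: finish the transfer */
	else
		my_done = 1;		/* we are first: record it and return */
	spin_unlock_irqrestore(&host->dma_lock, flags);
	if (done)
		mmc_omap_xfer_done(host, data);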
552 552
553 static void 553 static void
554 mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd) 554 mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
555 { 555 {
556 host->cmd = NULL; 556 host->cmd = NULL;
557 557
558 del_timer(&host->cmd_abort_timer); 558 del_timer(&host->cmd_abort_timer);
559 559
560 if (cmd->flags & MMC_RSP_PRESENT) { 560 if (cmd->flags & MMC_RSP_PRESENT) {
561 if (cmd->flags & MMC_RSP_136) { 561 if (cmd->flags & MMC_RSP_136) {
562 /* response type 2 */ 562 /* response type 2 */
563 cmd->resp[3] = 563 cmd->resp[3] =
564 OMAP_MMC_READ(host, RSP0) | 564 OMAP_MMC_READ(host, RSP0) |
565 (OMAP_MMC_READ(host, RSP1) << 16); 565 (OMAP_MMC_READ(host, RSP1) << 16);
566 cmd->resp[2] = 566 cmd->resp[2] =
567 OMAP_MMC_READ(host, RSP2) | 567 OMAP_MMC_READ(host, RSP2) |
568 (OMAP_MMC_READ(host, RSP3) << 16); 568 (OMAP_MMC_READ(host, RSP3) << 16);
569 cmd->resp[1] = 569 cmd->resp[1] =
570 OMAP_MMC_READ(host, RSP4) | 570 OMAP_MMC_READ(host, RSP4) |
571 (OMAP_MMC_READ(host, RSP5) << 16); 571 (OMAP_MMC_READ(host, RSP5) << 16);
572 cmd->resp[0] = 572 cmd->resp[0] =
573 OMAP_MMC_READ(host, RSP6) | 573 OMAP_MMC_READ(host, RSP6) |
574 (OMAP_MMC_READ(host, RSP7) << 16); 574 (OMAP_MMC_READ(host, RSP7) << 16);
575 } else { 575 } else {
576 /* response types 1, 1b, 3, 4, 5, 6 */ 576 /* response types 1, 1b, 3, 4, 5, 6 */
577 cmd->resp[0] = 577 cmd->resp[0] =
578 OMAP_MMC_READ(host, RSP6) | 578 OMAP_MMC_READ(host, RSP6) |
579 (OMAP_MMC_READ(host, RSP7) << 16); 579 (OMAP_MMC_READ(host, RSP7) << 16);
580 } 580 }
581 } 581 }
582 582
583 if (host->data == NULL || cmd->error) { 583 if (host->data == NULL || cmd->error) {
584 struct mmc_host *mmc; 584 struct mmc_host *mmc;
585 585
586 if (host->data != NULL) 586 if (host->data != NULL)
587 mmc_omap_abort_xfer(host, host->data); 587 mmc_omap_abort_xfer(host, host->data);
588 host->mrq = NULL; 588 host->mrq = NULL;
589 mmc = host->mmc; 589 mmc = host->mmc;
590 mmc_omap_release_slot(host->current_slot, 1); 590 mmc_omap_release_slot(host->current_slot, 1);
591 mmc_request_done(mmc, cmd->mrq); 591 mmc_request_done(mmc, cmd->mrq);
592 } 592 }
593 } 593 }
594 594
595 /* 595 /*
596 * Abort stuck command. Can occur when card is removed while it is being 596 * Abort stuck command. Can occur when card is removed while it is being
597 * read. 597 * read.
598 */ 598 */
599 static void mmc_omap_abort_command(struct work_struct *work) 599 static void mmc_omap_abort_command(struct work_struct *work)
600 { 600 {
601 struct mmc_omap_host *host = container_of(work, struct mmc_omap_host, 601 struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
602 cmd_abort_work); 602 cmd_abort_work);
603 BUG_ON(!host->cmd); 603 BUG_ON(!host->cmd);
604 604
605 dev_dbg(mmc_dev(host->mmc), "Aborting stuck command CMD%d\n", 605 dev_dbg(mmc_dev(host->mmc), "Aborting stuck command CMD%d\n",
606 host->cmd->opcode); 606 host->cmd->opcode);
607 607
608 if (host->cmd->error == 0) 608 if (host->cmd->error == 0)
609 host->cmd->error = -ETIMEDOUT; 609 host->cmd->error = -ETIMEDOUT;
610 610
611 if (host->data == NULL) { 611 if (host->data == NULL) {
612 struct mmc_command *cmd; 612 struct mmc_command *cmd;
613 struct mmc_host *mmc; 613 struct mmc_host *mmc;
614 614
615 cmd = host->cmd; 615 cmd = host->cmd;
616 host->cmd = NULL; 616 host->cmd = NULL;
617 mmc_omap_send_abort(host, 10000); 617 mmc_omap_send_abort(host, 10000);
618 618
619 host->mrq = NULL; 619 host->mrq = NULL;
620 mmc = host->mmc; 620 mmc = host->mmc;
621 mmc_omap_release_slot(host->current_slot, 1); 621 mmc_omap_release_slot(host->current_slot, 1);
622 mmc_request_done(mmc, cmd->mrq); 622 mmc_request_done(mmc, cmd->mrq);
623 } else 623 } else
624 mmc_omap_cmd_done(host, host->cmd); 624 mmc_omap_cmd_done(host, host->cmd);
625 625
626 host->abort = 0; 626 host->abort = 0;
627 enable_irq(host->irq); 627 enable_irq(host->irq);
628 } 628 }
629 629
630 static void 630 static void
631 mmc_omap_cmd_timer(unsigned long data) 631 mmc_omap_cmd_timer(unsigned long data)
632 { 632 {
633 struct mmc_omap_host *host = (struct mmc_omap_host *) data; 633 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
634 unsigned long flags; 634 unsigned long flags;
635 635
636 spin_lock_irqsave(&host->slot_lock, flags); 636 spin_lock_irqsave(&host->slot_lock, flags);
637 if (host->cmd != NULL && !host->abort) { 637 if (host->cmd != NULL && !host->abort) {
638 OMAP_MMC_WRITE(host, IE, 0); 638 OMAP_MMC_WRITE(host, IE, 0);
639 disable_irq(host->irq); 639 disable_irq(host->irq);
640 host->abort = 1; 640 host->abort = 1;
641 queue_work(host->mmc_omap_wq, &host->cmd_abort_work); 641 queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
642 } 642 }
643 spin_unlock_irqrestore(&host->slot_lock, flags); 643 spin_unlock_irqrestore(&host->slot_lock, flags);
644 } 644 }
645 645
646 /* PIO only */ 646 /* PIO only */
647 static void 647 static void
648 mmc_omap_sg_to_buf(struct mmc_omap_host *host) 648 mmc_omap_sg_to_buf(struct mmc_omap_host *host)
649 { 649 {
650 struct scatterlist *sg; 650 struct scatterlist *sg;
651 651
652 sg = host->data->sg + host->sg_idx; 652 sg = host->data->sg + host->sg_idx;
653 host->buffer_bytes_left = sg->length; 653 host->buffer_bytes_left = sg->length;
654 host->buffer = sg_virt(sg); 654 host->buffer = sg_virt(sg);
655 if (host->buffer_bytes_left > host->total_bytes_left) 655 if (host->buffer_bytes_left > host->total_bytes_left)
656 host->buffer_bytes_left = host->total_bytes_left; 656 host->buffer_bytes_left = host->total_bytes_left;
657 } 657 }
658 658
659 static void 659 static void
660 mmc_omap_clk_timer(unsigned long data) 660 mmc_omap_clk_timer(unsigned long data)
661 { 661 {
662 struct mmc_omap_host *host = (struct mmc_omap_host *) data; 662 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
663 663
664 mmc_omap_fclk_enable(host, 0); 664 mmc_omap_fclk_enable(host, 0);
665 } 665 }
666 666
667 /* PIO only */ 667 /* PIO only */
668 static void 668 static void
669 mmc_omap_xfer_data(struct mmc_omap_host *host, int write) 669 mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
670 { 670 {
671 int n; 671 int n, nwords;
672 672
673 if (host->buffer_bytes_left == 0) { 673 if (host->buffer_bytes_left == 0) {
674 host->sg_idx++; 674 host->sg_idx++;
675 BUG_ON(host->sg_idx == host->sg_len); 675 BUG_ON(host->sg_idx == host->sg_len);
676 mmc_omap_sg_to_buf(host); 676 mmc_omap_sg_to_buf(host);
677 } 677 }
678 n = 64; 678 n = 64;
679 if (n > host->buffer_bytes_left) 679 if (n > host->buffer_bytes_left)
680 n = host->buffer_bytes_left; 680 n = host->buffer_bytes_left;
681
682 nwords = n / 2;
683 nwords += n & 1; /* handle odd number of bytes to transfer */
684
681 host->buffer_bytes_left -= n; 685 host->buffer_bytes_left -= n;
682 host->total_bytes_left -= n; 686 host->total_bytes_left -= n;
683 host->data->bytes_xfered += n; 687 host->data->bytes_xfered += n;
684 688
685 if (write) { 689 if (write) {
686 __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); 690 __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA),
691 host->buffer, nwords);
687 } else { 692 } else {
688 __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); 693 __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA),
694 host->buffer, nwords);
689 } 695 }
696
697 host->buffer += nwords;
690 } 698 }
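This hunk is the "fix broken PIO mode" change from this merge. __raw_writesw() and __raw_readsw() take a count of 16-bit words, but the old code passed them the byte count n, moving twice the intended amount of data, and never advanced host->buffer, so every chunk after the first was transferred from or to the wrong place. The corrected accounting in isolation (fifo below stands in for the DATA register address):

	nwords = n / 2;
	nwords += n & 1;	/* an odd trailing byte still costs one word */
	__raw_writesw(fifo, host->buffer, nwords);	/* count is in words */
	host->buffer += nwords;	/* u16 *, so this advances n bytes (rounded up) */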
691 699
692 static inline void mmc_omap_report_irq(u16 status) 700 static inline void mmc_omap_report_irq(u16 status)
693 { 701 {
694 static const char *mmc_omap_status_bits[] = { 702 static const char *mmc_omap_status_bits[] = {
695 "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO", 703 "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
696 "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR" 704 "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
697 }; 705 };
698 int i, c = 0; 706 int i, c = 0;
699 707
700 for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++) 708 for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
701 if (status & (1 << i)) { 709 if (status & (1 << i)) {
702 if (c) 710 if (c)
703 printk(" "); 711 printk(" ");
704 printk("%s", mmc_omap_status_bits[i]); 712 printk("%s", mmc_omap_status_bits[i]);
705 c++; 713 c++;
706 } 714 }
707 } 715 }
708 716
709 static irqreturn_t mmc_omap_irq(int irq, void *dev_id) 717 static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
710 { 718 {
711 struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id; 719 struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id;
712 u16 status; 720 u16 status;
713 int end_command; 721 int end_command;
714 int end_transfer; 722 int end_transfer;
715 int transfer_error, cmd_error; 723 int transfer_error, cmd_error;
716 724
717 if (host->cmd == NULL && host->data == NULL) { 725 if (host->cmd == NULL && host->data == NULL) {
718 status = OMAP_MMC_READ(host, STAT); 726 status = OMAP_MMC_READ(host, STAT);
719 dev_info(mmc_dev(host->slots[0]->mmc), 727 dev_info(mmc_dev(host->slots[0]->mmc),
720 "Spurious IRQ 0x%04x\n", status); 728 "Spurious IRQ 0x%04x\n", status);
721 if (status != 0) { 729 if (status != 0) {
722 OMAP_MMC_WRITE(host, STAT, status); 730 OMAP_MMC_WRITE(host, STAT, status);
723 OMAP_MMC_WRITE(host, IE, 0); 731 OMAP_MMC_WRITE(host, IE, 0);
724 } 732 }
725 return IRQ_HANDLED; 733 return IRQ_HANDLED;
726 } 734 }
727 735
728 end_command = 0; 736 end_command = 0;
729 end_transfer = 0; 737 end_transfer = 0;
730 transfer_error = 0; 738 transfer_error = 0;
731 cmd_error = 0; 739 cmd_error = 0;
732 740
733 while ((status = OMAP_MMC_READ(host, STAT)) != 0) { 741 while ((status = OMAP_MMC_READ(host, STAT)) != 0) {
734 int cmd; 742 int cmd;
735 743
736 OMAP_MMC_WRITE(host, STAT, status); 744 OMAP_MMC_WRITE(host, STAT, status);
737 if (host->cmd != NULL) 745 if (host->cmd != NULL)
738 cmd = host->cmd->opcode; 746 cmd = host->cmd->opcode;
739 else 747 else
740 cmd = -1; 748 cmd = -1;
741 #ifdef CONFIG_MMC_DEBUG 749 #ifdef CONFIG_MMC_DEBUG
742 dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ", 750 dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
743 status, cmd); 751 status, cmd);
744 mmc_omap_report_irq(status); 752 mmc_omap_report_irq(status);
745 printk("\n"); 753 printk("\n");
746 #endif 754 #endif
747 if (host->total_bytes_left) { 755 if (host->total_bytes_left) {
748 if ((status & OMAP_MMC_STAT_A_FULL) || 756 if ((status & OMAP_MMC_STAT_A_FULL) ||
749 (status & OMAP_MMC_STAT_END_OF_DATA)) 757 (status & OMAP_MMC_STAT_END_OF_DATA))
750 mmc_omap_xfer_data(host, 0); 758 mmc_omap_xfer_data(host, 0);
751 if (status & OMAP_MMC_STAT_A_EMPTY) 759 if (status & OMAP_MMC_STAT_A_EMPTY)
752 mmc_omap_xfer_data(host, 1); 760 mmc_omap_xfer_data(host, 1);
753 } 761 }
754 762
755 if (status & OMAP_MMC_STAT_END_OF_DATA) 763 if (status & OMAP_MMC_STAT_END_OF_DATA)
756 end_transfer = 1; 764 end_transfer = 1;
757 765
758 if (status & OMAP_MMC_STAT_DATA_TOUT) { 766 if (status & OMAP_MMC_STAT_DATA_TOUT) {
759 dev_dbg(mmc_dev(host->mmc), "data timeout (CMD%d)\n", 767 dev_dbg(mmc_dev(host->mmc), "data timeout (CMD%d)\n",
760 cmd); 768 cmd);
761 if (host->data) { 769 if (host->data) {
762 host->data->error = -ETIMEDOUT; 770 host->data->error = -ETIMEDOUT;
763 transfer_error = 1; 771 transfer_error = 1;
764 } 772 }
765 } 773 }
766 774
767 if (status & OMAP_MMC_STAT_DATA_CRC) { 775 if (status & OMAP_MMC_STAT_DATA_CRC) {
768 if (host->data) { 776 if (host->data) {
769 host->data->error = -EILSEQ; 777 host->data->error = -EILSEQ;
770 dev_dbg(mmc_dev(host->mmc), 778 dev_dbg(mmc_dev(host->mmc),
771 "data CRC error, bytes left %d\n", 779 "data CRC error, bytes left %d\n",
772 host->total_bytes_left); 780 host->total_bytes_left);
773 transfer_error = 1; 781 transfer_error = 1;
774 } else { 782 } else {
775 dev_dbg(mmc_dev(host->mmc), "data CRC error\n"); 783 dev_dbg(mmc_dev(host->mmc), "data CRC error\n");
776 } 784 }
777 } 785 }
778 786
779 if (status & OMAP_MMC_STAT_CMD_TOUT) { 787 if (status & OMAP_MMC_STAT_CMD_TOUT) {
780 /* Timeouts are routine with some commands */ 788 /* Timeouts are routine with some commands */
781 if (host->cmd) { 789 if (host->cmd) {
782 struct mmc_omap_slot *slot = 790 struct mmc_omap_slot *slot =
783 host->current_slot; 791 host->current_slot;
784 if (slot == NULL || 792 if (slot == NULL ||
785 !mmc_omap_cover_is_open(slot)) 793 !mmc_omap_cover_is_open(slot))
786 dev_err(mmc_dev(host->mmc), 794 dev_err(mmc_dev(host->mmc),
787 "command timeout (CMD%d)\n", 795 "command timeout (CMD%d)\n",
788 cmd); 796 cmd);
789 host->cmd->error = -ETIMEDOUT; 797 host->cmd->error = -ETIMEDOUT;
790 end_command = 1; 798 end_command = 1;
791 cmd_error = 1; 799 cmd_error = 1;
792 } 800 }
793 } 801 }
794 802
795 if (status & OMAP_MMC_STAT_CMD_CRC) { 803 if (status & OMAP_MMC_STAT_CMD_CRC) {
796 if (host->cmd) { 804 if (host->cmd) {
797 dev_err(mmc_dev(host->mmc), 805 dev_err(mmc_dev(host->mmc),
798 "command CRC error (CMD%d, arg 0x%08x)\n", 806 "command CRC error (CMD%d, arg 0x%08x)\n",
799 cmd, host->cmd->arg); 807 cmd, host->cmd->arg);
800 host->cmd->error = -EILSEQ; 808 host->cmd->error = -EILSEQ;
801 end_command = 1; 809 end_command = 1;
802 cmd_error = 1; 810 cmd_error = 1;
803 } else 811 } else
804 dev_err(mmc_dev(host->mmc), 812 dev_err(mmc_dev(host->mmc),
805 "command CRC error without cmd?\n"); 813 "command CRC error without cmd?\n");
806 } 814 }
807 815
808 if (status & OMAP_MMC_STAT_CARD_ERR) { 816 if (status & OMAP_MMC_STAT_CARD_ERR) {
809 dev_dbg(mmc_dev(host->mmc), 817 dev_dbg(mmc_dev(host->mmc),
810 "ignoring card status error (CMD%d)\n", 818 "ignoring card status error (CMD%d)\n",
811 cmd); 819 cmd);
812 end_command = 1; 820 end_command = 1;
813 } 821 }
814 822
815 /* 823 /*
816 * NOTE: On 1610 the END_OF_CMD may come too early when 824 * NOTE: On 1610 the END_OF_CMD may come too early when
817 * starting a write 825 * starting a write
818 */ 826 */
819 if ((status & OMAP_MMC_STAT_END_OF_CMD) && 827 if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
820 (!(status & OMAP_MMC_STAT_A_EMPTY))) { 828 (!(status & OMAP_MMC_STAT_A_EMPTY))) {
821 end_command = 1; 829 end_command = 1;
822 } 830 }
823 } 831 }
824 832
825 if (cmd_error && host->data) { 833 if (cmd_error && host->data) {
826 del_timer(&host->cmd_abort_timer); 834 del_timer(&host->cmd_abort_timer);
827 host->abort = 1; 835 host->abort = 1;
828 OMAP_MMC_WRITE(host, IE, 0); 836 OMAP_MMC_WRITE(host, IE, 0);
829 disable_irq_nosync(host->irq); 837 disable_irq_nosync(host->irq);
830 queue_work(host->mmc_omap_wq, &host->cmd_abort_work); 838 queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
831 return IRQ_HANDLED; 839 return IRQ_HANDLED;
832 } 840 }
833 841
834 if (end_command && host->cmd) 842 if (end_command && host->cmd)
835 mmc_omap_cmd_done(host, host->cmd); 843 mmc_omap_cmd_done(host, host->cmd);
836 if (host->data != NULL) { 844 if (host->data != NULL) {
837 if (transfer_error) 845 if (transfer_error)
838 mmc_omap_xfer_done(host, host->data); 846 mmc_omap_xfer_done(host, host->data);
839 else if (end_transfer) 847 else if (end_transfer)
840 mmc_omap_end_of_data(host, host->data); 848 mmc_omap_end_of_data(host, host->data);
841 } 849 }
842 850
843 return IRQ_HANDLED; 851 return IRQ_HANDLED;
844 } 852 }
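The handler drains STAT in a loop because new events can latch while earlier ones are being decoded, and each pass acknowledges exactly the bits it observed (the register is write-one-to-clear). The skeleton of that idiom, using this file's own accessors:

	u16 status;

	while ((status = OMAP_MMC_READ(host, STAT)) != 0) {
		OMAP_MMC_WRITE(host, STAT, status);	/* ack only what we saw */
		/* ...decode status bits, as the body above does... */
	}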
845 853
846 void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed) 854 void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed)
847 { 855 {
848 int cover_open; 856 int cover_open;
849 struct mmc_omap_host *host = dev_get_drvdata(dev); 857 struct mmc_omap_host *host = dev_get_drvdata(dev);
850 struct mmc_omap_slot *slot = host->slots[num]; 858 struct mmc_omap_slot *slot = host->slots[num];
851 859
852 BUG_ON(num >= host->nr_slots); 860 BUG_ON(num >= host->nr_slots);
853 861
854 /* Other subsystems can call in here before we're initialised. */ 862 /* Other subsystems can call in here before we're initialised. */
855 if (host->nr_slots == 0 || !host->slots[num]) 863 if (host->nr_slots == 0 || !host->slots[num])
856 return; 864 return;
857 865
858 cover_open = mmc_omap_cover_is_open(slot); 866 cover_open = mmc_omap_cover_is_open(slot);
859 if (cover_open != slot->cover_open) { 867 if (cover_open != slot->cover_open) {
860 slot->cover_open = cover_open; 868 slot->cover_open = cover_open;
861 sysfs_notify(&slot->mmc->class_dev.kobj, NULL, "cover_switch"); 869 sysfs_notify(&slot->mmc->class_dev.kobj, NULL, "cover_switch");
862 } 870 }
863 871
864 tasklet_hi_schedule(&slot->cover_tasklet); 872 tasklet_hi_schedule(&slot->cover_tasklet);
865 } 873 }
866 874
867 static void mmc_omap_cover_timer(unsigned long arg) 875 static void mmc_omap_cover_timer(unsigned long arg)
868 { 876 {
869 struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg; 877 struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg;
870 tasklet_schedule(&slot->cover_tasklet); 878 tasklet_schedule(&slot->cover_tasklet);
871 } 879 }
872 880
873 static void mmc_omap_cover_handler(unsigned long param) 881 static void mmc_omap_cover_handler(unsigned long param)
874 { 882 {
875 struct mmc_omap_slot *slot = (struct mmc_omap_slot *)param; 883 struct mmc_omap_slot *slot = (struct mmc_omap_slot *)param;
876 int cover_open = mmc_omap_cover_is_open(slot); 884 int cover_open = mmc_omap_cover_is_open(slot);
877 885
878 mmc_detect_change(slot->mmc, 0); 886 mmc_detect_change(slot->mmc, 0);
879 if (!cover_open) 887 if (!cover_open)
880 return; 888 return;
881 889
882 /* 890 /*
883 * If no card is inserted, we postpone polling until 891 * If no card is inserted, we postpone polling until
884 * the cover has been closed. 892 * the cover has been closed.
885 */ 893 */
886 if (slot->mmc->card == NULL || !mmc_card_present(slot->mmc->card)) 894 if (slot->mmc->card == NULL || !mmc_card_present(slot->mmc->card))
887 return; 895 return;
888 896
889 mod_timer(&slot->cover_timer, 897 mod_timer(&slot->cover_timer,
890 jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY)); 898 jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY));
891 } 899 }
892 900
893 static void mmc_omap_dma_callback(void *priv) 901 static void mmc_omap_dma_callback(void *priv)
894 { 902 {
895 struct mmc_omap_host *host = priv; 903 struct mmc_omap_host *host = priv;
896 struct mmc_data *data = host->data; 904 struct mmc_data *data = host->data;
897 905
898 /* If we got to the end of DMA, assume everything went well */ 906 /* If we got to the end of DMA, assume everything went well */
899 data->bytes_xfered += data->blocks * data->blksz; 907 data->bytes_xfered += data->blocks * data->blksz;
900 908
901 mmc_omap_dma_done(host, data); 909 mmc_omap_dma_done(host, data);
902 } 910 }
903 911
904 static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req) 912 static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
905 { 913 {
906 u16 reg; 914 u16 reg;
907 915
908 reg = OMAP_MMC_READ(host, SDIO); 916 reg = OMAP_MMC_READ(host, SDIO);
909 reg &= ~(1 << 5); 917 reg &= ~(1 << 5);
910 OMAP_MMC_WRITE(host, SDIO, reg); 918 OMAP_MMC_WRITE(host, SDIO, reg);
911 /* Set maximum timeout */ 919 /* Set maximum timeout */
912 OMAP_MMC_WRITE(host, CTO, 0xff); 920 OMAP_MMC_WRITE(host, CTO, 0xff);
913 } 921 }
914 922
915 static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req) 923 static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
916 { 924 {
917 unsigned int timeout, cycle_ns; 925 unsigned int timeout, cycle_ns;
918 u16 reg; 926 u16 reg;
919 927
920 cycle_ns = 1000000000 / host->current_slot->fclk_freq; 928 cycle_ns = 1000000000 / host->current_slot->fclk_freq;
921 timeout = req->data->timeout_ns / cycle_ns; 929 timeout = req->data->timeout_ns / cycle_ns;
922 timeout += req->data->timeout_clks; 930 timeout += req->data->timeout_clks;
923 931
924 /* Check if we need to use timeout multiplier register */ 932 /* Check if we need to use timeout multiplier register */
925 reg = OMAP_MMC_READ(host, SDIO); 933 reg = OMAP_MMC_READ(host, SDIO);
926 if (timeout > 0xffff) { 934 if (timeout > 0xffff) {
927 reg |= (1 << 5); 935 reg |= (1 << 5);
928 timeout /= 1024; 936 timeout /= 1024;
929 } else 937 } else
930 reg &= ~(1 << 5); 938 reg &= ~(1 << 5);
931 OMAP_MMC_WRITE(host, SDIO, reg); 939 OMAP_MMC_WRITE(host, SDIO, reg);
932 OMAP_MMC_WRITE(host, DTO, timeout); 940 OMAP_MMC_WRITE(host, DTO, timeout);
933 } 941 }
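
To make the conversion in set_data_timeout() concrete, here is a minimal user-space sketch of the same arithmetic; the wrapper names are illustrative, and only the formula (fclk cycles = timeout_ns / cycle_ns + timeout_clks, divided by 1024 once it overflows the 16-bit DTO field, with bit 5 of SDIO selecting the multiplier) comes from the code above:

#include <stdio.h>

/* Sketch of the DTO computation: an illustrative stand-in for the
 * driver logic above, not kernel code. */
static unsigned int dto_cycles(unsigned int fclk_hz, unsigned int timeout_ns,
                               unsigned int timeout_clks, int *use_mult)
{
        unsigned int cycle_ns = 1000000000u / fclk_hz;
        unsigned int timeout = timeout_ns / cycle_ns + timeout_clks;

        *use_mult = timeout > 0xffff;   /* would set bit 5 of SDIO */
        if (*use_mult)
                timeout /= 1024;
        return timeout;
}

int main(void)
{
        int mult;
        /* e.g. a 300 ms card timeout at a 24 MHz functional clock */
        unsigned int dto = dto_cycles(24000000, 300000000, 0, &mult);

        printf("DTO=%u multiplier=%s\n", dto, mult ? "on" : "off");
        return 0;
}

With those inputs it prints DTO=7145 with the multiplier on.
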
934 942
935 static void 943 static void
936 mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req) 944 mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
937 { 945 {
938 struct mmc_data *data = req->data; 946 struct mmc_data *data = req->data;
939 int i, use_dma, block_size; 947 int i, use_dma, block_size;
940 unsigned sg_len; 948 unsigned sg_len;
941 949
942 host->data = data; 950 host->data = data;
943 if (data == NULL) { 951 if (data == NULL) {
944 OMAP_MMC_WRITE(host, BLEN, 0); 952 OMAP_MMC_WRITE(host, BLEN, 0);
945 OMAP_MMC_WRITE(host, NBLK, 0); 953 OMAP_MMC_WRITE(host, NBLK, 0);
946 OMAP_MMC_WRITE(host, BUF, 0); 954 OMAP_MMC_WRITE(host, BUF, 0);
947 host->dma_in_use = 0; 955 host->dma_in_use = 0;
948 set_cmd_timeout(host, req); 956 set_cmd_timeout(host, req);
949 return; 957 return;
950 } 958 }
951 959
952 block_size = data->blksz; 960 block_size = data->blksz;
953 961
954 OMAP_MMC_WRITE(host, NBLK, data->blocks - 1); 962 OMAP_MMC_WRITE(host, NBLK, data->blocks - 1);
955 OMAP_MMC_WRITE(host, BLEN, block_size - 1); 963 OMAP_MMC_WRITE(host, BLEN, block_size - 1);
956 set_data_timeout(host, req); 964 set_data_timeout(host, req);
957 965
958 /* cope with calling layer confusion; it issues "single 966 /* cope with calling layer confusion; it issues "single
959 * block" writes using multi-block scatterlists. 967 * block" writes using multi-block scatterlists.
960 */ 968 */
961 sg_len = (data->blocks == 1) ? 1 : data->sg_len; 969 sg_len = (data->blocks == 1) ? 1 : data->sg_len;
962 970
963 /* Only do DMA for entire blocks */ 971 /* Only do DMA for entire blocks */
964 use_dma = host->use_dma; 972 use_dma = host->use_dma;
965 if (use_dma) { 973 if (use_dma) {
966 for (i = 0; i < sg_len; i++) { 974 for (i = 0; i < sg_len; i++) {
967 if ((data->sg[i].length % block_size) != 0) { 975 if ((data->sg[i].length % block_size) != 0) {
968 use_dma = 0; 976 use_dma = 0;
969 break; 977 break;
970 } 978 }
971 } 979 }
972 } 980 }
973 981
974 host->sg_idx = 0; 982 host->sg_idx = 0;
975 if (use_dma) { 983 if (use_dma) {
976 enum dma_data_direction dma_data_dir; 984 enum dma_data_direction dma_data_dir;
977 struct dma_async_tx_descriptor *tx; 985 struct dma_async_tx_descriptor *tx;
978 struct dma_chan *c; 986 struct dma_chan *c;
979 u32 burst, *bp; 987 u32 burst, *bp;
980 u16 buf; 988 u16 buf;
981 989
982 /* 990 /*
983 * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx 991 * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx
984 * and 24xx. Use 16 or 32 word frames when the 992 * and 24xx. Use 16 or 32 word frames when the
985 * blocksize is at least that large. Blocksize is 993 * blocksize is at least that large. Blocksize is
986 * usually 512 bytes; but not for some SD reads. 994 * usually 512 bytes; but not for some SD reads.
987 */ 995 */
988 burst = cpu_is_omap15xx() ? 32 : 64; 996 burst = cpu_is_omap15xx() ? 32 : 64;
989 if (burst > data->blksz) 997 if (burst > data->blksz)
990 burst = data->blksz; 998 burst = data->blksz;
991 999
992 burst >>= 1; 1000 burst >>= 1;
993 1001
994 if (data->flags & MMC_DATA_WRITE) { 1002 if (data->flags & MMC_DATA_WRITE) {
995 c = host->dma_tx; 1003 c = host->dma_tx;
996 bp = &host->dma_tx_burst; 1004 bp = &host->dma_tx_burst;
997 buf = 0x0f80 | (burst - 1) << 0; 1005 buf = 0x0f80 | (burst - 1) << 0;
998 dma_data_dir = DMA_TO_DEVICE; 1006 dma_data_dir = DMA_TO_DEVICE;
999 } else { 1007 } else {
1000 c = host->dma_rx; 1008 c = host->dma_rx;
1001 bp = &host->dma_rx_burst; 1009 bp = &host->dma_rx_burst;
1002 buf = 0x800f | (burst - 1) << 8; 1010 buf = 0x800f | (burst - 1) << 8;
1003 dma_data_dir = DMA_FROM_DEVICE; 1011 dma_data_dir = DMA_FROM_DEVICE;
1004 } 1012 }
1005 1013
1006 if (!c) 1014 if (!c)
1007 goto use_pio; 1015 goto use_pio;
1008 1016
1009 /* Only reconfigure if we have a different burst size */ 1017 /* Only reconfigure if we have a different burst size */
1010 if (*bp != burst) { 1018 if (*bp != burst) {
1011 struct dma_slave_config cfg; 1019 struct dma_slave_config cfg;
1012 1020
1013 cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA); 1021 cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
1014 cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA); 1022 cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
1015 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 1023 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
1016 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 1024 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
1017 cfg.src_maxburst = burst; 1025 cfg.src_maxburst = burst;
1018 cfg.dst_maxburst = burst; 1026 cfg.dst_maxburst = burst;
1019 1027
1020 if (dmaengine_slave_config(c, &cfg)) 1028 if (dmaengine_slave_config(c, &cfg))
1021 goto use_pio; 1029 goto use_pio;
1022 1030
1023 *bp = burst; 1031 *bp = burst;
1024 } 1032 }
1025 1033
1026 host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len, 1034 host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len,
1027 dma_data_dir); 1035 dma_data_dir);
1028 if (host->sg_len == 0) 1036 if (host->sg_len == 0)
1029 goto use_pio; 1037 goto use_pio;
1030 1038
1031 tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len, 1039 tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len,
1032 data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, 1040 data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
1033 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1041 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1034 if (!tx) 1042 if (!tx)
1035 goto use_pio; 1043 goto use_pio;
1036 1044
1037 OMAP_MMC_WRITE(host, BUF, buf); 1045 OMAP_MMC_WRITE(host, BUF, buf);
1038 1046
1039 tx->callback = mmc_omap_dma_callback; 1047 tx->callback = mmc_omap_dma_callback;
1040 tx->callback_param = host; 1048 tx->callback_param = host;
1041 dmaengine_submit(tx); 1049 dmaengine_submit(tx);
1042 host->brs_received = 0; 1050 host->brs_received = 0;
1043 host->dma_done = 0; 1051 host->dma_done = 0;
1044 host->dma_in_use = 1; 1052 host->dma_in_use = 1;
1045 return; 1053 return;
1046 } 1054 }
1047 use_pio: 1055 use_pio:
1048 1056
1049 /* Revert to PIO? */ 1057 /* Revert to PIO? */
1050 OMAP_MMC_WRITE(host, BUF, 0x1f1f); 1058 OMAP_MMC_WRITE(host, BUF, 0x1f1f);
1051 host->total_bytes_left = data->blocks * block_size; 1059 host->total_bytes_left = data->blocks * block_size;
1052 host->sg_len = sg_len; 1060 host->sg_len = sg_len;
1053 mmc_omap_sg_to_buf(host); 1061 mmc_omap_sg_to_buf(host);
1054 host->dma_in_use = 0; 1062 host->dma_in_use = 0;
1055 } 1063 }
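
The whole-block rule applied above is worth stating on its own: DMA is attempted only when every scatterlist entry holds an integral number of blocks; any short entry drops the request to the PIO path at use_pio. A self-contained sketch of just that check (the struct and names here are invented for illustration):

#include <stdio.h>

struct sg_ent { unsigned int length; };

/* Sketch of the whole-block rule: DMA only if every scatterlist entry
 * is a multiple of the block size; otherwise fall back to PIO. */
static int can_use_dma(const struct sg_ent *sg, unsigned int sg_len,
                       unsigned int block_size)
{
        unsigned int i;

        for (i = 0; i < sg_len; i++)
                if (sg[i].length % block_size)
                        return 0;
        return 1;
}

int main(void)
{
        struct sg_ent ok[]  = { { 512 }, { 1024 } };
        struct sg_ent bad[] = { { 512 }, { 100 } };

        printf("%d %d\n", can_use_dma(ok, 2, 512), can_use_dma(bad, 2, 512));
        return 0;
}

This prints 1 0: the 512/1024 list qualifies for DMA, the list with a 100-byte tail does not.
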
1056 1064
1057 static void mmc_omap_start_request(struct mmc_omap_host *host, 1065 static void mmc_omap_start_request(struct mmc_omap_host *host,
1058 struct mmc_request *req) 1066 struct mmc_request *req)
1059 { 1067 {
1060 BUG_ON(host->mrq != NULL); 1068 BUG_ON(host->mrq != NULL);
1061 1069
1062 host->mrq = req; 1070 host->mrq = req;
1063 1071
1064 /* only touch fifo AFTER the controller readies it */ 1072 /* only touch fifo AFTER the controller readies it */
1065 mmc_omap_prepare_data(host, req); 1073 mmc_omap_prepare_data(host, req);
1066 mmc_omap_start_command(host, req->cmd); 1074 mmc_omap_start_command(host, req->cmd);
1067 if (host->dma_in_use) { 1075 if (host->dma_in_use) {
1068 struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ? 1076 struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ?
1069 host->dma_tx : host->dma_rx; 1077 host->dma_tx : host->dma_rx;
1070 1078
1071 dma_async_issue_pending(c); 1079 dma_async_issue_pending(c);
1072 } 1080 }
1073 } 1081 }
1074 1082
1075 static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req) 1083 static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
1076 { 1084 {
1077 struct mmc_omap_slot *slot = mmc_priv(mmc); 1085 struct mmc_omap_slot *slot = mmc_priv(mmc);
1078 struct mmc_omap_host *host = slot->host; 1086 struct mmc_omap_host *host = slot->host;
1079 unsigned long flags; 1087 unsigned long flags;
1080 1088
1081 spin_lock_irqsave(&host->slot_lock, flags); 1089 spin_lock_irqsave(&host->slot_lock, flags);
1082 if (host->mmc != NULL) { 1090 if (host->mmc != NULL) {
1083 BUG_ON(slot->mrq != NULL); 1091 BUG_ON(slot->mrq != NULL);
1084 slot->mrq = req; 1092 slot->mrq = req;
1085 spin_unlock_irqrestore(&host->slot_lock, flags); 1093 spin_unlock_irqrestore(&host->slot_lock, flags);
1086 return; 1094 return;
1087 } else 1095 } else
1088 host->mmc = mmc; 1096 host->mmc = mmc;
1089 spin_unlock_irqrestore(&host->slot_lock, flags); 1097 spin_unlock_irqrestore(&host->slot_lock, flags);
1090 mmc_omap_select_slot(slot, 1); 1098 mmc_omap_select_slot(slot, 1);
1091 mmc_omap_start_request(host, req); 1099 mmc_omap_start_request(host, req);
1092 } 1100 }
1093 1101
1094 static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on, 1102 static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
1095 int vdd) 1103 int vdd)
1096 { 1104 {
1097 struct mmc_omap_host *host; 1105 struct mmc_omap_host *host;
1098 1106
1099 host = slot->host; 1107 host = slot->host;
1100 1108
1101 if (slot->pdata->set_power != NULL) 1109 if (slot->pdata->set_power != NULL)
1102 slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on, 1110 slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
1103 vdd); 1111 vdd);
1104 1112
1105 if (cpu_is_omap24xx()) { 1113 if (cpu_is_omap24xx()) {
1106 u16 w; 1114 u16 w;
1107 1115
1108 if (power_on) { 1116 if (power_on) {
1109 w = OMAP_MMC_READ(host, CON); 1117 w = OMAP_MMC_READ(host, CON);
1110 OMAP_MMC_WRITE(host, CON, w | (1 << 11)); 1118 OMAP_MMC_WRITE(host, CON, w | (1 << 11));
1111 } else { 1119 } else {
1112 w = OMAP_MMC_READ(host, CON); 1120 w = OMAP_MMC_READ(host, CON);
1113 OMAP_MMC_WRITE(host, CON, w & ~(1 << 11)); 1121 OMAP_MMC_WRITE(host, CON, w & ~(1 << 11));
1114 } 1122 }
1115 } 1123 }
1116 } 1124 }
1117 1125
1118 static int mmc_omap_calc_divisor(struct mmc_host *mmc, struct mmc_ios *ios) 1126 static int mmc_omap_calc_divisor(struct mmc_host *mmc, struct mmc_ios *ios)
1119 { 1127 {
1120 struct mmc_omap_slot *slot = mmc_priv(mmc); 1128 struct mmc_omap_slot *slot = mmc_priv(mmc);
1121 struct mmc_omap_host *host = slot->host; 1129 struct mmc_omap_host *host = slot->host;
1122 int func_clk_rate = clk_get_rate(host->fclk); 1130 int func_clk_rate = clk_get_rate(host->fclk);
1123 int dsor; 1131 int dsor;
1124 1132
1125 if (ios->clock == 0) 1133 if (ios->clock == 0)
1126 return 0; 1134 return 0;
1127 1135
1128 dsor = func_clk_rate / ios->clock; 1136 dsor = func_clk_rate / ios->clock;
1129 if (dsor < 1) 1137 if (dsor < 1)
1130 dsor = 1; 1138 dsor = 1;
1131 1139
1132 if (func_clk_rate / dsor > ios->clock) 1140 if (func_clk_rate / dsor > ios->clock)
1133 dsor++; 1141 dsor++;
1134 1142
1135 if (dsor > 250) 1143 if (dsor > 250)
1136 dsor = 250; 1144 dsor = 250;
1137 1145
1138 slot->fclk_freq = func_clk_rate / dsor; 1146 slot->fclk_freq = func_clk_rate / dsor;
1139 1147
1140 if (ios->bus_width == MMC_BUS_WIDTH_4) 1148 if (ios->bus_width == MMC_BUS_WIDTH_4)
1141 dsor |= 1 << 15; 1149 dsor |= 1 << 15;
1142 1150
1143 return dsor; 1151 return dsor;
1144 } 1152 }
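
As a worked example of the search above: with a 48 MHz fclk, a 400 kHz identification clock gives dsor = 120 exactly, while a 25 MHz transfer clock first computes 48/25 = 1, then rounds up to 2 because 48 MHz would exceed the request, landing on an actual 24 MHz. A stand-alone sketch of the same arithmetic (the wrapper is illustrative):

#include <stdio.h>

/* Sketch of the divisor selection: round the divisor up so the card
 * clock never exceeds the requested rate, then clamp to the hardware
 * maximum of 250. Mirrors the driver logic above; not kernel code. */
static int calc_dsor(int fclk, int clock)
{
        int dsor;

        if (clock == 0)
                return 0;

        dsor = fclk / clock;
        if (dsor < 1)
                dsor = 1;
        if (fclk / dsor > clock)
                dsor++;
        if (dsor > 250)
                dsor = 250;
        return dsor;
}

int main(void)
{
        /* 48 MHz fclk: identification clock, then full-speed transfer */
        printf("%d %d\n", calc_dsor(48000000, 400000),
               calc_dsor(48000000, 25000000));  /* prints 120 2 */
        return 0;
}
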
1145 1153
1146 static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1154 static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1147 { 1155 {
1148 struct mmc_omap_slot *slot = mmc_priv(mmc); 1156 struct mmc_omap_slot *slot = mmc_priv(mmc);
1149 struct mmc_omap_host *host = slot->host; 1157 struct mmc_omap_host *host = slot->host;
1150 int i, dsor; 1158 int i, dsor;
1151 int clk_enabled; 1159 int clk_enabled;
1152 1160
1153 mmc_omap_select_slot(slot, 0); 1161 mmc_omap_select_slot(slot, 0);
1154 1162
1155 dsor = mmc_omap_calc_divisor(mmc, ios); 1163 dsor = mmc_omap_calc_divisor(mmc, ios);
1156 1164
1157 if (ios->vdd != slot->vdd) 1165 if (ios->vdd != slot->vdd)
1158 slot->vdd = ios->vdd; 1166 slot->vdd = ios->vdd;
1159 1167
1160 clk_enabled = 0; 1168 clk_enabled = 0;
1161 switch (ios->power_mode) { 1169 switch (ios->power_mode) {
1162 case MMC_POWER_OFF: 1170 case MMC_POWER_OFF:
1163 mmc_omap_set_power(slot, 0, ios->vdd); 1171 mmc_omap_set_power(slot, 0, ios->vdd);
1164 break; 1172 break;
1165 case MMC_POWER_UP: 1173 case MMC_POWER_UP:
1166 /* Cannot touch dsor yet, just power up MMC */ 1174 /* Cannot touch dsor yet, just power up MMC */
1167 mmc_omap_set_power(slot, 1, ios->vdd); 1175 mmc_omap_set_power(slot, 1, ios->vdd);
1168 goto exit; 1176 goto exit;
1169 case MMC_POWER_ON: 1177 case MMC_POWER_ON:
1170 mmc_omap_fclk_enable(host, 1); 1178 mmc_omap_fclk_enable(host, 1);
1171 clk_enabled = 1; 1179 clk_enabled = 1;
1172 dsor |= 1 << 11; 1180 dsor |= 1 << 11;
1173 break; 1181 break;
1174 } 1182 }
1175 1183
1176 if (slot->bus_mode != ios->bus_mode) { 1184 if (slot->bus_mode != ios->bus_mode) {
1177 if (slot->pdata->set_bus_mode != NULL) 1185 if (slot->pdata->set_bus_mode != NULL)
1178 slot->pdata->set_bus_mode(mmc_dev(mmc), slot->id, 1186 slot->pdata->set_bus_mode(mmc_dev(mmc), slot->id,
1179 ios->bus_mode); 1187 ios->bus_mode);
1180 slot->bus_mode = ios->bus_mode; 1188 slot->bus_mode = ios->bus_mode;
1181 } 1189 }
1182 1190
1183 /* At very high arm_per frequencies the controller sometimes 1191 /* At very high arm_per frequencies the controller sometimes
1184 * loses sync and the POW bit fails to latch, which leaves 1192 * loses sync and the POW bit fails to latch, which leaves
1185 * the polling loop below stuck. Writing the CON register 1193 * the polling loop below stuck. Writing the CON register
1186 * twice seems to work around this. */ 1194 * twice seems to work around this. */
1187 for (i = 0; i < 2; i++) 1195 for (i = 0; i < 2; i++)
1188 OMAP_MMC_WRITE(host, CON, dsor); 1196 OMAP_MMC_WRITE(host, CON, dsor);
1189 slot->saved_con = dsor; 1197 slot->saved_con = dsor;
1190 if (ios->power_mode == MMC_POWER_ON) { 1198 if (ios->power_mode == MMC_POWER_ON) {
1191 /* worst case at 400kHz, 80 cycles makes 200 microsecs */ 1199 /* worst case at 400kHz, 80 cycles makes 200 microsecs */
1192 int usecs = 250; 1200 int usecs = 250;
1193 1201
1194 /* Send clock cycles, poll completion */ 1202 /* Send clock cycles, poll completion */
1195 OMAP_MMC_WRITE(host, IE, 0); 1203 OMAP_MMC_WRITE(host, IE, 0);
1196 OMAP_MMC_WRITE(host, STAT, 0xffff); 1204 OMAP_MMC_WRITE(host, STAT, 0xffff);
1197 OMAP_MMC_WRITE(host, CMD, 1 << 7); 1205 OMAP_MMC_WRITE(host, CMD, 1 << 7);
1198 while (usecs > 0 && (OMAP_MMC_READ(host, STAT) & 1) == 0) { 1206 while (usecs > 0 && (OMAP_MMC_READ(host, STAT) & 1) == 0) {
1199 udelay(1); 1207 udelay(1);
1200 usecs--; 1208 usecs--;
1201 } 1209 }
1202 OMAP_MMC_WRITE(host, STAT, 1); 1210 OMAP_MMC_WRITE(host, STAT, 1);
1203 } 1211 }
1204 1212
1205 exit: 1213 exit:
1206 mmc_omap_release_slot(slot, clk_enabled); 1214 mmc_omap_release_slot(slot, clk_enabled);
1207 } 1215 }
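
For the poll budget above: the comment's worst case of 80 initialization cycles at the 400 kHz minimum bus clock works out to 80 / 400000 s = 200 us, so 250 iterations of udelay(1) cover it with a 50 us margin before the loop gives up.
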
1208 1216
1209 static const struct mmc_host_ops mmc_omap_ops = { 1217 static const struct mmc_host_ops mmc_omap_ops = {
1210 .request = mmc_omap_request, 1218 .request = mmc_omap_request,
1211 .set_ios = mmc_omap_set_ios, 1219 .set_ios = mmc_omap_set_ios,
1212 }; 1220 };
1213 1221
1214 static int __devinit mmc_omap_new_slot(struct mmc_omap_host *host, int id) 1222 static int __devinit mmc_omap_new_slot(struct mmc_omap_host *host, int id)
1215 { 1223 {
1216 struct mmc_omap_slot *slot = NULL; 1224 struct mmc_omap_slot *slot = NULL;
1217 struct mmc_host *mmc; 1225 struct mmc_host *mmc;
1218 int r; 1226 int r;
1219 1227
1220 mmc = mmc_alloc_host(sizeof(struct mmc_omap_slot), host->dev); 1228 mmc = mmc_alloc_host(sizeof(struct mmc_omap_slot), host->dev);
1221 if (mmc == NULL) 1229 if (mmc == NULL)
1222 return -ENOMEM; 1230 return -ENOMEM;
1223 1231
1224 slot = mmc_priv(mmc); 1232 slot = mmc_priv(mmc);
1225 slot->host = host; 1233 slot->host = host;
1226 slot->mmc = mmc; 1234 slot->mmc = mmc;
1227 slot->id = id; 1235 slot->id = id;
1228 slot->pdata = &host->pdata->slots[id]; 1236 slot->pdata = &host->pdata->slots[id];
1229 1237
1230 host->slots[id] = slot; 1238 host->slots[id] = slot;
1231 1239
1232 mmc->caps = 0; 1240 mmc->caps = 0;
1233 if (host->pdata->slots[id].wires >= 4) 1241 if (host->pdata->slots[id].wires >= 4)
1234 mmc->caps |= MMC_CAP_4_BIT_DATA; 1242 mmc->caps |= MMC_CAP_4_BIT_DATA;
1235 1243
1236 mmc->ops = &mmc_omap_ops; 1244 mmc->ops = &mmc_omap_ops;
1237 mmc->f_min = 400000; 1245 mmc->f_min = 400000;
1238 1246
1239 if (cpu_class_is_omap2()) 1247 if (cpu_class_is_omap2())
1240 mmc->f_max = 48000000; 1248 mmc->f_max = 48000000;
1241 else 1249 else
1242 mmc->f_max = 24000000; 1250 mmc->f_max = 24000000;
1243 if (host->pdata->max_freq) 1251 if (host->pdata->max_freq)
1244 mmc->f_max = min(host->pdata->max_freq, mmc->f_max); 1252 mmc->f_max = min(host->pdata->max_freq, mmc->f_max);
1245 mmc->ocr_avail = slot->pdata->ocr_mask; 1253 mmc->ocr_avail = slot->pdata->ocr_mask;
1246 1254
1247 /* Use scatterlist DMA to reduce per-transfer costs. 1255 /* Use scatterlist DMA to reduce per-transfer costs.
1248 * NOTE max_seg_size assumption that small blocks aren't 1256 * NOTE max_seg_size assumption that small blocks aren't
1249 * normally used (except e.g. for reading SD registers). 1257 * normally used (except e.g. for reading SD registers).
1250 */ 1258 */
1251 mmc->max_segs = 32; 1259 mmc->max_segs = 32;
1252 mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */ 1260 mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */
1253 mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */ 1261 mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */
1254 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1262 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1255 mmc->max_seg_size = mmc->max_req_size; 1263 mmc->max_seg_size = mmc->max_req_size;
1256 1264
1257 r = mmc_add_host(mmc); 1265 r = mmc_add_host(mmc);
1258 if (r < 0) 1266 if (r < 0)
1259 goto err_remove_host; 1267 goto err_remove_host;
1260 1268
1261 if (slot->pdata->name != NULL) { 1269 if (slot->pdata->name != NULL) {
1262 r = device_create_file(&mmc->class_dev, 1270 r = device_create_file(&mmc->class_dev,
1263 &dev_attr_slot_name); 1271 &dev_attr_slot_name);
1264 if (r < 0) 1272 if (r < 0)
1265 goto err_remove_host; 1273 goto err_remove_host;
1266 } 1274 }
1267 1275
1268 if (slot->pdata->get_cover_state != NULL) { 1276 if (slot->pdata->get_cover_state != NULL) {
1269 r = device_create_file(&mmc->class_dev, 1277 r = device_create_file(&mmc->class_dev,
1270 &dev_attr_cover_switch); 1278 &dev_attr_cover_switch);
1271 if (r < 0) 1279 if (r < 0)
1272 goto err_remove_slot_name; 1280 goto err_remove_slot_name;
1273 1281
1274 setup_timer(&slot->cover_timer, mmc_omap_cover_timer, 1282 setup_timer(&slot->cover_timer, mmc_omap_cover_timer,
1275 (unsigned long)slot); 1283 (unsigned long)slot);
1276 tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler, 1284 tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
1277 (unsigned long)slot); 1285 (unsigned long)slot);
1278 tasklet_schedule(&slot->cover_tasklet); 1286 tasklet_schedule(&slot->cover_tasklet);
1279 } 1287 }
1280 1288
1281 return 0; 1289 return 0;
1282 1290
1283 err_remove_slot_name: 1291 err_remove_slot_name:
1284 if (slot->pdata->name != NULL) 1292 if (slot->pdata->name != NULL)
1285 device_remove_file(&mmc->class_dev, &dev_attr_slot_name); 1293 device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
1286 err_remove_host: 1294 err_remove_host:
1287 mmc_remove_host(mmc); 1295 mmc_remove_host(mmc);
1288 mmc_free_host(mmc); 1296 mmc_free_host(mmc);
1289 return r; 1297 return r;
1290 } 1298 }
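
The request geometry above follows from the register widths in the comments: BLEN and NBLK are 11-bit fields programmed as value - 1, so block size and block count each top out at 2^11 = 2048, max_req_size comes to 2048 * 2048 bytes = 4 MiB per request, and max_seg_size is set equal to it so a single scatterlist segment can carry an entire request.
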
1291 1299
1292 static void mmc_omap_remove_slot(struct mmc_omap_slot *slot) 1300 static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
1293 { 1301 {
1294 struct mmc_host *mmc = slot->mmc; 1302 struct mmc_host *mmc = slot->mmc;
1295 1303
1296 if (slot->pdata->name != NULL) 1304 if (slot->pdata->name != NULL)
1297 device_remove_file(&mmc->class_dev, &dev_attr_slot_name); 1305 device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
1298 if (slot->pdata->get_cover_state != NULL) 1306 if (slot->pdata->get_cover_state != NULL)
1299 device_remove_file(&mmc->class_dev, &dev_attr_cover_switch); 1307 device_remove_file(&mmc->class_dev, &dev_attr_cover_switch);
1300 1308
1301 tasklet_kill(&slot->cover_tasklet); 1309 tasklet_kill(&slot->cover_tasklet);
1302 del_timer_sync(&slot->cover_timer); 1310 del_timer_sync(&slot->cover_timer);
1303 flush_workqueue(slot->host->mmc_omap_wq); 1311 flush_workqueue(slot->host->mmc_omap_wq);
1304 1312
1305 mmc_remove_host(mmc); 1313 mmc_remove_host(mmc);
1306 mmc_free_host(mmc); 1314 mmc_free_host(mmc);
1307 } 1315 }
1308 1316
1309 static int __devinit mmc_omap_probe(struct platform_device *pdev) 1317 static int __devinit mmc_omap_probe(struct platform_device *pdev)
1310 { 1318 {
1311 struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; 1319 struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
1312 struct mmc_omap_host *host = NULL; 1320 struct mmc_omap_host *host = NULL;
1313 struct resource *res; 1321 struct resource *res;
1314 dma_cap_mask_t mask; 1322 dma_cap_mask_t mask;
1315 unsigned sig; 1323 unsigned sig;
1316 int i, ret = 0; 1324 int i, ret = 0;
1317 int irq; 1325 int irq;
1318 1326
1319 if (pdata == NULL) { 1327 if (pdata == NULL) {
1320 dev_err(&pdev->dev, "platform data missing\n"); 1328 dev_err(&pdev->dev, "platform data missing\n");
1321 return -ENXIO; 1329 return -ENXIO;
1322 } 1330 }
1323 if (pdata->nr_slots == 0) { 1331 if (pdata->nr_slots == 0) {
1324 dev_err(&pdev->dev, "no slots\n"); 1332 dev_err(&pdev->dev, "no slots\n");
1325 return -ENXIO; 1333 return -ENXIO;
1326 } 1334 }
1327 1335
1328 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1336 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1329 irq = platform_get_irq(pdev, 0); 1337 irq = platform_get_irq(pdev, 0);
1330 if (res == NULL || irq < 0) 1338 if (res == NULL || irq < 0)
1331 return -ENXIO; 1339 return -ENXIO;
1332 1340
1333 res = request_mem_region(res->start, resource_size(res), 1341 res = request_mem_region(res->start, resource_size(res),
1334 pdev->name); 1342 pdev->name);
1335 if (res == NULL) 1343 if (res == NULL)
1336 return -EBUSY; 1344 return -EBUSY;
1337 1345
1338 host = kzalloc(sizeof(struct mmc_omap_host), GFP_KERNEL); 1346 host = kzalloc(sizeof(struct mmc_omap_host), GFP_KERNEL);
1339 if (host == NULL) { 1347 if (host == NULL) {
1340 ret = -ENOMEM; 1348 ret = -ENOMEM;
1341 goto err_free_mem_region; 1349 goto err_free_mem_region;
1342 } 1350 }
1343 1351
1344 INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work); 1352 INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
1345 INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work); 1353 INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
1346 1354
1347 INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command); 1355 INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
1348 setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer, 1356 setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
1349 (unsigned long) host); 1357 (unsigned long) host);
1350 1358
1351 spin_lock_init(&host->clk_lock); 1359 spin_lock_init(&host->clk_lock);
1352 setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host); 1360 setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
1353 1361
1354 spin_lock_init(&host->dma_lock); 1362 spin_lock_init(&host->dma_lock);
1355 spin_lock_init(&host->slot_lock); 1363 spin_lock_init(&host->slot_lock);
1356 init_waitqueue_head(&host->slot_wq); 1364 init_waitqueue_head(&host->slot_wq);
1357 1365
1358 host->pdata = pdata; 1366 host->pdata = pdata;
1359 host->dev = &pdev->dev; 1367 host->dev = &pdev->dev;
1360 platform_set_drvdata(pdev, host); 1368 platform_set_drvdata(pdev, host);
1361 1369
1362 host->id = pdev->id; 1370 host->id = pdev->id;
1363 host->mem_res = res; 1371 host->mem_res = res;
1364 host->irq = irq; 1372 host->irq = irq;
1365 host->use_dma = 1; 1373 host->use_dma = 1;
1367 host->phys_base = host->mem_res->start; 1375 host->phys_base = host->mem_res->start;
1368 host->virt_base = ioremap(res->start, resource_size(res)); 1376 host->virt_base = ioremap(res->start, resource_size(res));
1369 if (!host->virt_base) 1377 if (!host->virt_base)
1370 goto err_ioremap; 1378 goto err_ioremap;
1371 1379
1372 host->iclk = clk_get(&pdev->dev, "ick"); 1380 host->iclk = clk_get(&pdev->dev, "ick");
1373 if (IS_ERR(host->iclk)) { 1381 if (IS_ERR(host->iclk)) {
1374 ret = PTR_ERR(host->iclk); 1382 ret = PTR_ERR(host->iclk);
1375 goto err_free_mmc_host; 1383 goto err_free_mmc_host;
1376 } 1384 }
1377 clk_enable(host->iclk); 1385 clk_enable(host->iclk);
1378 1386
1379 host->fclk = clk_get(&pdev->dev, "fck"); 1387 host->fclk = clk_get(&pdev->dev, "fck");
1380 if (IS_ERR(host->fclk)) { 1388 if (IS_ERR(host->fclk)) {
1381 ret = PTR_ERR(host->fclk); 1389 ret = PTR_ERR(host->fclk);
1382 goto err_free_iclk; 1390 goto err_free_iclk;
1383 } 1391 }
1384 1392
1385 dma_cap_zero(mask); 1393 dma_cap_zero(mask);
1386 dma_cap_set(DMA_SLAVE, mask); 1394 dma_cap_set(DMA_SLAVE, mask);
1387 1395
1388 host->dma_tx_burst = -1; 1396 host->dma_tx_burst = -1;
1389 host->dma_rx_burst = -1; 1397 host->dma_rx_burst = -1;
1390 1398
1391 if (cpu_is_omap24xx()) 1399 if (cpu_is_omap24xx())
1392 sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX; 1400 sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX;
1393 else 1401 else
1394 sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX; 1402 sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX;
1395 host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig); 1403 host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
1396 #if 0 1404 #if 0
1397 if (!host->dma_tx) { 1405 if (!host->dma_tx) {
1398 dev_err(host->dev, "unable to obtain TX DMA engine channel %u\n", 1406 dev_err(host->dev, "unable to obtain TX DMA engine channel %u\n",
1399 sig); 1407 sig);
1400 goto err_dma; 1408 goto err_dma;
1401 } 1409 }
1402 #else 1410 #else
1403 if (!host->dma_tx) 1411 if (!host->dma_tx)
1404 dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n", 1412 dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
1405 sig); 1413 sig);
1406 #endif 1414 #endif
1407 if (cpu_is_omap24xx()) 1415 if (cpu_is_omap24xx())
1408 sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX; 1416 sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX;
1409 else 1417 else
1410 sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX; 1418 sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX;
1411 host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig); 1419 host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
1412 #if 0 1420 #if 0
1413 if (!host->dma_rx) { 1421 if (!host->dma_rx) {
1414 dev_err(host->dev, "unable to obtain RX DMA engine channel %u\n", 1422 dev_err(host->dev, "unable to obtain RX DMA engine channel %u\n",
1415 sig); 1423 sig);
1416 goto err_dma; 1424 goto err_dma;
1417 } 1425 }
1418 #else 1426 #else
1419 if (!host->dma_rx) 1427 if (!host->dma_rx)
1420 dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n", 1428 dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
1421 sig); 1429 sig);
1422 #endif 1430 #endif
1423 1431
1424 ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host); 1432 ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
1425 if (ret) 1433 if (ret)
1426 goto err_free_dma; 1434 goto err_free_dma;
1427 1435
1428 if (pdata->init != NULL) { 1436 if (pdata->init != NULL) {
1429 ret = pdata->init(&pdev->dev); 1437 ret = pdata->init(&pdev->dev);
1430 if (ret < 0) 1438 if (ret < 0)
1431 goto err_free_irq; 1439 goto err_free_irq;
1432 } 1440 }
1433 1441
1434 host->nr_slots = pdata->nr_slots; 1442 host->nr_slots = pdata->nr_slots;
1435 host->reg_shift = (cpu_is_omap7xx() ? 1 : 2); 1443 host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
1436 1444
1437 host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0); 1445 host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
1438 if (!host->mmc_omap_wq) 1446 if (!host->mmc_omap_wq)
1439 goto err_plat_cleanup; 1447 goto err_plat_cleanup;
1440 1448
1441 for (i = 0; i < pdata->nr_slots; i++) { 1449 for (i = 0; i < pdata->nr_slots; i++) {
1442 ret = mmc_omap_new_slot(host, i); 1450 ret = mmc_omap_new_slot(host, i);
1443 if (ret < 0) { 1451 if (ret < 0) {
1444 while (--i >= 0) 1452 while (--i >= 0)
1445 mmc_omap_remove_slot(host->slots[i]); 1453 mmc_omap_remove_slot(host->slots[i]);
1446 1454
1447 goto err_destroy_wq; 1455 goto err_destroy_wq;
1448 } 1456 }
1449 } 1457 }
1450 1458
1451 return 0; 1459 return 0;
1452 1460
1453 err_destroy_wq: 1461 err_destroy_wq:
1454 destroy_workqueue(host->mmc_omap_wq); 1462 destroy_workqueue(host->mmc_omap_wq);
1455 err_plat_cleanup: 1463 err_plat_cleanup:
1456 if (pdata->cleanup) 1464 if (pdata->cleanup)
1457 pdata->cleanup(&pdev->dev); 1465 pdata->cleanup(&pdev->dev);
1458 err_free_irq: 1466 err_free_irq:
1459 free_irq(host->irq, host); 1467 free_irq(host->irq, host);
1460 err_free_dma: 1468 err_free_dma:
1461 if (host->dma_tx) 1469 if (host->dma_tx)
1462 dma_release_channel(host->dma_tx); 1470 dma_release_channel(host->dma_tx);
1463 if (host->dma_rx) 1471 if (host->dma_rx)
1464 dma_release_channel(host->dma_rx); 1472 dma_release_channel(host->dma_rx);
1465 clk_put(host->fclk); 1473 clk_put(host->fclk);
1466 err_free_iclk: 1474 err_free_iclk:
1467 clk_disable(host->iclk); 1475 clk_disable(host->iclk);
1468 clk_put(host->iclk); 1476 clk_put(host->iclk);
1469 err_free_mmc_host: 1477 err_free_mmc_host:
1470 iounmap(host->virt_base); 1478 iounmap(host->virt_base);
1471 err_ioremap: 1479 err_ioremap:
1472 kfree(host); 1480 kfree(host);
1473 err_free_mem_region: 1481 err_free_mem_region:
1474 release_mem_region(res->start, resource_size(res)); 1482 release_mem_region(res->start, resource_size(res));
1475 return ret; 1483 return ret;
1476 } 1484 }
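
Note that the error ladder above unwinds strictly in reverse order of acquisition: each err_* label releases only what was set up before the corresponding failure point (workqueue, platform cleanup hook, IRQ, DMA channels plus fclk, iclk, the ioremap mapping, the host allocation, and finally the memory region), so a jump to any label leaves nothing leaked.
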
1477 1485
1478 static int __devexit mmc_omap_remove(struct platform_device *pdev) 1486 static int __devexit mmc_omap_remove(struct platform_device *pdev)
1479 { 1487 {
1480 struct mmc_omap_host *host = platform_get_drvdata(pdev); 1488 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1481 int i; 1489 int i;
1482 1490
1483 platform_set_drvdata(pdev, NULL); 1491 platform_set_drvdata(pdev, NULL);
1484 1492
1485 BUG_ON(host == NULL); 1493 BUG_ON(host == NULL);
1486 1494
1487 for (i = 0; i < host->nr_slots; i++) 1495 for (i = 0; i < host->nr_slots; i++)
1488 mmc_omap_remove_slot(host->slots[i]); 1496 mmc_omap_remove_slot(host->slots[i]);
1489 1497
1490 if (host->pdata->cleanup) 1498 if (host->pdata->cleanup)
1491 host->pdata->cleanup(&pdev->dev); 1499 host->pdata->cleanup(&pdev->dev);
1492 1500
1493 mmc_omap_fclk_enable(host, 0); 1501 mmc_omap_fclk_enable(host, 0);
1494 free_irq(host->irq, host); 1502 free_irq(host->irq, host);
1495 clk_put(host->fclk); 1503 clk_put(host->fclk);
1496 clk_disable(host->iclk); 1504 clk_disable(host->iclk);
1497 clk_put(host->iclk); 1505 clk_put(host->iclk);
1498 1506
1499 if (host->dma_tx) 1507 if (host->dma_tx)
1500 dma_release_channel(host->dma_tx); 1508 dma_release_channel(host->dma_tx);
1501 if (host->dma_rx) 1509 if (host->dma_rx)
1502 dma_release_channel(host->dma_rx); 1510 dma_release_channel(host->dma_rx);
1503 1511
1504 iounmap(host->virt_base); 1512 iounmap(host->virt_base);
1505 release_mem_region(pdev->resource[0].start, 1513 release_mem_region(pdev->resource[0].start,
1506 pdev->resource[0].end - pdev->resource[0].start + 1); 1514 pdev->resource[0].end - pdev->resource[0].start + 1);
1507 destroy_workqueue(host->mmc_omap_wq); 1515 destroy_workqueue(host->mmc_omap_wq);
1508 1516
1509 kfree(host); 1517 kfree(host);
1510 1518
1511 return 0; 1519 return 0;
1512 } 1520 }
1513 1521
1514 #ifdef CONFIG_PM 1522 #ifdef CONFIG_PM
1515 static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg) 1523 static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
1516 { 1524 {
1517 int i, ret = 0; 1525 int i, ret = 0;
1518 struct mmc_omap_host *host = platform_get_drvdata(pdev); 1526 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1519 1527
1520 if (host == NULL || host->suspended) 1528 if (host == NULL || host->suspended)
1521 return 0; 1529 return 0;
1522 1530
1523 for (i = 0; i < host->nr_slots; i++) { 1531 for (i = 0; i < host->nr_slots; i++) {
1524 struct mmc_omap_slot *slot; 1532 struct mmc_omap_slot *slot;
1525 1533
1526 slot = host->slots[i]; 1534 slot = host->slots[i];
1527 ret = mmc_suspend_host(slot->mmc); 1535 ret = mmc_suspend_host(slot->mmc);
1528 if (ret < 0) { 1536 if (ret < 0) {
1529 while (--i >= 0) { 1537 while (--i >= 0) {
1530 slot = host->slots[i]; 1538 slot = host->slots[i];
1531 mmc_resume_host(slot->mmc); 1539 mmc_resume_host(slot->mmc);
1532 } 1540 }
1533 return ret; 1541 return ret;
1534 } 1542 }
1535 } 1543 }
1536 host->suspended = 1; 1544 host->suspended = 1;
1537 return 0; 1545 return 0;
1538 } 1546 }
1539 1547
1540 static int mmc_omap_resume(struct platform_device *pdev) 1548 static int mmc_omap_resume(struct platform_device *pdev)
1541 { 1549 {
1542 int i, ret = 0; 1550 int i, ret = 0;
1543 struct mmc_omap_host *host = platform_get_drvdata(pdev); 1551 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1544 1552
1545 if (host == NULL || !host->suspended) 1553 if (host == NULL || !host->suspended)
1546 return 0; 1554 return 0;
1547 1555
1548 for (i = 0; i < host->nr_slots; i++) { 1556 for (i = 0; i < host->nr_slots; i++) {
1549 struct mmc_omap_slot *slot; 1557 struct mmc_omap_slot *slot;
1550 slot = host->slots[i]; 1558 slot = host->slots[i];
1551 ret = mmc_resume_host(slot->mmc); 1559 ret = mmc_resume_host(slot->mmc);
1552 if (ret < 0) 1560 if (ret < 0)
1553 return ret; 1561 return ret;
1554 1562
1555 host->suspended = 0; 1563 host->suspended = 0;
1556 } 1564 }
1557 return 0; 1565 return 0;
1558 } 1566 }
1559 #else 1567 #else
1560 #define mmc_omap_suspend NULL 1568 #define mmc_omap_suspend NULL
1561 #define mmc_omap_resume NULL 1569 #define mmc_omap_resume NULL
1562 #endif 1570 #endif
1563 1571
1564 static struct platform_driver mmc_omap_driver = { 1572 static struct platform_driver mmc_omap_driver = {
1565 .probe = mmc_omap_probe, 1573 .probe = mmc_omap_probe,
1566 .remove = __devexit_p(mmc_omap_remove), 1574 .remove = __devexit_p(mmc_omap_remove),
1567 .suspend = mmc_omap_suspend, 1575 .suspend = mmc_omap_suspend,
1568 .resume = mmc_omap_resume, 1576 .resume = mmc_omap_resume,
1569 .driver = { 1577 .driver = {
1570 .name = DRIVER_NAME, 1578 .name = DRIVER_NAME,
1571 .owner = THIS_MODULE, 1579 .owner = THIS_MODULE,
1572 }, 1580 },
1573 }; 1581 };
1574 1582
1575 module_platform_driver(mmc_omap_driver); 1583 module_platform_driver(mmc_omap_driver);
1576 MODULE_DESCRIPTION("OMAP Multimedia Card driver"); 1584 MODULE_DESCRIPTION("OMAP Multimedia Card driver");
1577 MODULE_LICENSE("GPL"); 1585 MODULE_LICENSE("GPL");
1578 MODULE_ALIAS("platform:" DRIVER_NAME); 1586 MODULE_ALIAS("platform:" DRIVER_NAME);
1579 MODULE_AUTHOR("Juha Yrjölä"); 1587 MODULE_AUTHOR("Juha Yrjölä");
1580 1588
drivers/mmc/host/sdhci-esdhc.h
1 /* 1 /*
2 * Freescale eSDHC controller driver generics for OF and pltfm. 2 * Freescale eSDHC controller driver generics for OF and pltfm.
3 * 3 *
4 * Copyright (c) 2007 Freescale Semiconductor, Inc. 4 * Copyright (c) 2007 Freescale Semiconductor, Inc.
5 * Copyright (c) 2009 MontaVista Software, Inc. 5 * Copyright (c) 2009 MontaVista Software, Inc.
6 * Copyright (c) 2010 Pengutronix e.K. 6 * Copyright (c) 2010 Pengutronix e.K.
7 * Author: Wolfram Sang <w.sang@pengutronix.de> 7 * Author: Wolfram Sang <w.sang@pengutronix.de>
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License. 11 * the Free Software Foundation; either version 2 of the License.
12 */ 12 */
13 13
14 #ifndef _DRIVERS_MMC_SDHCI_ESDHC_H 14 #ifndef _DRIVERS_MMC_SDHCI_ESDHC_H
15 #define _DRIVERS_MMC_SDHCI_ESDHC_H 15 #define _DRIVERS_MMC_SDHCI_ESDHC_H
16 16
17 /* 17 /*
18 * Ops and quirks for the Freescale eSDHC controller. 18 * Ops and quirks for the Freescale eSDHC controller.
19 */ 19 */
20 20
21 #define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \ 21 #define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
22 SDHCI_QUIRK_NO_BUSY_IRQ | \ 22 SDHCI_QUIRK_NO_BUSY_IRQ | \
23 SDHCI_QUIRK_NONSTANDARD_CLOCK | \ 23 SDHCI_QUIRK_NONSTANDARD_CLOCK | \
24 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ 24 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
25 SDHCI_QUIRK_PIO_NEEDS_DELAY | \ 25 SDHCI_QUIRK_PIO_NEEDS_DELAY | \
26 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) 26 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
27 27
28 #define ESDHC_SYSTEM_CONTROL 0x2c 28 #define ESDHC_SYSTEM_CONTROL 0x2c
29 #define ESDHC_CLOCK_MASK 0x0000fff0 29 #define ESDHC_CLOCK_MASK 0x0000fff0
30 #define ESDHC_PREDIV_SHIFT 8 30 #define ESDHC_PREDIV_SHIFT 8
31 #define ESDHC_DIVIDER_SHIFT 4 31 #define ESDHC_DIVIDER_SHIFT 4
32 #define ESDHC_CLOCK_PEREN 0x00000004 32 #define ESDHC_CLOCK_PEREN 0x00000004
33 #define ESDHC_CLOCK_HCKEN 0x00000002 33 #define ESDHC_CLOCK_HCKEN 0x00000002
34 #define ESDHC_CLOCK_IPGEN 0x00000001 34 #define ESDHC_CLOCK_IPGEN 0x00000001
35 35
36 /* pltfm-specific */ 36 /* pltfm-specific */
37 #define ESDHC_HOST_CONTROL_LE 0x20 37 #define ESDHC_HOST_CONTROL_LE 0x20
38 38
39 /* OF-specific */ 39 /* OF-specific */
40 #define ESDHC_DMA_SYSCTL 0x40c 40 #define ESDHC_DMA_SYSCTL 0x40c
41 #define ESDHC_DMA_SNOOP 0x00000040 41 #define ESDHC_DMA_SNOOP 0x00000040
42 42
43 #define ESDHC_HOST_CONTROL_RES 0x05 43 #define ESDHC_HOST_CONTROL_RES 0x05
44 44
45 static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock) 45 static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
46 { 46 {
47 int pre_div = 2; 47 int pre_div = 2;
48 int div = 1; 48 int div = 1;
49 u32 temp; 49 u32 temp;
50 50
51 if (clock == 0)
52 goto out;
53
51 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); 54 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
52 temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN 55 temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
53 | ESDHC_CLOCK_MASK); 56 | ESDHC_CLOCK_MASK);
54 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); 57 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
55
56 if (clock == 0)
57 goto out;
58 58
59 while (host->max_clk / pre_div / 16 > clock && pre_div < 256) 59 while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
60 pre_div *= 2; 60 pre_div *= 2;
61 61
62 while (host->max_clk / pre_div / div > clock && div < 16) 62 while (host->max_clk / pre_div / div > clock && div < 16)
63 div++; 63 div++;
64 64
65 dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", 65 dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
66 clock, host->max_clk / pre_div / div); 66 clock, host->max_clk / pre_div / div);
67 67
68 pre_div >>= 1; 68 pre_div >>= 1;
69 div--; 69 div--;
70 70
71 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); 71 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
72 temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN 72 temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
73 | (div << ESDHC_DIVIDER_SHIFT) 73 | (div << ESDHC_DIVIDER_SHIFT)
74 | (pre_div << ESDHC_PREDIV_SHIFT)); 74 | (pre_div << ESDHC_PREDIV_SHIFT));
75 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); 75 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
76 mdelay(1); 76 mdelay(1);
77 out: 77 out:
78 host->clock = clock; 78 host->clock = clock;
79 } 79 }
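
The hunk above is the "break out early if clock is 0" fix from this pull: the old code cleared the clock-enable bits in SYSTEM_CONTROL before checking, while the new code returns before touching the register at all when the clock is being gated off. A stand-alone sketch of the divider search (the printf wrapper is illustrative; the two while loops mirror the header, which then encodes pre_div >> 1 and div - 1 into SYSTEM_CONTROL):

#include <stdio.h>

/* Sketch of the eSDHC divider search: double pre_div until
 * max_clk / pre_div / 16 no longer exceeds the target, then grow div.
 * The clock == 0 early-out mirrors the fix above. Illustrative only. */
static void pick_dividers(unsigned int max_clk, unsigned int clock)
{
        int pre_div = 2, div = 1;

        if (clock == 0) {
                printf("clock gated off, registers untouched\n");
                return;
        }
        while (max_clk / pre_div / 16 > clock && pre_div < 256)
                pre_div *= 2;
        while (max_clk / pre_div / div > clock && div < 16)
                div++;

        printf("target %u -> actual %u (pre_div=%d, div=%d)\n",
               clock, max_clk / pre_div / div, pre_div, div);
}

int main(void)
{
        pick_dividers(96000000, 25000000);  /* 96 MHz base, 25 MHz target */
        pick_dividers(96000000, 0);
        return 0;
}

With a 96 MHz base clock this prints an actual 24 MHz for the 25 MHz target (pre_div=2, div=2).
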
include/linux/mmc/card.h
1 /* 1 /*
2 * linux/include/linux/mmc/card.h 2 * linux/include/linux/mmc/card.h
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 * 7 *
8 * Card driver specific definitions. 8 * Card driver specific definitions.
9 */ 9 */
10 #ifndef LINUX_MMC_CARD_H 10 #ifndef LINUX_MMC_CARD_H
11 #define LINUX_MMC_CARD_H 11 #define LINUX_MMC_CARD_H
12 12
13 #include <linux/device.h> 13 #include <linux/device.h>
14 #include <linux/mmc/core.h> 14 #include <linux/mmc/core.h>
15 #include <linux/mod_devicetable.h> 15 #include <linux/mod_devicetable.h>
16 16
17 struct mmc_cid { 17 struct mmc_cid {
18 unsigned int manfid; 18 unsigned int manfid;
19 char prod_name[8]; 19 char prod_name[8];
20 unsigned int serial; 20 unsigned int serial;
21 unsigned short oemid; 21 unsigned short oemid;
22 unsigned short year; 22 unsigned short year;
23 unsigned char hwrev; 23 unsigned char hwrev;
24 unsigned char fwrev; 24 unsigned char fwrev;
25 unsigned char month; 25 unsigned char month;
26 }; 26 };
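
The MoviNAND change mentioned in this pull is driven by fields of this structure: the block layer recognizes the affected parts by their CID and sets a quirk before any secure erase or secure trim is issued. Purely as an illustration of that kind of match (this is not the kernel's quirk API, and the 0x15 manfid and product name below are placeholders, not asserted values):

#include <stdio.h>
#include <string.h>

struct cid { unsigned int manfid; char prod_name[8]; };

/* Illustrative sketch only: a quirk table would match CID fields such
 * as manfid and prod_name. Both constants here are placeholders. */
static int matches_quirk(const struct cid *cid)
{
        return cid->manfid == 0x15 &&
               !strncmp(cid->prod_name, "M8G2FA", sizeof(cid->prod_name));
}

int main(void)
{
        struct cid c = { 0x15, "M8G2FA" };

        printf("quirk %s\n", matches_quirk(&c) ? "applies" : "does not apply");
        return 0;
}
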
27 27
28 struct mmc_csd { 28 struct mmc_csd {
29 unsigned char structure; 29 unsigned char structure;
30 unsigned char mmca_vsn; 30 unsigned char mmca_vsn;
31 unsigned short cmdclass; 31 unsigned short cmdclass;
32 unsigned short tacc_clks; 32 unsigned short tacc_clks;
33 unsigned int tacc_ns; 33 unsigned int tacc_ns;
34 unsigned int c_size; 34 unsigned int c_size;
35 unsigned int r2w_factor; 35 unsigned int r2w_factor;
36 unsigned int max_dtr; 36 unsigned int max_dtr;
37 unsigned int erase_size; /* In sectors */ 37 unsigned int erase_size; /* In sectors */
38 unsigned int read_blkbits; 38 unsigned int read_blkbits;
39 unsigned int write_blkbits; 39 unsigned int write_blkbits;
40 unsigned int capacity; 40 unsigned int capacity;
41 unsigned int read_partial:1, 41 unsigned int read_partial:1,
42 read_misalign:1, 42 read_misalign:1,
43 write_partial:1, 43 write_partial:1,
44 write_misalign:1; 44 write_misalign:1;
45 }; 45 };
46 46
47 struct mmc_ext_csd { 47 struct mmc_ext_csd {
48 u8 rev; 48 u8 rev;
49 u8 erase_group_def; 49 u8 erase_group_def;
50 u8 sec_feature_support; 50 u8 sec_feature_support;
51 u8 rel_sectors; 51 u8 rel_sectors;
52 u8 rel_param; 52 u8 rel_param;
53 u8 part_config; 53 u8 part_config;
54 u8 cache_ctrl; 54 u8 cache_ctrl;
55 u8 rst_n_function; 55 u8 rst_n_function;
56 unsigned int part_time; /* Units: ms */ 56 unsigned int part_time; /* Units: ms */
57 unsigned int sa_timeout; /* Units: 100ns */ 57 unsigned int sa_timeout; /* Units: 100ns */
58 unsigned int generic_cmd6_time; /* Units: 10ms */ 58 unsigned int generic_cmd6_time; /* Units: 10ms */
59 unsigned int power_off_longtime; /* Units: ms */ 59 unsigned int power_off_longtime; /* Units: ms */
60 unsigned int hs_max_dtr; 60 unsigned int hs_max_dtr;
61 #define MMC_HIGH_26_MAX_DTR 26000000 61 #define MMC_HIGH_26_MAX_DTR 26000000
62 #define MMC_HIGH_52_MAX_DTR 52000000 62 #define MMC_HIGH_52_MAX_DTR 52000000
63 #define MMC_HIGH_DDR_MAX_DTR 52000000 63 #define MMC_HIGH_DDR_MAX_DTR 52000000
64 #define MMC_HS200_MAX_DTR 200000000 64 #define MMC_HS200_MAX_DTR 200000000
65 unsigned int sectors; 65 unsigned int sectors;
66 unsigned int card_type; 66 unsigned int card_type;
67 unsigned int hc_erase_size; /* In sectors */ 67 unsigned int hc_erase_size; /* In sectors */
68 unsigned int hc_erase_timeout; /* In milliseconds */ 68 unsigned int hc_erase_timeout; /* In milliseconds */
69 unsigned int sec_trim_mult; /* Secure trim multiplier */ 69 unsigned int sec_trim_mult; /* Secure trim multiplier */
70 unsigned int sec_erase_mult; /* Secure erase multiplier */ 70 unsigned int sec_erase_mult; /* Secure erase multiplier */
71 unsigned int trim_timeout; /* In milliseconds */ 71 unsigned int trim_timeout; /* In milliseconds */
72 bool enhanced_area_en; /* enable bit */ 72 bool enhanced_area_en; /* enable bit */
73 unsigned long long enhanced_area_offset; /* Units: Byte */ 73 unsigned long long enhanced_area_offset; /* Units: Byte */
74 unsigned int enhanced_area_size; /* Units: KB */ 74 unsigned int enhanced_area_size; /* Units: KB */
75 unsigned int cache_size; /* Units: KB */ 75 unsigned int cache_size; /* Units: KB */
76 bool hpi_en; /* HPI enable bit */ 76 bool hpi_en; /* HPI enable bit */
77 bool hpi; /* HPI support bit */ 77 bool hpi; /* HPI support bit */
78 unsigned int hpi_cmd; /* cmd used as HPI */ 78 unsigned int hpi_cmd; /* cmd used as HPI */
79 unsigned int data_sector_size; /* 512 bytes or 4KB */ 79 unsigned int data_sector_size; /* 512 bytes or 4KB */
80 unsigned int data_tag_unit_size; /* DATA TAG UNIT size */ 80 unsigned int data_tag_unit_size; /* DATA TAG UNIT size */
81 unsigned int boot_ro_lock; /* ro lock support */ 81 unsigned int boot_ro_lock; /* ro lock support */
82 bool boot_ro_lockable; 82 bool boot_ro_lockable;
83 u8 raw_partition_support; /* 160 */ 83 u8 raw_partition_support; /* 160 */
84 u8 raw_erased_mem_count; /* 181 */ 84 u8 raw_erased_mem_count; /* 181 */
85 u8 raw_ext_csd_structure; /* 194 */ 85 u8 raw_ext_csd_structure; /* 194 */
86 u8 raw_card_type; /* 196 */ 86 u8 raw_card_type; /* 196 */
87 u8 out_of_int_time; /* 198 */ 87 u8 out_of_int_time; /* 198 */
88 u8 raw_s_a_timeout; /* 217 */ 88 u8 raw_s_a_timeout; /* 217 */
89 u8 raw_hc_erase_gap_size; /* 221 */ 89 u8 raw_hc_erase_gap_size; /* 221 */
90 u8 raw_erase_timeout_mult; /* 223 */ 90 u8 raw_erase_timeout_mult; /* 223 */
91 u8 raw_hc_erase_grp_size; /* 224 */ 91 u8 raw_hc_erase_grp_size; /* 224 */
92 u8 raw_sec_trim_mult; /* 229 */ 92 u8 raw_sec_trim_mult; /* 229 */
93 u8 raw_sec_erase_mult; /* 230 */ 93 u8 raw_sec_erase_mult; /* 230 */
94 u8 raw_sec_feature_support;/* 231 */ 94 u8 raw_sec_feature_support;/* 231 */
95 u8 raw_trim_mult; /* 232 */ 95 u8 raw_trim_mult; /* 232 */
96 u8 raw_sectors[4]; /* 212 - 4 bytes */ 96 u8 raw_sectors[4]; /* 212 - 4 bytes */
97 97
98 unsigned int feature_support; 98 unsigned int feature_support;
99 #define MMC_DISCARD_FEATURE BIT(0) /* CMD38 feature */ 99 #define MMC_DISCARD_FEATURE BIT(0) /* CMD38 feature */
100 }; 100 };
101 101
102 struct sd_scr { 102 struct sd_scr {
103 unsigned char sda_vsn; 103 unsigned char sda_vsn;
104 unsigned char sda_spec3; 104 unsigned char sda_spec3;
105 unsigned char bus_widths; 105 unsigned char bus_widths;
106 #define SD_SCR_BUS_WIDTH_1 (1<<0) 106 #define SD_SCR_BUS_WIDTH_1 (1<<0)
107 #define SD_SCR_BUS_WIDTH_4 (1<<2) 107 #define SD_SCR_BUS_WIDTH_4 (1<<2)
108 unsigned char cmds; 108 unsigned char cmds;
109 #define SD_SCR_CMD20_SUPPORT (1<<0) 109 #define SD_SCR_CMD20_SUPPORT (1<<0)
110 #define SD_SCR_CMD23_SUPPORT (1<<1) 110 #define SD_SCR_CMD23_SUPPORT (1<<1)
111 }; 111 };
112 112
113 struct sd_ssr { 113 struct sd_ssr {
114 unsigned int au; /* In sectors */ 114 unsigned int au; /* In sectors */
115 unsigned int erase_timeout; /* In milliseconds */ 115 unsigned int erase_timeout; /* In milliseconds */
116 unsigned int erase_offset; /* In milliseconds */ 116 unsigned int erase_offset; /* In milliseconds */
117 }; 117 };
118 118
119 struct sd_switch_caps { 119 struct sd_switch_caps {
120 unsigned int hs_max_dtr; 120 unsigned int hs_max_dtr;
121 unsigned int uhs_max_dtr; 121 unsigned int uhs_max_dtr;
122 #define HIGH_SPEED_MAX_DTR 50000000 122 #define HIGH_SPEED_MAX_DTR 50000000
123 #define UHS_SDR104_MAX_DTR 208000000 123 #define UHS_SDR104_MAX_DTR 208000000
124 #define UHS_SDR50_MAX_DTR 100000000 124 #define UHS_SDR50_MAX_DTR 100000000
125 #define UHS_DDR50_MAX_DTR 50000000 125 #define UHS_DDR50_MAX_DTR 50000000
126 #define UHS_SDR25_MAX_DTR UHS_DDR50_MAX_DTR 126 #define UHS_SDR25_MAX_DTR UHS_DDR50_MAX_DTR
127 #define UHS_SDR12_MAX_DTR 25000000 127 #define UHS_SDR12_MAX_DTR 25000000
128 unsigned int sd3_bus_mode; 128 unsigned int sd3_bus_mode;
129 #define UHS_SDR12_BUS_SPEED 0 129 #define UHS_SDR12_BUS_SPEED 0
130 #define HIGH_SPEED_BUS_SPEED 1 130 #define HIGH_SPEED_BUS_SPEED 1
131 #define UHS_SDR25_BUS_SPEED 1 131 #define UHS_SDR25_BUS_SPEED 1
132 #define UHS_SDR50_BUS_SPEED 2 132 #define UHS_SDR50_BUS_SPEED 2
133 #define UHS_SDR104_BUS_SPEED 3 133 #define UHS_SDR104_BUS_SPEED 3
134 #define UHS_DDR50_BUS_SPEED 4 134 #define UHS_DDR50_BUS_SPEED 4
135 135
136 #define SD_MODE_HIGH_SPEED (1 << HIGH_SPEED_BUS_SPEED) 136 #define SD_MODE_HIGH_SPEED (1 << HIGH_SPEED_BUS_SPEED)
137 #define SD_MODE_UHS_SDR12 (1 << UHS_SDR12_BUS_SPEED) 137 #define SD_MODE_UHS_SDR12 (1 << UHS_SDR12_BUS_SPEED)
138 #define SD_MODE_UHS_SDR25 (1 << UHS_SDR25_BUS_SPEED) 138 #define SD_MODE_UHS_SDR25 (1 << UHS_SDR25_BUS_SPEED)
139 #define SD_MODE_UHS_SDR50 (1 << UHS_SDR50_BUS_SPEED) 139 #define SD_MODE_UHS_SDR50 (1 << UHS_SDR50_BUS_SPEED)
140 #define SD_MODE_UHS_SDR104 (1 << UHS_SDR104_BUS_SPEED) 140 #define SD_MODE_UHS_SDR104 (1 << UHS_SDR104_BUS_SPEED)
141 #define SD_MODE_UHS_DDR50 (1 << UHS_DDR50_BUS_SPEED) 141 #define SD_MODE_UHS_DDR50 (1 << UHS_DDR50_BUS_SPEED)
142 unsigned int sd3_drv_type; 142 unsigned int sd3_drv_type;
143 #define SD_DRIVER_TYPE_B 0x01 143 #define SD_DRIVER_TYPE_B 0x01
144 #define SD_DRIVER_TYPE_A 0x02 144 #define SD_DRIVER_TYPE_A 0x02
145 #define SD_DRIVER_TYPE_C 0x04 145 #define SD_DRIVER_TYPE_C 0x04
146 #define SD_DRIVER_TYPE_D 0x08 146 #define SD_DRIVER_TYPE_D 0x08
147 unsigned int sd3_curr_limit; 147 unsigned int sd3_curr_limit;
148 #define SD_SET_CURRENT_LIMIT_200 0 148 #define SD_SET_CURRENT_LIMIT_200 0
149 #define SD_SET_CURRENT_LIMIT_400 1 149 #define SD_SET_CURRENT_LIMIT_400 1
150 #define SD_SET_CURRENT_LIMIT_600 2 150 #define SD_SET_CURRENT_LIMIT_600 2
151 #define SD_SET_CURRENT_LIMIT_800 3 151 #define SD_SET_CURRENT_LIMIT_800 3
152 #define SD_SET_CURRENT_NO_CHANGE (-1) 152 #define SD_SET_CURRENT_NO_CHANGE (-1)
153 153
154 #define SD_MAX_CURRENT_200 (1 << SD_SET_CURRENT_LIMIT_200) 154 #define SD_MAX_CURRENT_200 (1 << SD_SET_CURRENT_LIMIT_200)
155 #define SD_MAX_CURRENT_400 (1 << SD_SET_CURRENT_LIMIT_400) 155 #define SD_MAX_CURRENT_400 (1 << SD_SET_CURRENT_LIMIT_400)
156 #define SD_MAX_CURRENT_600 (1 << SD_SET_CURRENT_LIMIT_600) 156 #define SD_MAX_CURRENT_600 (1 << SD_SET_CURRENT_LIMIT_600)
157 #define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800) 157 #define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800)
158 }; 158 };
159 159
160 struct sdio_cccr { 160 struct sdio_cccr {
161 unsigned int sdio_vsn; 161 unsigned int sdio_vsn;
162 unsigned int sd_vsn; 162 unsigned int sd_vsn;
163 unsigned int multi_block:1, 163 unsigned int multi_block:1,
164 low_speed:1, 164 low_speed:1,
165 wide_bus:1, 165 wide_bus:1,
166 high_power:1, 166 high_power:1,
167 high_speed:1, 167 high_speed:1,
168 disable_cd:1; 168 disable_cd:1;
169 }; 169 };
170 170
171 struct sdio_cis { 171 struct sdio_cis {
172 unsigned short vendor; 172 unsigned short vendor;
173 unsigned short device; 173 unsigned short device;
174 unsigned short blksize; 174 unsigned short blksize;
175 unsigned int max_dtr; 175 unsigned int max_dtr;
176 }; 176 };
177 177
178 struct mmc_host; 178 struct mmc_host;
179 struct sdio_func; 179 struct sdio_func;
180 struct sdio_func_tuple; 180 struct sdio_func_tuple;
181 181
182 #define SDIO_MAX_FUNCS 7 182 #define SDIO_MAX_FUNCS 7
183 183
184 /* The number of MMC physical partitions. These consist of: 184 /* The number of MMC physical partitions. These consist of:
185 * boot partitions (2), general purpose partitions (4) in MMC v4.4. 185 * boot partitions (2), general purpose partitions (4) in MMC v4.4.
186 */ 186 */
187 #define MMC_NUM_BOOT_PARTITION 2 187 #define MMC_NUM_BOOT_PARTITION 2
188 #define MMC_NUM_GP_PARTITION 4 188 #define MMC_NUM_GP_PARTITION 4
189 #define MMC_NUM_PHY_PARTITION 6 189 #define MMC_NUM_PHY_PARTITION 6
190 #define MAX_MMC_PART_NAME_LEN 20 190 #define MAX_MMC_PART_NAME_LEN 20
191 191
192 /* 192 /*
193 * MMC Physical partitions 193 * MMC Physical partitions
194 */ 194 */
195 struct mmc_part { 195 struct mmc_part {
196 unsigned int size; /* partition size (in bytes) */ 196 unsigned int size; /* partition size (in bytes) */
197 unsigned int part_cfg; /* partition type */ 197 unsigned int part_cfg; /* partition type */
198 char name[MAX_MMC_PART_NAME_LEN]; 198 char name[MAX_MMC_PART_NAME_LEN];
199 bool force_ro; /* to make boot parts RO by default */ 199 bool force_ro; /* to make boot parts RO by default */
200 unsigned int area_type; 200 unsigned int area_type;
201 #define MMC_BLK_DATA_AREA_MAIN (1<<0) 201 #define MMC_BLK_DATA_AREA_MAIN (1<<0)
202 #define MMC_BLK_DATA_AREA_BOOT (1<<1) 202 #define MMC_BLK_DATA_AREA_BOOT (1<<1)
203 #define MMC_BLK_DATA_AREA_GP (1<<2) 203 #define MMC_BLK_DATA_AREA_GP (1<<2)
204 }; 204 };
205 205
206 /* 206 /*
207 * MMC device 207 * MMC device
208 */ 208 */
209 struct mmc_card { 209 struct mmc_card {
210 struct mmc_host *host; /* the host this device belongs to */ 210 struct mmc_host *host; /* the host this device belongs to */
211 struct device dev; /* the device */ 211 struct device dev; /* the device */
212 unsigned int rca; /* relative card address of device */ 212 unsigned int rca; /* relative card address of device */
213 unsigned int type; /* card type */ 213 unsigned int type; /* card type */
214 #define MMC_TYPE_MMC 0 /* MMC card */ 214 #define MMC_TYPE_MMC 0 /* MMC card */
215 #define MMC_TYPE_SD 1 /* SD card */ 215 #define MMC_TYPE_SD 1 /* SD card */
216 #define MMC_TYPE_SDIO 2 /* SDIO card */ 216 #define MMC_TYPE_SDIO 2 /* SDIO card */
217 #define MMC_TYPE_SD_COMBO 3 /* SD combo (IO+mem) card */ 217 #define MMC_TYPE_SD_COMBO 3 /* SD combo (IO+mem) card */
218 unsigned int state; /* (our) card state */ 218 unsigned int state; /* (our) card state */
219 #define MMC_STATE_PRESENT (1<<0) /* present in sysfs */ 219 #define MMC_STATE_PRESENT (1<<0) /* present in sysfs */
220 #define MMC_STATE_READONLY (1<<1) /* card is read-only */ 220 #define MMC_STATE_READONLY (1<<1) /* card is read-only */
221 #define MMC_STATE_HIGHSPEED (1<<2) /* card is in high speed mode */ 221 #define MMC_STATE_HIGHSPEED (1<<2) /* card is in high speed mode */
222 #define MMC_STATE_BLOCKADDR (1<<3) /* card uses block-addressing */ 222 #define MMC_STATE_BLOCKADDR (1<<3) /* card uses block-addressing */
223 #define MMC_STATE_HIGHSPEED_DDR (1<<4) /* card is in high speed DDR mode */ 223 #define MMC_STATE_HIGHSPEED_DDR (1<<4) /* card is in high speed DDR mode */
224 #define MMC_STATE_ULTRAHIGHSPEED (1<<5) /* card is in ultra high speed mode */ 224 #define MMC_STATE_ULTRAHIGHSPEED (1<<5) /* card is in ultra high speed mode */
225 #define MMC_CARD_SDXC (1<<6) /* card is SDXC */ 225 #define MMC_CARD_SDXC (1<<6) /* card is SDXC */
226 #define MMC_CARD_REMOVED (1<<7) /* card has been removed */ 226 #define MMC_CARD_REMOVED (1<<7) /* card has been removed */
227 #define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */ 227 #define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */
228 #define MMC_STATE_SLEEP (1<<9) /* card is in sleep state */ 228 #define MMC_STATE_SLEEP (1<<9) /* card is in sleep state */
229 unsigned int quirks; /* card quirks */ 229 unsigned int quirks; /* card quirks */
230 #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ 230 #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */
231 #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ 231 #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */
232 /* for byte mode */ 232 /* for byte mode */
233 #define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */ 233 #define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */
234 /* (missing CIA registers) */ 234 /* (missing CIA registers) */
235 #define MMC_QUIRK_BROKEN_CLK_GATING (1<<3) /* clock gating the sdio bus will make card fail */ 235 #define MMC_QUIRK_BROKEN_CLK_GATING (1<<3) /* clock gating the sdio bus will make card fail */
236 #define MMC_QUIRK_NONSTD_FUNC_IF (1<<4) /* SDIO card has nonstd function interfaces */ 236 #define MMC_QUIRK_NONSTD_FUNC_IF (1<<4) /* SDIO card has nonstd function interfaces */
237 #define MMC_QUIRK_DISABLE_CD (1<<5) /* disconnect CD/DAT[3] resistor */ 237 #define MMC_QUIRK_DISABLE_CD (1<<5) /* disconnect CD/DAT[3] resistor */
238 #define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */ 238 #define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */
239 #define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */ 239 #define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */
240 #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */ 240 #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */
241 #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ 241 #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
242 #define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */
242 /* byte mode */ 243 /* byte mode */
243 unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */ 244 unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */
244 #define MMC_NO_POWER_NOTIFICATION 0 245 #define MMC_NO_POWER_NOTIFICATION 0
245 #define MMC_POWERED_ON 1 246 #define MMC_POWERED_ON 1
246 #define MMC_POWEROFF_SHORT 2 247 #define MMC_POWEROFF_SHORT 2
247 #define MMC_POWEROFF_LONG 3 248 #define MMC_POWEROFF_LONG 3
248 249
249 unsigned int erase_size; /* erase size in sectors */ 250 unsigned int erase_size; /* erase size in sectors */
250 unsigned int erase_shift; /* if erase unit is power 2 */ 251 unsigned int erase_shift; /* if erase unit is power 2 */
251 unsigned int pref_erase; /* in sectors */ 252 unsigned int pref_erase; /* in sectors */
252 u8 erased_byte; /* value of erased bytes */ 253 u8 erased_byte; /* value of erased bytes */
253 254
254 u32 raw_cid[4]; /* raw card CID */ 255 u32 raw_cid[4]; /* raw card CID */
255 u32 raw_csd[4]; /* raw card CSD */ 256 u32 raw_csd[4]; /* raw card CSD */
256 u32 raw_scr[2]; /* raw card SCR */ 257 u32 raw_scr[2]; /* raw card SCR */
257 struct mmc_cid cid; /* card identification */ 258 struct mmc_cid cid; /* card identification */
258 struct mmc_csd csd; /* card specific */ 259 struct mmc_csd csd; /* card specific */
259 struct mmc_ext_csd ext_csd; /* mmc v4 extended card specific */ 260 struct mmc_ext_csd ext_csd; /* mmc v4 extended card specific */
260 struct sd_scr scr; /* extra SD information */ 261 struct sd_scr scr; /* extra SD information */
261 struct sd_ssr ssr; /* yet more SD information */ 262 struct sd_ssr ssr; /* yet more SD information */
262 struct sd_switch_caps sw_caps; /* switch (CMD6) caps */ 263 struct sd_switch_caps sw_caps; /* switch (CMD6) caps */
263 264
264 unsigned int sdio_funcs; /* number of SDIO functions */ 265 unsigned int sdio_funcs; /* number of SDIO functions */
265 struct sdio_cccr cccr; /* common card info */ 266 struct sdio_cccr cccr; /* common card info */
266 struct sdio_cis cis; /* common tuple info */ 267 struct sdio_cis cis; /* common tuple info */
267 struct sdio_func *sdio_func[SDIO_MAX_FUNCS]; /* SDIO functions (devices) */ 268 struct sdio_func *sdio_func[SDIO_MAX_FUNCS]; /* SDIO functions (devices) */
268 struct sdio_func *sdio_single_irq; /* SDIO function when only one IRQ active */ 269 struct sdio_func *sdio_single_irq; /* SDIO function when only one IRQ active */
269 unsigned num_info; /* number of info strings */ 270 unsigned num_info; /* number of info strings */
270 const char **info; /* info strings */ 271 const char **info; /* info strings */
271 struct sdio_func_tuple *tuples; /* unknown common tuples */ 272 struct sdio_func_tuple *tuples; /* unknown common tuples */
272 273
273 unsigned int sd_bus_speed; /* Bus Speed Mode set for the card */ 274 unsigned int sd_bus_speed; /* Bus Speed Mode set for the card */
274 275
275 struct dentry *debugfs_root; 276 struct dentry *debugfs_root;
276 struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */ 277 struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
277 unsigned int nr_parts; 278 unsigned int nr_parts;
278 }; 279 };
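The new MMC_QUIRK_SEC_ERASE_TRIM_BROKEN bit (line 242 of the new file) is meant to be tested wherever the block driver chooses between a plain and a secure discard. A minimal sketch of such a check, assuming the REQ_SECURE request flag and the two issue helpers from drivers/mmc/card/block.c; the wrapper function itself is hypothetical:

static int mmc_blk_route_discard(struct mmc_queue *mq, struct request *req)
{
	struct mmc_card *card = mq->card;

	/*
	 * Secure erase/trim triggers a firmware bug that permanently
	 * corrupts the affected MoviNAND parts, so take the secure
	 * path only when the quirk bit is clear; otherwise fall back
	 * to a plain discard.
	 */
	if ((req->cmd_flags & REQ_SECURE) &&
	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
		return mmc_blk_issue_secdiscard_rq(mq, req);

	return mmc_blk_issue_discard_rq(mq, req);
}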
279 280
280 /* 281 /*
281 * This function fills in the contents of an mmc_part. 282 * This function fills in the contents of an mmc_part.
282 */ 283 */
283 static inline void mmc_part_add(struct mmc_card *card, unsigned int size, 284 static inline void mmc_part_add(struct mmc_card *card, unsigned int size,
284 unsigned int part_cfg, char *name, int idx, bool ro, 285 unsigned int part_cfg, char *name, int idx, bool ro,
285 int area_type) 286 int area_type)
286 { 287 {
287 card->part[card->nr_parts].size = size; 288 card->part[card->nr_parts].size = size;
288 card->part[card->nr_parts].part_cfg = part_cfg; 289 card->part[card->nr_parts].part_cfg = part_cfg;
289 sprintf(card->part[card->nr_parts].name, name, idx); 290 sprintf(card->part[card->nr_parts].name, name, idx);
290 card->part[card->nr_parts].force_ro = ro; 291 card->part[card->nr_parts].force_ro = ro;
291 card->part[card->nr_parts].area_type = area_type; 292 card->part[card->nr_parts].area_type = area_type;
292 card->nr_parts++; 293 card->nr_parts++;
293 } 294 }
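Note that the name argument is a printf-style template that sprintf() combines with idx, so callers pass something like "boot%d". A sketch of how a caller might register the two boot partitions while decoding EXT_CSD (the BOOT_SIZE_MULT arithmetic in 128 KiB units follows the eMMC convention; the surrounding variable names are illustrative):

	unsigned int idx;
	/* Boot partition size is BOOT_SIZE_MULT x 128 KiB. */
	unsigned int boot_size = ext_csd[EXT_CSD_BOOT_MULT] * 128 * 1024;

	for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++)
		mmc_part_add(card, boot_size,
			     EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
			     "boot%d", idx, true,
			     MMC_BLK_DATA_AREA_BOOT);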
294 295
295 /* 296 /*
296 * The world is not perfect and supplies us with broken mmc/sdio devices. 297 * The world is not perfect and supplies us with broken mmc/sdio devices.
297 * For at least some of these bugs we need a work-around. 298 * For at least some of these bugs we need a work-around.
298 */ 299 */
299 300
300 struct mmc_fixup { 301 struct mmc_fixup {
301 /* CID-specific fields. */ 302 /* CID-specific fields. */
302 const char *name; 303 const char *name;
303 304
304 /* Valid revision range */ 305 /* Valid revision range */
305 u64 rev_start, rev_end; 306 u64 rev_start, rev_end;
306 307
307 unsigned int manfid; 308 unsigned int manfid;
308 unsigned short oemid; 309 unsigned short oemid;
309 310
310 /* SDIO-specific fields. You can use SDIO_ANY_ID here of course */ 311 /* SDIO-specific fields. You can use SDIO_ANY_ID here of course */
311 u16 cis_vendor, cis_device; 312 u16 cis_vendor, cis_device;
312 313
313 void (*vendor_fixup)(struct mmc_card *card, int data); 314 void (*vendor_fixup)(struct mmc_card *card, int data);
314 int data; 315 int data;
315 }; 316 };
316 317
317 #define CID_MANFID_ANY (-1u) 318 #define CID_MANFID_ANY (-1u)
318 #define CID_OEMID_ANY ((unsigned short) -1) 319 #define CID_OEMID_ANY ((unsigned short) -1)
319 #define CID_NAME_ANY (NULL) 320 #define CID_NAME_ANY (NULL)
320 321
321 #define END_FIXUP { 0 } 322 #define END_FIXUP { 0 }
322 323
323 #define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \ 324 #define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \
324 _cis_vendor, _cis_device, \ 325 _cis_vendor, _cis_device, \
325 _fixup, _data) \ 326 _fixup, _data) \
326 { \ 327 { \
327 .name = (_name), \ 328 .name = (_name), \
328 .manfid = (_manfid), \ 329 .manfid = (_manfid), \
329 .oemid = (_oemid), \ 330 .oemid = (_oemid), \
330 .rev_start = (_rev_start), \ 331 .rev_start = (_rev_start), \
331 .rev_end = (_rev_end), \ 332 .rev_end = (_rev_end), \
332 .cis_vendor = (_cis_vendor), \ 333 .cis_vendor = (_cis_vendor), \
333 .cis_device = (_cis_device), \ 334 .cis_device = (_cis_device), \
334 .vendor_fixup = (_fixup), \ 335 .vendor_fixup = (_fixup), \
335 .data = (_data), \ 336 .data = (_data), \
336 } 337 }
337 338
338 #define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \ 339 #define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \
339 _fixup, _data) \ 340 _fixup, _data) \
340 _FIXUP_EXT(_name, _manfid, \ 341 _FIXUP_EXT(_name, _manfid, \
341 _oemid, _rev_start, _rev_end, \ 342 _oemid, _rev_start, _rev_end, \
342 SDIO_ANY_ID, SDIO_ANY_ID, \ 343 SDIO_ANY_ID, SDIO_ANY_ID, \
343 _fixup, _data) \ 344 _fixup, _data) \
344 345
345 #define MMC_FIXUP(_name, _manfid, _oemid, _fixup, _data) \ 346 #define MMC_FIXUP(_name, _manfid, _oemid, _fixup, _data) \
346 MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data) 347 MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data)
347 348
348 #define SDIO_FIXUP(_vendor, _device, _fixup, _data) \ 349 #define SDIO_FIXUP(_vendor, _device, _fixup, _data) \
349 _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \ 350 _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \
350 CID_OEMID_ANY, 0, -1ull, \ 351 CID_OEMID_ANY, 0, -1ull, \
351 _vendor, _device, \ 352 _vendor, _device, \
352 _fixup, _data) \ 353 _fixup, _data) \
353 354
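These macros are how the MoviNAND workaround in this pull wires the new quirk to specific cards: the block driver keeps a fixup table that is matched against each card's CID at probe time. A sketch of such a table, assuming a Samsung manufacturer-ID define; the product-name strings here are illustrative rather than quoted from the commit:

#define CID_MANFID_SAMSUNG	0x15	/* assumed value, for illustration */

static const struct mmc_fixup blk_fixups[] = {
	/*
	 * Secure erase/trim on the affected Samsung MoviNAND parts
	 * hits a firmware bug that corrupts the device, so flag them.
	 */
	MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY,
		  add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY,
		  add_quirk_mmc, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	END_FIXUP
};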
354 #define cid_rev(hwrev, fwrev, year, month) \ 355 #define cid_rev(hwrev, fwrev, year, month) \
355 (((u64) hwrev) << 40 | \ 356 (((u64) hwrev) << 40 | \
356 ((u64) fwrev) << 32 | \ 357 ((u64) fwrev) << 32 | \
357 ((u64) year) << 16 | \ 358 ((u64) year) << 16 | \
358 ((u64) month)) 359 ((u64) month))
359 360
360 #define cid_rev_card(card) \ 361 #define cid_rev_card(card) \
361 cid_rev(card->cid.hwrev, \ 362 cid_rev(card->cid.hwrev, \
362 card->cid.fwrev, \ 363 card->cid.fwrev, \
363 card->cid.year, \ 364 card->cid.year, \
364 card->cid.month) 365 card->cid.month)
365 366
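cid_rev() packs the CID's hardware revision, firmware revision, year, and month into one u64, most significant field first, so two revisions compare lexicographically on (hwrev, fwrev, year, month); that is what lets MMC_FIXUP_REV bound a fixup to a revision window. A sketch, with every value illustrative:

static const struct mmc_fixup rev_fixups[] = {
	/*
	 * Apply only to "XYZ16G" cards whose (hwrev, fwrev, year,
	 * month) tuple sorts at or below (0, 0x2, 2011, 6).
	 */
	MMC_FIXUP_REV("XYZ16G", 0x11, CID_OEMID_ANY,
		      cid_rev(0, 0, 0, 0),
		      cid_rev(0, 0x2, 2011, 6),
		      add_quirk_mmc, MMC_QUIRK_LONG_READ_TIME),
	END_FIXUP
};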
366 /* 367 /*
367 * Unconditional quirk add/remove. 368 * Unconditional quirk add/remove.
368 */ 369 */
369 370
370 static inline void __maybe_unused add_quirk(struct mmc_card *card, int data) 371 static inline void __maybe_unused add_quirk(struct mmc_card *card, int data)
371 { 372 {
372 card->quirks |= data; 373 card->quirks |= data;
373 } 374 }
374 375
375 static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data) 376 static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
376 { 377 {
377 card->quirks &= ~data; 378 card->quirks &= ~data;
378 } 379 }
379 380
380 #define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC) 381 #define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC)
381 #define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD) 382 #define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD)
382 #define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO) 383 #define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO)
383 384
384 #define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT) 385 #define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT)
385 #define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY) 386 #define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY)
386 #define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED) 387 #define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED)
387 #define mmc_card_hs200(c) ((c)->state & MMC_STATE_HIGHSPEED_200) 388 #define mmc_card_hs200(c) ((c)->state & MMC_STATE_HIGHSPEED_200)
388 #define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR) 389 #define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
389 #define mmc_card_ddr_mode(c) ((c)->state & MMC_STATE_HIGHSPEED_DDR) 390 #define mmc_card_ddr_mode(c) ((c)->state & MMC_STATE_HIGHSPEED_DDR)
390 #define mmc_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) 391 #define mmc_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED)
391 #define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) 392 #define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED)
392 #define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC) 393 #define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
393 #define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED)) 394 #define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED))
394 #define mmc_card_is_sleep(c) ((c)->state & MMC_STATE_SLEEP) 395 #define mmc_card_is_sleep(c) ((c)->state & MMC_STATE_SLEEP)
395 396
396 #define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) 397 #define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
397 #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) 398 #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
398 #define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED) 399 #define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED)
399 #define mmc_card_set_hs200(c) ((c)->state |= MMC_STATE_HIGHSPEED_200) 400 #define mmc_card_set_hs200(c) ((c)->state |= MMC_STATE_HIGHSPEED_200)
400 #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) 401 #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
401 #define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR) 402 #define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR)
402 #define mmc_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED) 403 #define mmc_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED)
403 #define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED) 404 #define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED)
404 #define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC) 405 #define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
405 #define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED) 406 #define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED)
406 #define mmc_card_set_sleep(c) ((c)->state |= MMC_STATE_SLEEP) 407 #define mmc_card_set_sleep(c) ((c)->state |= MMC_STATE_SLEEP)
407 408
408 #define mmc_card_clr_sleep(c) ((c)->state &= ~MMC_STATE_SLEEP) 409 #define mmc_card_clr_sleep(c) ((c)->state &= ~MMC_STATE_SLEEP)
409 /* 410 /*
410 * Quirk add/remove for MMC products. 411 * Quirk add/remove for MMC products.
411 */ 412 */
412 413
413 static inline void __maybe_unused add_quirk_mmc(struct mmc_card *card, int data) 414 static inline void __maybe_unused add_quirk_mmc(struct mmc_card *card, int data)
414 { 415 {
415 if (mmc_card_mmc(card)) 416 if (mmc_card_mmc(card))
416 card->quirks |= data; 417 card->quirks |= data;
417 } 418 }
418 419
419 static inline void __maybe_unused remove_quirk_mmc(struct mmc_card *card, 420 static inline void __maybe_unused remove_quirk_mmc(struct mmc_card *card,
420 int data) 421 int data)
421 { 422 {
422 if (mmc_card_mmc(card)) 423 if (mmc_card_mmc(card))
423 card->quirks &= ~data; 424 card->quirks &= ~data;
424 } 425 }
425 426
426 /* 427 /*
427 * Quirk add/remove for SD products. 428 * Quirk add/remove for SD products.
428 */ 429 */
429 430
430 static inline void __maybe_unused add_quirk_sd(struct mmc_card *card, int data) 431 static inline void __maybe_unused add_quirk_sd(struct mmc_card *card, int data)
431 { 432 {
432 if (mmc_card_sd(card)) 433 if (mmc_card_sd(card))
433 card->quirks |= data; 434 card->quirks |= data;
434 } 435 }
435 436
436 static inline void __maybe_unused remove_quirk_sd(struct mmc_card *card, 437 static inline void __maybe_unused remove_quirk_sd(struct mmc_card *card,
437 int data) 438 int data)
438 { 439 {
439 if (mmc_card_sd(card)) 440 if (mmc_card_sd(card))
440 card->quirks &= ~data; 441 card->quirks &= ~data;
441 } 442 }
442 443
443 static inline int mmc_card_lenient_fn0(const struct mmc_card *c) 444 static inline int mmc_card_lenient_fn0(const struct mmc_card *c)
444 { 445 {
445 return c->quirks & MMC_QUIRK_LENIENT_FN0; 446 return c->quirks & MMC_QUIRK_LENIENT_FN0;
446 } 447 }
447 448
448 static inline int mmc_blksz_for_byte_mode(const struct mmc_card *c) 449 static inline int mmc_blksz_for_byte_mode(const struct mmc_card *c)
449 { 450 {
450 return c->quirks & MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; 451 return c->quirks & MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
451 } 452 }
452 453
453 static inline int mmc_card_disable_cd(const struct mmc_card *c) 454 static inline int mmc_card_disable_cd(const struct mmc_card *c)
454 { 455 {
455 return c->quirks & MMC_QUIRK_DISABLE_CD; 456 return c->quirks & MMC_QUIRK_DISABLE_CD;
456 } 457 }
457 458
458 static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c) 459 static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c)
459 { 460 {
460 return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF; 461 return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF;
461 } 462 }
462 463
463 static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c) 464 static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c)
464 { 465 {
465 return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512; 466 return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512;
466 } 467 }
467 468
468 static inline int mmc_card_long_read_time(const struct mmc_card *c) 469 static inline int mmc_card_long_read_time(const struct mmc_card *c)
469 { 470 {
470 return c->quirks & MMC_QUIRK_LONG_READ_TIME; 471 return c->quirks & MMC_QUIRK_LONG_READ_TIME;
471 } 472 }
472 473
473 #define mmc_card_name(c) ((c)->cid.prod_name) 474 #define mmc_card_name(c) ((c)->cid.prod_name)
474 #define mmc_card_id(c) (dev_name(&(c)->dev)) 475 #define mmc_card_id(c) (dev_name(&(c)->dev))
475 476
476 #define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev) 477 #define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev)
477 478
478 #define mmc_list_to_card(l) container_of(l, struct mmc_card, node) 479 #define mmc_list_to_card(l) container_of(l, struct mmc_card, node)
479 #define mmc_get_drvdata(c) dev_get_drvdata(&(c)->dev) 480 #define mmc_get_drvdata(c) dev_get_drvdata(&(c)->dev)
480 #define mmc_set_drvdata(c,d) dev_set_drvdata(&(c)->dev, d) 481 #define mmc_set_drvdata(c,d) dev_set_drvdata(&(c)->dev, d)
481 482
482 /* 483 /*
483 * MMC device driver (e.g., Flash card, I/O card...) 484 * MMC device driver (e.g., Flash card, I/O card...)
484 */ 485 */
485 struct mmc_driver { 486 struct mmc_driver {
486 struct device_driver drv; 487 struct device_driver drv;
487 int (*probe)(struct mmc_card *); 488 int (*probe)(struct mmc_card *);
488 void (*remove)(struct mmc_card *); 489 void (*remove)(struct mmc_card *);
489 int (*suspend)(struct mmc_card *); 490 int (*suspend)(struct mmc_card *);
490 int (*resume)(struct mmc_card *); 491 int (*resume)(struct mmc_card *);
491 }; 492 };
492 493
493 extern int mmc_register_driver(struct mmc_driver *); 494 extern int mmc_register_driver(struct mmc_driver *);
494 extern void mmc_unregister_driver(struct mmc_driver *); 495 extern void mmc_unregister_driver(struct mmc_driver *);
495 496
496 extern void mmc_fixup_device(struct mmc_card *card, 497 extern void mmc_fixup_device(struct mmc_card *card,
497 const struct mmc_fixup *table); 498 const struct mmc_fixup *table);
498 499
499 #endif /* LINUX_MMC_CARD_H */ 500 #endif /* LINUX_MMC_CARD_H */
500 501
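mmc_fixup_device() is the consumer of everything above: it walks a fixup table, matching each entry's CID name, manfid/oemid, SDIO CIS ids, and cid_rev() revision window against the card, and calls vendor_fixup(card, data) on every hit. A sketch of a driver applying a table when it binds to a card, reusing the hypothetical blk_fixups table from the earlier sketch (the real probe does considerably more setup):

static int mmc_blk_probe(struct mmc_card *card)
{
	/* OR in any quirk bits whose table entry matches this card. */
	mmc_fixup_device(card, blk_fixups);

	if (card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)
		pr_info("%s: secure erase/trim disabled by quirk\n",
			mmc_card_id(card));

	/* gendisk and request-queue setup would follow here */
	return 0;
}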