Commit 0324896e2ec7602224b9dcd981b19b1827498cd9

Authored by Krzysztof Kozlowski
Committed by Greg Kroah-Hartman
1 parent 9abaccf3bf

mmc: sdhci: Fix sleep in atomic after inserting SD card

commit 2836766a9d0bd02c66073f8dd44796e6cc23848d upstream.

Sleep in atomic context happened on Trats2 board after inserting or
removing SD card because mmc_gpio_get_cd() was called under spin lock.

Fix this by moving card detection earlier, before acquiring spin lock.
The mmc_gpio_get_cd() call does not have to be protected by spin lock
because it does not access any sdhci internal data.
The sdhci_do_get_cd() call accesses host flags (SDHCI_DEVICE_DEAD). After
moving it outside of the spin lock it could theoretically race with driver
removal, but there is still no actual protection against manual card
eject.

Dmesg after inserting SD card:
[   41.663414] BUG: sleeping function called from invalid context at drivers/gpio/gpiolib.c:1511
[   41.670469] in_atomic(): 1, irqs_disabled(): 128, pid: 30, name: kworker/u8:1
[   41.677580] INFO: lockdep is turned off.
[   41.681486] irq event stamp: 61972
[   41.684872] hardirqs last  enabled at (61971): [<c0490ee0>] _raw_spin_unlock_irq+0x24/0x5c
[   41.693118] hardirqs last disabled at (61972): [<c04907ac>] _raw_spin_lock_irq+0x18/0x54
[   41.701190] softirqs last  enabled at (61648): [<c0026fd4>] __do_softirq+0x234/0x2c8
[   41.708914] softirqs last disabled at (61631): [<c00273a0>] irq_exit+0xd0/0x114
[   41.716206] Preemption disabled at:[<  (null)>]   (null)
[   41.721500]
[   41.722985] CPU: 3 PID: 30 Comm: kworker/u8:1 Tainted: G        W      3.18.0-rc5-next-20141121 #883
[   41.732111] Workqueue: kmmcd mmc_rescan
[   41.735945] [<c0014d2c>] (unwind_backtrace) from [<c0011c80>] (show_stack+0x10/0x14)
[   41.743661] [<c0011c80>] (show_stack) from [<c0489d14>] (dump_stack+0x70/0xbc)
[   41.750867] [<c0489d14>] (dump_stack) from [<c0228b74>] (gpiod_get_raw_value_cansleep+0x18/0x30)
[   41.759628] [<c0228b74>] (gpiod_get_raw_value_cansleep) from [<c03646e8>] (mmc_gpio_get_cd+0x38/0x58)
[   41.768821] [<c03646e8>] (mmc_gpio_get_cd) from [<c036d378>] (sdhci_request+0x50/0x1a4)
[   41.776808] [<c036d378>] (sdhci_request) from [<c0357934>] (mmc_start_request+0x138/0x268)
[   41.785051] [<c0357934>] (mmc_start_request) from [<c0357cc8>] (mmc_wait_for_req+0x58/0x1a0)
[   41.793469] [<c0357cc8>] (mmc_wait_for_req) from [<c0357e68>] (mmc_wait_for_cmd+0x58/0x78)
[   41.801714] [<c0357e68>] (mmc_wait_for_cmd) from [<c0361c00>] (mmc_io_rw_direct_host+0x98/0x124)
[   41.810480] [<c0361c00>] (mmc_io_rw_direct_host) from [<c03620f8>] (sdio_reset+0x2c/0x64)
[   41.818641] [<c03620f8>] (sdio_reset) from [<c035a3d8>] (mmc_rescan+0x254/0x2e4)
[   41.826028] [<c035a3d8>] (mmc_rescan) from [<c003a0e0>] (process_one_work+0x180/0x3f4)
[   41.833920] [<c003a0e0>] (process_one_work) from [<c003a3bc>] (worker_thread+0x34/0x4b0)
[   41.841991] [<c003a3bc>] (worker_thread) from [<c003fed8>] (kthread+0xe4/0x104)
[   41.849285] [<c003fed8>] (kthread) from [<c000f268>] (ret_from_fork+0x14/0x2c)
[   42.038276] mmc0: new high speed SDHC card at address 1234

Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
Fixes: 94144a465dd0 ("mmc: sdhci: add get_cd() implementation")
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Showing 1 changed file with 6 additions and 2 deletions Inline Diff

drivers/mmc/host/sdhci.c
1 /* 1 /*
2 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver 2 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
3 * 3 *
4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at 8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version. 9 * your option) any later version.
10 * 10 *
11 * Thanks to the following companies for their support: 11 * Thanks to the following companies for their support:
12 * 12 *
13 * - JMicron (hardware and technical support) 13 * - JMicron (hardware and technical support)
14 */ 14 */
15 15
16 #include <linux/delay.h> 16 #include <linux/delay.h>
17 #include <linux/highmem.h> 17 #include <linux/highmem.h>
18 #include <linux/io.h> 18 #include <linux/io.h>
19 #include <linux/module.h> 19 #include <linux/module.h>
20 #include <linux/dma-mapping.h> 20 #include <linux/dma-mapping.h>
21 #include <linux/slab.h> 21 #include <linux/slab.h>
22 #include <linux/scatterlist.h> 22 #include <linux/scatterlist.h>
23 #include <linux/regulator/consumer.h> 23 #include <linux/regulator/consumer.h>
24 #include <linux/pm_runtime.h> 24 #include <linux/pm_runtime.h>
25 25
26 #include <linux/leds.h> 26 #include <linux/leds.h>
27 27
28 #include <linux/mmc/mmc.h> 28 #include <linux/mmc/mmc.h>
29 #include <linux/mmc/host.h> 29 #include <linux/mmc/host.h>
30 #include <linux/mmc/card.h> 30 #include <linux/mmc/card.h>
31 #include <linux/mmc/slot-gpio.h> 31 #include <linux/mmc/slot-gpio.h>
32 32
33 #include "sdhci.h" 33 #include "sdhci.h"
34 34
35 #define DRIVER_NAME "sdhci" 35 #define DRIVER_NAME "sdhci"
36 36
37 #define DBG(f, x...) \ 37 #define DBG(f, x...) \
38 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x) 38 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
39 39
40 #if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \ 40 #if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
41 defined(CONFIG_MMC_SDHCI_MODULE)) 41 defined(CONFIG_MMC_SDHCI_MODULE))
42 #define SDHCI_USE_LEDS_CLASS 42 #define SDHCI_USE_LEDS_CLASS
43 #endif 43 #endif
44 44
45 #define MAX_TUNING_LOOP 40 45 #define MAX_TUNING_LOOP 40
46 46
47 #define ADMA_SIZE ((128 * 2 + 1) * 4) 47 #define ADMA_SIZE ((128 * 2 + 1) * 4)
48 48
49 static unsigned int debug_quirks = 0; 49 static unsigned int debug_quirks = 0;
50 static unsigned int debug_quirks2; 50 static unsigned int debug_quirks2;
51 51
52 static void sdhci_finish_data(struct sdhci_host *); 52 static void sdhci_finish_data(struct sdhci_host *);
53 53
54 static void sdhci_finish_command(struct sdhci_host *); 54 static void sdhci_finish_command(struct sdhci_host *);
55 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); 55 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
56 static void sdhci_tuning_timer(unsigned long data); 56 static void sdhci_tuning_timer(unsigned long data);
57 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); 57 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
58 58
59 #ifdef CONFIG_PM_RUNTIME 59 #ifdef CONFIG_PM_RUNTIME
60 static int sdhci_runtime_pm_get(struct sdhci_host *host); 60 static int sdhci_runtime_pm_get(struct sdhci_host *host);
61 static int sdhci_runtime_pm_put(struct sdhci_host *host); 61 static int sdhci_runtime_pm_put(struct sdhci_host *host);
62 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host); 62 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
63 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host); 63 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
64 #else 64 #else
65 static inline int sdhci_runtime_pm_get(struct sdhci_host *host) 65 static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
66 { 66 {
67 return 0; 67 return 0;
68 } 68 }
69 static inline int sdhci_runtime_pm_put(struct sdhci_host *host) 69 static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
70 { 70 {
71 return 0; 71 return 0;
72 } 72 }
73 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) 73 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
74 { 74 {
75 } 75 }
76 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) 76 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
77 { 77 {
78 } 78 }
79 #endif 79 #endif
80 80
81 static void sdhci_dumpregs(struct sdhci_host *host) 81 static void sdhci_dumpregs(struct sdhci_host *host)
82 { 82 {
83 pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n", 83 pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
84 mmc_hostname(host->mmc)); 84 mmc_hostname(host->mmc));
85 85
86 pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n", 86 pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
87 sdhci_readl(host, SDHCI_DMA_ADDRESS), 87 sdhci_readl(host, SDHCI_DMA_ADDRESS),
88 sdhci_readw(host, SDHCI_HOST_VERSION)); 88 sdhci_readw(host, SDHCI_HOST_VERSION));
89 pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n", 89 pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
90 sdhci_readw(host, SDHCI_BLOCK_SIZE), 90 sdhci_readw(host, SDHCI_BLOCK_SIZE),
91 sdhci_readw(host, SDHCI_BLOCK_COUNT)); 91 sdhci_readw(host, SDHCI_BLOCK_COUNT));
92 pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n", 92 pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
93 sdhci_readl(host, SDHCI_ARGUMENT), 93 sdhci_readl(host, SDHCI_ARGUMENT),
94 sdhci_readw(host, SDHCI_TRANSFER_MODE)); 94 sdhci_readw(host, SDHCI_TRANSFER_MODE));
95 pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n", 95 pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
96 sdhci_readl(host, SDHCI_PRESENT_STATE), 96 sdhci_readl(host, SDHCI_PRESENT_STATE),
97 sdhci_readb(host, SDHCI_HOST_CONTROL)); 97 sdhci_readb(host, SDHCI_HOST_CONTROL));
98 pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n", 98 pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
99 sdhci_readb(host, SDHCI_POWER_CONTROL), 99 sdhci_readb(host, SDHCI_POWER_CONTROL),
100 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); 100 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
101 pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n", 101 pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
102 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), 102 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
103 sdhci_readw(host, SDHCI_CLOCK_CONTROL)); 103 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
104 pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n", 104 pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
105 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), 105 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
106 sdhci_readl(host, SDHCI_INT_STATUS)); 106 sdhci_readl(host, SDHCI_INT_STATUS));
107 pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n", 107 pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
108 sdhci_readl(host, SDHCI_INT_ENABLE), 108 sdhci_readl(host, SDHCI_INT_ENABLE),
109 sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); 109 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
110 pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n", 110 pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
111 sdhci_readw(host, SDHCI_ACMD12_ERR), 111 sdhci_readw(host, SDHCI_ACMD12_ERR),
112 sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); 112 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
113 pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n", 113 pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
114 sdhci_readl(host, SDHCI_CAPABILITIES), 114 sdhci_readl(host, SDHCI_CAPABILITIES),
115 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 115 sdhci_readl(host, SDHCI_CAPABILITIES_1));
116 pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n", 116 pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
117 sdhci_readw(host, SDHCI_COMMAND), 117 sdhci_readw(host, SDHCI_COMMAND),
118 sdhci_readl(host, SDHCI_MAX_CURRENT)); 118 sdhci_readl(host, SDHCI_MAX_CURRENT));
119 pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n", 119 pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
120 sdhci_readw(host, SDHCI_HOST_CONTROL2)); 120 sdhci_readw(host, SDHCI_HOST_CONTROL2));
121 121
122 if (host->flags & SDHCI_USE_ADMA) 122 if (host->flags & SDHCI_USE_ADMA)
123 pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", 123 pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
124 readl(host->ioaddr + SDHCI_ADMA_ERROR), 124 readl(host->ioaddr + SDHCI_ADMA_ERROR),
125 readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); 125 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
126 126
127 pr_debug(DRIVER_NAME ": ===========================================\n"); 127 pr_debug(DRIVER_NAME ": ===========================================\n");
128 } 128 }
129 129
130 /*****************************************************************************\ 130 /*****************************************************************************\
131 * * 131 * *
132 * Low level functions * 132 * Low level functions *
133 * * 133 * *
134 \*****************************************************************************/ 134 \*****************************************************************************/
135 135
136 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) 136 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
137 { 137 {
138 u32 present; 138 u32 present;
139 139
140 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || 140 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
141 (host->mmc->caps & MMC_CAP_NONREMOVABLE)) 141 (host->mmc->caps & MMC_CAP_NONREMOVABLE))
142 return; 142 return;
143 143
144 if (enable) { 144 if (enable) {
145 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 145 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
146 SDHCI_CARD_PRESENT; 146 SDHCI_CARD_PRESENT;
147 147
148 host->ier |= present ? SDHCI_INT_CARD_REMOVE : 148 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
149 SDHCI_INT_CARD_INSERT; 149 SDHCI_INT_CARD_INSERT;
150 } else { 150 } else {
151 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 151 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
152 } 152 }
153 153
154 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 154 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
155 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 155 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
156 } 156 }
157 157
158 static void sdhci_enable_card_detection(struct sdhci_host *host) 158 static void sdhci_enable_card_detection(struct sdhci_host *host)
159 { 159 {
160 sdhci_set_card_detection(host, true); 160 sdhci_set_card_detection(host, true);
161 } 161 }
162 162
163 static void sdhci_disable_card_detection(struct sdhci_host *host) 163 static void sdhci_disable_card_detection(struct sdhci_host *host)
164 { 164 {
165 sdhci_set_card_detection(host, false); 165 sdhci_set_card_detection(host, false);
166 } 166 }
167 167
168 void sdhci_reset(struct sdhci_host *host, u8 mask) 168 void sdhci_reset(struct sdhci_host *host, u8 mask)
169 { 169 {
170 unsigned long timeout; 170 unsigned long timeout;
171 171
172 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); 172 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
173 173
174 if (mask & SDHCI_RESET_ALL) { 174 if (mask & SDHCI_RESET_ALL) {
175 host->clock = 0; 175 host->clock = 0;
176 /* Reset-all turns off SD Bus Power */ 176 /* Reset-all turns off SD Bus Power */
177 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 177 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
178 sdhci_runtime_pm_bus_off(host); 178 sdhci_runtime_pm_bus_off(host);
179 } 179 }
180 180
181 /* Wait max 100 ms */ 181 /* Wait max 100 ms */
182 timeout = 100; 182 timeout = 100;
183 183
184 /* hw clears the bit when it's done */ 184 /* hw clears the bit when it's done */
185 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) { 185 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
186 if (timeout == 0) { 186 if (timeout == 0) {
187 pr_err("%s: Reset 0x%x never completed.\n", 187 pr_err("%s: Reset 0x%x never completed.\n",
188 mmc_hostname(host->mmc), (int)mask); 188 mmc_hostname(host->mmc), (int)mask);
189 sdhci_dumpregs(host); 189 sdhci_dumpregs(host);
190 return; 190 return;
191 } 191 }
192 timeout--; 192 timeout--;
193 mdelay(1); 193 mdelay(1);
194 } 194 }
195 } 195 }
196 EXPORT_SYMBOL_GPL(sdhci_reset); 196 EXPORT_SYMBOL_GPL(sdhci_reset);
197 197
198 static void sdhci_do_reset(struct sdhci_host *host, u8 mask) 198 static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
199 { 199 {
200 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 200 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
201 if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & 201 if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
202 SDHCI_CARD_PRESENT)) 202 SDHCI_CARD_PRESENT))
203 return; 203 return;
204 } 204 }
205 205
206 host->ops->reset(host, mask); 206 host->ops->reset(host, mask);
207 207
208 if (mask & SDHCI_RESET_ALL) { 208 if (mask & SDHCI_RESET_ALL) {
209 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 209 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
210 if (host->ops->enable_dma) 210 if (host->ops->enable_dma)
211 host->ops->enable_dma(host); 211 host->ops->enable_dma(host);
212 } 212 }
213 213
214 /* Resetting the controller clears many */ 214 /* Resetting the controller clears many */
215 host->preset_enabled = false; 215 host->preset_enabled = false;
216 } 216 }
217 } 217 }
218 218
219 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios); 219 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
220 220
221 static void sdhci_init(struct sdhci_host *host, int soft) 221 static void sdhci_init(struct sdhci_host *host, int soft)
222 { 222 {
223 if (soft) 223 if (soft)
224 sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA); 224 sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
225 else 225 else
226 sdhci_do_reset(host, SDHCI_RESET_ALL); 226 sdhci_do_reset(host, SDHCI_RESET_ALL);
227 227
228 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | 228 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
229 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | 229 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
230 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC | 230 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
231 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | 231 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
232 SDHCI_INT_RESPONSE; 232 SDHCI_INT_RESPONSE;
233 233
234 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 234 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
235 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 235 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
236 236
237 if (soft) { 237 if (soft) {
238 /* force clock reconfiguration */ 238 /* force clock reconfiguration */
239 host->clock = 0; 239 host->clock = 0;
240 sdhci_set_ios(host->mmc, &host->mmc->ios); 240 sdhci_set_ios(host->mmc, &host->mmc->ios);
241 } 241 }
242 } 242 }
243 243
244 static void sdhci_reinit(struct sdhci_host *host) 244 static void sdhci_reinit(struct sdhci_host *host)
245 { 245 {
246 sdhci_init(host, 0); 246 sdhci_init(host, 0);
247 /* 247 /*
248 * Retuning stuffs are affected by different cards inserted and only 248 * Retuning stuffs are affected by different cards inserted and only
249 * applicable to UHS-I cards. So reset these fields to their initial 249 * applicable to UHS-I cards. So reset these fields to their initial
250 * value when card is removed. 250 * value when card is removed.
251 */ 251 */
252 if (host->flags & SDHCI_USING_RETUNING_TIMER) { 252 if (host->flags & SDHCI_USING_RETUNING_TIMER) {
253 host->flags &= ~SDHCI_USING_RETUNING_TIMER; 253 host->flags &= ~SDHCI_USING_RETUNING_TIMER;
254 254
255 del_timer_sync(&host->tuning_timer); 255 del_timer_sync(&host->tuning_timer);
256 host->flags &= ~SDHCI_NEEDS_RETUNING; 256 host->flags &= ~SDHCI_NEEDS_RETUNING;
257 host->mmc->max_blk_count = 257 host->mmc->max_blk_count =
258 (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; 258 (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
259 } 259 }
260 sdhci_enable_card_detection(host); 260 sdhci_enable_card_detection(host);
261 } 261 }
262 262
263 static void sdhci_activate_led(struct sdhci_host *host) 263 static void sdhci_activate_led(struct sdhci_host *host)
264 { 264 {
265 u8 ctrl; 265 u8 ctrl;
266 266
267 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 267 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
268 ctrl |= SDHCI_CTRL_LED; 268 ctrl |= SDHCI_CTRL_LED;
269 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 269 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
270 } 270 }
271 271
272 static void sdhci_deactivate_led(struct sdhci_host *host) 272 static void sdhci_deactivate_led(struct sdhci_host *host)
273 { 273 {
274 u8 ctrl; 274 u8 ctrl;
275 275
276 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 276 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
277 ctrl &= ~SDHCI_CTRL_LED; 277 ctrl &= ~SDHCI_CTRL_LED;
278 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 278 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
279 } 279 }
280 280
281 #ifdef SDHCI_USE_LEDS_CLASS 281 #ifdef SDHCI_USE_LEDS_CLASS
282 static void sdhci_led_control(struct led_classdev *led, 282 static void sdhci_led_control(struct led_classdev *led,
283 enum led_brightness brightness) 283 enum led_brightness brightness)
284 { 284 {
285 struct sdhci_host *host = container_of(led, struct sdhci_host, led); 285 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
286 unsigned long flags; 286 unsigned long flags;
287 287
288 spin_lock_irqsave(&host->lock, flags); 288 spin_lock_irqsave(&host->lock, flags);
289 289
290 if (host->runtime_suspended) 290 if (host->runtime_suspended)
291 goto out; 291 goto out;
292 292
293 if (brightness == LED_OFF) 293 if (brightness == LED_OFF)
294 sdhci_deactivate_led(host); 294 sdhci_deactivate_led(host);
295 else 295 else
296 sdhci_activate_led(host); 296 sdhci_activate_led(host);
297 out: 297 out:
298 spin_unlock_irqrestore(&host->lock, flags); 298 spin_unlock_irqrestore(&host->lock, flags);
299 } 299 }
300 #endif 300 #endif
301 301
302 /*****************************************************************************\ 302 /*****************************************************************************\
303 * * 303 * *
304 * Core functions * 304 * Core functions *
305 * * 305 * *
306 \*****************************************************************************/ 306 \*****************************************************************************/
307 307
308 static void sdhci_read_block_pio(struct sdhci_host *host) 308 static void sdhci_read_block_pio(struct sdhci_host *host)
309 { 309 {
310 unsigned long flags; 310 unsigned long flags;
311 size_t blksize, len, chunk; 311 size_t blksize, len, chunk;
312 u32 uninitialized_var(scratch); 312 u32 uninitialized_var(scratch);
313 u8 *buf; 313 u8 *buf;
314 314
315 DBG("PIO reading\n"); 315 DBG("PIO reading\n");
316 316
317 blksize = host->data->blksz; 317 blksize = host->data->blksz;
318 chunk = 0; 318 chunk = 0;
319 319
320 local_irq_save(flags); 320 local_irq_save(flags);
321 321
322 while (blksize) { 322 while (blksize) {
323 if (!sg_miter_next(&host->sg_miter)) 323 if (!sg_miter_next(&host->sg_miter))
324 BUG(); 324 BUG();
325 325
326 len = min(host->sg_miter.length, blksize); 326 len = min(host->sg_miter.length, blksize);
327 327
328 blksize -= len; 328 blksize -= len;
329 host->sg_miter.consumed = len; 329 host->sg_miter.consumed = len;
330 330
331 buf = host->sg_miter.addr; 331 buf = host->sg_miter.addr;
332 332
333 while (len) { 333 while (len) {
334 if (chunk == 0) { 334 if (chunk == 0) {
335 scratch = sdhci_readl(host, SDHCI_BUFFER); 335 scratch = sdhci_readl(host, SDHCI_BUFFER);
336 chunk = 4; 336 chunk = 4;
337 } 337 }
338 338
339 *buf = scratch & 0xFF; 339 *buf = scratch & 0xFF;
340 340
341 buf++; 341 buf++;
342 scratch >>= 8; 342 scratch >>= 8;
343 chunk--; 343 chunk--;
344 len--; 344 len--;
345 } 345 }
346 } 346 }
347 347
348 sg_miter_stop(&host->sg_miter); 348 sg_miter_stop(&host->sg_miter);
349 349
350 local_irq_restore(flags); 350 local_irq_restore(flags);
351 } 351 }
352 352
353 static void sdhci_write_block_pio(struct sdhci_host *host) 353 static void sdhci_write_block_pio(struct sdhci_host *host)
354 { 354 {
355 unsigned long flags; 355 unsigned long flags;
356 size_t blksize, len, chunk; 356 size_t blksize, len, chunk;
357 u32 scratch; 357 u32 scratch;
358 u8 *buf; 358 u8 *buf;
359 359
360 DBG("PIO writing\n"); 360 DBG("PIO writing\n");
361 361
362 blksize = host->data->blksz; 362 blksize = host->data->blksz;
363 chunk = 0; 363 chunk = 0;
364 scratch = 0; 364 scratch = 0;
365 365
366 local_irq_save(flags); 366 local_irq_save(flags);
367 367
368 while (blksize) { 368 while (blksize) {
369 if (!sg_miter_next(&host->sg_miter)) 369 if (!sg_miter_next(&host->sg_miter))
370 BUG(); 370 BUG();
371 371
372 len = min(host->sg_miter.length, blksize); 372 len = min(host->sg_miter.length, blksize);
373 373
374 blksize -= len; 374 blksize -= len;
375 host->sg_miter.consumed = len; 375 host->sg_miter.consumed = len;
376 376
377 buf = host->sg_miter.addr; 377 buf = host->sg_miter.addr;
378 378
379 while (len) { 379 while (len) {
380 scratch |= (u32)*buf << (chunk * 8); 380 scratch |= (u32)*buf << (chunk * 8);
381 381
382 buf++; 382 buf++;
383 chunk++; 383 chunk++;
384 len--; 384 len--;
385 385
386 if ((chunk == 4) || ((len == 0) && (blksize == 0))) { 386 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
387 sdhci_writel(host, scratch, SDHCI_BUFFER); 387 sdhci_writel(host, scratch, SDHCI_BUFFER);
388 chunk = 0; 388 chunk = 0;
389 scratch = 0; 389 scratch = 0;
390 } 390 }
391 } 391 }
392 } 392 }
393 393
394 sg_miter_stop(&host->sg_miter); 394 sg_miter_stop(&host->sg_miter);
395 395
396 local_irq_restore(flags); 396 local_irq_restore(flags);
397 } 397 }
398 398
399 static void sdhci_transfer_pio(struct sdhci_host *host) 399 static void sdhci_transfer_pio(struct sdhci_host *host)
400 { 400 {
401 u32 mask; 401 u32 mask;
402 402
403 BUG_ON(!host->data); 403 BUG_ON(!host->data);
404 404
405 if (host->blocks == 0) 405 if (host->blocks == 0)
406 return; 406 return;
407 407
408 if (host->data->flags & MMC_DATA_READ) 408 if (host->data->flags & MMC_DATA_READ)
409 mask = SDHCI_DATA_AVAILABLE; 409 mask = SDHCI_DATA_AVAILABLE;
410 else 410 else
411 mask = SDHCI_SPACE_AVAILABLE; 411 mask = SDHCI_SPACE_AVAILABLE;
412 412
413 /* 413 /*
414 * Some controllers (JMicron JMB38x) mess up the buffer bits 414 * Some controllers (JMicron JMB38x) mess up the buffer bits
415 * for transfers < 4 bytes. As long as it is just one block, 415 * for transfers < 4 bytes. As long as it is just one block,
416 * we can ignore the bits. 416 * we can ignore the bits.
417 */ 417 */
418 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) && 418 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
419 (host->data->blocks == 1)) 419 (host->data->blocks == 1))
420 mask = ~0; 420 mask = ~0;
421 421
422 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { 422 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
423 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY) 423 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
424 udelay(100); 424 udelay(100);
425 425
426 if (host->data->flags & MMC_DATA_READ) 426 if (host->data->flags & MMC_DATA_READ)
427 sdhci_read_block_pio(host); 427 sdhci_read_block_pio(host);
428 else 428 else
429 sdhci_write_block_pio(host); 429 sdhci_write_block_pio(host);
430 430
431 host->blocks--; 431 host->blocks--;
432 if (host->blocks == 0) 432 if (host->blocks == 0)
433 break; 433 break;
434 } 434 }
435 435
436 DBG("PIO transfer complete.\n"); 436 DBG("PIO transfer complete.\n");
437 } 437 }
438 438
439 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags) 439 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
440 { 440 {
441 local_irq_save(*flags); 441 local_irq_save(*flags);
442 return kmap_atomic(sg_page(sg)) + sg->offset; 442 return kmap_atomic(sg_page(sg)) + sg->offset;
443 } 443 }
444 444
445 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) 445 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
446 { 446 {
447 kunmap_atomic(buffer); 447 kunmap_atomic(buffer);
448 local_irq_restore(*flags); 448 local_irq_restore(*flags);
449 } 449 }
450 450
451 static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd) 451 static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd)
452 { 452 {
453 __le32 *dataddr = (__le32 __force *)(desc + 4); 453 __le32 *dataddr = (__le32 __force *)(desc + 4);
454 __le16 *cmdlen = (__le16 __force *)desc; 454 __le16 *cmdlen = (__le16 __force *)desc;
455 455
456 /* SDHCI specification says ADMA descriptors should be 4 byte 456 /* SDHCI specification says ADMA descriptors should be 4 byte
457 * aligned, so using 16 or 32bit operations should be safe. */ 457 * aligned, so using 16 or 32bit operations should be safe. */
458 458
459 cmdlen[0] = cpu_to_le16(cmd); 459 cmdlen[0] = cpu_to_le16(cmd);
460 cmdlen[1] = cpu_to_le16(len); 460 cmdlen[1] = cpu_to_le16(len);
461 461
462 dataddr[0] = cpu_to_le32(addr); 462 dataddr[0] = cpu_to_le32(addr);
463 } 463 }
464 464
465 static int sdhci_adma_table_pre(struct sdhci_host *host, 465 static int sdhci_adma_table_pre(struct sdhci_host *host,
466 struct mmc_data *data) 466 struct mmc_data *data)
467 { 467 {
468 int direction; 468 int direction;
469 469
470 u8 *desc; 470 u8 *desc;
471 u8 *align; 471 u8 *align;
472 dma_addr_t addr; 472 dma_addr_t addr;
473 dma_addr_t align_addr; 473 dma_addr_t align_addr;
474 int len, offset; 474 int len, offset;
475 475
476 struct scatterlist *sg; 476 struct scatterlist *sg;
477 int i; 477 int i;
478 char *buffer; 478 char *buffer;
479 unsigned long flags; 479 unsigned long flags;
480 480
481 /* 481 /*
482 * The spec does not specify endianness of descriptor table. 482 * The spec does not specify endianness of descriptor table.
483 * We currently guess that it is LE. 483 * We currently guess that it is LE.
484 */ 484 */
485 485
486 if (data->flags & MMC_DATA_READ) 486 if (data->flags & MMC_DATA_READ)
487 direction = DMA_FROM_DEVICE; 487 direction = DMA_FROM_DEVICE;
488 else 488 else
489 direction = DMA_TO_DEVICE; 489 direction = DMA_TO_DEVICE;
490 490
491 host->align_addr = dma_map_single(mmc_dev(host->mmc), 491 host->align_addr = dma_map_single(mmc_dev(host->mmc),
492 host->align_buffer, 128 * 4, direction); 492 host->align_buffer, 128 * 4, direction);
493 if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) 493 if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
494 goto fail; 494 goto fail;
495 BUG_ON(host->align_addr & 0x3); 495 BUG_ON(host->align_addr & 0x3);
496 496
497 host->sg_count = dma_map_sg(mmc_dev(host->mmc), 497 host->sg_count = dma_map_sg(mmc_dev(host->mmc),
498 data->sg, data->sg_len, direction); 498 data->sg, data->sg_len, direction);
499 if (host->sg_count == 0) 499 if (host->sg_count == 0)
500 goto unmap_align; 500 goto unmap_align;
501 501
502 desc = host->adma_desc; 502 desc = host->adma_desc;
503 align = host->align_buffer; 503 align = host->align_buffer;
504 504
505 align_addr = host->align_addr; 505 align_addr = host->align_addr;
506 506
507 for_each_sg(data->sg, sg, host->sg_count, i) { 507 for_each_sg(data->sg, sg, host->sg_count, i) {
508 addr = sg_dma_address(sg); 508 addr = sg_dma_address(sg);
509 len = sg_dma_len(sg); 509 len = sg_dma_len(sg);
510 510
511 /* 511 /*
512 * The SDHCI specification states that ADMA 512 * The SDHCI specification states that ADMA
513 * addresses must be 32-bit aligned. If they 513 * addresses must be 32-bit aligned. If they
514 * aren't, then we use a bounce buffer for 514 * aren't, then we use a bounce buffer for
515 * the (up to three) bytes that screw up the 515 * the (up to three) bytes that screw up the
516 * alignment. 516 * alignment.
517 */ 517 */
518 offset = (4 - (addr & 0x3)) & 0x3; 518 offset = (4 - (addr & 0x3)) & 0x3;
519 if (offset) { 519 if (offset) {
520 if (data->flags & MMC_DATA_WRITE) { 520 if (data->flags & MMC_DATA_WRITE) {
521 buffer = sdhci_kmap_atomic(sg, &flags); 521 buffer = sdhci_kmap_atomic(sg, &flags);
522 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); 522 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
523 memcpy(align, buffer, offset); 523 memcpy(align, buffer, offset);
524 sdhci_kunmap_atomic(buffer, &flags); 524 sdhci_kunmap_atomic(buffer, &flags);
525 } 525 }
526 526
527 /* tran, valid */ 527 /* tran, valid */
528 sdhci_set_adma_desc(desc, align_addr, offset, 0x21); 528 sdhci_set_adma_desc(desc, align_addr, offset, 0x21);
529 529
530 BUG_ON(offset > 65536); 530 BUG_ON(offset > 65536);
531 531
532 align += 4; 532 align += 4;
533 align_addr += 4; 533 align_addr += 4;
534 534
535 desc += 8; 535 desc += 8;
536 536
537 addr += offset; 537 addr += offset;
538 len -= offset; 538 len -= offset;
539 } 539 }
540 540
541 BUG_ON(len > 65536); 541 BUG_ON(len > 65536);
542 542
543 /* tran, valid */ 543 /* tran, valid */
544 sdhci_set_adma_desc(desc, addr, len, 0x21); 544 sdhci_set_adma_desc(desc, addr, len, 0x21);
545 desc += 8; 545 desc += 8;
546 546
547 /* 547 /*
548 * If this triggers then we have a calculation bug 548 * If this triggers then we have a calculation bug
549 * somewhere. :/ 549 * somewhere. :/
550 */ 550 */
551 WARN_ON((desc - host->adma_desc) > ADMA_SIZE); 551 WARN_ON((desc - host->adma_desc) > ADMA_SIZE);
552 } 552 }
553 553
554 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { 554 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
555 /* 555 /*
556 * Mark the last descriptor as the terminating descriptor 556 * Mark the last descriptor as the terminating descriptor
557 */ 557 */
558 if (desc != host->adma_desc) { 558 if (desc != host->adma_desc) {
559 desc -= 8; 559 desc -= 8;
560 desc[0] |= 0x2; /* end */ 560 desc[0] |= 0x2; /* end */
561 } 561 }
562 } else { 562 } else {
563 /* 563 /*
564 * Add a terminating entry. 564 * Add a terminating entry.
565 */ 565 */
566 566
567 /* nop, end, valid */ 567 /* nop, end, valid */
568 sdhci_set_adma_desc(desc, 0, 0, 0x3); 568 sdhci_set_adma_desc(desc, 0, 0, 0x3);
569 } 569 }
570 570
571 /* 571 /*
572 * Resync align buffer as we might have changed it. 572 * Resync align buffer as we might have changed it.
573 */ 573 */
574 if (data->flags & MMC_DATA_WRITE) { 574 if (data->flags & MMC_DATA_WRITE) {
575 dma_sync_single_for_device(mmc_dev(host->mmc), 575 dma_sync_single_for_device(mmc_dev(host->mmc),
576 host->align_addr, 128 * 4, direction); 576 host->align_addr, 128 * 4, direction);
577 } 577 }
578 578
579 return 0; 579 return 0;
580 580
581 unmap_align: 581 unmap_align:
582 dma_unmap_single(mmc_dev(host->mmc), host->align_addr, 582 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
583 128 * 4, direction); 583 128 * 4, direction);
584 fail: 584 fail:
585 return -EINVAL; 585 return -EINVAL;
586 } 586 }
587 587
/*
 * Tear down the ADMA mappings set up by sdhci_adma_table_pre(): unmap the
 * bounce (align) buffer and the scatterlist, and for reads copy any bytes
 * that landed in the bounce buffer back into the unaligned head of each
 * scatterlist entry.
 */
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	struct scatterlist *sg;
	int i, size;
	u8 *align;
	char *buffer;
	unsigned long flags;
	bool has_unaligned;

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	/* Release the 128-entry * 4-byte align buffer mapped in table_pre. */
	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		128 * 4, direction);

	/* Do a quick scan of the SG list for any unaligned mappings */
	has_unaligned = false;
	for_each_sg(data->sg, sg, host->sg_count, i)
		if (sg_dma_address(sg) & 3) {
			has_unaligned = true;
			break;
		}

	if (has_unaligned && data->flags & MMC_DATA_READ) {
		/* Make the DMA'd data visible to the CPU before copying. */
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);

		align = host->align_buffer;

		for_each_sg(data->sg, sg, host->sg_count, i) {
			if (sg_dma_address(sg) & 0x3) {
				/* Up to 3 head bytes went via the bounce buffer. */
				size = 4 - (sg_dma_address(sg) & 0x3);

				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(buffer, align, size);
				sdhci_kunmap_atomic(buffer, &flags);

				/* Each bounce slot is 4 bytes wide regardless of size. */
				align += 4;
			}
		}
	}

	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
		data->sg_len, direction);
}
639 639
640 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) 640 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
641 { 641 {
642 u8 count; 642 u8 count;
643 struct mmc_data *data = cmd->data; 643 struct mmc_data *data = cmd->data;
644 unsigned target_timeout, current_timeout; 644 unsigned target_timeout, current_timeout;
645 645
646 /* 646 /*
647 * If the host controller provides us with an incorrect timeout 647 * If the host controller provides us with an incorrect timeout
648 * value, just skip the check and use 0xE. The hardware may take 648 * value, just skip the check and use 0xE. The hardware may take
649 * longer to time out, but that's much better than having a too-short 649 * longer to time out, but that's much better than having a too-short
650 * timeout value. 650 * timeout value.
651 */ 651 */
652 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) 652 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
653 return 0xE; 653 return 0xE;
654 654
655 /* Unspecified timeout, assume max */ 655 /* Unspecified timeout, assume max */
656 if (!data && !cmd->busy_timeout) 656 if (!data && !cmd->busy_timeout)
657 return 0xE; 657 return 0xE;
658 658
659 /* timeout in us */ 659 /* timeout in us */
660 if (!data) 660 if (!data)
661 target_timeout = cmd->busy_timeout * 1000; 661 target_timeout = cmd->busy_timeout * 1000;
662 else { 662 else {
663 target_timeout = data->timeout_ns / 1000; 663 target_timeout = data->timeout_ns / 1000;
664 if (host->clock) 664 if (host->clock)
665 target_timeout += data->timeout_clks / host->clock; 665 target_timeout += data->timeout_clks / host->clock;
666 } 666 }
667 667
668 /* 668 /*
669 * Figure out needed cycles. 669 * Figure out needed cycles.
670 * We do this in steps in order to fit inside a 32 bit int. 670 * We do this in steps in order to fit inside a 32 bit int.
671 * The first step is the minimum timeout, which will have a 671 * The first step is the minimum timeout, which will have a
672 * minimum resolution of 6 bits: 672 * minimum resolution of 6 bits:
673 * (1) 2^13*1000 > 2^22, 673 * (1) 2^13*1000 > 2^22,
674 * (2) host->timeout_clk < 2^16 674 * (2) host->timeout_clk < 2^16
675 * => 675 * =>
676 * (1) / (2) > 2^6 676 * (1) / (2) > 2^6
677 */ 677 */
678 count = 0; 678 count = 0;
679 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 679 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
680 while (current_timeout < target_timeout) { 680 while (current_timeout < target_timeout) {
681 count++; 681 count++;
682 current_timeout <<= 1; 682 current_timeout <<= 1;
683 if (count >= 0xF) 683 if (count >= 0xF)
684 break; 684 break;
685 } 685 }
686 686
687 if (count >= 0xF) { 687 if (count >= 0xF) {
688 DBG("%s: Too large timeout 0x%x requested for CMD%d!\n", 688 DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
689 mmc_hostname(host->mmc), count, cmd->opcode); 689 mmc_hostname(host->mmc), count, cmd->opcode);
690 count = 0xE; 690 count = 0xE;
691 } 691 }
692 692
693 return count; 693 return count;
694 } 694 }
695 695
696 static void sdhci_set_transfer_irqs(struct sdhci_host *host) 696 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
697 { 697 {
698 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL; 698 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
699 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR; 699 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
700 700
701 if (host->flags & SDHCI_REQ_USE_DMA) 701 if (host->flags & SDHCI_REQ_USE_DMA)
702 host->ier = (host->ier & ~pio_irqs) | dma_irqs; 702 host->ier = (host->ier & ~pio_irqs) | dma_irqs;
703 else 703 else
704 host->ier = (host->ier & ~dma_irqs) | pio_irqs; 704 host->ier = (host->ier & ~dma_irqs) | pio_irqs;
705 705
706 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 706 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
707 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 707 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
708 } 708 }
709 709
710 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 710 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
711 { 711 {
712 u8 count; 712 u8 count;
713 713
714 if (host->ops->set_timeout) { 714 if (host->ops->set_timeout) {
715 host->ops->set_timeout(host, cmd); 715 host->ops->set_timeout(host, cmd);
716 } else { 716 } else {
717 count = sdhci_calc_timeout(host, cmd); 717 count = sdhci_calc_timeout(host, cmd);
718 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); 718 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
719 } 719 }
720 } 720 }
721 721
/*
 * Prepare the controller for the data phase of @cmd: program the timeout,
 * decide between ADMA, SDMA and PIO (falling back to PIO when controller
 * quirks rule out DMA for this scatterlist), map the buffers, and program
 * the block size/count registers.  No-op beyond the timeout when the
 * command carries no data.
 */
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;
	int ret;

	/* A previous data transfer must have completed by now. */
	WARN_ON(host->data);

	/* Busy-signalling commands need a timeout even without data. */
	if (data || (cmd->flags & MMC_RSP_BUSY))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	/* Start from DMA if the host supports either SDMA or ADMA. */
	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
		host->flags |= SDHCI_REQ_USE_DMA;

	/*
	 * FIXME: This doesn't account for merging when mapping the
	 * scatterlist.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		/* Fall back to PIO if any sg length breaks a size quirk. */
		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & 0x3) {
					DBG("Reverting to PIO because of "
						"transfer size (%d)\n",
						sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	/*
	 * The assumption here being that alignment is the same after
	 * translation to device address space.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		/* Fall back to PIO if any sg offset breaks an address quirk. */
		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * As we use 3 byte chunks to work around
			 * alignment problems, we need to check this
			 * quirk.
			 */
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->offset & 0x3) {
					DBG("Reverting to PIO because of "
						"bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA) {
			/* Build the ADMA descriptor table and point HW at it. */
			ret = sdhci_adma_table_pre(host, data);
			if (ret) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				sdhci_writel(host, host->adma_addr,
					SDHCI_ADMA_ADDRESS);
			}
		} else {
			int sg_cnt;

			/* SDMA can only handle a single contiguous mapping. */
			sg_cnt = dma_map_sg(mmc_dev(host->mmc),
					data->sg, data->sg_len,
					(data->flags & MMC_DATA_READ) ?
						DMA_FROM_DEVICE :
						DMA_TO_DEVICE);
			if (sg_cnt == 0) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				WARN_ON(sg_cnt != 1);
				sdhci_writel(host, sg_dma_address(data->sg),
					SDHCI_DMA_ADDRESS);
			}
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA))
			ctrl |= SDHCI_CTRL_ADMA32;
		else
			ctrl |= SDHCI_CTRL_SDMA;
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	/* PIO path: set up a scatterlist iterator for the IRQ handler. */
	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
884 884
/*
 * Program the SDHCI_TRANSFER_MODE register for @cmd: multi-block and read
 * direction bits, DMA enable, and the Auto-CMD12/Auto-CMD23 selection.
 * For commands without data only the Auto-CMD bits are cleared.
 */
static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		/* clear Auto CMD settings for no data CMDs */
		mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
		sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		return;
	}

	/* sdhci_prepare_data() must have run for data commands. */
	WARN_ON(!host->data);

	mode = SDHCI_TRNS_BLK_CNT_EN;
	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode |= SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			/* Block count for Auto-CMD23 goes in ARGUMENT2. */
			sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
923 923
/*
 * Complete the data phase of the current request: unmap DMA buffers,
 * account the bytes transferred, and either issue the stop command
 * (open-ended or failed multi-block transfers) or schedule the finish
 * tasklet to end the request.
 */
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	/* Clear host->data first so a re-entry via CMD12 sees no data phase. */
	data = host->data;
	host->data = NULL;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA)
			sdhci_adma_table_post(host, data);
		else {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				data->sg_len, (data->flags & MMC_DATA_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE);
		}
	}

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !host->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}
977 977
/*
 * Issue @cmd to the controller: wait (max 10 ms) for the CMD/DAT inhibit
 * bits to clear, arm the software timeout timer, program argument, data
 * and transfer mode, then write the command register.  Errors (inhibit
 * never released, unsupported response type) are reported through
 * cmd->error and the finish tasklet.
 */
void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inihibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released "
				"inhibit bit(s).\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	/* Software timeout: generous for long busy waits, else 10 s. */
	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	mod_timer(&host->timer, timeout);

	host->cmd = cmd;
	host->busy_handle = 0;

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	/* SDHCI cannot express a 136-bit response with busy signalling. */
	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/* Map the MMC core response flags onto SDHCI response encodings. */
	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	/* Writing the command register starts the transaction. */
	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
1057 1057
/*
 * Handle command completion: read back the response registers, then
 * either chain to the real command (when the finished command was the
 * CMD23 set-block-count), finish an early-completed data phase, or
 * schedule the finish tasklet for commands without data.
 */
static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
				host->cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	host->cmd->error = 0;

	/* Finished CMD23, now send actual command. */
	if (host->cmd == host->mrq->sbc) {
		host->cmd = NULL;
		sdhci_send_command(host, host->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!host->cmd->data)
			tasklet_schedule(&host->finish_tasklet);

		host->cmd = NULL;
	}
}
1098 1098
1099 static u16 sdhci_get_preset_value(struct sdhci_host *host) 1099 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1100 { 1100 {
1101 u16 preset = 0; 1101 u16 preset = 0;
1102 1102
1103 switch (host->timing) { 1103 switch (host->timing) {
1104 case MMC_TIMING_UHS_SDR12: 1104 case MMC_TIMING_UHS_SDR12:
1105 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1105 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1106 break; 1106 break;
1107 case MMC_TIMING_UHS_SDR25: 1107 case MMC_TIMING_UHS_SDR25:
1108 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25); 1108 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1109 break; 1109 break;
1110 case MMC_TIMING_UHS_SDR50: 1110 case MMC_TIMING_UHS_SDR50:
1111 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50); 1111 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1112 break; 1112 break;
1113 case MMC_TIMING_UHS_SDR104: 1113 case MMC_TIMING_UHS_SDR104:
1114 case MMC_TIMING_MMC_HS200: 1114 case MMC_TIMING_MMC_HS200:
1115 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); 1115 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1116 break; 1116 break;
1117 case MMC_TIMING_UHS_DDR50: 1117 case MMC_TIMING_UHS_DDR50:
1118 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); 1118 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1119 break; 1119 break;
1120 default: 1120 default:
1121 pr_warn("%s: Invalid UHS-I mode selected\n", 1121 pr_warn("%s: Invalid UHS-I mode selected\n",
1122 mmc_hostname(host->mmc)); 1122 mmc_hostname(host->mmc));
1123 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1123 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1124 break; 1124 break;
1125 } 1125 }
1126 return preset; 1126 return preset;
1127 } 1127 }
1128 1128
/*
 * Program the SD clock divider for the requested frequency.
 *
 * The clock is first gated off, then the divider is computed (from the
 * preset registers when presets are enabled, otherwise by searching for
 * the smallest divisor that does not exceed @clock), the internal clock
 * is enabled and polled until stable, and finally the clock is gated
 * through to the card.  A @clock of 0 leaves the clock disabled.
 *
 * host->mmc->actual_clock is updated to the frequency actually achieved.
 * NOTE(review): uses mdelay() while polling, so callers may hold
 * host->lock (atomic context) safely.
 */
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	unsigned long timeout;

	host->mmc->actual_clock = 0;

	/* Gate the clock off before touching the divider. */
	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			/* Divider comes from the preset register, not a search. */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				/* 10-bit divided clock mode: freq = base / (2 * div). */
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			/*
			 * Set Programmable Clock Mode in the Clock
			 * Control register.
			 */
			clk = SDHCI_PROG_CLOCK_MODE;
			real_div = div;
			clk_mul = host->clk_mul;
			/* Register encodes divisor as N-1. */
			div--;
		} else {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			/* Register encodes divisor/2. */
			div >>= 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
	/* Split the (up to 10-bit) divisor across the low and high fields. */
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			pr_err("%s: Internal clock never "
				"stabilised.\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	/* Internal clock is stable; gate it through to the card. */
	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);
1231 1231
/*
 * Apply the requested power mode/voltage to the card.
 *
 * If an external vmmc regulator is present, the job is delegated to
 * mmc_regulator_set_ocr(); host->lock is dropped around that call
 * because regulator operations may sleep (caller holds host->lock with
 * IRQs disabled — hence the _irq lock variants).
 *
 * Otherwise the controller's POWER_CONTROL register is programmed
 * directly, honouring the various controller quirks.
 */
static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
			    unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	u8 pwr = 0;

	if (!IS_ERR(mmc->supply.vmmc)) {
		/* Regulator path: may sleep, so release the spinlock. */
		spin_unlock_irq(&host->lock);
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		spin_lock_irq(&host->lock);
		return;
	}

	if (mode != MMC_POWER_OFF) {
		/* @vdd is a bit number in the OCR mask; map it to a voltage. */
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			BUG();
		}
	}

	/* Nothing to do if the power state is unchanged. */
	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
		vdd = 0;
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and set turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before
		 * they can apply clock after applying power
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
1304 1304
1305 /*****************************************************************************\ 1305 /*****************************************************************************\
1306 * * 1306 * *
1307 * MMC callbacks * 1307 * MMC callbacks *
1308 * * 1308 * *
1309 \*****************************************************************************/ 1309 \*****************************************************************************/
1310 1310
/*
 * mmc_host_ops .request hook: submit an mmc_request to the controller.
 *
 * The CD GPIO is sampled *before* taking host->lock: mmc_gpio_get_cd()
 * may sleep (GPIO access can sleep), so calling it under the spinlock
 * would trigger "sleeping function called from invalid context".  It
 * needs no lock protection since it touches no sdhci internal state.
 *
 * If no card is present (or the device is dead) the request is failed
 * immediately with -ENOMEDIUM via the finish tasklet; otherwise a
 * pending re-tune is executed if needed and the command is issued.
 */
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;
	u32 tuning_opcode;

	host = mmc_priv(mmc);

	sdhci_runtime_pm_get(host);

	/* Must be called before acquiring the lock — may sleep. */
	present = mmc_gpio_get_cd(host->mmc);

	spin_lock_irqsave(&host->lock, flags);

	/* Only one request may be outstanding at a time. */
	WARN_ON(host->mrq != NULL);

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_activate_led(host);
#endif

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	host->mrq = mrq;

	/*
	 * Firstly check card presence from cd-gpio. The return could
	 * be one of the following possibilities:
	 * negative: cd-gpio is not available
	 * zero: cd-gpio is used, and card is removed
	 * one: cd-gpio is used, and card is present
	 */
	if (present < 0) {
		/* If polling, assume that the card is always present. */
		if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
			present = 1;
		else
			/* Fall back to the controller's present-state bit. */
			present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
					SDHCI_CARD_PRESENT;
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		/* No medium: complete the request with an error. */
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else {
		u32 present_state;

		present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
		/*
		 * Check if the re-tuning timer has already expired and there
		 * is no on-going data transfer and DAT0 is not busy. If so,
		 * we need to execute tuning procedure before sending command.
		 */
		if ((host->flags & SDHCI_NEEDS_RETUNING) &&
		    !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) &&
		    (present_state & SDHCI_DATA_0_LVL_MASK)) {
			if (mmc->card) {
				/* eMMC uses cmd21 but sd and sdio use cmd19 */
				tuning_opcode =
					mmc->card->type == MMC_TYPE_MMC ?
					MMC_SEND_TUNING_BLOCK_HS200 :
					MMC_SEND_TUNING_BLOCK;

				/* Here we need to set the host->mrq to NULL,
				 * in case the pending finish_tasklet
				 * finishes it incorrectly.
				 */
				host->mrq = NULL;

				/* Tuning sleeps, so drop the lock around it. */
				spin_unlock_irqrestore(&host->lock, flags);
				sdhci_execute_tuning(mmc, tuning_opcode);
				spin_lock_irqsave(&host->lock, flags);

				/* Restore original mmc_request structure */
				host->mrq = mrq;
			}
		}

		/* Issue SET_BLOCK_COUNT first unless the controller does it. */
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
1406 1407
1407 void sdhci_set_bus_width(struct sdhci_host *host, int width) 1408 void sdhci_set_bus_width(struct sdhci_host *host, int width)
1408 { 1409 {
1409 u8 ctrl; 1410 u8 ctrl;
1410 1411
1411 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 1412 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1412 if (width == MMC_BUS_WIDTH_8) { 1413 if (width == MMC_BUS_WIDTH_8) {
1413 ctrl &= ~SDHCI_CTRL_4BITBUS; 1414 ctrl &= ~SDHCI_CTRL_4BITBUS;
1414 if (host->version >= SDHCI_SPEC_300) 1415 if (host->version >= SDHCI_SPEC_300)
1415 ctrl |= SDHCI_CTRL_8BITBUS; 1416 ctrl |= SDHCI_CTRL_8BITBUS;
1416 } else { 1417 } else {
1417 if (host->version >= SDHCI_SPEC_300) 1418 if (host->version >= SDHCI_SPEC_300)
1418 ctrl &= ~SDHCI_CTRL_8BITBUS; 1419 ctrl &= ~SDHCI_CTRL_8BITBUS;
1419 if (width == MMC_BUS_WIDTH_4) 1420 if (width == MMC_BUS_WIDTH_4)
1420 ctrl |= SDHCI_CTRL_4BITBUS; 1421 ctrl |= SDHCI_CTRL_4BITBUS;
1421 else 1422 else
1422 ctrl &= ~SDHCI_CTRL_4BITBUS; 1423 ctrl &= ~SDHCI_CTRL_4BITBUS;
1423 } 1424 }
1424 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1425 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1425 } 1426 }
1426 EXPORT_SYMBOL_GPL(sdhci_set_bus_width); 1427 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
1427 1428
1428 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) 1429 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
1429 { 1430 {
1430 u16 ctrl_2; 1431 u16 ctrl_2;
1431 1432
1432 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1433 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1433 /* Select Bus Speed Mode for host */ 1434 /* Select Bus Speed Mode for host */
1434 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; 1435 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1435 if ((timing == MMC_TIMING_MMC_HS200) || 1436 if ((timing == MMC_TIMING_MMC_HS200) ||
1436 (timing == MMC_TIMING_UHS_SDR104)) 1437 (timing == MMC_TIMING_UHS_SDR104))
1437 ctrl_2 |= SDHCI_CTRL_UHS_SDR104; 1438 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1438 else if (timing == MMC_TIMING_UHS_SDR12) 1439 else if (timing == MMC_TIMING_UHS_SDR12)
1439 ctrl_2 |= SDHCI_CTRL_UHS_SDR12; 1440 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1440 else if (timing == MMC_TIMING_UHS_SDR25) 1441 else if (timing == MMC_TIMING_UHS_SDR25)
1441 ctrl_2 |= SDHCI_CTRL_UHS_SDR25; 1442 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1442 else if (timing == MMC_TIMING_UHS_SDR50) 1443 else if (timing == MMC_TIMING_UHS_SDR50)
1443 ctrl_2 |= SDHCI_CTRL_UHS_SDR50; 1444 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1444 else if ((timing == MMC_TIMING_UHS_DDR50) || 1445 else if ((timing == MMC_TIMING_UHS_DDR50) ||
1445 (timing == MMC_TIMING_MMC_DDR52)) 1446 (timing == MMC_TIMING_MMC_DDR52))
1446 ctrl_2 |= SDHCI_CTRL_UHS_DDR50; 1447 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1447 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 1448 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1448 } 1449 }
1449 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); 1450 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
1450 1451
/*
 * Apply an mmc_ios configuration (clock, power, bus width, timing) to
 * the controller, honouring the SDHC v3.00 ordering rules: when preset
 * values are enabled, the SD clock must be gated before HOST_CONTROL /
 * UHS mode changes and re-enabled afterwards to avoid clock glitches.
 *
 * Runs entirely under host->lock with IRQs disabled.
 */
static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
	unsigned long flags;
	u8 ctrl;
	struct mmc_host *mmc = host->mmc;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD) {
		/* Dead device: at most turn off the external regulator. */
		spin_unlock_irqrestore(&host->lock, flags);
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	/* Presets are disabled across power-up and re-enabled after timing is set. */
	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		/* Data timeout is derived from SDCLK on these controllers. */
		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		/* In case of UHS-I modes, set High Speed Enable */
		if ((ios->timing == MMC_TIMING_MMC_HS200) ||
		    (ios->timing == MMC_TIMING_MMC_DDR52) ||
		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR25))
			ctrl |= SDHCI_CTRL_HISPD;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50))) {
			u16 preset;

			/* Re-enable presets and report the preset driver type back. */
			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
1597 1598
/*
 * mmc_host_ops .set_ios hook: keep the controller runtime-resumed
 * while sdhci_do_set_ios() does the real work.
 */
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *sdhci = mmc_priv(mmc);

	sdhci_runtime_pm_get(sdhci);
	sdhci_do_set_ios(sdhci, ios);
	sdhci_runtime_pm_put(sdhci);
}
1606 1607
1607 static int sdhci_do_get_cd(struct sdhci_host *host) 1608 static int sdhci_do_get_cd(struct sdhci_host *host)
1608 { 1609 {
1609 int gpio_cd = mmc_gpio_get_cd(host->mmc); 1610 int gpio_cd = mmc_gpio_get_cd(host->mmc);
1610 1611
1611 if (host->flags & SDHCI_DEVICE_DEAD) 1612 if (host->flags & SDHCI_DEVICE_DEAD)
1612 return 0; 1613 return 0;
1613 1614
1614 /* If polling/nonremovable, assume that the card is always present. */ 1615 /* If polling/nonremovable, assume that the card is always present. */
1615 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || 1616 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
1616 (host->mmc->caps & MMC_CAP_NONREMOVABLE)) 1617 (host->mmc->caps & MMC_CAP_NONREMOVABLE))
1617 return 1; 1618 return 1;
1618 1619
1619 /* Try slot gpio detect */ 1620 /* Try slot gpio detect */
1620 if (!IS_ERR_VALUE(gpio_cd)) 1621 if (!IS_ERR_VALUE(gpio_cd))
1621 return !!gpio_cd; 1622 return !!gpio_cd;
1622 1623
1623 /* Host native card detect */ 1624 /* Host native card detect */
1624 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 1625 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
1625 } 1626 }
1626 1627
/*
 * mmc_host_ops .get_cd hook: runtime-PM wrapper around
 * sdhci_do_get_cd().
 */
static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int present;

	sdhci_runtime_pm_get(host);
	present = sdhci_do_get_cd(host);
	sdhci_runtime_pm_put(host);

	return present;
}
1637 1638
1638 static int sdhci_check_ro(struct sdhci_host *host) 1639 static int sdhci_check_ro(struct sdhci_host *host)
1639 { 1640 {
1640 unsigned long flags; 1641 unsigned long flags;
1641 int is_readonly; 1642 int is_readonly;
1642 1643
1643 spin_lock_irqsave(&host->lock, flags); 1644 spin_lock_irqsave(&host->lock, flags);
1644 1645
1645 if (host->flags & SDHCI_DEVICE_DEAD) 1646 if (host->flags & SDHCI_DEVICE_DEAD)
1646 is_readonly = 0; 1647 is_readonly = 0;
1647 else if (host->ops->get_ro) 1648 else if (host->ops->get_ro)
1648 is_readonly = host->ops->get_ro(host); 1649 is_readonly = host->ops->get_ro(host);
1649 else 1650 else
1650 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE) 1651 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1651 & SDHCI_WRITE_PROTECT); 1652 & SDHCI_WRITE_PROTECT);
1652 1653
1653 spin_unlock_irqrestore(&host->lock, flags); 1654 spin_unlock_irqrestore(&host->lock, flags);
1654 1655
1655 /* This quirk needs to be replaced by a callback-function later */ 1656 /* This quirk needs to be replaced by a callback-function later */
1656 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ? 1657 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1657 !is_readonly : is_readonly; 1658 !is_readonly : is_readonly;
1658 } 1659 }
1659 1660
1660 #define SAMPLE_COUNT 5 1661 #define SAMPLE_COUNT 5
1661 1662
1662 static int sdhci_do_get_ro(struct sdhci_host *host) 1663 static int sdhci_do_get_ro(struct sdhci_host *host)
1663 { 1664 {
1664 int i, ro_count; 1665 int i, ro_count;
1665 1666
1666 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) 1667 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1667 return sdhci_check_ro(host); 1668 return sdhci_check_ro(host);
1668 1669
1669 ro_count = 0; 1670 ro_count = 0;
1670 for (i = 0; i < SAMPLE_COUNT; i++) { 1671 for (i = 0; i < SAMPLE_COUNT; i++) {
1671 if (sdhci_check_ro(host)) { 1672 if (sdhci_check_ro(host)) {
1672 if (++ro_count > SAMPLE_COUNT / 2) 1673 if (++ro_count > SAMPLE_COUNT / 2)
1673 return 1; 1674 return 1;
1674 } 1675 }
1675 msleep(30); 1676 msleep(30);
1676 } 1677 }
1677 return 0; 1678 return 0;
1678 } 1679 }
1679 1680
1680 static void sdhci_hw_reset(struct mmc_host *mmc) 1681 static void sdhci_hw_reset(struct mmc_host *mmc)
1681 { 1682 {
1682 struct sdhci_host *host = mmc_priv(mmc); 1683 struct sdhci_host *host = mmc_priv(mmc);
1683 1684
1684 if (host->ops && host->ops->hw_reset) 1685 if (host->ops && host->ops->hw_reset)
1685 host->ops->hw_reset(host); 1686 host->ops->hw_reset(host);
1686 } 1687 }
1687 1688
/*
 * mmc_host_ops .get_ro entry point: wrap the (possibly sleeping)
 * write-protect query in a runtime-PM get/put pair.
 */
static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ro;

	sdhci_runtime_pm_get(host);
	ro = sdhci_do_get_ro(host);
	sdhci_runtime_pm_put(host);

	return ro;
}
1698 1699
1699 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) 1700 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
1700 { 1701 {
1701 if (!(host->flags & SDHCI_DEVICE_DEAD)) { 1702 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
1702 if (enable) 1703 if (enable)
1703 host->ier |= SDHCI_INT_CARD_INT; 1704 host->ier |= SDHCI_INT_CARD_INT;
1704 else 1705 else
1705 host->ier &= ~SDHCI_INT_CARD_INT; 1706 host->ier &= ~SDHCI_INT_CARD_INT;
1706 1707
1707 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 1708 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1708 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 1709 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1709 mmiowb(); 1710 mmiowb();
1710 } 1711 }
1711 } 1712 }
1712 1713
1713 static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 1714 static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1714 { 1715 {
1715 struct sdhci_host *host = mmc_priv(mmc); 1716 struct sdhci_host *host = mmc_priv(mmc);
1716 unsigned long flags; 1717 unsigned long flags;
1717 1718
1718 sdhci_runtime_pm_get(host); 1719 sdhci_runtime_pm_get(host);
1719 1720
1720 spin_lock_irqsave(&host->lock, flags); 1721 spin_lock_irqsave(&host->lock, flags);
1721 if (enable) 1722 if (enable)
1722 host->flags |= SDHCI_SDIO_IRQ_ENABLED; 1723 host->flags |= SDHCI_SDIO_IRQ_ENABLED;
1723 else 1724 else
1724 host->flags &= ~SDHCI_SDIO_IRQ_ENABLED; 1725 host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
1725 1726
1726 sdhci_enable_sdio_irq_nolock(host, enable); 1727 sdhci_enable_sdio_irq_nolock(host, enable);
1727 spin_unlock_irqrestore(&host->lock, flags); 1728 spin_unlock_irqrestore(&host->lock, flags);
1728 1729
1729 sdhci_runtime_pm_put(host); 1730 sdhci_runtime_pm_put(host);
1730 } 1731 }
1731 1732
1732 static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, 1733 static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
1733 struct mmc_ios *ios) 1734 struct mmc_ios *ios)
1734 { 1735 {
1735 struct mmc_host *mmc = host->mmc; 1736 struct mmc_host *mmc = host->mmc;
1736 u16 ctrl; 1737 u16 ctrl;
1737 int ret; 1738 int ret;
1738 1739
1739 /* 1740 /*
1740 * Signal Voltage Switching is only applicable for Host Controllers 1741 * Signal Voltage Switching is only applicable for Host Controllers
1741 * v3.00 and above. 1742 * v3.00 and above.
1742 */ 1743 */
1743 if (host->version < SDHCI_SPEC_300) 1744 if (host->version < SDHCI_SPEC_300)
1744 return 0; 1745 return 0;
1745 1746
1746 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1747 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1747 1748
1748 switch (ios->signal_voltage) { 1749 switch (ios->signal_voltage) {
1749 case MMC_SIGNAL_VOLTAGE_330: 1750 case MMC_SIGNAL_VOLTAGE_330:
1750 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ 1751 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1751 ctrl &= ~SDHCI_CTRL_VDD_180; 1752 ctrl &= ~SDHCI_CTRL_VDD_180;
1752 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1753 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1753 1754
1754 if (!IS_ERR(mmc->supply.vqmmc)) { 1755 if (!IS_ERR(mmc->supply.vqmmc)) {
1755 ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000, 1756 ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
1756 3600000); 1757 3600000);
1757 if (ret) { 1758 if (ret) {
1758 pr_warn("%s: Switching to 3.3V signalling voltage failed\n", 1759 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
1759 mmc_hostname(mmc)); 1760 mmc_hostname(mmc));
1760 return -EIO; 1761 return -EIO;
1761 } 1762 }
1762 } 1763 }
1763 /* Wait for 5ms */ 1764 /* Wait for 5ms */
1764 usleep_range(5000, 5500); 1765 usleep_range(5000, 5500);
1765 1766
1766 /* 3.3V regulator output should be stable within 5 ms */ 1767 /* 3.3V regulator output should be stable within 5 ms */
1767 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1768 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1768 if (!(ctrl & SDHCI_CTRL_VDD_180)) 1769 if (!(ctrl & SDHCI_CTRL_VDD_180))
1769 return 0; 1770 return 0;
1770 1771
1771 pr_warn("%s: 3.3V regulator output did not became stable\n", 1772 pr_warn("%s: 3.3V regulator output did not became stable\n",
1772 mmc_hostname(mmc)); 1773 mmc_hostname(mmc));
1773 1774
1774 return -EAGAIN; 1775 return -EAGAIN;
1775 case MMC_SIGNAL_VOLTAGE_180: 1776 case MMC_SIGNAL_VOLTAGE_180:
1776 if (!IS_ERR(mmc->supply.vqmmc)) { 1777 if (!IS_ERR(mmc->supply.vqmmc)) {
1777 ret = regulator_set_voltage(mmc->supply.vqmmc, 1778 ret = regulator_set_voltage(mmc->supply.vqmmc,
1778 1700000, 1950000); 1779 1700000, 1950000);
1779 if (ret) { 1780 if (ret) {
1780 pr_warn("%s: Switching to 1.8V signalling voltage failed\n", 1781 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
1781 mmc_hostname(mmc)); 1782 mmc_hostname(mmc));
1782 return -EIO; 1783 return -EIO;
1783 } 1784 }
1784 } 1785 }
1785 1786
1786 /* 1787 /*
1787 * Enable 1.8V Signal Enable in the Host Control2 1788 * Enable 1.8V Signal Enable in the Host Control2
1788 * register 1789 * register
1789 */ 1790 */
1790 ctrl |= SDHCI_CTRL_VDD_180; 1791 ctrl |= SDHCI_CTRL_VDD_180;
1791 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1792 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1792 1793
1793 /* 1.8V regulator output should be stable within 5 ms */ 1794 /* 1.8V regulator output should be stable within 5 ms */
1794 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1795 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1795 if (ctrl & SDHCI_CTRL_VDD_180) 1796 if (ctrl & SDHCI_CTRL_VDD_180)
1796 return 0; 1797 return 0;
1797 1798
1798 pr_warn("%s: 1.8V regulator output did not became stable\n", 1799 pr_warn("%s: 1.8V regulator output did not became stable\n",
1799 mmc_hostname(mmc)); 1800 mmc_hostname(mmc));
1800 1801
1801 return -EAGAIN; 1802 return -EAGAIN;
1802 case MMC_SIGNAL_VOLTAGE_120: 1803 case MMC_SIGNAL_VOLTAGE_120:
1803 if (!IS_ERR(mmc->supply.vqmmc)) { 1804 if (!IS_ERR(mmc->supply.vqmmc)) {
1804 ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000, 1805 ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
1805 1300000); 1806 1300000);
1806 if (ret) { 1807 if (ret) {
1807 pr_warn("%s: Switching to 1.2V signalling voltage failed\n", 1808 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
1808 mmc_hostname(mmc)); 1809 mmc_hostname(mmc));
1809 return -EIO; 1810 return -EIO;
1810 } 1811 }
1811 } 1812 }
1812 return 0; 1813 return 0;
1813 default: 1814 default:
1814 /* No signal voltage switch required */ 1815 /* No signal voltage switch required */
1815 return 0; 1816 return 0;
1816 } 1817 }
1817 } 1818 }
1818 1819
1819 static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 1820 static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1820 struct mmc_ios *ios) 1821 struct mmc_ios *ios)
1821 { 1822 {
1822 struct sdhci_host *host = mmc_priv(mmc); 1823 struct sdhci_host *host = mmc_priv(mmc);
1823 int err; 1824 int err;
1824 1825
1825 if (host->version < SDHCI_SPEC_300) 1826 if (host->version < SDHCI_SPEC_300)
1826 return 0; 1827 return 0;
1827 sdhci_runtime_pm_get(host); 1828 sdhci_runtime_pm_get(host);
1828 err = sdhci_do_start_signal_voltage_switch(host, ios); 1829 err = sdhci_do_start_signal_voltage_switch(host, ios);
1829 sdhci_runtime_pm_put(host); 1830 sdhci_runtime_pm_put(host);
1830 return err; 1831 return err;
1831 } 1832 }
1832 1833
1833 static int sdhci_card_busy(struct mmc_host *mmc) 1834 static int sdhci_card_busy(struct mmc_host *mmc)
1834 { 1835 {
1835 struct sdhci_host *host = mmc_priv(mmc); 1836 struct sdhci_host *host = mmc_priv(mmc);
1836 u32 present_state; 1837 u32 present_state;
1837 1838
1838 sdhci_runtime_pm_get(host); 1839 sdhci_runtime_pm_get(host);
1839 /* Check whether DAT[3:0] is 0000 */ 1840 /* Check whether DAT[3:0] is 0000 */
1840 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); 1841 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1841 sdhci_runtime_pm_put(host); 1842 sdhci_runtime_pm_put(host);
1842 1843
1843 return !(present_state & SDHCI_DATA_LVL_MASK); 1844 return !(present_state & SDHCI_DATA_LVL_MASK);
1844 } 1845 }
1845 1846
1846 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 1847 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1847 { 1848 {
1848 struct sdhci_host *host = mmc_priv(mmc); 1849 struct sdhci_host *host = mmc_priv(mmc);
1849 u16 ctrl; 1850 u16 ctrl;
1850 int tuning_loop_counter = MAX_TUNING_LOOP; 1851 int tuning_loop_counter = MAX_TUNING_LOOP;
1851 int err = 0; 1852 int err = 0;
1852 unsigned long flags; 1853 unsigned long flags;
1853 1854
1854 sdhci_runtime_pm_get(host); 1855 sdhci_runtime_pm_get(host);
1855 spin_lock_irqsave(&host->lock, flags); 1856 spin_lock_irqsave(&host->lock, flags);
1856 1857
1857 /* 1858 /*
1858 * The Host Controller needs tuning only in case of SDR104 mode 1859 * The Host Controller needs tuning only in case of SDR104 mode
1859 * and for SDR50 mode when Use Tuning for SDR50 is set in the 1860 * and for SDR50 mode when Use Tuning for SDR50 is set in the
1860 * Capabilities register. 1861 * Capabilities register.
1861 * If the Host Controller supports the HS200 mode then the 1862 * If the Host Controller supports the HS200 mode then the
1862 * tuning function has to be executed. 1863 * tuning function has to be executed.
1863 */ 1864 */
1864 switch (host->timing) { 1865 switch (host->timing) {
1865 case MMC_TIMING_MMC_HS200: 1866 case MMC_TIMING_MMC_HS200:
1866 case MMC_TIMING_UHS_SDR104: 1867 case MMC_TIMING_UHS_SDR104:
1867 break; 1868 break;
1868 1869
1869 case MMC_TIMING_UHS_SDR50: 1870 case MMC_TIMING_UHS_SDR50:
1870 if (host->flags & SDHCI_SDR50_NEEDS_TUNING || 1871 if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
1871 host->flags & SDHCI_SDR104_NEEDS_TUNING) 1872 host->flags & SDHCI_SDR104_NEEDS_TUNING)
1872 break; 1873 break;
1873 /* FALLTHROUGH */ 1874 /* FALLTHROUGH */
1874 1875
1875 default: 1876 default:
1876 spin_unlock_irqrestore(&host->lock, flags); 1877 spin_unlock_irqrestore(&host->lock, flags);
1877 sdhci_runtime_pm_put(host); 1878 sdhci_runtime_pm_put(host);
1878 return 0; 1879 return 0;
1879 } 1880 }
1880 1881
1881 if (host->ops->platform_execute_tuning) { 1882 if (host->ops->platform_execute_tuning) {
1882 spin_unlock_irqrestore(&host->lock, flags); 1883 spin_unlock_irqrestore(&host->lock, flags);
1883 err = host->ops->platform_execute_tuning(host, opcode); 1884 err = host->ops->platform_execute_tuning(host, opcode);
1884 sdhci_runtime_pm_put(host); 1885 sdhci_runtime_pm_put(host);
1885 return err; 1886 return err;
1886 } 1887 }
1887 1888
1888 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1889 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1889 ctrl |= SDHCI_CTRL_EXEC_TUNING; 1890 ctrl |= SDHCI_CTRL_EXEC_TUNING;
1890 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1891 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1891 1892
1892 /* 1893 /*
1893 * As per the Host Controller spec v3.00, tuning command 1894 * As per the Host Controller spec v3.00, tuning command
1894 * generates Buffer Read Ready interrupt, so enable that. 1895 * generates Buffer Read Ready interrupt, so enable that.
1895 * 1896 *
1896 * Note: The spec clearly says that when tuning sequence 1897 * Note: The spec clearly says that when tuning sequence
1897 * is being performed, the controller does not generate 1898 * is being performed, the controller does not generate
1898 * interrupts other than Buffer Read Ready interrupt. But 1899 * interrupts other than Buffer Read Ready interrupt. But
1899 * to make sure we don't hit a controller bug, we _only_ 1900 * to make sure we don't hit a controller bug, we _only_
1900 * enable Buffer Read Ready interrupt here. 1901 * enable Buffer Read Ready interrupt here.
1901 */ 1902 */
1902 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); 1903 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
1903 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); 1904 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
1904 1905
1905 /* 1906 /*
1906 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number 1907 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
1907 * of loops reaches 40 times or a timeout of 150ms occurs. 1908 * of loops reaches 40 times or a timeout of 150ms occurs.
1908 */ 1909 */
1909 do { 1910 do {
1910 struct mmc_command cmd = {0}; 1911 struct mmc_command cmd = {0};
1911 struct mmc_request mrq = {NULL}; 1912 struct mmc_request mrq = {NULL};
1912 1913
1913 cmd.opcode = opcode; 1914 cmd.opcode = opcode;
1914 cmd.arg = 0; 1915 cmd.arg = 0;
1915 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 1916 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
1916 cmd.retries = 0; 1917 cmd.retries = 0;
1917 cmd.data = NULL; 1918 cmd.data = NULL;
1918 cmd.error = 0; 1919 cmd.error = 0;
1919 1920
1920 if (tuning_loop_counter-- == 0) 1921 if (tuning_loop_counter-- == 0)
1921 break; 1922 break;
1922 1923
1923 mrq.cmd = &cmd; 1924 mrq.cmd = &cmd;
1924 host->mrq = &mrq; 1925 host->mrq = &mrq;
1925 1926
1926 /* 1927 /*
1927 * In response to CMD19, the card sends 64 bytes of tuning 1928 * In response to CMD19, the card sends 64 bytes of tuning
1928 * block to the Host Controller. So we set the block size 1929 * block to the Host Controller. So we set the block size
1929 * to 64 here. 1930 * to 64 here.
1930 */ 1931 */
1931 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) { 1932 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1932 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) 1933 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
1933 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), 1934 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
1934 SDHCI_BLOCK_SIZE); 1935 SDHCI_BLOCK_SIZE);
1935 else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) 1936 else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
1936 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), 1937 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
1937 SDHCI_BLOCK_SIZE); 1938 SDHCI_BLOCK_SIZE);
1938 } else { 1939 } else {
1939 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), 1940 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
1940 SDHCI_BLOCK_SIZE); 1941 SDHCI_BLOCK_SIZE);
1941 } 1942 }
1942 1943
1943 /* 1944 /*
1944 * The tuning block is sent by the card to the host controller. 1945 * The tuning block is sent by the card to the host controller.
1945 * So we set the TRNS_READ bit in the Transfer Mode register. 1946 * So we set the TRNS_READ bit in the Transfer Mode register.
1946 * This also takes care of setting DMA Enable and Multi Block 1947 * This also takes care of setting DMA Enable and Multi Block
1947 * Select in the same register to 0. 1948 * Select in the same register to 0.
1948 */ 1949 */
1949 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); 1950 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
1950 1951
1951 sdhci_send_command(host, &cmd); 1952 sdhci_send_command(host, &cmd);
1952 1953
1953 host->cmd = NULL; 1954 host->cmd = NULL;
1954 host->mrq = NULL; 1955 host->mrq = NULL;
1955 1956
1956 spin_unlock_irqrestore(&host->lock, flags); 1957 spin_unlock_irqrestore(&host->lock, flags);
1957 /* Wait for Buffer Read Ready interrupt */ 1958 /* Wait for Buffer Read Ready interrupt */
1958 wait_event_interruptible_timeout(host->buf_ready_int, 1959 wait_event_interruptible_timeout(host->buf_ready_int,
1959 (host->tuning_done == 1), 1960 (host->tuning_done == 1),
1960 msecs_to_jiffies(50)); 1961 msecs_to_jiffies(50));
1961 spin_lock_irqsave(&host->lock, flags); 1962 spin_lock_irqsave(&host->lock, flags);
1962 1963
1963 if (!host->tuning_done) { 1964 if (!host->tuning_done) {
1964 pr_info(DRIVER_NAME ": Timeout waiting for " 1965 pr_info(DRIVER_NAME ": Timeout waiting for "
1965 "Buffer Read Ready interrupt during tuning " 1966 "Buffer Read Ready interrupt during tuning "
1966 "procedure, falling back to fixed sampling " 1967 "procedure, falling back to fixed sampling "
1967 "clock\n"); 1968 "clock\n");
1968 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1969 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1969 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 1970 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
1970 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; 1971 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
1971 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1972 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1972 1973
1973 err = -EIO; 1974 err = -EIO;
1974 goto out; 1975 goto out;
1975 } 1976 }
1976 1977
1977 host->tuning_done = 0; 1978 host->tuning_done = 0;
1978 1979
1979 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1980 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1980 1981
1981 /* eMMC spec does not require a delay between tuning cycles */ 1982 /* eMMC spec does not require a delay between tuning cycles */
1982 if (opcode == MMC_SEND_TUNING_BLOCK) 1983 if (opcode == MMC_SEND_TUNING_BLOCK)
1983 mdelay(1); 1984 mdelay(1);
1984 } while (ctrl & SDHCI_CTRL_EXEC_TUNING); 1985 } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
1985 1986
1986 /* 1987 /*
1987 * The Host Driver has exhausted the maximum number of loops allowed, 1988 * The Host Driver has exhausted the maximum number of loops allowed,
1988 * so use fixed sampling frequency. 1989 * so use fixed sampling frequency.
1989 */ 1990 */
1990 if (tuning_loop_counter < 0) { 1991 if (tuning_loop_counter < 0) {
1991 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 1992 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
1992 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1993 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1993 } 1994 }
1994 if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) { 1995 if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
1995 pr_info(DRIVER_NAME ": Tuning procedure" 1996 pr_info(DRIVER_NAME ": Tuning procedure"
1996 " failed, falling back to fixed sampling" 1997 " failed, falling back to fixed sampling"
1997 " clock\n"); 1998 " clock\n");
1998 err = -EIO; 1999 err = -EIO;
1999 } 2000 }
2000 2001
2001 out: 2002 out:
2002 /* 2003 /*
2003 * If this is the very first time we are here, we start the retuning 2004 * If this is the very first time we are here, we start the retuning
2004 * timer. Since only during the first time, SDHCI_NEEDS_RETUNING 2005 * timer. Since only during the first time, SDHCI_NEEDS_RETUNING
2005 * flag won't be set, we check this condition before actually starting 2006 * flag won't be set, we check this condition before actually starting
2006 * the timer. 2007 * the timer.
2007 */ 2008 */
2008 if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count && 2009 if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
2009 (host->tuning_mode == SDHCI_TUNING_MODE_1)) { 2010 (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
2010 host->flags |= SDHCI_USING_RETUNING_TIMER; 2011 host->flags |= SDHCI_USING_RETUNING_TIMER;
2011 mod_timer(&host->tuning_timer, jiffies + 2012 mod_timer(&host->tuning_timer, jiffies +
2012 host->tuning_count * HZ); 2013 host->tuning_count * HZ);
2013 /* Tuning mode 1 limits the maximum data length to 4MB */ 2014 /* Tuning mode 1 limits the maximum data length to 4MB */
2014 mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size; 2015 mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
2015 } else if (host->flags & SDHCI_USING_RETUNING_TIMER) { 2016 } else if (host->flags & SDHCI_USING_RETUNING_TIMER) {
2016 host->flags &= ~SDHCI_NEEDS_RETUNING; 2017 host->flags &= ~SDHCI_NEEDS_RETUNING;
2017 /* Reload the new initial value for timer */ 2018 /* Reload the new initial value for timer */
2018 mod_timer(&host->tuning_timer, jiffies + 2019 mod_timer(&host->tuning_timer, jiffies +
2019 host->tuning_count * HZ); 2020 host->tuning_count * HZ);
2020 } 2021 }
2021 2022
2022 /* 2023 /*
2023 * In case tuning fails, host controllers which support re-tuning can 2024 * In case tuning fails, host controllers which support re-tuning can
2024 * try tuning again at a later time, when the re-tuning timer expires. 2025 * try tuning again at a later time, when the re-tuning timer expires.
2025 * So for these controllers, we return 0. Since there might be other 2026 * So for these controllers, we return 0. Since there might be other
2026 * controllers who do not have this capability, we return error for 2027 * controllers who do not have this capability, we return error for
2027 * them. SDHCI_USING_RETUNING_TIMER means the host is currently using 2028 * them. SDHCI_USING_RETUNING_TIMER means the host is currently using
2028 * a retuning timer to do the retuning for the card. 2029 * a retuning timer to do the retuning for the card.
2029 */ 2030 */
2030 if (err && (host->flags & SDHCI_USING_RETUNING_TIMER)) 2031 if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
2031 err = 0; 2032 err = 0;
2032 2033
2033 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2034 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2034 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2035 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2035 spin_unlock_irqrestore(&host->lock, flags); 2036 spin_unlock_irqrestore(&host->lock, flags);
2036 sdhci_runtime_pm_put(host); 2037 sdhci_runtime_pm_put(host);
2037 2038
2038 return err; 2039 return err;
2039 } 2040 }
2040 2041
2041 2042
2042 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) 2043 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2043 { 2044 {
2044 /* Host Controller v3.00 defines preset value registers */ 2045 /* Host Controller v3.00 defines preset value registers */
2045 if (host->version < SDHCI_SPEC_300) 2046 if (host->version < SDHCI_SPEC_300)
2046 return; 2047 return;
2047 2048
2048 /* 2049 /*
2049 * We only enable or disable Preset Value if they are not already 2050 * We only enable or disable Preset Value if they are not already
2050 * enabled or disabled respectively. Otherwise, we bail out. 2051 * enabled or disabled respectively. Otherwise, we bail out.
2051 */ 2052 */
2052 if (host->preset_enabled != enable) { 2053 if (host->preset_enabled != enable) {
2053 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2054 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2054 2055
2055 if (enable) 2056 if (enable)
2056 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 2057 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2057 else 2058 else
2058 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 2059 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2059 2060
2060 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2061 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2061 2062
2062 if (enable) 2063 if (enable)
2063 host->flags |= SDHCI_PV_ENABLED; 2064 host->flags |= SDHCI_PV_ENABLED;
2064 else 2065 else
2065 host->flags &= ~SDHCI_PV_ENABLED; 2066 host->flags &= ~SDHCI_PV_ENABLED;
2066 2067
2067 host->preset_enabled = enable; 2068 host->preset_enabled = enable;
2068 } 2069 }
2069 } 2070 }
2070 2071
2071 static void sdhci_card_event(struct mmc_host *mmc) 2072 static void sdhci_card_event(struct mmc_host *mmc)
2072 { 2073 {
2073 struct sdhci_host *host = mmc_priv(mmc); 2074 struct sdhci_host *host = mmc_priv(mmc);
2074 unsigned long flags; 2075 unsigned long flags;
2076 int present;
2075 2077
2076 /* First check if client has provided their own card event */ 2078 /* First check if client has provided their own card event */
2077 if (host->ops->card_event) 2079 if (host->ops->card_event)
2078 host->ops->card_event(host); 2080 host->ops->card_event(host);
2079 2081
2082 present = sdhci_do_get_cd(host);
2083
2080 spin_lock_irqsave(&host->lock, flags); 2084 spin_lock_irqsave(&host->lock, flags);
2081 2085
2082 /* Check host->mrq first in case we are runtime suspended */ 2086 /* Check host->mrq first in case we are runtime suspended */
2083 if (host->mrq && !sdhci_do_get_cd(host)) { 2087 if (host->mrq && !present) {
2084 pr_err("%s: Card removed during transfer!\n", 2088 pr_err("%s: Card removed during transfer!\n",
2085 mmc_hostname(host->mmc)); 2089 mmc_hostname(host->mmc));
2086 pr_err("%s: Resetting controller.\n", 2090 pr_err("%s: Resetting controller.\n",
2087 mmc_hostname(host->mmc)); 2091 mmc_hostname(host->mmc));
2088 2092
2089 sdhci_do_reset(host, SDHCI_RESET_CMD); 2093 sdhci_do_reset(host, SDHCI_RESET_CMD);
2090 sdhci_do_reset(host, SDHCI_RESET_DATA); 2094 sdhci_do_reset(host, SDHCI_RESET_DATA);
2091 2095
2092 host->mrq->cmd->error = -ENOMEDIUM; 2096 host->mrq->cmd->error = -ENOMEDIUM;
2093 tasklet_schedule(&host->finish_tasklet); 2097 tasklet_schedule(&host->finish_tasklet);
2094 } 2098 }
2095 2099
2096 spin_unlock_irqrestore(&host->lock, flags); 2100 spin_unlock_irqrestore(&host->lock, flags);
2097 } 2101 }
2098 2102
2099 static const struct mmc_host_ops sdhci_ops = { 2103 static const struct mmc_host_ops sdhci_ops = {
2100 .request = sdhci_request, 2104 .request = sdhci_request,
2101 .set_ios = sdhci_set_ios, 2105 .set_ios = sdhci_set_ios,
2102 .get_cd = sdhci_get_cd, 2106 .get_cd = sdhci_get_cd,
2103 .get_ro = sdhci_get_ro, 2107 .get_ro = sdhci_get_ro,
2104 .hw_reset = sdhci_hw_reset, 2108 .hw_reset = sdhci_hw_reset,
2105 .enable_sdio_irq = sdhci_enable_sdio_irq, 2109 .enable_sdio_irq = sdhci_enable_sdio_irq,
2106 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 2110 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2107 .execute_tuning = sdhci_execute_tuning, 2111 .execute_tuning = sdhci_execute_tuning,
2108 .card_event = sdhci_card_event, 2112 .card_event = sdhci_card_event,
2109 .card_busy = sdhci_card_busy, 2113 .card_busy = sdhci_card_busy,
2110 }; 2114 };
2111 2115
2112 /*****************************************************************************\ 2116 /*****************************************************************************\
2113 * * 2117 * *
2114 * Tasklets * 2118 * Tasklets *
2115 * * 2119 * *
2116 \*****************************************************************************/ 2120 \*****************************************************************************/
2117 2121
2118 static void sdhci_tasklet_finish(unsigned long param) 2122 static void sdhci_tasklet_finish(unsigned long param)
2119 { 2123 {
2120 struct sdhci_host *host; 2124 struct sdhci_host *host;
2121 unsigned long flags; 2125 unsigned long flags;
2122 struct mmc_request *mrq; 2126 struct mmc_request *mrq;
2123 2127
2124 host = (struct sdhci_host*)param; 2128 host = (struct sdhci_host*)param;
2125 2129
2126 spin_lock_irqsave(&host->lock, flags); 2130 spin_lock_irqsave(&host->lock, flags);
2127 2131
2128 /* 2132 /*
2129 * If this tasklet gets rescheduled while running, it will 2133 * If this tasklet gets rescheduled while running, it will
2130 * be run again afterwards but without any active request. 2134 * be run again afterwards but without any active request.
2131 */ 2135 */
2132 if (!host->mrq) { 2136 if (!host->mrq) {
2133 spin_unlock_irqrestore(&host->lock, flags); 2137 spin_unlock_irqrestore(&host->lock, flags);
2134 return; 2138 return;
2135 } 2139 }
2136 2140
2137 del_timer(&host->timer); 2141 del_timer(&host->timer);
2138 2142
2139 mrq = host->mrq; 2143 mrq = host->mrq;
2140 2144
2141 /* 2145 /*
2142 * The controller needs a reset of internal state machines 2146 * The controller needs a reset of internal state machines
2143 * upon error conditions. 2147 * upon error conditions.
2144 */ 2148 */
2145 if (!(host->flags & SDHCI_DEVICE_DEAD) && 2149 if (!(host->flags & SDHCI_DEVICE_DEAD) &&
2146 ((mrq->cmd && mrq->cmd->error) || 2150 ((mrq->cmd && mrq->cmd->error) ||
2147 (mrq->data && (mrq->data->error || 2151 (mrq->data && (mrq->data->error ||
2148 (mrq->data->stop && mrq->data->stop->error))) || 2152 (mrq->data->stop && mrq->data->stop->error))) ||
2149 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { 2153 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
2150 2154
2151 /* Some controllers need this kick or reset won't work here */ 2155 /* Some controllers need this kick or reset won't work here */
2152 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 2156 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2153 /* This is to force an update */ 2157 /* This is to force an update */
2154 host->ops->set_clock(host, host->clock); 2158 host->ops->set_clock(host, host->clock);
2155 2159
2156 /* Spec says we should do both at the same time, but Ricoh 2160 /* Spec says we should do both at the same time, but Ricoh
2157 controllers do not like that. */ 2161 controllers do not like that. */
2158 sdhci_do_reset(host, SDHCI_RESET_CMD); 2162 sdhci_do_reset(host, SDHCI_RESET_CMD);
2159 sdhci_do_reset(host, SDHCI_RESET_DATA); 2163 sdhci_do_reset(host, SDHCI_RESET_DATA);
2160 } 2164 }
2161 2165
2162 host->mrq = NULL; 2166 host->mrq = NULL;
2163 host->cmd = NULL; 2167 host->cmd = NULL;
2164 host->data = NULL; 2168 host->data = NULL;
2165 2169
2166 #ifndef SDHCI_USE_LEDS_CLASS 2170 #ifndef SDHCI_USE_LEDS_CLASS
2167 sdhci_deactivate_led(host); 2171 sdhci_deactivate_led(host);
2168 #endif 2172 #endif
2169 2173
2170 mmiowb(); 2174 mmiowb();
2171 spin_unlock_irqrestore(&host->lock, flags); 2175 spin_unlock_irqrestore(&host->lock, flags);
2172 2176
2173 mmc_request_done(host->mmc, mrq); 2177 mmc_request_done(host->mmc, mrq);
2174 sdhci_runtime_pm_put(host); 2178 sdhci_runtime_pm_put(host);
2175 } 2179 }
2176 2180
2177 static void sdhci_timeout_timer(unsigned long data) 2181 static void sdhci_timeout_timer(unsigned long data)
2178 { 2182 {
2179 struct sdhci_host *host; 2183 struct sdhci_host *host;
2180 unsigned long flags; 2184 unsigned long flags;
2181 2185
2182 host = (struct sdhci_host*)data; 2186 host = (struct sdhci_host*)data;
2183 2187
2184 spin_lock_irqsave(&host->lock, flags); 2188 spin_lock_irqsave(&host->lock, flags);
2185 2189
2186 if (host->mrq) { 2190 if (host->mrq) {
2187 pr_err("%s: Timeout waiting for hardware " 2191 pr_err("%s: Timeout waiting for hardware "
2188 "interrupt.\n", mmc_hostname(host->mmc)); 2192 "interrupt.\n", mmc_hostname(host->mmc));
2189 sdhci_dumpregs(host); 2193 sdhci_dumpregs(host);
2190 2194
2191 if (host->data) { 2195 if (host->data) {
2192 host->data->error = -ETIMEDOUT; 2196 host->data->error = -ETIMEDOUT;
2193 sdhci_finish_data(host); 2197 sdhci_finish_data(host);
2194 } else { 2198 } else {
2195 if (host->cmd) 2199 if (host->cmd)
2196 host->cmd->error = -ETIMEDOUT; 2200 host->cmd->error = -ETIMEDOUT;
2197 else 2201 else
2198 host->mrq->cmd->error = -ETIMEDOUT; 2202 host->mrq->cmd->error = -ETIMEDOUT;
2199 2203
2200 tasklet_schedule(&host->finish_tasklet); 2204 tasklet_schedule(&host->finish_tasklet);
2201 } 2205 }
2202 } 2206 }
2203 2207
2204 mmiowb(); 2208 mmiowb();
2205 spin_unlock_irqrestore(&host->lock, flags); 2209 spin_unlock_irqrestore(&host->lock, flags);
2206 } 2210 }
2207 2211
2208 static void sdhci_tuning_timer(unsigned long data) 2212 static void sdhci_tuning_timer(unsigned long data)
2209 { 2213 {
2210 struct sdhci_host *host; 2214 struct sdhci_host *host;
2211 unsigned long flags; 2215 unsigned long flags;
2212 2216
2213 host = (struct sdhci_host *)data; 2217 host = (struct sdhci_host *)data;
2214 2218
2215 spin_lock_irqsave(&host->lock, flags); 2219 spin_lock_irqsave(&host->lock, flags);
2216 2220
2217 host->flags |= SDHCI_NEEDS_RETUNING; 2221 host->flags |= SDHCI_NEEDS_RETUNING;
2218 2222
2219 spin_unlock_irqrestore(&host->lock, flags); 2223 spin_unlock_irqrestore(&host->lock, flags);
2220 } 2224 }
2221 2225
2222 /*****************************************************************************\ 2226 /*****************************************************************************\
2223 * * 2227 * *
2224 * Interrupt handling * 2228 * Interrupt handling *
2225 * * 2229 * *
2226 \*****************************************************************************/ 2230 \*****************************************************************************/
2227 2231
/*
 * Handle the command-related bits of an SDHCI interrupt.
 *
 * Called from sdhci_irq() with host->lock held (hard-IRQ context).
 * @intmask holds the SDHCI_INT_CMD_MASK bits of SDHCI_INT_STATUS;
 * @mask is a pointer to the caller's full status copy and may be
 * modified here to suppress a later SDHCI_INT_DATA_END (STOP_WITH_TC
 * quirk below).
 */
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
{
	BUG_ON(intmask == 0);

	/* A command interrupt with no command in flight indicates a
	 * controller or driver-state problem: dump registers and bail. */
	if (!host->cmd) {
		pr_err("%s: Got command interrupt 0x%08x even "
			"though no command operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & SDHCI_INT_TIMEOUT)
		host->cmd->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
			SDHCI_INT_INDEX))
		host->cmd->error = -EILSEQ;

	/* On any command error, hand completion to the finish tasklet. */
	if (host->cmd->error) {
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/*
	 * The host can send and interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * Unfortunately this is overloaded on the "data complete"
	 * interrupt, so we need to take some care when handling
	 * it.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (host->cmd->flags & MMC_RSP_BUSY) {
		if (host->cmd->data)
			DBG("Cannot wait for busy signal when also "
				"doing a data transfer");
		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)
				&& !host->busy_handle) {
			/* Mark that command complete before busy is ended */
			host->busy_handle = 1;
			return;
		}

		/* The controller does not support the end-of-busy IRQ,
		 * fall through and take the SDHCI_INT_RESPONSE */
	} else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
		   host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) {
		/* Quirky controllers signal TC for CMD12 without data:
		 * drop DATA_END from the caller's copy so it is not
		 * misinterpreted as a data-path event. */
		*mask &= ~SDHCI_INT_DATA_END;
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}
2283 2287
#ifdef CONFIG_MMC_DEBUG
/*
 * Dump the ADMA descriptor table after an ADMA error.
 *
 * Walks 8-byte descriptors ([0] attr, [2..3] length, [4..7] address)
 * and stops after the descriptor whose attr has bit 0x2 set (the
 * end-of-table marker).
 */
static void sdhci_show_adma_error(struct sdhci_host *host)
{
	const char *name = mmc_hostname(host->mmc);
	u8 *desc = host->adma_desc;
	__le32 *dma;
	__le16 *len;
	u8 attr;

	sdhci_dumpregs(host);

	while (true) {
		dma = (__le32 *)(desc + 4);
		len = (__le16 *)(desc + 2);
		attr = *desc;

		DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
		    name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);

		desc += 8;

		if (attr & 2)
			break;
	}
}
#else
/* Non-debug build: no-op stub. */
static void sdhci_show_adma_error(struct sdhci_host *host) { }
#endif
2312 2316
/*
 * Handle the data-related bits of an SDHCI interrupt.
 *
 * Called from sdhci_irq() with host->lock held (hard-IRQ context).
 * @intmask holds the SDHCI_INT_DATA_MASK bits of SDHCI_INT_STATUS.
 */
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;
	BUG_ON(intmask == 0);

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			/* Tuning data is consumed by the waiter on
			 * buf_ready_int, not by the normal data path. */
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
				host->cmd->error = -ETIMEDOUT;
				tasklet_schedule(&host->finish_tasklet);
				return;
			}
			if (intmask & SDHCI_INT_DATA_END) {
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
				if (host->busy_handle)
					sdhci_finish_command(host);
				else
					host->busy_handle = 1;
				return;
			}
		}

		/* Data interrupt with no transfer and no busy-wait in
		 * flight: unexpected, so log and dump registers. */
		pr_err("%s: Got data interrupt 0x%08x even "
			"though no data operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
		/* CRC errors are expected during CMD14 bus testing,
		 * so only report them for other commands. */
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_show_adma_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
		if (intmask & SDHCI_INT_DMA_END) {
			u32 dmastart, dmanow;
			dmastart = sg_dma_address(host->data->sg);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
			DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
				" next 0x%08x\n",
				mmc_hostname(host->mmc), dmastart,
				host->data->bytes_xfered, dmanow);
			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
		}

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}
2426 2430
/*
 * Hard-IRQ handler.
 *
 * Repeatedly reads SDHCI_INT_STATUS (bounded by max_loops) and
 * dispatches command, data, card-detect, bus-power and SDIO bits.
 * Card-detect and SDIO card interrupts are recorded in
 * host->thread_isr and deferred to sdhci_thread_irq() by returning
 * IRQ_WAKE_THREAD.
 */
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;

	spin_lock(&host->lock);

	/* While runtime-suspended, only SDIO IRQs may be serviced. */
	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	/* 0 = not our interrupt; 0xffffffff likely means the register
	 * read failed (host inaccessible) — treat both as not ours. */
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	do {
		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);

		DBG("*** %s got interrupt: 0x%08x\n",
			mmc_hostname(host->mmc), intmask);

		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

			/*
			 * There is a observation on i.mx esdhc.  INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted.  We have to mask
			 * the irq to prevent interrupt storm which will
			 * freeze the system.  And the REMOVE gets the
			 * same situation.
			 *
			 * More testing are needed here to ensure it works
			 * for other platforms though.
			 */
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

			/* Defer the actual card event to the threaded handler. */
			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
		}

		if (intmask & SDHCI_INT_CMD_MASK)
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
				      &intmask);

		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
				mmc_hostname(host->mmc));

		if (intmask & SDHCI_INT_CARD_INT) {
			/* Mask SDIO IRQs until the thread re-enables them. */
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}

		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_CARD_INT);

		/* Anything left over was not handled: ack it and report. */
		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}

		if (result == IRQ_NONE)
			result = IRQ_HANDLED;

		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
out:
	spin_unlock(&host->lock);

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
			   mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}

	return result;
}
2529 2533
2530 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 2534 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
2531 { 2535 {
2532 struct sdhci_host *host = dev_id; 2536 struct sdhci_host *host = dev_id;
2533 unsigned long flags; 2537 unsigned long flags;
2534 u32 isr; 2538 u32 isr;
2535 2539
2536 spin_lock_irqsave(&host->lock, flags); 2540 spin_lock_irqsave(&host->lock, flags);
2537 isr = host->thread_isr; 2541 isr = host->thread_isr;
2538 host->thread_isr = 0; 2542 host->thread_isr = 0;
2539 spin_unlock_irqrestore(&host->lock, flags); 2543 spin_unlock_irqrestore(&host->lock, flags);
2540 2544
2541 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 2545 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2542 sdhci_card_event(host->mmc); 2546 sdhci_card_event(host->mmc);
2543 mmc_detect_change(host->mmc, msecs_to_jiffies(200)); 2547 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
2544 } 2548 }
2545 2549
2546 if (isr & SDHCI_INT_CARD_INT) { 2550 if (isr & SDHCI_INT_CARD_INT) {
2547 sdio_run_irqs(host->mmc); 2551 sdio_run_irqs(host->mmc);
2548 2552
2549 spin_lock_irqsave(&host->lock, flags); 2553 spin_lock_irqsave(&host->lock, flags);
2550 if (host->flags & SDHCI_SDIO_IRQ_ENABLED) 2554 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2551 sdhci_enable_sdio_irq_nolock(host, true); 2555 sdhci_enable_sdio_irq_nolock(host, true);
2552 spin_unlock_irqrestore(&host->lock, flags); 2556 spin_unlock_irqrestore(&host->lock, flags);
2553 } 2557 }
2554 2558
2555 return isr ? IRQ_HANDLED : IRQ_NONE; 2559 return isr ? IRQ_HANDLED : IRQ_NONE;
2556 } 2560 }
2557 2561
2558 /*****************************************************************************\ 2562 /*****************************************************************************\
2559 * * 2563 * *
2560 * Suspend/resume * 2564 * Suspend/resume *
2561 * * 2565 * *
2562 \*****************************************************************************/ 2566 \*****************************************************************************/
2563 2567
2564 #ifdef CONFIG_PM 2568 #ifdef CONFIG_PM
2565 void sdhci_enable_irq_wakeups(struct sdhci_host *host) 2569 void sdhci_enable_irq_wakeups(struct sdhci_host *host)
2566 { 2570 {
2567 u8 val; 2571 u8 val;
2568 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 2572 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2569 | SDHCI_WAKE_ON_INT; 2573 | SDHCI_WAKE_ON_INT;
2570 2574
2571 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 2575 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2572 val |= mask ; 2576 val |= mask ;
2573 /* Avoid fake wake up */ 2577 /* Avoid fake wake up */
2574 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 2578 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2575 val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE); 2579 val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
2576 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 2580 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2577 } 2581 }
2578 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups); 2582 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
2579 2583
2580 static void sdhci_disable_irq_wakeups(struct sdhci_host *host) 2584 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
2581 { 2585 {
2582 u8 val; 2586 u8 val;
2583 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 2587 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2584 | SDHCI_WAKE_ON_INT; 2588 | SDHCI_WAKE_ON_INT;
2585 2589
2586 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 2590 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2587 val &= ~mask; 2591 val &= ~mask;
2588 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 2592 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2589 } 2593 }
2590 2594
2591 int sdhci_suspend_host(struct sdhci_host *host) 2595 int sdhci_suspend_host(struct sdhci_host *host)
2592 { 2596 {
2593 sdhci_disable_card_detection(host); 2597 sdhci_disable_card_detection(host);
2594 2598
2595 /* Disable tuning since we are suspending */ 2599 /* Disable tuning since we are suspending */
2596 if (host->flags & SDHCI_USING_RETUNING_TIMER) { 2600 if (host->flags & SDHCI_USING_RETUNING_TIMER) {
2597 del_timer_sync(&host->tuning_timer); 2601 del_timer_sync(&host->tuning_timer);
2598 host->flags &= ~SDHCI_NEEDS_RETUNING; 2602 host->flags &= ~SDHCI_NEEDS_RETUNING;
2599 } 2603 }
2600 2604
2601 if (!device_may_wakeup(mmc_dev(host->mmc))) { 2605 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2602 host->ier = 0; 2606 host->ier = 0;
2603 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 2607 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
2604 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 2608 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2605 free_irq(host->irq, host); 2609 free_irq(host->irq, host);
2606 } else { 2610 } else {
2607 sdhci_enable_irq_wakeups(host); 2611 sdhci_enable_irq_wakeups(host);
2608 enable_irq_wake(host->irq); 2612 enable_irq_wake(host->irq);
2609 } 2613 }
2610 return 0; 2614 return 0;
2611 } 2615 }
2612 2616
2613 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 2617 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
2614 2618
/*
 * Restore controller state after system suspend.
 *
 * Re-enables DMA, re-acquires the IRQ (or undoes the wakeup setup),
 * re-initialises the controller and re-arms card detection.
 * Returns 0 on success or the error from request_threaded_irq().
 */
int sdhci_resume_host(struct sdhci_host *host)
{
	int ret = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		/* IRQ was freed in sdhci_suspend_host(); request it again. */
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	} else {
		sdhci_disable_irq_wakeups(host);
		disable_irq_wake(host->irq);
	}

	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
		/* Force ios re-program since pwr/clock were zeroed above. */
		sdhci_do_set_ios(host, &host->mmc->ios);
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
		mmiowb();
	}

	sdhci_enable_card_detection(host);

	/* Set the re-tuning expiration flag */
	if (host->flags & SDHCI_USING_RETUNING_TIMER)
		host->flags |= SDHCI_NEEDS_RETUNING;

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_resume_host);
2657 #endif /* CONFIG_PM */ 2661 #endif /* CONFIG_PM */
2658 2662
2659 #ifdef CONFIG_PM_RUNTIME 2663 #ifdef CONFIG_PM_RUNTIME
2660 2664
2661 static int sdhci_runtime_pm_get(struct sdhci_host *host) 2665 static int sdhci_runtime_pm_get(struct sdhci_host *host)
2662 { 2666 {
2663 return pm_runtime_get_sync(host->mmc->parent); 2667 return pm_runtime_get_sync(host->mmc->parent);
2664 } 2668 }
2665 2669
2666 static int sdhci_runtime_pm_put(struct sdhci_host *host) 2670 static int sdhci_runtime_pm_put(struct sdhci_host *host)
2667 { 2671 {
2668 pm_runtime_mark_last_busy(host->mmc->parent); 2672 pm_runtime_mark_last_busy(host->mmc->parent);
2669 return pm_runtime_put_autosuspend(host->mmc->parent); 2673 return pm_runtime_put_autosuspend(host->mmc->parent);
2670 } 2674 }
2671 2675
2672 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) 2676 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
2673 { 2677 {
2674 if (host->runtime_suspended || host->bus_on) 2678 if (host->runtime_suspended || host->bus_on)
2675 return; 2679 return;
2676 host->bus_on = true; 2680 host->bus_on = true;
2677 pm_runtime_get_noresume(host->mmc->parent); 2681 pm_runtime_get_noresume(host->mmc->parent);
2678 } 2682 }
2679 2683
2680 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) 2684 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
2681 { 2685 {
2682 if (host->runtime_suspended || !host->bus_on) 2686 if (host->runtime_suspended || !host->bus_on)
2683 return; 2687 return;
2684 host->bus_on = false; 2688 host->bus_on = false;
2685 pm_runtime_put_noidle(host->mmc->parent); 2689 pm_runtime_put_noidle(host->mmc->parent);
2686 } 2690 }
2687 2691
/*
 * Runtime-suspend the controller.
 *
 * Sequence matters: first mask all interrupts except the SDIO card
 * interrupt (under the lock), then wait for any in-flight hard-IRQ
 * handler with synchronize_hardirq(), and only then mark the host
 * runtime-suspended.  Always returns 0.
 */
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

	/* Disable tuning since we are suspending */
	if (host->flags & SDHCI_USING_RETUNING_TIMER) {
		del_timer_sync(&host->tuning_timer);
		host->flags &= ~SDHCI_NEEDS_RETUNING;
	}

	spin_lock_irqsave(&host->lock, flags);
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);

	/* Drain any handler already running before flipping the flag. */
	synchronize_hardirq(host->irq);

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
2713 2717
/*
 * Runtime-resume the controller: re-enable DMA, re-initialise the
 * registers, force clock/power/ios re-programming, then re-arm SDIO
 * IRQs and card detection under the lock.  Always returns 0.
 */
int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	unsigned long flags;
	/* Snapshot flags: SDHCI_PV_ENABLED below must reflect the state
	 * before sdhci_init()/set_ios rewrite host->flags. */
	int host_flags = host->flags;

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	/* Force clock and power re-program */
	host->pwr = 0;
	host->clock = 0;
	sdhci_do_set_ios(host, &host->mmc->ios);

	sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
	if ((host_flags & SDHCI_PV_ENABLED) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
		spin_lock_irqsave(&host->lock, flags);
		sdhci_enable_preset_value(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	/* Set the re-tuning expiration flag */
	if (host->flags & SDHCI_USING_RETUNING_TIMER)
		host->flags |= SDHCI_NEEDS_RETUNING;

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
2759 2763
2760 #endif 2764 #endif
2761 2765
2762 /*****************************************************************************\ 2766 /*****************************************************************************\
2763 * * 2767 * *
2764 * Device allocation/registration * 2768 * Device allocation/registration *
2765 * * 2769 * *
2766 \*****************************************************************************/ 2770 \*****************************************************************************/
2767 2771
2768 struct sdhci_host *sdhci_alloc_host(struct device *dev, 2772 struct sdhci_host *sdhci_alloc_host(struct device *dev,
2769 size_t priv_size) 2773 size_t priv_size)
2770 { 2774 {
2771 struct mmc_host *mmc; 2775 struct mmc_host *mmc;
2772 struct sdhci_host *host; 2776 struct sdhci_host *host;
2773 2777
2774 WARN_ON(dev == NULL); 2778 WARN_ON(dev == NULL);
2775 2779
2776 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 2780 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
2777 if (!mmc) 2781 if (!mmc)
2778 return ERR_PTR(-ENOMEM); 2782 return ERR_PTR(-ENOMEM);
2779 2783
2780 host = mmc_priv(mmc); 2784 host = mmc_priv(mmc);
2781 host->mmc = mmc; 2785 host->mmc = mmc;
2782 2786
2783 return host; 2787 return host;
2784 } 2788 }
2785 2789
2786 EXPORT_SYMBOL_GPL(sdhci_alloc_host); 2790 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
2787 2791
/**
 * sdhci_add_host - finish controller setup and register with the MMC core
 * @host: host structure obtained from sdhci_alloc_host(), with quirks,
 *	ops and irq already filled in by the glue driver
 *
 * Reads the capability registers, configures (A)DMA, clocks and timeouts,
 * derives the supported bus speeds and voltages, allocates ADMA descriptor
 * buffers, requests the interrupt and finally registers the host with the
 * MMC core via mmc_add_host().
 *
 * Returns 0 on success or a negative errno: -EINVAL for a NULL host,
 * -ENODEV when mandatory clock/voltage information is missing,
 * -EPROBE_DEFER when regulators are not ready yet, or the
 * request_threaded_irq() failure code.
 */
int sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 caps[2] = {0, 0};
	u32 max_current_caps;
	unsigned int ocr_avail;
	unsigned int override_timeout_clk;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	/* Module-parameter quirk overrides take precedence over the driver's. */
	if (debug_quirks)
		host->quirks = debug_quirks;
	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	/* Remember a driver-supplied timeout clock; re-applied after probing. */
	override_timeout_clk = host->timeout_clk;

	sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (host->version & SDHCI_SPEC_VER_MASK)
				>> SDHCI_SPEC_VER_SHIFT;
	if (host->version > SDHCI_SPEC_300) {
		pr_err("%s: Unknown controller version (%d). "
			"You may experience problems.\n", mmc_hostname(mmc),
			host->version);
	}

	/* SDHCI_QUIRK_MISSING_CAPS means the driver supplies caps itself. */
	caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
		sdhci_readl(host, SDHCI_CAPABILITIES);

	if (host->version >= SDHCI_SPEC_300)
		caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
			host->caps1 :
			sdhci_readl(host, SDHCI_CAPABILITIES_1);

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
		(host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
		(caps[0] & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
		(host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	/* Let the glue driver veto DMA entirely (e.g. IOMMU/mask problems). */
	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma) {
			if (host->ops->enable_dma(host)) {
				pr_warn("%s: No suitable DMA available - falling back to PIO\n",
					mmc_hostname(mmc));
				host->flags &=
					~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
			}
		}
	}

	if (host->flags & SDHCI_USE_ADMA) {
		/*
		 * We need to allocate descriptors for all sg entries
		 * (128) and potentially one alignment transfer for
		 * each of those entries.
		 */
		host->adma_desc = dma_alloc_coherent(mmc_dev(mmc),
						     ADMA_SIZE, &host->adma_addr,
						     GFP_KERNEL);
		host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
		if (!host->adma_desc || !host->align_buffer) {
			dma_free_coherent(mmc_dev(mmc), ADMA_SIZE,
					  host->adma_desc, host->adma_addr);
			kfree(host->align_buffer);
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			host->adma_desc = NULL;
			host->align_buffer = NULL;
		} else if (host->adma_addr & 3) {
			/* Descriptor table must be 4-byte aligned for ADMA2. */
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), ADMA_SIZE,
					  host->adma_desc, host->adma_addr);
			kfree(host->align_buffer);
			host->adma_desc = NULL;
			host->align_buffer = NULL;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

	/* Base clock field moved/widened in the v3.00 capability layout. */
	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock "
			       "frequency.\n", mmc_hostname(mmc));
			return -ENODEV;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;

	/*
	 * Set host parameters.
	 */
	mmc->ops = &sdhci_ops;
	mmc->f_max = host->max_clk;
	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
			mmc->f_max = host->max_clk * host->clk_mul;
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
					SDHCI_TIMEOUT_CLK_SHIFT;
		if (host->timeout_clk == 0) {
			if (host->ops->get_timeout_clock) {
				host->timeout_clk =
					host->ops->get_timeout_clock(host);
			} else {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
					mmc_hostname(mmc));
				return -ENODEV;
			}
		}

		if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		/* 1 << 27 is the spec-default maximum timeout counter. */
		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}

	if (override_timeout_clk)
		host->timeout_clk = override_timeout_clk;

	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/* Auto-CMD23 stuff only works in ADMA or PIO. */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA))) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
	} else {
		DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
	}

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out. Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (caps[0] & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    !(mmc->caps & MMC_CAP_NONREMOVABLE))
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	/* If there are external regulators, get them */
	if (mmc_regulator_get_supply(mmc) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
					SDHCI_SUPPORT_SDR50 |
					SDHCI_SUPPORT_DDR50);
		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			/*
			 * NOTE(review): NULL here means later IS_ERR() checks
			 * see a "valid" regulator — confirm against later
			 * upstream fixes that use ERR_PTR(-EINVAL) instead.
			 */
			mmc->supply.vqmmc = NULL;
		}
	}

	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
		caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		       SDHCI_SUPPORT_DDR50);

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		       SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 supports also implies SDR50 support */
	if (caps[1] & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200)) {
			mmc->caps2 |= MMC_CAP2_HS200;
			if (IS_ERR(mmc->supply.vqmmc) ||
					!regulator_is_supported_voltage
					(mmc->supply.vqmmc, 1100000, 1300000))
				mmc->caps2 &= ~MMC_CAP2_HS200_1_2V_SDR;
		}
	} else if (caps[1] & SDHCI_SUPPORT_SDR50)
		mmc->caps |= MMC_CAP_UHS_SDR50;

	if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
		!(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (caps[1] & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Does the host need tuning for SDR104 / HS200? */
	if (mmc->caps2 & MMC_CAP2_HS200)
		host->flags |= SDHCI_SDR104_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (caps[1] & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (caps[1] & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (caps[1] & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			      SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
			     SDHCI_RETUNING_MODE_SHIFT;

	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr/1000;  /* convert to mA */
			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}

	if (caps[0] & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (caps[0] & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (caps[0] & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by external regulators, use it instead */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	if (host->ocr_mask)
		ocr_avail &= host->ocr_mask;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any "
			"support voltages.\n", mmc_hostname(mmc));
		return -ENODEV;
	}

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_segs = 128;
	else if (host->flags & SDHCI_USE_SDMA)
		mmc->max_segs = 1;
	else /* PIO */
		mmc->max_segs = 128;

	/*
	 * Maximum number of sectors in one transfer. Limited by DMA boundary
	 * size (512KiB).
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	/* Register field encodes the block size as 512 << n. */
	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	if (host->version >= SDHCI_SPEC_300) {
		init_waitqueue_head(&host->buf_ready_int);

		/* Initialize re-tuning timer */
		init_timer(&host->tuning_timer);
		host->tuning_timer.data = (unsigned long)host;
		host->tuning_timer.function = sdhci_tuning_timer;
	}

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

#ifdef SDHCI_USE_LEDS_CLASS
	snprintf(host->led_name, sizeof(host->led_name),
		"%s::", mmc_hostname(mmc));
	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	ret = led_classdev_register(mmc_dev(mmc), &host->led);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto reset;
	}
#endif

	mmiowb();

	mmc_add_host(mmc);

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ? "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

	/* Error unwind: release resources in reverse acquisition order. */
#ifdef SDHCI_USE_LEDS_CLASS
reset:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
#endif
untasklet:
	tasklet_kill(&host->finish_tasklet);

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_add_host);
3301 3305
3302 void sdhci_remove_host(struct sdhci_host *host, int dead) 3306 void sdhci_remove_host(struct sdhci_host *host, int dead)
3303 { 3307 {
3304 struct mmc_host *mmc = host->mmc; 3308 struct mmc_host *mmc = host->mmc;
3305 unsigned long flags; 3309 unsigned long flags;
3306 3310
3307 if (dead) { 3311 if (dead) {
3308 spin_lock_irqsave(&host->lock, flags); 3312 spin_lock_irqsave(&host->lock, flags);
3309 3313
3310 host->flags |= SDHCI_DEVICE_DEAD; 3314 host->flags |= SDHCI_DEVICE_DEAD;
3311 3315
3312 if (host->mrq) { 3316 if (host->mrq) {
3313 pr_err("%s: Controller removed during " 3317 pr_err("%s: Controller removed during "
3314 " transfer!\n", mmc_hostname(mmc)); 3318 " transfer!\n", mmc_hostname(mmc));
3315 3319
3316 host->mrq->cmd->error = -ENOMEDIUM; 3320 host->mrq->cmd->error = -ENOMEDIUM;
3317 tasklet_schedule(&host->finish_tasklet); 3321 tasklet_schedule(&host->finish_tasklet);
3318 } 3322 }
3319 3323
3320 spin_unlock_irqrestore(&host->lock, flags); 3324 spin_unlock_irqrestore(&host->lock, flags);
3321 } 3325 }
3322 3326
3323 sdhci_disable_card_detection(host); 3327 sdhci_disable_card_detection(host);
3324 3328
3325 mmc_remove_host(mmc); 3329 mmc_remove_host(mmc);
3326 3330
3327 #ifdef SDHCI_USE_LEDS_CLASS 3331 #ifdef SDHCI_USE_LEDS_CLASS
3328 led_classdev_unregister(&host->led); 3332 led_classdev_unregister(&host->led);
3329 #endif 3333 #endif
3330 3334
3331 if (!dead) 3335 if (!dead)
3332 sdhci_do_reset(host, SDHCI_RESET_ALL); 3336 sdhci_do_reset(host, SDHCI_RESET_ALL);
3333 3337
3334 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3338 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3335 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3339 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3336 free_irq(host->irq, host); 3340 free_irq(host->irq, host);
3337 3341
3338 del_timer_sync(&host->timer); 3342 del_timer_sync(&host->timer);
3339 3343
3340 tasklet_kill(&host->finish_tasklet); 3344 tasklet_kill(&host->finish_tasklet);
3341 3345
3342 if (!IS_ERR(mmc->supply.vmmc)) 3346 if (!IS_ERR(mmc->supply.vmmc))
3343 regulator_disable(mmc->supply.vmmc); 3347 regulator_disable(mmc->supply.vmmc);
3344 3348
3345 if (!IS_ERR(mmc->supply.vqmmc)) 3349 if (!IS_ERR(mmc->supply.vqmmc))
3346 regulator_disable(mmc->supply.vqmmc); 3350 regulator_disable(mmc->supply.vqmmc);
3347 3351
3348 if (host->adma_desc) 3352 if (host->adma_desc)
3349 dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, 3353 dma_free_coherent(mmc_dev(mmc), ADMA_SIZE,
3350 host->adma_desc, host->adma_addr); 3354 host->adma_desc, host->adma_addr);
3351 kfree(host->align_buffer); 3355 kfree(host->align_buffer);
3352 3356
3353 host->adma_desc = NULL; 3357 host->adma_desc = NULL;
3354 host->align_buffer = NULL; 3358 host->align_buffer = NULL;
3355 } 3359 }
3356 3360
3357 EXPORT_SYMBOL_GPL(sdhci_remove_host); 3361 EXPORT_SYMBOL_GPL(sdhci_remove_host);
3358 3362
3359 void sdhci_free_host(struct sdhci_host *host) 3363 void sdhci_free_host(struct sdhci_host *host)
3360 { 3364 {
3361 mmc_free_host(host->mmc); 3365 mmc_free_host(host->mmc);
3362 } 3366 }
3363 3367
3364 EXPORT_SYMBOL_GPL(sdhci_free_host); 3368 EXPORT_SYMBOL_GPL(sdhci_free_host);
3365 3369
/*****************************************************************************\
 *                                                                           *
 *                           Driver init/exit                                *
 *                                                                           *
\*****************************************************************************/
3371 3375
/*
 * Module entry point: just announces the core driver. Controllers are
 * registered individually by the bus-glue drivers via sdhci_add_host().
 */
static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}
3380 3384
/*
 * Module exit: intentionally empty — individual hosts are torn down by
 * their bus-glue drivers (sdhci_remove_host/sdhci_free_host), so the
 * core has no global state to release.
 */
static void __exit sdhci_drv_exit(void)
{
}
3384 3388
/* Hook the init/exit functions into the module lifecycle. */
module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

/* Debug quirk overrides, read-only (0444) once the module is loaded. */
module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");