Commit 0e79f9ae1610c15f5e5959c39d7c39071619de97
Committed by Vinod Koul
1 parent 1ff8df4f53
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
mmc: sh_mmcif: switch to the new DMA channel allocation and configuration
Using the "private" field from struct dma_chan is deprecated. The sh dmaengine driver now also supports the preferred DMA channel allocation and configuration method, using a standard filter function and a channel configuration operation. This patch updates sh_mmcif to use this new method. Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de> Cc: Chris Ball <cjb@laptop.org> Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
Showing 1 changed file with 47 additions and 35 deletions
drivers/mmc/host/sh_mmcif.c
1 | /* | 1 | /* |
2 | * MMCIF eMMC driver. | 2 | * MMCIF eMMC driver. |
3 | * | 3 | * |
4 | * Copyright (C) 2010 Renesas Solutions Corp. | 4 | * Copyright (C) 2010 Renesas Solutions Corp. |
5 | * Yusuke Goda <yusuke.goda.sx@renesas.com> | 5 | * Yusuke Goda <yusuke.goda.sx@renesas.com> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
9 | * the Free Software Foundation; either version 2 of the License. | 9 | * the Free Software Foundation; either version 2 of the License. |
10 | * | 10 | * |
11 | * | 11 | * |
12 | * TODO | 12 | * TODO |
13 | * 1. DMA | 13 | * 1. DMA |
14 | * 2. Power management | 14 | * 2. Power management |
15 | * 3. Handle MMC errors better | 15 | * 3. Handle MMC errors better |
16 | * | 16 | * |
17 | */ | 17 | */ |
18 | 18 | ||
19 | /* | 19 | /* |
20 | * The MMCIF driver is now processing MMC requests asynchronously, according | 20 | * The MMCIF driver is now processing MMC requests asynchronously, according |
21 | * to the Linux MMC API requirement. | 21 | * to the Linux MMC API requirement. |
22 | * | 22 | * |
23 | * The MMCIF driver processes MMC requests in up to 3 stages: command, optional | 23 | * The MMCIF driver processes MMC requests in up to 3 stages: command, optional |
24 | * data, and optional stop. To achieve asynchronous processing each of these | 24 | * data, and optional stop. To achieve asynchronous processing each of these |
25 | * stages is split into two halves: a top and a bottom half. The top half | 25 | * stages is split into two halves: a top and a bottom half. The top half |
26 | * initialises the hardware, installs a timeout handler to handle completion | 26 | * initialises the hardware, installs a timeout handler to handle completion |
27 | * timeouts, and returns. In case of the command stage this immediately returns | 27 | * timeouts, and returns. In case of the command stage this immediately returns |
28 | * control to the caller, leaving all further processing to run asynchronously. | 28 | * control to the caller, leaving all further processing to run asynchronously. |
29 | * All further request processing is performed by the bottom halves. | 29 | * All further request processing is performed by the bottom halves. |
30 | * | 30 | * |
31 | * The bottom half further consists of a "hard" IRQ handler, an IRQ handler | 31 | * The bottom half further consists of a "hard" IRQ handler, an IRQ handler |
32 | * thread, a DMA completion callback, if DMA is used, a timeout work, and | 32 | * thread, a DMA completion callback, if DMA is used, a timeout work, and |
33 | * request- and stage-specific handler methods. | 33 | * request- and stage-specific handler methods. |
34 | * | 34 | * |
35 | * Each bottom half run begins with either a hardware interrupt, a DMA callback | 35 | * Each bottom half run begins with either a hardware interrupt, a DMA callback |
36 | * invocation, or a timeout work run. In case of an error or a successful | 36 | * invocation, or a timeout work run. In case of an error or a successful |
37 | * processing completion, the MMC core is informed and the request processing is | 37 | * processing completion, the MMC core is informed and the request processing is |
38 | * finished. In case processing has to continue, i.e., if data has to be read | 38 | * finished. In case processing has to continue, i.e., if data has to be read |
39 | * from or written to the card, or if a stop command has to be sent, the next | 39 | * from or written to the card, or if a stop command has to be sent, the next |
40 | * top half is called, which performs the necessary hardware handling and | 40 | * top half is called, which performs the necessary hardware handling and |
41 | * reschedules the timeout work. This returns the driver state machine into the | 41 | * reschedules the timeout work. This returns the driver state machine into the |
42 | * bottom half waiting state. | 42 | * bottom half waiting state. |
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <linux/bitops.h> | 45 | #include <linux/bitops.h> |
46 | #include <linux/clk.h> | 46 | #include <linux/clk.h> |
47 | #include <linux/completion.h> | 47 | #include <linux/completion.h> |
48 | #include <linux/delay.h> | 48 | #include <linux/delay.h> |
49 | #include <linux/dma-mapping.h> | 49 | #include <linux/dma-mapping.h> |
50 | #include <linux/dmaengine.h> | 50 | #include <linux/dmaengine.h> |
51 | #include <linux/mmc/card.h> | 51 | #include <linux/mmc/card.h> |
52 | #include <linux/mmc/core.h> | 52 | #include <linux/mmc/core.h> |
53 | #include <linux/mmc/host.h> | 53 | #include <linux/mmc/host.h> |
54 | #include <linux/mmc/mmc.h> | 54 | #include <linux/mmc/mmc.h> |
55 | #include <linux/mmc/sdio.h> | 55 | #include <linux/mmc/sdio.h> |
56 | #include <linux/mmc/sh_mmcif.h> | 56 | #include <linux/mmc/sh_mmcif.h> |
57 | #include <linux/pagemap.h> | 57 | #include <linux/pagemap.h> |
58 | #include <linux/platform_device.h> | 58 | #include <linux/platform_device.h> |
59 | #include <linux/pm_qos.h> | 59 | #include <linux/pm_qos.h> |
60 | #include <linux/pm_runtime.h> | 60 | #include <linux/pm_runtime.h> |
61 | #include <linux/spinlock.h> | 61 | #include <linux/spinlock.h> |
62 | #include <linux/module.h> | 62 | #include <linux/module.h> |
63 | 63 | ||
64 | #define DRIVER_NAME "sh_mmcif" | 64 | #define DRIVER_NAME "sh_mmcif" |
65 | #define DRIVER_VERSION "2010-04-28" | 65 | #define DRIVER_VERSION "2010-04-28" |
66 | 66 | ||
67 | /* CE_CMD_SET */ | 67 | /* CE_CMD_SET */ |
68 | #define CMD_MASK 0x3f000000 | 68 | #define CMD_MASK 0x3f000000 |
69 | #define CMD_SET_RTYP_NO ((0 << 23) | (0 << 22)) | 69 | #define CMD_SET_RTYP_NO ((0 << 23) | (0 << 22)) |
70 | #define CMD_SET_RTYP_6B ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */ | 70 | #define CMD_SET_RTYP_6B ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */ |
71 | #define CMD_SET_RTYP_17B ((1 << 23) | (0 << 22)) /* R2 */ | 71 | #define CMD_SET_RTYP_17B ((1 << 23) | (0 << 22)) /* R2 */ |
72 | #define CMD_SET_RBSY (1 << 21) /* R1b */ | 72 | #define CMD_SET_RBSY (1 << 21) /* R1b */ |
73 | #define CMD_SET_CCSEN (1 << 20) | 73 | #define CMD_SET_CCSEN (1 << 20) |
74 | #define CMD_SET_WDAT (1 << 19) /* 1: on data, 0: no data */ | 74 | #define CMD_SET_WDAT (1 << 19) /* 1: on data, 0: no data */ |
75 | #define CMD_SET_DWEN (1 << 18) /* 1: write, 0: read */ | 75 | #define CMD_SET_DWEN (1 << 18) /* 1: write, 0: read */ |
76 | #define CMD_SET_CMLTE (1 << 17) /* 1: multi block trans, 0: single */ | 76 | #define CMD_SET_CMLTE (1 << 17) /* 1: multi block trans, 0: single */ |
77 | #define CMD_SET_CMD12EN (1 << 16) /* 1: CMD12 auto issue */ | 77 | #define CMD_SET_CMD12EN (1 << 16) /* 1: CMD12 auto issue */ |
78 | #define CMD_SET_RIDXC_INDEX ((0 << 15) | (0 << 14)) /* index check */ | 78 | #define CMD_SET_RIDXC_INDEX ((0 << 15) | (0 << 14)) /* index check */ |
79 | #define CMD_SET_RIDXC_BITS ((0 << 15) | (1 << 14)) /* check bits check */ | 79 | #define CMD_SET_RIDXC_BITS ((0 << 15) | (1 << 14)) /* check bits check */ |
80 | #define CMD_SET_RIDXC_NO ((1 << 15) | (0 << 14)) /* no check */ | 80 | #define CMD_SET_RIDXC_NO ((1 << 15) | (0 << 14)) /* no check */ |
81 | #define CMD_SET_CRC7C ((0 << 13) | (0 << 12)) /* CRC7 check*/ | 81 | #define CMD_SET_CRC7C ((0 << 13) | (0 << 12)) /* CRC7 check*/ |
82 | #define CMD_SET_CRC7C_BITS ((0 << 13) | (1 << 12)) /* check bits check*/ | 82 | #define CMD_SET_CRC7C_BITS ((0 << 13) | (1 << 12)) /* check bits check*/ |
83 | #define CMD_SET_CRC7C_INTERNAL ((1 << 13) | (0 << 12)) /* internal CRC7 check*/ | 83 | #define CMD_SET_CRC7C_INTERNAL ((1 << 13) | (0 << 12)) /* internal CRC7 check*/ |
84 | #define CMD_SET_CRC16C (1 << 10) /* 0: CRC16 check*/ | 84 | #define CMD_SET_CRC16C (1 << 10) /* 0: CRC16 check*/ |
85 | #define CMD_SET_CRCSTE (1 << 8) /* 1: not receive CRC status */ | 85 | #define CMD_SET_CRCSTE (1 << 8) /* 1: not receive CRC status */ |
86 | #define CMD_SET_TBIT (1 << 7) /* 1: tran mission bit "Low" */ | 86 | #define CMD_SET_TBIT (1 << 7) /* 1: tran mission bit "Low" */ |
87 | #define CMD_SET_OPDM (1 << 6) /* 1: open/drain */ | 87 | #define CMD_SET_OPDM (1 << 6) /* 1: open/drain */ |
88 | #define CMD_SET_CCSH (1 << 5) | 88 | #define CMD_SET_CCSH (1 << 5) |
89 | #define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */ | 89 | #define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */ |
90 | #define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */ | 90 | #define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */ |
91 | #define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */ | 91 | #define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */ |
92 | 92 | ||
93 | /* CE_CMD_CTRL */ | 93 | /* CE_CMD_CTRL */ |
94 | #define CMD_CTRL_BREAK (1 << 0) | 94 | #define CMD_CTRL_BREAK (1 << 0) |
95 | 95 | ||
96 | /* CE_BLOCK_SET */ | 96 | /* CE_BLOCK_SET */ |
97 | #define BLOCK_SIZE_MASK 0x0000ffff | 97 | #define BLOCK_SIZE_MASK 0x0000ffff |
98 | 98 | ||
99 | /* CE_INT */ | 99 | /* CE_INT */ |
100 | #define INT_CCSDE (1 << 29) | 100 | #define INT_CCSDE (1 << 29) |
101 | #define INT_CMD12DRE (1 << 26) | 101 | #define INT_CMD12DRE (1 << 26) |
102 | #define INT_CMD12RBE (1 << 25) | 102 | #define INT_CMD12RBE (1 << 25) |
103 | #define INT_CMD12CRE (1 << 24) | 103 | #define INT_CMD12CRE (1 << 24) |
104 | #define INT_DTRANE (1 << 23) | 104 | #define INT_DTRANE (1 << 23) |
105 | #define INT_BUFRE (1 << 22) | 105 | #define INT_BUFRE (1 << 22) |
106 | #define INT_BUFWEN (1 << 21) | 106 | #define INT_BUFWEN (1 << 21) |
107 | #define INT_BUFREN (1 << 20) | 107 | #define INT_BUFREN (1 << 20) |
108 | #define INT_CCSRCV (1 << 19) | 108 | #define INT_CCSRCV (1 << 19) |
109 | #define INT_RBSYE (1 << 17) | 109 | #define INT_RBSYE (1 << 17) |
110 | #define INT_CRSPE (1 << 16) | 110 | #define INT_CRSPE (1 << 16) |
111 | #define INT_CMDVIO (1 << 15) | 111 | #define INT_CMDVIO (1 << 15) |
112 | #define INT_BUFVIO (1 << 14) | 112 | #define INT_BUFVIO (1 << 14) |
113 | #define INT_WDATERR (1 << 11) | 113 | #define INT_WDATERR (1 << 11) |
114 | #define INT_RDATERR (1 << 10) | 114 | #define INT_RDATERR (1 << 10) |
115 | #define INT_RIDXERR (1 << 9) | 115 | #define INT_RIDXERR (1 << 9) |
116 | #define INT_RSPERR (1 << 8) | 116 | #define INT_RSPERR (1 << 8) |
117 | #define INT_CCSTO (1 << 5) | 117 | #define INT_CCSTO (1 << 5) |
118 | #define INT_CRCSTO (1 << 4) | 118 | #define INT_CRCSTO (1 << 4) |
119 | #define INT_WDATTO (1 << 3) | 119 | #define INT_WDATTO (1 << 3) |
120 | #define INT_RDATTO (1 << 2) | 120 | #define INT_RDATTO (1 << 2) |
121 | #define INT_RBSYTO (1 << 1) | 121 | #define INT_RBSYTO (1 << 1) |
122 | #define INT_RSPTO (1 << 0) | 122 | #define INT_RSPTO (1 << 0) |
123 | #define INT_ERR_STS (INT_CMDVIO | INT_BUFVIO | INT_WDATERR | \ | 123 | #define INT_ERR_STS (INT_CMDVIO | INT_BUFVIO | INT_WDATERR | \ |
124 | INT_RDATERR | INT_RIDXERR | INT_RSPERR | \ | 124 | INT_RDATERR | INT_RIDXERR | INT_RSPERR | \ |
125 | INT_CCSTO | INT_CRCSTO | INT_WDATTO | \ | 125 | INT_CCSTO | INT_CRCSTO | INT_WDATTO | \ |
126 | INT_RDATTO | INT_RBSYTO | INT_RSPTO) | 126 | INT_RDATTO | INT_RBSYTO | INT_RSPTO) |
127 | 127 | ||
128 | /* CE_INT_MASK */ | 128 | /* CE_INT_MASK */ |
129 | #define MASK_ALL 0x00000000 | 129 | #define MASK_ALL 0x00000000 |
130 | #define MASK_MCCSDE (1 << 29) | 130 | #define MASK_MCCSDE (1 << 29) |
131 | #define MASK_MCMD12DRE (1 << 26) | 131 | #define MASK_MCMD12DRE (1 << 26) |
132 | #define MASK_MCMD12RBE (1 << 25) | 132 | #define MASK_MCMD12RBE (1 << 25) |
133 | #define MASK_MCMD12CRE (1 << 24) | 133 | #define MASK_MCMD12CRE (1 << 24) |
134 | #define MASK_MDTRANE (1 << 23) | 134 | #define MASK_MDTRANE (1 << 23) |
135 | #define MASK_MBUFRE (1 << 22) | 135 | #define MASK_MBUFRE (1 << 22) |
136 | #define MASK_MBUFWEN (1 << 21) | 136 | #define MASK_MBUFWEN (1 << 21) |
137 | #define MASK_MBUFREN (1 << 20) | 137 | #define MASK_MBUFREN (1 << 20) |
138 | #define MASK_MCCSRCV (1 << 19) | 138 | #define MASK_MCCSRCV (1 << 19) |
139 | #define MASK_MRBSYE (1 << 17) | 139 | #define MASK_MRBSYE (1 << 17) |
140 | #define MASK_MCRSPE (1 << 16) | 140 | #define MASK_MCRSPE (1 << 16) |
141 | #define MASK_MCMDVIO (1 << 15) | 141 | #define MASK_MCMDVIO (1 << 15) |
142 | #define MASK_MBUFVIO (1 << 14) | 142 | #define MASK_MBUFVIO (1 << 14) |
143 | #define MASK_MWDATERR (1 << 11) | 143 | #define MASK_MWDATERR (1 << 11) |
144 | #define MASK_MRDATERR (1 << 10) | 144 | #define MASK_MRDATERR (1 << 10) |
145 | #define MASK_MRIDXERR (1 << 9) | 145 | #define MASK_MRIDXERR (1 << 9) |
146 | #define MASK_MRSPERR (1 << 8) | 146 | #define MASK_MRSPERR (1 << 8) |
147 | #define MASK_MCCSTO (1 << 5) | 147 | #define MASK_MCCSTO (1 << 5) |
148 | #define MASK_MCRCSTO (1 << 4) | 148 | #define MASK_MCRCSTO (1 << 4) |
149 | #define MASK_MWDATTO (1 << 3) | 149 | #define MASK_MWDATTO (1 << 3) |
150 | #define MASK_MRDATTO (1 << 2) | 150 | #define MASK_MRDATTO (1 << 2) |
151 | #define MASK_MRBSYTO (1 << 1) | 151 | #define MASK_MRBSYTO (1 << 1) |
152 | #define MASK_MRSPTO (1 << 0) | 152 | #define MASK_MRSPTO (1 << 0) |
153 | 153 | ||
154 | #define MASK_START_CMD (MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \ | 154 | #define MASK_START_CMD (MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \ |
155 | MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \ | 155 | MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \ |
156 | MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \ | 156 | MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \ |
157 | MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO) | 157 | MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO) |
158 | 158 | ||
159 | /* CE_HOST_STS1 */ | 159 | /* CE_HOST_STS1 */ |
160 | #define STS1_CMDSEQ (1 << 31) | 160 | #define STS1_CMDSEQ (1 << 31) |
161 | 161 | ||
162 | /* CE_HOST_STS2 */ | 162 | /* CE_HOST_STS2 */ |
163 | #define STS2_CRCSTE (1 << 31) | 163 | #define STS2_CRCSTE (1 << 31) |
164 | #define STS2_CRC16E (1 << 30) | 164 | #define STS2_CRC16E (1 << 30) |
165 | #define STS2_AC12CRCE (1 << 29) | 165 | #define STS2_AC12CRCE (1 << 29) |
166 | #define STS2_RSPCRC7E (1 << 28) | 166 | #define STS2_RSPCRC7E (1 << 28) |
167 | #define STS2_CRCSTEBE (1 << 27) | 167 | #define STS2_CRCSTEBE (1 << 27) |
168 | #define STS2_RDATEBE (1 << 26) | 168 | #define STS2_RDATEBE (1 << 26) |
169 | #define STS2_AC12REBE (1 << 25) | 169 | #define STS2_AC12REBE (1 << 25) |
170 | #define STS2_RSPEBE (1 << 24) | 170 | #define STS2_RSPEBE (1 << 24) |
171 | #define STS2_AC12IDXE (1 << 23) | 171 | #define STS2_AC12IDXE (1 << 23) |
172 | #define STS2_RSPIDXE (1 << 22) | 172 | #define STS2_RSPIDXE (1 << 22) |
173 | #define STS2_CCSTO (1 << 15) | 173 | #define STS2_CCSTO (1 << 15) |
174 | #define STS2_RDATTO (1 << 14) | 174 | #define STS2_RDATTO (1 << 14) |
175 | #define STS2_DATBSYTO (1 << 13) | 175 | #define STS2_DATBSYTO (1 << 13) |
176 | #define STS2_CRCSTTO (1 << 12) | 176 | #define STS2_CRCSTTO (1 << 12) |
177 | #define STS2_AC12BSYTO (1 << 11) | 177 | #define STS2_AC12BSYTO (1 << 11) |
178 | #define STS2_RSPBSYTO (1 << 10) | 178 | #define STS2_RSPBSYTO (1 << 10) |
179 | #define STS2_AC12RSPTO (1 << 9) | 179 | #define STS2_AC12RSPTO (1 << 9) |
180 | #define STS2_RSPTO (1 << 8) | 180 | #define STS2_RSPTO (1 << 8) |
181 | #define STS2_CRC_ERR (STS2_CRCSTE | STS2_CRC16E | \ | 181 | #define STS2_CRC_ERR (STS2_CRCSTE | STS2_CRC16E | \ |
182 | STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE) | 182 | STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE) |
183 | #define STS2_TIMEOUT_ERR (STS2_CCSTO | STS2_RDATTO | \ | 183 | #define STS2_TIMEOUT_ERR (STS2_CCSTO | STS2_RDATTO | \ |
184 | STS2_DATBSYTO | STS2_CRCSTTO | \ | 184 | STS2_DATBSYTO | STS2_CRCSTTO | \ |
185 | STS2_AC12BSYTO | STS2_RSPBSYTO | \ | 185 | STS2_AC12BSYTO | STS2_RSPBSYTO | \ |
186 | STS2_AC12RSPTO | STS2_RSPTO) | 186 | STS2_AC12RSPTO | STS2_RSPTO) |
187 | 187 | ||
188 | #define CLKDEV_EMMC_DATA 52000000 /* 52MHz */ | 188 | #define CLKDEV_EMMC_DATA 52000000 /* 52MHz */ |
189 | #define CLKDEV_MMC_DATA 20000000 /* 20MHz */ | 189 | #define CLKDEV_MMC_DATA 20000000 /* 20MHz */ |
190 | #define CLKDEV_INIT 400000 /* 400 KHz */ | 190 | #define CLKDEV_INIT 400000 /* 400 KHz */ |
191 | 191 | ||
192 | enum mmcif_state { | 192 | enum mmcif_state { |
193 | STATE_IDLE, | 193 | STATE_IDLE, |
194 | STATE_REQUEST, | 194 | STATE_REQUEST, |
195 | STATE_IOS, | 195 | STATE_IOS, |
196 | }; | 196 | }; |
197 | 197 | ||
198 | enum mmcif_wait_for { | 198 | enum mmcif_wait_for { |
199 | MMCIF_WAIT_FOR_REQUEST, | 199 | MMCIF_WAIT_FOR_REQUEST, |
200 | MMCIF_WAIT_FOR_CMD, | 200 | MMCIF_WAIT_FOR_CMD, |
201 | MMCIF_WAIT_FOR_MREAD, | 201 | MMCIF_WAIT_FOR_MREAD, |
202 | MMCIF_WAIT_FOR_MWRITE, | 202 | MMCIF_WAIT_FOR_MWRITE, |
203 | MMCIF_WAIT_FOR_READ, | 203 | MMCIF_WAIT_FOR_READ, |
204 | MMCIF_WAIT_FOR_WRITE, | 204 | MMCIF_WAIT_FOR_WRITE, |
205 | MMCIF_WAIT_FOR_READ_END, | 205 | MMCIF_WAIT_FOR_READ_END, |
206 | MMCIF_WAIT_FOR_WRITE_END, | 206 | MMCIF_WAIT_FOR_WRITE_END, |
207 | MMCIF_WAIT_FOR_STOP, | 207 | MMCIF_WAIT_FOR_STOP, |
208 | }; | 208 | }; |
209 | 209 | ||
210 | struct sh_mmcif_host { | 210 | struct sh_mmcif_host { |
211 | struct mmc_host *mmc; | 211 | struct mmc_host *mmc; |
212 | struct mmc_request *mrq; | 212 | struct mmc_request *mrq; |
213 | struct platform_device *pd; | 213 | struct platform_device *pd; |
214 | struct sh_dmae_slave dma_slave_tx; | ||
215 | struct sh_dmae_slave dma_slave_rx; | ||
216 | struct clk *hclk; | 214 | struct clk *hclk; |
217 | unsigned int clk; | 215 | unsigned int clk; |
218 | int bus_width; | 216 | int bus_width; |
219 | bool sd_error; | 217 | bool sd_error; |
220 | bool dying; | 218 | bool dying; |
221 | long timeout; | 219 | long timeout; |
222 | void __iomem *addr; | 220 | void __iomem *addr; |
223 | u32 *pio_ptr; | 221 | u32 *pio_ptr; |
224 | spinlock_t lock; /* protect sh_mmcif_host::state */ | 222 | spinlock_t lock; /* protect sh_mmcif_host::state */ |
225 | enum mmcif_state state; | 223 | enum mmcif_state state; |
226 | enum mmcif_wait_for wait_for; | 224 | enum mmcif_wait_for wait_for; |
227 | struct delayed_work timeout_work; | 225 | struct delayed_work timeout_work; |
228 | size_t blocksize; | 226 | size_t blocksize; |
229 | int sg_idx; | 227 | int sg_idx; |
230 | int sg_blkidx; | 228 | int sg_blkidx; |
231 | bool power; | 229 | bool power; |
232 | bool card_present; | 230 | bool card_present; |
233 | 231 | ||
234 | /* DMA support */ | 232 | /* DMA support */ |
235 | struct dma_chan *chan_rx; | 233 | struct dma_chan *chan_rx; |
236 | struct dma_chan *chan_tx; | 234 | struct dma_chan *chan_tx; |
237 | struct completion dma_complete; | 235 | struct completion dma_complete; |
238 | bool dma_active; | 236 | bool dma_active; |
239 | }; | 237 | }; |
240 | 238 | ||
241 | static inline void sh_mmcif_bitset(struct sh_mmcif_host *host, | 239 | static inline void sh_mmcif_bitset(struct sh_mmcif_host *host, |
242 | unsigned int reg, u32 val) | 240 | unsigned int reg, u32 val) |
243 | { | 241 | { |
244 | writel(val | readl(host->addr + reg), host->addr + reg); | 242 | writel(val | readl(host->addr + reg), host->addr + reg); |
245 | } | 243 | } |
246 | 244 | ||
247 | static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host, | 245 | static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host, |
248 | unsigned int reg, u32 val) | 246 | unsigned int reg, u32 val) |
249 | { | 247 | { |
250 | writel(~val & readl(host->addr + reg), host->addr + reg); | 248 | writel(~val & readl(host->addr + reg), host->addr + reg); |
251 | } | 249 | } |
252 | 250 | ||
253 | static void mmcif_dma_complete(void *arg) | 251 | static void mmcif_dma_complete(void *arg) |
254 | { | 252 | { |
255 | struct sh_mmcif_host *host = arg; | 253 | struct sh_mmcif_host *host = arg; |
256 | struct mmc_data *data = host->mrq->data; | 254 | struct mmc_data *data = host->mrq->data; |
257 | 255 | ||
258 | dev_dbg(&host->pd->dev, "Command completed\n"); | 256 | dev_dbg(&host->pd->dev, "Command completed\n"); |
259 | 257 | ||
260 | if (WARN(!data, "%s: NULL data in DMA completion!\n", | 258 | if (WARN(!data, "%s: NULL data in DMA completion!\n", |
261 | dev_name(&host->pd->dev))) | 259 | dev_name(&host->pd->dev))) |
262 | return; | 260 | return; |
263 | 261 | ||
264 | if (data->flags & MMC_DATA_READ) | 262 | if (data->flags & MMC_DATA_READ) |
265 | dma_unmap_sg(host->chan_rx->device->dev, | 263 | dma_unmap_sg(host->chan_rx->device->dev, |
266 | data->sg, data->sg_len, | 264 | data->sg, data->sg_len, |
267 | DMA_FROM_DEVICE); | 265 | DMA_FROM_DEVICE); |
268 | else | 266 | else |
269 | dma_unmap_sg(host->chan_tx->device->dev, | 267 | dma_unmap_sg(host->chan_tx->device->dev, |
270 | data->sg, data->sg_len, | 268 | data->sg, data->sg_len, |
271 | DMA_TO_DEVICE); | 269 | DMA_TO_DEVICE); |
272 | 270 | ||
273 | complete(&host->dma_complete); | 271 | complete(&host->dma_complete); |
274 | } | 272 | } |
275 | 273 | ||
276 | static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) | 274 | static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) |
277 | { | 275 | { |
278 | struct mmc_data *data = host->mrq->data; | 276 | struct mmc_data *data = host->mrq->data; |
279 | struct scatterlist *sg = data->sg; | 277 | struct scatterlist *sg = data->sg; |
280 | struct dma_async_tx_descriptor *desc = NULL; | 278 | struct dma_async_tx_descriptor *desc = NULL; |
281 | struct dma_chan *chan = host->chan_rx; | 279 | struct dma_chan *chan = host->chan_rx; |
282 | dma_cookie_t cookie = -EINVAL; | 280 | dma_cookie_t cookie = -EINVAL; |
283 | int ret; | 281 | int ret; |
284 | 282 | ||
285 | ret = dma_map_sg(chan->device->dev, sg, data->sg_len, | 283 | ret = dma_map_sg(chan->device->dev, sg, data->sg_len, |
286 | DMA_FROM_DEVICE); | 284 | DMA_FROM_DEVICE); |
287 | if (ret > 0) { | 285 | if (ret > 0) { |
288 | host->dma_active = true; | 286 | host->dma_active = true; |
289 | desc = dmaengine_prep_slave_sg(chan, sg, ret, | 287 | desc = dmaengine_prep_slave_sg(chan, sg, ret, |
290 | DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 288 | DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
291 | } | 289 | } |
292 | 290 | ||
293 | if (desc) { | 291 | if (desc) { |
294 | desc->callback = mmcif_dma_complete; | 292 | desc->callback = mmcif_dma_complete; |
295 | desc->callback_param = host; | 293 | desc->callback_param = host; |
296 | cookie = dmaengine_submit(desc); | 294 | cookie = dmaengine_submit(desc); |
297 | sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN); | 295 | sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN); |
298 | dma_async_issue_pending(chan); | 296 | dma_async_issue_pending(chan); |
299 | } | 297 | } |
300 | dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", | 298 | dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", |
301 | __func__, data->sg_len, ret, cookie); | 299 | __func__, data->sg_len, ret, cookie); |
302 | 300 | ||
303 | if (!desc) { | 301 | if (!desc) { |
304 | /* DMA failed, fall back to PIO */ | 302 | /* DMA failed, fall back to PIO */ |
305 | if (ret >= 0) | 303 | if (ret >= 0) |
306 | ret = -EIO; | 304 | ret = -EIO; |
307 | host->chan_rx = NULL; | 305 | host->chan_rx = NULL; |
308 | host->dma_active = false; | 306 | host->dma_active = false; |
309 | dma_release_channel(chan); | 307 | dma_release_channel(chan); |
310 | /* Free the Tx channel too */ | 308 | /* Free the Tx channel too */ |
311 | chan = host->chan_tx; | 309 | chan = host->chan_tx; |
312 | if (chan) { | 310 | if (chan) { |
313 | host->chan_tx = NULL; | 311 | host->chan_tx = NULL; |
314 | dma_release_channel(chan); | 312 | dma_release_channel(chan); |
315 | } | 313 | } |
316 | dev_warn(&host->pd->dev, | 314 | dev_warn(&host->pd->dev, |
317 | "DMA failed: %d, falling back to PIO\n", ret); | 315 | "DMA failed: %d, falling back to PIO\n", ret); |
318 | sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); | 316 | sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); |
319 | } | 317 | } |
320 | 318 | ||
321 | dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, | 319 | dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, |
322 | desc, cookie, data->sg_len); | 320 | desc, cookie, data->sg_len); |
323 | } | 321 | } |
324 | 322 | ||
325 | static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) | 323 | static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) |
326 | { | 324 | { |
327 | struct mmc_data *data = host->mrq->data; | 325 | struct mmc_data *data = host->mrq->data; |
328 | struct scatterlist *sg = data->sg; | 326 | struct scatterlist *sg = data->sg; |
329 | struct dma_async_tx_descriptor *desc = NULL; | 327 | struct dma_async_tx_descriptor *desc = NULL; |
330 | struct dma_chan *chan = host->chan_tx; | 328 | struct dma_chan *chan = host->chan_tx; |
331 | dma_cookie_t cookie = -EINVAL; | 329 | dma_cookie_t cookie = -EINVAL; |
332 | int ret; | 330 | int ret; |
333 | 331 | ||
334 | ret = dma_map_sg(chan->device->dev, sg, data->sg_len, | 332 | ret = dma_map_sg(chan->device->dev, sg, data->sg_len, |
335 | DMA_TO_DEVICE); | 333 | DMA_TO_DEVICE); |
336 | if (ret > 0) { | 334 | if (ret > 0) { |
337 | host->dma_active = true; | 335 | host->dma_active = true; |
338 | desc = dmaengine_prep_slave_sg(chan, sg, ret, | 336 | desc = dmaengine_prep_slave_sg(chan, sg, ret, |
339 | DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 337 | DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
340 | } | 338 | } |
341 | 339 | ||
342 | if (desc) { | 340 | if (desc) { |
343 | desc->callback = mmcif_dma_complete; | 341 | desc->callback = mmcif_dma_complete; |
344 | desc->callback_param = host; | 342 | desc->callback_param = host; |
345 | cookie = dmaengine_submit(desc); | 343 | cookie = dmaengine_submit(desc); |
346 | sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN); | 344 | sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN); |
347 | dma_async_issue_pending(chan); | 345 | dma_async_issue_pending(chan); |
348 | } | 346 | } |
349 | dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", | 347 | dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", |
350 | __func__, data->sg_len, ret, cookie); | 348 | __func__, data->sg_len, ret, cookie); |
351 | 349 | ||
352 | if (!desc) { | 350 | if (!desc) { |
353 | /* DMA failed, fall back to PIO */ | 351 | /* DMA failed, fall back to PIO */ |
354 | if (ret >= 0) | 352 | if (ret >= 0) |
355 | ret = -EIO; | 353 | ret = -EIO; |
356 | host->chan_tx = NULL; | 354 | host->chan_tx = NULL; |
357 | host->dma_active = false; | 355 | host->dma_active = false; |
358 | dma_release_channel(chan); | 356 | dma_release_channel(chan); |
359 | /* Free the Rx channel too */ | 357 | /* Free the Rx channel too */ |
360 | chan = host->chan_rx; | 358 | chan = host->chan_rx; |
361 | if (chan) { | 359 | if (chan) { |
362 | host->chan_rx = NULL; | 360 | host->chan_rx = NULL; |
363 | dma_release_channel(chan); | 361 | dma_release_channel(chan); |
364 | } | 362 | } |
365 | dev_warn(&host->pd->dev, | 363 | dev_warn(&host->pd->dev, |
366 | "DMA failed: %d, falling back to PIO\n", ret); | 364 | "DMA failed: %d, falling back to PIO\n", ret); |
367 | sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); | 365 | sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); |
368 | } | 366 | } |
369 | 367 | ||
370 | dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__, | 368 | dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__, |
371 | desc, cookie); | 369 | desc, cookie); |
372 | } | 370 | } |
373 | 371 | ||
374 | static bool sh_mmcif_filter(struct dma_chan *chan, void *arg) | ||
375 | { | ||
376 | dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); | ||
377 | chan->private = arg; | ||
378 | return true; | ||
379 | } | ||
380 | |||
381 | static void sh_mmcif_request_dma(struct sh_mmcif_host *host, | 372 | static void sh_mmcif_request_dma(struct sh_mmcif_host *host, |
382 | struct sh_mmcif_plat_data *pdata) | 373 | struct sh_mmcif_plat_data *pdata) |
383 | { | 374 | { |
384 | struct sh_dmae_slave *tx, *rx; | 375 | struct resource *res = platform_get_resource(host->pd, IORESOURCE_MEM, 0); |
376 | struct dma_slave_config cfg; | ||
377 | dma_cap_mask_t mask; | ||
378 | int ret; | ||
379 | |||
385 | host->dma_active = false; | 380 | host->dma_active = false; |
386 | 381 | ||
382 | if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0) | ||
383 | return; | ||
384 | |||
387 | /* We can only either use DMA for both Tx and Rx or not use it at all */ | 385 | /* We can only either use DMA for both Tx and Rx or not use it at all */ |
388 | tx = &host->dma_slave_tx; | 386 | dma_cap_zero(mask); |
389 | tx->shdma_slave.slave_id = pdata->slave_id_tx; | 387 | dma_cap_set(DMA_SLAVE, mask); |
390 | rx = &host->dma_slave_rx; | ||
391 | rx->shdma_slave.slave_id = pdata->slave_id_rx; | ||
392 | 388 | ||
393 | if (tx->shdma_slave.slave_id > 0 && rx->shdma_slave.slave_id > 0) { | 389 | host->chan_tx = dma_request_channel(mask, shdma_chan_filter, |
394 | dma_cap_mask_t mask; | 390 | (void *)pdata->slave_id_tx); |
391 | dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__, | ||
392 | host->chan_tx); | ||
395 | 393 | ||
396 | dma_cap_zero(mask); | 394 | if (!host->chan_tx) |
397 | dma_cap_set(DMA_SLAVE, mask); | 395 | return; |
398 | 396 | ||
399 | host->chan_tx = dma_request_channel(mask, sh_mmcif_filter, | 397 | cfg.slave_id = pdata->slave_id_tx; |
400 | &tx->shdma_slave); | 398 | cfg.direction = DMA_MEM_TO_DEV; |
401 | dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__, | 399 | cfg.dst_addr = res->start + MMCIF_CE_DATA; |
402 | host->chan_tx); | 400 | cfg.src_addr = 0; |
401 | ret = dmaengine_slave_config(host->chan_tx, &cfg); | ||
402 | if (ret < 0) | ||
403 | goto ecfgtx; | ||
403 | 404 | ||
404 | if (!host->chan_tx) | 405 | host->chan_rx = dma_request_channel(mask, shdma_chan_filter, |
405 | return; | 406 | (void *)pdata->slave_id_rx); |
407 | dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__, | ||
408 | host->chan_rx); | ||
406 | 409 | ||
407 | host->chan_rx = dma_request_channel(mask, sh_mmcif_filter, | 410 | if (!host->chan_rx) |
408 | &rx->shdma_slave); | 411 | goto erqrx; |
409 | dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__, | ||
410 | host->chan_rx); | ||
411 | 412 | ||
412 | if (!host->chan_rx) { | 413 | cfg.slave_id = pdata->slave_id_rx; |
413 | dma_release_channel(host->chan_tx); | 414 | cfg.direction = DMA_DEV_TO_MEM; |
414 | host->chan_tx = NULL; | 415 | cfg.dst_addr = 0; |
415 | return; | 416 | cfg.src_addr = res->start + MMCIF_CE_DATA; |
416 | } | 417 | ret = dmaengine_slave_config(host->chan_rx, &cfg); |
418 | if (ret < 0) | ||
419 | goto ecfgrx; | ||
417 | 420 | ||
418 | init_completion(&host->dma_complete); | 421 | init_completion(&host->dma_complete); |
419 | } | 422 | |
423 | return; | ||
424 | |||
425 | ecfgrx: | ||
426 | dma_release_channel(host->chan_rx); | ||
427 | host->chan_rx = NULL; | ||
428 | erqrx: | ||
429 | ecfgtx: | ||
430 | dma_release_channel(host->chan_tx); | ||
431 | host->chan_tx = NULL; | ||
420 | } | 432 | } |
421 | 433 | ||
422 | static void sh_mmcif_release_dma(struct sh_mmcif_host *host) | 434 | static void sh_mmcif_release_dma(struct sh_mmcif_host *host) |
423 | { | 435 | { |
424 | sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); | 436 | sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); |
425 | /* Descriptors are freed automatically */ | 437 | /* Descriptors are freed automatically */ |
426 | if (host->chan_tx) { | 438 | if (host->chan_tx) { |
427 | struct dma_chan *chan = host->chan_tx; | 439 | struct dma_chan *chan = host->chan_tx; |
428 | host->chan_tx = NULL; | 440 | host->chan_tx = NULL; |
429 | dma_release_channel(chan); | 441 | dma_release_channel(chan); |
430 | } | 442 | } |
431 | if (host->chan_rx) { | 443 | if (host->chan_rx) { |
432 | struct dma_chan *chan = host->chan_rx; | 444 | struct dma_chan *chan = host->chan_rx; |
433 | host->chan_rx = NULL; | 445 | host->chan_rx = NULL; |
434 | dma_release_channel(chan); | 446 | dma_release_channel(chan); |
435 | } | 447 | } |
436 | 448 | ||
437 | host->dma_active = false; | 449 | host->dma_active = false; |
438 | } | 450 | } |
439 | 451 | ||
440 | static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk) | 452 | static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk) |
441 | { | 453 | { |
442 | struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; | 454 | struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; |
443 | 455 | ||
444 | sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); | 456 | sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); |
445 | sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR); | 457 | sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR); |
446 | 458 | ||
447 | if (!clk) | 459 | if (!clk) |
448 | return; | 460 | return; |
449 | if (p->sup_pclk && clk == host->clk) | 461 | if (p->sup_pclk && clk == host->clk) |
450 | sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK); | 462 | sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK); |
451 | else | 463 | else |
452 | sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & | 464 | sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & |
453 | ((fls(DIV_ROUND_UP(host->clk, | 465 | ((fls(DIV_ROUND_UP(host->clk, |
454 | clk) - 1) - 1) << 16)); | 466 | clk) - 1) - 1) << 16)); |
455 | 467 | ||
456 | sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); | 468 | sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); |
457 | } | 469 | } |
458 | 470 | ||
459 | static void sh_mmcif_sync_reset(struct sh_mmcif_host *host) | 471 | static void sh_mmcif_sync_reset(struct sh_mmcif_host *host) |
460 | { | 472 | { |
461 | u32 tmp; | 473 | u32 tmp; |
462 | 474 | ||
463 | tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL); | 475 | tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL); |
464 | 476 | ||
465 | sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON); | 477 | sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON); |
466 | sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF); | 478 | sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF); |
467 | sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp | | 479 | sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp | |
468 | SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29); | 480 | SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29); |
469 | /* byte swap on */ | 481 | /* byte swap on */ |
470 | sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP); | 482 | sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP); |
471 | } | 483 | } |
472 | 484 | ||
473 | static int sh_mmcif_error_manage(struct sh_mmcif_host *host) | 485 | static int sh_mmcif_error_manage(struct sh_mmcif_host *host) |
474 | { | 486 | { |
475 | u32 state1, state2; | 487 | u32 state1, state2; |
476 | int ret, timeout; | 488 | int ret, timeout; |
477 | 489 | ||
478 | host->sd_error = false; | 490 | host->sd_error = false; |
479 | 491 | ||
480 | state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1); | 492 | state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1); |
481 | state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2); | 493 | state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2); |
482 | dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1); | 494 | dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1); |
483 | dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2); | 495 | dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2); |
484 | 496 | ||
485 | if (state1 & STS1_CMDSEQ) { | 497 | if (state1 & STS1_CMDSEQ) { |
486 | sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); | 498 | sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); |
487 | sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK); | 499 | sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK); |
488 | for (timeout = 10000000; timeout; timeout--) { | 500 | for (timeout = 10000000; timeout; timeout--) { |
489 | if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1) | 501 | if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1) |
490 | & STS1_CMDSEQ)) | 502 | & STS1_CMDSEQ)) |
491 | break; | 503 | break; |
492 | mdelay(1); | 504 | mdelay(1); |
493 | } | 505 | } |
494 | if (!timeout) { | 506 | if (!timeout) { |
495 | dev_err(&host->pd->dev, | 507 | dev_err(&host->pd->dev, |
496 | "Forced end of command sequence timeout err\n"); | 508 | "Forced end of command sequence timeout err\n"); |
497 | return -EIO; | 509 | return -EIO; |
498 | } | 510 | } |
499 | sh_mmcif_sync_reset(host); | 511 | sh_mmcif_sync_reset(host); |
500 | dev_dbg(&host->pd->dev, "Forced end of command sequence\n"); | 512 | dev_dbg(&host->pd->dev, "Forced end of command sequence\n"); |
501 | return -EIO; | 513 | return -EIO; |
502 | } | 514 | } |
503 | 515 | ||
504 | if (state2 & STS2_CRC_ERR) { | 516 | if (state2 & STS2_CRC_ERR) { |
505 | dev_dbg(&host->pd->dev, ": CRC error\n"); | 517 | dev_dbg(&host->pd->dev, ": CRC error\n"); |
506 | ret = -EIO; | 518 | ret = -EIO; |
507 | } else if (state2 & STS2_TIMEOUT_ERR) { | 519 | } else if (state2 & STS2_TIMEOUT_ERR) { |
508 | dev_dbg(&host->pd->dev, ": Timeout\n"); | 520 | dev_dbg(&host->pd->dev, ": Timeout\n"); |
509 | ret = -ETIMEDOUT; | 521 | ret = -ETIMEDOUT; |
510 | } else { | 522 | } else { |
511 | dev_dbg(&host->pd->dev, ": End/Index error\n"); | 523 | dev_dbg(&host->pd->dev, ": End/Index error\n"); |
512 | ret = -EIO; | 524 | ret = -EIO; |
513 | } | 525 | } |
514 | return ret; | 526 | return ret; |
515 | } | 527 | } |
516 | 528 | ||
517 | static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p) | 529 | static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p) |
518 | { | 530 | { |
519 | struct mmc_data *data = host->mrq->data; | 531 | struct mmc_data *data = host->mrq->data; |
520 | 532 | ||
521 | host->sg_blkidx += host->blocksize; | 533 | host->sg_blkidx += host->blocksize; |
522 | 534 | ||
523 | /* data->sg->length must be a multiple of host->blocksize? */ | 535 | /* data->sg->length must be a multiple of host->blocksize? */ |
524 | BUG_ON(host->sg_blkidx > data->sg->length); | 536 | BUG_ON(host->sg_blkidx > data->sg->length); |
525 | 537 | ||
526 | if (host->sg_blkidx == data->sg->length) { | 538 | if (host->sg_blkidx == data->sg->length) { |
527 | host->sg_blkidx = 0; | 539 | host->sg_blkidx = 0; |
528 | if (++host->sg_idx < data->sg_len) | 540 | if (++host->sg_idx < data->sg_len) |
529 | host->pio_ptr = sg_virt(++data->sg); | 541 | host->pio_ptr = sg_virt(++data->sg); |
530 | } else { | 542 | } else { |
531 | host->pio_ptr = p; | 543 | host->pio_ptr = p; |
532 | } | 544 | } |
533 | 545 | ||
534 | if (host->sg_idx == data->sg_len) | 546 | if (host->sg_idx == data->sg_len) |
535 | return false; | 547 | return false; |
536 | 548 | ||
537 | return true; | 549 | return true; |
538 | } | 550 | } |
539 | 551 | ||
540 | static void sh_mmcif_single_read(struct sh_mmcif_host *host, | 552 | static void sh_mmcif_single_read(struct sh_mmcif_host *host, |
541 | struct mmc_request *mrq) | 553 | struct mmc_request *mrq) |
542 | { | 554 | { |
543 | host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & | 555 | host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & |
544 | BLOCK_SIZE_MASK) + 3; | 556 | BLOCK_SIZE_MASK) + 3; |
545 | 557 | ||
546 | host->wait_for = MMCIF_WAIT_FOR_READ; | 558 | host->wait_for = MMCIF_WAIT_FOR_READ; |
547 | schedule_delayed_work(&host->timeout_work, host->timeout); | 559 | schedule_delayed_work(&host->timeout_work, host->timeout); |
548 | 560 | ||
549 | /* buf read enable */ | 561 | /* buf read enable */ |
550 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); | 562 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); |
551 | } | 563 | } |
552 | 564 | ||
553 | static bool sh_mmcif_read_block(struct sh_mmcif_host *host) | 565 | static bool sh_mmcif_read_block(struct sh_mmcif_host *host) |
554 | { | 566 | { |
555 | struct mmc_data *data = host->mrq->data; | 567 | struct mmc_data *data = host->mrq->data; |
556 | u32 *p = sg_virt(data->sg); | 568 | u32 *p = sg_virt(data->sg); |
557 | int i; | 569 | int i; |
558 | 570 | ||
559 | if (host->sd_error) { | 571 | if (host->sd_error) { |
560 | data->error = sh_mmcif_error_manage(host); | 572 | data->error = sh_mmcif_error_manage(host); |
561 | return false; | 573 | return false; |
562 | } | 574 | } |
563 | 575 | ||
564 | for (i = 0; i < host->blocksize / 4; i++) | 576 | for (i = 0; i < host->blocksize / 4; i++) |
565 | *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); | 577 | *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); |
566 | 578 | ||
567 | /* buffer read end */ | 579 | /* buffer read end */ |
568 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); | 580 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); |
569 | host->wait_for = MMCIF_WAIT_FOR_READ_END; | 581 | host->wait_for = MMCIF_WAIT_FOR_READ_END; |
570 | 582 | ||
571 | return true; | 583 | return true; |
572 | } | 584 | } |
573 | 585 | ||
574 | static void sh_mmcif_multi_read(struct sh_mmcif_host *host, | 586 | static void sh_mmcif_multi_read(struct sh_mmcif_host *host, |
575 | struct mmc_request *mrq) | 587 | struct mmc_request *mrq) |
576 | { | 588 | { |
577 | struct mmc_data *data = mrq->data; | 589 | struct mmc_data *data = mrq->data; |
578 | 590 | ||
579 | if (!data->sg_len || !data->sg->length) | 591 | if (!data->sg_len || !data->sg->length) |
580 | return; | 592 | return; |
581 | 593 | ||
582 | host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & | 594 | host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & |
583 | BLOCK_SIZE_MASK; | 595 | BLOCK_SIZE_MASK; |
584 | 596 | ||
585 | host->wait_for = MMCIF_WAIT_FOR_MREAD; | 597 | host->wait_for = MMCIF_WAIT_FOR_MREAD; |
586 | host->sg_idx = 0; | 598 | host->sg_idx = 0; |
587 | host->sg_blkidx = 0; | 599 | host->sg_blkidx = 0; |
588 | host->pio_ptr = sg_virt(data->sg); | 600 | host->pio_ptr = sg_virt(data->sg); |
589 | schedule_delayed_work(&host->timeout_work, host->timeout); | 601 | schedule_delayed_work(&host->timeout_work, host->timeout); |
590 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); | 602 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); |
591 | } | 603 | } |
592 | 604 | ||
593 | static bool sh_mmcif_mread_block(struct sh_mmcif_host *host) | 605 | static bool sh_mmcif_mread_block(struct sh_mmcif_host *host) |
594 | { | 606 | { |
595 | struct mmc_data *data = host->mrq->data; | 607 | struct mmc_data *data = host->mrq->data; |
596 | u32 *p = host->pio_ptr; | 608 | u32 *p = host->pio_ptr; |
597 | int i; | 609 | int i; |
598 | 610 | ||
599 | if (host->sd_error) { | 611 | if (host->sd_error) { |
600 | data->error = sh_mmcif_error_manage(host); | 612 | data->error = sh_mmcif_error_manage(host); |
601 | return false; | 613 | return false; |
602 | } | 614 | } |
603 | 615 | ||
604 | BUG_ON(!data->sg->length); | 616 | BUG_ON(!data->sg->length); |
605 | 617 | ||
606 | for (i = 0; i < host->blocksize / 4; i++) | 618 | for (i = 0; i < host->blocksize / 4; i++) |
607 | *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); | 619 | *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); |
608 | 620 | ||
609 | if (!sh_mmcif_next_block(host, p)) | 621 | if (!sh_mmcif_next_block(host, p)) |
610 | return false; | 622 | return false; |
611 | 623 | ||
612 | schedule_delayed_work(&host->timeout_work, host->timeout); | 624 | schedule_delayed_work(&host->timeout_work, host->timeout); |
613 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); | 625 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); |
614 | 626 | ||
615 | return true; | 627 | return true; |
616 | } | 628 | } |
617 | 629 | ||
618 | static void sh_mmcif_single_write(struct sh_mmcif_host *host, | 630 | static void sh_mmcif_single_write(struct sh_mmcif_host *host, |
619 | struct mmc_request *mrq) | 631 | struct mmc_request *mrq) |
620 | { | 632 | { |
621 | host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & | 633 | host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & |
622 | BLOCK_SIZE_MASK) + 3; | 634 | BLOCK_SIZE_MASK) + 3; |
623 | 635 | ||
624 | host->wait_for = MMCIF_WAIT_FOR_WRITE; | 636 | host->wait_for = MMCIF_WAIT_FOR_WRITE; |
625 | schedule_delayed_work(&host->timeout_work, host->timeout); | 637 | schedule_delayed_work(&host->timeout_work, host->timeout); |
626 | 638 | ||
627 | /* buf write enable */ | 639 | /* buf write enable */ |
628 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); | 640 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); |
629 | } | 641 | } |
630 | 642 | ||
631 | static bool sh_mmcif_write_block(struct sh_mmcif_host *host) | 643 | static bool sh_mmcif_write_block(struct sh_mmcif_host *host) |
632 | { | 644 | { |
633 | struct mmc_data *data = host->mrq->data; | 645 | struct mmc_data *data = host->mrq->data; |
634 | u32 *p = sg_virt(data->sg); | 646 | u32 *p = sg_virt(data->sg); |
635 | int i; | 647 | int i; |
636 | 648 | ||
637 | if (host->sd_error) { | 649 | if (host->sd_error) { |
638 | data->error = sh_mmcif_error_manage(host); | 650 | data->error = sh_mmcif_error_manage(host); |
639 | return false; | 651 | return false; |
640 | } | 652 | } |
641 | 653 | ||
642 | for (i = 0; i < host->blocksize / 4; i++) | 654 | for (i = 0; i < host->blocksize / 4; i++) |
643 | sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); | 655 | sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); |
644 | 656 | ||
645 | /* buffer write end */ | 657 | /* buffer write end */ |
646 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); | 658 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); |
647 | host->wait_for = MMCIF_WAIT_FOR_WRITE_END; | 659 | host->wait_for = MMCIF_WAIT_FOR_WRITE_END; |
648 | 660 | ||
649 | return true; | 661 | return true; |
650 | } | 662 | } |
651 | 663 | ||
652 | static void sh_mmcif_multi_write(struct sh_mmcif_host *host, | 664 | static void sh_mmcif_multi_write(struct sh_mmcif_host *host, |
653 | struct mmc_request *mrq) | 665 | struct mmc_request *mrq) |
654 | { | 666 | { |
655 | struct mmc_data *data = mrq->data; | 667 | struct mmc_data *data = mrq->data; |
656 | 668 | ||
657 | if (!data->sg_len || !data->sg->length) | 669 | if (!data->sg_len || !data->sg->length) |
658 | return; | 670 | return; |
659 | 671 | ||
660 | host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & | 672 | host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & |
661 | BLOCK_SIZE_MASK; | 673 | BLOCK_SIZE_MASK; |
662 | 674 | ||
663 | host->wait_for = MMCIF_WAIT_FOR_MWRITE; | 675 | host->wait_for = MMCIF_WAIT_FOR_MWRITE; |
664 | host->sg_idx = 0; | 676 | host->sg_idx = 0; |
665 | host->sg_blkidx = 0; | 677 | host->sg_blkidx = 0; |
666 | host->pio_ptr = sg_virt(data->sg); | 678 | host->pio_ptr = sg_virt(data->sg); |
667 | schedule_delayed_work(&host->timeout_work, host->timeout); | 679 | schedule_delayed_work(&host->timeout_work, host->timeout); |
668 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); | 680 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); |
669 | } | 681 | } |
670 | 682 | ||
671 | static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host) | 683 | static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host) |
672 | { | 684 | { |
673 | struct mmc_data *data = host->mrq->data; | 685 | struct mmc_data *data = host->mrq->data; |
674 | u32 *p = host->pio_ptr; | 686 | u32 *p = host->pio_ptr; |
675 | int i; | 687 | int i; |
676 | 688 | ||
677 | if (host->sd_error) { | 689 | if (host->sd_error) { |
678 | data->error = sh_mmcif_error_manage(host); | 690 | data->error = sh_mmcif_error_manage(host); |
679 | return false; | 691 | return false; |
680 | } | 692 | } |
681 | 693 | ||
682 | BUG_ON(!data->sg->length); | 694 | BUG_ON(!data->sg->length); |
683 | 695 | ||
684 | for (i = 0; i < host->blocksize / 4; i++) | 696 | for (i = 0; i < host->blocksize / 4; i++) |
685 | sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); | 697 | sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); |
686 | 698 | ||
687 | if (!sh_mmcif_next_block(host, p)) | 699 | if (!sh_mmcif_next_block(host, p)) |
688 | return false; | 700 | return false; |
689 | 701 | ||
690 | schedule_delayed_work(&host->timeout_work, host->timeout); | 702 | schedule_delayed_work(&host->timeout_work, host->timeout); |
691 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); | 703 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); |
692 | 704 | ||
693 | return true; | 705 | return true; |
694 | } | 706 | } |
695 | 707 | ||
696 | static void sh_mmcif_get_response(struct sh_mmcif_host *host, | 708 | static void sh_mmcif_get_response(struct sh_mmcif_host *host, |
697 | struct mmc_command *cmd) | 709 | struct mmc_command *cmd) |
698 | { | 710 | { |
699 | if (cmd->flags & MMC_RSP_136) { | 711 | if (cmd->flags & MMC_RSP_136) { |
700 | cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3); | 712 | cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3); |
701 | cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2); | 713 | cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2); |
702 | cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1); | 714 | cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1); |
703 | cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0); | 715 | cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0); |
704 | } else | 716 | } else |
705 | cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0); | 717 | cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0); |
706 | } | 718 | } |
707 | 719 | ||
708 | static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host, | 720 | static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host, |
709 | struct mmc_command *cmd) | 721 | struct mmc_command *cmd) |
710 | { | 722 | { |
711 | cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12); | 723 | cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12); |
712 | } | 724 | } |
713 | 725 | ||
714 | static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, | 726 | static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, |
715 | struct mmc_request *mrq) | 727 | struct mmc_request *mrq) |
716 | { | 728 | { |
717 | struct mmc_data *data = mrq->data; | 729 | struct mmc_data *data = mrq->data; |
718 | struct mmc_command *cmd = mrq->cmd; | 730 | struct mmc_command *cmd = mrq->cmd; |
719 | u32 opc = cmd->opcode; | 731 | u32 opc = cmd->opcode; |
720 | u32 tmp = 0; | 732 | u32 tmp = 0; |
721 | 733 | ||
722 | /* Response Type check */ | 734 | /* Response Type check */ |
723 | switch (mmc_resp_type(cmd)) { | 735 | switch (mmc_resp_type(cmd)) { |
724 | case MMC_RSP_NONE: | 736 | case MMC_RSP_NONE: |
725 | tmp |= CMD_SET_RTYP_NO; | 737 | tmp |= CMD_SET_RTYP_NO; |
726 | break; | 738 | break; |
727 | case MMC_RSP_R1: | 739 | case MMC_RSP_R1: |
728 | case MMC_RSP_R1B: | 740 | case MMC_RSP_R1B: |
729 | case MMC_RSP_R3: | 741 | case MMC_RSP_R3: |
730 | tmp |= CMD_SET_RTYP_6B; | 742 | tmp |= CMD_SET_RTYP_6B; |
731 | break; | 743 | break; |
732 | case MMC_RSP_R2: | 744 | case MMC_RSP_R2: |
733 | tmp |= CMD_SET_RTYP_17B; | 745 | tmp |= CMD_SET_RTYP_17B; |
734 | break; | 746 | break; |
735 | default: | 747 | default: |
736 | dev_err(&host->pd->dev, "Unsupported response type.\n"); | 748 | dev_err(&host->pd->dev, "Unsupported response type.\n"); |
737 | break; | 749 | break; |
738 | } | 750 | } |
739 | switch (opc) { | 751 | switch (opc) { |
740 | /* RBSY */ | 752 | /* RBSY */ |
741 | case MMC_SWITCH: | 753 | case MMC_SWITCH: |
742 | case MMC_STOP_TRANSMISSION: | 754 | case MMC_STOP_TRANSMISSION: |
743 | case MMC_SET_WRITE_PROT: | 755 | case MMC_SET_WRITE_PROT: |
744 | case MMC_CLR_WRITE_PROT: | 756 | case MMC_CLR_WRITE_PROT: |
745 | case MMC_ERASE: | 757 | case MMC_ERASE: |
746 | tmp |= CMD_SET_RBSY; | 758 | tmp |= CMD_SET_RBSY; |
747 | break; | 759 | break; |
748 | } | 760 | } |
749 | /* WDAT / DATW */ | 761 | /* WDAT / DATW */ |
750 | if (data) { | 762 | if (data) { |
751 | tmp |= CMD_SET_WDAT; | 763 | tmp |= CMD_SET_WDAT; |
752 | switch (host->bus_width) { | 764 | switch (host->bus_width) { |
753 | case MMC_BUS_WIDTH_1: | 765 | case MMC_BUS_WIDTH_1: |
754 | tmp |= CMD_SET_DATW_1; | 766 | tmp |= CMD_SET_DATW_1; |
755 | break; | 767 | break; |
756 | case MMC_BUS_WIDTH_4: | 768 | case MMC_BUS_WIDTH_4: |
757 | tmp |= CMD_SET_DATW_4; | 769 | tmp |= CMD_SET_DATW_4; |
758 | break; | 770 | break; |
759 | case MMC_BUS_WIDTH_8: | 771 | case MMC_BUS_WIDTH_8: |
760 | tmp |= CMD_SET_DATW_8; | 772 | tmp |= CMD_SET_DATW_8; |
761 | break; | 773 | break; |
762 | default: | 774 | default: |
763 | dev_err(&host->pd->dev, "Unsupported bus width.\n"); | 775 | dev_err(&host->pd->dev, "Unsupported bus width.\n"); |
764 | break; | 776 | break; |
765 | } | 777 | } |
766 | } | 778 | } |
767 | /* DWEN */ | 779 | /* DWEN */ |
768 | if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) | 780 | if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) |
769 | tmp |= CMD_SET_DWEN; | 781 | tmp |= CMD_SET_DWEN; |
770 | /* CMLTE/CMD12EN */ | 782 | /* CMLTE/CMD12EN */ |
771 | if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) { | 783 | if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) { |
772 | tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN; | 784 | tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN; |
773 | sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET, | 785 | sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET, |
774 | data->blocks << 16); | 786 | data->blocks << 16); |
775 | } | 787 | } |
776 | /* RIDXC[1:0] check bits */ | 788 | /* RIDXC[1:0] check bits */ |
777 | if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID || | 789 | if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID || |
778 | opc == MMC_SEND_CSD || opc == MMC_SEND_CID) | 790 | opc == MMC_SEND_CSD || opc == MMC_SEND_CID) |
779 | tmp |= CMD_SET_RIDXC_BITS; | 791 | tmp |= CMD_SET_RIDXC_BITS; |
780 | /* RCRC7C[1:0] check bits */ | 792 | /* RCRC7C[1:0] check bits */ |
781 | if (opc == MMC_SEND_OP_COND) | 793 | if (opc == MMC_SEND_OP_COND) |
782 | tmp |= CMD_SET_CRC7C_BITS; | 794 | tmp |= CMD_SET_CRC7C_BITS; |
783 | /* RCRC7C[1:0] internal CRC7 */ | 795 | /* RCRC7C[1:0] internal CRC7 */ |
784 | if (opc == MMC_ALL_SEND_CID || | 796 | if (opc == MMC_ALL_SEND_CID || |
785 | opc == MMC_SEND_CSD || opc == MMC_SEND_CID) | 797 | opc == MMC_SEND_CSD || opc == MMC_SEND_CID) |
786 | tmp |= CMD_SET_CRC7C_INTERNAL; | 798 | tmp |= CMD_SET_CRC7C_INTERNAL; |
787 | 799 | ||
788 | return (opc << 24) | tmp; | 800 | return (opc << 24) | tmp; |
789 | } | 801 | } |
790 | 802 | ||
791 | static int sh_mmcif_data_trans(struct sh_mmcif_host *host, | 803 | static int sh_mmcif_data_trans(struct sh_mmcif_host *host, |
792 | struct mmc_request *mrq, u32 opc) | 804 | struct mmc_request *mrq, u32 opc) |
793 | { | 805 | { |
794 | switch (opc) { | 806 | switch (opc) { |
795 | case MMC_READ_MULTIPLE_BLOCK: | 807 | case MMC_READ_MULTIPLE_BLOCK: |
796 | sh_mmcif_multi_read(host, mrq); | 808 | sh_mmcif_multi_read(host, mrq); |
797 | return 0; | 809 | return 0; |
798 | case MMC_WRITE_MULTIPLE_BLOCK: | 810 | case MMC_WRITE_MULTIPLE_BLOCK: |
799 | sh_mmcif_multi_write(host, mrq); | 811 | sh_mmcif_multi_write(host, mrq); |
800 | return 0; | 812 | return 0; |
801 | case MMC_WRITE_BLOCK: | 813 | case MMC_WRITE_BLOCK: |
802 | sh_mmcif_single_write(host, mrq); | 814 | sh_mmcif_single_write(host, mrq); |
803 | return 0; | 815 | return 0; |
804 | case MMC_READ_SINGLE_BLOCK: | 816 | case MMC_READ_SINGLE_BLOCK: |
805 | case MMC_SEND_EXT_CSD: | 817 | case MMC_SEND_EXT_CSD: |
806 | sh_mmcif_single_read(host, mrq); | 818 | sh_mmcif_single_read(host, mrq); |
807 | return 0; | 819 | return 0; |
808 | default: | 820 | default: |
809 | dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc); | 821 | dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc); |
810 | return -EINVAL; | 822 | return -EINVAL; |
811 | } | 823 | } |
812 | } | 824 | } |
813 | 825 | ||
static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
                               struct mmc_request *mrq)
{
        struct mmc_command *cmd = mrq->cmd;
        u32 opc = cmd->opcode;
        u32 mask;

        switch (opc) {
        /* response busy check */
        case MMC_SWITCH:
        case MMC_STOP_TRANSMISSION:
        case MMC_SET_WRITE_PROT:
        case MMC_CLR_WRITE_PROT:
        case MMC_ERASE:
                mask = MASK_START_CMD | MASK_MRBSYE;
                break;
        default:
                mask = MASK_START_CMD | MASK_MCRSPE;
                break;
        }

        if (mrq->data) {
                sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
                sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
                                mrq->data->blksz);
        }
        opc = sh_mmcif_set_cmd(host, mrq);

        sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
        /* set arg */
        sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
        /* set cmd */
        sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

        host->wait_for = MMCIF_WAIT_FOR_CMD;
        schedule_delayed_work(&host->timeout_work, host->timeout);
}

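/*
 * Unmask the CMD12 completion interrupt matching the data direction of the
 * multi-block transfer being stopped; other opcodes cannot carry a stop
 * command, so they are flagged as an error.
 */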
static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
                              struct mmc_request *mrq)
{
        switch (mrq->cmd->opcode) {
        case MMC_READ_MULTIPLE_BLOCK:
                sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
                break;
        case MMC_WRITE_MULTIPLE_BLOCK:
                sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
                break;
        default:
                dev_err(&host->pd->dev, "unsupported stop cmd\n");
                mrq->stop->error = sh_mmcif_error_manage(host);
                return;
        }

        host->wait_for = MMCIF_WAIT_FOR_STOP;
        schedule_delayed_work(&host->timeout_work, host->timeout);
}

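/*
 * .request() handler: refuse new requests while the state machine is busy
 * (-EAGAIN), fail SD/SDIO-only opcodes with -ETIMEDOUT, as if no card had
 * answered, and otherwise hand the request to sh_mmcif_start_cmd().
 */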
static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct sh_mmcif_host *host = mmc_priv(mmc);
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (host->state != STATE_IDLE) {
                spin_unlock_irqrestore(&host->lock, flags);
                mrq->cmd->error = -EAGAIN;
                mmc_request_done(mmc, mrq);
                return;
        }

        host->state = STATE_REQUEST;
        spin_unlock_irqrestore(&host->lock, flags);

        switch (mrq->cmd->opcode) {
        /* MMCIF does not support SD/SDIO commands */
        case SD_IO_SEND_OP_COND:
        case MMC_APP_CMD:
                host->state = STATE_IDLE;
                mrq->cmd->error = -ETIMEDOUT;
                mmc_request_done(mmc, mrq);
                return;
        case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
                if (!mrq->data) {
                        /* send_if_cond command (not supported) */
                        host->state = STATE_IDLE;
                        mrq->cmd->error = -ETIMEDOUT;
                        mmc_request_done(mmc, mrq);
                        return;
                }
                break;
        default:
                break;
        }

        host->mrq = mrq;

        sh_mmcif_start_cmd(host, mrq);
}

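/*
 * .set_ios() handler: on MMC_POWER_UP try to grab DMA channels for the
 * newly inserted card; on power-off or a zero clock, stop the interface
 * clock, release DMA and drop the runtime-PM reference; otherwise (re)apply
 * power and program the requested clock rate. The bus width is latched for
 * sh_mmcif_set_cmd() to use.
 */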
static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct sh_mmcif_host *host = mmc_priv(mmc);
        struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (host->state != STATE_IDLE) {
                spin_unlock_irqrestore(&host->lock, flags);
                return;
        }

        host->state = STATE_IOS;
        spin_unlock_irqrestore(&host->lock, flags);

        if (ios->power_mode == MMC_POWER_UP) {
                if (!host->card_present) {
                        /* See if we also get DMA */
                        sh_mmcif_request_dma(host, host->pd->dev.platform_data);
                        host->card_present = true;
                }
        } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
                /* clock stop */
                sh_mmcif_clock_control(host, 0);
                if (ios->power_mode == MMC_POWER_OFF) {
                        if (host->card_present) {
                                sh_mmcif_release_dma(host);
                                host->card_present = false;
                        }
                }
                if (host->power) {
                        pm_runtime_put(&host->pd->dev);
                        host->power = false;
                        if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
                                p->down_pwr(host->pd);
                }
                host->state = STATE_IDLE;
                return;
        }

        if (ios->clock) {
                if (!host->power) {
                        if (p->set_pwr)
                                p->set_pwr(host->pd, ios->power_mode);
                        pm_runtime_get_sync(&host->pd->dev);
                        host->power = true;
                        sh_mmcif_sync_reset(host);
                }
                sh_mmcif_clock_control(host, ios->clock);
        }

        host->bus_width = ios->bus_width;
        host->state = STATE_IDLE;
}

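/*
 * .get_cd() handler: card detection is entirely board-specific, so defer to
 * the platform callback if one is provided.
 */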
static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
        struct sh_mmcif_host *host = mmc_priv(mmc);
        struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

        if (!p->get_cd)
                return -ENOSYS;
        else
                return p->get_cd(host->pd);
}

static struct mmc_host_ops sh_mmcif_ops = {
        .request        = sh_mmcif_request,
        .set_ios        = sh_mmcif_set_ios,
        .get_cd         = sh_mmcif_get_cd,
};

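/*
 * Bottom half of the command stage, running in the IRQ thread. Returns true
 * if a data phase follows (the caller keeps waiting), false if the request
 * is complete. For DMA transfers this sleeps on the DMA completion and
 * falls back to error handling on an error IRQ, a timeout, or a signal.
 */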
static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
        struct mmc_command *cmd = host->mrq->cmd;
        struct mmc_data *data = host->mrq->data;
        long time;

        if (host->sd_error) {
                switch (cmd->opcode) {
                case MMC_ALL_SEND_CID:
                case MMC_SELECT_CARD:
                case MMC_APP_CMD:
                        cmd->error = -ETIMEDOUT;
                        host->sd_error = false;
                        break;
                default:
                        cmd->error = sh_mmcif_error_manage(host);
                        dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n",
                                cmd->opcode, cmd->error);
                        break;
                }
                return false;
        }
        if (!(cmd->flags & MMC_RSP_PRESENT)) {
                cmd->error = 0;
                return false;
        }

        sh_mmcif_get_response(host, cmd);

        if (!data)
                return false;

        if (data->flags & MMC_DATA_READ) {
                if (host->chan_rx)
                        sh_mmcif_start_dma_rx(host);
        } else {
                if (host->chan_tx)
                        sh_mmcif_start_dma_tx(host);
        }

        if (!host->dma_active) {
                data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
                if (!data->error)
                        return true;
                return false;
        }

        /* Running in the IRQ thread, can sleep */
        time = wait_for_completion_interruptible_timeout(&host->dma_complete,
                                                         host->timeout);
        if (host->sd_error) {
                dev_err(host->mmc->parent,
                        "Error IRQ while waiting for DMA completion!\n");
                /* Woken up by an error IRQ: abort DMA */
                if (data->flags & MMC_DATA_READ)
                        dmaengine_terminate_all(host->chan_rx);
                else
                        dmaengine_terminate_all(host->chan_tx);
                data->error = sh_mmcif_error_manage(host);
        } else if (!time) {
                data->error = -ETIMEDOUT;
        } else if (time < 0) {
                data->error = time;
        }
        sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
                        BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
        host->dma_active = false;

        if (data->error)
                data->bytes_xfered = 0;

        return false;
}

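/*
 * Threaded IRQ handler: cancel the pending timeout, dispatch on the stage
 * the driver is waiting for, and either keep waiting (the stage handler
 * returned true), issue the stop command, or complete the request.
 */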
static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
        struct sh_mmcif_host *host = dev_id;
        struct mmc_request *mrq = host->mrq;
        struct mmc_data *data = mrq->data;

        cancel_delayed_work_sync(&host->timeout_work);

        /*
         * All handlers return true if processing continues, and false if the
         * request has to be completed - successfully or not
         */
        switch (host->wait_for) {
        case MMCIF_WAIT_FOR_REQUEST:
                /* We're too late, the timeout has already kicked in */
                return IRQ_HANDLED;
        case MMCIF_WAIT_FOR_CMD:
                if (sh_mmcif_end_cmd(host))
                        /* Wait for data */
                        return IRQ_HANDLED;
                break;
        case MMCIF_WAIT_FOR_MREAD:
                if (sh_mmcif_mread_block(host))
                        /* Wait for more data */
                        return IRQ_HANDLED;
                break;
        case MMCIF_WAIT_FOR_READ:
                if (sh_mmcif_read_block(host))
                        /* Wait for data end */
                        return IRQ_HANDLED;
                break;
        case MMCIF_WAIT_FOR_MWRITE:
                if (sh_mmcif_mwrite_block(host))
                        /* Wait for more data to write */
                        return IRQ_HANDLED;
                break;
        case MMCIF_WAIT_FOR_WRITE:
                if (sh_mmcif_write_block(host))
                        /* Wait for data end */
                        return IRQ_HANDLED;
                break;
        case MMCIF_WAIT_FOR_STOP:
                if (host->sd_error) {
                        mrq->stop->error = sh_mmcif_error_manage(host);
                        break;
                }
                sh_mmcif_get_cmd12response(host, mrq->stop);
                mrq->stop->error = 0;
                break;
        case MMCIF_WAIT_FOR_READ_END:
        case MMCIF_WAIT_FOR_WRITE_END:
                if (host->sd_error)
                        data->error = sh_mmcif_error_manage(host);
                break;
        default:
                BUG();
        }

        if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
                if (!mrq->cmd->error && data && !data->error)
                        data->bytes_xfered =
                                data->blocks * data->blksz;

                if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
                        sh_mmcif_stop_cmd(host, mrq);
                        if (!mrq->stop->error)
                                return IRQ_HANDLED;
                }
        }

        host->wait_for = MMCIF_WAIT_FOR_REQUEST;
        host->state = STATE_IDLE;
        host->mrq = NULL;
        mmc_request_done(host->mmc, mrq);

        return IRQ_HANDLED;
}

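/*
 * Hard IRQ handler: acknowledge one interrupt source per invocation, mask
 * it, and record error conditions. PIO transfers are then continued in the
 * IRQ thread; for DMA transfers only an error needs to wake the waiter.
 */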
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
        struct sh_mmcif_host *host = dev_id;
        u32 state;
        int err = 0;

        state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);

        if (state & INT_ERR_STS) {
                /* error interrupts - process first */
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
                err = 1;
        } else if (state & INT_RBSYE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT,
                                ~(INT_RBSYE | INT_CRSPE));
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
        } else if (state & INT_CRSPE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
        } else if (state & INT_BUFREN) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
        } else if (state & INT_BUFWEN) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
        } else if (state & INT_CMD12DRE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT,
                                ~(INT_CMD12DRE | INT_CMD12RBE |
                                  INT_CMD12CRE | INT_BUFRE));
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
        } else if (state & INT_BUFRE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
        } else if (state & INT_DTRANE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_DTRANE);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
        } else if (state & INT_CMD12RBE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT,
                                ~(INT_CMD12RBE | INT_CMD12CRE));
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
        } else {
                dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
                err = 1;
        }
        if (err) {
                host->sd_error = true;
                dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
        }
        if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
                if (!host->dma_active)
                        return IRQ_WAKE_THREAD;
                else if (host->sd_error)
                        mmcif_dma_complete(host);
        } else {
                dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
        }

        return IRQ_HANDLED;
}

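/*
 * Timeout work, scheduled by every top half: if the expected interrupt
 * never arrived, fail the stage that was pending, reset the state machine
 * and complete the request.
 */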
static void mmcif_timeout_work(struct work_struct *work)
{
        struct delayed_work *d = container_of(work, struct delayed_work, work);
        struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
        struct mmc_request *mrq = host->mrq;

        if (host->dying)
                /* Don't run after mmc_remove_host() */
                return;

        /*
         * Handle races with cancel_delayed_work(), unless
         * cancel_delayed_work_sync() is used
         */
        switch (host->wait_for) {
        case MMCIF_WAIT_FOR_CMD:
                mrq->cmd->error = sh_mmcif_error_manage(host);
                break;
        case MMCIF_WAIT_FOR_STOP:
                mrq->stop->error = sh_mmcif_error_manage(host);
                break;
        case MMCIF_WAIT_FOR_MREAD:
        case MMCIF_WAIT_FOR_MWRITE:
        case MMCIF_WAIT_FOR_READ:
        case MMCIF_WAIT_FOR_WRITE:
        case MMCIF_WAIT_FOR_READ_END:
        case MMCIF_WAIT_FOR_WRITE_END:
                mrq->data->error = sh_mmcif_error_manage(host);
                break;
        default:
                BUG();
        }

        host->state = STATE_IDLE;
        host->wait_for = MMCIF_WAIT_FOR_REQUEST;
        host->mrq = NULL;
        mmc_request_done(host->mmc, mrq);
}

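/*
 * Probe: map the register window, acquire and enable the interface clock,
 * derive the host frequency limits from the clock rate, register the two
 * (error and normal) threaded interrupt handlers and finally add the host.
 * DMA channels are not requested here - that happens on MMC_POWER_UP in
 * sh_mmcif_set_ios().
 *
 * A board would typically register this device with platform data along
 * these lines (hypothetical example, values made up):
 *
 *	static struct sh_mmcif_plat_data mmcif_pdata = {
 *		.ocr	= MMC_VDD_32_33 | MMC_VDD_33_34,
 *		.caps	= MMC_CAP_8_BIT_DATA,
 *	};
 */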
static int __devinit sh_mmcif_probe(struct platform_device *pdev)
{
        int ret = 0, irq[2];
        struct mmc_host *mmc;
        struct sh_mmcif_host *host;
        struct sh_mmcif_plat_data *pd;
        struct resource *res;
        void __iomem *reg;
        char clk_name[8];

        irq[0] = platform_get_irq(pdev, 0);
        irq[1] = platform_get_irq(pdev, 1);
        if (irq[0] < 0 || irq[1] < 0) {
                dev_err(&pdev->dev, "Get irq error\n");
                return -ENXIO;
        }
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "platform_get_resource error.\n");
                return -ENXIO;
        }
        reg = ioremap(res->start, resource_size(res));
        if (!reg) {
                dev_err(&pdev->dev, "ioremap error.\n");
                return -ENOMEM;
        }
        pd = pdev->dev.platform_data;
        if (!pd) {
                dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
                ret = -ENXIO;
                goto clean_up;
        }
        mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
        if (!mmc) {
                ret = -ENOMEM;
                goto clean_up;
        }
        host = mmc_priv(mmc);
        host->mmc = mmc;
        host->addr = reg;
        host->timeout = 1000;

        snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
        host->hclk = clk_get(&pdev->dev, clk_name);
        if (IS_ERR(host->hclk)) {
                dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
                ret = PTR_ERR(host->hclk);
                goto clean_up1;
        }
        clk_enable(host->hclk);
        host->clk = clk_get_rate(host->hclk);
        host->pd = pdev;

        spin_lock_init(&host->lock);

        mmc->ops = &sh_mmcif_ops;
        mmc->f_max = host->clk / 2;
        mmc->f_min = host->clk / 512;
        if (pd->ocr)
                mmc->ocr_avail = pd->ocr;
        mmc->caps = MMC_CAP_MMC_HIGHSPEED;
        if (pd->caps)
                mmc->caps |= pd->caps;
        mmc->max_segs = 32;
        mmc->max_blk_size = 512;
        mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
        mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
        mmc->max_seg_size = mmc->max_req_size;

        sh_mmcif_sync_reset(host);
        platform_set_drvdata(pdev, host);

        pm_runtime_enable(&pdev->dev);
        host->power = false;

        ret = pm_runtime_resume(&pdev->dev);
        if (ret < 0)
                goto clean_up2;

        INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);

        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

        ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host);
        if (ret) {
                dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
                goto clean_up3;
        }
        ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
        if (ret) {
                dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
                goto clean_up4;
        }

        ret = mmc_add_host(mmc);
        if (ret < 0)
                goto clean_up5;

        dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

        dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
        dev_dbg(&pdev->dev, "chip ver H'%04x\n",
                sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
        return ret;

clean_up5:
        free_irq(irq[1], host);
clean_up4:
        free_irq(irq[0], host);
clean_up3:
        pm_runtime_suspend(&pdev->dev);
clean_up2:
        pm_runtime_disable(&pdev->dev);
        clk_disable(host->hclk);
clean_up1:
        mmc_free_host(mmc);
clean_up:
        if (reg)
                iounmap(reg);
        return ret;
}

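/*
 * Remove: mark the host as dying so the timeout work bails out early, then
 * tear down in roughly the reverse order of probe. See the FIXME below
 * about the ordering of mmc_remove_host() vs. cancelling the work and
 * freeing the IRQs.
 */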
static int __devexit sh_mmcif_remove(struct platform_device *pdev)
{
        struct sh_mmcif_host *host = platform_get_drvdata(pdev);
        int irq[2];

        host->dying = true;
        pm_runtime_get_sync(&pdev->dev);

        dev_pm_qos_hide_latency_limit(&pdev->dev);

        mmc_remove_host(host->mmc);
        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

        /*
         * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
         * mmc_remove_host() call above. But swapping order doesn't help either
         * (a query on the linux-mmc mailing list didn't bring any replies).
         */
        cancel_delayed_work_sync(&host->timeout_work);

        if (host->addr)
                iounmap(host->addr);

        irq[0] = platform_get_irq(pdev, 0);
        irq[1] = platform_get_irq(pdev, 1);

        free_irq(irq[0], host);
        free_irq(irq[1], host);

        platform_set_drvdata(pdev, NULL);

        clk_disable(host->hclk);
        mmc_free_host(host->mmc);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        return 0;
}

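/*
 * System suspend/resume: suspend the MMC core first, then mask all MMCIF
 * interrupts and gate the interface clock; resume reverses the order.
 */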
#ifdef CONFIG_PM
static int sh_mmcif_suspend(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct sh_mmcif_host *host = platform_get_drvdata(pdev);
        int ret = mmc_suspend_host(host->mmc);

        if (!ret) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
                clk_disable(host->hclk);
        }

        return ret;
}

static int sh_mmcif_resume(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct sh_mmcif_host *host = platform_get_drvdata(pdev);

        clk_enable(host->hclk);

        return mmc_resume_host(host->mmc);
}
#else
#define sh_mmcif_suspend        NULL
#define sh_mmcif_resume         NULL
#endif /* CONFIG_PM */

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
        .suspend        = sh_mmcif_suspend,
        .resume         = sh_mmcif_resume,
};

static struct platform_driver sh_mmcif_driver = {
        .probe          = sh_mmcif_probe,
        .remove         = sh_mmcif_remove,