Commit 12b4fdb4f6bccb5459a2f75fbe0eab253bfceab4
Committed by
James Bottomley
1 parent
7d568652d3
Exists in
smarc-imx_3.14.28_1.0.0_ga
and in
1 other branch
[SCSI] ufs: add dme configuration primitives
Implements support for the GET and SET operations of the DME. These operations are used to configure the behavior of UniPro. Along with the basic operation, {Peer/AttrSetType} can be mixed in. Signed-off-by: Seungwon Jeon <tgih.jun@samsung.com> Reviewed-by: Subhash Jadavani <subhashj@codeaurora.org> Tested-by: Yaniv Gardi <ygardi@codeaurora.org> Signed-off-by: Santosh Y <santoshsy@gmail.com> Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Showing 3 changed files with 145 additions and 0 deletions Inline Diff
drivers/scsi/ufs/ufshcd.c
1 | /* | 1 | /* |
2 | * Universal Flash Storage Host controller driver Core | 2 | * Universal Flash Storage Host controller driver Core |
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/ufs/ufshcd.c | 4 | * This code is based on drivers/scsi/ufs/ufshcd.c |
5 | * Copyright (C) 2011-2013 Samsung India Software Operations | 5 | * Copyright (C) 2011-2013 Samsung India Software Operations |
6 | * | 6 | * |
7 | * Authors: | 7 | * Authors: |
8 | * Santosh Yaraganavi <santosh.sy@samsung.com> | 8 | * Santosh Yaraganavi <santosh.sy@samsung.com> |
9 | * Vinayak Holikatti <h.vinayak@samsung.com> | 9 | * Vinayak Holikatti <h.vinayak@samsung.com> |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or | 11 | * This program is free software; you can redistribute it and/or |
12 | * modify it under the terms of the GNU General Public License | 12 | * modify it under the terms of the GNU General Public License |
13 | * as published by the Free Software Foundation; either version 2 | 13 | * as published by the Free Software Foundation; either version 2 |
14 | * of the License, or (at your option) any later version. | 14 | * of the License, or (at your option) any later version. |
15 | * See the COPYING file in the top-level directory or visit | 15 | * See the COPYING file in the top-level directory or visit |
16 | * <http://www.gnu.org/licenses/gpl-2.0.html> | 16 | * <http://www.gnu.org/licenses/gpl-2.0.html> |
17 | * | 17 | * |
18 | * This program is distributed in the hope that it will be useful, | 18 | * This program is distributed in the hope that it will be useful, |
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
21 | * GNU General Public License for more details. | 21 | * GNU General Public License for more details. |
22 | * | 22 | * |
23 | * This program is provided "AS IS" and "WITH ALL FAULTS" and | 23 | * This program is provided "AS IS" and "WITH ALL FAULTS" and |
24 | * without warranty of any kind. You are solely responsible for | 24 | * without warranty of any kind. You are solely responsible for |
25 | * determining the appropriateness of using and distributing | 25 | * determining the appropriateness of using and distributing |
26 | * the program and assume all risks associated with your exercise | 26 | * the program and assume all risks associated with your exercise |
27 | * of rights with respect to the program, including but not limited | 27 | * of rights with respect to the program, including but not limited |
28 | * to infringement of third party rights, the risks and costs of | 28 | * to infringement of third party rights, the risks and costs of |
29 | * program errors, damage to or loss of data, programs or equipment, | 29 | * program errors, damage to or loss of data, programs or equipment, |
30 | * and unavailability or interruption of operations. Under no | 30 | * and unavailability or interruption of operations. Under no |
31 | * circumstances will the contributor of this Program be liable for | 31 | * circumstances will the contributor of this Program be liable for |
32 | * any damages of any kind arising from your use or distribution of | 32 | * any damages of any kind arising from your use or distribution of |
33 | * this program. | 33 | * this program. |
34 | */ | 34 | */ |
35 | 35 | ||
36 | #include <linux/async.h> | 36 | #include <linux/async.h> |
37 | 37 | ||
38 | #include "ufshcd.h" | 38 | #include "ufshcd.h" |
39 | 39 | ||
/* Interrupt sources the driver keeps enabled during normal operation */
#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 10
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 30 /* msec */

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

/* SCSI mid-layer host template limits advertised by this driver */
enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_MAX_LUNS		= 8,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};
83 | 83 | ||
84 | /* | 84 | /* |
85 | * ufshcd_wait_for_register - wait for register value to change | 85 | * ufshcd_wait_for_register - wait for register value to change |
86 | * @hba - per-adapter interface | 86 | * @hba - per-adapter interface |
87 | * @reg - mmio register offset | 87 | * @reg - mmio register offset |
88 | * @mask - mask to apply to read register value | 88 | * @mask - mask to apply to read register value |
89 | * @val - wait condition | 89 | * @val - wait condition |
90 | * @interval_us - polling interval in microsecs | 90 | * @interval_us - polling interval in microsecs |
91 | * @timeout_ms - timeout in millisecs | 91 | * @timeout_ms - timeout in millisecs |
92 | * | 92 | * |
93 | * Returns -ETIMEDOUT on error, zero on success | 93 | * Returns -ETIMEDOUT on error, zero on success |
94 | */ | 94 | */ |
95 | static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, | 95 | static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, |
96 | u32 val, unsigned long interval_us, unsigned long timeout_ms) | 96 | u32 val, unsigned long interval_us, unsigned long timeout_ms) |
97 | { | 97 | { |
98 | int err = 0; | 98 | int err = 0; |
99 | unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); | 99 | unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); |
100 | 100 | ||
101 | /* ignore bits that we don't intend to wait on */ | 101 | /* ignore bits that we don't intend to wait on */ |
102 | val = val & mask; | 102 | val = val & mask; |
103 | 103 | ||
104 | while ((ufshcd_readl(hba, reg) & mask) != val) { | 104 | while ((ufshcd_readl(hba, reg) & mask) != val) { |
105 | /* wakeup within 50us of expiry */ | 105 | /* wakeup within 50us of expiry */ |
106 | usleep_range(interval_us, interval_us + 50); | 106 | usleep_range(interval_us, interval_us + 50); |
107 | 107 | ||
108 | if (time_after(jiffies, timeout)) { | 108 | if (time_after(jiffies, timeout)) { |
109 | if ((ufshcd_readl(hba, reg) & mask) != val) | 109 | if ((ufshcd_readl(hba, reg) & mask) != val) |
110 | err = -ETIMEDOUT; | 110 | err = -ETIMEDOUT; |
111 | break; | 111 | break; |
112 | } | 112 | } |
113 | } | 113 | } |
114 | 114 | ||
115 | return err; | 115 | return err; |
116 | } | 116 | } |
117 | 117 | ||
118 | /** | 118 | /** |
119 | * ufshcd_get_intr_mask - Get the interrupt bit mask | 119 | * ufshcd_get_intr_mask - Get the interrupt bit mask |
120 | * @hba - Pointer to adapter instance | 120 | * @hba - Pointer to adapter instance |
121 | * | 121 | * |
122 | * Returns interrupt bit mask per version | 122 | * Returns interrupt bit mask per version |
123 | */ | 123 | */ |
124 | static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) | 124 | static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) |
125 | { | 125 | { |
126 | if (hba->ufs_version == UFSHCI_VERSION_10) | 126 | if (hba->ufs_version == UFSHCI_VERSION_10) |
127 | return INTERRUPT_MASK_ALL_VER_10; | 127 | return INTERRUPT_MASK_ALL_VER_10; |
128 | else | 128 | else |
129 | return INTERRUPT_MASK_ALL_VER_11; | 129 | return INTERRUPT_MASK_ALL_VER_11; |
130 | } | 130 | } |
131 | 131 | ||
132 | /** | 132 | /** |
133 | * ufshcd_get_ufs_version - Get the UFS version supported by the HBA | 133 | * ufshcd_get_ufs_version - Get the UFS version supported by the HBA |
134 | * @hba - Pointer to adapter instance | 134 | * @hba - Pointer to adapter instance |
135 | * | 135 | * |
136 | * Returns UFSHCI version supported by the controller | 136 | * Returns UFSHCI version supported by the controller |
137 | */ | 137 | */ |
138 | static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) | 138 | static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) |
139 | { | 139 | { |
140 | return ufshcd_readl(hba, REG_UFS_VERSION); | 140 | return ufshcd_readl(hba, REG_UFS_VERSION); |
141 | } | 141 | } |
142 | 142 | ||
143 | /** | 143 | /** |
144 | * ufshcd_is_device_present - Check if any device connected to | 144 | * ufshcd_is_device_present - Check if any device connected to |
145 | * the host controller | 145 | * the host controller |
146 | * @reg_hcs - host controller status register value | 146 | * @reg_hcs - host controller status register value |
147 | * | 147 | * |
148 | * Returns 1 if device present, 0 if no device detected | 148 | * Returns 1 if device present, 0 if no device detected |
149 | */ | 149 | */ |
150 | static inline int ufshcd_is_device_present(u32 reg_hcs) | 150 | static inline int ufshcd_is_device_present(u32 reg_hcs) |
151 | { | 151 | { |
152 | return (DEVICE_PRESENT & reg_hcs) ? 1 : 0; | 152 | return (DEVICE_PRESENT & reg_hcs) ? 1 : 0; |
153 | } | 153 | } |
154 | 154 | ||
155 | /** | 155 | /** |
156 | * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status | 156 | * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status |
157 | * @lrb: pointer to local command reference block | 157 | * @lrb: pointer to local command reference block |
158 | * | 158 | * |
159 | * This function is used to get the OCS field from UTRD | 159 | * This function is used to get the OCS field from UTRD |
160 | * Returns the OCS field in the UTRD | 160 | * Returns the OCS field in the UTRD |
161 | */ | 161 | */ |
162 | static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) | 162 | static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) |
163 | { | 163 | { |
164 | return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS; | 164 | return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS; |
165 | } | 165 | } |
166 | 166 | ||
167 | /** | 167 | /** |
168 | * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status | 168 | * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status |
169 | * @task_req_descp: pointer to utp_task_req_desc structure | 169 | * @task_req_descp: pointer to utp_task_req_desc structure |
170 | * | 170 | * |
171 | * This function is used to get the OCS field from UTMRD | 171 | * This function is used to get the OCS field from UTMRD |
172 | * Returns the OCS field in the UTMRD | 172 | * Returns the OCS field in the UTMRD |
173 | */ | 173 | */ |
174 | static inline int | 174 | static inline int |
175 | ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp) | 175 | ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp) |
176 | { | 176 | { |
177 | return task_req_descp->header.dword_2 & MASK_OCS; | 177 | return task_req_descp->header.dword_2 & MASK_OCS; |
178 | } | 178 | } |
179 | 179 | ||
180 | /** | 180 | /** |
181 | * ufshcd_get_tm_free_slot - get a free slot for task management request | 181 | * ufshcd_get_tm_free_slot - get a free slot for task management request |
182 | * @hba: per adapter instance | 182 | * @hba: per adapter instance |
183 | * | 183 | * |
184 | * Returns maximum number of task management request slots in case of | 184 | * Returns maximum number of task management request slots in case of |
185 | * task management queue full or returns the free slot number | 185 | * task management queue full or returns the free slot number |
186 | */ | 186 | */ |
187 | static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba) | 187 | static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba) |
188 | { | 188 | { |
189 | return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs); | 189 | return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs); |
190 | } | 190 | } |
191 | 191 | ||
192 | /** | 192 | /** |
193 | * ufshcd_utrl_clear - Clear a bit in UTRLCLR register | 193 | * ufshcd_utrl_clear - Clear a bit in UTRLCLR register |
194 | * @hba: per adapter instance | 194 | * @hba: per adapter instance |
195 | * @pos: position of the bit to be cleared | 195 | * @pos: position of the bit to be cleared |
196 | */ | 196 | */ |
197 | static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos) | 197 | static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos) |
198 | { | 198 | { |
199 | ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR); | 199 | ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR); |
200 | } | 200 | } |
201 | 201 | ||
202 | /** | 202 | /** |
203 | * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY | 203 | * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY |
204 | * @reg: Register value of host controller status | 204 | * @reg: Register value of host controller status |
205 | * | 205 | * |
206 | * Returns integer, 0 on Success and positive value if failed | 206 | * Returns integer, 0 on Success and positive value if failed |
207 | */ | 207 | */ |
208 | static inline int ufshcd_get_lists_status(u32 reg) | 208 | static inline int ufshcd_get_lists_status(u32 reg) |
209 | { | 209 | { |
210 | /* | 210 | /* |
211 | * The mask 0xFF is for the following HCS register bits | 211 | * The mask 0xFF is for the following HCS register bits |
212 | * Bit Description | 212 | * Bit Description |
213 | * 0 Device Present | 213 | * 0 Device Present |
214 | * 1 UTRLRDY | 214 | * 1 UTRLRDY |
215 | * 2 UTMRLRDY | 215 | * 2 UTMRLRDY |
216 | * 3 UCRDY | 216 | * 3 UCRDY |
217 | * 4 HEI | 217 | * 4 HEI |
218 | * 5 DEI | 218 | * 5 DEI |
219 | * 6-7 reserved | 219 | * 6-7 reserved |
220 | */ | 220 | */ |
221 | return (((reg) & (0xFF)) >> 1) ^ (0x07); | 221 | return (((reg) & (0xFF)) >> 1) ^ (0x07); |
222 | } | 222 | } |
223 | 223 | ||
224 | /** | 224 | /** |
225 | * ufshcd_get_uic_cmd_result - Get the UIC command result | 225 | * ufshcd_get_uic_cmd_result - Get the UIC command result |
226 | * @hba: Pointer to adapter instance | 226 | * @hba: Pointer to adapter instance |
227 | * | 227 | * |
228 | * This function gets the result of UIC command completion | 228 | * This function gets the result of UIC command completion |
229 | * Returns 0 on success, non zero value on error | 229 | * Returns 0 on success, non zero value on error |
230 | */ | 230 | */ |
231 | static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) | 231 | static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) |
232 | { | 232 | { |
233 | return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) & | 233 | return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) & |
234 | MASK_UIC_COMMAND_RESULT; | 234 | MASK_UIC_COMMAND_RESULT; |
235 | } | 235 | } |
236 | 236 | ||
237 | /** | 237 | /** |
238 | * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command | ||
239 | * @hba: Pointer to adapter instance | ||
240 | * | ||
241 | * This function gets UIC command argument3 | ||
242 | * Returns 0 on success, non zero value on error | ||
243 | */ | ||
244 | static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) | ||
245 | { | ||
246 | return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); | ||
247 | } | ||
248 | |||
249 | /** | ||
238 | * ufshcd_get_req_rsp - returns the TR response transaction type | 250 | * ufshcd_get_req_rsp - returns the TR response transaction type |
239 | * @ucd_rsp_ptr: pointer to response UPIU | 251 | * @ucd_rsp_ptr: pointer to response UPIU |
240 | */ | 252 | */ |
241 | static inline int | 253 | static inline int |
242 | ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr) | 254 | ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr) |
243 | { | 255 | { |
244 | return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24; | 256 | return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24; |
245 | } | 257 | } |
246 | 258 | ||
247 | /** | 259 | /** |
248 | * ufshcd_get_rsp_upiu_result - Get the result from response UPIU | 260 | * ufshcd_get_rsp_upiu_result - Get the result from response UPIU |
249 | * @ucd_rsp_ptr: pointer to response UPIU | 261 | * @ucd_rsp_ptr: pointer to response UPIU |
250 | * | 262 | * |
251 | * This function gets the response status and scsi_status from response UPIU | 263 | * This function gets the response status and scsi_status from response UPIU |
252 | * Returns the response result code. | 264 | * Returns the response result code. |
253 | */ | 265 | */ |
254 | static inline int | 266 | static inline int |
255 | ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr) | 267 | ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr) |
256 | { | 268 | { |
257 | return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT; | 269 | return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT; |
258 | } | 270 | } |
259 | 271 | ||
260 | /* | 272 | /* |
261 | * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length | 273 | * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length |
262 | * from response UPIU | 274 | * from response UPIU |
263 | * @ucd_rsp_ptr: pointer to response UPIU | 275 | * @ucd_rsp_ptr: pointer to response UPIU |
264 | * | 276 | * |
265 | * Return the data segment length. | 277 | * Return the data segment length. |
266 | */ | 278 | */ |
267 | static inline unsigned int | 279 | static inline unsigned int |
268 | ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr) | 280 | ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr) |
269 | { | 281 | { |
270 | return be32_to_cpu(ucd_rsp_ptr->header.dword_2) & | 282 | return be32_to_cpu(ucd_rsp_ptr->header.dword_2) & |
271 | MASK_RSP_UPIU_DATA_SEG_LEN; | 283 | MASK_RSP_UPIU_DATA_SEG_LEN; |
272 | } | 284 | } |
273 | 285 | ||
274 | /** | 286 | /** |
275 | * ufshcd_is_exception_event - Check if the device raised an exception event | 287 | * ufshcd_is_exception_event - Check if the device raised an exception event |
276 | * @ucd_rsp_ptr: pointer to response UPIU | 288 | * @ucd_rsp_ptr: pointer to response UPIU |
277 | * | 289 | * |
278 | * The function checks if the device raised an exception event indicated in | 290 | * The function checks if the device raised an exception event indicated in |
279 | * the Device Information field of response UPIU. | 291 | * the Device Information field of response UPIU. |
280 | * | 292 | * |
281 | * Returns true if exception is raised, false otherwise. | 293 | * Returns true if exception is raised, false otherwise. |
282 | */ | 294 | */ |
283 | static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr) | 295 | static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr) |
284 | { | 296 | { |
285 | return be32_to_cpu(ucd_rsp_ptr->header.dword_2) & | 297 | return be32_to_cpu(ucd_rsp_ptr->header.dword_2) & |
286 | MASK_RSP_EXCEPTION_EVENT ? true : false; | 298 | MASK_RSP_EXCEPTION_EVENT ? true : false; |
287 | } | 299 | } |
288 | 300 | ||
289 | /** | 301 | /** |
290 | * ufshcd_reset_intr_aggr - Reset interrupt aggregation values. | 302 | * ufshcd_reset_intr_aggr - Reset interrupt aggregation values. |
291 | * @hba: per adapter instance | 303 | * @hba: per adapter instance |
292 | */ | 304 | */ |
293 | static inline void | 305 | static inline void |
294 | ufshcd_reset_intr_aggr(struct ufs_hba *hba) | 306 | ufshcd_reset_intr_aggr(struct ufs_hba *hba) |
295 | { | 307 | { |
296 | ufshcd_writel(hba, INT_AGGR_ENABLE | | 308 | ufshcd_writel(hba, INT_AGGR_ENABLE | |
297 | INT_AGGR_COUNTER_AND_TIMER_RESET, | 309 | INT_AGGR_COUNTER_AND_TIMER_RESET, |
298 | REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); | 310 | REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); |
299 | } | 311 | } |
300 | 312 | ||
301 | /** | 313 | /** |
302 | * ufshcd_config_intr_aggr - Configure interrupt aggregation values. | 314 | * ufshcd_config_intr_aggr - Configure interrupt aggregation values. |
303 | * @hba: per adapter instance | 315 | * @hba: per adapter instance |
304 | * @cnt: Interrupt aggregation counter threshold | 316 | * @cnt: Interrupt aggregation counter threshold |
305 | * @tmout: Interrupt aggregation timeout value | 317 | * @tmout: Interrupt aggregation timeout value |
306 | */ | 318 | */ |
307 | static inline void | 319 | static inline void |
308 | ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout) | 320 | ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout) |
309 | { | 321 | { |
310 | ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | | 322 | ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | |
311 | INT_AGGR_COUNTER_THLD_VAL(cnt) | | 323 | INT_AGGR_COUNTER_THLD_VAL(cnt) | |
312 | INT_AGGR_TIMEOUT_VAL(tmout), | 324 | INT_AGGR_TIMEOUT_VAL(tmout), |
313 | REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); | 325 | REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); |
314 | } | 326 | } |
315 | 327 | ||
316 | /** | 328 | /** |
317 | * ufshcd_enable_run_stop_reg - Enable run-stop registers, | 329 | * ufshcd_enable_run_stop_reg - Enable run-stop registers, |
318 | * When run-stop registers are set to 1, it indicates the | 330 | * When run-stop registers are set to 1, it indicates the |
319 | * host controller that it can process the requests | 331 | * host controller that it can process the requests |
320 | * @hba: per adapter instance | 332 | * @hba: per adapter instance |
321 | */ | 333 | */ |
322 | static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) | 334 | static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) |
323 | { | 335 | { |
324 | ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, | 336 | ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, |
325 | REG_UTP_TASK_REQ_LIST_RUN_STOP); | 337 | REG_UTP_TASK_REQ_LIST_RUN_STOP); |
326 | ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, | 338 | ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, |
327 | REG_UTP_TRANSFER_REQ_LIST_RUN_STOP); | 339 | REG_UTP_TRANSFER_REQ_LIST_RUN_STOP); |
328 | } | 340 | } |
329 | 341 | ||
330 | /** | 342 | /** |
331 | * ufshcd_hba_start - Start controller initialization sequence | 343 | * ufshcd_hba_start - Start controller initialization sequence |
332 | * @hba: per adapter instance | 344 | * @hba: per adapter instance |
333 | */ | 345 | */ |
334 | static inline void ufshcd_hba_start(struct ufs_hba *hba) | 346 | static inline void ufshcd_hba_start(struct ufs_hba *hba) |
335 | { | 347 | { |
336 | ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE); | 348 | ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE); |
337 | } | 349 | } |
338 | 350 | ||
339 | /** | 351 | /** |
340 | * ufshcd_is_hba_active - Get controller state | 352 | * ufshcd_is_hba_active - Get controller state |
341 | * @hba: per adapter instance | 353 | * @hba: per adapter instance |
342 | * | 354 | * |
343 | * Returns zero if controller is active, 1 otherwise | 355 | * Returns zero if controller is active, 1 otherwise |
344 | */ | 356 | */ |
345 | static inline int ufshcd_is_hba_active(struct ufs_hba *hba) | 357 | static inline int ufshcd_is_hba_active(struct ufs_hba *hba) |
346 | { | 358 | { |
347 | return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1; | 359 | return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1; |
348 | } | 360 | } |
349 | 361 | ||
350 | /** | 362 | /** |
351 | * ufshcd_send_command - Send SCSI or device management commands | 363 | * ufshcd_send_command - Send SCSI or device management commands |
352 | * @hba: per adapter instance | 364 | * @hba: per adapter instance |
353 | * @task_tag: Task tag of the command | 365 | * @task_tag: Task tag of the command |
354 | */ | 366 | */ |
355 | static inline | 367 | static inline |
356 | void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) | 368 | void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) |
357 | { | 369 | { |
358 | __set_bit(task_tag, &hba->outstanding_reqs); | 370 | __set_bit(task_tag, &hba->outstanding_reqs); |
359 | ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); | 371 | ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); |
360 | } | 372 | } |
361 | 373 | ||
362 | /** | 374 | /** |
363 | * ufshcd_copy_sense_data - Copy sense data in case of check condition | 375 | * ufshcd_copy_sense_data - Copy sense data in case of check condition |
364 | * @lrb - pointer to local reference block | 376 | * @lrb - pointer to local reference block |
365 | */ | 377 | */ |
366 | static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) | 378 | static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) |
367 | { | 379 | { |
368 | int len; | 380 | int len; |
369 | if (lrbp->sense_buffer && | 381 | if (lrbp->sense_buffer && |
370 | ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) { | 382 | ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) { |
371 | len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); | 383 | len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); |
372 | memcpy(lrbp->sense_buffer, | 384 | memcpy(lrbp->sense_buffer, |
373 | lrbp->ucd_rsp_ptr->sr.sense_data, | 385 | lrbp->ucd_rsp_ptr->sr.sense_data, |
374 | min_t(int, len, SCSI_SENSE_BUFFERSIZE)); | 386 | min_t(int, len, SCSI_SENSE_BUFFERSIZE)); |
375 | } | 387 | } |
376 | } | 388 | } |
377 | 389 | ||
378 | /** | 390 | /** |
379 | * ufshcd_query_to_cpu() - formats the buffer to native cpu endian | 391 | * ufshcd_query_to_cpu() - formats the buffer to native cpu endian |
380 | * @response: upiu query response to convert | 392 | * @response: upiu query response to convert |
381 | */ | 393 | */ |
382 | static inline void ufshcd_query_to_cpu(struct utp_upiu_query *response) | 394 | static inline void ufshcd_query_to_cpu(struct utp_upiu_query *response) |
383 | { | 395 | { |
384 | response->length = be16_to_cpu(response->length); | 396 | response->length = be16_to_cpu(response->length); |
385 | response->value = be32_to_cpu(response->value); | 397 | response->value = be32_to_cpu(response->value); |
386 | } | 398 | } |
387 | 399 | ||
388 | /** | 400 | /** |
389 | * ufshcd_query_to_be() - formats the buffer to big endian | 401 | * ufshcd_query_to_be() - formats the buffer to big endian |
390 | * @request: upiu query request to convert | 402 | * @request: upiu query request to convert |
391 | */ | 403 | */ |
392 | static inline void ufshcd_query_to_be(struct utp_upiu_query *request) | 404 | static inline void ufshcd_query_to_be(struct utp_upiu_query *request) |
393 | { | 405 | { |
394 | request->length = cpu_to_be16(request->length); | 406 | request->length = cpu_to_be16(request->length); |
395 | request->value = cpu_to_be32(request->value); | 407 | request->value = cpu_to_be32(request->value); |
396 | } | 408 | } |
397 | 409 | ||
398 | /** | 410 | /** |
399 | * ufshcd_copy_query_response() - Copy the Query Response and the data | 411 | * ufshcd_copy_query_response() - Copy the Query Response and the data |
400 | * descriptor | 412 | * descriptor |
401 | * @hba: per adapter instance | 413 | * @hba: per adapter instance |
402 | * @lrb - pointer to local reference block | 414 | * @lrb - pointer to local reference block |
403 | */ | 415 | */ |
404 | static | 416 | static |
405 | void ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | 417 | void ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) |
406 | { | 418 | { |
407 | struct ufs_query_res *query_res = &hba->dev_cmd.query.response; | 419 | struct ufs_query_res *query_res = &hba->dev_cmd.query.response; |
408 | 420 | ||
409 | /* Get the UPIU response */ | 421 | /* Get the UPIU response */ |
410 | query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >> | 422 | query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >> |
411 | UPIU_RSP_CODE_OFFSET; | 423 | UPIU_RSP_CODE_OFFSET; |
412 | 424 | ||
413 | memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); | 425 | memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); |
414 | ufshcd_query_to_cpu(&query_res->upiu_res); | 426 | ufshcd_query_to_cpu(&query_res->upiu_res); |
415 | 427 | ||
416 | 428 | ||
417 | /* Get the descriptor */ | 429 | /* Get the descriptor */ |
418 | if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { | 430 | if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { |
419 | u8 *descp = (u8 *)&lrbp->ucd_rsp_ptr + | 431 | u8 *descp = (u8 *)&lrbp->ucd_rsp_ptr + |
420 | GENERAL_UPIU_REQUEST_SIZE; | 432 | GENERAL_UPIU_REQUEST_SIZE; |
421 | u16 len; | 433 | u16 len; |
422 | 434 | ||
423 | /* data segment length */ | 435 | /* data segment length */ |
424 | len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) & | 436 | len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) & |
425 | MASK_QUERY_DATA_SEG_LEN; | 437 | MASK_QUERY_DATA_SEG_LEN; |
426 | 438 | ||
427 | memcpy(hba->dev_cmd.query.descriptor, descp, | 439 | memcpy(hba->dev_cmd.query.descriptor, descp, |
428 | min_t(u16, len, QUERY_DESC_MAX_SIZE)); | 440 | min_t(u16, len, QUERY_DESC_MAX_SIZE)); |
429 | } | 441 | } |
430 | } | 442 | } |
431 | 443 | ||
432 | /** | 444 | /** |
433 | * ufshcd_hba_capabilities - Read controller capabilities | 445 | * ufshcd_hba_capabilities - Read controller capabilities |
434 | * @hba: per adapter instance | 446 | * @hba: per adapter instance |
435 | */ | 447 | */ |
436 | static inline void ufshcd_hba_capabilities(struct ufs_hba *hba) | 448 | static inline void ufshcd_hba_capabilities(struct ufs_hba *hba) |
437 | { | 449 | { |
438 | hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); | 450 | hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); |
439 | 451 | ||
440 | /* nutrs and nutmrs are 0 based values */ | 452 | /* nutrs and nutmrs are 0 based values */ |
441 | hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; | 453 | hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; |
442 | hba->nutmrs = | 454 | hba->nutmrs = |
443 | ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; | 455 | ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; |
444 | } | 456 | } |
445 | 457 | ||
446 | /** | 458 | /** |
447 | * ufshcd_ready_for_uic_cmd - Check if controller is ready | 459 | * ufshcd_ready_for_uic_cmd - Check if controller is ready |
448 | * to accept UIC commands | 460 | * to accept UIC commands |
449 | * @hba: per adapter instance | 461 | * @hba: per adapter instance |
450 | * Return true on success, else false | 462 | * Return true on success, else false |
451 | */ | 463 | */ |
452 | static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) | 464 | static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) |
453 | { | 465 | { |
454 | if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY) | 466 | if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY) |
455 | return true; | 467 | return true; |
456 | else | 468 | else |
457 | return false; | 469 | return false; |
458 | } | 470 | } |
459 | 471 | ||
/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	/* Only one UIC command may be outstanding at a time */
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/*
	 * Write UIC Cmd: the arguments are programmed first; writing the
	 * opcode appears to be what issues the command to the controller
	 * (NOTE(review): inferred from the write ordering here -- confirm
	 * against the UFSHCI register description).
	 */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}
483 | 495 | ||
/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	/*
	 * On completion uic_cmd->argument2 carries the command result;
	 * 0 means success, any other masked value is the failure code.
	 */
	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	/*
	 * Clear active_uic_cmd under the host lock -- presumably this
	 * serializes against the completion/interrupt path that signals
	 * uic_cmd->done (NOTE(review): confirm against the ISR code).
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}
510 | 522 | ||
/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Identical to ufshcd_send_uic_cmd() except mutex. Must be called
 * with mutex held.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	/* Bail out early if the controller cannot take a UIC command now */
	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	/* Completion must be initialized before the command can finish */
	init_completion(&uic_cmd->done);

	/* Dispatch (register writes) is done under the host lock */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_dispatch_uic_cmd(hba, uic_cmd);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	return ret;
}
542 | 554 | ||
543 | /** | 555 | /** |
544 | * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result | 556 | * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result |
545 | * @hba: per adapter instance | 557 | * @hba: per adapter instance |
546 | * @uic_cmd: UIC command | 558 | * @uic_cmd: UIC command |
547 | * | 559 | * |
548 | * Returns 0 only if success. | 560 | * Returns 0 only if success. |
549 | */ | 561 | */ |
550 | static int | 562 | static int |
551 | ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) | 563 | ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) |
552 | { | 564 | { |
553 | int ret; | 565 | int ret; |
554 | 566 | ||
555 | mutex_lock(&hba->uic_cmd_mutex); | 567 | mutex_lock(&hba->uic_cmd_mutex); |
556 | ret = __ufshcd_send_uic_cmd(hba, uic_cmd); | 568 | ret = __ufshcd_send_uic_cmd(hba, uic_cmd); |
557 | mutex_unlock(&hba->uic_cmd_mutex); | 569 | mutex_unlock(&hba->uic_cmd_mutex); |
558 | 570 | ||
559 | return ret; | 571 | return ret; |
560 | } | 572 | } |
561 | 573 | ||
562 | /** | 574 | /** |
563 | * ufshcd_map_sg - Map scatter-gather list to prdt | 575 | * ufshcd_map_sg - Map scatter-gather list to prdt |
564 | * @lrbp - pointer to local reference block | 576 | * @lrbp - pointer to local reference block |
565 | * | 577 | * |
566 | * Returns 0 in case of success, non-zero value in case of failure | 578 | * Returns 0 in case of success, non-zero value in case of failure |
567 | */ | 579 | */ |
568 | static int ufshcd_map_sg(struct ufshcd_lrb *lrbp) | 580 | static int ufshcd_map_sg(struct ufshcd_lrb *lrbp) |
569 | { | 581 | { |
570 | struct ufshcd_sg_entry *prd_table; | 582 | struct ufshcd_sg_entry *prd_table; |
571 | struct scatterlist *sg; | 583 | struct scatterlist *sg; |
572 | struct scsi_cmnd *cmd; | 584 | struct scsi_cmnd *cmd; |
573 | int sg_segments; | 585 | int sg_segments; |
574 | int i; | 586 | int i; |
575 | 587 | ||
576 | cmd = lrbp->cmd; | 588 | cmd = lrbp->cmd; |
577 | sg_segments = scsi_dma_map(cmd); | 589 | sg_segments = scsi_dma_map(cmd); |
578 | if (sg_segments < 0) | 590 | if (sg_segments < 0) |
579 | return sg_segments; | 591 | return sg_segments; |
580 | 592 | ||
581 | if (sg_segments) { | 593 | if (sg_segments) { |
582 | lrbp->utr_descriptor_ptr->prd_table_length = | 594 | lrbp->utr_descriptor_ptr->prd_table_length = |
583 | cpu_to_le16((u16) (sg_segments)); | 595 | cpu_to_le16((u16) (sg_segments)); |
584 | 596 | ||
585 | prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr; | 597 | prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr; |
586 | 598 | ||
587 | scsi_for_each_sg(cmd, sg, sg_segments, i) { | 599 | scsi_for_each_sg(cmd, sg, sg_segments, i) { |
588 | prd_table[i].size = | 600 | prd_table[i].size = |
589 | cpu_to_le32(((u32) sg_dma_len(sg))-1); | 601 | cpu_to_le32(((u32) sg_dma_len(sg))-1); |
590 | prd_table[i].base_addr = | 602 | prd_table[i].base_addr = |
591 | cpu_to_le32(lower_32_bits(sg->dma_address)); | 603 | cpu_to_le32(lower_32_bits(sg->dma_address)); |
592 | prd_table[i].upper_addr = | 604 | prd_table[i].upper_addr = |
593 | cpu_to_le32(upper_32_bits(sg->dma_address)); | 605 | cpu_to_le32(upper_32_bits(sg->dma_address)); |
594 | } | 606 | } |
595 | } else { | 607 | } else { |
596 | lrbp->utr_descriptor_ptr->prd_table_length = 0; | 608 | lrbp->utr_descriptor_ptr->prd_table_length = 0; |
597 | } | 609 | } |
598 | 610 | ||
599 | return 0; | 611 | return 0; |
600 | } | 612 | } |
601 | 613 | ||
/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		/*
		 * Version 1.0: keep the bits covered by the RW mask and OR
		 * in only the requested bits that are not already set
		 * ((set ^ intrs) & intrs == intrs & ~set).
		 * NOTE(review): previously-enabled bits outside both the RW
		 * mask and @intrs are dropped by this expression -- looks
		 * intentional for v1.0 semantics, confirm against the
		 * UFSHCI 1.0 interrupt enable register definition.
		 */
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
621 | 633 | ||
/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		/*
		 * Version 1.0: clear the requested bits inside the RW mask,
		 * then recombine with the requested bits that lie outside
		 * the RW mask (NOTE(review): the second term keeps, rather
		 * than clears, non-RW bits of @intrs that were set --
		 * confirm against the UFSHCI 1.0 register semantics).
		 */
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);

	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
643 | 655 | ||
644 | /** | 656 | /** |
645 | * ufshcd_prepare_req_desc_hdr() - Fills the requests header | 657 | * ufshcd_prepare_req_desc_hdr() - Fills the requests header |
646 | * descriptor according to request | 658 | * descriptor according to request |
647 | * @lrbp: pointer to local reference block | 659 | * @lrbp: pointer to local reference block |
648 | * @upiu_flags: flags required in the header | 660 | * @upiu_flags: flags required in the header |
649 | * @cmd_dir: requests data direction | 661 | * @cmd_dir: requests data direction |
650 | */ | 662 | */ |
651 | static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, | 663 | static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, |
652 | u32 *upiu_flags, enum dma_data_direction cmd_dir) | 664 | u32 *upiu_flags, enum dma_data_direction cmd_dir) |
653 | { | 665 | { |
654 | struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr; | 666 | struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr; |
655 | u32 data_direction; | 667 | u32 data_direction; |
656 | u32 dword_0; | 668 | u32 dword_0; |
657 | 669 | ||
658 | if (cmd_dir == DMA_FROM_DEVICE) { | 670 | if (cmd_dir == DMA_FROM_DEVICE) { |
659 | data_direction = UTP_DEVICE_TO_HOST; | 671 | data_direction = UTP_DEVICE_TO_HOST; |
660 | *upiu_flags = UPIU_CMD_FLAGS_READ; | 672 | *upiu_flags = UPIU_CMD_FLAGS_READ; |
661 | } else if (cmd_dir == DMA_TO_DEVICE) { | 673 | } else if (cmd_dir == DMA_TO_DEVICE) { |
662 | data_direction = UTP_HOST_TO_DEVICE; | 674 | data_direction = UTP_HOST_TO_DEVICE; |
663 | *upiu_flags = UPIU_CMD_FLAGS_WRITE; | 675 | *upiu_flags = UPIU_CMD_FLAGS_WRITE; |
664 | } else { | 676 | } else { |
665 | data_direction = UTP_NO_DATA_TRANSFER; | 677 | data_direction = UTP_NO_DATA_TRANSFER; |
666 | *upiu_flags = UPIU_CMD_FLAGS_NONE; | 678 | *upiu_flags = UPIU_CMD_FLAGS_NONE; |
667 | } | 679 | } |
668 | 680 | ||
669 | dword_0 = data_direction | (lrbp->command_type | 681 | dword_0 = data_direction | (lrbp->command_type |
670 | << UPIU_COMMAND_TYPE_OFFSET); | 682 | << UPIU_COMMAND_TYPE_OFFSET); |
671 | if (lrbp->intr_cmd) | 683 | if (lrbp->intr_cmd) |
672 | dword_0 |= UTP_REQ_DESC_INT_CMD; | 684 | dword_0 |= UTP_REQ_DESC_INT_CMD; |
673 | 685 | ||
674 | /* Transfer request descriptor header fields */ | 686 | /* Transfer request descriptor header fields */ |
675 | req_desc->header.dword_0 = cpu_to_le32(dword_0); | 687 | req_desc->header.dword_0 = cpu_to_le32(dword_0); |
676 | 688 | ||
677 | /* | 689 | /* |
678 | * assigning invalid value for command status. Controller | 690 | * assigning invalid value for command status. Controller |
679 | * updates OCS on command completion, with the command | 691 | * updates OCS on command completion, with the command |
680 | * status | 692 | * status |
681 | */ | 693 | */ |
682 | req_desc->header.dword_2 = | 694 | req_desc->header.dword_2 = |
683 | cpu_to_le32(OCS_INVALID_COMMAND_STATUS); | 695 | cpu_to_le32(OCS_INVALID_COMMAND_STATUS); |
684 | } | 696 | } |
685 | 697 | ||
686 | /** | 698 | /** |
687 | * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc, | 699 | * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc, |
688 | * for scsi commands | 700 | * for scsi commands |
689 | * @lrbp - local reference block pointer | 701 | * @lrbp - local reference block pointer |
690 | * @upiu_flags - flags | 702 | * @upiu_flags - flags |
691 | */ | 703 | */ |
692 | static | 704 | static |
693 | void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags) | 705 | void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags) |
694 | { | 706 | { |
695 | struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; | 707 | struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; |
696 | 708 | ||
697 | /* command descriptor fields */ | 709 | /* command descriptor fields */ |
698 | ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( | 710 | ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( |
699 | UPIU_TRANSACTION_COMMAND, upiu_flags, | 711 | UPIU_TRANSACTION_COMMAND, upiu_flags, |
700 | lrbp->lun, lrbp->task_tag); | 712 | lrbp->lun, lrbp->task_tag); |
701 | ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD( | 713 | ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD( |
702 | UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0); | 714 | UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0); |
703 | 715 | ||
704 | /* Total EHS length and Data segment length will be zero */ | 716 | /* Total EHS length and Data segment length will be zero */ |
705 | ucd_req_ptr->header.dword_2 = 0; | 717 | ucd_req_ptr->header.dword_2 = 0; |
706 | 718 | ||
707 | ucd_req_ptr->sc.exp_data_transfer_len = | 719 | ucd_req_ptr->sc.exp_data_transfer_len = |
708 | cpu_to_be32(lrbp->cmd->sdb.length); | 720 | cpu_to_be32(lrbp->cmd->sdb.length); |
709 | 721 | ||
710 | memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, | 722 | memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, |
711 | (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE))); | 723 | (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE))); |
712 | } | 724 | } |
713 | 725 | ||
/**
 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
 * for query requests
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = query->request.upiu_req.length;
	/* descriptor payload sits right after the general request header */
	u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;

	/* Query request header */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
			lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
			0, query->request.query_func, 0, 0);

	/* Data segment length */
	ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
			0, 0, len >> 8, (u8)len);

	/*
	 * Copy the Query Request buffer as is, then byte-swap in place:
	 * the order matters -- ufshcd_query_to_be() must run on the copy,
	 * not on the CPU-endian original.
	 */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);
	ufshcd_query_to_be(&ucd_req_ptr->qr);

	/* Copy the Descriptor (write-descriptor requests only) */
	if ((len > 0) && (query->request.upiu_req.opcode ==
			UPIU_QUERY_OPCODE_WRITE_DESC)) {
		memcpy(descp, query->descriptor,
				min_t(u16, len, QUERY_DESC_MAX_SIZE));
	}
}
752 | 764 | ||
753 | static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) | 765 | static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) |
754 | { | 766 | { |
755 | struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; | 767 | struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; |
756 | 768 | ||
757 | memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req)); | 769 | memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req)); |
758 | 770 | ||
759 | /* command descriptor fields */ | 771 | /* command descriptor fields */ |
760 | ucd_req_ptr->header.dword_0 = | 772 | ucd_req_ptr->header.dword_0 = |
761 | UPIU_HEADER_DWORD( | 773 | UPIU_HEADER_DWORD( |
762 | UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag); | 774 | UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag); |
763 | } | 775 | } |
764 | 776 | ||
765 | /** | 777 | /** |
766 | * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU) | 778 | * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU) |
767 | * @hba - per adapter instance | 779 | * @hba - per adapter instance |
768 | * @lrb - pointer to local reference block | 780 | * @lrb - pointer to local reference block |
769 | */ | 781 | */ |
770 | static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | 782 | static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) |
771 | { | 783 | { |
772 | u32 upiu_flags; | 784 | u32 upiu_flags; |
773 | int ret = 0; | 785 | int ret = 0; |
774 | 786 | ||
775 | switch (lrbp->command_type) { | 787 | switch (lrbp->command_type) { |
776 | case UTP_CMD_TYPE_SCSI: | 788 | case UTP_CMD_TYPE_SCSI: |
777 | if (likely(lrbp->cmd)) { | 789 | if (likely(lrbp->cmd)) { |
778 | ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, | 790 | ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, |
779 | lrbp->cmd->sc_data_direction); | 791 | lrbp->cmd->sc_data_direction); |
780 | ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags); | 792 | ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags); |
781 | } else { | 793 | } else { |
782 | ret = -EINVAL; | 794 | ret = -EINVAL; |
783 | } | 795 | } |
784 | break; | 796 | break; |
785 | case UTP_CMD_TYPE_DEV_MANAGE: | 797 | case UTP_CMD_TYPE_DEV_MANAGE: |
786 | ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE); | 798 | ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE); |
787 | if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) | 799 | if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) |
788 | ufshcd_prepare_utp_query_req_upiu( | 800 | ufshcd_prepare_utp_query_req_upiu( |
789 | hba, lrbp, upiu_flags); | 801 | hba, lrbp, upiu_flags); |
790 | else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) | 802 | else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) |
791 | ufshcd_prepare_utp_nop_upiu(lrbp); | 803 | ufshcd_prepare_utp_nop_upiu(lrbp); |
792 | else | 804 | else |
793 | ret = -EINVAL; | 805 | ret = -EINVAL; |
794 | break; | 806 | break; |
795 | case UTP_CMD_TYPE_UFS: | 807 | case UTP_CMD_TYPE_UFS: |
796 | /* For UFS native command implementation */ | 808 | /* For UFS native command implementation */ |
797 | ret = -ENOTSUPP; | 809 | ret = -ENOTSUPP; |
798 | dev_err(hba->dev, "%s: UFS native command are not supported\n", | 810 | dev_err(hba->dev, "%s: UFS native command are not supported\n", |
799 | __func__); | 811 | __func__); |
800 | break; | 812 | break; |
801 | default: | 813 | default: |
802 | ret = -ENOTSUPP; | 814 | ret = -ENOTSUPP; |
803 | dev_err(hba->dev, "%s: unknown command type: 0x%x\n", | 815 | dev_err(hba->dev, "%s: unknown command type: 0x%x\n", |
804 | __func__, lrbp->command_type); | 816 | __func__, lrbp->command_type); |
805 | break; | 817 | break; |
806 | } /* end of switch */ | 818 | } /* end of switch */ |
807 | 819 | ||
808 | return ret; | 820 | return ret; |
809 | } | 821 | } |
810 | 822 | ||
811 | /** | 823 | /** |
812 | * ufshcd_queuecommand - main entry point for SCSI requests | 824 | * ufshcd_queuecommand - main entry point for SCSI requests |
813 | * @cmd: command from SCSI Midlayer | 825 | * @cmd: command from SCSI Midlayer |
814 | * @done: call back function | 826 | * @done: call back function |
815 | * | 827 | * |
816 | * Returns 0 for success, non-zero in case of failure | 828 | * Returns 0 for success, non-zero in case of failure |
817 | */ | 829 | */ |
818 | static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) | 830 | static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) |
819 | { | 831 | { |
820 | struct ufshcd_lrb *lrbp; | 832 | struct ufshcd_lrb *lrbp; |
821 | struct ufs_hba *hba; | 833 | struct ufs_hba *hba; |
822 | unsigned long flags; | 834 | unsigned long flags; |
823 | int tag; | 835 | int tag; |
824 | int err = 0; | 836 | int err = 0; |
825 | 837 | ||
826 | hba = shost_priv(host); | 838 | hba = shost_priv(host); |
827 | 839 | ||
828 | tag = cmd->request->tag; | 840 | tag = cmd->request->tag; |
829 | 841 | ||
830 | if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) { | 842 | if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) { |
831 | err = SCSI_MLQUEUE_HOST_BUSY; | 843 | err = SCSI_MLQUEUE_HOST_BUSY; |
832 | goto out; | 844 | goto out; |
833 | } | 845 | } |
834 | 846 | ||
835 | /* acquire the tag to make sure device cmds don't use it */ | 847 | /* acquire the tag to make sure device cmds don't use it */ |
836 | if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { | 848 | if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { |
837 | /* | 849 | /* |
838 | * Dev manage command in progress, requeue the command. | 850 | * Dev manage command in progress, requeue the command. |
839 | * Requeuing the command helps in cases where the request *may* | 851 | * Requeuing the command helps in cases where the request *may* |
840 | * find different tag instead of waiting for dev manage command | 852 | * find different tag instead of waiting for dev manage command |
841 | * completion. | 853 | * completion. |
842 | */ | 854 | */ |
843 | err = SCSI_MLQUEUE_HOST_BUSY; | 855 | err = SCSI_MLQUEUE_HOST_BUSY; |
844 | goto out; | 856 | goto out; |
845 | } | 857 | } |
846 | 858 | ||
847 | lrbp = &hba->lrb[tag]; | 859 | lrbp = &hba->lrb[tag]; |
848 | 860 | ||
849 | WARN_ON(lrbp->cmd); | 861 | WARN_ON(lrbp->cmd); |
850 | lrbp->cmd = cmd; | 862 | lrbp->cmd = cmd; |
851 | lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE; | 863 | lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE; |
852 | lrbp->sense_buffer = cmd->sense_buffer; | 864 | lrbp->sense_buffer = cmd->sense_buffer; |
853 | lrbp->task_tag = tag; | 865 | lrbp->task_tag = tag; |
854 | lrbp->lun = cmd->device->lun; | 866 | lrbp->lun = cmd->device->lun; |
855 | lrbp->intr_cmd = false; | 867 | lrbp->intr_cmd = false; |
856 | lrbp->command_type = UTP_CMD_TYPE_SCSI; | 868 | lrbp->command_type = UTP_CMD_TYPE_SCSI; |
857 | 869 | ||
858 | /* form UPIU before issuing the command */ | 870 | /* form UPIU before issuing the command */ |
859 | ufshcd_compose_upiu(hba, lrbp); | 871 | ufshcd_compose_upiu(hba, lrbp); |
860 | err = ufshcd_map_sg(lrbp); | 872 | err = ufshcd_map_sg(lrbp); |
861 | if (err) { | 873 | if (err) { |
862 | lrbp->cmd = NULL; | 874 | lrbp->cmd = NULL; |
863 | clear_bit_unlock(tag, &hba->lrb_in_use); | 875 | clear_bit_unlock(tag, &hba->lrb_in_use); |
864 | goto out; | 876 | goto out; |
865 | } | 877 | } |
866 | 878 | ||
867 | /* issue command to the controller */ | 879 | /* issue command to the controller */ |
868 | spin_lock_irqsave(hba->host->host_lock, flags); | 880 | spin_lock_irqsave(hba->host->host_lock, flags); |
869 | ufshcd_send_command(hba, tag); | 881 | ufshcd_send_command(hba, tag); |
870 | spin_unlock_irqrestore(hba->host->host_lock, flags); | 882 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
871 | out: | 883 | out: |
872 | return err; | 884 | return err; |
873 | } | 885 | } |
874 | 886 | ||
875 | static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, | 887 | static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, |
876 | struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag) | 888 | struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag) |
877 | { | 889 | { |
878 | lrbp->cmd = NULL; | 890 | lrbp->cmd = NULL; |
879 | lrbp->sense_bufflen = 0; | 891 | lrbp->sense_bufflen = 0; |
880 | lrbp->sense_buffer = NULL; | 892 | lrbp->sense_buffer = NULL; |
881 | lrbp->task_tag = tag; | 893 | lrbp->task_tag = tag; |
882 | lrbp->lun = 0; /* device management cmd is not specific to any LUN */ | 894 | lrbp->lun = 0; /* device management cmd is not specific to any LUN */ |
883 | lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; | 895 | lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; |
884 | lrbp->intr_cmd = true; /* No interrupt aggregation */ | 896 | lrbp->intr_cmd = true; /* No interrupt aggregation */ |
885 | hba->dev_cmd.type = cmd_type; | 897 | hba->dev_cmd.type = cmd_type; |
886 | 898 | ||
887 | return ufshcd_compose_upiu(hba, lrbp); | 899 | return ufshcd_compose_upiu(hba, lrbp); |
888 | } | 900 | } |
889 | 901 | ||
/**
 * ufshcd_clear_cmd - clear a pending transfer request from the controller
 * @hba: per adapter instance
 * @tag: transfer request slot to clear
 *
 * Returns 0 on success, error from ufshcd_wait_for_register() otherwise.
 */
static int
ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	unsigned long flags;
	u32 mask = 1 << tag;

	/* clear outstanding transaction before retry */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TRANSFER_REQ_DOOR_BELL,
			mask, ~mask, 1000, 1000);

	return err;
}
912 | 924 | ||
913 | /** | 925 | /** |
914 | * ufshcd_dev_cmd_completion() - handles device management command responses | 926 | * ufshcd_dev_cmd_completion() - handles device management command responses |
915 | * @hba: per adapter instance | 927 | * @hba: per adapter instance |
916 | * @lrbp: pointer to local reference block | 928 | * @lrbp: pointer to local reference block |
917 | */ | 929 | */ |
918 | static int | 930 | static int |
919 | ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | 931 | ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) |
920 | { | 932 | { |
921 | int resp; | 933 | int resp; |
922 | int err = 0; | 934 | int err = 0; |
923 | 935 | ||
924 | resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); | 936 | resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); |
925 | 937 | ||
926 | switch (resp) { | 938 | switch (resp) { |
927 | case UPIU_TRANSACTION_NOP_IN: | 939 | case UPIU_TRANSACTION_NOP_IN: |
928 | if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) { | 940 | if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) { |
929 | err = -EINVAL; | 941 | err = -EINVAL; |
930 | dev_err(hba->dev, "%s: unexpected response %x\n", | 942 | dev_err(hba->dev, "%s: unexpected response %x\n", |
931 | __func__, resp); | 943 | __func__, resp); |
932 | } | 944 | } |
933 | break; | 945 | break; |
934 | case UPIU_TRANSACTION_QUERY_RSP: | 946 | case UPIU_TRANSACTION_QUERY_RSP: |
935 | ufshcd_copy_query_response(hba, lrbp); | 947 | ufshcd_copy_query_response(hba, lrbp); |
936 | break; | 948 | break; |
937 | case UPIU_TRANSACTION_REJECT_UPIU: | 949 | case UPIU_TRANSACTION_REJECT_UPIU: |
938 | /* TODO: handle Reject UPIU Response */ | 950 | /* TODO: handle Reject UPIU Response */ |
939 | err = -EPERM; | 951 | err = -EPERM; |
940 | dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", | 952 | dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", |
941 | __func__); | 953 | __func__); |
942 | break; | 954 | break; |
943 | default: | 955 | default: |
944 | err = -EINVAL; | 956 | err = -EINVAL; |
945 | dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", | 957 | dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", |
946 | __func__, resp); | 958 | __func__, resp); |
947 | break; | 959 | break; |
948 | } | 960 | } |
949 | 961 | ||
950 | return err; | 962 | return err; |
951 | } | 963 | } |
952 | 964 | ||
/**
 * ufshcd_wait_for_dev_cmd - wait for a device management command to finish
 * @hba: per adapter instance
 * @lrbp: local reference block of the issued command
 * @max_timeout: maximum wait time in milliseconds
 *
 * Sleeps on hba->dev_cmd.complete, then collects the overall command status
 * and decodes the response UPIU.
 *
 * Returns 0 on success; -ETIMEDOUT if the command timed out and could not
 * be cleared from the doorbell; -EAGAIN if the timed-out command was
 * cleared (caller may retry); other negative errno from the completion path.
 */
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, int max_timeout)
{
	int err = 0;
	unsigned long time_left;
	unsigned long flags;

	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
			msecs_to_jiffies(max_timeout));

	/* clear the completion pointer under the host lock */
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->dev_cmd.complete = NULL;
	if (likely(time_left)) {
		err = ufshcd_get_tr_ocs(lrbp);
		if (!err)
			err = ufshcd_dev_cmd_completion(hba, lrbp);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (!time_left) {
		err = -ETIMEDOUT;
		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
	}

	return err;
}
981 | 993 | ||
982 | /** | 994 | /** |
983 | * ufshcd_get_dev_cmd_tag - Get device management command tag | 995 | * ufshcd_get_dev_cmd_tag - Get device management command tag |
984 | * @hba: per-adapter instance | 996 | * @hba: per-adapter instance |
985 | * @tag: pointer to variable with available slot value | 997 | * @tag: pointer to variable with available slot value |
986 | * | 998 | * |
987 | * Get a free slot and lock it until device management command | 999 | * Get a free slot and lock it until device management command |
988 | * completes. | 1000 | * completes. |
989 | * | 1001 | * |
990 | * Returns false if free slot is unavailable for locking, else | 1002 | * Returns false if free slot is unavailable for locking, else |
991 | * return true with tag value in @tag. | 1003 | * return true with tag value in @tag. |
992 | */ | 1004 | */ |
993 | static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out) | 1005 | static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out) |
994 | { | 1006 | { |
995 | int tag; | 1007 | int tag; |
996 | bool ret = false; | 1008 | bool ret = false; |
997 | unsigned long tmp; | 1009 | unsigned long tmp; |
998 | 1010 | ||
999 | if (!tag_out) | 1011 | if (!tag_out) |
1000 | goto out; | 1012 | goto out; |
1001 | 1013 | ||
1002 | do { | 1014 | do { |
1003 | tmp = ~hba->lrb_in_use; | 1015 | tmp = ~hba->lrb_in_use; |
1004 | tag = find_last_bit(&tmp, hba->nutrs); | 1016 | tag = find_last_bit(&tmp, hba->nutrs); |
1005 | if (tag >= hba->nutrs) | 1017 | if (tag >= hba->nutrs) |
1006 | goto out; | 1018 | goto out; |
1007 | } while (test_and_set_bit_lock(tag, &hba->lrb_in_use)); | 1019 | } while (test_and_set_bit_lock(tag, &hba->lrb_in_use)); |
1008 | 1020 | ||
1009 | *tag_out = tag; | 1021 | *tag_out = tag; |
1010 | ret = true; | 1022 | ret = true; |
1011 | out: | 1023 | out: |
1012 | return ret; | 1024 | return ret; |
1013 | } | 1025 | } |
1014 | 1026 | ||
/* Release a device command slot acquired with ufshcd_get_dev_cmd_tag(). */
static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
{
	clear_bit_unlock(tag, &hba->lrb_in_use);
}
1019 | 1031 | ||
/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba - UFS hba
 * @cmd_type - specifies the type (NOP, Query...)
 * @timeout - timeout in milliseconds (converted with msecs_to_jiffies()
 *            by ufshcd_wait_for_dev_cmd())
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
		enum dev_cmd_type cmd_type, int timeout)
{
	struct ufshcd_lrb *lrbp;
	int err;
	int tag;
	struct completion wait;
	unsigned long flags;

	/*
	 * Get free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by SCSI request timeout.
	 */
	wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));

	init_completion(&wait);
	lrbp = &hba->lrb[tag];
	/* the slot must not already carry a SCSI command */
	WARN_ON(lrbp->cmd);
	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
	if (unlikely(err))
		goto out_put_tag;

	hba->dev_cmd.complete = &wait;

	/* ring the doorbell under the host lock */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);

out_put_tag:
	/* free the slot and wake anyone waiting for a tag */
	ufshcd_put_dev_cmd_tag(hba, tag);
	wake_up(&hba->dev_cmd.tag_wq);
	return err;
}
1065 | 1077 | ||
/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query to perform (set/clear/toggle/read)
 * @idn: flag idn to access
 * @flag_res: the flag value after the query request completes; must be
 *            non-NULL for read requests, may be NULL for writes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
			enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request;
	struct ufs_query_res *response;
	int err;

	BUG_ON(!hba);

	/* dev_cmd.lock serializes use of the single query request/response */
	mutex_lock(&hba->dev_cmd.lock);
	request = &hba->dev_cmd.query.request;
	response = &hba->dev_cmd.query.response;
	memset(request, 0, sizeof(struct ufs_query_req));
	memset(response, 0, sizeof(struct ufs_query_res));

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
					__func__);
			err = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}
	request->upiu_req.opcode = opcode;
	request->upiu_req.idn = idn;

	/* Send query request */
	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY,
			QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out_unlock;
	}

	if (flag_res)
		/* the flag lives in the low bit of the masked response value */
		*flag_res = (response->upiu_res.value &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	return err;
}
1135 | 1147 | ||
/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode (read or write)
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: in: value to write (for write requests);
 *            out: the attribute value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
			enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
	struct ufs_query_req *request;
	struct ufs_query_res *response;
	int err;

	BUG_ON(!hba);

	if (!attr_val) {
		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	/* dev_cmd.lock serializes use of the single query request/response */
	mutex_lock(&hba->dev_cmd.lock);
	request = &hba->dev_cmd.query.request;
	response = &hba->dev_cmd.query.response;
	memset(request, 0, sizeof(struct ufs_query_req));
	memset(response, 0, sizeof(struct ufs_query_res));

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		request->upiu_req.value = *attr_val;
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	request->upiu_req.opcode = opcode;
	request->upiu_req.idn = idn;
	request->upiu_req.index = index;
	request->upiu_req.selector = selector;

	/* Send query request */
	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY,
						QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
				__func__, opcode, idn, err);
		goto out_unlock;
	}

	*attr_val = response->upiu_res.value;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
out:
	return err;
}
1206 | 1218 | ||
/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consist of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * All allocations are device-managed (dmam_/devm_), so no explicit free
 * path is needed on the error branches below.
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
						  ucdl_size,
						  &hba->ucdl_dma_addr,
						  GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
	 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
	 * be aligned to 128 bytes as well
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1024 byte alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
						   utrdl_size,
						   &hba->utrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1024 byte alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
						    utmrdl_size,
						    &hba->utmrdl_dma_addr,
						    GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

	/* Allocate memory for local reference block */
	hba->lrb = devm_kzalloc(hba->dev,
				hba->nutrs * sizeof(struct ufshcd_lrb),
				GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	return -ENOMEM;
}
1288 | 1300 | ||
/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space
 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
 * address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 * and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 * into local reference block.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_cmd_desc *cmd_descp;
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;
	cmd_descp = hba->ucdl_base_addr;

	/* offsets of the response UPIU and PRD table within each UCD */
	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
				(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr_lo =
				cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
		utrdlp[i].command_desc_base_addr_hi =
				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));

		/* Response upiu and prdt offset should be in double words */
		utrdlp[i].response_upiu_offset =
				cpu_to_le16((response_offset >> 2));
		utrdlp[i].prd_table_offset =
				cpu_to_le16((prdt_offset >> 2));
		utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);

		/* stash the CPU-visible pointers in the lrb for later use */
		hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
		hba->lrb[i].ucd_req_ptr =
			(struct utp_upiu_req *)(cmd_descp + i);
		hba->lrb[i].ucd_rsp_ptr =
			(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
		hba->lrb[i].ucd_prdt_ptr =
			(struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
	}
}
1350 | 1362 | ||
1351 | /** | 1363 | /** |
1352 | * ufshcd_dme_link_startup - Notify Unipro to perform link startup | 1364 | * ufshcd_dme_link_startup - Notify Unipro to perform link startup |
1353 | * @hba: per adapter instance | 1365 | * @hba: per adapter instance |
1354 | * | 1366 | * |
1355 | * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer, | 1367 | * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer, |
1356 | * in order to initialize the Unipro link startup procedure. | 1368 | * in order to initialize the Unipro link startup procedure. |
1357 | * Once the Unipro links are up, the device connected to the controller | 1369 | * Once the Unipro links are up, the device connected to the controller |
1358 | * is detected. | 1370 | * is detected. |
1359 | * | 1371 | * |
1360 | * Returns 0 on success, non-zero value on failure | 1372 | * Returns 0 on success, non-zero value on failure |
1361 | */ | 1373 | */ |
1362 | static int ufshcd_dme_link_startup(struct ufs_hba *hba) | 1374 | static int ufshcd_dme_link_startup(struct ufs_hba *hba) |
1363 | { | 1375 | { |
1364 | struct uic_command uic_cmd = {0}; | 1376 | struct uic_command uic_cmd = {0}; |
1365 | int ret; | 1377 | int ret; |
1366 | 1378 | ||
1367 | uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; | 1379 | uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; |
1368 | 1380 | ||
1369 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); | 1381 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); |
1370 | if (ret) | 1382 | if (ret) |
1371 | dev_err(hba->dev, | 1383 | dev_err(hba->dev, |
1372 | "dme-link-startup: error code %d\n", ret); | 1384 | "dme-link-startup: error code %d\n", ret); |
1373 | return ret; | 1385 | return ret; |
1374 | } | 1386 | } |
1375 | 1387 | ||
1376 | /** | 1388 | /** |
1389 | * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET | ||
1390 | * @hba: per adapter instance | ||
1391 | * @attr_sel: uic command argument1 | ||
1392 | * @attr_set: attribute set type as uic command argument2 | ||
1393 | * @mib_val: setting value as uic command argument3 | ||
1394 | * @peer: indicate whether peer or local | ||
1395 | * | ||
1396 | * Returns 0 on success, non-zero value on failure | ||
1397 | */ | ||
1398 | int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, | ||
1399 | u8 attr_set, u32 mib_val, u8 peer) | ||
1400 | { | ||
1401 | struct uic_command uic_cmd = {0}; | ||
1402 | static const char *const action[] = { | ||
1403 | "dme-set", | ||
1404 | "dme-peer-set" | ||
1405 | }; | ||
1406 | const char *set = action[!!peer]; | ||
1407 | int ret; | ||
1408 | |||
1409 | uic_cmd.command = peer ? | ||
1410 | UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; | ||
1411 | uic_cmd.argument1 = attr_sel; | ||
1412 | uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); | ||
1413 | uic_cmd.argument3 = mib_val; | ||
1414 | |||
1415 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); | ||
1416 | if (ret) | ||
1417 | dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", | ||
1418 | set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); | ||
1419 | |||
1420 | return ret; | ||
1421 | } | ||
1422 | EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr); | ||
1423 | |||
1424 | /** | ||
1425 | * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET | ||
1426 | * @hba: per adapter instance | ||
1427 | * @attr_sel: uic command argument1 | ||
1428 | * @mib_val: the value of the attribute as returned by the UIC command | ||
1429 | * @peer: indicate whether peer or local | ||
1430 | * | ||
1431 | * Returns 0 on success, non-zero value on failure | ||
1432 | */ | ||
1433 | int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, | ||
1434 | u32 *mib_val, u8 peer) | ||
1435 | { | ||
1436 | struct uic_command uic_cmd = {0}; | ||
1437 | static const char *const action[] = { | ||
1438 | "dme-get", | ||
1439 | "dme-peer-get" | ||
1440 | }; | ||
1441 | const char *get = action[!!peer]; | ||
1442 | int ret; | ||
1443 | |||
1444 | uic_cmd.command = peer ? | ||
1445 | UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; | ||
1446 | uic_cmd.argument1 = attr_sel; | ||
1447 | |||
1448 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); | ||
1449 | if (ret) { | ||
1450 | dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n", | ||
1451 | get, UIC_GET_ATTR_ID(attr_sel), ret); | ||
1452 | goto out; | ||
1453 | } | ||
1454 | |||
1455 | if (mib_val) | ||
1456 | *mib_val = uic_cmd.argument3; | ||
1457 | out: | ||
1458 | return ret; | ||
1459 | } | ||
1460 | EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); | ||
1461 | |||
1462 | /** | ||
1377 | * ufshcd_complete_dev_init() - checks device readiness | 1463 | * ufshcd_complete_dev_init() - checks device readiness |
1378 | * hba: per-adapter instance | 1464 | * hba: per-adapter instance |
1379 | * | 1465 | * |
1380 | * Set fDeviceInit flag and poll until device toggles it. | 1466 | * Set fDeviceInit flag and poll until device toggles it. |
1381 | */ | 1467 | */ |
1382 | static int ufshcd_complete_dev_init(struct ufs_hba *hba) | 1468 | static int ufshcd_complete_dev_init(struct ufs_hba *hba) |
1383 | { | 1469 | { |
1384 | int i, retries, err = 0; | 1470 | int i, retries, err = 0; |
1385 | bool flag_res = 1; | 1471 | bool flag_res = 1; |
1386 | 1472 | ||
1387 | for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { | 1473 | for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { |
1388 | /* Set the fDeviceInit flag */ | 1474 | /* Set the fDeviceInit flag */ |
1389 | err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, | 1475 | err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, |
1390 | QUERY_FLAG_IDN_FDEVICEINIT, NULL); | 1476 | QUERY_FLAG_IDN_FDEVICEINIT, NULL); |
1391 | if (!err || err == -ETIMEDOUT) | 1477 | if (!err || err == -ETIMEDOUT) |
1392 | break; | 1478 | break; |
1393 | dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); | 1479 | dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); |
1394 | } | 1480 | } |
1395 | if (err) { | 1481 | if (err) { |
1396 | dev_err(hba->dev, | 1482 | dev_err(hba->dev, |
1397 | "%s setting fDeviceInit flag failed with error %d\n", | 1483 | "%s setting fDeviceInit flag failed with error %d\n", |
1398 | __func__, err); | 1484 | __func__, err); |
1399 | goto out; | 1485 | goto out; |
1400 | } | 1486 | } |
1401 | 1487 | ||
1402 | /* poll for max. 100 iterations for fDeviceInit flag to clear */ | 1488 | /* poll for max. 100 iterations for fDeviceInit flag to clear */ |
1403 | for (i = 0; i < 100 && !err && flag_res; i++) { | 1489 | for (i = 0; i < 100 && !err && flag_res; i++) { |
1404 | for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { | 1490 | for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { |
1405 | err = ufshcd_query_flag(hba, | 1491 | err = ufshcd_query_flag(hba, |
1406 | UPIU_QUERY_OPCODE_READ_FLAG, | 1492 | UPIU_QUERY_OPCODE_READ_FLAG, |
1407 | QUERY_FLAG_IDN_FDEVICEINIT, &flag_res); | 1493 | QUERY_FLAG_IDN_FDEVICEINIT, &flag_res); |
1408 | if (!err || err == -ETIMEDOUT) | 1494 | if (!err || err == -ETIMEDOUT) |
1409 | break; | 1495 | break; |
1410 | dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, | 1496 | dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, |
1411 | err); | 1497 | err); |
1412 | } | 1498 | } |
1413 | } | 1499 | } |
1414 | if (err) | 1500 | if (err) |
1415 | dev_err(hba->dev, | 1501 | dev_err(hba->dev, |
1416 | "%s reading fDeviceInit flag failed with error %d\n", | 1502 | "%s reading fDeviceInit flag failed with error %d\n", |
1417 | __func__, err); | 1503 | __func__, err); |
1418 | else if (flag_res) | 1504 | else if (flag_res) |
1419 | dev_err(hba->dev, | 1505 | dev_err(hba->dev, |
1420 | "%s fDeviceInit was not cleared by the device\n", | 1506 | "%s fDeviceInit was not cleared by the device\n", |
1421 | __func__); | 1507 | __func__); |
1422 | 1508 | ||
1423 | out: | 1509 | out: |
1424 | return err; | 1510 | return err; |
1425 | } | 1511 | } |
1426 | 1512 | ||
/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Check if device is present
 * 2. Enable required interrupts
 * 3. Configure interrupt aggregation
 * 4. Program UTRL and UTMRL base address
 * 5. Configure run-stop-registers
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* check if device present */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!ufshcd_is_device_present(reg)) {
		dev_err(hba->dev, "cc: Device not present\n");
		err = -ENXIO;
		goto out;
	}

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Configure interrupt aggregation */
	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 * DEI, HEI bits must be 0
	 *
	 * NOTE(review): 'reg' was sampled above, before the list base
	 * registers were programmed; the list-ready bits may have changed
	 * since. Consider re-reading REG_CONTROLLER_STATUS here - TODO
	 * confirm against the UFSHCI spec / hardware behavior.
	 */
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
		goto out;
	}

	/* Resume SCSI request processing if it was blocked for a reset */
	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		scsi_unblock_requests(hba->host);

	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;

out:
	return err;
}
1490 | 1576 | ||
/**
 * ufshcd_hba_enable - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	/*
	 * msleep of 1 and 5 used in this function might result in msleep(20),
	 * but it was necessary to send the UFS FPGA to reset mode during
	 * development and testing of this driver. msleep can be changed to
	 * mdelay and retry count can be reduced based on the controller.
	 */
	if (!ufshcd_is_hba_active(hba)) {

		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

		/*
		 * This delay is based on the testing done with UFS host
		 * controller FPGA. The delay can be changed based on the
		 * host controller used.
		 */
		msleep(5);
	}

	/* start controller initialization sequence (sets HCE = 1) */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	msleep(1);

	/*
	 * wait for the host controller to complete initialization:
	 * up to 10 polls, 5 ms apart (~50 ms total) before giving up
	 */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev,
				"Controller enable failed\n");
			return -EIO;
		}
		msleep(5);
	}
	return 0;
}
1553 | 1639 | ||
1554 | /** | 1640 | /** |
1555 | * ufshcd_link_startup - Initialize unipro link startup | 1641 | * ufshcd_link_startup - Initialize unipro link startup |
1556 | * @hba: per adapter instance | 1642 | * @hba: per adapter instance |
1557 | * | 1643 | * |
1558 | * Returns 0 for success, non-zero in case of failure | 1644 | * Returns 0 for success, non-zero in case of failure |
1559 | */ | 1645 | */ |
1560 | static int ufshcd_link_startup(struct ufs_hba *hba) | 1646 | static int ufshcd_link_startup(struct ufs_hba *hba) |
1561 | { | 1647 | { |
1562 | int ret; | 1648 | int ret; |
1563 | 1649 | ||
1564 | /* enable UIC related interrupts */ | 1650 | /* enable UIC related interrupts */ |
1565 | ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); | 1651 | ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); |
1566 | 1652 | ||
1567 | ret = ufshcd_dme_link_startup(hba); | 1653 | ret = ufshcd_dme_link_startup(hba); |
1568 | if (ret) | 1654 | if (ret) |
1569 | goto out; | 1655 | goto out; |
1570 | 1656 | ||
1571 | ret = ufshcd_make_hba_operational(hba); | 1657 | ret = ufshcd_make_hba_operational(hba); |
1572 | 1658 | ||
1573 | out: | 1659 | out: |
1574 | if (ret) | 1660 | if (ret) |
1575 | dev_err(hba->dev, "link startup failed %d\n", ret); | 1661 | dev_err(hba->dev, "link startup failed %d\n", ret); |
1576 | return ret; | 1662 | return ret; |
1577 | } | 1663 | } |
1578 | 1664 | ||
1579 | /** | 1665 | /** |
1580 | * ufshcd_verify_dev_init() - Verify device initialization | 1666 | * ufshcd_verify_dev_init() - Verify device initialization |
1581 | * @hba: per-adapter instance | 1667 | * @hba: per-adapter instance |
1582 | * | 1668 | * |
1583 | * Send NOP OUT UPIU and wait for NOP IN response to check whether the | 1669 | * Send NOP OUT UPIU and wait for NOP IN response to check whether the |
1584 | * device Transport Protocol (UTP) layer is ready after a reset. | 1670 | * device Transport Protocol (UTP) layer is ready after a reset. |
1585 | * If the UTP layer at the device side is not initialized, it may | 1671 | * If the UTP layer at the device side is not initialized, it may |
1586 | * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT | 1672 | * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT |
1587 | * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations. | 1673 | * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations. |
1588 | */ | 1674 | */ |
1589 | static int ufshcd_verify_dev_init(struct ufs_hba *hba) | 1675 | static int ufshcd_verify_dev_init(struct ufs_hba *hba) |
1590 | { | 1676 | { |
1591 | int err = 0; | 1677 | int err = 0; |
1592 | int retries; | 1678 | int retries; |
1593 | 1679 | ||
1594 | mutex_lock(&hba->dev_cmd.lock); | 1680 | mutex_lock(&hba->dev_cmd.lock); |
1595 | for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { | 1681 | for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { |
1596 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, | 1682 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, |
1597 | NOP_OUT_TIMEOUT); | 1683 | NOP_OUT_TIMEOUT); |
1598 | 1684 | ||
1599 | if (!err || err == -ETIMEDOUT) | 1685 | if (!err || err == -ETIMEDOUT) |
1600 | break; | 1686 | break; |
1601 | 1687 | ||
1602 | dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); | 1688 | dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); |
1603 | } | 1689 | } |
1604 | mutex_unlock(&hba->dev_cmd.lock); | 1690 | mutex_unlock(&hba->dev_cmd.lock); |
1605 | 1691 | ||
1606 | if (err) | 1692 | if (err) |
1607 | dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); | 1693 | dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); |
1608 | return err; | 1694 | return err; |
1609 | } | 1695 | } |
1610 | 1696 | ||
/**
 * ufshcd_do_reset - reset the host controller
 * @hba: per adapter instance
 *
 * Blocks the SCSI midlayer, stops the controller, fails all outstanding
 * requests back to their owners, then re-enables the controller and
 * restarts link startup.
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_do_reset(struct ufs_hba *hba)
{
	struct ufshcd_lrb *lrbp;
	unsigned long flags;
	int tag;

	/* block commands from midlayer */
	scsi_block_requests(hba->host);

	/* host_lock guards ufshcd_state and the controller stop */
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->ufshcd_state = UFSHCD_STATE_RESET;

	/* send controller to reset state */
	ufshcd_hba_stop(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* abort outstanding commands: complete each with DID_RESET */
	for (tag = 0; tag < hba->nutrs; tag++) {
		if (test_bit(tag, &hba->outstanding_reqs)) {
			lrbp = &hba->lrb[tag];
			if (lrbp->cmd) {
				scsi_dma_unmap(lrbp->cmd);
				lrbp->cmd->result = DID_RESET << 16;
				lrbp->cmd->scsi_done(lrbp->cmd);
				lrbp->cmd = NULL;
				clear_bit_unlock(tag, &hba->lrb_in_use);
			}
		}
	}

	/* complete device management command, if one is waiting */
	if (hba->dev_cmd.complete)
		complete(hba->dev_cmd.complete);

	/* clear outstanding request/task bit maps */
	hba->outstanding_reqs = 0;
	hba->outstanding_tasks = 0;

	/* Host controller enable */
	if (ufshcd_hba_enable(hba)) {
		dev_err(hba->dev,
			"Reset: Controller initialization failed\n");
		return FAILED;
	}

	if (ufshcd_link_startup(hba)) {
		dev_err(hba->dev,
			"Reset: Link start-up failed\n");
		return FAILED;
	}

	return SUCCESS;
}
1670 | 1756 | ||
1671 | /** | 1757 | /** |
1672 | * ufshcd_slave_alloc - handle initial SCSI device configurations | 1758 | * ufshcd_slave_alloc - handle initial SCSI device configurations |
1673 | * @sdev: pointer to SCSI device | 1759 | * @sdev: pointer to SCSI device |
1674 | * | 1760 | * |
1675 | * Returns success | 1761 | * Returns success |
1676 | */ | 1762 | */ |
1677 | static int ufshcd_slave_alloc(struct scsi_device *sdev) | 1763 | static int ufshcd_slave_alloc(struct scsi_device *sdev) |
1678 | { | 1764 | { |
1679 | struct ufs_hba *hba; | 1765 | struct ufs_hba *hba; |
1680 | 1766 | ||
1681 | hba = shost_priv(sdev->host); | 1767 | hba = shost_priv(sdev->host); |
1682 | sdev->tagged_supported = 1; | 1768 | sdev->tagged_supported = 1; |
1683 | 1769 | ||
1684 | /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ | 1770 | /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ |
1685 | sdev->use_10_for_ms = 1; | 1771 | sdev->use_10_for_ms = 1; |
1686 | scsi_set_tag_type(sdev, MSG_SIMPLE_TAG); | 1772 | scsi_set_tag_type(sdev, MSG_SIMPLE_TAG); |
1687 | 1773 | ||
1688 | /* | 1774 | /* |
1689 | * Inform SCSI Midlayer that the LUN queue depth is same as the | 1775 | * Inform SCSI Midlayer that the LUN queue depth is same as the |
1690 | * controller queue depth. If a LUN queue depth is less than the | 1776 | * controller queue depth. If a LUN queue depth is less than the |
1691 | * controller queue depth and if the LUN reports | 1777 | * controller queue depth and if the LUN reports |
1692 | * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted | 1778 | * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted |
1693 | * with scsi_adjust_queue_depth. | 1779 | * with scsi_adjust_queue_depth. |
1694 | */ | 1780 | */ |
1695 | scsi_activate_tcq(sdev, hba->nutrs); | 1781 | scsi_activate_tcq(sdev, hba->nutrs); |
1696 | return 0; | 1782 | return 0; |
1697 | } | 1783 | } |
1698 | 1784 | ||
1699 | /** | 1785 | /** |
1700 | * ufshcd_slave_destroy - remove SCSI device configurations | 1786 | * ufshcd_slave_destroy - remove SCSI device configurations |
1701 | * @sdev: pointer to SCSI device | 1787 | * @sdev: pointer to SCSI device |
1702 | */ | 1788 | */ |
1703 | static void ufshcd_slave_destroy(struct scsi_device *sdev) | 1789 | static void ufshcd_slave_destroy(struct scsi_device *sdev) |
1704 | { | 1790 | { |
1705 | struct ufs_hba *hba; | 1791 | struct ufs_hba *hba; |
1706 | 1792 | ||
1707 | hba = shost_priv(sdev->host); | 1793 | hba = shost_priv(sdev->host); |
1708 | scsi_deactivate_tcq(sdev, hba->nutrs); | 1794 | scsi_deactivate_tcq(sdev, hba->nutrs); |
1709 | } | 1795 | } |
1710 | 1796 | ||
/**
 * ufshcd_task_req_compl - handle task management request completion
 * @hba: per adapter instance
 * @index: index of the completed request
 *
 * Decodes the Overall Command Status (OCS) of the task management
 * request descriptor and, on OCS_SUCCESS, the response UPIU result.
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_rsp *task_rsp_upiup;
	unsigned long flags;
	int ocs_value;
	int task_result;

	/* host_lock guards outstanding_tasks and the TM descriptor list */
	spin_lock_irqsave(hba->host->host_lock, flags);

	/* Clear completed tasks from outstanding_tasks */
	__clear_bit(index, &hba->outstanding_tasks);

	task_req_descp = hba->utmrdl_base_addr;
	ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);

	if (ocs_value == OCS_SUCCESS) {
		task_rsp_upiup = (struct utp_upiu_task_rsp *)
				task_req_descp[index].task_rsp_upiu;
		/* response fields are big-endian on the wire */
		task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
		/* extract the task management service response byte */
		task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);

		if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
		    task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
			task_result = FAILED;
		else
			task_result = SUCCESS;
	} else {
		task_result = FAILED;
		dev_err(hba->dev,
			"trc: Invalid ocs = %x\n", ocs_value);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return task_result;
}
1753 | 1839 | ||
1754 | /** | 1840 | /** |
1755 | * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with | 1841 | * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with |
1756 | * SAM_STAT_TASK_SET_FULL SCSI command status. | 1842 | * SAM_STAT_TASK_SET_FULL SCSI command status. |
1757 | * @cmd: pointer to SCSI command | 1843 | * @cmd: pointer to SCSI command |
1758 | */ | 1844 | */ |
1759 | static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd) | 1845 | static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd) |
1760 | { | 1846 | { |
1761 | struct ufs_hba *hba; | 1847 | struct ufs_hba *hba; |
1762 | int i; | 1848 | int i; |
1763 | int lun_qdepth = 0; | 1849 | int lun_qdepth = 0; |
1764 | 1850 | ||
1765 | hba = shost_priv(cmd->device->host); | 1851 | hba = shost_priv(cmd->device->host); |
1766 | 1852 | ||
1767 | /* | 1853 | /* |
1768 | * LUN queue depth can be obtained by counting outstanding commands | 1854 | * LUN queue depth can be obtained by counting outstanding commands |
1769 | * on the LUN. | 1855 | * on the LUN. |
1770 | */ | 1856 | */ |
1771 | for (i = 0; i < hba->nutrs; i++) { | 1857 | for (i = 0; i < hba->nutrs; i++) { |
1772 | if (test_bit(i, &hba->outstanding_reqs)) { | 1858 | if (test_bit(i, &hba->outstanding_reqs)) { |
1773 | 1859 | ||
1774 | /* | 1860 | /* |
1775 | * Check if the outstanding command belongs | 1861 | * Check if the outstanding command belongs |
1776 | * to the LUN which reported SAM_STAT_TASK_SET_FULL. | 1862 | * to the LUN which reported SAM_STAT_TASK_SET_FULL. |
1777 | */ | 1863 | */ |
1778 | if (cmd->device->lun == hba->lrb[i].lun) | 1864 | if (cmd->device->lun == hba->lrb[i].lun) |
1779 | lun_qdepth++; | 1865 | lun_qdepth++; |
1780 | } | 1866 | } |
1781 | } | 1867 | } |
1782 | 1868 | ||
1783 | /* | 1869 | /* |
1784 | * LUN queue depth will be total outstanding commands, except the | 1870 | * LUN queue depth will be total outstanding commands, except the |
1785 | * command for which the LUN reported SAM_STAT_TASK_SET_FULL. | 1871 | * command for which the LUN reported SAM_STAT_TASK_SET_FULL. |
1786 | */ | 1872 | */ |
1787 | scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1); | 1873 | scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1); |
1788 | } | 1874 | } |
1789 | 1875 | ||
/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Returns value base on SCSI command status
 */
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
	int result = 0;

	switch (scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		ufshcd_copy_sense_data(lrbp);
		/* fallthrough - CHECK CONDITION still completes with DID_OK */
	case SAM_STAT_GOOD:
		result |= DID_OK << 16 |
			  COMMAND_COMPLETE << 8 |
			  scsi_status;
		break;
	case SAM_STAT_TASK_SET_FULL:
		/*
		 * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
		 * depth needs to be adjusted to the exact number of
		 * outstanding commands the LUN can handle at any given time.
		 */
		ufshcd_adjust_lun_qdepth(lrbp->cmd);
		/* fallthrough */
	case SAM_STAT_BUSY:
	case SAM_STAT_TASK_ABORTED:
		ufshcd_copy_sense_data(lrbp);
		result |= scsi_status;
		break;
	default:
		result |= DID_ERROR << 16;
		break;
	} /* end of switch */

	return result;
}
1829 | 1915 | ||
1830 | /** | 1916 | /** |
1831 | * ufshcd_transfer_rsp_status - Get overall status of the response | 1917 | * ufshcd_transfer_rsp_status - Get overall status of the response |
1832 | * @hba: per adapter instance | 1918 | * @hba: per adapter instance |
1833 | * @lrb: pointer to local reference block of completed command | 1919 | * @lrb: pointer to local reference block of completed command |
1834 | * | 1920 | * |
1835 | * Returns result of the command to notify SCSI midlayer | 1921 | * Returns result of the command to notify SCSI midlayer |
1836 | */ | 1922 | */ |
1837 | static inline int | 1923 | static inline int |
1838 | ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | 1924 | ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) |
1839 | { | 1925 | { |
1840 | int result = 0; | 1926 | int result = 0; |
1841 | int scsi_status; | 1927 | int scsi_status; |
1842 | int ocs; | 1928 | int ocs; |
1843 | 1929 | ||
1844 | /* overall command status of utrd */ | 1930 | /* overall command status of utrd */ |
1845 | ocs = ufshcd_get_tr_ocs(lrbp); | 1931 | ocs = ufshcd_get_tr_ocs(lrbp); |
1846 | 1932 | ||
1847 | switch (ocs) { | 1933 | switch (ocs) { |
1848 | case OCS_SUCCESS: | 1934 | case OCS_SUCCESS: |
1849 | result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); | 1935 | result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); |
1850 | 1936 | ||
1851 | switch (result) { | 1937 | switch (result) { |
1852 | case UPIU_TRANSACTION_RESPONSE: | 1938 | case UPIU_TRANSACTION_RESPONSE: |
1853 | /* | 1939 | /* |
1854 | * get the response UPIU result to extract | 1940 | * get the response UPIU result to extract |
1855 | * the SCSI command status | 1941 | * the SCSI command status |
1856 | */ | 1942 | */ |
1857 | result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr); | 1943 | result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr); |
1858 | 1944 | ||
1859 | /* | 1945 | /* |
1860 | * get the result based on SCSI status response | 1946 | * get the result based on SCSI status response |
1861 | * to notify the SCSI midlayer of the command status | 1947 | * to notify the SCSI midlayer of the command status |
1862 | */ | 1948 | */ |
1863 | scsi_status = result & MASK_SCSI_STATUS; | 1949 | scsi_status = result & MASK_SCSI_STATUS; |
1864 | result = ufshcd_scsi_cmd_status(lrbp, scsi_status); | 1950 | result = ufshcd_scsi_cmd_status(lrbp, scsi_status); |
1865 | 1951 | ||
1866 | if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) | 1952 | if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) |
1867 | schedule_work(&hba->eeh_work); | 1953 | schedule_work(&hba->eeh_work); |
1868 | break; | 1954 | break; |
1869 | case UPIU_TRANSACTION_REJECT_UPIU: | 1955 | case UPIU_TRANSACTION_REJECT_UPIU: |
1870 | /* TODO: handle Reject UPIU Response */ | 1956 | /* TODO: handle Reject UPIU Response */ |
1871 | result = DID_ERROR << 16; | 1957 | result = DID_ERROR << 16; |
1872 | dev_err(hba->dev, | 1958 | dev_err(hba->dev, |
1873 | "Reject UPIU not fully implemented\n"); | 1959 | "Reject UPIU not fully implemented\n"); |
1874 | break; | 1960 | break; |
1875 | default: | 1961 | default: |
1876 | result = DID_ERROR << 16; | 1962 | result = DID_ERROR << 16; |
1877 | dev_err(hba->dev, | 1963 | dev_err(hba->dev, |
1878 | "Unexpected request response code = %x\n", | 1964 | "Unexpected request response code = %x\n", |
1879 | result); | 1965 | result); |
1880 | break; | 1966 | break; |
1881 | } | 1967 | } |
1882 | break; | 1968 | break; |
1883 | case OCS_ABORTED: | 1969 | case OCS_ABORTED: |
1884 | result |= DID_ABORT << 16; | 1970 | result |= DID_ABORT << 16; |
1885 | break; | 1971 | break; |
1886 | case OCS_INVALID_CMD_TABLE_ATTR: | 1972 | case OCS_INVALID_CMD_TABLE_ATTR: |
1887 | case OCS_INVALID_PRDT_ATTR: | 1973 | case OCS_INVALID_PRDT_ATTR: |
1888 | case OCS_MISMATCH_DATA_BUF_SIZE: | 1974 | case OCS_MISMATCH_DATA_BUF_SIZE: |
1889 | case OCS_MISMATCH_RESP_UPIU_SIZE: | 1975 | case OCS_MISMATCH_RESP_UPIU_SIZE: |
1890 | case OCS_PEER_COMM_FAILURE: | 1976 | case OCS_PEER_COMM_FAILURE: |
1891 | case OCS_FATAL_ERROR: | 1977 | case OCS_FATAL_ERROR: |
1892 | default: | 1978 | default: |
1893 | result |= DID_ERROR << 16; | 1979 | result |= DID_ERROR << 16; |
1894 | dev_err(hba->dev, | 1980 | dev_err(hba->dev, |
1895 | "OCS error from controller = %x\n", ocs); | 1981 | "OCS error from controller = %x\n", ocs); |
1896 | break; | 1982 | break; |
1897 | } /* end of switch */ | 1983 | } /* end of switch */ |
1898 | 1984 | ||
1899 | return result; | 1985 | return result; |
1900 | } | 1986 | } |
1901 | 1987 | ||
/**
 * ufshcd_uic_cmd_compl - handle completion of uic command
 * @hba: per adapter instance
 *
 * Called from the interrupt path when UIC_COMMAND_COMPL is raised.
 * Latches the UIC command result and the DME attribute value (used by
 * the DME GET/PEER_GET primitives) into the active command before
 * waking its waiter.  Must run with the host lock held (see
 * ufshcd_intr), which serializes access to active_uic_cmd.
 */
static void ufshcd_uic_cmd_compl(struct ufs_hba *hba)
{
	if (hba->active_uic_cmd) {
		/* OR the result in so any pre-set bits survive */
		hba->active_uic_cmd->argument2 |=
			ufshcd_get_uic_cmd_result(hba);
		/* argument3 carries the attribute value for DME reads */
		hba->active_uic_cmd->argument3 =
			ufshcd_get_dme_attr_val(hba);
		complete(&hba->active_uic_cmd->done);
	}
}
1914 | 2002 | ||
/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 *
 * Completed slots are those set in hba->outstanding_reqs but no longer
 * set in the doorbell register (XOR of the two).  For each such slot
 * the SCSI command is finished, or the device-management waiter is
 * woken.  Runs in interrupt context with the host lock held.
 */
static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;
	unsigned long completed_reqs;
	u32 tr_doorbell;
	int result;
	int index;
	bool int_aggr_reset = false;

	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	/* bits set here were outstanding but have left the doorbell */
	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;

	for (index = 0; index < hba->nutrs; index++) {
		if (test_bit(index, &completed_reqs)) {
			lrbp = &hba->lrb[index];
			cmd = lrbp->cmd;
			/*
			 * Don't skip resetting interrupt aggregation counters
			 * if a regular command is present.
			 */
			int_aggr_reset |= !lrbp->intr_cmd;

			if (cmd) {
				result = ufshcd_transfer_rsp_status(hba, lrbp);
				scsi_dma_unmap(cmd);
				cmd->result = result;
				/* Mark completed command as NULL in LRB */
				lrbp->cmd = NULL;
				clear_bit_unlock(index, &hba->lrb_in_use);
				/* Do not touch lrbp after scsi done */
				cmd->scsi_done(cmd);
			} else if (lrbp->command_type ==
					UTP_CMD_TYPE_DEV_MANAGE) {
				/* device management command: wake the waiter */
				if (hba->dev_cmd.complete)
					complete(hba->dev_cmd.complete);
			}
		} /* end of if */
	} /* end of for */

	/* clear corresponding bits of completed commands */
	hba->outstanding_reqs ^= completed_reqs;

	/* we might have free'd some tags above */
	wake_up(&hba->dev_cmd.tag_wq);

	/* Reset interrupt aggregation counters */
	if (int_aggr_reset)
		ufshcd_reset_intr_aggr(hba);
}
1969 | 2057 | ||
1970 | /** | 2058 | /** |
1971 | * ufshcd_disable_ee - disable exception event | 2059 | * ufshcd_disable_ee - disable exception event |
1972 | * @hba: per-adapter instance | 2060 | * @hba: per-adapter instance |
1973 | * @mask: exception event to disable | 2061 | * @mask: exception event to disable |
1974 | * | 2062 | * |
1975 | * Disables exception event in the device so that the EVENT_ALERT | 2063 | * Disables exception event in the device so that the EVENT_ALERT |
1976 | * bit is not set. | 2064 | * bit is not set. |
1977 | * | 2065 | * |
1978 | * Returns zero on success, non-zero error value on failure. | 2066 | * Returns zero on success, non-zero error value on failure. |
1979 | */ | 2067 | */ |
1980 | static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask) | 2068 | static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask) |
1981 | { | 2069 | { |
1982 | int err = 0; | 2070 | int err = 0; |
1983 | u32 val; | 2071 | u32 val; |
1984 | 2072 | ||
1985 | if (!(hba->ee_ctrl_mask & mask)) | 2073 | if (!(hba->ee_ctrl_mask & mask)) |
1986 | goto out; | 2074 | goto out; |
1987 | 2075 | ||
1988 | val = hba->ee_ctrl_mask & ~mask; | 2076 | val = hba->ee_ctrl_mask & ~mask; |
1989 | val &= 0xFFFF; /* 2 bytes */ | 2077 | val &= 0xFFFF; /* 2 bytes */ |
1990 | err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, | 2078 | err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, |
1991 | QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val); | 2079 | QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val); |
1992 | if (!err) | 2080 | if (!err) |
1993 | hba->ee_ctrl_mask &= ~mask; | 2081 | hba->ee_ctrl_mask &= ~mask; |
1994 | out: | 2082 | out: |
1995 | return err; | 2083 | return err; |
1996 | } | 2084 | } |
1997 | 2085 | ||
1998 | /** | 2086 | /** |
1999 | * ufshcd_enable_ee - enable exception event | 2087 | * ufshcd_enable_ee - enable exception event |
2000 | * @hba: per-adapter instance | 2088 | * @hba: per-adapter instance |
2001 | * @mask: exception event to enable | 2089 | * @mask: exception event to enable |
2002 | * | 2090 | * |
2003 | * Enable corresponding exception event in the device to allow | 2091 | * Enable corresponding exception event in the device to allow |
2004 | * device to alert host in critical scenarios. | 2092 | * device to alert host in critical scenarios. |
2005 | * | 2093 | * |
2006 | * Returns zero on success, non-zero error value on failure. | 2094 | * Returns zero on success, non-zero error value on failure. |
2007 | */ | 2095 | */ |
2008 | static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) | 2096 | static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) |
2009 | { | 2097 | { |
2010 | int err = 0; | 2098 | int err = 0; |
2011 | u32 val; | 2099 | u32 val; |
2012 | 2100 | ||
2013 | if (hba->ee_ctrl_mask & mask) | 2101 | if (hba->ee_ctrl_mask & mask) |
2014 | goto out; | 2102 | goto out; |
2015 | 2103 | ||
2016 | val = hba->ee_ctrl_mask | mask; | 2104 | val = hba->ee_ctrl_mask | mask; |
2017 | val &= 0xFFFF; /* 2 bytes */ | 2105 | val &= 0xFFFF; /* 2 bytes */ |
2018 | err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, | 2106 | err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, |
2019 | QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val); | 2107 | QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val); |
2020 | if (!err) | 2108 | if (!err) |
2021 | hba->ee_ctrl_mask |= mask; | 2109 | hba->ee_ctrl_mask |= mask; |
2022 | out: | 2110 | out: |
2023 | return err; | 2111 | return err; |
2024 | } | 2112 | } |
2025 | 2113 | ||
/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers
 * as the device is allowed to manage its own way of handling background
 * operations.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	/* already enabled - nothing to do */
	if (hba->auto_bkops_enabled)
		goto out;

	/* set the fBackgroundOpsEn device flag */
	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
				__func__, err);
		goto out;
	}

	hba->auto_bkops_enabled = true;

	/* No need of URGENT_BKOPS exception from the device */
	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err)
		/* non-fatal: bkops is enabled, only the EE stays on */
		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
				__func__, err);
out:
	return err;
}
2062 | 2150 | ||
/**
 * ufshcd_disable_auto_bkops - block device in doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency but
 * has drawback of device moving into critical state where the device is
 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
 * host is idle so that BKOPS are managed effectively without any negative
 * impacts.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	/* already disabled - nothing to do */
	if (!hba->auto_bkops_enabled)
		goto out;

	/*
	 * If host assisted BKOPs is to be enabled, make sure
	 * urgent bkops exception is allowed.
	 */
	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
				__func__, err);
		goto out;
	}

	/* clear the fBackgroundOpsEn device flag */
	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
				__func__, err);
		/* roll back: no point keeping the urgent-bkops EE armed */
		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
		goto out;
	}

	hba->auto_bkops_enabled = false;
out:
	return err;
}
2106 | 2194 | ||
/**
 * ufshcd_force_reset_auto_bkops - force enable of auto bkops
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag
 * to default value. The s/w tracking variables should be updated
 * as well. Do this by forcing enable of auto bkops.
 */
static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
	/*
	 * Clear the cached state first so ufshcd_enable_auto_bkops()
	 * does not short-circuit on its "already enabled" check.
	 */
	hba->auto_bkops_enabled = false;
	hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
	ufshcd_enable_auto_bkops(hba);
}
2121 | 2209 | ||
/*
 * Read the bBackgroundOpsStatus device attribute into *status.
 * Returns zero on success, non-zero error value on failure.
 */
static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}
2127 | 2215 | ||
2128 | /** | 2216 | /** |
2129 | * ufshcd_urgent_bkops - handle urgent bkops exception event | 2217 | * ufshcd_urgent_bkops - handle urgent bkops exception event |
2130 | * @hba: per-adapter instance | 2218 | * @hba: per-adapter instance |
2131 | * | 2219 | * |
2132 | * Enable fBackgroundOpsEn flag in the device to permit background | 2220 | * Enable fBackgroundOpsEn flag in the device to permit background |
2133 | * operations. | 2221 | * operations. |
2134 | */ | 2222 | */ |
2135 | static int ufshcd_urgent_bkops(struct ufs_hba *hba) | 2223 | static int ufshcd_urgent_bkops(struct ufs_hba *hba) |
2136 | { | 2224 | { |
2137 | int err; | 2225 | int err; |
2138 | u32 status = 0; | 2226 | u32 status = 0; |
2139 | 2227 | ||
2140 | err = ufshcd_get_bkops_status(hba, &status); | 2228 | err = ufshcd_get_bkops_status(hba, &status); |
2141 | if (err) { | 2229 | if (err) { |
2142 | dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", | 2230 | dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", |
2143 | __func__, err); | 2231 | __func__, err); |
2144 | goto out; | 2232 | goto out; |
2145 | } | 2233 | } |
2146 | 2234 | ||
2147 | status = status & 0xF; | 2235 | status = status & 0xF; |
2148 | 2236 | ||
2149 | /* handle only if status indicates performance impact or critical */ | 2237 | /* handle only if status indicates performance impact or critical */ |
2150 | if (status >= BKOPS_STATUS_PERF_IMPACT) | 2238 | if (status >= BKOPS_STATUS_PERF_IMPACT) |
2151 | err = ufshcd_enable_auto_bkops(hba); | 2239 | err = ufshcd_enable_auto_bkops(hba); |
2152 | out: | 2240 | out: |
2153 | return err; | 2241 | return err; |
2154 | } | 2242 | } |
2155 | 2243 | ||
/*
 * Read the wExceptionEventStatus device attribute into *status.
 * Returns zero on success, non-zero error value on failure.
 */
static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}
2161 | 2249 | ||
/**
 * ufshcd_exception_event_handler - handle exceptions raised by device
 * @work: pointer to work data
 *
 * Read bExceptionEventStatus attribute from the device and handle the
 * exception event accordingly.  Scheduled (from the completion path)
 * whenever a response UPIU carries the EVENT_ALERT bit.
 */
static void ufshcd_exception_event_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	int err;
	u32 status = 0;
	hba = container_of(work, struct ufs_hba, eeh_work);

	/* keep the device resumed while we query it */
	pm_runtime_get_sync(hba->dev);
	err = ufshcd_get_ee_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get exception status %d\n",
				__func__, err);
		goto out;
	}

	/* act only on events the host has actually enabled */
	status &= hba->ee_ctrl_mask;
	if (status & MASK_EE_URGENT_BKOPS) {
		err = ufshcd_urgent_bkops(hba);
		if (err)
			dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
					__func__, err);
	}
out:
	pm_runtime_put_sync(hba->dev);
	return;
}
2195 | 2283 | ||
/**
 * ufshcd_fatal_err_handler - handle fatal errors
 * @work: pointer to work data (embedded in struct ufs_hba as feh_workq)
 *
 * Resets the host controller unless a reset is already in progress.
 */
static void ufshcd_fatal_err_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	hba = container_of(work, struct ufs_hba, feh_workq);

	/* keep the device resumed for the duration of the reset */
	pm_runtime_get_sync(hba->dev);
	/* check if reset is already in progress */
	if (hba->ufshcd_state != UFSHCD_STATE_RESET)
		ufshcd_do_reset(hba);
	pm_runtime_put_sync(hba->dev);
}
2211 | 2299 | ||
/**
 * ufshcd_err_handler - Check for fatal errors
 * @hba: per adapter instance
 *
 * Inspects hba->errors (set by ufshcd_sl_intr from the interrupt
 * status) and, on a fatal condition, marks the host state as ERROR
 * and schedules the fatal-error work item.  Non-fatal UIC errors are
 * silently ignored here.
 */
static void ufshcd_err_handler(struct ufs_hba *hba)
{
	u32 reg;

	if (hba->errors & INT_FATAL_ERRORS)
		goto fatal_eh;

	if (hba->errors & UIC_ERROR) {
		/* a PA_INIT data link layer error is treated as fatal */
		reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
			goto fatal_eh;
	}
	return;
fatal_eh:
	hba->ufshcd_state = UFSHCD_STATE_ERROR;
	schedule_work(&hba->feh_workq);
}
2233 | 2321 | ||
/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 *
 * Completed task management slots are those still marked outstanding
 * but cleared from the TM doorbell register (XOR of the two).  Wakes
 * the waiters in ufshcd_issue_tm_cmd().
 */
static void ufshcd_tmc_handler(struct ufs_hba *hba)
{
	u32 tm_doorbell;

	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
	wake_up_interruptible(&hba->ufshcd_tm_wait_queue);
}
2246 | 2334 | ||
/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 *
 * Dispatches each asserted interrupt source to its handler.  Called
 * from ufshcd_intr() with the host lock held.
 */
static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	/* latch error bits first so the error handler sees all of them */
	hba->errors = UFSHCD_ERROR_MASK & intr_status;
	if (hba->errors)
		ufshcd_err_handler(hba);

	if (intr_status & UIC_COMMAND_COMPL)
		ufshcd_uic_cmd_compl(hba);

	if (intr_status & UTP_TASK_REQ_COMPL)
		ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		ufshcd_transfer_req_compl(hba);
}
2267 | 2355 | ||
/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns IRQ_HANDLED - If interrupt is valid
 *		IRQ_NONE - If invalid interrupt
 */
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
	u32 intr_status;
	irqreturn_t retval = IRQ_NONE;
	struct ufs_hba *hba = __hba;

	spin_lock(hba->host->host_lock);
	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);

	if (intr_status) {
		/* ack (clear) the status bits before handling them */
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
		ufshcd_sl_intr(hba, intr_status);
		retval = IRQ_HANDLED;
	}
	spin_unlock(hba->host->host_lock);
	return retval;
}
2293 | 2381 | ||
2294 | /** | 2382 | /** |
2295 | * ufshcd_issue_tm_cmd - issues task management commands to controller | 2383 | * ufshcd_issue_tm_cmd - issues task management commands to controller |
2296 | * @hba: per adapter instance | 2384 | * @hba: per adapter instance |
2297 | * @lrbp: pointer to local reference block | 2385 | * @lrbp: pointer to local reference block |
2298 | * | 2386 | * |
2299 | * Returns SUCCESS/FAILED | 2387 | * Returns SUCCESS/FAILED |
2300 | */ | 2388 | */ |
2301 | static int | 2389 | static int |
2302 | ufshcd_issue_tm_cmd(struct ufs_hba *hba, | 2390 | ufshcd_issue_tm_cmd(struct ufs_hba *hba, |
2303 | struct ufshcd_lrb *lrbp, | 2391 | struct ufshcd_lrb *lrbp, |
2304 | u8 tm_function) | 2392 | u8 tm_function) |
2305 | { | 2393 | { |
2306 | struct utp_task_req_desc *task_req_descp; | 2394 | struct utp_task_req_desc *task_req_descp; |
2307 | struct utp_upiu_task_req *task_req_upiup; | 2395 | struct utp_upiu_task_req *task_req_upiup; |
2308 | struct Scsi_Host *host; | 2396 | struct Scsi_Host *host; |
2309 | unsigned long flags; | 2397 | unsigned long flags; |
2310 | int free_slot = 0; | 2398 | int free_slot = 0; |
2311 | int err; | 2399 | int err; |
2312 | 2400 | ||
2313 | host = hba->host; | 2401 | host = hba->host; |
2314 | 2402 | ||
2315 | spin_lock_irqsave(host->host_lock, flags); | 2403 | spin_lock_irqsave(host->host_lock, flags); |
2316 | 2404 | ||
2317 | /* If task management queue is full */ | 2405 | /* If task management queue is full */ |
2318 | free_slot = ufshcd_get_tm_free_slot(hba); | 2406 | free_slot = ufshcd_get_tm_free_slot(hba); |
2319 | if (free_slot >= hba->nutmrs) { | 2407 | if (free_slot >= hba->nutmrs) { |
2320 | spin_unlock_irqrestore(host->host_lock, flags); | 2408 | spin_unlock_irqrestore(host->host_lock, flags); |
2321 | dev_err(hba->dev, "Task management queue full\n"); | 2409 | dev_err(hba->dev, "Task management queue full\n"); |
2322 | err = FAILED; | 2410 | err = FAILED; |
2323 | goto out; | 2411 | goto out; |
2324 | } | 2412 | } |
2325 | 2413 | ||
2326 | task_req_descp = hba->utmrdl_base_addr; | 2414 | task_req_descp = hba->utmrdl_base_addr; |
2327 | task_req_descp += free_slot; | 2415 | task_req_descp += free_slot; |
2328 | 2416 | ||
2329 | /* Configure task request descriptor */ | 2417 | /* Configure task request descriptor */ |
2330 | task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD); | 2418 | task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD); |
2331 | task_req_descp->header.dword_2 = | 2419 | task_req_descp->header.dword_2 = |
2332 | cpu_to_le32(OCS_INVALID_COMMAND_STATUS); | 2420 | cpu_to_le32(OCS_INVALID_COMMAND_STATUS); |
2333 | 2421 | ||
2334 | /* Configure task request UPIU */ | 2422 | /* Configure task request UPIU */ |
2335 | task_req_upiup = | 2423 | task_req_upiup = |
2336 | (struct utp_upiu_task_req *) task_req_descp->task_req_upiu; | 2424 | (struct utp_upiu_task_req *) task_req_descp->task_req_upiu; |
2337 | task_req_upiup->header.dword_0 = | 2425 | task_req_upiup->header.dword_0 = |
2338 | UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0, | 2426 | UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0, |
2339 | lrbp->lun, lrbp->task_tag); | 2427 | lrbp->lun, lrbp->task_tag); |
2340 | task_req_upiup->header.dword_1 = | 2428 | task_req_upiup->header.dword_1 = |
2341 | UPIU_HEADER_DWORD(0, tm_function, 0, 0); | 2429 | UPIU_HEADER_DWORD(0, tm_function, 0, 0); |
2342 | 2430 | ||
2343 | task_req_upiup->input_param1 = lrbp->lun; | 2431 | task_req_upiup->input_param1 = lrbp->lun; |
2344 | task_req_upiup->input_param1 = | 2432 | task_req_upiup->input_param1 = |
2345 | cpu_to_be32(task_req_upiup->input_param1); | 2433 | cpu_to_be32(task_req_upiup->input_param1); |
2346 | task_req_upiup->input_param2 = lrbp->task_tag; | 2434 | task_req_upiup->input_param2 = lrbp->task_tag; |
2347 | task_req_upiup->input_param2 = | 2435 | task_req_upiup->input_param2 = |
2348 | cpu_to_be32(task_req_upiup->input_param2); | 2436 | cpu_to_be32(task_req_upiup->input_param2); |
2349 | 2437 | ||
2350 | /* send command to the controller */ | 2438 | /* send command to the controller */ |
2351 | __set_bit(free_slot, &hba->outstanding_tasks); | 2439 | __set_bit(free_slot, &hba->outstanding_tasks); |
2352 | ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL); | 2440 | ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL); |
2353 | 2441 | ||
2354 | spin_unlock_irqrestore(host->host_lock, flags); | 2442 | spin_unlock_irqrestore(host->host_lock, flags); |
2355 | 2443 | ||
2356 | /* wait until the task management command is completed */ | 2444 | /* wait until the task management command is completed */ |
2357 | err = | 2445 | err = |
2358 | wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue, | 2446 | wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue, |
2359 | (test_bit(free_slot, | 2447 | (test_bit(free_slot, |
2360 | &hba->tm_condition) != 0), | 2448 | &hba->tm_condition) != 0), |
2361 | 60 * HZ); | 2449 | 60 * HZ); |
2362 | if (!err) { | 2450 | if (!err) { |
2363 | dev_err(hba->dev, | 2451 | dev_err(hba->dev, |
2364 | "Task management command timed-out\n"); | 2452 | "Task management command timed-out\n"); |
2365 | err = FAILED; | 2453 | err = FAILED; |
2366 | goto out; | 2454 | goto out; |
2367 | } | 2455 | } |
2368 | clear_bit(free_slot, &hba->tm_condition); | 2456 | clear_bit(free_slot, &hba->tm_condition); |
2369 | err = ufshcd_task_req_compl(hba, free_slot); | 2457 | err = ufshcd_task_req_compl(hba, free_slot); |
2370 | out: | 2458 | out: |
2371 | return err; | 2459 | return err; |
2372 | } | 2460 | } |
2373 | 2461 | ||
2374 | /** | 2462 | /** |
2375 | * ufshcd_device_reset - reset device and abort all the pending commands | 2463 | * ufshcd_device_reset - reset device and abort all the pending commands |
2376 | * @cmd: SCSI command pointer | 2464 | * @cmd: SCSI command pointer |
2377 | * | 2465 | * |
2378 | * Returns SUCCESS/FAILED | 2466 | * Returns SUCCESS/FAILED |
2379 | */ | 2467 | */ |
2380 | static int ufshcd_device_reset(struct scsi_cmnd *cmd) | 2468 | static int ufshcd_device_reset(struct scsi_cmnd *cmd) |
2381 | { | 2469 | { |
2382 | struct Scsi_Host *host; | 2470 | struct Scsi_Host *host; |
2383 | struct ufs_hba *hba; | 2471 | struct ufs_hba *hba; |
2384 | unsigned int tag; | 2472 | unsigned int tag; |
2385 | u32 pos; | 2473 | u32 pos; |
2386 | int err; | 2474 | int err; |
2387 | 2475 | ||
2388 | host = cmd->device->host; | 2476 | host = cmd->device->host; |
2389 | hba = shost_priv(host); | 2477 | hba = shost_priv(host); |
2390 | tag = cmd->request->tag; | 2478 | tag = cmd->request->tag; |
2391 | 2479 | ||
2392 | err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET); | 2480 | err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET); |
2393 | if (err == FAILED) | 2481 | if (err == FAILED) |
2394 | goto out; | 2482 | goto out; |
2395 | 2483 | ||
2396 | for (pos = 0; pos < hba->nutrs; pos++) { | 2484 | for (pos = 0; pos < hba->nutrs; pos++) { |
2397 | if (test_bit(pos, &hba->outstanding_reqs) && | 2485 | if (test_bit(pos, &hba->outstanding_reqs) && |
2398 | (hba->lrb[tag].lun == hba->lrb[pos].lun)) { | 2486 | (hba->lrb[tag].lun == hba->lrb[pos].lun)) { |
2399 | 2487 | ||
2400 | /* clear the respective UTRLCLR register bit */ | 2488 | /* clear the respective UTRLCLR register bit */ |
2401 | ufshcd_utrl_clear(hba, pos); | 2489 | ufshcd_utrl_clear(hba, pos); |
2402 | 2490 | ||
2403 | clear_bit(pos, &hba->outstanding_reqs); | 2491 | clear_bit(pos, &hba->outstanding_reqs); |
2404 | 2492 | ||
2405 | if (hba->lrb[pos].cmd) { | 2493 | if (hba->lrb[pos].cmd) { |
2406 | scsi_dma_unmap(hba->lrb[pos].cmd); | 2494 | scsi_dma_unmap(hba->lrb[pos].cmd); |
2407 | hba->lrb[pos].cmd->result = | 2495 | hba->lrb[pos].cmd->result = |
2408 | DID_ABORT << 16; | 2496 | DID_ABORT << 16; |
2409 | hba->lrb[pos].cmd->scsi_done(cmd); | 2497 | hba->lrb[pos].cmd->scsi_done(cmd); |
2410 | hba->lrb[pos].cmd = NULL; | 2498 | hba->lrb[pos].cmd = NULL; |
2411 | clear_bit_unlock(pos, &hba->lrb_in_use); | 2499 | clear_bit_unlock(pos, &hba->lrb_in_use); |
2412 | wake_up(&hba->dev_cmd.tag_wq); | 2500 | wake_up(&hba->dev_cmd.tag_wq); |
2413 | } | 2501 | } |
2414 | } | 2502 | } |
2415 | } /* end of for */ | 2503 | } /* end of for */ |
2416 | out: | 2504 | out: |
2417 | return err; | 2505 | return err; |
2418 | } | 2506 | } |
2419 | 2507 | ||
2420 | /** | 2508 | /** |
2421 | * ufshcd_host_reset - Main reset function registered with scsi layer | 2509 | * ufshcd_host_reset - Main reset function registered with scsi layer |
2422 | * @cmd: SCSI command pointer | 2510 | * @cmd: SCSI command pointer |
2423 | * | 2511 | * |
2424 | * Returns SUCCESS/FAILED | 2512 | * Returns SUCCESS/FAILED |
2425 | */ | 2513 | */ |
2426 | static int ufshcd_host_reset(struct scsi_cmnd *cmd) | 2514 | static int ufshcd_host_reset(struct scsi_cmnd *cmd) |
2427 | { | 2515 | { |
2428 | struct ufs_hba *hba; | 2516 | struct ufs_hba *hba; |
2429 | 2517 | ||
2430 | hba = shost_priv(cmd->device->host); | 2518 | hba = shost_priv(cmd->device->host); |
2431 | 2519 | ||
2432 | if (hba->ufshcd_state == UFSHCD_STATE_RESET) | 2520 | if (hba->ufshcd_state == UFSHCD_STATE_RESET) |
2433 | return SUCCESS; | 2521 | return SUCCESS; |
2434 | 2522 | ||
2435 | return ufshcd_do_reset(hba); | 2523 | return ufshcd_do_reset(hba); |
2436 | } | 2524 | } |
2437 | 2525 | ||
2438 | /** | 2526 | /** |
2439 | * ufshcd_abort - abort a specific command | 2527 | * ufshcd_abort - abort a specific command |
2440 | * @cmd: SCSI command pointer | 2528 | * @cmd: SCSI command pointer |
2441 | * | 2529 | * |
2442 | * Returns SUCCESS/FAILED | 2530 | * Returns SUCCESS/FAILED |
2443 | */ | 2531 | */ |
2444 | static int ufshcd_abort(struct scsi_cmnd *cmd) | 2532 | static int ufshcd_abort(struct scsi_cmnd *cmd) |
2445 | { | 2533 | { |
2446 | struct Scsi_Host *host; | 2534 | struct Scsi_Host *host; |
2447 | struct ufs_hba *hba; | 2535 | struct ufs_hba *hba; |
2448 | unsigned long flags; | 2536 | unsigned long flags; |
2449 | unsigned int tag; | 2537 | unsigned int tag; |
2450 | int err; | 2538 | int err; |
2451 | 2539 | ||
2452 | host = cmd->device->host; | 2540 | host = cmd->device->host; |
2453 | hba = shost_priv(host); | 2541 | hba = shost_priv(host); |
2454 | tag = cmd->request->tag; | 2542 | tag = cmd->request->tag; |
2455 | 2543 | ||
2456 | spin_lock_irqsave(host->host_lock, flags); | 2544 | spin_lock_irqsave(host->host_lock, flags); |
2457 | 2545 | ||
2458 | /* check if command is still pending */ | 2546 | /* check if command is still pending */ |
2459 | if (!(test_bit(tag, &hba->outstanding_reqs))) { | 2547 | if (!(test_bit(tag, &hba->outstanding_reqs))) { |
2460 | err = FAILED; | 2548 | err = FAILED; |
2461 | spin_unlock_irqrestore(host->host_lock, flags); | 2549 | spin_unlock_irqrestore(host->host_lock, flags); |
2462 | goto out; | 2550 | goto out; |
2463 | } | 2551 | } |
2464 | spin_unlock_irqrestore(host->host_lock, flags); | 2552 | spin_unlock_irqrestore(host->host_lock, flags); |
2465 | 2553 | ||
2466 | err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK); | 2554 | err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK); |
2467 | if (err == FAILED) | 2555 | if (err == FAILED) |
2468 | goto out; | 2556 | goto out; |
2469 | 2557 | ||
2470 | scsi_dma_unmap(cmd); | 2558 | scsi_dma_unmap(cmd); |
2471 | 2559 | ||
2472 | spin_lock_irqsave(host->host_lock, flags); | 2560 | spin_lock_irqsave(host->host_lock, flags); |
2473 | 2561 | ||
2474 | /* clear the respective UTRLCLR register bit */ | 2562 | /* clear the respective UTRLCLR register bit */ |
2475 | ufshcd_utrl_clear(hba, tag); | 2563 | ufshcd_utrl_clear(hba, tag); |
2476 | 2564 | ||
2477 | __clear_bit(tag, &hba->outstanding_reqs); | 2565 | __clear_bit(tag, &hba->outstanding_reqs); |
2478 | hba->lrb[tag].cmd = NULL; | 2566 | hba->lrb[tag].cmd = NULL; |
2479 | spin_unlock_irqrestore(host->host_lock, flags); | 2567 | spin_unlock_irqrestore(host->host_lock, flags); |
2480 | 2568 | ||
2481 | clear_bit_unlock(tag, &hba->lrb_in_use); | 2569 | clear_bit_unlock(tag, &hba->lrb_in_use); |
2482 | wake_up(&hba->dev_cmd.tag_wq); | 2570 | wake_up(&hba->dev_cmd.tag_wq); |
2483 | out: | 2571 | out: |
2484 | return err; | 2572 | return err; |
2485 | } | 2573 | } |
2486 | 2574 | ||
2487 | /** | 2575 | /** |
2488 | * ufshcd_async_scan - asynchronous execution for link startup | 2576 | * ufshcd_async_scan - asynchronous execution for link startup |
2489 | * @data: data pointer to pass to this function | 2577 | * @data: data pointer to pass to this function |
2490 | * @cookie: cookie data | 2578 | * @cookie: cookie data |
2491 | */ | 2579 | */ |
2492 | static void ufshcd_async_scan(void *data, async_cookie_t cookie) | 2580 | static void ufshcd_async_scan(void *data, async_cookie_t cookie) |
2493 | { | 2581 | { |
2494 | struct ufs_hba *hba = (struct ufs_hba *)data; | 2582 | struct ufs_hba *hba = (struct ufs_hba *)data; |
2495 | int ret; | 2583 | int ret; |
2496 | 2584 | ||
2497 | ret = ufshcd_link_startup(hba); | 2585 | ret = ufshcd_link_startup(hba); |
2498 | if (ret) | 2586 | if (ret) |
2499 | goto out; | 2587 | goto out; |
2500 | 2588 | ||
2501 | ret = ufshcd_verify_dev_init(hba); | 2589 | ret = ufshcd_verify_dev_init(hba); |
2502 | if (ret) | 2590 | if (ret) |
2503 | goto out; | 2591 | goto out; |
2504 | 2592 | ||
2505 | ret = ufshcd_complete_dev_init(hba); | 2593 | ret = ufshcd_complete_dev_init(hba); |
2506 | if (ret) | 2594 | if (ret) |
2507 | goto out; | 2595 | goto out; |
2508 | 2596 | ||
2509 | ufshcd_force_reset_auto_bkops(hba); | 2597 | ufshcd_force_reset_auto_bkops(hba); |
2510 | scsi_scan_host(hba->host); | 2598 | scsi_scan_host(hba->host); |
2511 | pm_runtime_put_sync(hba->dev); | 2599 | pm_runtime_put_sync(hba->dev); |
2512 | out: | 2600 | out: |
2513 | return; | 2601 | return; |
2514 | } | 2602 | } |
2515 | 2603 | ||
2516 | static struct scsi_host_template ufshcd_driver_template = { | 2604 | static struct scsi_host_template ufshcd_driver_template = { |
2517 | .module = THIS_MODULE, | 2605 | .module = THIS_MODULE, |
2518 | .name = UFSHCD, | 2606 | .name = UFSHCD, |
2519 | .proc_name = UFSHCD, | 2607 | .proc_name = UFSHCD, |
2520 | .queuecommand = ufshcd_queuecommand, | 2608 | .queuecommand = ufshcd_queuecommand, |
2521 | .slave_alloc = ufshcd_slave_alloc, | 2609 | .slave_alloc = ufshcd_slave_alloc, |
2522 | .slave_destroy = ufshcd_slave_destroy, | 2610 | .slave_destroy = ufshcd_slave_destroy, |
2523 | .eh_abort_handler = ufshcd_abort, | 2611 | .eh_abort_handler = ufshcd_abort, |
2524 | .eh_device_reset_handler = ufshcd_device_reset, | 2612 | .eh_device_reset_handler = ufshcd_device_reset, |
2525 | .eh_host_reset_handler = ufshcd_host_reset, | 2613 | .eh_host_reset_handler = ufshcd_host_reset, |
2526 | .this_id = -1, | 2614 | .this_id = -1, |
2527 | .sg_tablesize = SG_ALL, | 2615 | .sg_tablesize = SG_ALL, |
2528 | .cmd_per_lun = UFSHCD_CMD_PER_LUN, | 2616 | .cmd_per_lun = UFSHCD_CMD_PER_LUN, |
2529 | .can_queue = UFSHCD_CAN_QUEUE, | 2617 | .can_queue = UFSHCD_CAN_QUEUE, |
2530 | }; | 2618 | }; |
2531 | 2619 | ||
2532 | /** | 2620 | /** |
2533 | * ufshcd_suspend - suspend power management function | 2621 | * ufshcd_suspend - suspend power management function |
2534 | * @hba: per adapter instance | 2622 | * @hba: per adapter instance |
2535 | * @state: power state | 2623 | * @state: power state |
2536 | * | 2624 | * |
2537 | * Returns -ENOSYS | 2625 | * Returns -ENOSYS |
2538 | */ | 2626 | */ |
2539 | int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state) | 2627 | int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state) |
2540 | { | 2628 | { |
2541 | /* | 2629 | /* |
2542 | * TODO: | 2630 | * TODO: |
2543 | * 1. Block SCSI requests from SCSI midlayer | 2631 | * 1. Block SCSI requests from SCSI midlayer |
2544 | * 2. Change the internal driver state to non operational | 2632 | * 2. Change the internal driver state to non operational |
2545 | * 3. Set UTRLRSR and UTMRLRSR bits to zero | 2633 | * 3. Set UTRLRSR and UTMRLRSR bits to zero |
2546 | * 4. Wait until outstanding commands are completed | 2634 | * 4. Wait until outstanding commands are completed |
2547 | * 5. Set HCE to zero to send the UFS host controller to reset state | 2635 | * 5. Set HCE to zero to send the UFS host controller to reset state |
2548 | */ | 2636 | */ |
2549 | 2637 | ||
2550 | return -ENOSYS; | 2638 | return -ENOSYS; |
2551 | } | 2639 | } |
2552 | EXPORT_SYMBOL_GPL(ufshcd_suspend); | 2640 | EXPORT_SYMBOL_GPL(ufshcd_suspend); |
2553 | 2641 | ||
2554 | /** | 2642 | /** |
2555 | * ufshcd_resume - resume power management function | 2643 | * ufshcd_resume - resume power management function |
2556 | * @hba: per adapter instance | 2644 | * @hba: per adapter instance |
2557 | * | 2645 | * |
2558 | * Returns -ENOSYS | 2646 | * Returns -ENOSYS |
2559 | */ | 2647 | */ |
2560 | int ufshcd_resume(struct ufs_hba *hba) | 2648 | int ufshcd_resume(struct ufs_hba *hba) |
2561 | { | 2649 | { |
2562 | /* | 2650 | /* |
2563 | * TODO: | 2651 | * TODO: |
2564 | * 1. Set HCE to 1, to start the UFS host controller | 2652 | * 1. Set HCE to 1, to start the UFS host controller |
2565 | * initialization process | 2653 | * initialization process |
2566 | * 2. Set UTRLRSR and UTMRLRSR bits to 1 | 2654 | * 2. Set UTRLRSR and UTMRLRSR bits to 1 |
2567 | * 3. Change the internal driver state to operational | 2655 | * 3. Change the internal driver state to operational |
2568 | * 4. Unblock SCSI requests from SCSI midlayer | 2656 | * 4. Unblock SCSI requests from SCSI midlayer |
2569 | */ | 2657 | */ |
2570 | 2658 | ||
2571 | return -ENOSYS; | 2659 | return -ENOSYS; |
2572 | } | 2660 | } |
2573 | EXPORT_SYMBOL_GPL(ufshcd_resume); | 2661 | EXPORT_SYMBOL_GPL(ufshcd_resume); |
2574 | 2662 | ||
/**
 * ufshcd_runtime_suspend - runtime PM suspend callback
 * @hba: per adapter instance (may be NULL)
 *
 * The device is idle with no requests in the queue, so allow it to
 * carry out background operations while suspended.
 *
 * Returns 0 when @hba is NULL, otherwise the result of enabling
 * auto background operations.
 */
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
	return hba ? ufshcd_enable_auto_bkops(hba) : 0;
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);
2587 | 2675 | ||
/**
 * ufshcd_runtime_resume - runtime PM resume callback
 * @hba: per adapter instance (may be NULL)
 *
 * Stops the background operations that were permitted while suspended.
 *
 * Returns 0 when @hba is NULL, otherwise the result of disabling
 * auto background operations.
 */
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
	return hba ? ufshcd_disable_auto_bkops(hba) : 0;
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
2596 | 2684 | ||
/**
 * ufshcd_runtime_idle - runtime PM idle callback
 * @hba: per adapter instance
 *
 * Nothing to do; returning 0 lets runtime PM proceed to suspend.
 */
int ufshcd_runtime_idle(struct ufs_hba *hba)
{
	return 0;
}
EXPORT_SYMBOL(ufshcd_runtime_idle);
2602 | 2690 | ||
2603 | /** | 2691 | /** |
2604 | * ufshcd_remove - de-allocate SCSI host and host memory space | 2692 | * ufshcd_remove - de-allocate SCSI host and host memory space |
2605 | * data structure memory | 2693 | * data structure memory |
2606 | * @hba - per adapter instance | 2694 | * @hba - per adapter instance |
2607 | */ | 2695 | */ |
2608 | void ufshcd_remove(struct ufs_hba *hba) | 2696 | void ufshcd_remove(struct ufs_hba *hba) |
2609 | { | 2697 | { |
2610 | scsi_remove_host(hba->host); | 2698 | scsi_remove_host(hba->host); |
2611 | /* disable interrupts */ | 2699 | /* disable interrupts */ |
2612 | ufshcd_disable_intr(hba, hba->intr_mask); | 2700 | ufshcd_disable_intr(hba, hba->intr_mask); |
2613 | ufshcd_hba_stop(hba); | 2701 | ufshcd_hba_stop(hba); |
2614 | 2702 | ||
2615 | scsi_host_put(hba->host); | 2703 | scsi_host_put(hba->host); |
2616 | } | 2704 | } |
2617 | EXPORT_SYMBOL_GPL(ufshcd_remove); | 2705 | EXPORT_SYMBOL_GPL(ufshcd_remove); |
2618 | 2706 | ||
2619 | /** | 2707 | /** |
2620 | * ufshcd_init - Driver initialization routine | 2708 | * ufshcd_init - Driver initialization routine |
2621 | * @dev: pointer to device handle | 2709 | * @dev: pointer to device handle |
2622 | * @hba_handle: driver private handle | 2710 | * @hba_handle: driver private handle |
2623 | * @mmio_base: base register address | 2711 | * @mmio_base: base register address |
2624 | * @irq: Interrupt line of device | 2712 | * @irq: Interrupt line of device |
2625 | * Returns 0 on success, non-zero value on failure | 2713 | * Returns 0 on success, non-zero value on failure |
2626 | */ | 2714 | */ |
2627 | int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, | 2715 | int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, |
2628 | void __iomem *mmio_base, unsigned int irq) | 2716 | void __iomem *mmio_base, unsigned int irq) |
2629 | { | 2717 | { |
2630 | struct Scsi_Host *host; | 2718 | struct Scsi_Host *host; |
2631 | struct ufs_hba *hba; | 2719 | struct ufs_hba *hba; |
2632 | int err; | 2720 | int err; |
2633 | 2721 | ||
2634 | if (!dev) { | 2722 | if (!dev) { |
2635 | dev_err(dev, | 2723 | dev_err(dev, |
2636 | "Invalid memory reference for dev is NULL\n"); | 2724 | "Invalid memory reference for dev is NULL\n"); |
2637 | err = -ENODEV; | 2725 | err = -ENODEV; |
2638 | goto out_error; | 2726 | goto out_error; |
2639 | } | 2727 | } |
2640 | 2728 | ||
2641 | if (!mmio_base) { | 2729 | if (!mmio_base) { |
2642 | dev_err(dev, | 2730 | dev_err(dev, |
2643 | "Invalid memory reference for mmio_base is NULL\n"); | 2731 | "Invalid memory reference for mmio_base is NULL\n"); |
2644 | err = -ENODEV; | 2732 | err = -ENODEV; |
2645 | goto out_error; | 2733 | goto out_error; |
2646 | } | 2734 | } |
2647 | 2735 | ||
2648 | host = scsi_host_alloc(&ufshcd_driver_template, | 2736 | host = scsi_host_alloc(&ufshcd_driver_template, |
2649 | sizeof(struct ufs_hba)); | 2737 | sizeof(struct ufs_hba)); |
2650 | if (!host) { | 2738 | if (!host) { |
2651 | dev_err(dev, "scsi_host_alloc failed\n"); | 2739 | dev_err(dev, "scsi_host_alloc failed\n"); |
2652 | err = -ENOMEM; | 2740 | err = -ENOMEM; |
2653 | goto out_error; | 2741 | goto out_error; |
2654 | } | 2742 | } |
2655 | hba = shost_priv(host); | 2743 | hba = shost_priv(host); |
2656 | hba->host = host; | 2744 | hba->host = host; |
2657 | hba->dev = dev; | 2745 | hba->dev = dev; |
2658 | hba->mmio_base = mmio_base; | 2746 | hba->mmio_base = mmio_base; |
2659 | hba->irq = irq; | 2747 | hba->irq = irq; |
2660 | 2748 | ||
2661 | /* Read capabilities registers */ | 2749 | /* Read capabilities registers */ |
2662 | ufshcd_hba_capabilities(hba); | 2750 | ufshcd_hba_capabilities(hba); |
2663 | 2751 | ||
2664 | /* Get UFS version supported by the controller */ | 2752 | /* Get UFS version supported by the controller */ |
2665 | hba->ufs_version = ufshcd_get_ufs_version(hba); | 2753 | hba->ufs_version = ufshcd_get_ufs_version(hba); |
2666 | 2754 | ||
2667 | /* Get Interrupt bit mask per version */ | 2755 | /* Get Interrupt bit mask per version */ |
2668 | hba->intr_mask = ufshcd_get_intr_mask(hba); | 2756 | hba->intr_mask = ufshcd_get_intr_mask(hba); |
2669 | 2757 | ||
2670 | /* Allocate memory for host memory space */ | 2758 | /* Allocate memory for host memory space */ |
2671 | err = ufshcd_memory_alloc(hba); | 2759 | err = ufshcd_memory_alloc(hba); |
2672 | if (err) { | 2760 | if (err) { |
2673 | dev_err(hba->dev, "Memory allocation failed\n"); | 2761 | dev_err(hba->dev, "Memory allocation failed\n"); |
2674 | goto out_disable; | 2762 | goto out_disable; |
2675 | } | 2763 | } |
2676 | 2764 | ||
2677 | /* Configure LRB */ | 2765 | /* Configure LRB */ |
2678 | ufshcd_host_memory_configure(hba); | 2766 | ufshcd_host_memory_configure(hba); |
2679 | 2767 | ||
2680 | host->can_queue = hba->nutrs; | 2768 | host->can_queue = hba->nutrs; |
2681 | host->cmd_per_lun = hba->nutrs; | 2769 | host->cmd_per_lun = hba->nutrs; |
2682 | host->max_id = UFSHCD_MAX_ID; | 2770 | host->max_id = UFSHCD_MAX_ID; |
2683 | host->max_lun = UFSHCD_MAX_LUNS; | 2771 | host->max_lun = UFSHCD_MAX_LUNS; |
2684 | host->max_channel = UFSHCD_MAX_CHANNEL; | 2772 | host->max_channel = UFSHCD_MAX_CHANNEL; |
2685 | host->unique_id = host->host_no; | 2773 | host->unique_id = host->host_no; |
2686 | host->max_cmd_len = MAX_CDB_SIZE; | 2774 | host->max_cmd_len = MAX_CDB_SIZE; |
2687 | 2775 | ||
2688 | /* Initailize wait queue for task management */ | 2776 | /* Initailize wait queue for task management */ |
2689 | init_waitqueue_head(&hba->ufshcd_tm_wait_queue); | 2777 | init_waitqueue_head(&hba->ufshcd_tm_wait_queue); |
2690 | 2778 | ||
2691 | /* Initialize work queues */ | 2779 | /* Initialize work queues */ |
2692 | INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler); | 2780 | INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler); |
2693 | INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); | 2781 | INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); |
2694 | 2782 | ||
2695 | /* Initialize UIC command mutex */ | 2783 | /* Initialize UIC command mutex */ |
2696 | mutex_init(&hba->uic_cmd_mutex); | 2784 | mutex_init(&hba->uic_cmd_mutex); |
2697 | 2785 | ||
2698 | /* Initialize mutex for device management commands */ | 2786 | /* Initialize mutex for device management commands */ |
2699 | mutex_init(&hba->dev_cmd.lock); | 2787 | mutex_init(&hba->dev_cmd.lock); |
2700 | 2788 | ||
2701 | /* Initialize device management tag acquire wait queue */ | 2789 | /* Initialize device management tag acquire wait queue */ |
2702 | init_waitqueue_head(&hba->dev_cmd.tag_wq); | 2790 | init_waitqueue_head(&hba->dev_cmd.tag_wq); |
2703 | 2791 | ||
2704 | /* IRQ registration */ | 2792 | /* IRQ registration */ |
2705 | err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); | 2793 | err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); |
2706 | if (err) { | 2794 | if (err) { |
2707 | dev_err(hba->dev, "request irq failed\n"); | 2795 | dev_err(hba->dev, "request irq failed\n"); |
2708 | goto out_disable; | 2796 | goto out_disable; |
2709 | } | 2797 | } |
2710 | 2798 | ||
2711 | /* Enable SCSI tag mapping */ | 2799 | /* Enable SCSI tag mapping */ |
2712 | err = scsi_init_shared_tag_map(host, host->can_queue); | 2800 | err = scsi_init_shared_tag_map(host, host->can_queue); |
2713 | if (err) { | 2801 | if (err) { |
2714 | dev_err(hba->dev, "init shared queue failed\n"); | 2802 | dev_err(hba->dev, "init shared queue failed\n"); |
2715 | goto out_disable; | 2803 | goto out_disable; |
2716 | } | 2804 | } |
2717 | 2805 | ||
2718 | err = scsi_add_host(host, hba->dev); | 2806 | err = scsi_add_host(host, hba->dev); |
2719 | if (err) { | 2807 | if (err) { |
2720 | dev_err(hba->dev, "scsi_add_host failed\n"); | 2808 | dev_err(hba->dev, "scsi_add_host failed\n"); |
2721 | goto out_disable; | 2809 | goto out_disable; |
2722 | } | 2810 | } |
2723 | 2811 | ||
2724 | /* Host controller enable */ | 2812 | /* Host controller enable */ |
2725 | err = ufshcd_hba_enable(hba); | 2813 | err = ufshcd_hba_enable(hba); |
2726 | if (err) { | 2814 | if (err) { |
2727 | dev_err(hba->dev, "Host controller enable failed\n"); | 2815 | dev_err(hba->dev, "Host controller enable failed\n"); |
2728 | goto out_remove_scsi_host; | 2816 | goto out_remove_scsi_host; |
2729 | } | 2817 | } |
2730 | 2818 | ||
2731 | *hba_handle = hba; | 2819 | *hba_handle = hba; |
2732 | 2820 | ||
2733 | /* Hold auto suspend until async scan completes */ | 2821 | /* Hold auto suspend until async scan completes */ |
2734 | pm_runtime_get_sync(dev); | 2822 | pm_runtime_get_sync(dev); |
2735 | 2823 | ||
2736 | async_schedule(ufshcd_async_scan, hba); | 2824 | async_schedule(ufshcd_async_scan, hba); |
2737 | 2825 | ||
2738 | return 0; | 2826 | return 0; |
2739 | 2827 | ||
2740 | out_remove_scsi_host: | 2828 | out_remove_scsi_host: |
2741 | scsi_remove_host(hba->host); | 2829 | scsi_remove_host(hba->host); |
2742 | out_disable: | 2830 | out_disable: |
2743 | scsi_host_put(host); | 2831 | scsi_host_put(host); |
2744 | out_error: | 2832 | out_error: |
2745 | return err; | 2833 | return err; |
2746 | } | 2834 | } |
2747 | EXPORT_SYMBOL_GPL(ufshcd_init); | 2835 | EXPORT_SYMBOL_GPL(ufshcd_init); |
2748 | 2836 | ||
2749 | MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>"); | 2837 | MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>"); |
2750 | MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>"); | 2838 | MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>"); |
2751 | MODULE_DESCRIPTION("Generic UFS host controller driver Core"); | 2839 | MODULE_DESCRIPTION("Generic UFS host controller driver Core"); |
2752 | MODULE_LICENSE("GPL"); | 2840 | MODULE_LICENSE("GPL"); |
2753 | MODULE_VERSION(UFSHCD_DRIVER_VERSION); | 2841 | MODULE_VERSION(UFSHCD_DRIVER_VERSION); |
2754 | 2842 |
drivers/scsi/ufs/ufshcd.h
1 | /* | 1 | /* |
2 | * Universal Flash Storage Host controller driver | 2 | * Universal Flash Storage Host controller driver |
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/ufs/ufshcd.h | 4 | * This code is based on drivers/scsi/ufs/ufshcd.h |
5 | * Copyright (C) 2011-2013 Samsung India Software Operations | 5 | * Copyright (C) 2011-2013 Samsung India Software Operations |
6 | * | 6 | * |
7 | * Authors: | 7 | * Authors: |
8 | * Santosh Yaraganavi <santosh.sy@samsung.com> | 8 | * Santosh Yaraganavi <santosh.sy@samsung.com> |
9 | * Vinayak Holikatti <h.vinayak@samsung.com> | 9 | * Vinayak Holikatti <h.vinayak@samsung.com> |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or | 11 | * This program is free software; you can redistribute it and/or |
12 | * modify it under the terms of the GNU General Public License | 12 | * modify it under the terms of the GNU General Public License |
13 | * as published by the Free Software Foundation; either version 2 | 13 | * as published by the Free Software Foundation; either version 2 |
14 | * of the License, or (at your option) any later version. | 14 | * of the License, or (at your option) any later version. |
15 | * See the COPYING file in the top-level directory or visit | 15 | * See the COPYING file in the top-level directory or visit |
16 | * <http://www.gnu.org/licenses/gpl-2.0.html> | 16 | * <http://www.gnu.org/licenses/gpl-2.0.html> |
17 | * | 17 | * |
18 | * This program is distributed in the hope that it will be useful, | 18 | * This program is distributed in the hope that it will be useful, |
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
21 | * GNU General Public License for more details. | 21 | * GNU General Public License for more details. |
22 | * | 22 | * |
23 | * This program is provided "AS IS" and "WITH ALL FAULTS" and | 23 | * This program is provided "AS IS" and "WITH ALL FAULTS" and |
24 | * without warranty of any kind. You are solely responsible for | 24 | * without warranty of any kind. You are solely responsible for |
25 | * determining the appropriateness of using and distributing | 25 | * determining the appropriateness of using and distributing |
26 | * the program and assume all risks associated with your exercise | 26 | * the program and assume all risks associated with your exercise |
27 | * of rights with respect to the program, including but not limited | 27 | * of rights with respect to the program, including but not limited |
28 | * to infringement of third party rights, the risks and costs of | 28 | * to infringement of third party rights, the risks and costs of |
29 | * program errors, damage to or loss of data, programs or equipment, | 29 | * program errors, damage to or loss of data, programs or equipment, |
30 | * and unavailability or interruption of operations. Under no | 30 | * and unavailability or interruption of operations. Under no |
31 | * circumstances will the contributor of this Program be liable for | 31 | * circumstances will the contributor of this Program be liable for |
32 | * any damages of any kind arising from your use or distribution of | 32 | * any damages of any kind arising from your use or distribution of |
33 | * this program. | 33 | * this program. |
34 | */ | 34 | */ |
35 | 35 | ||
36 | #ifndef _UFSHCD_H | 36 | #ifndef _UFSHCD_H |
37 | #define _UFSHCD_H | 37 | #define _UFSHCD_H |
38 | 38 | ||
39 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | #include <linux/kernel.h> | 40 | #include <linux/kernel.h> |
41 | #include <linux/init.h> | 41 | #include <linux/init.h> |
42 | #include <linux/interrupt.h> | 42 | #include <linux/interrupt.h> |
43 | #include <linux/io.h> | 43 | #include <linux/io.h> |
44 | #include <linux/delay.h> | 44 | #include <linux/delay.h> |
45 | #include <linux/slab.h> | 45 | #include <linux/slab.h> |
46 | #include <linux/spinlock.h> | 46 | #include <linux/spinlock.h> |
47 | #include <linux/workqueue.h> | 47 | #include <linux/workqueue.h> |
48 | #include <linux/errno.h> | 48 | #include <linux/errno.h> |
49 | #include <linux/types.h> | 49 | #include <linux/types.h> |
50 | #include <linux/wait.h> | 50 | #include <linux/wait.h> |
51 | #include <linux/bitops.h> | 51 | #include <linux/bitops.h> |
52 | #include <linux/pm_runtime.h> | 52 | #include <linux/pm_runtime.h> |
53 | #include <linux/clk.h> | 53 | #include <linux/clk.h> |
54 | #include <linux/completion.h> | 54 | #include <linux/completion.h> |
55 | 55 | ||
56 | #include <asm/irq.h> | 56 | #include <asm/irq.h> |
57 | #include <asm/byteorder.h> | 57 | #include <asm/byteorder.h> |
58 | #include <scsi/scsi.h> | 58 | #include <scsi/scsi.h> |
59 | #include <scsi/scsi_cmnd.h> | 59 | #include <scsi/scsi_cmnd.h> |
60 | #include <scsi/scsi_host.h> | 60 | #include <scsi/scsi_host.h> |
61 | #include <scsi/scsi_tcq.h> | 61 | #include <scsi/scsi_tcq.h> |
62 | #include <scsi/scsi_dbg.h> | 62 | #include <scsi/scsi_dbg.h> |
63 | #include <scsi/scsi_eh.h> | 63 | #include <scsi/scsi_eh.h> |
64 | 64 | ||
65 | #include "ufs.h" | 65 | #include "ufs.h" |
66 | #include "ufshci.h" | 66 | #include "ufshci.h" |
67 | 67 | ||
/* Driver name, used as SCSI host name, IRQ name and log prefix */
#define UFSHCD "ufshcd"
#define UFSHCD_DRIVER_VERSION "0.2"

/* Kinds of device management (non-SCSI) commands */
enum dev_cmd_type {
	DEV_CMD_TYPE_NOP	= 0x0,
	DEV_CMD_TYPE_QUERY	= 0x1,
};
75 | 75 | ||
76 | /** | 76 | /** |
77 | * struct uic_command - UIC command structure | 77 | * struct uic_command - UIC command structure |
78 | * @command: UIC command | 78 | * @command: UIC command |
79 | * @argument1: UIC command argument 1 | 79 | * @argument1: UIC command argument 1 |
80 | * @argument2: UIC command argument 2 | 80 | * @argument2: UIC command argument 2 |
81 | * @argument3: UIC command argument 3 | 81 | * @argument3: UIC command argument 3 |
82 | * @cmd_active: Indicate if UIC command is outstanding | 82 | * @cmd_active: Indicate if UIC command is outstanding |
83 | * @result: UIC command result | 83 | * @result: UIC command result |
84 | * @done: UIC command completion | 84 | * @done: UIC command completion |
85 | */ | 85 | */ |
86 | struct uic_command { | 86 | struct uic_command { |
87 | u32 command; | 87 | u32 command; |
88 | u32 argument1; | 88 | u32 argument1; |
89 | u32 argument2; | 89 | u32 argument2; |
90 | u32 argument3; | 90 | u32 argument3; |
91 | int cmd_active; | 91 | int cmd_active; |
92 | int result; | 92 | int result; |
93 | struct completion done; | 93 | struct completion done; |
94 | }; | 94 | }; |
95 | 95 | ||
96 | /** | 96 | /** |
97 | * struct ufshcd_lrb - local reference block | 97 | * struct ufshcd_lrb - local reference block |
98 | * @utr_descriptor_ptr: UTRD address of the command | 98 | * @utr_descriptor_ptr: UTRD address of the command |
99 | * @ucd_req_ptr: UCD address of the command | 99 | * @ucd_req_ptr: UCD address of the command |
100 | * @ucd_rsp_ptr: Response UPIU address for this command | 100 | * @ucd_rsp_ptr: Response UPIU address for this command |
101 | * @ucd_prdt_ptr: PRDT address of the command | 101 | * @ucd_prdt_ptr: PRDT address of the command |
102 | * @cmd: pointer to SCSI command | 102 | * @cmd: pointer to SCSI command |
103 | * @sense_buffer: pointer to sense buffer address of the SCSI command | 103 | * @sense_buffer: pointer to sense buffer address of the SCSI command |
104 | * @sense_bufflen: Length of the sense buffer | 104 | * @sense_bufflen: Length of the sense buffer |
105 | * @scsi_status: SCSI status of the command | 105 | * @scsi_status: SCSI status of the command |
106 | * @command_type: SCSI, UFS, Query. | 106 | * @command_type: SCSI, UFS, Query. |
107 | * @task_tag: Task tag of the command | 107 | * @task_tag: Task tag of the command |
108 | * @lun: LUN of the command | 108 | * @lun: LUN of the command |
109 | * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation) | 109 | * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation) |
110 | */ | 110 | */ |
111 | struct ufshcd_lrb { | 111 | struct ufshcd_lrb { |
112 | struct utp_transfer_req_desc *utr_descriptor_ptr; | 112 | struct utp_transfer_req_desc *utr_descriptor_ptr; |
113 | struct utp_upiu_req *ucd_req_ptr; | 113 | struct utp_upiu_req *ucd_req_ptr; |
114 | struct utp_upiu_rsp *ucd_rsp_ptr; | 114 | struct utp_upiu_rsp *ucd_rsp_ptr; |
115 | struct ufshcd_sg_entry *ucd_prdt_ptr; | 115 | struct ufshcd_sg_entry *ucd_prdt_ptr; |
116 | 116 | ||
117 | struct scsi_cmnd *cmd; | 117 | struct scsi_cmnd *cmd; |
118 | u8 *sense_buffer; | 118 | u8 *sense_buffer; |
119 | unsigned int sense_bufflen; | 119 | unsigned int sense_bufflen; |
120 | int scsi_status; | 120 | int scsi_status; |
121 | 121 | ||
122 | int command_type; | 122 | int command_type; |
123 | int task_tag; | 123 | int task_tag; |
124 | unsigned int lun; | 124 | unsigned int lun; |
125 | bool intr_cmd; | 125 | bool intr_cmd; |
126 | }; | 126 | }; |
127 | 127 | ||
128 | /** | 128 | /** |
129 | * struct ufs_query - holds relevent data structures for query request | 129 | * struct ufs_query - holds relevent data structures for query request |
130 | * @request: request upiu and function | 130 | * @request: request upiu and function |
131 | * @descriptor: buffer for sending/receiving descriptor | 131 | * @descriptor: buffer for sending/receiving descriptor |
132 | * @response: response upiu and response | 132 | * @response: response upiu and response |
133 | */ | 133 | */ |
134 | struct ufs_query { | 134 | struct ufs_query { |
135 | struct ufs_query_req request; | 135 | struct ufs_query_req request; |
136 | u8 *descriptor; | 136 | u8 *descriptor; |
137 | struct ufs_query_res response; | 137 | struct ufs_query_res response; |
138 | }; | 138 | }; |
139 | 139 | ||
140 | /** | 140 | /** |
141 | * struct ufs_dev_cmd - all assosiated fields with device management commands | 141 | * struct ufs_dev_cmd - all assosiated fields with device management commands |
142 | * @type: device management command type - Query, NOP OUT | 142 | * @type: device management command type - Query, NOP OUT |
143 | * @lock: lock to allow one command at a time | 143 | * @lock: lock to allow one command at a time |
144 | * @complete: internal commands completion | 144 | * @complete: internal commands completion |
145 | * @tag_wq: wait queue until free command slot is available | 145 | * @tag_wq: wait queue until free command slot is available |
146 | */ | 146 | */ |
147 | struct ufs_dev_cmd { | 147 | struct ufs_dev_cmd { |
148 | enum dev_cmd_type type; | 148 | enum dev_cmd_type type; |
149 | struct mutex lock; | 149 | struct mutex lock; |
150 | struct completion *complete; | 150 | struct completion *complete; |
151 | wait_queue_head_t tag_wq; | 151 | wait_queue_head_t tag_wq; |
152 | struct ufs_query query; | 152 | struct ufs_query query; |
153 | }; | 153 | }; |
154 | 154 | ||
155 | /** | 155 | /** |
156 | * struct ufs_hba - per adapter private structure | 156 | * struct ufs_hba - per adapter private structure |
157 | * @mmio_base: UFSHCI base register address | 157 | * @mmio_base: UFSHCI base register address |
158 | * @ucdl_base_addr: UFS Command Descriptor base address | 158 | * @ucdl_base_addr: UFS Command Descriptor base address |
159 | * @utrdl_base_addr: UTP Transfer Request Descriptor base address | 159 | * @utrdl_base_addr: UTP Transfer Request Descriptor base address |
160 | * @utmrdl_base_addr: UTP Task Management Descriptor base address | 160 | * @utmrdl_base_addr: UTP Task Management Descriptor base address |
161 | * @ucdl_dma_addr: UFS Command Descriptor DMA address | 161 | * @ucdl_dma_addr: UFS Command Descriptor DMA address |
162 | * @utrdl_dma_addr: UTRDL DMA address | 162 | * @utrdl_dma_addr: UTRDL DMA address |
163 | * @utmrdl_dma_addr: UTMRDL DMA address | 163 | * @utmrdl_dma_addr: UTMRDL DMA address |
164 | * @host: Scsi_Host instance of the driver | 164 | * @host: Scsi_Host instance of the driver |
165 | * @dev: device handle | 165 | * @dev: device handle |
166 | * @lrb: local reference block | 166 | * @lrb: local reference block |
167 | * @lrb_in_use: lrb in use | 167 | * @lrb_in_use: lrb in use |
168 | * @outstanding_tasks: Bits representing outstanding task requests | 168 | * @outstanding_tasks: Bits representing outstanding task requests |
169 | * @outstanding_reqs: Bits representing outstanding transfer requests | 169 | * @outstanding_reqs: Bits representing outstanding transfer requests |
170 | * @capabilities: UFS Controller Capabilities | 170 | * @capabilities: UFS Controller Capabilities |
171 | * @nutrs: Transfer Request Queue depth supported by controller | 171 | * @nutrs: Transfer Request Queue depth supported by controller |
172 | * @nutmrs: Task Management Queue depth supported by controller | 172 | * @nutmrs: Task Management Queue depth supported by controller |
173 | * @ufs_version: UFS Version to which controller complies | 173 | * @ufs_version: UFS Version to which controller complies |
174 | * @irq: Irq number of the controller | 174 | * @irq: Irq number of the controller |
175 | * @active_uic_cmd: handle of active UIC command | 175 | * @active_uic_cmd: handle of active UIC command |
176 | * @uic_cmd_mutex: mutex for uic command | 176 | * @uic_cmd_mutex: mutex for uic command |
177 | * @ufshcd_tm_wait_queue: wait queue for task management | 177 | * @ufshcd_tm_wait_queue: wait queue for task management |
178 | * @tm_condition: condition variable for task management | 178 | * @tm_condition: condition variable for task management |
179 | * @ufshcd_state: UFSHCD states | 179 | * @ufshcd_state: UFSHCD states |
180 | * @intr_mask: Interrupt Mask Bits | 180 | * @intr_mask: Interrupt Mask Bits |
181 | * @ee_ctrl_mask: Exception event control mask | 181 | * @ee_ctrl_mask: Exception event control mask |
182 | * @feh_workq: Work queue for fatal controller error handling | 182 | * @feh_workq: Work queue for fatal controller error handling |
183 | * @eeh_work: Worker to handle exception events | 183 | * @eeh_work: Worker to handle exception events |
184 | * @errors: HBA errors | 184 | * @errors: HBA errors |
185 | * @dev_cmd: ufs device management command information | 185 | * @dev_cmd: ufs device management command information |
186 | * @auto_bkops_enabled: to track whether bkops is enabled in device | 186 | * @auto_bkops_enabled: to track whether bkops is enabled in device |
187 | */ | 187 | */ |
188 | struct ufs_hba { | 188 | struct ufs_hba { |
189 | void __iomem *mmio_base; | 189 | void __iomem *mmio_base; |
190 | 190 | ||
191 | /* Virtual memory reference */ | 191 | /* Virtual memory reference */ |
192 | struct utp_transfer_cmd_desc *ucdl_base_addr; | 192 | struct utp_transfer_cmd_desc *ucdl_base_addr; |
193 | struct utp_transfer_req_desc *utrdl_base_addr; | 193 | struct utp_transfer_req_desc *utrdl_base_addr; |
194 | struct utp_task_req_desc *utmrdl_base_addr; | 194 | struct utp_task_req_desc *utmrdl_base_addr; |
195 | 195 | ||
196 | /* DMA memory reference */ | 196 | /* DMA memory reference */ |
197 | dma_addr_t ucdl_dma_addr; | 197 | dma_addr_t ucdl_dma_addr; |
198 | dma_addr_t utrdl_dma_addr; | 198 | dma_addr_t utrdl_dma_addr; |
199 | dma_addr_t utmrdl_dma_addr; | 199 | dma_addr_t utmrdl_dma_addr; |
200 | 200 | ||
201 | struct Scsi_Host *host; | 201 | struct Scsi_Host *host; |
202 | struct device *dev; | 202 | struct device *dev; |
203 | 203 | ||
204 | struct ufshcd_lrb *lrb; | 204 | struct ufshcd_lrb *lrb; |
205 | unsigned long lrb_in_use; | 205 | unsigned long lrb_in_use; |
206 | 206 | ||
207 | unsigned long outstanding_tasks; | 207 | unsigned long outstanding_tasks; |
208 | unsigned long outstanding_reqs; | 208 | unsigned long outstanding_reqs; |
209 | 209 | ||
210 | u32 capabilities; | 210 | u32 capabilities; |
211 | int nutrs; | 211 | int nutrs; |
212 | int nutmrs; | 212 | int nutmrs; |
213 | u32 ufs_version; | 213 | u32 ufs_version; |
214 | unsigned int irq; | 214 | unsigned int irq; |
215 | 215 | ||
216 | struct uic_command *active_uic_cmd; | 216 | struct uic_command *active_uic_cmd; |
217 | struct mutex uic_cmd_mutex; | 217 | struct mutex uic_cmd_mutex; |
218 | 218 | ||
219 | wait_queue_head_t ufshcd_tm_wait_queue; | 219 | wait_queue_head_t ufshcd_tm_wait_queue; |
220 | unsigned long tm_condition; | 220 | unsigned long tm_condition; |
221 | 221 | ||
222 | u32 ufshcd_state; | 222 | u32 ufshcd_state; |
223 | u32 intr_mask; | 223 | u32 intr_mask; |
224 | u16 ee_ctrl_mask; | 224 | u16 ee_ctrl_mask; |
225 | 225 | ||
226 | /* Work Queues */ | 226 | /* Work Queues */ |
227 | struct work_struct feh_workq; | 227 | struct work_struct feh_workq; |
228 | struct work_struct eeh_work; | 228 | struct work_struct eeh_work; |
229 | 229 | ||
230 | /* HBA Errors */ | 230 | /* HBA Errors */ |
231 | u32 errors; | 231 | u32 errors; |
232 | 232 | ||
233 | /* Device management request data */ | 233 | /* Device management request data */ |
234 | struct ufs_dev_cmd dev_cmd; | 234 | struct ufs_dev_cmd dev_cmd; |
235 | 235 | ||
236 | bool auto_bkops_enabled; | 236 | bool auto_bkops_enabled; |
237 | }; | 237 | }; |
238 | 238 | ||
239 | #define ufshcd_writel(hba, val, reg) \ | 239 | #define ufshcd_writel(hba, val, reg) \ |
240 | writel((val), (hba)->mmio_base + (reg)) | 240 | writel((val), (hba)->mmio_base + (reg)) |
241 | #define ufshcd_readl(hba, reg) \ | 241 | #define ufshcd_readl(hba, reg) \ |
242 | readl((hba)->mmio_base + (reg)) | 242 | readl((hba)->mmio_base + (reg)) |
243 | 243 | ||
244 | int ufshcd_init(struct device *, struct ufs_hba ** , void __iomem * , | 244 | int ufshcd_init(struct device *, struct ufs_hba ** , void __iomem * , |
245 | unsigned int); | 245 | unsigned int); |
246 | void ufshcd_remove(struct ufs_hba *); | 246 | void ufshcd_remove(struct ufs_hba *); |
247 | 247 | ||
248 | /** | 248 | /** |
249 | * ufshcd_hba_stop - Send controller to reset state | 249 | * ufshcd_hba_stop - Send controller to reset state |
250 | * @hba: per adapter instance | 250 | * @hba: per adapter instance |
251 | */ | 251 | */ |
252 | static inline void ufshcd_hba_stop(struct ufs_hba *hba) | 252 | static inline void ufshcd_hba_stop(struct ufs_hba *hba) |
253 | { | 253 | { |
254 | ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); | 254 | ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); |
255 | } | 255 | } |
256 | 256 | ||
257 | static inline void check_upiu_size(void) | 257 | static inline void check_upiu_size(void) |
258 | { | 258 | { |
259 | BUILD_BUG_ON(ALIGNED_UPIU_SIZE < | 259 | BUILD_BUG_ON(ALIGNED_UPIU_SIZE < |
260 | GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE); | 260 | GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE); |
261 | } | 261 | } |
262 | 262 | ||
263 | extern int ufshcd_runtime_suspend(struct ufs_hba *hba); | 263 | extern int ufshcd_runtime_suspend(struct ufs_hba *hba); |
264 | extern int ufshcd_runtime_resume(struct ufs_hba *hba); | 264 | extern int ufshcd_runtime_resume(struct ufs_hba *hba); |
265 | extern int ufshcd_runtime_idle(struct ufs_hba *hba); | 265 | extern int ufshcd_runtime_idle(struct ufs_hba *hba); |
266 | extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, | ||
267 | u8 attr_set, u32 mib_val, u8 peer); | ||
268 | extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, | ||
269 | u32 *mib_val, u8 peer); | ||
270 | |||
271 | /* UIC command interfaces for DME primitives */ | ||
272 | #define DME_LOCAL 0 | ||
273 | #define DME_PEER 1 | ||
274 | #define ATTR_SET_NOR 0 /* NORMAL */ | ||
275 | #define ATTR_SET_ST 1 /* STATIC */ | ||
276 | |||
277 | static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel, | ||
278 | u32 mib_val) | ||
279 | { | ||
280 | return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR, | ||
281 | mib_val, DME_LOCAL); | ||
282 | } | ||
283 | |||
284 | static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel, | ||
285 | u32 mib_val) | ||
286 | { | ||
287 | return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST, | ||
288 | mib_val, DME_LOCAL); | ||
289 | } | ||
290 | |||
291 | static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel, | ||
292 | u32 mib_val) | ||
293 | { | ||
294 | return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR, | ||
295 | mib_val, DME_PEER); | ||
296 | } | ||
297 | |||
298 | static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel, | ||
299 | u32 mib_val) | ||
300 | { | ||
301 | return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST, | ||
302 | mib_val, DME_PEER); | ||
303 | } | ||
304 | |||
305 | static inline int ufshcd_dme_get(struct ufs_hba *hba, | ||
306 | u32 attr_sel, u32 *mib_val) | ||
307 | { | ||
308 | return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL); | ||
309 | } | ||
310 | |||
311 | static inline int ufshcd_dme_peer_get(struct ufs_hba *hba, | ||
312 | u32 attr_sel, u32 *mib_val) | ||
313 | { | ||
314 | return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER); | ||
315 | } | ||
316 | |||
266 | #endif /* End of Header */ | 317 | #endif /* End of Header */ |
267 | 318 |
drivers/scsi/ufs/ufshci.h
1 | /* | 1 | /* |
2 | * Universal Flash Storage Host controller driver | 2 | * Universal Flash Storage Host controller driver |
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/ufs/ufshci.h | 4 | * This code is based on drivers/scsi/ufs/ufshci.h |
5 | * Copyright (C) 2011-2013 Samsung India Software Operations | 5 | * Copyright (C) 2011-2013 Samsung India Software Operations |
6 | * | 6 | * |
7 | * Authors: | 7 | * Authors: |
8 | * Santosh Yaraganavi <santosh.sy@samsung.com> | 8 | * Santosh Yaraganavi <santosh.sy@samsung.com> |
9 | * Vinayak Holikatti <h.vinayak@samsung.com> | 9 | * Vinayak Holikatti <h.vinayak@samsung.com> |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or | 11 | * This program is free software; you can redistribute it and/or |
12 | * modify it under the terms of the GNU General Public License | 12 | * modify it under the terms of the GNU General Public License |
13 | * as published by the Free Software Foundation; either version 2 | 13 | * as published by the Free Software Foundation; either version 2 |
14 | * of the License, or (at your option) any later version. | 14 | * of the License, or (at your option) any later version. |
15 | * See the COPYING file in the top-level directory or visit | 15 | * See the COPYING file in the top-level directory or visit |
16 | * <http://www.gnu.org/licenses/gpl-2.0.html> | 16 | * <http://www.gnu.org/licenses/gpl-2.0.html> |
17 | * | 17 | * |
18 | * This program is distributed in the hope that it will be useful, | 18 | * This program is distributed in the hope that it will be useful, |
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
21 | * GNU General Public License for more details. | 21 | * GNU General Public License for more details. |
22 | * | 22 | * |
23 | * This program is provided "AS IS" and "WITH ALL FAULTS" and | 23 | * This program is provided "AS IS" and "WITH ALL FAULTS" and |
24 | * without warranty of any kind. You are solely responsible for | 24 | * without warranty of any kind. You are solely responsible for |
25 | * determining the appropriateness of using and distributing | 25 | * determining the appropriateness of using and distributing |
26 | * the program and assume all risks associated with your exercise | 26 | * the program and assume all risks associated with your exercise |
27 | * of rights with respect to the program, including but not limited | 27 | * of rights with respect to the program, including but not limited |
28 | * to infringement of third party rights, the risks and costs of | 28 | * to infringement of third party rights, the risks and costs of |
29 | * program errors, damage to or loss of data, programs or equipment, | 29 | * program errors, damage to or loss of data, programs or equipment, |
30 | * and unavailability or interruption of operations. Under no | 30 | * and unavailability or interruption of operations. Under no |
31 | * circumstances will the contributor of this Program be liable for | 31 | * circumstances will the contributor of this Program be liable for |
32 | * any damages of any kind arising from your use or distribution of | 32 | * any damages of any kind arising from your use or distribution of |
33 | * this program. | 33 | * this program. |
34 | */ | 34 | */ |
35 | 35 | ||
36 | #ifndef _UFSHCI_H | 36 | #ifndef _UFSHCI_H |
37 | #define _UFSHCI_H | 37 | #define _UFSHCI_H |
38 | 38 | ||
39 | enum { | 39 | enum { |
40 | TASK_REQ_UPIU_SIZE_DWORDS = 8, | 40 | TASK_REQ_UPIU_SIZE_DWORDS = 8, |
41 | TASK_RSP_UPIU_SIZE_DWORDS = 8, | 41 | TASK_RSP_UPIU_SIZE_DWORDS = 8, |
42 | ALIGNED_UPIU_SIZE = 512, | 42 | ALIGNED_UPIU_SIZE = 512, |
43 | }; | 43 | }; |
44 | 44 | ||
45 | /* UFSHCI Registers */ | 45 | /* UFSHCI Registers */ |
46 | enum { | 46 | enum { |
47 | REG_CONTROLLER_CAPABILITIES = 0x00, | 47 | REG_CONTROLLER_CAPABILITIES = 0x00, |
48 | REG_UFS_VERSION = 0x08, | 48 | REG_UFS_VERSION = 0x08, |
49 | REG_CONTROLLER_DEV_ID = 0x10, | 49 | REG_CONTROLLER_DEV_ID = 0x10, |
50 | REG_CONTROLLER_PROD_ID = 0x14, | 50 | REG_CONTROLLER_PROD_ID = 0x14, |
51 | REG_INTERRUPT_STATUS = 0x20, | 51 | REG_INTERRUPT_STATUS = 0x20, |
52 | REG_INTERRUPT_ENABLE = 0x24, | 52 | REG_INTERRUPT_ENABLE = 0x24, |
53 | REG_CONTROLLER_STATUS = 0x30, | 53 | REG_CONTROLLER_STATUS = 0x30, |
54 | REG_CONTROLLER_ENABLE = 0x34, | 54 | REG_CONTROLLER_ENABLE = 0x34, |
55 | REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER = 0x38, | 55 | REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER = 0x38, |
56 | REG_UIC_ERROR_CODE_DATA_LINK_LAYER = 0x3C, | 56 | REG_UIC_ERROR_CODE_DATA_LINK_LAYER = 0x3C, |
57 | REG_UIC_ERROR_CODE_NETWORK_LAYER = 0x40, | 57 | REG_UIC_ERROR_CODE_NETWORK_LAYER = 0x40, |
58 | REG_UIC_ERROR_CODE_TRANSPORT_LAYER = 0x44, | 58 | REG_UIC_ERROR_CODE_TRANSPORT_LAYER = 0x44, |
59 | REG_UIC_ERROR_CODE_DME = 0x48, | 59 | REG_UIC_ERROR_CODE_DME = 0x48, |
60 | REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL = 0x4C, | 60 | REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL = 0x4C, |
61 | REG_UTP_TRANSFER_REQ_LIST_BASE_L = 0x50, | 61 | REG_UTP_TRANSFER_REQ_LIST_BASE_L = 0x50, |
62 | REG_UTP_TRANSFER_REQ_LIST_BASE_H = 0x54, | 62 | REG_UTP_TRANSFER_REQ_LIST_BASE_H = 0x54, |
63 | REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58, | 63 | REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58, |
64 | REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C, | 64 | REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C, |
65 | REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60, | 65 | REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60, |
66 | REG_UTP_TASK_REQ_LIST_BASE_L = 0x70, | 66 | REG_UTP_TASK_REQ_LIST_BASE_L = 0x70, |
67 | REG_UTP_TASK_REQ_LIST_BASE_H = 0x74, | 67 | REG_UTP_TASK_REQ_LIST_BASE_H = 0x74, |
68 | REG_UTP_TASK_REQ_DOOR_BELL = 0x78, | 68 | REG_UTP_TASK_REQ_DOOR_BELL = 0x78, |
69 | REG_UTP_TASK_REQ_LIST_CLEAR = 0x7C, | 69 | REG_UTP_TASK_REQ_LIST_CLEAR = 0x7C, |
70 | REG_UTP_TASK_REQ_LIST_RUN_STOP = 0x80, | 70 | REG_UTP_TASK_REQ_LIST_RUN_STOP = 0x80, |
71 | REG_UIC_COMMAND = 0x90, | 71 | REG_UIC_COMMAND = 0x90, |
72 | REG_UIC_COMMAND_ARG_1 = 0x94, | 72 | REG_UIC_COMMAND_ARG_1 = 0x94, |
73 | REG_UIC_COMMAND_ARG_2 = 0x98, | 73 | REG_UIC_COMMAND_ARG_2 = 0x98, |
74 | REG_UIC_COMMAND_ARG_3 = 0x9C, | 74 | REG_UIC_COMMAND_ARG_3 = 0x9C, |
75 | }; | 75 | }; |
76 | 76 | ||
77 | /* Controller capability masks */ | 77 | /* Controller capability masks */ |
78 | enum { | 78 | enum { |
79 | MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F, | 79 | MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F, |
80 | MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000, | 80 | MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000, |
81 | MASK_64_ADDRESSING_SUPPORT = 0x01000000, | 81 | MASK_64_ADDRESSING_SUPPORT = 0x01000000, |
82 | MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000, | 82 | MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000, |
83 | MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000, | 83 | MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000, |
84 | }; | 84 | }; |
85 | 85 | ||
86 | /* UFS Version 08h */ | 86 | /* UFS Version 08h */ |
87 | #define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0) | 87 | #define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0) |
88 | #define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16) | 88 | #define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16) |
89 | 89 | ||
90 | /* Controller UFSHCI version */ | 90 | /* Controller UFSHCI version */ |
91 | enum { | 91 | enum { |
92 | UFSHCI_VERSION_10 = 0x00010000, | 92 | UFSHCI_VERSION_10 = 0x00010000, |
93 | UFSHCI_VERSION_11 = 0x00010100, | 93 | UFSHCI_VERSION_11 = 0x00010100, |
94 | }; | 94 | }; |
95 | 95 | ||
96 | /* | 96 | /* |
97 | * HCDDID - Host Controller Identification Descriptor | 97 | * HCDDID - Host Controller Identification Descriptor |
98 | * - Device ID and Device Class 10h | 98 | * - Device ID and Device Class 10h |
99 | */ | 99 | */ |
100 | #define DEVICE_CLASS UFS_MASK(0xFFFF, 0) | 100 | #define DEVICE_CLASS UFS_MASK(0xFFFF, 0) |
101 | #define DEVICE_ID UFS_MASK(0xFF, 24) | 101 | #define DEVICE_ID UFS_MASK(0xFF, 24) |
102 | 102 | ||
103 | /* | 103 | /* |
104 | * HCPMID - Host Controller Identification Descriptor | 104 | * HCPMID - Host Controller Identification Descriptor |
105 | * - Product/Manufacturer ID 14h | 105 | * - Product/Manufacturer ID 14h |
106 | */ | 106 | */ |
107 | #define MANUFACTURE_ID_MASK UFS_MASK(0xFFFF, 0) | 107 | #define MANUFACTURE_ID_MASK UFS_MASK(0xFFFF, 0) |
108 | #define PRODUCT_ID_MASK UFS_MASK(0xFFFF, 16) | 108 | #define PRODUCT_ID_MASK UFS_MASK(0xFFFF, 16) |
109 | 109 | ||
110 | #define UFS_BIT(x) (1L << (x)) | 110 | #define UFS_BIT(x) (1L << (x)) |
111 | 111 | ||
112 | #define UTP_TRANSFER_REQ_COMPL UFS_BIT(0) | 112 | #define UTP_TRANSFER_REQ_COMPL UFS_BIT(0) |
113 | #define UIC_DME_END_PT_RESET UFS_BIT(1) | 113 | #define UIC_DME_END_PT_RESET UFS_BIT(1) |
114 | #define UIC_ERROR UFS_BIT(2) | 114 | #define UIC_ERROR UFS_BIT(2) |
115 | #define UIC_TEST_MODE UFS_BIT(3) | 115 | #define UIC_TEST_MODE UFS_BIT(3) |
116 | #define UIC_POWER_MODE UFS_BIT(4) | 116 | #define UIC_POWER_MODE UFS_BIT(4) |
117 | #define UIC_HIBERNATE_EXIT UFS_BIT(5) | 117 | #define UIC_HIBERNATE_EXIT UFS_BIT(5) |
118 | #define UIC_HIBERNATE_ENTER UFS_BIT(6) | 118 | #define UIC_HIBERNATE_ENTER UFS_BIT(6) |
119 | #define UIC_LINK_LOST UFS_BIT(7) | 119 | #define UIC_LINK_LOST UFS_BIT(7) |
120 | #define UIC_LINK_STARTUP UFS_BIT(8) | 120 | #define UIC_LINK_STARTUP UFS_BIT(8) |
121 | #define UTP_TASK_REQ_COMPL UFS_BIT(9) | 121 | #define UTP_TASK_REQ_COMPL UFS_BIT(9) |
122 | #define UIC_COMMAND_COMPL UFS_BIT(10) | 122 | #define UIC_COMMAND_COMPL UFS_BIT(10) |
123 | #define DEVICE_FATAL_ERROR UFS_BIT(11) | 123 | #define DEVICE_FATAL_ERROR UFS_BIT(11) |
124 | #define CONTROLLER_FATAL_ERROR UFS_BIT(16) | 124 | #define CONTROLLER_FATAL_ERROR UFS_BIT(16) |
125 | #define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17) | 125 | #define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17) |
126 | 126 | ||
127 | #define UFSHCD_ERROR_MASK (UIC_ERROR |\ | 127 | #define UFSHCD_ERROR_MASK (UIC_ERROR |\ |
128 | DEVICE_FATAL_ERROR |\ | 128 | DEVICE_FATAL_ERROR |\ |
129 | CONTROLLER_FATAL_ERROR |\ | 129 | CONTROLLER_FATAL_ERROR |\ |
130 | SYSTEM_BUS_FATAL_ERROR) | 130 | SYSTEM_BUS_FATAL_ERROR) |
131 | 131 | ||
132 | #define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\ | 132 | #define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\ |
133 | CONTROLLER_FATAL_ERROR |\ | 133 | CONTROLLER_FATAL_ERROR |\ |
134 | SYSTEM_BUS_FATAL_ERROR) | 134 | SYSTEM_BUS_FATAL_ERROR) |
135 | 135 | ||
136 | /* HCS - Host Controller Status 30h */ | 136 | /* HCS - Host Controller Status 30h */ |
137 | #define DEVICE_PRESENT UFS_BIT(0) | 137 | #define DEVICE_PRESENT UFS_BIT(0) |
138 | #define UTP_TRANSFER_REQ_LIST_READY UFS_BIT(1) | 138 | #define UTP_TRANSFER_REQ_LIST_READY UFS_BIT(1) |
139 | #define UTP_TASK_REQ_LIST_READY UFS_BIT(2) | 139 | #define UTP_TASK_REQ_LIST_READY UFS_BIT(2) |
140 | #define UIC_COMMAND_READY UFS_BIT(3) | 140 | #define UIC_COMMAND_READY UFS_BIT(3) |
141 | #define HOST_ERROR_INDICATOR UFS_BIT(4) | 141 | #define HOST_ERROR_INDICATOR UFS_BIT(4) |
142 | #define DEVICE_ERROR_INDICATOR UFS_BIT(5) | 142 | #define DEVICE_ERROR_INDICATOR UFS_BIT(5) |
143 | #define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8) | 143 | #define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8) |
144 | 144 | ||
145 | /* HCE - Host Controller Enable 34h */ | 145 | /* HCE - Host Controller Enable 34h */ |
146 | #define CONTROLLER_ENABLE UFS_BIT(0) | 146 | #define CONTROLLER_ENABLE UFS_BIT(0) |
147 | #define CONTROLLER_DISABLE 0x0 | 147 | #define CONTROLLER_DISABLE 0x0 |
148 | 148 | ||
149 | /* UECPA - Host UIC Error Code PHY Adapter Layer 38h */ | 149 | /* UECPA - Host UIC Error Code PHY Adapter Layer 38h */ |
150 | #define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31) | 150 | #define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31) |
151 | #define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F | 151 | #define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F |
152 | 152 | ||
153 | /* UECDL - Host UIC Error Code Data Link Layer 3Ch */ | 153 | /* UECDL - Host UIC Error Code Data Link Layer 3Ch */ |
154 | #define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31) | 154 | #define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31) |
155 | #define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF | 155 | #define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF |
156 | #define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000 | 156 | #define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000 |
157 | 157 | ||
158 | /* UECN - Host UIC Error Code Network Layer 40h */ | 158 | /* UECN - Host UIC Error Code Network Layer 40h */ |
159 | #define UIC_NETWORK_LAYER_ERROR UFS_BIT(31) | 159 | #define UIC_NETWORK_LAYER_ERROR UFS_BIT(31) |
160 | #define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7 | 160 | #define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7 |
161 | 161 | ||
162 | /* UECT - Host UIC Error Code Transport Layer 44h */ | 162 | /* UECT - Host UIC Error Code Transport Layer 44h */ |
163 | #define UIC_TRANSPORT_LAYER_ERROR UFS_BIT(31) | 163 | #define UIC_TRANSPORT_LAYER_ERROR UFS_BIT(31) |
164 | #define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F | 164 | #define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F |
165 | 165 | ||
166 | /* UECDME - Host UIC Error Code DME 48h */ | 166 | /* UECDME - Host UIC Error Code DME 48h */ |
167 | #define UIC_DME_ERROR UFS_BIT(31) | 167 | #define UIC_DME_ERROR UFS_BIT(31) |
168 | #define UIC_DME_ERROR_CODE_MASK 0x1 | 168 | #define UIC_DME_ERROR_CODE_MASK 0x1 |
169 | 169 | ||
170 | #define INT_AGGR_TIMEOUT_VAL_MASK 0xFF | 170 | #define INT_AGGR_TIMEOUT_VAL_MASK 0xFF |
171 | #define INT_AGGR_COUNTER_THRESHOLD_MASK UFS_MASK(0x1F, 8) | 171 | #define INT_AGGR_COUNTER_THRESHOLD_MASK UFS_MASK(0x1F, 8) |
172 | #define INT_AGGR_COUNTER_AND_TIMER_RESET UFS_BIT(16) | 172 | #define INT_AGGR_COUNTER_AND_TIMER_RESET UFS_BIT(16) |
173 | #define INT_AGGR_STATUS_BIT UFS_BIT(20) | 173 | #define INT_AGGR_STATUS_BIT UFS_BIT(20) |
174 | #define INT_AGGR_PARAM_WRITE UFS_BIT(24) | 174 | #define INT_AGGR_PARAM_WRITE UFS_BIT(24) |
175 | #define INT_AGGR_ENABLE UFS_BIT(31) | 175 | #define INT_AGGR_ENABLE UFS_BIT(31) |
176 | 176 | ||
177 | /* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */ | 177 | /* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */ |
178 | #define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT UFS_BIT(0) | 178 | #define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT UFS_BIT(0) |
179 | 179 | ||
180 | /* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */ | 180 | /* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */ |
181 | #define UTP_TASK_REQ_LIST_RUN_STOP_BIT UFS_BIT(0) | 181 | #define UTP_TASK_REQ_LIST_RUN_STOP_BIT UFS_BIT(0) |
182 | 182 | ||
183 | /* UICCMD - UIC Command */ | 183 | /* UICCMD - UIC Command */ |
184 | #define COMMAND_OPCODE_MASK 0xFF | 184 | #define COMMAND_OPCODE_MASK 0xFF |
185 | #define GEN_SELECTOR_INDEX_MASK 0xFFFF | 185 | #define GEN_SELECTOR_INDEX_MASK 0xFFFF |
186 | 186 | ||
187 | #define MIB_ATTRIBUTE_MASK UFS_MASK(0xFFFF, 16) | 187 | #define MIB_ATTRIBUTE_MASK UFS_MASK(0xFFFF, 16) |
188 | #define RESET_LEVEL 0xFF | 188 | #define RESET_LEVEL 0xFF |
189 | 189 | ||
190 | #define ATTR_SET_TYPE_MASK UFS_MASK(0xFF, 16) | 190 | #define ATTR_SET_TYPE_MASK UFS_MASK(0xFF, 16) |
191 | #define CONFIG_RESULT_CODE_MASK 0xFF | 191 | #define CONFIG_RESULT_CODE_MASK 0xFF |
192 | #define GENERIC_ERROR_CODE_MASK 0xFF | 192 | #define GENERIC_ERROR_CODE_MASK 0xFF |
193 | 193 | ||
194 | #define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\ | ||
195 | ((sel) & 0xFFFF)) | ||
196 | #define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0) | ||
197 | #define UIC_ARG_ATTR_TYPE(t) (((t) & 0xFF) << 16) | ||
198 | #define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF) | ||
199 | |||
/* UIC command opcodes (written to the COMMAND_OPCODE field of UICCMD) */
enum {
	/* DME configuration primitives */
	UIC_CMD_DME_GET			= 0x01,
	UIC_CMD_DME_SET			= 0x02,
	UIC_CMD_DME_PEER_GET		= 0x03,
	UIC_CMD_DME_PEER_SET		= 0x04,
	/* DME control primitives */
	UIC_CMD_DME_POWERON		= 0x10,
	UIC_CMD_DME_POWEROFF		= 0x11,
	UIC_CMD_DME_ENABLE		= 0x12,
	UIC_CMD_DME_RESET		= 0x14,
	UIC_CMD_DME_END_PT_RST		= 0x15,
	UIC_CMD_DME_LINK_STARTUP	= 0x16,
	UIC_CMD_DME_HIBER_ENTER		= 0x17,
	UIC_CMD_DME_HIBER_EXIT		= 0x18,
	UIC_CMD_DME_TEST_MODE		= 0x1A,
};
210 | 216 | ||
/*
 * UIC Config result codes / Generic error codes, extracted with
 * CONFIG_RESULT_CODE_MASK / GENERIC_ERROR_CODE_MASK.  The two code
 * spaces overlap: 0x01 is INVALID_ATTR as a config result but FAILURE
 * as a generic error code, so decode against the issued command.
 */
enum {
	UIC_CMD_RESULT_SUCCESS			= 0x00,
	UIC_CMD_RESULT_INVALID_ATTR		= 0x01,
	UIC_CMD_RESULT_FAILURE			= 0x01,	/* generic error space */
	UIC_CMD_RESULT_INVALID_ATTR_VALUE	= 0x02,
	UIC_CMD_RESULT_READ_ONLY_ATTR		= 0x03,
	UIC_CMD_RESULT_WRITE_ONLY_ATTR		= 0x04,
	UIC_CMD_RESULT_BAD_INDEX		= 0x05,
	UIC_CMD_RESULT_LOCKED_ATTR		= 0x06,
	UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX	= 0x07,
	UIC_CMD_RESULT_PEER_COMM_FAILURE	= 0x08,
	UIC_CMD_RESULT_BUSY			= 0x09,
	UIC_CMD_RESULT_DME_FAILURE		= 0x0A,
};
226 | 232 | ||
/* Mask to extract the UIC command result code from the low byte */
#define MASK_UIC_COMMAND_RESULT		0xFF

/*
 * Interrupt aggregation control fields: counter threshold in bits
 * [12:8], timeout value in bits [7:0].
 */
#define INT_AGGR_COUNTER_THLD_VAL(c)	(((c) & 0x1F) << 8)
#define INT_AGGR_TIMEOUT_VAL(t)		(((t) & 0xFF) << 0)
231 | 237 | ||
/* Interrupt disable masks, chosen by UFSHCI controller version */
enum {
	/* UFSHCI v1.0: all maskable interrupt bits */
	INTERRUPT_MASK_ALL_VER_10	= 0x30FFF,
	INTERRUPT_MASK_RW_VER_10	= 0x30000,

	/* UFSHCI v1.1: v1.0 mask plus bit 12 */
	INTERRUPT_MASK_ALL_VER_11	= 0x31FFF,
};
241 | 247 | ||
/*
 * Request Descriptor Definitions
 */
245 | 251 | ||
/* Transfer request command type */
enum {
	UTP_CMD_TYPE_SCSI	= 0x0,	/* SCSI command */
	UTP_CMD_TYPE_UFS	= 0x1,	/* native UFS command */
	UTP_CMD_TYPE_DEV_MANAGE	= 0x2,	/* device management function */
};
252 | 258 | ||
/*
 * Pre-shifted command-type/flag values for the request descriptor
 * header word: command type in the top nibble, interrupt-on-completion
 * at bit 24.  NOTE(review): presumably OR'd into UTRD header DW-0 —
 * confirm at the descriptor fill site.
 */
enum {
	UTP_SCSI_COMMAND		= 0x00000000,
	UTP_NATIVE_UFS_COMMAND		= 0x10000000,
	UTP_DEVICE_MANAGEMENT_FUNCTION	= 0x20000000,
	UTP_REQ_DESC_INT_CMD		= 0x01000000,
};
259 | 265 | ||
/* UTP Transfer Request Data Direction (DD), pre-shifted to bits [26:25] */
enum {
	UTP_NO_DATA_TRANSFER	= 0x00000000,
	UTP_HOST_TO_DEVICE	= 0x02000000,	/* write */
	UTP_DEVICE_TO_HOST	= 0x04000000,	/* read */
};
266 | 272 | ||
/* Overall Command Status (OCS) values; extract with MASK_OCS */
enum {
	OCS_SUCCESS			= 0x0,
	OCS_INVALID_CMD_TABLE_ATTR	= 0x1,
	OCS_INVALID_PRDT_ATTR		= 0x2,
	OCS_MISMATCH_DATA_BUF_SIZE	= 0x3,
	OCS_MISMATCH_RESP_UPIU_SIZE	= 0x4,
	OCS_PEER_COMM_FAILURE		= 0x5,
	OCS_ABORTED			= 0x6,
	OCS_FATAL_ERROR			= 0x7,
	OCS_INVALID_COMMAND_STATUS	= 0x0F,
	MASK_OCS			= 0x0F,
};
280 | 286 | ||
/**
 * struct ufshcd_sg_entry - UFSHCI PRD (Physical Region Description) entry
 * @base_addr: lower 32 bits of the segment's physical address, DW-0
 * @upper_addr: upper 32 bits of the segment's physical address, DW-1
 * @reserved: reserved for future use, DW-2
 * @size: size of physical segment, DW-3
 *	NOTE(review): UFSHCI encodes the data byte count as length-1 —
 *	confirm at the PRDT fill site.
 *
 * One entry of the scatter-gather table embedded in a UFS command
 * descriptor (see struct utp_transfer_cmd_desc).
 */
struct ufshcd_sg_entry {
	u32 base_addr;
	u32 upper_addr;
	u32 reserved;
	u32 size;
};
294 | 300 | ||
/**
 * struct utp_transfer_cmd_desc - UFS Command Descriptor (UCD) structure
 * @command_upiu: command UPIU frame (UPIU-aligned buffer)
 * @response_upiu: response UPIU frame (UPIU-aligned buffer)
 * @prd_table: physical region descriptor (scatter-gather) table,
 *	sized for the SCSI-layer maximum of SG_ALL segments
 */
struct utp_transfer_cmd_desc {
	u8 command_upiu[ALIGNED_UPIU_SIZE];
	u8 response_upiu[ALIGNED_UPIU_SIZE];
	struct ufshcd_sg_entry prd_table[SG_ALL];
};
306 | 312 | ||
/**
 * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
 * @dword_0: Descriptor Header DW0
 * @dword_1: Descriptor Header DW1
 * @dword_2: Descriptor Header DW2
 * @dword_3: Descriptor Header DW3
 *
 * Kernel-doc names fixed to match the actual field names (dword_0, not
 * dword0).  Individual fields of each DW are set/read via shifts and
 * masks elsewhere.
 */
struct request_desc_header {
	u32 dword_0;
	u32 dword_1;
	u32 dword_2;
	u32 dword_3;
};
320 | 326 | ||
/**
 * struct utp_transfer_req_desc - UTP Transfer Request Descriptor (UTRD)
 * @header: UTRD header, DW-0 to DW-3
 * @command_desc_base_addr_lo: UCD base address, low 32 bits, DW-4
 * @command_desc_base_addr_hi: UCD base address, high 32 bits, DW-5
 * @response_upiu_length: response UPIU length, DW-6
 * @response_upiu_offset: response UPIU offset within the UCD, DW-6
 * @prd_table_length: physical region descriptor table length, DW-7
 * @prd_table_offset: physical region descriptor table offset within
 *	the UCD, DW-7
 *
 * NOTE(review): whether length/offset fields are in bytes or dwords is
 * not visible here — confirm at the descriptor fill site.
 */
struct utp_transfer_req_desc {

	/* DW 0-3 */
	struct request_desc_header header;

	/* DW 4-5 */
	u32 command_desc_base_addr_lo;
	u32 command_desc_base_addr_hi;

	/* DW 6 */
	u16 response_upiu_length;
	u16 response_upiu_offset;

	/* DW 7 */
	u16 prd_table_length;
	u16 prd_table_offset;
};
348 | 354 | ||
/**
 * struct utp_task_req_desc - UTP Task Management Request Descriptor (UTMRD)
 * @header: UTMRD header, DW-0 to DW-3
 * @task_req_upiu: task management request UPIU, stored in place,
 *	DW-4 to DW-11 (not a pointer, despite the original comment)
 * @task_rsp_upiu: task management response UPIU, stored in place,
 *	DW-12 to DW-19
 */
struct utp_task_req_desc {

	/* DW 0-3 */
	struct request_desc_header header;

	/* DW 4-11 */
	u32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];

	/* DW 12-19 */
	u32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
};
366 | 372 | ||
367 | #endif /* End of Header */ | 373 | #endif /* End of Header */ |
368 | 374 |