Commit d7cd8fc525c9322ceb1f1de26d7c6201aef9d842
Committed by
Greg Kroah-Hartman
1 parent
61c39c6dcc
iommu/msm: Fix error handling in msm_iommu_unmap()
commit 05df1f3c2afaef5672627f2b7095f0d4c4dbc3a0 upstream. Error handling in msm_iommu_unmap() is broken. On some error conditions retval is set to a non-zero value which causes the function to return 'len' at the end. This hides the error from the user. Zero should be returned in those error cases. Cc: David Brown <davidb@codeaurora.org> Cc: Stepan Moskovchenko <stepanm@codeaurora.org> Signed-off-by: Joerg Roedel <joerg.roedel@amd.com> Acked-by: David Brown <davidb@codeaurora.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Showing 1 changed file with 1 additions and 6 deletions Inline Diff
drivers/iommu/msm_iommu.c
1 | /* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. | 1 | /* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. |
2 | * | 2 | * |
3 | * This program is free software; you can redistribute it and/or modify | 3 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License version 2 and | 4 | * it under the terms of the GNU General Public License version 2 and |
5 | * only version 2 as published by the Free Software Foundation. | 5 | * only version 2 as published by the Free Software Foundation. |
6 | * | 6 | * |
7 | * This program is distributed in the hope that it will be useful, | 7 | * This program is distributed in the hope that it will be useful, |
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
10 | * GNU General Public License for more details. | 10 | * GNU General Public License for more details. |
11 | * | 11 | * |
12 | * You should have received a copy of the GNU General Public License | 12 | * You should have received a copy of the GNU General Public License |
13 | * along with this program; if not, write to the Free Software | 13 | * along with this program; if not, write to the Free Software |
14 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | 14 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
15 | * 02110-1301, USA. | 15 | * 02110-1301, USA. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <linux/errno.h> | 22 | #include <linux/errno.h> |
23 | #include <linux/io.h> | 23 | #include <linux/io.h> |
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/list.h> | 25 | #include <linux/list.h> |
26 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/iommu.h> | 28 | #include <linux/iommu.h> |
29 | #include <linux/clk.h> | 29 | #include <linux/clk.h> |
30 | 30 | ||
31 | #include <asm/cacheflush.h> | 31 | #include <asm/cacheflush.h> |
32 | #include <asm/sizes.h> | 32 | #include <asm/sizes.h> |
33 | 33 | ||
34 | #include <mach/iommu_hw-8xxx.h> | 34 | #include <mach/iommu_hw-8xxx.h> |
35 | #include <mach/iommu.h> | 35 | #include <mach/iommu.h> |
36 | 36 | ||
/*
 * MRC - read a coprocessor register into 'reg' with an inline "mrc"
 * instruction.  The operand macro arguments are stringized directly
 * into the assembly template.
 */
#define MRC(reg, processor, op1, crn, crm, op2) \
__asm__ __volatile__ ( \
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

/* CP15 c10 reads: Primary Region Remap Register and Normal Memory
 * Remap Register, used below to mirror the CPU's TEX remap setup into
 * each IOMMU context. */
#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)
44 | 44 | ||
/* TEX remap class selected per MSM_IOMMU_CP_MASK attribute value.
 * NOTE(review): populated outside this chunk; msm_iommu_map() rejects
 * out-of-range entries, so negative means "uninitialized/invalid". */
static int msm_iommu_tex_class[4];

/* Serializes all page-table updates, attach/detach list manipulation
 * and hardware context programming in this driver. */
DEFINE_SPINLOCK(msm_iommu_lock);

/* Per-domain private data. */
struct msm_priv {
	unsigned long *pgtable;		/* 16KB first-level page table */
	struct list_head list_attached;	/* attached ctx_drvdata list */
};
53 | 53 | ||
54 | static int __enable_clocks(struct msm_iommu_drvdata *drvdata) | 54 | static int __enable_clocks(struct msm_iommu_drvdata *drvdata) |
55 | { | 55 | { |
56 | int ret; | 56 | int ret; |
57 | 57 | ||
58 | ret = clk_enable(drvdata->pclk); | 58 | ret = clk_enable(drvdata->pclk); |
59 | if (ret) | 59 | if (ret) |
60 | goto fail; | 60 | goto fail; |
61 | 61 | ||
62 | if (drvdata->clk) { | 62 | if (drvdata->clk) { |
63 | ret = clk_enable(drvdata->clk); | 63 | ret = clk_enable(drvdata->clk); |
64 | if (ret) | 64 | if (ret) |
65 | clk_disable(drvdata->pclk); | 65 | clk_disable(drvdata->pclk); |
66 | } | 66 | } |
67 | fail: | 67 | fail: |
68 | return ret; | 68 | return ret; |
69 | } | 69 | } |
70 | 70 | ||
71 | static void __disable_clocks(struct msm_iommu_drvdata *drvdata) | 71 | static void __disable_clocks(struct msm_iommu_drvdata *drvdata) |
72 | { | 72 | { |
73 | if (drvdata->clk) | 73 | if (drvdata->clk) |
74 | clk_disable(drvdata->clk); | 74 | clk_disable(drvdata->clk); |
75 | clk_disable(drvdata->pclk); | 75 | clk_disable(drvdata->pclk); |
76 | } | 76 | } |
77 | 77 | ||
/*
 * Invalidate the TLB of every IOMMU context attached to @domain.
 *
 * When the page tables are not configured as L2-cacheable
 * (CONFIG_IOMMU_PGTABLES_L2 unset), the first-level table and each
 * second-level table are first flushed from the CPU caches so the
 * hardware table walker observes the latest entries.
 *
 * Returns 0 on success, or the __enable_clocks() error if a context's
 * IOMMU clocks could not be turned on (contexts already flushed stay
 * flushed).  All callers in this file hold msm_iommu_lock.
 */
static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
#ifndef CONFIG_IOMMU_PGTABLES_L2
	unsigned long *fl_table = priv->pgtable;
	int i;

	if (!list_empty(&priv->list_attached)) {
		/* Push the whole 16KB first-level table out of the caches */
		dmac_flush_range(fl_table, fl_table + SZ_16K);

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
				/* Flush the 4KB second-level table this
				 * first-level entry points to */
				void *sl_table = __va(fl_table[i] &
								FL_BASE_MASK);
				dmac_flush_range(sl_table, sl_table + SZ_4K);
			}
	}
#endif

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		/* Invalidate every TLB entry for this context bank */
		SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}
117 | 117 | ||
/*
 * Zero every configuration register of context bank @ctx, returning it
 * to a known-clear (translation disabled) state.  Callers enable the
 * IOMMU clocks around this call.
 */
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
}
141 | 141 | ||
/*
 * Program context bank @ctx to translate through the first-level page
 * table at physical address @pgtable.  The table base is written as
 * pgtable >> 14, i.e. the table is assumed 16KB-aligned.  The sequence
 * ends by setting the M bit, turning the MMU on for this context.
 * Callers enable the IOMMU clocks around this call.
 */
static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
{
	unsigned int prrr, nmrr;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, 0);
	SET_TTBR0_PA(base, ctx, (pgtable >> 14));

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes, mirroring the CPU's PRRR/NMRR */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

#ifdef CONFIG_IOMMU_PGTABLES_L2
	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	SET_TTBR0_SH(base, ctx, 1);
	SET_TTBR1_SH(base, ctx, 1);

	SET_TTBR0_NOS(base, ctx, 1);
	SET_TTBR1_NOS(base, ctx, 1);

	SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR0_IRGNL(base, ctx, 1);

	SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR1_IRGNL(base, ctx, 1);

	SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
	SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
#endif

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}
209 | 209 | ||
210 | static int msm_iommu_domain_init(struct iommu_domain *domain) | 210 | static int msm_iommu_domain_init(struct iommu_domain *domain) |
211 | { | 211 | { |
212 | struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 212 | struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
213 | 213 | ||
214 | if (!priv) | 214 | if (!priv) |
215 | goto fail_nomem; | 215 | goto fail_nomem; |
216 | 216 | ||
217 | INIT_LIST_HEAD(&priv->list_attached); | 217 | INIT_LIST_HEAD(&priv->list_attached); |
218 | priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL, | 218 | priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL, |
219 | get_order(SZ_16K)); | 219 | get_order(SZ_16K)); |
220 | 220 | ||
221 | if (!priv->pgtable) | 221 | if (!priv->pgtable) |
222 | goto fail_nomem; | 222 | goto fail_nomem; |
223 | 223 | ||
224 | memset(priv->pgtable, 0, SZ_16K); | 224 | memset(priv->pgtable, 0, SZ_16K); |
225 | domain->priv = priv; | 225 | domain->priv = priv; |
226 | return 0; | 226 | return 0; |
227 | 227 | ||
228 | fail_nomem: | 228 | fail_nomem: |
229 | kfree(priv); | 229 | kfree(priv); |
230 | return -ENOMEM; | 230 | return -ENOMEM; |
231 | } | 231 | } |
232 | 232 | ||
233 | static void msm_iommu_domain_destroy(struct iommu_domain *domain) | 233 | static void msm_iommu_domain_destroy(struct iommu_domain *domain) |
234 | { | 234 | { |
235 | struct msm_priv *priv; | 235 | struct msm_priv *priv; |
236 | unsigned long flags; | 236 | unsigned long flags; |
237 | unsigned long *fl_table; | 237 | unsigned long *fl_table; |
238 | int i; | 238 | int i; |
239 | 239 | ||
240 | spin_lock_irqsave(&msm_iommu_lock, flags); | 240 | spin_lock_irqsave(&msm_iommu_lock, flags); |
241 | priv = domain->priv; | 241 | priv = domain->priv; |
242 | domain->priv = NULL; | 242 | domain->priv = NULL; |
243 | 243 | ||
244 | if (priv) { | 244 | if (priv) { |
245 | fl_table = priv->pgtable; | 245 | fl_table = priv->pgtable; |
246 | 246 | ||
247 | for (i = 0; i < NUM_FL_PTE; i++) | 247 | for (i = 0; i < NUM_FL_PTE; i++) |
248 | if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) | 248 | if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) |
249 | free_page((unsigned long) __va(((fl_table[i]) & | 249 | free_page((unsigned long) __va(((fl_table[i]) & |
250 | FL_BASE_MASK))); | 250 | FL_BASE_MASK))); |
251 | 251 | ||
252 | free_pages((unsigned long)priv->pgtable, get_order(SZ_16K)); | 252 | free_pages((unsigned long)priv->pgtable, get_order(SZ_16K)); |
253 | priv->pgtable = NULL; | 253 | priv->pgtable = NULL; |
254 | } | 254 | } |
255 | 255 | ||
256 | kfree(priv); | 256 | kfree(priv); |
257 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | 257 | spin_unlock_irqrestore(&msm_iommu_lock, flags); |
258 | } | 258 | } |
259 | 259 | ||
/*
 * Attach context device @dev to @domain.
 *
 * @dev is an IOMMU context platform device: dev_get_drvdata(dev) yields
 * its context drvdata and dev_get_drvdata(dev->parent) the owning IOMMU
 * instance's drvdata.  On success the context bank is programmed with
 * the domain's page table, added to priv->list_attached, and the IOTLB
 * is flushed.
 *
 * Returns 0 on success, -EINVAL if required drvdata/platform data is
 * missing, -EBUSY if the context is already attached anywhere, or a
 * clock / TLB-flush error.
 */
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	/* A context on any domain's attach list is already in use */
	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	/* ...and reject a duplicate attach to this same domain */
	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	/* Point the context bank at this domain's first-level table */
	__program_context(iommu_drvdata->base, ctx_dev->num,
			  __pa(priv->pgtable));

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ret = __flush_iotlb(domain);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}
314 | 314 | ||
/*
 * Detach context device @dev from @domain: flush the IOTLB, reset the
 * context bank (turning translation off) and remove the context from
 * the domain's attached list.  Failures (missing drvdata, clock or
 * flush errors) are silently ignored — the iommu_ops detach hook
 * returns void.
 */
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	ret = __flush_iotlb(domain);
	if (ret)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	/* Disable translation for this context bank */
	__reset_context(iommu_drvdata->base, ctx_dev->num);
	__disable_clocks(iommu_drvdata);
	/* list_del_init keeps attached_elm usable for the re-attach check */
	list_del_init(&ctx_drvdata->attached_elm);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
353 | 353 | ||
354 | static int msm_iommu_map(struct iommu_domain *domain, unsigned long va, | 354 | static int msm_iommu_map(struct iommu_domain *domain, unsigned long va, |
355 | phys_addr_t pa, int order, int prot) | 355 | phys_addr_t pa, int order, int prot) |
356 | { | 356 | { |
357 | struct msm_priv *priv; | 357 | struct msm_priv *priv; |
358 | unsigned long flags; | 358 | unsigned long flags; |
359 | unsigned long *fl_table; | 359 | unsigned long *fl_table; |
360 | unsigned long *fl_pte; | 360 | unsigned long *fl_pte; |
361 | unsigned long fl_offset; | 361 | unsigned long fl_offset; |
362 | unsigned long *sl_table; | 362 | unsigned long *sl_table; |
363 | unsigned long *sl_pte; | 363 | unsigned long *sl_pte; |
364 | unsigned long sl_offset; | 364 | unsigned long sl_offset; |
365 | unsigned int pgprot; | 365 | unsigned int pgprot; |
366 | size_t len = 0x1000UL << order; | 366 | size_t len = 0x1000UL << order; |
367 | int ret = 0, tex, sh; | 367 | int ret = 0, tex, sh; |
368 | 368 | ||
369 | spin_lock_irqsave(&msm_iommu_lock, flags); | 369 | spin_lock_irqsave(&msm_iommu_lock, flags); |
370 | 370 | ||
371 | sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0; | 371 | sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0; |
372 | tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK]; | 372 | tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK]; |
373 | 373 | ||
374 | if (tex < 0 || tex > NUM_TEX_CLASS - 1) { | 374 | if (tex < 0 || tex > NUM_TEX_CLASS - 1) { |
375 | ret = -EINVAL; | 375 | ret = -EINVAL; |
376 | goto fail; | 376 | goto fail; |
377 | } | 377 | } |
378 | 378 | ||
379 | priv = domain->priv; | 379 | priv = domain->priv; |
380 | if (!priv) { | 380 | if (!priv) { |
381 | ret = -EINVAL; | 381 | ret = -EINVAL; |
382 | goto fail; | 382 | goto fail; |
383 | } | 383 | } |
384 | 384 | ||
385 | fl_table = priv->pgtable; | 385 | fl_table = priv->pgtable; |
386 | 386 | ||
387 | if (len != SZ_16M && len != SZ_1M && | 387 | if (len != SZ_16M && len != SZ_1M && |
388 | len != SZ_64K && len != SZ_4K) { | 388 | len != SZ_64K && len != SZ_4K) { |
389 | pr_debug("Bad size: %d\n", len); | 389 | pr_debug("Bad size: %d\n", len); |
390 | ret = -EINVAL; | 390 | ret = -EINVAL; |
391 | goto fail; | 391 | goto fail; |
392 | } | 392 | } |
393 | 393 | ||
394 | if (!fl_table) { | 394 | if (!fl_table) { |
395 | pr_debug("Null page table\n"); | 395 | pr_debug("Null page table\n"); |
396 | ret = -EINVAL; | 396 | ret = -EINVAL; |
397 | goto fail; | 397 | goto fail; |
398 | } | 398 | } |
399 | 399 | ||
400 | if (len == SZ_16M || len == SZ_1M) { | 400 | if (len == SZ_16M || len == SZ_1M) { |
401 | pgprot = sh ? FL_SHARED : 0; | 401 | pgprot = sh ? FL_SHARED : 0; |
402 | pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0; | 402 | pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0; |
403 | pgprot |= tex & 0x02 ? FL_CACHEABLE : 0; | 403 | pgprot |= tex & 0x02 ? FL_CACHEABLE : 0; |
404 | pgprot |= tex & 0x04 ? FL_TEX0 : 0; | 404 | pgprot |= tex & 0x04 ? FL_TEX0 : 0; |
405 | } else { | 405 | } else { |
406 | pgprot = sh ? SL_SHARED : 0; | 406 | pgprot = sh ? SL_SHARED : 0; |
407 | pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0; | 407 | pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0; |
408 | pgprot |= tex & 0x02 ? SL_CACHEABLE : 0; | 408 | pgprot |= tex & 0x02 ? SL_CACHEABLE : 0; |
409 | pgprot |= tex & 0x04 ? SL_TEX0 : 0; | 409 | pgprot |= tex & 0x04 ? SL_TEX0 : 0; |
410 | } | 410 | } |
411 | 411 | ||
412 | fl_offset = FL_OFFSET(va); /* Upper 12 bits */ | 412 | fl_offset = FL_OFFSET(va); /* Upper 12 bits */ |
413 | fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ | 413 | fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ |
414 | 414 | ||
415 | if (len == SZ_16M) { | 415 | if (len == SZ_16M) { |
416 | int i = 0; | 416 | int i = 0; |
417 | for (i = 0; i < 16; i++) | 417 | for (i = 0; i < 16; i++) |
418 | *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION | | 418 | *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION | |
419 | FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT | | 419 | FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT | |
420 | FL_SHARED | FL_NG | pgprot; | 420 | FL_SHARED | FL_NG | pgprot; |
421 | } | 421 | } |
422 | 422 | ||
423 | if (len == SZ_1M) | 423 | if (len == SZ_1M) |
424 | *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG | | 424 | *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG | |
425 | FL_TYPE_SECT | FL_SHARED | pgprot; | 425 | FL_TYPE_SECT | FL_SHARED | pgprot; |
426 | 426 | ||
427 | /* Need a 2nd level table */ | 427 | /* Need a 2nd level table */ |
428 | if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) { | 428 | if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) { |
429 | unsigned long *sl; | 429 | unsigned long *sl; |
430 | sl = (unsigned long *) __get_free_pages(GFP_ATOMIC, | 430 | sl = (unsigned long *) __get_free_pages(GFP_ATOMIC, |
431 | get_order(SZ_4K)); | 431 | get_order(SZ_4K)); |
432 | 432 | ||
433 | if (!sl) { | 433 | if (!sl) { |
434 | pr_debug("Could not allocate second level table\n"); | 434 | pr_debug("Could not allocate second level table\n"); |
435 | ret = -ENOMEM; | 435 | ret = -ENOMEM; |
436 | goto fail; | 436 | goto fail; |
437 | } | 437 | } |
438 | 438 | ||
439 | memset(sl, 0, SZ_4K); | 439 | memset(sl, 0, SZ_4K); |
440 | *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE); | 440 | *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE); |
441 | } | 441 | } |
442 | 442 | ||
443 | sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK)); | 443 | sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK)); |
444 | sl_offset = SL_OFFSET(va); | 444 | sl_offset = SL_OFFSET(va); |
445 | sl_pte = sl_table + sl_offset; | 445 | sl_pte = sl_table + sl_offset; |
446 | 446 | ||
447 | 447 | ||
448 | if (len == SZ_4K) | 448 | if (len == SZ_4K) |
449 | *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG | | 449 | *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG | |
450 | SL_SHARED | SL_TYPE_SMALL | pgprot; | 450 | SL_SHARED | SL_TYPE_SMALL | pgprot; |
451 | 451 | ||
452 | if (len == SZ_64K) { | 452 | if (len == SZ_64K) { |
453 | int i; | 453 | int i; |
454 | 454 | ||
455 | for (i = 0; i < 16; i++) | 455 | for (i = 0; i < 16; i++) |
456 | *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 | | 456 | *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 | |
457 | SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot; | 457 | SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot; |
458 | } | 458 | } |
459 | 459 | ||
460 | ret = __flush_iotlb(domain); | 460 | ret = __flush_iotlb(domain); |
461 | fail: | 461 | fail: |
462 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | 462 | spin_unlock_irqrestore(&msm_iommu_lock, flags); |
463 | return ret; | 463 | return ret; |
464 | } | 464 | } |
465 | 465 | ||
466 | static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va, | 466 | static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va, |
467 | int order) | 467 | int order) |
468 | { | 468 | { |
469 | struct msm_priv *priv; | 469 | struct msm_priv *priv; |
470 | unsigned long flags; | 470 | unsigned long flags; |
471 | unsigned long *fl_table; | 471 | unsigned long *fl_table; |
472 | unsigned long *fl_pte; | 472 | unsigned long *fl_pte; |
473 | unsigned long fl_offset; | 473 | unsigned long fl_offset; |
474 | unsigned long *sl_table; | 474 | unsigned long *sl_table; |
475 | unsigned long *sl_pte; | 475 | unsigned long *sl_pte; |
476 | unsigned long sl_offset; | 476 | unsigned long sl_offset; |
477 | size_t len = 0x1000UL << order; | 477 | size_t len = 0x1000UL << order; |
478 | int i, ret = 0; | 478 | int i, ret = 0; |
479 | 479 | ||
480 | spin_lock_irqsave(&msm_iommu_lock, flags); | 480 | spin_lock_irqsave(&msm_iommu_lock, flags); |
481 | 481 | ||
482 | priv = domain->priv; | 482 | priv = domain->priv; |
483 | 483 | ||
484 | if (!priv) { | 484 | if (!priv) |
485 | ret = -ENODEV; | ||
486 | goto fail; | 485 | goto fail; |
487 | } | ||
488 | 486 | ||
489 | fl_table = priv->pgtable; | 487 | fl_table = priv->pgtable; |
490 | 488 | ||
491 | if (len != SZ_16M && len != SZ_1M && | 489 | if (len != SZ_16M && len != SZ_1M && |
492 | len != SZ_64K && len != SZ_4K) { | 490 | len != SZ_64K && len != SZ_4K) { |
493 | pr_debug("Bad length: %d\n", len); | 491 | pr_debug("Bad length: %d\n", len); |
494 | ret = -EINVAL; | ||
495 | goto fail; | 492 | goto fail; |
496 | } | 493 | } |
497 | 494 | ||
498 | if (!fl_table) { | 495 | if (!fl_table) { |
499 | pr_debug("Null page table\n"); | 496 | pr_debug("Null page table\n"); |
500 | ret = -EINVAL; | ||
501 | goto fail; | 497 | goto fail; |
502 | } | 498 | } |
503 | 499 | ||
504 | fl_offset = FL_OFFSET(va); /* Upper 12 bits */ | 500 | fl_offset = FL_OFFSET(va); /* Upper 12 bits */ |
505 | fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ | 501 | fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ |
506 | 502 | ||
507 | if (*fl_pte == 0) { | 503 | if (*fl_pte == 0) { |
508 | pr_debug("First level PTE is 0\n"); | 504 | pr_debug("First level PTE is 0\n"); |
509 | ret = -ENODEV; | ||
510 | goto fail; | 505 | goto fail; |
511 | } | 506 | } |
512 | 507 | ||
513 | /* Unmap supersection */ | 508 | /* Unmap supersection */ |
514 | if (len == SZ_16M) | 509 | if (len == SZ_16M) |
515 | for (i = 0; i < 16; i++) | 510 | for (i = 0; i < 16; i++) |
516 | *(fl_pte+i) = 0; | 511 | *(fl_pte+i) = 0; |
517 | 512 | ||
518 | if (len == SZ_1M) | 513 | if (len == SZ_1M) |
519 | *fl_pte = 0; | 514 | *fl_pte = 0; |
520 | 515 | ||
521 | sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK)); | 516 | sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK)); |
522 | sl_offset = SL_OFFSET(va); | 517 | sl_offset = SL_OFFSET(va); |
523 | sl_pte = sl_table + sl_offset; | 518 | sl_pte = sl_table + sl_offset; |
524 | 519 | ||
525 | if (len == SZ_64K) { | 520 | if (len == SZ_64K) { |
526 | for (i = 0; i < 16; i++) | 521 | for (i = 0; i < 16; i++) |
527 | *(sl_pte+i) = 0; | 522 | *(sl_pte+i) = 0; |
528 | } | 523 | } |
529 | 524 | ||
530 | if (len == SZ_4K) | 525 | if (len == SZ_4K) |
531 | *sl_pte = 0; | 526 | *sl_pte = 0; |
532 | 527 | ||
533 | if (len == SZ_4K || len == SZ_64K) { | 528 | if (len == SZ_4K || len == SZ_64K) { |
534 | int used = 0; | 529 | int used = 0; |
535 | 530 | ||
536 | for (i = 0; i < NUM_SL_PTE; i++) | 531 | for (i = 0; i < NUM_SL_PTE; i++) |
537 | if (sl_table[i]) | 532 | if (sl_table[i]) |
538 | used = 1; | 533 | used = 1; |
539 | if (!used) { | 534 | if (!used) { |
540 | free_page((unsigned long)sl_table); | 535 | free_page((unsigned long)sl_table); |
541 | *fl_pte = 0; | 536 | *fl_pte = 0; |
542 | } | 537 | } |
543 | } | 538 | } |
544 | 539 | ||
545 | ret = __flush_iotlb(domain); | 540 | ret = __flush_iotlb(domain); |
546 | 541 | ||
547 | /* | 542 | /* |
548 | * the IOMMU API requires us to return the order of the unmapped | 543 | * the IOMMU API requires us to return the order of the unmapped |
549 | * page (on success). | 544 | * page (on success). |
550 | */ | 545 | */ |
551 | if (!ret) | 546 | if (!ret) |
552 | ret = order; | 547 | ret = order; |
553 | fail: | 548 | fail: |
554 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | 549 | spin_unlock_irqrestore(&msm_iommu_lock, flags); |
555 | return ret; | 550 | return ret; |
556 | } | 551 | } |
557 | 552 | ||
/*
 * msm_iommu_iova_to_phys() - translate an IO virtual address to a physical
 * address by asking the IOMMU hardware to perform the walk (V2P operation).
 * @domain:	iommu domain the translation is performed in
 * @va:		IO virtual address to translate
 *
 * Returns the physical address on success, or 0 if the domain has no
 * attached context, clock enabling fails, or the hardware walk faults.
 *
 * Runs with msm_iommu_lock held and interrupts disabled, since it programs
 * shared IOMMU context registers.
 */
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	unsigned long flags;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;
	/* No translation is possible without at least one attached context */
	if (list_empty(&priv->list_attached))
		goto fail;

	/* Use the first attached context bank for the hardware walk */
	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	/*
	 * NOTE(review): a non-zero __enable_clocks() error code is stored in
	 * a phys_addr_t and returned as-is — callers see it as a (bogus)
	 * physical address rather than 0; confirm whether this is intended.
	 */
	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(base, ctx, 0);
	/* Kick off the hardware V2P translation for this VA */
	SET_V2PPR(base, ctx, va & V2Pxx_VA);

	/* Physical Address Register holds the walk result */
	par = GET_PAR(base, ctx);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(base, ctx))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	/* A faulted walk yields no valid translation */
	if (GET_FAULT(base, ctx))
		ret = 0;

	__disable_clocks(iommu_drvdata);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}
607 | 602 | ||
/*
 * msm_iommu_domain_has_cap() - report whether a domain supports @cap.
 *
 * This driver advertises no IOMMU capabilities, so this always returns 0.
 */
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}
613 | 608 | ||
/*
 * print_ctx_regs() - dump the fault-related registers of context bank @ctx
 * at @base to the kernel log, decoding the individual FSR status bits.
 * Used by the fault handler for diagnostics; has no side effects on the
 * hardware beyond the register reads.
 */
static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	/* Decode each fault-status bit into its mnemonic */
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}
640 | 635 | ||
/*
 * msm_iommu_fault_handler() - IRQ handler for IOMMU context faults.
 * @irq:	interrupt number (unused)
 * @dev_id:	struct msm_iommu_drvdata of the faulting IOMMU
 *
 * Scans every context bank of the device, logs the registers of any bank
 * with a non-zero fault status, and writes the FSR back to acknowledge the
 * fault (presumably write-to-clear semantics — confirm against the TRM).
 *
 * Always returns 0 (== IRQ_NONE), even when a fault was handled.
 */
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_drvdata *drvdata = dev_id;
	void __iomem *base;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!drvdata) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	base = drvdata->base;

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int) base);

	/* Clocks must be on before any context register access */
	ret = __enable_clocks(drvdata);
	if (ret)
		goto fail;

	/* Check every context bank for a pending fault */
	for (i = 0; i < drvdata->ncb; i++) {
		fsr = GET_FSR(base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(base, i);
			/* Acknowledge/clear the fault status */
			SET_FSR(base, i, 0x4000000F);
		}
	}
	__disable_clocks(drvdata);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}
678 | 673 | ||
/* IOMMU API callbacks registered for the platform bus in msm_iommu_init() */
static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap
};
689 | 684 | ||
690 | static int __init get_tex_class(int icp, int ocp, int mt, int nos) | 685 | static int __init get_tex_class(int icp, int ocp, int mt, int nos) |
691 | { | 686 | { |
692 | int i = 0; | 687 | int i = 0; |
693 | unsigned int prrr = 0; | 688 | unsigned int prrr = 0; |
694 | unsigned int nmrr = 0; | 689 | unsigned int nmrr = 0; |
695 | int c_icp, c_ocp, c_mt, c_nos; | 690 | int c_icp, c_ocp, c_mt, c_nos; |
696 | 691 | ||
697 | RCP15_PRRR(prrr); | 692 | RCP15_PRRR(prrr); |
698 | RCP15_NMRR(nmrr); | 693 | RCP15_NMRR(nmrr); |
699 | 694 | ||
700 | for (i = 0; i < NUM_TEX_CLASS; i++) { | 695 | for (i = 0; i < NUM_TEX_CLASS; i++) { |
701 | c_nos = PRRR_NOS(prrr, i); | 696 | c_nos = PRRR_NOS(prrr, i); |
702 | c_mt = PRRR_MT(prrr, i); | 697 | c_mt = PRRR_MT(prrr, i); |
703 | c_icp = NMRR_ICP(nmrr, i); | 698 | c_icp = NMRR_ICP(nmrr, i); |
704 | c_ocp = NMRR_OCP(nmrr, i); | 699 | c_ocp = NMRR_OCP(nmrr, i); |
705 | 700 | ||
706 | if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos) | 701 | if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos) |
707 | return i; | 702 | return i; |
708 | } | 703 | } |
709 | 704 | ||
710 | return -ENODEV; | 705 | return -ENODEV; |
711 | } | 706 | } |
712 | 707 | ||
/*
 * setup_iommu_tex_classes() - populate the msm_iommu_tex_class lookup table
 * with the TEX class index matching each supported cacheability attribute.
 *
 * NOTE(review): get_tex_class() can return -ENODEV, which is stored in the
 * table unchecked — confirm that consumers of msm_iommu_tex_class handle a
 * negative entry.
 */
static void __init setup_iommu_tex_classes(void)
{
	/* Non-cached normal memory, not outer shareable */
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	/* Write-back, write-allocate */
	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	/* Write-back, no write-allocate */
	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	/* Write-through */
	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}
727 | 722 | ||
/*
 * msm_iommu_init() - driver initialization: compute the TEX class table and
 * register this driver's iommu_ops for all platform-bus devices.
 *
 * NOTE(review): the return value of bus_set_iommu() is ignored here —
 * confirm whether registration failure should be propagated.
 */
static int __init msm_iommu_init(void)
{
	setup_iommu_tex_classes();
	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
	return 0;
}
734 | 729 | ||
/* Register early (subsys level) so the IOMMU is up before client drivers */
subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");