Blame view
drivers/iommu/msm_iommu.c
17.1 KB
41f3f5138
|
1 |
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. |
0720d1f05
|
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 |
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/errno.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/iommu.h> |
41f3f5138
|
29 |
#include <linux/clk.h> |
0720d1f05
|
30 31 32 |
#include <asm/cacheflush.h> #include <asm/sizes.h> |
0b559df5c
|
33 34 |
#include "msm_iommu_hw-8xxx.h" #include "msm_iommu.h" |
0720d1f05
|
35 |
|
100832c9b
|
36 37 38 39 40 41 42 43 |
/*
 * MRC: read an ARM CP15 coprocessor register into 'reg' via inline asm.
 * Used below to sample the CPU's TEX-remap configuration so the IOMMU
 * contexts can be programmed with matching memory attributes.
 */
#define MRC(reg, processor, op1, crn, crm, op2)			    \
__asm__ __volatile__ (						    \
"   mrc   " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

/* PRRR: Primary Region Remap Register; NMRR: Normal Memory Remap Register */
#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

/* TEX class index for each MSM_IOMMU_ATTR_* cache policy; set up at init
 * by setup_iommu_tex_classes(). */
static int msm_iommu_tex_class[4];
0720d1f05
|
47 48 49 50 51 |
/* Serializes all IOMMU register access and page-table manipulation */
DEFINE_SPINLOCK(msm_iommu_lock);

/*
 * Per-domain driver state.
 * @pgtable:	   16K ARM short-descriptor first-level page table
 * @list_attached: msm_iommu_ctx_drvdata->attached_elm entries for every
 *		   context bank currently attached to this domain
 * @domain:	   embedded generic IOMMU domain (see to_msm_priv())
 */
struct msm_priv {
	unsigned long *pgtable;
	struct list_head list_attached;
	struct iommu_domain domain;
};
3e116c3cd
|
54 55 56 57 |
static struct msm_priv *to_msm_priv(struct iommu_domain *dom) { return container_of(dom, struct msm_priv, domain); } |
41f3f5138
|
58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 |
/*
 * Enable the IOMMU's bus clock (pclk) and, if present, its core clock.
 * Returns 0 on success or the clk_enable() error code; if the core clock
 * fails, the already-enabled pclk is rolled back before returning.
 */
static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_enable(drvdata->pclk);
	if (ret)
		goto fail;

	if (drvdata->clk) {
		ret = clk_enable(drvdata->clk);
		if (ret)
			clk_disable(drvdata->pclk);
	}
fail:
	return ret;
}

/* Undo __enable_clocks(): core clock first, then the bus clock.
 * NOTE(review): unlike __enable_clocks() there is no NULL check on
 * drvdata->clk here — this relies on clk_disable() tolerating a NULL
 * clock on this platform; confirm against the MSM clock driver. */
static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	clk_disable(drvdata->clk);
	clk_disable(drvdata->pclk);
}
33069739d
|
80 |
/*
 * Invalidate the TLB of every context bank attached to @domain.
 * When the page tables are not L2-cacheable (!CONFIG_IOMMU_PGTABLES_L2),
 * the first-level table and every second-level table are first cleaned
 * from the CPU caches so the IOMMU's hardware table walker observes the
 * latest entries. Caller must hold msm_iommu_lock.
 * Returns 0 on success or a clock-enable error.
 */
static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_priv *priv = to_msm_priv(domain);
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
#ifndef CONFIG_IOMMU_PGTABLES_L2
	unsigned long *fl_table = priv->pgtable;
	int i;

	if (!list_empty(&priv->list_attached)) {
		/* Clean the 16K first-level table out of the CPU caches */
		dmac_flush_range(fl_table, fl_table + SZ_16K);

		/* ...and each 4K second-level table it points to */
		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
				void *sl_table = __va(fl_table[i] &
							FL_BASE_MASK);
				dmac_flush_range(sl_table, sl_table + SZ_4K);
			}
	}
#endif

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		/* Clocks must be running to touch the TLB registers */
		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

/*
 * Reset context bank @ctx to a clean, MMU-disabled state by zeroing all
 * of its per-context registers. Caller must ensure clocks are enabled.
 */
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
}

/*
 * Program context bank @ctx for translation through the first-level page
 * table at physical address @pgtable: reset the context, enable hardware
 * table walks, point TTBR0 at the table (16K-aligned, hence >> 14),
 * configure faults/TEX-remap/prefetch, and finally enable the MMU.
 * Caller must hold msm_iommu_lock with clocks enabled.
 */
static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
{
	unsigned int prrr, nmrr;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, 0);
	SET_TTBR0_PA(base, ctx, (pgtable >> 14));

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Mirror the CPU's TEX remap attributes into this context */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

#ifdef CONFIG_IOMMU_PGTABLES_L2
	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	SET_TTBR0_SH(base, ctx, 1);
	SET_TTBR1_SH(base, ctx, 1);

	SET_TTBR0_NOS(base, ctx, 1);
	SET_TTBR1_NOS(base, ctx, 1);

	SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR0_IRGNL(base, ctx, 1);

	SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR1_IRGNL(base, ctx, 1);

	SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
	SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
#endif

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}
3e116c3cd
|
208 |
static struct iommu_domain *msm_iommu_domain_alloc(unsigned type) |
0720d1f05
|
209 |
{ |
3e116c3cd
|
210 |
struct msm_priv *priv; |
0720d1f05
|
211 |
|
3e116c3cd
|
212 213 214 215 |
if (type != IOMMU_DOMAIN_UNMANAGED) return NULL; priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
0720d1f05
|
216 217 218 219 220 221 222 223 224 225 226 |
if (!priv) goto fail_nomem; INIT_LIST_HEAD(&priv->list_attached); priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL, get_order(SZ_16K)); if (!priv->pgtable) goto fail_nomem; memset(priv->pgtable, 0, SZ_16K); |
4be6a290b
|
227 |
|
3e116c3cd
|
228 229 230 |
priv->domain.geometry.aperture_start = 0; priv->domain.geometry.aperture_end = (1ULL << 32) - 1; priv->domain.geometry.force_aperture = true; |
4be6a290b
|
231 |
|
3e116c3cd
|
232 |
return &priv->domain; |
0720d1f05
|
233 234 235 |
fail_nomem: kfree(priv); |
3e116c3cd
|
236 |
return NULL; |
0720d1f05
|
237 |
} |
3e116c3cd
|
238 |
static void msm_iommu_domain_free(struct iommu_domain *domain) |
0720d1f05
|
239 240 241 242 243 244 245 |
{ struct msm_priv *priv; unsigned long flags; unsigned long *fl_table; int i; spin_lock_irqsave(&msm_iommu_lock, flags); |
3e116c3cd
|
246 |
priv = to_msm_priv(domain); |
0720d1f05
|
247 |
|
3e116c3cd
|
248 |
fl_table = priv->pgtable; |
0720d1f05
|
249 |
|
3e116c3cd
|
250 251 252 253 |
for (i = 0; i < NUM_FL_PTE; i++) if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) free_page((unsigned long) __va(((fl_table[i]) & FL_BASE_MASK))); |
0720d1f05
|
254 |
|
3e116c3cd
|
255 256 |
free_pages((unsigned long)priv->pgtable, get_order(SZ_16K)); priv->pgtable = NULL; |
0720d1f05
|
257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 |
kfree(priv); spin_unlock_irqrestore(&msm_iommu_lock, flags); } static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) { struct msm_priv *priv; struct msm_iommu_ctx_dev *ctx_dev; struct msm_iommu_drvdata *iommu_drvdata; struct msm_iommu_ctx_drvdata *ctx_drvdata; struct msm_iommu_ctx_drvdata *tmp_drvdata; int ret = 0; unsigned long flags; spin_lock_irqsave(&msm_iommu_lock, flags); |
3e116c3cd
|
273 |
priv = to_msm_priv(domain); |
0720d1f05
|
274 |
|
3e116c3cd
|
275 |
if (!dev) { |
0720d1f05
|
276 277 278 279 280 281 282 283 284 285 286 287 |
ret = -EINVAL; goto fail; } iommu_drvdata = dev_get_drvdata(dev->parent); ctx_drvdata = dev_get_drvdata(dev); ctx_dev = dev->platform_data; if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) { ret = -EINVAL; goto fail; } |
00d4b2bb0
|
288 289 290 291 |
if (!list_empty(&ctx_drvdata->attached_elm)) { ret = -EBUSY; goto fail; } |
0720d1f05
|
292 293 294 295 296 |
list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm) if (tmp_drvdata == ctx_drvdata) { ret = -EBUSY; goto fail; } |
41f3f5138
|
297 298 299 |
ret = __enable_clocks(iommu_drvdata); if (ret) goto fail; |
0720d1f05
|
300 301 |
__program_context(iommu_drvdata->base, ctx_dev->num, __pa(priv->pgtable)); |
41f3f5138
|
302 |
__disable_clocks(iommu_drvdata); |
0720d1f05
|
303 |
list_add(&(ctx_drvdata->attached_elm), &priv->list_attached); |
33069739d
|
304 |
ret = __flush_iotlb(domain); |
0720d1f05
|
305 306 307 308 309 310 311 312 313 314 315 316 317 318 |
fail: spin_unlock_irqrestore(&msm_iommu_lock, flags); return ret; } static void msm_iommu_detach_dev(struct iommu_domain *domain, struct device *dev) { struct msm_priv *priv; struct msm_iommu_ctx_dev *ctx_dev; struct msm_iommu_drvdata *iommu_drvdata; struct msm_iommu_ctx_drvdata *ctx_drvdata; unsigned long flags; |
33069739d
|
319 |
int ret; |
0720d1f05
|
320 321 |
spin_lock_irqsave(&msm_iommu_lock, flags); |
3e116c3cd
|
322 |
priv = to_msm_priv(domain); |
0720d1f05
|
323 |
|
3e116c3cd
|
324 |
if (!dev) |
0720d1f05
|
325 326 327 328 329 330 331 332 |
goto fail; iommu_drvdata = dev_get_drvdata(dev->parent); ctx_drvdata = dev_get_drvdata(dev); ctx_dev = dev->platform_data; if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) goto fail; |
33069739d
|
333 334 335 |
ret = __flush_iotlb(domain); if (ret) goto fail; |
41f3f5138
|
336 337 338 |
ret = __enable_clocks(iommu_drvdata); if (ret) goto fail; |
0720d1f05
|
339 |
__reset_context(iommu_drvdata->base, ctx_dev->num); |
41f3f5138
|
340 |
__disable_clocks(iommu_drvdata); |
0720d1f05
|
341 342 343 344 345 346 347 |
list_del_init(&ctx_drvdata->attached_elm); fail: spin_unlock_irqrestore(&msm_iommu_lock, flags); } static int msm_iommu_map(struct iommu_domain *domain, unsigned long va, |
5009065d3
|
348 |
phys_addr_t pa, size_t len, int prot) |
0720d1f05
|
349 350 351 352 353 354 355 356 357 |
{ struct msm_priv *priv; unsigned long flags; unsigned long *fl_table; unsigned long *fl_pte; unsigned long fl_offset; unsigned long *sl_table; unsigned long *sl_pte; unsigned long sl_offset; |
100832c9b
|
358 |
unsigned int pgprot; |
100832c9b
|
359 |
int ret = 0, tex, sh; |
0720d1f05
|
360 361 |
spin_lock_irqsave(&msm_iommu_lock, flags); |
0720d1f05
|
362 |
|
100832c9b
|
363 364 365 366 367 368 369 |
sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0; tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK]; if (tex < 0 || tex > NUM_TEX_CLASS - 1) { ret = -EINVAL; goto fail; } |
3e116c3cd
|
370 |
priv = to_msm_priv(domain); |
0720d1f05
|
371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 |
fl_table = priv->pgtable; if (len != SZ_16M && len != SZ_1M && len != SZ_64K && len != SZ_4K) { pr_debug("Bad size: %d ", len); ret = -EINVAL; goto fail; } if (!fl_table) { pr_debug("Null page table "); ret = -EINVAL; goto fail; } |
100832c9b
|
388 389 390 391 392 393 394 395 396 397 398 |
if (len == SZ_16M || len == SZ_1M) { pgprot = sh ? FL_SHARED : 0; pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0; pgprot |= tex & 0x02 ? FL_CACHEABLE : 0; pgprot |= tex & 0x04 ? FL_TEX0 : 0; } else { pgprot = sh ? SL_SHARED : 0; pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0; pgprot |= tex & 0x02 ? SL_CACHEABLE : 0; pgprot |= tex & 0x04 ? SL_TEX0 : 0; } |
0720d1f05
|
399 400 401 402 403 404 405 406 |
fl_offset = FL_OFFSET(va); /* Upper 12 bits */ fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ if (len == SZ_16M) { int i = 0; for (i = 0; i < 16; i++) *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION | FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT | |
2e8c8ba98
|
407 |
FL_SHARED | FL_NG | pgprot; |
0720d1f05
|
408 409 410 |
} if (len == SZ_1M) |
2e8c8ba98
|
411 |
*fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG | |
100832c9b
|
412 |
FL_TYPE_SECT | FL_SHARED | pgprot; |
0720d1f05
|
413 414 415 416 |
/* Need a 2nd level table */ if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) { unsigned long *sl; |
294b2dea8
|
417 |
sl = (unsigned long *) __get_free_pages(GFP_ATOMIC, |
0720d1f05
|
418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 |
get_order(SZ_4K)); if (!sl) { pr_debug("Could not allocate second level table "); ret = -ENOMEM; goto fail; } memset(sl, 0, SZ_4K); *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE); } sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK)); sl_offset = SL_OFFSET(va); sl_pte = sl_table + sl_offset; if (len == SZ_4K) |
2e8c8ba98
|
437 |
*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG | |
100832c9b
|
438 |
SL_SHARED | SL_TYPE_SMALL | pgprot; |
0720d1f05
|
439 440 441 442 443 444 |
if (len == SZ_64K) { int i; for (i = 0; i < 16; i++) *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 | |
2e8c8ba98
|
445 |
SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot; |
0720d1f05
|
446 |
} |
33069739d
|
447 |
ret = __flush_iotlb(domain); |
0720d1f05
|
448 449 450 451 |
fail: spin_unlock_irqrestore(&msm_iommu_lock, flags); return ret; } |
5009065d3
|
452 453 |
static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va, size_t len) |
0720d1f05
|
454 455 456 457 458 459 460 461 462 |
{ struct msm_priv *priv; unsigned long flags; unsigned long *fl_table; unsigned long *fl_pte; unsigned long fl_offset; unsigned long *sl_table; unsigned long *sl_pte; unsigned long sl_offset; |
0720d1f05
|
463 464 465 |
int i, ret = 0; spin_lock_irqsave(&msm_iommu_lock, flags); |
3e116c3cd
|
466 |
priv = to_msm_priv(domain); |
0720d1f05
|
467 468 469 470 471 472 473 |
fl_table = priv->pgtable; if (len != SZ_16M && len != SZ_1M && len != SZ_64K && len != SZ_4K) { pr_debug("Bad length: %d ", len); |
0720d1f05
|
474 475 476 477 478 479 |
goto fail; } if (!fl_table) { pr_debug("Null page table "); |
0720d1f05
|
480 481 482 483 484 485 486 487 488 |
goto fail; } fl_offset = FL_OFFSET(va); /* Upper 12 bits */ fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ if (*fl_pte == 0) { pr_debug("First level PTE is 0 "); |
0720d1f05
|
489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 |
goto fail; } /* Unmap supersection */ if (len == SZ_16M) for (i = 0; i < 16; i++) *(fl_pte+i) = 0; if (len == SZ_1M) *fl_pte = 0; sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK)); sl_offset = SL_OFFSET(va); sl_pte = sl_table + sl_offset; if (len == SZ_64K) { for (i = 0; i < 16; i++) *(sl_pte+i) = 0; } if (len == SZ_4K) *sl_pte = 0; if (len == SZ_4K || len == SZ_64K) { int used = 0; for (i = 0; i < NUM_SL_PTE; i++) if (sl_table[i]) used = 1; if (!used) { free_page((unsigned long)sl_table); *fl_pte = 0; } } |
33069739d
|
523 |
ret = __flush_iotlb(domain); |
9e28547f8
|
524 |
|
0720d1f05
|
525 526 |
fail: spin_unlock_irqrestore(&msm_iommu_lock, flags); |
5009065d3
|
527 528 529 530 |
/* the IOMMU API requires us to return how many bytes were unmapped */ len = ret ? 0 : len; return len; |
0720d1f05
|
531 532 533 |
} static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain, |
bb5547acf
|
534 |
dma_addr_t va) |
0720d1f05
|
535 536 537 538 539 540 541 542 543 544 545 |
{ struct msm_priv *priv; struct msm_iommu_drvdata *iommu_drvdata; struct msm_iommu_ctx_drvdata *ctx_drvdata; unsigned int par; unsigned long flags; void __iomem *base; phys_addr_t ret = 0; int ctx; spin_lock_irqsave(&msm_iommu_lock, flags); |
3e116c3cd
|
546 |
priv = to_msm_priv(domain); |
0720d1f05
|
547 548 549 550 551 552 553 554 555 |
if (list_empty(&priv->list_attached)) goto fail; ctx_drvdata = list_entry(priv->list_attached.next, struct msm_iommu_ctx_drvdata, attached_elm); iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent); base = iommu_drvdata->base; ctx = ctx_drvdata->num; |
41f3f5138
|
556 557 558 |
ret = __enable_clocks(iommu_drvdata); if (ret) goto fail; |
0720d1f05
|
559 560 |
/* Invalidate context TLB */ SET_CTX_TLBIALL(base, ctx, 0); |
b0e7808d5
|
561 |
SET_V2PPR(base, ctx, va & V2Pxx_VA); |
0720d1f05
|
562 |
|
0720d1f05
|
563 564 565 566 567 568 569 |
par = GET_PAR(base, ctx); /* We are dealing with a supersection */ if (GET_NOFAULT_SS(base, ctx)) ret = (par & 0xFF000000) | (va & 0x00FFFFFF); else /* Upper 20 bits from PAR, lower 12 from VA */ ret = (par & 0xFFFFF000) | (va & 0x00000FFF); |
33069739d
|
570 571 |
if (GET_FAULT(base, ctx)) ret = 0; |
41f3f5138
|
572 |
__disable_clocks(iommu_drvdata); |
0720d1f05
|
573 574 575 576 |
fail: spin_unlock_irqrestore(&msm_iommu_lock, flags); return ret; } |
4480845ee
|
577 |
/* No optional IOMMU capabilities are advertised by this driver */
static bool msm_iommu_capable(enum iommu_cap cap)
{
	return false;
}

/* Dump the fault-related registers of context bank @ctx, decoding the
 * individual FSR status bits into human-readable tags. */
static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "APF " : "",
			(fsr & 0x10) ? "TLBMF " : "",
			(fsr & 0x20) ? "HTWDEEF " : "",
			(fsr & 0x40) ? "HTWSEEF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x10000) ? "SL " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");
	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}

/*
 * Context-fault interrupt handler: scan every context bank of the
 * faulting IOMMU, log the registers of any bank with a pending fault,
 * and clear its fault status (FSR write of 0x4000000F).
 * NOTE(review): always returns 0 (IRQ_NONE) even after servicing a
 * fault — confirm this is intentional for this interrupt line.
 */
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_drvdata *drvdata = dev_id;
	void __iomem *base;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!drvdata) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	base = drvdata->base;

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int) base);

	ret = __enable_clocks(drvdata);
	if (ret)
		goto fail;

	for (i = 0; i < drvdata->ncb; i++) {
		fsr = GET_FSR(base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(base, i);
			SET_FSR(base, i, 0x4000000F);
		}
	}
	__disable_clocks(drvdata);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}
b22f6434c
|
654 |
/* Generic IOMMU API entry points for the MSM IOMMU driver */
static const struct iommu_ops msm_iommu_ops = {
	.capable = msm_iommu_capable,
	.domain_alloc = msm_iommu_domain_alloc,
	.domain_free = msm_iommu_domain_free,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_sg = default_iommu_map_sg,	/* generic per-page map loop */
	.iova_to_phys = msm_iommu_iova_to_phys,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};
100832c9b
|
666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 |
static int __init get_tex_class(int icp, int ocp, int mt, int nos) { int i = 0; unsigned int prrr = 0; unsigned int nmrr = 0; int c_icp, c_ocp, c_mt, c_nos; RCP15_PRRR(prrr); RCP15_NMRR(nmrr); for (i = 0; i < NUM_TEX_CLASS; i++) { c_nos = PRRR_NOS(prrr, i); c_mt = PRRR_MT(prrr, i); c_icp = NMRR_ICP(nmrr, i); c_ocp = NMRR_OCP(nmrr, i); if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos) return i; } return -ENODEV; } static void __init setup_iommu_tex_classes(void) { msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] = get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1); msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] = get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1); msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] = get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1); msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] = get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1); } |
516cbc793
|
703 |
/* Driver init: compute the TEX class table, then register this driver's
 * iommu_ops for all platform-bus devices. */
static int __init msm_iommu_init(void)
{
	setup_iommu_tex_classes();
	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
	return 0;
}

/* subsys_initcall: must run before drivers that depend on the IOMMU */
subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");