drivers/iommu/omap-iommu.c
/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>

#include <plat/iommu.h>

#include <plat/iopgtable.h>

#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)

/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
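/*
 * These four sizes match the mapping paths implemented below: 4K and 64K
 * entries are written by iopte_alloc_page()/iopte_alloc_large() at the
 * second level, 1M and 16M entries by iopgd_alloc_section()/
 * iopgd_alloc_super() at the first level.
 */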

/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct omap_iommu *iommu_dev;
	spinlock_t lock;
};

/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * omap_install_iommu_arch - Install architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithm (tlb, pagetable) among
 * omap series. This interface installs such an iommu algorithm.
 **/
int omap_install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_install_iommu_arch);

/**
 * omap_uninstall_iommu_arch - Uninstall architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);

/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);

/**
 * omap_iommu_arch_version - Return running iommu arch version
 **/
u32 omap_iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(omap_iommu_arch_version);

static int iommu_enable(struct omap_iommu *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (!arch_iommu)
		return -ENODEV;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	clk_disable(obj->clk);
	return err;
}

static void iommu_disable(struct omap_iommu *obj)
{
	if (!obj)
		return;

	clk_enable(obj->clk);

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}

/*
 *	TLB operations
 */
void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}

static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}

/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		clk_disable(obj->clk);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}

#else	/* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	clk_enable(obj->clk);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	clk_enable(obj->clk);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	clk_disable(obj->clk);
}

#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);

static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}

/**
 * omap_dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 **/
size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);

int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */

/*
 *	H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}

static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 *	Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;

	if (!obj->refcount)
		return IRQ_NONE;

	clk_enable(obj->clk);
	errs = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
			"*pgd:0x%08x\n", obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
		"pte:0x%p *pte:0x%08x\n", obj->name, errs, da,
		iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}

static int device_match_by_alias(struct device *dev, void *data)
{
	struct omap_iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @name:	name of target omap iommu device
 * @iopgd:	page table
 **/
static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
	int err = -ENOMEM;
	struct device *dev;
	struct omap_iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL,
				(void *)name,
				device_match_by_alias);
	if (!dev)
		return NULL;

	obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	/* an iommu device can only be attached once */
	if (++obj->refcount > 1) {
		dev_err(dev, "%s: already attached!\n", obj->name);
		err = -EBUSY;
		goto err_enable;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	if (!try_module_get(obj->owner))
		goto err_module;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);

err_enable:
	obj->refcount--;
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}

/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->da_start = pdata->da_start;
	obj->da_end = pdata->da_end;

	spin_lock_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_irq:
	iounmap(obj->regbase);
err_ioremap:
	release_mem_region(res->start, resource_size(res));
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}

static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};

static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	struct iotlb_entry e;
	int omap_pgsz;
	u32 ret, flags;

	/* we only support mapping a single iommu page for now */
	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %d\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);

	flags = omap_pgsz | prot;

	iotlb_init_entry(&e, da, pa, flags);

	ret = omap_iopgtable_store_entry(oiommu, &e);
	if (ret)
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);

	return ret;
}

static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;

	dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);

	return iopgtable_clear_entry(oiommu, da);
}

static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	int ret = 0;

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		goto out;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;

out:
	spin_unlock(&omap_domain->lock);
}

static int omap_iommu_domain_init(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
		goto out;
	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable) {
		pr_err("kzalloc failed\n");
		goto fail_nomem;
	}

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));
	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	domain->priv = omap_domain;

	return 0;

fail_nomem:
	kfree(omap_domain);
out:
	return -ENOMEM;
}

/* assume device was already detached */
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	domain->priv = NULL;

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}

static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long da)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da);
	}

	return ret;
}

static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.domain_has_cap	= omap_iommu_domain_has_cap,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);

	return platform_driver_register(&omap_iommu_driver);
}
module_init(omap_iommu_init);

static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");
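
/*
 * Illustrative sketch only (not part of this driver): once omap_iommu_ops is
 * registered on the platform bus above, a client driver would typically go
 * through the generic IOMMU API rather than the omap_* helpers, roughly:
 *
 *	struct iommu_domain *domain;
 *	int err;
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	err = iommu_attach_device(domain, dev);      -> omap_iommu_attach_dev()
 *	err = iommu_map(domain, da, pa, SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE);   -> omap_iommu_map()
 *	...
 *	iommu_unmap(domain, da, SZ_4K);              -> omap_iommu_unmap()
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 *
 * "dev", "da" and "pa" are placeholders here; the size passed to iommu_map()
 * must be one of the sizes advertised in OMAP_IOMMU_PGSIZES.
 */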