Commit 87f8e57327bd8d85fb5b46cad29ac281430cc50d
Committed by
Joerg Roedel
1 parent
a33977206c
Exists in
smarc-l5.0.0_1.0.0-ga
and in
5 other branches
iommu/omap: Keep mmu enabled when requested
The purpose of the mmu is to handle the memory accesses requested by its users. Typically, the mmu is bundled with the processing unit in a single IP block, which makes them share the same clock to be functional. Currently, iommu code assumes that its user will be indirectly clocking it, but being a separate mmu driver, it should handle its own clocks; so as long as the mmu is requested it will be powered ON, and once detached it will be powered OFF. The remaining clock handling outside of iommu_enable and iommu_disable corresponds to paths that can be accessed through debugfs, some of which don't work if the module is not enabled first; but in the future, if the mmu is idled without being freed, these are needed for debugging. Signed-off-by: Omar Ramirez Luna <omar.luna@linaro.org> Tested-by: Ohad Ben-Cohen <ohad@wizery.com> Signed-off-by: Joerg Roedel <joro@8bytes.org>
Showing 1 changed file with 0 additions and 3 deletions Inline Diff
drivers/iommu/omap-iommu.c
1 | /* | 1 | /* |
2 | * omap iommu: tlb and pagetable primitives | 2 | * omap iommu: tlb and pagetable primitives |
3 | * | 3 | * |
4 | * Copyright (C) 2008-2010 Nokia Corporation | 4 | * Copyright (C) 2008-2010 Nokia Corporation |
5 | * | 5 | * |
6 | * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>, | 6 | * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>, |
7 | * Paul Mundt and Toshihiro Kobayashi | 7 | * Paul Mundt and Toshihiro Kobayashi |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/err.h> | 14 | #include <linux/err.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/ioport.h> | 18 | #include <linux/ioport.h> |
19 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/iommu.h> | 21 | #include <linux/iommu.h> |
22 | #include <linux/omap-iommu.h> | 22 | #include <linux/omap-iommu.h> |
23 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
25 | #include <linux/io.h> | 25 | #include <linux/io.h> |
26 | 26 | ||
27 | #include <asm/cacheflush.h> | 27 | #include <asm/cacheflush.h> |
28 | 28 | ||
29 | #include <linux/platform_data/iommu-omap.h> | 29 | #include <linux/platform_data/iommu-omap.h> |
30 | 30 | ||
31 | #include "omap-iopgtable.h" | 31 | #include "omap-iopgtable.h" |
32 | #include "omap-iommu.h" | 32 | #include "omap-iommu.h" |
33 | 33 | ||
/*
 * Iterate over the first (n) TLB entries of (obj): on each pass the
 * entry's CAM/RAM register pair is read back into (cr) via
 * __iotlb_read_cr().  The comma expression always yields true, so the
 * loop is bounded only by __i < (n).
 */
#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)

/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
41 | 41 | ||
/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @dev:	Device using this domain.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct omap_iommu *iommu_dev;
	struct device *dev;
	spinlock_t lock;
};
56 | 56 | ||
/*
 * Field accessors for the MMU_LOCK register: BASE is the number of
 * locked (preserved) entries at the bottom of the TLB, VICT is the
 * victim index used for the next load/read.  Both fields are 5 bits
 * wide (up to 32 TLB entries).
 */
#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)

/* software image of the MMU_LOCK register (see accessors above) */
struct iotlb_lock {
	short base;	/* preserved-entry count */
	short vict;	/* current victim index */
};
71 | 71 | ||
/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;	/* allocator for 2nd-level page tables */
77 | 77 | ||
/**
 * omap_install_iommu_arch - Install archtecure specific iommu functions
 * @ops: a pointer to architecture specific iommu functions
 *
 * There are several kind of iommu algorithm(tlb, pagetable) among
 * omap series. This interface installs such an iommu algorighm.
 *
 * Returns 0 on success, -EBUSY if an implementation is already
 * installed (only one may be registered at a time).
 **/
int omap_install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_install_iommu_arch);

/**
 * omap_uninstall_iommu_arch - Uninstall archtecure specific iommu functions
 * @ops: a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorighm installed previously.
 * Note: arch_iommu is cleared even on the "not your arch" mismatch;
 * only a warning is printed.
 **/
void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
109 | 109 | ||
/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev: client device
 *
 * Delegates to the installed arch implementation; assumes an arch has
 * been installed (arch_iommu is dereferenced unchecked).
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev: client device
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);

/**
 * omap_iommu_arch_version - Return running iommu arch version
 **/
u32 omap_iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
142 | 142 | ||
/*
 * Power up and enable the mmu.  The clock is deliberately left enabled
 * here: it stays on for as long as the mmu is attached and is only
 * released in iommu_disable() (see commit "iommu/omap: Keep mmu
 * enabled when requested").
 *
 * Returns 0 on success, -EINVAL for a NULL obj, -ENODEV when no arch
 * implementation is installed, or the arch enable() error.
 */
static int iommu_enable(struct omap_iommu *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (!arch_iommu)
		return -ENODEV;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	return err;
}
160 | 159 | ||
/*
 * Disable the mmu and release the clock taken in iommu_enable().
 * The clock is assumed to still be enabled at this point, so no
 * clk_enable() is needed before touching the hardware.
 */
static void iommu_disable(struct omap_iommu *obj)
{
	if (!obj)
		return;

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}
172 | 169 | ||
/*
 * TLB operations
 *
 * Thin wrappers that dispatch to the installed arch_iommu ops; each
 * assumes an arch implementation has been installed.
 */

/* Decode a CAM/RAM register pair into a generic iotlb_entry. */
void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);

/* Nonzero when @cr describes a valid TLB entry; -EINVAL for NULL. */
static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

/* Build a CAM/RAM pair for @e; caller frees the result (see kfree users). */
static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

/* Device virtual address covered by the entry @cr. */
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}

/* Page-table entry attribute bits for @e. */
static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

/* Read and decode the fault status; faulting address returned via @da. */
static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}
215 | 212 | ||
/* Read the hardware MMU_LOCK register into the software image @l. */
static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);

}

/* Write the software image @l back to the hardware MMU_LOCK register. */
static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

/* Read the CAM/RAM pair of the current victim entry into @cr. */
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

/*
 * Load @cr into the current victim slot.  The entry is flushed first,
 * then loaded; this write ordering is hardware-mandated, do not reorder.
 */
static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}
249 | 246 | ||
250 | /** | 247 | /** |
251 | * iotlb_dump_cr - Dump an iommu tlb entry into buf | 248 | * iotlb_dump_cr - Dump an iommu tlb entry into buf |
252 | * @obj: target iommu | 249 | * @obj: target iommu |
253 | * @cr: contents of cam and ram register | 250 | * @cr: contents of cam and ram register |
254 | * @buf: output buffer | 251 | * @buf: output buffer |
255 | **/ | 252 | **/ |
256 | static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, | 253 | static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, |
257 | char *buf) | 254 | char *buf) |
258 | { | 255 | { |
259 | BUG_ON(!cr || !buf); | 256 | BUG_ON(!cr || !buf); |
260 | 257 | ||
261 | return arch_iommu->dump_cr(obj, cr, buf); | 258 | return arch_iommu->dump_cr(obj, cr, buf); |
262 | } | 259 | } |
263 | 260 | ||
264 | /* only used in iotlb iteration for-loop */ | 261 | /* only used in iotlb iteration for-loop */ |
265 | static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n) | 262 | static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n) |
266 | { | 263 | { |
267 | struct cr_regs cr; | 264 | struct cr_regs cr; |
268 | struct iotlb_lock l; | 265 | struct iotlb_lock l; |
269 | 266 | ||
270 | iotlb_lock_get(obj, &l); | 267 | iotlb_lock_get(obj, &l); |
271 | l.vict = n; | 268 | l.vict = n; |
272 | iotlb_lock_set(obj, &l); | 269 | iotlb_lock_set(obj, &l); |
273 | iotlb_read_cr(obj, &cr); | 270 | iotlb_read_cr(obj, &cr); |
274 | 271 | ||
275 | return cr; | 272 | return cr; |
276 | } | 273 | } |
277 | 274 | ||
278 | /** | 275 | /** |
279 | * load_iotlb_entry - Set an iommu tlb entry | 276 | * load_iotlb_entry - Set an iommu tlb entry |
280 | * @obj: target iommu | 277 | * @obj: target iommu |
281 | * @e: an iommu tlb entry info | 278 | * @e: an iommu tlb entry info |
282 | **/ | 279 | **/ |
283 | #ifdef PREFETCH_IOTLB | 280 | #ifdef PREFETCH_IOTLB |
284 | static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) | 281 | static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) |
285 | { | 282 | { |
286 | int err = 0; | 283 | int err = 0; |
287 | struct iotlb_lock l; | 284 | struct iotlb_lock l; |
288 | struct cr_regs *cr; | 285 | struct cr_regs *cr; |
289 | 286 | ||
290 | if (!obj || !obj->nr_tlb_entries || !e) | 287 | if (!obj || !obj->nr_tlb_entries || !e) |
291 | return -EINVAL; | 288 | return -EINVAL; |
292 | 289 | ||
293 | clk_enable(obj->clk); | 290 | clk_enable(obj->clk); |
294 | 291 | ||
295 | iotlb_lock_get(obj, &l); | 292 | iotlb_lock_get(obj, &l); |
296 | if (l.base == obj->nr_tlb_entries) { | 293 | if (l.base == obj->nr_tlb_entries) { |
297 | dev_warn(obj->dev, "%s: preserve entries full\n", __func__); | 294 | dev_warn(obj->dev, "%s: preserve entries full\n", __func__); |
298 | err = -EBUSY; | 295 | err = -EBUSY; |
299 | goto out; | 296 | goto out; |
300 | } | 297 | } |
301 | if (!e->prsvd) { | 298 | if (!e->prsvd) { |
302 | int i; | 299 | int i; |
303 | struct cr_regs tmp; | 300 | struct cr_regs tmp; |
304 | 301 | ||
305 | for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp) | 302 | for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp) |
306 | if (!iotlb_cr_valid(&tmp)) | 303 | if (!iotlb_cr_valid(&tmp)) |
307 | break; | 304 | break; |
308 | 305 | ||
309 | if (i == obj->nr_tlb_entries) { | 306 | if (i == obj->nr_tlb_entries) { |
310 | dev_dbg(obj->dev, "%s: full: no entry\n", __func__); | 307 | dev_dbg(obj->dev, "%s: full: no entry\n", __func__); |
311 | err = -EBUSY; | 308 | err = -EBUSY; |
312 | goto out; | 309 | goto out; |
313 | } | 310 | } |
314 | 311 | ||
315 | iotlb_lock_get(obj, &l); | 312 | iotlb_lock_get(obj, &l); |
316 | } else { | 313 | } else { |
317 | l.vict = l.base; | 314 | l.vict = l.base; |
318 | iotlb_lock_set(obj, &l); | 315 | iotlb_lock_set(obj, &l); |
319 | } | 316 | } |
320 | 317 | ||
321 | cr = iotlb_alloc_cr(obj, e); | 318 | cr = iotlb_alloc_cr(obj, e); |
322 | if (IS_ERR(cr)) { | 319 | if (IS_ERR(cr)) { |
323 | clk_disable(obj->clk); | 320 | clk_disable(obj->clk); |
324 | return PTR_ERR(cr); | 321 | return PTR_ERR(cr); |
325 | } | 322 | } |
326 | 323 | ||
327 | iotlb_load_cr(obj, cr); | 324 | iotlb_load_cr(obj, cr); |
328 | kfree(cr); | 325 | kfree(cr); |
329 | 326 | ||
330 | if (e->prsvd) | 327 | if (e->prsvd) |
331 | l.base++; | 328 | l.base++; |
332 | /* increment victim for next tlb load */ | 329 | /* increment victim for next tlb load */ |
333 | if (++l.vict == obj->nr_tlb_entries) | 330 | if (++l.vict == obj->nr_tlb_entries) |
334 | l.vict = l.base; | 331 | l.vict = l.base; |
335 | iotlb_lock_set(obj, &l); | 332 | iotlb_lock_set(obj, &l); |
336 | out: | 333 | out: |
337 | clk_disable(obj->clk); | 334 | clk_disable(obj->clk); |
338 | return err; | 335 | return err; |
339 | } | 336 | } |
340 | 337 | ||
341 | #else /* !PREFETCH_IOTLB */ | 338 | #else /* !PREFETCH_IOTLB */ |
342 | 339 | ||
343 | static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) | 340 | static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) |
344 | { | 341 | { |
345 | return 0; | 342 | return 0; |
346 | } | 343 | } |
347 | 344 | ||
348 | #endif /* !PREFETCH_IOTLB */ | 345 | #endif /* !PREFETCH_IOTLB */ |
349 | 346 | ||
/* Pre-load a TLB entry; a no-op unless PREFETCH_IOTLB is defined. */
static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}
354 | 351 | ||
355 | /** | 352 | /** |
356 | * flush_iotlb_page - Clear an iommu tlb entry | 353 | * flush_iotlb_page - Clear an iommu tlb entry |
357 | * @obj: target iommu | 354 | * @obj: target iommu |
358 | * @da: iommu device virtual address | 355 | * @da: iommu device virtual address |
359 | * | 356 | * |
360 | * Clear an iommu tlb entry which includes 'da' address. | 357 | * Clear an iommu tlb entry which includes 'da' address. |
361 | **/ | 358 | **/ |
362 | static void flush_iotlb_page(struct omap_iommu *obj, u32 da) | 359 | static void flush_iotlb_page(struct omap_iommu *obj, u32 da) |
363 | { | 360 | { |
364 | int i; | 361 | int i; |
365 | struct cr_regs cr; | 362 | struct cr_regs cr; |
366 | 363 | ||
367 | clk_enable(obj->clk); | 364 | clk_enable(obj->clk); |
368 | 365 | ||
369 | for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) { | 366 | for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) { |
370 | u32 start; | 367 | u32 start; |
371 | size_t bytes; | 368 | size_t bytes; |
372 | 369 | ||
373 | if (!iotlb_cr_valid(&cr)) | 370 | if (!iotlb_cr_valid(&cr)) |
374 | continue; | 371 | continue; |
375 | 372 | ||
376 | start = iotlb_cr_to_virt(&cr); | 373 | start = iotlb_cr_to_virt(&cr); |
377 | bytes = iopgsz_to_bytes(cr.cam & 3); | 374 | bytes = iopgsz_to_bytes(cr.cam & 3); |
378 | 375 | ||
379 | if ((start <= da) && (da < start + bytes)) { | 376 | if ((start <= da) && (da < start + bytes)) { |
380 | dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n", | 377 | dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n", |
381 | __func__, start, da, bytes); | 378 | __func__, start, da, bytes); |
382 | iotlb_load_cr(obj, &cr); | 379 | iotlb_load_cr(obj, &cr); |
383 | iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); | 380 | iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); |
384 | } | 381 | } |
385 | } | 382 | } |
386 | clk_disable(obj->clk); | 383 | clk_disable(obj->clk); |
387 | 384 | ||
388 | if (i == obj->nr_tlb_entries) | 385 | if (i == obj->nr_tlb_entries) |
389 | dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da); | 386 | dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da); |
390 | } | 387 | } |
391 | 388 | ||
392 | /** | 389 | /** |
393 | * flush_iotlb_all - Clear all iommu tlb entries | 390 | * flush_iotlb_all - Clear all iommu tlb entries |
394 | * @obj: target iommu | 391 | * @obj: target iommu |
395 | **/ | 392 | **/ |
396 | static void flush_iotlb_all(struct omap_iommu *obj) | 393 | static void flush_iotlb_all(struct omap_iommu *obj) |
397 | { | 394 | { |
398 | struct iotlb_lock l; | 395 | struct iotlb_lock l; |
399 | 396 | ||
400 | clk_enable(obj->clk); | 397 | clk_enable(obj->clk); |
401 | 398 | ||
402 | l.base = 0; | 399 | l.base = 0; |
403 | l.vict = 0; | 400 | l.vict = 0; |
404 | iotlb_lock_set(obj, &l); | 401 | iotlb_lock_set(obj, &l); |
405 | 402 | ||
406 | iommu_write_reg(obj, 1, MMU_GFLUSH); | 403 | iommu_write_reg(obj, 1, MMU_GFLUSH); |
407 | 404 | ||
408 | clk_disable(obj->clk); | 405 | clk_disable(obj->clk); |
409 | } | 406 | } |
410 | 407 | ||
#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

/*
 * Dump the arch-specific register context of @obj into @buf (debugfs
 * helper).  Returns the number of bytes written, or -EINVAL for NULL
 * arguments.
 */
ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);
427 | 424 | ||
/*
 * Copy up to @num valid TLB entries of @obj into the @crs array.
 * The MMU_LOCK register is saved and restored around the walk because
 * the iteration itself moves the victim pointer.  Returns the number
 * of entries actually copied.
 */
static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}
450 | 447 | ||
451 | /** | 448 | /** |
452 | * omap_dump_tlb_entries - dump cr arrays to given buffer | 449 | * omap_dump_tlb_entries - dump cr arrays to given buffer |
453 | * @obj: target iommu | 450 | * @obj: target iommu |
454 | * @buf: output buffer | 451 | * @buf: output buffer |
455 | **/ | 452 | **/ |
456 | size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes) | 453 | size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes) |
457 | { | 454 | { |
458 | int i, num; | 455 | int i, num; |
459 | struct cr_regs *cr; | 456 | struct cr_regs *cr; |
460 | char *p = buf; | 457 | char *p = buf; |
461 | 458 | ||
462 | num = bytes / sizeof(*cr); | 459 | num = bytes / sizeof(*cr); |
463 | num = min(obj->nr_tlb_entries, num); | 460 | num = min(obj->nr_tlb_entries, num); |
464 | 461 | ||
465 | cr = kcalloc(num, sizeof(*cr), GFP_KERNEL); | 462 | cr = kcalloc(num, sizeof(*cr), GFP_KERNEL); |
466 | if (!cr) | 463 | if (!cr) |
467 | return 0; | 464 | return 0; |
468 | 465 | ||
469 | num = __dump_tlb_entries(obj, cr, num); | 466 | num = __dump_tlb_entries(obj, cr, num); |
470 | for (i = 0; i < num; i++) | 467 | for (i = 0; i < num; i++) |
471 | p += iotlb_dump_cr(obj, cr + i, p); | 468 | p += iotlb_dump_cr(obj, cr + i, p); |
472 | kfree(cr); | 469 | kfree(cr); |
473 | 470 | ||
474 | return p - buf; | 471 | return p - buf; |
475 | } | 472 | } |
476 | EXPORT_SYMBOL_GPL(omap_dump_tlb_entries); | 473 | EXPORT_SYMBOL_GPL(omap_dump_tlb_entries); |
477 | 474 | ||
/*
 * Invoke @fn for every device bound to the omap-iommu platform driver;
 * iteration stops early if @fn returns non-zero (driver-core semantics).
 */
int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
486 | 483 | ||
487 | /* | 484 | /* |
488 | * H/W pagetable operations | 485 | * H/W pagetable operations |
489 | */ | 486 | */ |
490 | static void flush_iopgd_range(u32 *first, u32 *last) | 487 | static void flush_iopgd_range(u32 *first, u32 *last) |
491 | { | 488 | { |
492 | /* FIXME: L2 cache should be taken care of if it exists */ | 489 | /* FIXME: L2 cache should be taken care of if it exists */ |
493 | do { | 490 | do { |
494 | asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd" | 491 | asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd" |
495 | : : "r" (first)); | 492 | : : "r" (first)); |
496 | first += L1_CACHE_BYTES / sizeof(*first); | 493 | first += L1_CACHE_BYTES / sizeof(*first); |
497 | } while (first <= last); | 494 | } while (first <= last); |
498 | } | 495 | } |
499 | 496 | ||
500 | static void flush_iopte_range(u32 *first, u32 *last) | 497 | static void flush_iopte_range(u32 *first, u32 *last) |
501 | { | 498 | { |
502 | /* FIXME: L2 cache should be taken care of if it exists */ | 499 | /* FIXME: L2 cache should be taken care of if it exists */ |
503 | do { | 500 | do { |
504 | asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte" | 501 | asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte" |
505 | : : "r" (first)); | 502 | : : "r" (first)); |
506 | first += L1_CACHE_BYTES / sizeof(*first); | 503 | first += L1_CACHE_BYTES / sizeof(*first); |
507 | } while (first <= last); | 504 | } while (first <= last); |
508 | } | 505 | } |
509 | 506 | ||
510 | static void iopte_free(u32 *iopte) | 507 | static void iopte_free(u32 *iopte) |
511 | { | 508 | { |
512 | /* Note: freed iopte's must be clean ready for re-use */ | 509 | /* Note: freed iopte's must be clean ready for re-use */ |
513 | kmem_cache_free(iopte_cachep, iopte); | 510 | kmem_cache_free(iopte_cachep, iopte); |
514 | } | 511 | } |
515 | 512 | ||
/*
 * Return the pte slot for @da, allocating the second-level table on
 * demand.  Called with obj->page_table_lock held; the lock is dropped
 * around the GFP_KERNEL allocation, so a racing allocator is handled
 * by re-checking *iopgd afterwards and freeing the redundant table.
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table has already existed */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		/* make the new pgd entry visible to the table walker */
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the reduniovant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}
553 | 550 | ||
554 | static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) | 551 | static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) |
555 | { | 552 | { |
556 | u32 *iopgd = iopgd_offset(obj, da); | 553 | u32 *iopgd = iopgd_offset(obj, da); |
557 | 554 | ||
558 | if ((da | pa) & ~IOSECTION_MASK) { | 555 | if ((da | pa) & ~IOSECTION_MASK) { |
559 | dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n", | 556 | dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n", |
560 | __func__, da, pa, IOSECTION_SIZE); | 557 | __func__, da, pa, IOSECTION_SIZE); |
561 | return -EINVAL; | 558 | return -EINVAL; |
562 | } | 559 | } |
563 | 560 | ||
564 | *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION; | 561 | *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION; |
565 | flush_iopgd_range(iopgd, iopgd); | 562 | flush_iopgd_range(iopgd, iopgd); |
566 | return 0; | 563 | return 0; |
567 | } | 564 | } |
568 | 565 | ||
569 | static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) | 566 | static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) |
570 | { | 567 | { |
571 | u32 *iopgd = iopgd_offset(obj, da); | 568 | u32 *iopgd = iopgd_offset(obj, da); |
572 | int i; | 569 | int i; |
573 | 570 | ||
574 | if ((da | pa) & ~IOSUPER_MASK) { | 571 | if ((da | pa) & ~IOSUPER_MASK) { |
575 | dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n", | 572 | dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n", |
576 | __func__, da, pa, IOSUPER_SIZE); | 573 | __func__, da, pa, IOSUPER_SIZE); |
577 | return -EINVAL; | 574 | return -EINVAL; |
578 | } | 575 | } |
579 | 576 | ||
580 | for (i = 0; i < 16; i++) | 577 | for (i = 0; i < 16; i++) |
581 | *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER; | 578 | *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER; |
582 | flush_iopgd_range(iopgd, iopgd + 15); | 579 | flush_iopgd_range(iopgd, iopgd + 15); |
583 | return 0; | 580 | return 0; |
584 | } | 581 | } |
585 | 582 | ||
586 | static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) | 583 | static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) |
587 | { | 584 | { |
588 | u32 *iopgd = iopgd_offset(obj, da); | 585 | u32 *iopgd = iopgd_offset(obj, da); |
589 | u32 *iopte = iopte_alloc(obj, iopgd, da); | 586 | u32 *iopte = iopte_alloc(obj, iopgd, da); |
590 | 587 | ||
591 | if (IS_ERR(iopte)) | 588 | if (IS_ERR(iopte)) |
592 | return PTR_ERR(iopte); | 589 | return PTR_ERR(iopte); |
593 | 590 | ||
594 | *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL; | 591 | *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL; |
595 | flush_iopte_range(iopte, iopte); | 592 | flush_iopte_range(iopte, iopte); |
596 | 593 | ||
597 | dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n", | 594 | dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n", |
598 | __func__, da, pa, iopte, *iopte); | 595 | __func__, da, pa, iopte, *iopte); |
599 | 596 | ||
600 | return 0; | 597 | return 0; |
601 | } | 598 | } |
602 | 599 | ||
603 | static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) | 600 | static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) |
604 | { | 601 | { |
605 | u32 *iopgd = iopgd_offset(obj, da); | 602 | u32 *iopgd = iopgd_offset(obj, da); |
606 | u32 *iopte = iopte_alloc(obj, iopgd, da); | 603 | u32 *iopte = iopte_alloc(obj, iopgd, da); |
607 | int i; | 604 | int i; |
608 | 605 | ||
609 | if ((da | pa) & ~IOLARGE_MASK) { | 606 | if ((da | pa) & ~IOLARGE_MASK) { |
610 | dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n", | 607 | dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n", |
611 | __func__, da, pa, IOLARGE_SIZE); | 608 | __func__, da, pa, IOLARGE_SIZE); |
612 | return -EINVAL; | 609 | return -EINVAL; |
613 | } | 610 | } |
614 | 611 | ||
615 | if (IS_ERR(iopte)) | 612 | if (IS_ERR(iopte)) |
616 | return PTR_ERR(iopte); | 613 | return PTR_ERR(iopte); |
617 | 614 | ||
618 | for (i = 0; i < 16; i++) | 615 | for (i = 0; i < 16; i++) |
619 | *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE; | 616 | *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE; |
620 | flush_iopte_range(iopte, iopte + 15); | 617 | flush_iopte_range(iopte, iopte + 15); |
621 | return 0; | 618 | return 0; |
622 | } | 619 | } |
623 | 620 | ||
/*
 * iopgtable_store_entry_core - dispatch @e to the allocator matching its
 * page size (16M/1M/64K/4K) and install the mapping while holding
 * obj->page_table_lock.
 *
 * Returns 0 on success or a negative errno; BUG()s on an invalid page
 * size, since @e is built by in-kernel callers.
 */
static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}
661 | 658 | ||
/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:	an iommu tlb entry info
 *
 * Invalidates any stale TLB entry for @e->da, writes the page table
 * entry, and on success preloads the new translation into the TLB.
 * Returns 0 on success or a negative errno.
 **/
int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
678 | 675 | ||
679 | /** | 676 | /** |
680 | * iopgtable_lookup_entry - Lookup an iommu pte entry | 677 | * iopgtable_lookup_entry - Lookup an iommu pte entry |
681 | * @obj: target iommu | 678 | * @obj: target iommu |
682 | * @da: iommu device virtual address | 679 | * @da: iommu device virtual address |
683 | * @ppgd: iommu pgd entry pointer to be returned | 680 | * @ppgd: iommu pgd entry pointer to be returned |
684 | * @ppte: iommu pte entry pointer to be returned | 681 | * @ppte: iommu pte entry pointer to be returned |
685 | **/ | 682 | **/ |
686 | static void | 683 | static void |
687 | iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte) | 684 | iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte) |
688 | { | 685 | { |
689 | u32 *iopgd, *iopte = NULL; | 686 | u32 *iopgd, *iopte = NULL; |
690 | 687 | ||
691 | iopgd = iopgd_offset(obj, da); | 688 | iopgd = iopgd_offset(obj, da); |
692 | if (!*iopgd) | 689 | if (!*iopgd) |
693 | goto out; | 690 | goto out; |
694 | 691 | ||
695 | if (iopgd_is_table(*iopgd)) | 692 | if (iopgd_is_table(*iopgd)) |
696 | iopte = iopte_offset(iopgd, da); | 693 | iopte = iopte_offset(iopgd, da); |
697 | out: | 694 | out: |
698 | *ppgd = iopgd; | 695 | *ppgd = iopgd; |
699 | *ppte = iopte; | 696 | *ppte = iopte; |
700 | } | 697 | } |
701 | 698 | ||
702 | static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da) | 699 | static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da) |
703 | { | 700 | { |
704 | size_t bytes; | 701 | size_t bytes; |
705 | u32 *iopgd = iopgd_offset(obj, da); | 702 | u32 *iopgd = iopgd_offset(obj, da); |
706 | int nent = 1; | 703 | int nent = 1; |
707 | 704 | ||
708 | if (!*iopgd) | 705 | if (!*iopgd) |
709 | return 0; | 706 | return 0; |
710 | 707 | ||
711 | if (iopgd_is_table(*iopgd)) { | 708 | if (iopgd_is_table(*iopgd)) { |
712 | int i; | 709 | int i; |
713 | u32 *iopte = iopte_offset(iopgd, da); | 710 | u32 *iopte = iopte_offset(iopgd, da); |
714 | 711 | ||
715 | bytes = IOPTE_SIZE; | 712 | bytes = IOPTE_SIZE; |
716 | if (*iopte & IOPTE_LARGE) { | 713 | if (*iopte & IOPTE_LARGE) { |
717 | nent *= 16; | 714 | nent *= 16; |
718 | /* rewind to the 1st entry */ | 715 | /* rewind to the 1st entry */ |
719 | iopte = iopte_offset(iopgd, (da & IOLARGE_MASK)); | 716 | iopte = iopte_offset(iopgd, (da & IOLARGE_MASK)); |
720 | } | 717 | } |
721 | bytes *= nent; | 718 | bytes *= nent; |
722 | memset(iopte, 0, nent * sizeof(*iopte)); | 719 | memset(iopte, 0, nent * sizeof(*iopte)); |
723 | flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte)); | 720 | flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte)); |
724 | 721 | ||
725 | /* | 722 | /* |
726 | * do table walk to check if this table is necessary or not | 723 | * do table walk to check if this table is necessary or not |
727 | */ | 724 | */ |
728 | iopte = iopte_offset(iopgd, 0); | 725 | iopte = iopte_offset(iopgd, 0); |
729 | for (i = 0; i < PTRS_PER_IOPTE; i++) | 726 | for (i = 0; i < PTRS_PER_IOPTE; i++) |
730 | if (iopte[i]) | 727 | if (iopte[i]) |
731 | goto out; | 728 | goto out; |
732 | 729 | ||
733 | iopte_free(iopte); | 730 | iopte_free(iopte); |
734 | nent = 1; /* for the next L1 entry */ | 731 | nent = 1; /* for the next L1 entry */ |
735 | } else { | 732 | } else { |
736 | bytes = IOPGD_SIZE; | 733 | bytes = IOPGD_SIZE; |
737 | if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) { | 734 | if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) { |
738 | nent *= 16; | 735 | nent *= 16; |
739 | /* rewind to the 1st entry */ | 736 | /* rewind to the 1st entry */ |
740 | iopgd = iopgd_offset(obj, (da & IOSUPER_MASK)); | 737 | iopgd = iopgd_offset(obj, (da & IOSUPER_MASK)); |
741 | } | 738 | } |
742 | bytes *= nent; | 739 | bytes *= nent; |
743 | } | 740 | } |
744 | memset(iopgd, 0, nent * sizeof(*iopgd)); | 741 | memset(iopgd, 0, nent * sizeof(*iopgd)); |
745 | flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd)); | 742 | flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd)); |
746 | out: | 743 | out: |
747 | return bytes; | 744 | return bytes; |
748 | } | 745 | } |
749 | 746 | ||
750 | /** | 747 | /** |
751 | * iopgtable_clear_entry - Remove an iommu pte entry | 748 | * iopgtable_clear_entry - Remove an iommu pte entry |
752 | * @obj: target iommu | 749 | * @obj: target iommu |
753 | * @da: iommu device virtual address | 750 | * @da: iommu device virtual address |
754 | **/ | 751 | **/ |
755 | static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da) | 752 | static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da) |
756 | { | 753 | { |
757 | size_t bytes; | 754 | size_t bytes; |
758 | 755 | ||
759 | spin_lock(&obj->page_table_lock); | 756 | spin_lock(&obj->page_table_lock); |
760 | 757 | ||
761 | bytes = iopgtable_clear_entry_core(obj, da); | 758 | bytes = iopgtable_clear_entry_core(obj, da); |
762 | flush_iotlb_page(obj, da); | 759 | flush_iotlb_page(obj, da); |
763 | 760 | ||
764 | spin_unlock(&obj->page_table_lock); | 761 | spin_unlock(&obj->page_table_lock); |
765 | 762 | ||
766 | return bytes; | 763 | return bytes; |
767 | } | 764 | } |
768 | 765 | ||
/*
 * iopgtable_clear_entry_all - tear down the whole page table: free every
 * 2nd-level table, zero all 1st-level entries and flush the entire TLB.
 * Used when the iommu is removed.
 */
static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		/* only table-type entries own a 2nd-level table to free */
		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}
796 | 793 | ||
797 | /* | 794 | /* |
798 | * Device IOMMU generic operations | 795 | * Device IOMMU generic operations |
799 | */ | 796 | */ |
800 | static irqreturn_t iommu_fault_handler(int irq, void *data) | 797 | static irqreturn_t iommu_fault_handler(int irq, void *data) |
801 | { | 798 | { |
802 | u32 da, errs; | 799 | u32 da, errs; |
803 | u32 *iopgd, *iopte; | 800 | u32 *iopgd, *iopte; |
804 | struct omap_iommu *obj = data; | 801 | struct omap_iommu *obj = data; |
805 | struct iommu_domain *domain = obj->domain; | 802 | struct iommu_domain *domain = obj->domain; |
806 | 803 | ||
807 | if (!obj->refcount) | 804 | if (!obj->refcount) |
808 | return IRQ_NONE; | 805 | return IRQ_NONE; |
809 | 806 | ||
810 | errs = iommu_report_fault(obj, &da); | 807 | errs = iommu_report_fault(obj, &da); |
811 | if (errs == 0) | 808 | if (errs == 0) |
812 | return IRQ_HANDLED; | 809 | return IRQ_HANDLED; |
813 | 810 | ||
814 | /* Fault callback or TLB/PTE Dynamic loading */ | 811 | /* Fault callback or TLB/PTE Dynamic loading */ |
815 | if (!report_iommu_fault(domain, obj->dev, da, 0)) | 812 | if (!report_iommu_fault(domain, obj->dev, da, 0)) |
816 | return IRQ_HANDLED; | 813 | return IRQ_HANDLED; |
817 | 814 | ||
818 | iommu_disable(obj); | 815 | iommu_disable(obj); |
819 | 816 | ||
820 | iopgd = iopgd_offset(obj, da); | 817 | iopgd = iopgd_offset(obj, da); |
821 | 818 | ||
822 | if (!iopgd_is_table(*iopgd)) { | 819 | if (!iopgd_is_table(*iopgd)) { |
823 | dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p " | 820 | dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p " |
824 | "*pgd:px%08x\n", obj->name, errs, da, iopgd, *iopgd); | 821 | "*pgd:px%08x\n", obj->name, errs, da, iopgd, *iopgd); |
825 | return IRQ_NONE; | 822 | return IRQ_NONE; |
826 | } | 823 | } |
827 | 824 | ||
828 | iopte = iopte_offset(iopgd, da); | 825 | iopte = iopte_offset(iopgd, da); |
829 | 826 | ||
830 | dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x " | 827 | dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x " |
831 | "pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd, | 828 | "pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd, |
832 | iopte, *iopte); | 829 | iopte, *iopte); |
833 | 830 | ||
834 | return IRQ_NONE; | 831 | return IRQ_NONE; |
835 | } | 832 | } |
836 | 833 | ||
837 | static int device_match_by_alias(struct device *dev, void *data) | 834 | static int device_match_by_alias(struct device *dev, void *data) |
838 | { | 835 | { |
839 | struct omap_iommu *obj = to_iommu(dev); | 836 | struct omap_iommu *obj = to_iommu(dev); |
840 | const char *name = data; | 837 | const char *name = data; |
841 | 838 | ||
842 | pr_debug("%s: %s %s\n", __func__, obj->name, name); | 839 | pr_debug("%s: %s %s\n", __func__, obj->name, name); |
843 | 840 | ||
844 | return strcmp(obj->name, name) == 0; | 841 | return strcmp(obj->name, name) == 0; |
845 | } | 842 | } |
846 | 843 | ||
847 | /** | 844 | /** |
848 | * omap_iommu_attach() - attach iommu device to an iommu domain | 845 | * omap_iommu_attach() - attach iommu device to an iommu domain |
849 | * @name: name of target omap iommu device | 846 | * @name: name of target omap iommu device |
850 | * @iopgd: page table | 847 | * @iopgd: page table |
851 | **/ | 848 | **/ |
852 | static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) | 849 | static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) |
853 | { | 850 | { |
854 | int err = -ENOMEM; | 851 | int err = -ENOMEM; |
855 | struct device *dev; | 852 | struct device *dev; |
856 | struct omap_iommu *obj; | 853 | struct omap_iommu *obj; |
857 | 854 | ||
858 | dev = driver_find_device(&omap_iommu_driver.driver, NULL, | 855 | dev = driver_find_device(&omap_iommu_driver.driver, NULL, |
859 | (void *)name, | 856 | (void *)name, |
860 | device_match_by_alias); | 857 | device_match_by_alias); |
861 | if (!dev) | 858 | if (!dev) |
862 | return NULL; | 859 | return NULL; |
863 | 860 | ||
864 | obj = to_iommu(dev); | 861 | obj = to_iommu(dev); |
865 | 862 | ||
866 | spin_lock(&obj->iommu_lock); | 863 | spin_lock(&obj->iommu_lock); |
867 | 864 | ||
868 | /* an iommu device can only be attached once */ | 865 | /* an iommu device can only be attached once */ |
869 | if (++obj->refcount > 1) { | 866 | if (++obj->refcount > 1) { |
870 | dev_err(dev, "%s: already attached!\n", obj->name); | 867 | dev_err(dev, "%s: already attached!\n", obj->name); |
871 | err = -EBUSY; | 868 | err = -EBUSY; |
872 | goto err_enable; | 869 | goto err_enable; |
873 | } | 870 | } |
874 | 871 | ||
875 | obj->iopgd = iopgd; | 872 | obj->iopgd = iopgd; |
876 | err = iommu_enable(obj); | 873 | err = iommu_enable(obj); |
877 | if (err) | 874 | if (err) |
878 | goto err_enable; | 875 | goto err_enable; |
879 | flush_iotlb_all(obj); | 876 | flush_iotlb_all(obj); |
880 | 877 | ||
881 | if (!try_module_get(obj->owner)) | 878 | if (!try_module_get(obj->owner)) |
882 | goto err_module; | 879 | goto err_module; |
883 | 880 | ||
884 | spin_unlock(&obj->iommu_lock); | 881 | spin_unlock(&obj->iommu_lock); |
885 | 882 | ||
886 | dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); | 883 | dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); |
887 | return obj; | 884 | return obj; |
888 | 885 | ||
889 | err_module: | 886 | err_module: |
890 | if (obj->refcount == 1) | 887 | if (obj->refcount == 1) |
891 | iommu_disable(obj); | 888 | iommu_disable(obj); |
892 | err_enable: | 889 | err_enable: |
893 | obj->refcount--; | 890 | obj->refcount--; |
894 | spin_unlock(&obj->iommu_lock); | 891 | spin_unlock(&obj->iommu_lock); |
895 | return ERR_PTR(err); | 892 | return ERR_PTR(err); |
896 | } | 893 | } |
897 | 894 | ||
898 | /** | 895 | /** |
899 | * omap_iommu_detach - release iommu device | 896 | * omap_iommu_detach - release iommu device |
900 | * @obj: target iommu | 897 | * @obj: target iommu |
901 | **/ | 898 | **/ |
902 | static void omap_iommu_detach(struct omap_iommu *obj) | 899 | static void omap_iommu_detach(struct omap_iommu *obj) |
903 | { | 900 | { |
904 | if (!obj || IS_ERR(obj)) | 901 | if (!obj || IS_ERR(obj)) |
905 | return; | 902 | return; |
906 | 903 | ||
907 | spin_lock(&obj->iommu_lock); | 904 | spin_lock(&obj->iommu_lock); |
908 | 905 | ||
909 | if (--obj->refcount == 0) | 906 | if (--obj->refcount == 0) |
910 | iommu_disable(obj); | 907 | iommu_disable(obj); |
911 | 908 | ||
912 | module_put(obj->owner); | 909 | module_put(obj->owner); |
913 | 910 | ||
914 | obj->iopgd = NULL; | 911 | obj->iopgd = NULL; |
915 | 912 | ||
916 | spin_unlock(&obj->iommu_lock); | 913 | spin_unlock(&obj->iommu_lock); |
917 | 914 | ||
918 | dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); | 915 | dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); |
919 | } | 916 | } |
920 | 917 | ||
921 | /* | 918 | /* |
922 | * OMAP Device MMU(IOMMU) detection | 919 | * OMAP Device MMU(IOMMU) detection |
923 | */ | 920 | */ |
924 | static int __devinit omap_iommu_probe(struct platform_device *pdev) | 921 | static int __devinit omap_iommu_probe(struct platform_device *pdev) |
925 | { | 922 | { |
926 | int err = -ENODEV; | 923 | int err = -ENODEV; |
927 | int irq; | 924 | int irq; |
928 | struct omap_iommu *obj; | 925 | struct omap_iommu *obj; |
929 | struct resource *res; | 926 | struct resource *res; |
930 | struct iommu_platform_data *pdata = pdev->dev.platform_data; | 927 | struct iommu_platform_data *pdata = pdev->dev.platform_data; |
931 | 928 | ||
932 | if (pdev->num_resources != 2) | 929 | if (pdev->num_resources != 2) |
933 | return -EINVAL; | 930 | return -EINVAL; |
934 | 931 | ||
935 | obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); | 932 | obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); |
936 | if (!obj) | 933 | if (!obj) |
937 | return -ENOMEM; | 934 | return -ENOMEM; |
938 | 935 | ||
939 | obj->clk = clk_get(&pdev->dev, pdata->clk_name); | 936 | obj->clk = clk_get(&pdev->dev, pdata->clk_name); |
940 | if (IS_ERR(obj->clk)) | 937 | if (IS_ERR(obj->clk)) |
941 | goto err_clk; | 938 | goto err_clk; |
942 | 939 | ||
943 | obj->nr_tlb_entries = pdata->nr_tlb_entries; | 940 | obj->nr_tlb_entries = pdata->nr_tlb_entries; |
944 | obj->name = pdata->name; | 941 | obj->name = pdata->name; |
945 | obj->dev = &pdev->dev; | 942 | obj->dev = &pdev->dev; |
946 | obj->ctx = (void *)obj + sizeof(*obj); | 943 | obj->ctx = (void *)obj + sizeof(*obj); |
947 | obj->da_start = pdata->da_start; | 944 | obj->da_start = pdata->da_start; |
948 | obj->da_end = pdata->da_end; | 945 | obj->da_end = pdata->da_end; |
949 | 946 | ||
950 | spin_lock_init(&obj->iommu_lock); | 947 | spin_lock_init(&obj->iommu_lock); |
951 | mutex_init(&obj->mmap_lock); | 948 | mutex_init(&obj->mmap_lock); |
952 | spin_lock_init(&obj->page_table_lock); | 949 | spin_lock_init(&obj->page_table_lock); |
953 | INIT_LIST_HEAD(&obj->mmap); | 950 | INIT_LIST_HEAD(&obj->mmap); |
954 | 951 | ||
955 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 952 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
956 | if (!res) { | 953 | if (!res) { |
957 | err = -ENODEV; | 954 | err = -ENODEV; |
958 | goto err_mem; | 955 | goto err_mem; |
959 | } | 956 | } |
960 | 957 | ||
961 | res = request_mem_region(res->start, resource_size(res), | 958 | res = request_mem_region(res->start, resource_size(res), |
962 | dev_name(&pdev->dev)); | 959 | dev_name(&pdev->dev)); |
963 | if (!res) { | 960 | if (!res) { |
964 | err = -EIO; | 961 | err = -EIO; |
965 | goto err_mem; | 962 | goto err_mem; |
966 | } | 963 | } |
967 | 964 | ||
968 | obj->regbase = ioremap(res->start, resource_size(res)); | 965 | obj->regbase = ioremap(res->start, resource_size(res)); |
969 | if (!obj->regbase) { | 966 | if (!obj->regbase) { |
970 | err = -ENOMEM; | 967 | err = -ENOMEM; |
971 | goto err_ioremap; | 968 | goto err_ioremap; |
972 | } | 969 | } |
973 | 970 | ||
974 | irq = platform_get_irq(pdev, 0); | 971 | irq = platform_get_irq(pdev, 0); |
975 | if (irq < 0) { | 972 | if (irq < 0) { |
976 | err = -ENODEV; | 973 | err = -ENODEV; |
977 | goto err_irq; | 974 | goto err_irq; |
978 | } | 975 | } |
979 | err = request_irq(irq, iommu_fault_handler, IRQF_SHARED, | 976 | err = request_irq(irq, iommu_fault_handler, IRQF_SHARED, |
980 | dev_name(&pdev->dev), obj); | 977 | dev_name(&pdev->dev), obj); |
981 | if (err < 0) | 978 | if (err < 0) |
982 | goto err_irq; | 979 | goto err_irq; |
983 | platform_set_drvdata(pdev, obj); | 980 | platform_set_drvdata(pdev, obj); |
984 | 981 | ||
985 | dev_info(&pdev->dev, "%s registered\n", obj->name); | 982 | dev_info(&pdev->dev, "%s registered\n", obj->name); |
986 | return 0; | 983 | return 0; |
987 | 984 | ||
988 | err_irq: | 985 | err_irq: |
989 | iounmap(obj->regbase); | 986 | iounmap(obj->regbase); |
990 | err_ioremap: | 987 | err_ioremap: |
991 | release_mem_region(res->start, resource_size(res)); | 988 | release_mem_region(res->start, resource_size(res)); |
992 | err_mem: | 989 | err_mem: |
993 | clk_put(obj->clk); | 990 | clk_put(obj->clk); |
994 | err_clk: | 991 | err_clk: |
995 | kfree(obj); | 992 | kfree(obj); |
996 | return err; | 993 | return err; |
997 | } | 994 | } |
998 | 995 | ||
/*
 * omap_iommu_remove - platform remove: tear down the page table and
 * release IRQ, MMIO region, clock and the device object, mirroring
 * omap_iommu_probe() in reverse order.
 */
static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}
1020 | 1017 | ||
/* Platform driver glue; also used by omap_iommu_attach() to look up
 * iommu devices by alias via driver_find_device(). */
static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};
1028 | 1025 | ||
/* kmem_cache constructor: clean the PTE table area from the data cache
 * so the (non-coherent) MMU hardware never sees stale lines. */
static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}
1033 | 1030 | ||
1034 | static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, | 1031 | static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, |
1035 | u32 flags) | 1032 | u32 flags) |
1036 | { | 1033 | { |
1037 | memset(e, 0, sizeof(*e)); | 1034 | memset(e, 0, sizeof(*e)); |
1038 | 1035 | ||
1039 | e->da = da; | 1036 | e->da = da; |
1040 | e->pa = pa; | 1037 | e->pa = pa; |
1041 | e->valid = 1; | 1038 | e->valid = 1; |
1042 | /* FIXME: add OMAP1 support */ | 1039 | /* FIXME: add OMAP1 support */ |
1043 | e->pgsz = flags & MMU_CAM_PGSZ_MASK; | 1040 | e->pgsz = flags & MMU_CAM_PGSZ_MASK; |
1044 | e->endian = flags & MMU_RAM_ENDIAN_MASK; | 1041 | e->endian = flags & MMU_RAM_ENDIAN_MASK; |
1045 | e->elsz = flags & MMU_RAM_ELSZ_MASK; | 1042 | e->elsz = flags & MMU_RAM_ELSZ_MASK; |
1046 | e->mixed = flags & MMU_RAM_MIXED_MASK; | 1043 | e->mixed = flags & MMU_RAM_MIXED_MASK; |
1047 | 1044 | ||
1048 | return iopgsz_to_bytes(e->pgsz); | 1045 | return iopgsz_to_bytes(e->pgsz); |
1049 | } | 1046 | } |
1050 | 1047 | ||
1051 | static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, | 1048 | static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, |
1052 | phys_addr_t pa, size_t bytes, int prot) | 1049 | phys_addr_t pa, size_t bytes, int prot) |
1053 | { | 1050 | { |
1054 | struct omap_iommu_domain *omap_domain = domain->priv; | 1051 | struct omap_iommu_domain *omap_domain = domain->priv; |
1055 | struct omap_iommu *oiommu = omap_domain->iommu_dev; | 1052 | struct omap_iommu *oiommu = omap_domain->iommu_dev; |
1056 | struct device *dev = oiommu->dev; | 1053 | struct device *dev = oiommu->dev; |
1057 | struct iotlb_entry e; | 1054 | struct iotlb_entry e; |
1058 | int omap_pgsz; | 1055 | int omap_pgsz; |
1059 | u32 ret, flags; | 1056 | u32 ret, flags; |
1060 | 1057 | ||
1061 | /* we only support mapping a single iommu page for now */ | 1058 | /* we only support mapping a single iommu page for now */ |
1062 | omap_pgsz = bytes_to_iopgsz(bytes); | 1059 | omap_pgsz = bytes_to_iopgsz(bytes); |
1063 | if (omap_pgsz < 0) { | 1060 | if (omap_pgsz < 0) { |
1064 | dev_err(dev, "invalid size to map: %d\n", bytes); | 1061 | dev_err(dev, "invalid size to map: %d\n", bytes); |
1065 | return -EINVAL; | 1062 | return -EINVAL; |
1066 | } | 1063 | } |
1067 | 1064 | ||
1068 | dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes); | 1065 | dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes); |
1069 | 1066 | ||
1070 | flags = omap_pgsz | prot; | 1067 | flags = omap_pgsz | prot; |
1071 | 1068 | ||
1072 | iotlb_init_entry(&e, da, pa, flags); | 1069 | iotlb_init_entry(&e, da, pa, flags); |
1073 | 1070 | ||
1074 | ret = omap_iopgtable_store_entry(oiommu, &e); | 1071 | ret = omap_iopgtable_store_entry(oiommu, &e); |
1075 | if (ret) | 1072 | if (ret) |
1076 | dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret); | 1073 | dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret); |
1077 | 1074 | ||
1078 | return ret; | 1075 | return ret; |
1079 | } | 1076 | } |
1080 | 1077 | ||
1081 | static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da, | 1078 | static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da, |
1082 | size_t size) | 1079 | size_t size) |
1083 | { | 1080 | { |
1084 | struct omap_iommu_domain *omap_domain = domain->priv; | 1081 | struct omap_iommu_domain *omap_domain = domain->priv; |
1085 | struct omap_iommu *oiommu = omap_domain->iommu_dev; | 1082 | struct omap_iommu *oiommu = omap_domain->iommu_dev; |
1086 | struct device *dev = oiommu->dev; | 1083 | struct device *dev = oiommu->dev; |
1087 | 1084 | ||
1088 | dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size); | 1085 | dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size); |
1089 | 1086 | ||
1090 | return iopgtable_clear_entry(oiommu, da); | 1087 | return iopgtable_clear_entry(oiommu, da); |
1091 | } | 1088 | } |
1092 | 1089 | ||
/*
 * omap_iommu_attach_dev - iommu_ops .attach_dev callback: bind @dev's
 * omap iommu (looked up by the alias in dev->archdata.iommu) to
 * @domain and hand it the domain's page table.
 *
 * Returns 0, -EBUSY if the domain already has a device, or a negative
 * errno from omap_iommu_attach().
 */
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	int ret = 0;

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
	omap_domain->dev = dev;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}
1126 | 1123 | ||
1127 | static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain, | 1124 | static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain, |
1128 | struct device *dev) | 1125 | struct device *dev) |
1129 | { | 1126 | { |
1130 | struct omap_iommu *oiommu = dev_to_omap_iommu(dev); | 1127 | struct omap_iommu *oiommu = dev_to_omap_iommu(dev); |
1131 | struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; | 1128 | struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; |
1132 | 1129 | ||
1133 | /* only a single device is supported per domain for now */ | 1130 | /* only a single device is supported per domain for now */ |
1134 | if (omap_domain->iommu_dev != oiommu) { | 1131 | if (omap_domain->iommu_dev != oiommu) { |
1135 | dev_err(dev, "invalid iommu device\n"); | 1132 | dev_err(dev, "invalid iommu device\n"); |
1136 | return; | 1133 | return; |
1137 | } | 1134 | } |
1138 | 1135 | ||
1139 | iopgtable_clear_entry_all(oiommu); | 1136 | iopgtable_clear_entry_all(oiommu); |
1140 | 1137 | ||
1141 | omap_iommu_detach(oiommu); | 1138 | omap_iommu_detach(oiommu); |
1142 | 1139 | ||
1143 | omap_domain->iommu_dev = arch_data->iommu_dev = NULL; | 1140 | omap_domain->iommu_dev = arch_data->iommu_dev = NULL; |
1144 | omap_domain->dev = NULL; | 1141 | omap_domain->dev = NULL; |
1145 | } | 1142 | } |
1146 | 1143 | ||
/*
 * omap_iommu_detach_dev - iommu_ops detach entry point
 *
 * Locked wrapper around _omap_iommu_detach_dev(); the domain spinlock
 * serializes against concurrent attach/detach on the same domain.
 */
static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}
1156 | 1153 | ||
1157 | static int omap_iommu_domain_init(struct iommu_domain *domain) | 1154 | static int omap_iommu_domain_init(struct iommu_domain *domain) |
1158 | { | 1155 | { |
1159 | struct omap_iommu_domain *omap_domain; | 1156 | struct omap_iommu_domain *omap_domain; |
1160 | 1157 | ||
1161 | omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL); | 1158 | omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL); |
1162 | if (!omap_domain) { | 1159 | if (!omap_domain) { |
1163 | pr_err("kzalloc failed\n"); | 1160 | pr_err("kzalloc failed\n"); |
1164 | goto out; | 1161 | goto out; |
1165 | } | 1162 | } |
1166 | 1163 | ||
1167 | omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL); | 1164 | omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL); |
1168 | if (!omap_domain->pgtable) { | 1165 | if (!omap_domain->pgtable) { |
1169 | pr_err("kzalloc failed\n"); | 1166 | pr_err("kzalloc failed\n"); |
1170 | goto fail_nomem; | 1167 | goto fail_nomem; |
1171 | } | 1168 | } |
1172 | 1169 | ||
1173 | /* | 1170 | /* |
1174 | * should never fail, but please keep this around to ensure | 1171 | * should never fail, but please keep this around to ensure |
1175 | * we keep the hardware happy | 1172 | * we keep the hardware happy |
1176 | */ | 1173 | */ |
1177 | BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE)); | 1174 | BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE)); |
1178 | 1175 | ||
1179 | clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE); | 1176 | clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE); |
1180 | spin_lock_init(&omap_domain->lock); | 1177 | spin_lock_init(&omap_domain->lock); |
1181 | 1178 | ||
1182 | domain->priv = omap_domain; | 1179 | domain->priv = omap_domain; |
1183 | 1180 | ||
1184 | domain->geometry.aperture_start = 0; | 1181 | domain->geometry.aperture_start = 0; |
1185 | domain->geometry.aperture_end = (1ULL << 32) - 1; | 1182 | domain->geometry.aperture_end = (1ULL << 32) - 1; |
1186 | domain->geometry.force_aperture = true; | 1183 | domain->geometry.force_aperture = true; |
1187 | 1184 | ||
1188 | return 0; | 1185 | return 0; |
1189 | 1186 | ||
1190 | fail_nomem: | 1187 | fail_nomem: |
1191 | kfree(omap_domain); | 1188 | kfree(omap_domain); |
1192 | out: | 1189 | out: |
1193 | return -ENOMEM; | 1190 | return -ENOMEM; |
1194 | } | 1191 | } |
1195 | 1192 | ||
1196 | static void omap_iommu_domain_destroy(struct iommu_domain *domain) | 1193 | static void omap_iommu_domain_destroy(struct iommu_domain *domain) |
1197 | { | 1194 | { |
1198 | struct omap_iommu_domain *omap_domain = domain->priv; | 1195 | struct omap_iommu_domain *omap_domain = domain->priv; |
1199 | 1196 | ||
1200 | domain->priv = NULL; | 1197 | domain->priv = NULL; |
1201 | 1198 | ||
1202 | /* | 1199 | /* |
1203 | * An iommu device is still attached | 1200 | * An iommu device is still attached |
1204 | * (currently, only one device can be attached) ? | 1201 | * (currently, only one device can be attached) ? |
1205 | */ | 1202 | */ |
1206 | if (omap_domain->iommu_dev) | 1203 | if (omap_domain->iommu_dev) |
1207 | _omap_iommu_detach_dev(omap_domain, omap_domain->dev); | 1204 | _omap_iommu_detach_dev(omap_domain, omap_domain->dev); |
1208 | 1205 | ||
1209 | kfree(omap_domain->pgtable); | 1206 | kfree(omap_domain->pgtable); |
1210 | kfree(omap_domain); | 1207 | kfree(omap_domain); |
1211 | } | 1208 | } |
1212 | 1209 | ||
1213 | static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, | 1210 | static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, |
1214 | unsigned long da) | 1211 | unsigned long da) |
1215 | { | 1212 | { |
1216 | struct omap_iommu_domain *omap_domain = domain->priv; | 1213 | struct omap_iommu_domain *omap_domain = domain->priv; |
1217 | struct omap_iommu *oiommu = omap_domain->iommu_dev; | 1214 | struct omap_iommu *oiommu = omap_domain->iommu_dev; |
1218 | struct device *dev = oiommu->dev; | 1215 | struct device *dev = oiommu->dev; |
1219 | u32 *pgd, *pte; | 1216 | u32 *pgd, *pte; |
1220 | phys_addr_t ret = 0; | 1217 | phys_addr_t ret = 0; |
1221 | 1218 | ||
1222 | iopgtable_lookup_entry(oiommu, da, &pgd, &pte); | 1219 | iopgtable_lookup_entry(oiommu, da, &pgd, &pte); |
1223 | 1220 | ||
1224 | if (pte) { | 1221 | if (pte) { |
1225 | if (iopte_is_small(*pte)) | 1222 | if (iopte_is_small(*pte)) |
1226 | ret = omap_iommu_translate(*pte, da, IOPTE_MASK); | 1223 | ret = omap_iommu_translate(*pte, da, IOPTE_MASK); |
1227 | else if (iopte_is_large(*pte)) | 1224 | else if (iopte_is_large(*pte)) |
1228 | ret = omap_iommu_translate(*pte, da, IOLARGE_MASK); | 1225 | ret = omap_iommu_translate(*pte, da, IOLARGE_MASK); |
1229 | else | 1226 | else |
1230 | dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da); | 1227 | dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da); |
1231 | } else { | 1228 | } else { |
1232 | if (iopgd_is_section(*pgd)) | 1229 | if (iopgd_is_section(*pgd)) |
1233 | ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK); | 1230 | ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK); |
1234 | else if (iopgd_is_super(*pgd)) | 1231 | else if (iopgd_is_super(*pgd)) |
1235 | ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK); | 1232 | ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK); |
1236 | else | 1233 | else |
1237 | dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da); | 1234 | dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da); |
1238 | } | 1235 | } |
1239 | 1236 | ||
1240 | return ret; | 1237 | return ret; |
1241 | } | 1238 | } |
1242 | 1239 | ||
/*
 * omap_iommu_domain_has_cap - capability query stub
 *
 * No optional iommu capabilities are advertised; always reports 0
 * ("capability not supported") regardless of @cap.
 */
static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}
1248 | 1245 | ||
/*
 * iommu_ops vtable registered with the platform bus; pgsize_bitmap
 * advertises the page sizes the omap pagetable code accepts.
 */
static struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.domain_has_cap	= omap_iommu_domain_has_cap,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};
1260 | 1257 | ||
1261 | static int __init omap_iommu_init(void) | 1258 | static int __init omap_iommu_init(void) |
1262 | { | 1259 | { |
1263 | struct kmem_cache *p; | 1260 | struct kmem_cache *p; |
1264 | const unsigned long flags = SLAB_HWCACHE_ALIGN; | 1261 | const unsigned long flags = SLAB_HWCACHE_ALIGN; |
1265 | size_t align = 1 << 10; /* L2 pagetable alignement */ | 1262 | size_t align = 1 << 10; /* L2 pagetable alignement */ |
1266 | 1263 | ||
1267 | p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, | 1264 | p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, |
1268 | iopte_cachep_ctor); | 1265 | iopte_cachep_ctor); |
1269 | if (!p) | 1266 | if (!p) |
1270 | return -ENOMEM; | 1267 | return -ENOMEM; |
1271 | iopte_cachep = p; | 1268 | iopte_cachep = p; |
1272 | 1269 | ||
1273 | bus_set_iommu(&platform_bus_type, &omap_iommu_ops); | 1270 | bus_set_iommu(&platform_bus_type, &omap_iommu_ops); |
1274 | 1271 | ||
1275 | return platform_driver_register(&omap_iommu_driver); | 1272 | return platform_driver_register(&omap_iommu_driver); |
1276 | } | 1273 | } |
1277 | /* must be ready before omap3isp is probed */ | 1274 | /* must be ready before omap3isp is probed */ |
1278 | subsys_initcall(omap_iommu_init); | 1275 | subsys_initcall(omap_iommu_init); |
1279 | 1276 | ||
1280 | static void __exit omap_iommu_exit(void) | 1277 | static void __exit omap_iommu_exit(void) |
1281 | { | 1278 | { |
1282 | kmem_cache_destroy(iopte_cachep); | 1279 | kmem_cache_destroy(iopte_cachep); |
1283 | 1280 | ||
1284 | platform_driver_unregister(&omap_iommu_driver); | 1281 | platform_driver_unregister(&omap_iommu_driver); |
1285 | } | 1282 | } |
1286 | module_exit(omap_iommu_exit); | 1283 | module_exit(omap_iommu_exit); |
1287 | 1284 | ||
1288 | MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives"); | 1285 | MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives"); |
1289 | MODULE_ALIAS("platform:omap-iommu"); | 1286 | MODULE_ALIAS("platform:omap-iommu"); |
1290 | MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi"); | 1287 | MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi"); |
1291 | MODULE_LICENSE("GPL v2"); | 1288 | MODULE_LICENSE("GPL v2"); |
1292 | 1289 |