arch/arm/mm/ioremap.c
/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include "mm.h"

int ioremap_page(unsigned long virt, unsigned long phys,
                 const struct mem_type *mtype)
{
        return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
                                  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
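ioremap_page() installs a single PAGE_SIZE mapping at a virtual address the caller already owns (a reserved window), rather than allocating one from the vmalloc area. A minimal sketch of a call site, assuming hypothetical DEV_VIRT and DEV_PHYS constants that are not part of this file:

        /* illustrative only: DEV_VIRT and DEV_PHYS are made-up constants */
        const struct mem_type *mtype = get_mem_type(MT_DEVICE);
        int err;

        if (!mtype)
                return -EINVAL;
        err = ioremap_page(DEV_VIRT, DEV_PHYS, mtype);  /* one PAGE_SIZE window at DEV_VIRT */
        if (err)
                pr_err("ioremap_page failed: %d\n", err);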

void __check_kvm_seq(struct mm_struct *mm)
{
        unsigned int seq;

        do {
                seq = init_mm.context.kvm_seq;
                memcpy(pgd_offset(mm, VMALLOC_START),
                       pgd_offset_k(VMALLOC_START),
                       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
                                        pgd_index(VMALLOC_START)));
                mm->context.kvm_seq = seq;
        } while (seq != init_mm.context.kvm_seq);
}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
        unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmdp;

        flush_cache_vunmap(addr, end);
        pgd = pgd_offset_k(addr);
        pud = pud_offset(pgd, addr);
        pmdp = pmd_offset(pud, addr);
        do {
                pmd_t pmd = *pmdp;

                if (!pmd_none(pmd)) {
                        /*
                         * Clear the PMD from the page table, and
                         * increment the kvm sequence so others
                         * notice this change.
                         *
                         * Note: this is still racy on SMP machines.
                         */
                        pmd_clear(pmdp);
                        init_mm.context.kvm_seq++;

                        /*
                         * Free the page table, if there was one.
                         */
                        if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
                                pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
                }
                addr += PMD_SIZE;
                pmdp += 2;
        } while (addr < end);

        /*
         * Ensure that the active_mm is up to date - we want to
         * catch any use-after-iounmap cases.
         */
        if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
                __check_kvm_seq(current->active_mm);

        flush_tlb_kernel_range(virt, end);
}

static int remap_area_sections(unsigned long virt, unsigned long pfn,
                               size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(addr);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        do {
                pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                flush_pmd_entry(pmd);

                addr += PMD_SIZE;
                pmd += 2;
        } while (addr < end);

        return 0;
}

static int remap_area_supersections(unsigned long virt, unsigned long pfn,
                                    size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(virt);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        do {
                unsigned long super_pmd_val, i;

                super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
                                PMD_SECT_SUPER;
                super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

                for (i = 0; i < 8; i++) {
                        pmd[0] = __pmd(super_pmd_val);
                        pmd[1] = __pmd(super_pmd_val);
                        flush_pmd_entry(pmd);

                        addr += PMD_SIZE;
                        pmd += 2;
                }

                pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
        } while (addr < end);

        return 0;
}
#endif
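A worked example of the supersection encoding above may help: the descriptor keeps the low 32 bits of the physical address in the usual place, while physical address bits [35:32] are folded into descriptor bits [23:20]. The pfn below is invented for illustration and is supersection (16MB) aligned, as the code requires:

        /*
         * Illustrative only: pfn 0x123000 corresponds to physical address
         * 0x1_2300_0000 (36 bits).  With PAGE_SHIFT == 12:
         *
         *   ((0x123000 >> (32 - PAGE_SHIFT)) & 0xf) << 20
         * = ((0x123000 >> 20) & 0xf) << 20
         * = 0x1 << 20 = 0x00100000
         *
         * i.e. physical bits [35:32] = 0x1 land in descriptor bits [23:20],
         * while __pfn_to_phys() supplies the low 32 bits (0x2300_0000).
         */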

void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
        unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
        const struct mem_type *type;
        int err;
        unsigned long addr;
        struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
        /*
         * High mappings must be supersection aligned
         */
        if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
                return NULL;
#endif

        type = get_mem_type(mtype);
        if (!type)
                return NULL;

        /*
         * Page align the mapping size, taking account of any offset.
         */
        size = PAGE_ALIGN(offset + size);

        /*
         * Try to reuse one of the static mappings whenever possible.
         */
        read_lock(&vmlist_lock);
        for (area = vmlist; area; area = area->next) {
                if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
                        break;
                if (!(area->flags & VM_ARM_STATIC_MAPPING))
                        continue;
                if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
                        continue;
                if (__phys_to_pfn(area->phys_addr) > pfn ||
                    __pfn_to_phys(pfn) + size - 1 > area->phys_addr + area->size - 1)
                        continue;
                /* we can drop the lock here as we know *area is static */
                read_unlock(&vmlist_lock);
                addr = (unsigned long)area->addr;
                addr += __pfn_to_phys(pfn) - area->phys_addr;
                return (void __iomem *) (offset + addr);
        }
        read_unlock(&vmlist_lock);

        /*
         * Don't allow RAM to be mapped - this causes problems with ARMv6+
         */
        if (WARN_ON(pfn_valid(pfn)))
                return NULL;

        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
        if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
             cpu_is_xsc3()) && pfn >= 0x100000 &&
            !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_supersections(addr, pfn, size, type);
        } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_sections(addr, pfn, size, type);
        } else
#endif
                err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
                                         __pgprot(type->prot_pte));

        if (err) {
                vunmap((void *)addr);
                return NULL;
        }

        flush_cache_vmap(addr, addr + size);
        return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
        unsigned int mtype, void *caller)
{
        unsigned long last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
        unsigned long pfn = __phys_to_pfn(phys_addr);

        /*
         * Don't allow wraparound or zero size
         */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                        caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                  unsigned int mtype)
{
        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
                                      unsigned int, void *) =
        __arm_ioremap_caller;

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
        return arch_ioremap_caller(phys_addr, size, mtype,
                __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
{
        unsigned int mtype;

        if (cached)
                mtype = MT_MEMORY;
        else
                mtype = MT_MEMORY_NONCACHED;

        return __arm_ioremap_caller(phys_addr, size, mtype,
                        __builtin_return_address(0));
}
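A hedged sketch of how a platform might use __arm_ioremap_exec(): map a block of on-chip SRAM cacheable so that relocated code can run from it. SRAM_PHYS and SRAM_SIZE are made-up platform constants, and the copy step is only indicated:

        void __iomem *sram;

        sram = __arm_ioremap_exec(SRAM_PHYS, SRAM_SIZE, true);  /* cached, executable memory type */
        if (!sram)
                return -ENOMEM;
        /* the relocated code would then be copied into sram before being called */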

void __iounmap(volatile void __iomem *io_addr)
{
        void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
        struct vm_struct *vm;

        read_lock(&vmlist_lock);
        for (vm = vmlist; vm; vm = vm->next) {
                if (vm->addr > addr)
                        break;
                if (!(vm->flags & VM_IOREMAP))
                        continue;
                /* If this is a static mapping we must leave it alone */
                if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
                    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
                        read_unlock(&vmlist_lock);
                        return;
                }
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
                /*
                 * If this is a section based mapping we need to handle it
                 * specially as the VM subsystem does not know how to handle
                 * such a beast.
                 */
                if ((vm->addr == addr) &&
                    (vm->flags & VM_ARM_SECTION_MAPPING)) {
                        unmap_area_sections((unsigned long)vm->addr, vm->size);
                        break;
                }
#endif
        }
        read_unlock(&vmlist_lock);

        vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void __arm_iounmap(volatile void __iomem *io_addr)
{
        arch_iounmap(io_addr);
}
EXPORT_SYMBOL(__arm_iounmap);
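For context, the rule in the header comment ("only use readl, writel, memcpy_toio and so on") looks roughly like this from a driver's point of view. The physical address, size, and register offsets below are invented, and on this kernel tree the generic ioremap()/iounmap() helpers are expected to resolve to __arm_ioremap()/__arm_iounmap() via asm/io.h, though that wiring lives outside this file:

        void __iomem *regs;
        u32 status;

        regs = ioremap(MMIO_PHYS, MMIO_SIZE);   /* hypothetical device window */
        if (!regs)
                return -ENOMEM;
        writel(1, regs + CTRL_OFFSET);          /* never dereference regs directly */
        status = readl(regs + STATUS_OFFSET);
        iounmap(regs);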