Commit 640710a33b54de8d90ae140ef633ed0feba76a75
Parent: 49cf78ef7b
Exists in smarc-imx_3.14.28_1.0.0_ga and in 1 other branch
tile: add virt_to_kpte() API and clean up and document behavior
We use virt_to_pte(NULL, va) a lot, which isn't very obvious. I added virt_to_kpte(va) as a more obvious wrapper function that also validates the va as being a kernel address. And I fixed the semantics of virt_to_pte() so that we handle the pud and pmd the same way, and we now document the fact that we handle the final pte level differently.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
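For context, a minimal before/after sketch of the call pattern this commit targets (a hypothetical caller, not code from this commit; "ptr" is an assumed kernel pointer):

	/*
	 * Old idiom: passing a NULL mm makes virt_to_pte() walk the
	 * standard kernel page table, which is easy to misread.
	 */
	unsigned long va = (unsigned long)ptr;
	pte_t *ptep = virt_to_pte(NULL, va);

	/*
	 * New idiom: same walk, but the name says "kernel" and the
	 * wrapper BUG()s if va is below PAGE_OFFSET (not a kernel VA).
	 */
	pte_t *kptep = virt_to_kpte(va);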
Showing 6 changed files with 30 additions and 11 deletions
arch/tile/include/asm/mmu_context.h
arch/tile/include/asm/page.h
arch/tile/kernel/setup.c
@@ -1600,7 +1600,7 @@
 
 		/* Update the vmalloc mapping and page home. */
 		unsigned long addr = (unsigned long)ptr + i;
-		pte_t *ptep = virt_to_pte(NULL, addr);
+		pte_t *ptep = virt_to_kpte(addr);
 		pte_t pte = *ptep;
 		BUG_ON(pfn != pte_pfn(pte));
 		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
@@ -1609,12 +1609,12 @@
 
 		/* Update the lowmem mapping for consistency. */
 		lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
-		ptep = virt_to_pte(NULL, lowmem_va);
+		ptep = virt_to_kpte(lowmem_va);
 		if (pte_huge(*ptep)) {
 			printk(KERN_DEBUG "early shatter of huge page"
 			       " at %#lx\n", lowmem_va);
 			shatter_pmd((pmd_t *)ptep);
-			ptep = virt_to_pte(NULL, lowmem_va);
+			ptep = virt_to_kpte(lowmem_va);
 			BUG_ON(pte_huge(*ptep));
 		}
 		BUG_ON(pfn != pte_pfn(*ptep));
arch/tile/mm/homecache.c
@@ -200,7 +200,7 @@
 #else
 	va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id());
 #endif
-	ptep = virt_to_pte(NULL, (unsigned long)va);
+	ptep = virt_to_kpte(va);
 	pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
 	__set_pte(ptep, pte_set_home(pte, home));
 	homecache_finv_page_va((void *)va, home);
@@ -385,7 +385,7 @@
 		return initial_page_home();
 	} else {
 		unsigned long kva = (unsigned long)page_address(page);
-		return pte_to_home(*virt_to_pte(NULL, kva));
+		return pte_to_home(*virt_to_kpte(kva));
 	}
 }
 EXPORT_SYMBOL(page_home);
@@ -404,7 +404,7 @@
 		     NULL, 0);
 
 	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
-		pte_t *ptep = virt_to_pte(NULL, kva);
+		pte_t *ptep = virt_to_kpte(kva);
 		pte_t pteval = *ptep;
 		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
 		__set_pte(ptep, pte_set_home(pteval, home));
arch/tile/mm/init.c
@@ -951,7 +951,7 @@
 	BUG_ON((addr & (PAGE_SIZE-1)) != 0);
 	for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) {
 		unsigned long pfn = kaddr_to_pfn((void *)addr);
-		pte_t *ptep = virt_to_pte(NULL, addr);
+		pte_t *ptep = virt_to_kpte(addr);
 		BUG_ON(pte_huge(*ptep));	/* not relevant for kdata_huge */
 		set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO));
 	}
@@ -997,7 +997,7 @@
 		 */
 		int pfn = kaddr_to_pfn((void *)addr);
 		struct page *page = pfn_to_page(pfn);
-		pte_t *ptep = virt_to_pte(NULL, addr);
+		pte_t *ptep = virt_to_kpte(addr);
 		if (!initfree) {
 			/*
 			 * If debugging page accesses then do not free
arch/tile/mm/pgtable.c
@@ -325,6 +325,17 @@
 
 #endif
 
+/*
+ * Return a pointer to the PTE that corresponds to the given
+ * address in the given page table.  A NULL page table just uses
+ * the standard kernel page table; the preferred API in this case
+ * is virt_to_kpte().
+ *
+ * The returned pointer can point to a huge page in other levels
+ * of the page table than the bottom, if the huge page is present
+ * in the page table.  For bottom-level PTEs, the returned pointer
+ * can point to a PTE that is either present or not.
+ */
 pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr)
 {
 	pgd_t *pgd;
@@ -341,13 +352,20 @@
 	if (pud_huge_page(*pud))
 		return (pte_t *)pud;
 	pmd = pmd_offset(pud, addr);
-	if (pmd_huge_page(*pmd))
-		return (pte_t *)pmd;
 	if (!pmd_present(*pmd))
 		return NULL;
+	if (pmd_huge_page(*pmd))
+		return (pte_t *)pmd;
 	return pte_offset_kernel(pmd, addr);
 }
 EXPORT_SYMBOL(virt_to_pte);
+
+pte_t *virt_to_kpte(unsigned long kaddr)
+{
+	BUG_ON(kaddr < PAGE_OFFSET);
+	return virt_to_pte(NULL, kaddr);
+}
+EXPORT_SYMBOL(virt_to_kpte);
 
 pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu)
 {
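The new block comment above virt_to_pte() is worth taking literally when using the wrapper. A minimal caller sketch (hypothetical, not part of this commit; modeled on the setup.c and homecache.c call sites above) of how the documented return semantics play out:

	pte_t *ptep = virt_to_kpte(kva);	/* "kva" is an assumed kernel VA */
	if (ptep == NULL)
		return;				/* no mapping at the pud/pmd level */
	if (pte_huge(*ptep)) {
		/* ptep is really a pud/pmd entry cast to pte_t *; one
		 * option is to split the huge page first, as setup.c does. */
		shatter_pmd((pmd_t *)ptep);
		ptep = virt_to_kpte(kva);
	}
	if (!pte_present(*ptep))
		return;				/* a bottom-level PTE can be present or not */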