Commit 4c139862b8831261d57de02716b92f82e5fb463b
Committed by Linus Torvalds
1 parent d99cf715a0
Exists in master and in 4 other branches
[PATCH] xtensa: delete accidental file
This file seems to be an accident.

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Showing 1 changed file with 0 additions and 135 deletions
include/asm-xtensa/page.h.n
File was deleted. Original contents:

/*
 * linux/include/asm-xtensa/page.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_PAGE_H
#define _XTENSA_PAGE_H

#ifdef __KERNEL__

#include <asm/processor.h>
#include <linux/config.h>

/*
 * PAGE_SHIFT determines the page size
 * PAGE_ALIGN(x) aligns the pointer to the (next) page boundary
 */

#define PAGE_SHIFT		XCHAL_MMU_MIN_PTE_PAGE_SIZE
#define PAGE_SIZE		(1 << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE-1))
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE - 1) & PAGE_MASK)

#define DCACHE_WAY_SIZE		(XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS)
#define PAGE_OFFSET		XCHAL_KSEG_CACHED_VADDR

#ifdef __ASSEMBLY__

#define __pgprot(x)	(x)

#else

/*
 * These are used to make use of C type-checking..
 */

typedef struct { unsigned long pte; } pte_t;		/* page table entry */
typedef struct { unsigned long pmd; } pmd_t;		/* PMD table entry */
typedef struct { unsigned long pgd; } pgd_t;		/* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

/*
 * Pure 2^n version of get_order
 */

extern __inline__ int get_order(unsigned long size)
{
	int order;
#ifndef XCHAL_HAVE_NSU
	unsigned long x1, x2, x4, x8, x16;

	size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	x1  = size & 0xAAAAAAAA;
	x2  = size & 0xCCCCCCCC;
	x4  = size & 0xF0F0F0F0;
	x8  = size & 0xFF00FF00;
	x16 = size & 0xFFFF0000;
	order  = x2 ? 2 : 0;
	order += (x16 != 0) * 16;
	order += (x8 != 0) * 8;
	order += (x4 != 0) * 4;
	order += (x1 != 0);

	return order;
#else
	size = (size - 1) >> PAGE_SHIFT;
	asm ("nsau %0, %1" : "=r" (order) : "r" (size));
	return 32 - order;
#endif
}


struct page;
extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

/*
 * If we have cache aliasing and writeback caches, we might have to do
 * some extra work
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
void clear_user_page(void *addr, unsigned long vaddr, struct page* page);
void copy_user_page(void *to, void* from, unsigned long vaddr, struct page* page);
#else
# define clear_user_page(page,vaddr,pg)		clear_page(page)
# define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
#endif


/*
 * This handles the memory map. We handle pages at
 * XCHAL_KSEG_CACHED_VADDR for kernels with 32 bit address space.
 * These macros are for conversion of kernel address, not user
 * addresses.
 */

#define __pa(x)			((unsigned long) (x) - PAGE_OFFSET)
#define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))
#define pfn_valid(pfn)		((unsigned long)pfn < max_mapnr)
#ifndef CONFIG_DISCONTIGMEM
# define pfn_to_page(pfn)	(mem_map + (pfn))
# define page_to_pfn(page)	((unsigned long)((page) - mem_map))
#else
# error CONFIG_DISCONTIGMEM not supported
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

#define WANT_PAGE_VIRTUAL


#endif /* __ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#endif /* __KERNEL__ */
#endif /* _XTENSA_PAGE_H */
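
Editor's note, not part of the commit: the non-NSAU branch of get_order() above uses a mask-and-sum trick that reconstructs the bit position of a single set bit, so it is exact only for power-of-two page counts (hence the file's "Pure 2^n version" comment). A minimal standalone sketch of that trick, assuming 4 KiB pages (PAGE_SHIFT = 12) purely for demonstration; the demo_* names are hypothetical:

/* Standalone illustration of the mask trick in the deleted get_order(). */
#include <assert.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12                       /* assumed: 4 KiB pages */
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)

static int demo_get_order(unsigned long size)
{
	unsigned long pages = (size + DEMO_PAGE_SIZE - 1) >> DEMO_PAGE_SHIFT;
	int order = 0;

	/* For a single set bit at position p, each mask contributes one
	 * binary digit of p, so the sum reconstructs p = log2(pages). */
	order += (pages & 0xFFFF0000UL) != 0 ? 16 : 0;  /* bit 4 of p */
	order += (pages & 0xFF00FF00UL) != 0 ?  8 : 0;  /* bit 3 of p */
	order += (pages & 0xF0F0F0F0UL) != 0 ?  4 : 0;  /* bit 2 of p */
	order += (pages & 0xCCCCCCCCUL) != 0 ?  2 : 0;  /* bit 1 of p */
	order += (pages & 0xAAAAAAAAUL) != 0 ?  1 : 0;  /* bit 0 of p */
	return order;
}

int main(void)
{
	/* Exact for power-of-two page counts. */
	assert(demo_get_order(DEMO_PAGE_SIZE) == 0);
	assert(demo_get_order(4 * DEMO_PAGE_SIZE) == 2);
	assert(demo_get_order(16 * DEMO_PAGE_SIZE) == 4);

	/* For 3 pages the digit sum yields 1, while a round-up get_order
	 * would yield 2 -- the case the NSAU branch handles correctly by
	 * counting leading zeros of (pages - 1) in hardware. */
	printf("demo_get_order(3 pages) = %d\n",
	       demo_get_order(3 * DEMO_PAGE_SIZE));
	return 0;
}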
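
Likewise for the address macros: __pa()/__va() work because KSEG is a constant-offset linear mapping of physical memory, and PAGE_ALIGN rounds up with mask arithmetic. A small sketch under the assumption that XCHAL_KSEG_CACHED_VADDR is 0xD0000000 (a common Xtensa configuration, not stated in the diff); again the demo_* names are hypothetical:

/* Standalone illustration of PAGE_ALIGN and the __pa()/__va() offset. */
#include <assert.h>

#define DEMO_PAGE_SHIFT  12                      /* assumed: 4 KiB pages */
#define DEMO_PAGE_SIZE   (1UL << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_MASK   (~(DEMO_PAGE_SIZE - 1))
#define DEMO_PAGE_ALIGN(a) (((a) + DEMO_PAGE_SIZE - 1) & DEMO_PAGE_MASK)
#define DEMO_PAGE_OFFSET 0xD0000000UL            /* assumed KSEG cached base */

#define demo_pa(v) ((unsigned long)(v) - DEMO_PAGE_OFFSET)
#define demo_va(p) ((void *)((unsigned long)(p) + DEMO_PAGE_OFFSET))

int main(void)
{
	/* Rounding up to the next page boundary with mask arithmetic. */
	assert(DEMO_PAGE_ALIGN(0x1001UL) == 0x2000UL);
	assert(DEMO_PAGE_ALIGN(0x2000UL) == 0x2000UL);  /* already aligned */

	/* Kernel virtual <-> physical is a constant offset in this scheme. */
	assert(demo_pa(0xD0001000UL) == 0x1000UL);
	assert((unsigned long)demo_va(0x1000UL) == 0xD0001000UL);
	return 0;
}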