Commit 9a0b3869bbf7cc66ee668515d4852c729158c0ca
Committed by Linus Torvalds
1 parent f718404aa9
Exists in master and in 4 other branches
[PATCH] bogus #if (arch/um/kernel/mem.c)
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
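Note (not part of the patch): the one-line fix replaces #if CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA with #ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA in fixaddr_user_init(). The difference is that #ifdef only asks whether the macro is defined, while #if evaluates it as an expression, silently substituting 0 for an undefined identifier and drawing a -Wundef warning where that option is enabled; for a Kconfig symbol that is either defined to 1 or not defined at all, the #if form is therefore bogus. A minimal standalone sketch of the behaviour, under that assumption:

    /* Hypothetical illustration, not from the patch: #if vs #ifdef when
     * CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA is either "#define ... 1" or
     * left undefined, as boolean Kconfig symbols are.
     */
    #include <stdio.h>

    /* #define CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA 1 */  /* uncomment to "enable" */

    int main(void)
    {
    #ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
            puts("#ifdef: symbol is defined");
    #else
            puts("#ifdef: symbol is not defined");
    #endif

    /* #if treats the undefined identifier as 0 and, with -Wundef, warns about it. */
    #if CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
            puts("#if: expression is non-zero");
    #else
            puts("#if: expression is zero");
    #endif
            return 0;
    }

Compiled with gcc -Wundef and the symbol undefined, the #if line produces a warning; the #ifdef line does not, and both select the "disabled" branch.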
Showing 1 changed file with 1 addition and 1 deletion (inline diff)
arch/um/kernel/mem.c
/*
 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */

#include "linux/stddef.h"
#include "linux/kernel.h"
#include "linux/mm.h"
#include "linux/bootmem.h"
#include "linux/swap.h"
#include "linux/highmem.h"
#include "linux/gfp.h"
#include "asm/page.h"
#include "asm/fixmap.h"
#include "asm/pgalloc.h"
#include "user_util.h"
#include "kern_util.h"
#include "kern.h"
#include "mem_user.h"
#include "uml_uaccess.h"
#include "os.h"

extern char __binary_start;

/* Changed during early boot */
unsigned long *empty_zero_page = NULL;
unsigned long *empty_bad_page = NULL;
pgd_t swapper_pg_dir[PTRS_PER_PGD];
unsigned long highmem;
int kmalloc_ok = 0;

static unsigned long brk_end;

void unmap_physmem(void)
{
        os_unmap_memory((void *) brk_end, uml_reserved - brk_end);
}

static void map_cb(void *unused)
{
        map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
}

#ifdef CONFIG_HIGHMEM
static void setup_highmem(unsigned long highmem_start,
                          unsigned long highmem_len)
{
        struct page *page;
        unsigned long highmem_pfn;
        int i;

        highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT;
        for(i = 0; i < highmem_len >> PAGE_SHIFT; i++){
                page = &mem_map[highmem_pfn + i];
                ClearPageReserved(page);
                set_page_count(page, 1);
                __free_page(page);
        }
}
#endif

void mem_init(void)
{
        unsigned long start;

        max_low_pfn = (high_physmem - uml_physmem) >> PAGE_SHIFT;

        /* clear the zero-page */
        memset((void *) empty_zero_page, 0, PAGE_SIZE);

        /* Map in the area just after the brk now that kmalloc is about
         * to be turned on.
         */
        brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
        map_cb(NULL);
        initial_thread_cb(map_cb, NULL);
        free_bootmem(__pa(brk_end), uml_reserved - brk_end);
        uml_reserved = brk_end;

        /* Fill in any hole at the start of the binary */
        start = (unsigned long) &__binary_start & PAGE_MASK;
        if(uml_physmem != start){
                map_memory(uml_physmem, __pa(uml_physmem), start - uml_physmem,
                           1, 1, 0);
        }

        /* this will put all low memory onto the freelists */
        totalram_pages = free_all_bootmem();
        totalhigh_pages = highmem >> PAGE_SHIFT;
        totalram_pages += totalhigh_pages;
        num_physpages = totalram_pages;
        max_pfn = totalram_pages;
        printk(KERN_INFO "Memory: %luk available\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10));
        kmalloc_ok = 1;

#ifdef CONFIG_HIGHMEM
        setup_highmem(end_iomem, highmem);
#endif
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
        if (pmd_none(*pmd)) {
                pte_t *pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                set_pmd(pmd, __pmd(_KERNPG_TABLE +
                                   (unsigned long) __pa(pte)));
                if (pte != pte_offset_kernel(pmd, 0))
                        BUG();
        }
}

static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
        pmd_t *pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
        set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
        if (pmd_table != pmd_offset(pud, 0))
                BUG();
#endif
}

static void __init fixrange_init(unsigned long start, unsigned long end,
                                 pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        int i, j;
        unsigned long vaddr;

        vaddr = start;
        i = pgd_index(vaddr);
        j = pmd_index(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                pud = pud_offset(pgd, vaddr);
                if (pud_none(*pud))
                        one_md_table_init(pud);
                pmd = pmd_offset(pud, vaddr);
                for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
                        one_page_table_init(pmd);
                        vaddr += PMD_SIZE;
                }
                j = 0;
        }
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr) \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\
                          (vaddr)), (vaddr))

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

static void init_highmem(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr;

        /*
         * Permanent kmaps:
         */
        vaddr = PKMAP_BASE;
        fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;

        kmap_init();
}
#endif /* CONFIG_HIGHMEM */

static void __init fixaddr_user_init( void)
{
-#if CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
+#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
        long size = FIXADDR_USER_END - FIXADDR_USER_START;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long paddr, vaddr = FIXADDR_USER_START;

        if ( ! size )
                return;

        fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
        paddr = (unsigned long)alloc_bootmem_low_pages( size);
        memcpy( (void *)paddr, (void *)FIXADDR_USER_START, size);
        paddr = __pa(paddr);
        for ( ; size > 0; size-=PAGE_SIZE, vaddr+=PAGE_SIZE, paddr+=PAGE_SIZE){
                pgd = swapper_pg_dir + pgd_index(vaddr);
                pud = pud_offset(pgd, vaddr);
                pmd = pmd_offset(pud, vaddr);
                pte = pte_offset_kernel(pmd, vaddr);
                pte_set_val( (*pte), paddr, PAGE_READONLY);
        }
#endif
}

void paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES], vaddr;
        int i;

        empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
        empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
        for(i=0;i<sizeof(zones_size)/sizeof(zones_size[0]);i++)
                zones_size[i] = 0;
        zones_size[0] = (end_iomem >> PAGE_SHIFT) - (uml_physmem >> PAGE_SHIFT);
        zones_size[2] = highmem >> PAGE_SHIFT;
        free_area_init(zones_size);

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

        fixaddr_user_init();

#ifdef CONFIG_HIGHMEM
        init_highmem();
#endif
}

struct page *arch_validate(struct page *page, int mask, int order)
{
        unsigned long addr, zero = 0;
        int i;

 again:
        if(page == NULL) return(page);
        if(PageHighMem(page)) return(page);

        addr = (unsigned long) page_address(page);
        for(i = 0; i < (1 << order); i++){
                current->thread.fault_addr = (void *) addr;
                if(__do_copy_to_user((void __user *) addr, &zero,
                                     sizeof(zero),
                                     &current->thread.fault_addr,
                                     &current->thread.fault_catcher)){
                        if(!(mask & __GFP_WAIT)) return(NULL);
                        else break;
                }
                addr += PAGE_SIZE;
        }

        if(i == (1 << order)) return(page);
        page = alloc_pages(mask, order);
        goto again;
}

/* This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */

void free_initmem(void)
{
}

#ifdef CONFIG_BLK_DEV_INITRD

void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk ("Freeing initrd memory: %ldk freed\n",
                        (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
}

#endif

void show_mem(void)
{
        int pfn, total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;
        struct page *page;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        pfn = max_mapnr;
        while(pfn-- > 0) {
                page = pfn_to_page(pfn);
                total++;
                if(PageHighMem(page))
                        highmem++;
                if(PageReserved(page))
                        reserved++;
                else if(PageSwapCache(page))
                        cached++;
                else if(page_count(page))
                        shared += page_count(page) - 1;
        }
        printk("%d pages of RAM\n", total);
        printk("%d pages of HIGHMEM\n", highmem);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}

/*
 * Allocate and free page tables.
 */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

        if (pgd) {
                memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
                memcpy(pgd + USER_PTRS_PER_PGD,
                       swapper_pg_dir + USER_PTRS_PER_PGD,
                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        }
        return pgd;
}

void pgd_free(pgd_t *pgd)
{
        free_page((unsigned long) pgd);
}

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte;

        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
        return pte;
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

        pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
        return pte;
}

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only. This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */