mm/highmem.c
// SPDX-License-Identifier: GPL-2.0
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
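
/*
 * Per-CPU index used by kmap_atomic() to stack temporary atomic mappings
 * (see kmap_atomic_idx_push()/kmap_atomic_idx_pop() in <linux/highmem.h>).
 */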
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
DEFINE_PER_CPU(int, __kmap_atomic_idx);
#endif

/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
#ifdef CONFIG_HIGHMEM

/*
 * Architectures with an aliasing data cache may define the following family
 * of helper functions in their asm/highmem.h to control the cache color of
 * the virtual addresses where physical memory pages are mapped by kmap.
 */
#ifndef get_pkmap_color

/*
 * Determine the color of the virtual address where the page should be mapped.
 */
static inline unsigned int get_pkmap_color(struct page *page)
{
	return 0;
}
#define get_pkmap_color get_pkmap_color

/*
 * Get the next index for a mapping inside the PKMAP region for a page of the
 * given color.
 */
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	static unsigned int last_pkmap_nr;

	last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
	return last_pkmap_nr;
}

/*
 * Determine if the page index inside the PKMAP region (pkmap_nr) of the given
 * color has wrapped around the end of the PKMAP region. When this happens, an
 * attempt to flush all unused PKMAP slots is made.
 */
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
	return pkmap_nr == 0;
}

/*
 * Get the number of PKMAP entries of the given color. If no free slot is
 * found after checking that many entries, kmap will sleep waiting for
 * someone to call kunmap and free a PKMAP slot.
 */
static inline int get_pkmap_entries_count(unsigned int color)
{
	return LAST_PKMAP;
}

/*
 * Get the head of the wait queue for PKMAP entries of the given color.
 * Wait queues for different mapping colors should be independent to avoid
 * unnecessary wakeups caused by freeing slots of other colors.
 */
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
	static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

	return &pkmap_map_wait;
}
#endif

unsigned long totalhigh_pages __read_mostly;
EXPORT_SYMBOL(totalhigh_pages);

EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
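
/**
 * nr_free_highpages - get the total number of free highmem pages
 *
 * Returns the number of currently free pages, summed over all populated
 * highmem zones.
 */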
unsigned int nr_free_highpages(void)
{
	struct zone *zone;
	unsigned int pages = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	return pages;
}
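
/*
 * Bookkeeping for the persistent kmap (PKMAP) area: pkmap_count[] holds the
 * per-slot usage counts described above, kmap_lock serialises all updates to
 * them, and pkmap_page_table points at the kernel page table covering the
 * PKMAP virtual range.
 */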
static int pkmap_count[LAST_PKMAP];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the IRQ disabling out of the locking in that case to avoid needless
 * overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()             spin_lock_irq(&kmap_lock)
#define unlock_kmap()           spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()             spin_lock(&kmap_lock)
#define unlock_kmap()           spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)    \
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)  \
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif
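
/*
 * kmap_to_page - return the struct page backing a kmap'ed address
 *
 * Addresses inside the PKMAP range are resolved through the pkmap page
 * table; anything else is treated as a lowmem address and handed to
 * virt_to_page().
 */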
struct page *kmap_to_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
		int i = PKMAP_NR(addr);
		return pte_page(pkmap_page_table[i]);
	}

	return virt_to_page(addr);
}
EXPORT_SYMBOL(kmap_to_page);
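/*
 * Tear down every PKMAP mapping whose count has dropped to 1 (mapped but no
 * longer in use), then do a single TLB flush over the whole PKMAP range.
 * Must be called with kmap_lock held.
 */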
static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use.  Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped.
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		BUG_ON(pte_none(pkmap_page_table[i]));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);

		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

/**
 * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
 */
void kmap_flush_unused(void)
{
	lock_kmap();
	flush_all_zero_pkmaps();
	unlock_kmap();
}

static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;
	unsigned int last_pkmap_nr;
	unsigned int color = get_pkmap_color(page);

start:
	count = get_pkmap_entries_count(color);
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = get_next_pkmap_nr(color);
		if (no_more_pkmaps(last_pkmap_nr, color)) {
			flush_all_zero_pkmaps();
			count = get_pkmap_entries_count(color);
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep for somebody else to unmap their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);
			wait_queue_head_t *pkmap_map_wait =
				get_pkmap_wait_queue_head(color);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void*) vaddr;
}

EXPORT_SYMBOL(kmap_high);
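
/*
 * Typical call pattern: most code uses the kmap()/kunmap() wrappers from
 * <linux/highmem.h>, which fall back to kmap_high()/kunmap_high() only for
 * highmem pages.  A minimal sketch (sleeping context only, since kmap() may
 * block):
 *
 *	void *addr = kmap(page);
 *	memcpy(addr, buf, len);
 *	kunmap(page);
 */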

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists.  If and only if a non-NULL address is returned then a
 * matching call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void*) vaddr;
}
#endif

/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;
	unsigned int color = get_pkmap_color(page);
	wait_queue_head_t *pkmap_map_wait;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock.  As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock.  Simply
		 * test if the queue is empty.
		 */
		pkmap_map_wait = get_pkmap_wait_queue_head(color);
		need_wakeup = waitqueue_active(pkmap_map_wait);
	}
	unlock_kmap_any(flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(pkmap_map_wait);
}

EXPORT_SYMBOL(kunmap_high);
#endif

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};
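
/*
 * One statically allocated map per PKMAP slot; set_page_address() picks the
 * entry for a mapping by indexing this array with PKMAP_NR(vaddr).
 */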
static struct page_address_map page_address_maps[LAST_PKMAP];

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}

EXPORT_SYMBOL(page_address);

/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				spin_unlock_irqrestore(&pas->lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
done:
	return;
}
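
/*
 * Called once during early boot to initialise the hash buckets used by
 * page_address()/set_page_address().
 */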
void __init page_address_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
}

#endif	/* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */