/* include/linux/mmzone.h */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3
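
/*
 * Illustrative note (not part of the upstream header): with the default
 * MAX_ORDER of 11, the largest buddy allocation is
 * MAX_ORDER_NR_PAGES = 1 << 10 = 1024 pages, i.e. 4MiB with 4KiB pages,
 * and anything above order PAGE_ALLOC_COSTLY_ORDER (3, i.e. 32KiB) is
 * treated as expensive to satisfy.
 */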

enum {
        MIGRATE_UNMOVABLE,
        MIGRATE_MOVABLE,
        MIGRATE_RECLAIMABLE,
        MIGRATE_PCPTYPES,       /* the number of types on the pcp lists */
        MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
        /*
         * MIGRATE_CMA migration type is designed to mimic the way
         * ZONE_MOVABLE works.  Only movable pages can be allocated
         * from MIGRATE_CMA pageblocks and the page allocator never
         * implicitly changes the migration type of a MIGRATE_CMA pageblock.
         *
         * The way to use it is to change the migratetype of a range of
         * pageblocks to MIGRATE_CMA, which can be done by the
         * __free_pageblock_cma() function.  What is important though
         * is that a range of pageblocks must be aligned to
         * MAX_ORDER_NR_PAGES should the biggest page be bigger than
         * a single pageblock.
         */
        MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
        MIGRATE_ISOLATE,        /* can't allocate from here */
#endif
        MIGRATE_TYPES
};

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
#endif

#define for_each_migratetype_order(order, type) \
        for (order = 0; order < MAX_ORDER; order++) \
                for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)                                 \
        get_pfnblock_flags_mask(page, page_to_pfn(page),                \
                        PB_migrate_end, MIGRATETYPE_MASK)

static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
        BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2);
        return get_pfnblock_flags_mask(page, pfn,
                                       PB_migrate_end, MIGRATETYPE_MASK);
}
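
/*
 * Illustrative sketch (not part of the upstream header): how the helpers
 * above compose.  The helper name below is hypothetical.
 */
static inline bool __example_page_in_cma_pageblock(struct page *page)
{
        /* get_pageblock_migratetype() reads the pageblock bits for this page */
        return is_migrate_cma(get_pageblock_migratetype(page));
}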

struct free_area {
        struct list_head free_list[MIGRATE_TYPES];
        unsigned long nr_free;
};

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
        char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)      struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
        /* First 128 byte cacheline (assuming 64 bit words) */
        NR_FREE_PAGES,
        NR_ALLOC_BATCH,
        NR_LRU_BASE,
        NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
        NR_ACTIVE_ANON,         /*  "     "     "   "       "         */
        NR_INACTIVE_FILE,       /*  "     "     "   "       "         */
        NR_ACTIVE_FILE,         /*  "     "     "   "       "         */
        NR_UNEVICTABLE,         /*  "     "     "   "       "         */
        NR_MLOCK,               /* mlock()ed pages found and moved off LRU */
        NR_ANON_PAGES,          /* Mapped anonymous pages */
        NR_FILE_MAPPED,         /* pagecache pages mapped into pagetables.
                                   only modified from process context */
        NR_FILE_PAGES,
        NR_FILE_DIRTY,
        NR_WRITEBACK,
        NR_SLAB_RECLAIMABLE,
        NR_SLAB_UNRECLAIMABLE,
        NR_PAGETABLE,           /* used for pagetables */
        NR_KERNEL_STACK,
        /* Second 128 byte cacheline */
        NR_UNSTABLE_NFS,        /* NFS unstable pages */
        NR_BOUNCE,
        NR_VMSCAN_WRITE,
        NR_VMSCAN_IMMEDIATE,    /* Prioritise for reclaim when writeback ends */
        NR_WRITEBACK_TEMP,      /* Writeback using temporary buffers */
        NR_ISOLATED_ANON,       /* Temporary isolated pages from anon lru */
        NR_ISOLATED_FILE,       /* Temporary isolated pages from file lru */
        NR_SHMEM,               /* shmem pages (includes tmpfs/GEM pages) */
        NR_DIRTIED,             /* page dirtyings since bootup */
        NR_WRITTEN,             /* page writings since bootup */
        NR_PAGES_SCANNED,       /* pages scanned since last reclaim */
#ifdef CONFIG_NUMA
        NUMA_HIT,               /* allocated in intended node */
        NUMA_MISS,              /* allocated in non intended node */
        NUMA_FOREIGN,           /* was intended here, hit elsewhere */
        NUMA_INTERLEAVE_HIT,    /* interleaver preferred this zone */
        NUMA_LOCAL,             /* allocation from local node */
        NUMA_OTHER,             /* allocation from other node */
#endif
        WORKINGSET_REFAULT,
        WORKINGSET_ACTIVATE,
        WORKINGSET_NODERECLAIM,
        NR_ANON_TRANSPARENT_HUGEPAGES,
        NR_FREE_CMA_PAGES,
        NR_VM_ZONE_STAT_ITEMS
};

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
        LRU_INACTIVE_ANON = LRU_BASE,
        LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
        LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
        LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
        LRU_UNEVICTABLE,
        NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
        return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
        return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

static inline int is_unevictable_lru(enum lru_list lru)
{
        return (lru == LRU_UNEVICTABLE);
}
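
/*
 * Illustrative sketch (not part of the upstream header): because of the
 * LRU_ACTIVE/LRU_FILE offsets above, related lists differ by a fixed
 * amount.  The helper name below is hypothetical.
 */
static inline enum lru_list __example_active_counterpart(enum lru_list lru)
{
        /* only meaningful for LRU_INACTIVE_ANON and LRU_INACTIVE_FILE */
        return lru + LRU_ACTIVE;
}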

struct zone_reclaim_stat {
        /*
         * The pageout code in vmscan.c keeps track of how many of the
         * mem/swap backed and file backed pages are referenced.
         * The higher the rotated/scanned ratio, the more valuable
         * that cache is.
         *
         * The anon LRU stats live in [0], file LRU stats in [1]
         */
        unsigned long recent_rotated[2];
        unsigned long recent_scanned[2];
};

struct lruvec {
        struct list_head lists[NR_LRU_LISTS];
        struct zone_reclaim_stat reclaim_stat;
#ifdef CONFIG_MEMCG
        struct zone *zone;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL      ((1 << NR_LRU_LISTS) - 1)

/* Isolate clean file */
#define ISOLATE_CLEAN           ((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED        ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE   ((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE     ((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;

enum zone_watermarks {
        WMARK_MIN,
        WMARK_LOW,
        WMARK_HIGH,
        NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
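
/*
 * Illustrative note (not part of the upstream header): the watermarks are
 * read through the accessors above once struct zone is defined below, e.g.
 * a check such as
 *
 *      zone_page_state(z, NR_FREE_PAGES) < low_wmark_pages(z)
 *
 * is roughly the condition under which kswapd is woken for the zone.
 */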

struct per_cpu_pages {
        int count;              /* number of pages in the list */
        int high;               /* high watermark, emptying needed */
        int batch;              /* chunk size for buddy add/remove */

        /* Lists of pages, one per migrate type stored on the pcp-lists */
        struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
        struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
        s8 expire;
#endif
#ifdef CONFIG_SMP
        s8 stat_threshold;
        s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

#endif /* !__GENERATING_BOUNDS.H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
        /*
         * ZONE_DMA is used when there are devices that are not able
         * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
         * carve out the portion of memory that is needed for these devices.
         * The range is arch specific.
         *
         * Some examples
         *
         * Architecture         Limit
         * ---------------------------
         * parisc, ia64, sparc  <4G
         * s390                 <2G
         * arm                  Various
         * alpha                Unlimited or 0-16MB.
         *
         * i386, x86_64 and multiple other arches
         *                      <16M.
         */
        ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
        /*
         * x86_64 needs two ZONE_DMAs because it supports devices that are
         * only able to do DMA to the lower 16M but also 32 bit devices that
         * can only do DMA areas below 4G.
         */
        ZONE_DMA32,
#endif
        /*
         * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
         * performed on pages in ZONE_NORMAL if the DMA devices support
         * transfers to all addressable memory.
         */
        ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
        /*
         * A memory area that is only addressable by the kernel through
         * mapping portions into its own address space. This is for example
         * used by i386 to allow the kernel to address the memory beyond
         * 900MB. The kernel will set up special mappings (page
         * table entries on i386) for each page that the kernel needs to
         * access.
         */
        ZONE_HIGHMEM,
#endif
        ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
        ZONE_DEVICE,
#endif
        __MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
        /* Read-mostly fields */

        /* zone watermarks, access with *_wmark_pages(zone) macros */
        unsigned long watermark[NR_WMARK];

        unsigned long nr_reserved_highatomic;

        /*
         * We don't know if the memory that we're going to allocate will be
         * freeable or/and it will be released eventually, so to avoid totally
         * wasting several GB of ram we must reserve some of the lower zone
         * memory (otherwise we risk to run OOM on the lower zones despite
         * there being tons of freeable ram on the higher zones).  This array is
         * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
         * changes.
         */
        long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
        int node;
#endif

        /*
         * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
         * this zone's LRU.  Maintained by the pageout code.
         */
        unsigned int inactive_ratio;

        struct pglist_data *zone_pgdat;
        struct per_cpu_pageset __percpu *pageset;

        /*
         * This is a per-zone reserve of pages that should not be
         * considered dirtyable memory.
         */
        unsigned long dirty_balance_reserve;

#ifndef CONFIG_SPARSEMEM
        /*
         * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
         * In SPARSEMEM, this map is stored in struct mem_section
         */
        unsigned long *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NUMA
        /*
         * zone reclaim becomes active if more unmapped pages exist.
         */
        unsigned long min_unmapped_pages;
        unsigned long min_slab_pages;
#endif /* CONFIG_NUMA */

        /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
        unsigned long zone_start_pfn;

        /*
         * spanned_pages is the total pages spanned by the zone, including
         * holes, which is calculated as:
         *      spanned_pages = zone_end_pfn - zone_start_pfn;
         *
         * present_pages is physical pages existing within the zone, which
         * is calculated as:
         *      present_pages = spanned_pages - absent_pages(pages in holes);
         *
         * managed_pages is present pages managed by the buddy system, which
         * is calculated as (reserved_pages includes pages allocated by the
         * bootmem allocator):
         *      managed_pages = present_pages - reserved_pages;
         *
         * So present_pages may be used by memory hotplug or memory power
         * management logic to figure out unmanaged pages by checking
         * (present_pages - managed_pages). And managed_pages should be used
         * by page allocator and vm scanner to calculate all kinds of watermarks
         * and thresholds.
         *
         * Locking rules:
         *
         * zone_start_pfn and spanned_pages are protected by span_seqlock.
         * It is a seqlock because it has to be read outside of zone->lock,
         * and it is done in the main allocator path.  But, it is written
         * quite infrequently.
         *
         * The span_seq lock is declared along with zone->lock because it is
         * frequently read in proximity to zone->lock.  It's good to
         * give them a chance of being in the same cacheline.
         *
         * Write access to present_pages at runtime should be protected by
         * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
         * present_pages should use get_online_mems() to get a stable value.
         *
         * Read access to managed_pages should be safe because it's unsigned
         * long. Write access to zone->managed_pages and totalram_pages are
         * protected by managed_page_count_lock at runtime. Ideally only
         * adjust_managed_page_count() should be used instead of directly
         * touching zone->managed_pages and totalram_pages.
         */
        unsigned long managed_pages;
        unsigned long spanned_pages;
        unsigned long present_pages;

        const char *name;

#ifdef CONFIG_MEMORY_ISOLATION
        /*
         * Number of isolated pageblock. It is used to solve incorrect
         * freepage counting problem due to racy retrieving migratetype
         * of pageblock. Protected by zone->lock.
         */
        unsigned long nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
        /* see spanned/present_pages for more description */
        seqlock_t span_seqlock;
#endif

        /*
         * wait_table           -- the array holding the hash table
         * wait_table_hash_nr_entries  -- the size of the hash table array
         * wait_table_bits      -- wait_table_size == (1 << wait_table_bits)
         *
         * The purpose of all these is to keep track of the people
         * waiting for a page to become available and make them
         * runnable again when possible. The trouble is that this
         * consumes a lot of space, especially when so few things
         * wait on pages at a given time. So instead of using
         * per-page waitqueues, we use a waitqueue hash table.
         *
         * The bucket discipline is to sleep on the same queue when
         * colliding and wake all in that wait queue when removing.
         * When something wakes, it must check to be sure its page is
         * truly available, a la thundering herd. The cost of a
         * collision is great, but given the expected load of the
         * table, they should be so rare as to be outweighed by the
         * benefits from the saved space.
         *
         * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
         * primary users of these fields, and in mm/page_alloc.c
         * free_area_init_core() performs the initialization of them.
         */
        wait_queue_head_t *wait_table;
        unsigned long wait_table_hash_nr_entries;
        unsigned long wait_table_bits;

        ZONE_PADDING(_pad1_)

        /* free areas of different sizes */
        struct free_area free_area[MAX_ORDER];

        /* zone flags, see below */
        unsigned long flags;

        /* Write-intensive fields used from the page allocator */
        spinlock_t lock;

        ZONE_PADDING(_pad2_)

        /* Write-intensive fields used by page reclaim */

        /* Fields commonly accessed by the page reclaim scanner */
        spinlock_t lru_lock;
        struct lruvec lruvec;

        /* Evictions & activations on the inactive file list */
        atomic_long_t inactive_age;

        /*
         * When free pages are below this point, additional steps are taken
         * when reading the number of free pages to avoid per-cpu counter
         * drift allowing watermarks to be breached
         */
        unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
        /* pfn where compaction free scanner should start */
        unsigned long compact_cached_free_pfn;
        /* pfn where async and sync compaction migration scanner should start */
        unsigned long compact_cached_migrate_pfn[2];
#endif

#ifdef CONFIG_COMPACTION
        /*
         * On compaction failure, 1<<compact_defer_shift compactions
         * are skipped before trying again. The number attempted since
         * last failure is tracked with compact_considered.
         */
        unsigned int compact_considered;
        unsigned int compact_defer_shift;
        int compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
        /* Set to true when the PG_migrate_skip bits should be cleared */
        bool compact_blockskip_flush;
#endif

        ZONE_PADDING(_pad3_)
        /* Zone statistics */
        atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum zone_flags {
        ZONE_RECLAIM_LOCKED,    /* prevents concurrent reclaim */
        ZONE_OOM_LOCKED,        /* zone is in OOM killer zonelist */
        ZONE_CONGESTED,         /* zone has many dirty pages backed by
                                 * a congested BDI
                                 */
        ZONE_DIRTY,             /* reclaim scanning has recently found
                                 * many dirty file pages at the tail
                                 * of the LRU.
                                 */
        ZONE_WRITEBACK,         /* reclaim scanning has recently found
                                 * many pages under writeback
                                 */
        ZONE_FAIR_DEPLETED,     /* fair zone policy batch depleted */
};

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
        return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
        return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
        return !!zone->wait_table;
}

static inline bool zone_is_empty(struct zone *zone)
{
        return zone->spanned_pages == 0;
}

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12
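
/*
 * Illustrative sketch (not part of the upstream header): the span helpers
 * above make the spanned size of a zone explicit.  The helper name below
 * is hypothetical.
 */
static inline unsigned long __example_zone_nr_spanned(const struct zone *zone)
{
        /* equal to zone->spanned_pages by definition of zone_end_pfn() */
        return zone_end_pfn(zone) - zone->zone_start_pfn;
}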

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA
/*
 * The NUMA zonelists are doubled because we need zonelists that restrict the
 * allocations to a single node for __GFP_THISNODE.
 *
 * [0]  : Zonelist with fallback
 * [1]  : No fallback (__GFP_THISNODE)
 */
#define MAX_ZONELISTS 2
#else
#define MAX_ZONELISTS 1
#endif

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
        struct zone *zone;      /* Pointer to actual zone */
        int zone_idx;           /* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()      - Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()  - Return the index of the zone for an entry
 * zonelist_node_idx()  - Return the index of the node for an entry
 */
struct zonelist {
        struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
        struct zone node_zones[MAX_NR_ZONES];
        struct zonelist node_zonelists[MAX_ZONELISTS];
        int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
        struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
        struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
        struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
        /*
         * Must be held any time you expect node_start_pfn, node_present_pages
         * or node_spanned_pages stay constant.  Holding this will also
         * guarantee that any pfn_valid() stays that way.
         *
         * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
         * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
         *
         * Nests above zone->lock and zone->span_seqlock
         */
        spinlock_t node_size_lock;
#endif
        unsigned long node_start_pfn;
        unsigned long node_present_pages; /* total number of physical pages */
        unsigned long node_spanned_pages; /* total size of physical page
                                             range, including holes */
        int node_id;
        wait_queue_head_t kswapd_wait;
        wait_queue_head_t pfmemalloc_wait;
        struct task_struct *kswapd;     /* Protected by
                                           mem_hotplug_begin/end() */
        int kswapd_max_order;
        enum zone_type classzone_idx;
#ifdef CONFIG_NUMA_BALANCING
        /* Lock serializing the migrate rate limiting window */
        spinlock_t numabalancing_migrate_lock;

        /* Rate limiting time interval */
        unsigned long numabalancing_migrate_next_window;

        /* Number of pages migrated during the rate limiting time interval */
        unsigned long numabalancing_migrate_nr_pages;
#endif
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
        /*
         * If memory initialisation on large machines is deferred then this
         * is the first PFN that needs to be initialised.
         */
        unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
} pg_data_t;

#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)

#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)    ((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)    pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)        pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)     (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
        return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
        return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

static inline int zone_id(const struct zone *zone)
{
        struct pglist_data *pgdat = zone->zone_pgdat;

        return zone - pgdat->node_zones;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
        return zone_id(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
        return false;
}
#endif
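
/*
 * Illustrative sketch (not part of the upstream header): a node-level
 * analogue of zone_spans_pfn(), built from pgdat_end_pfn() above.  The
 * helper name below is hypothetical.
 */
static inline bool __example_pgdat_spans_pfn(pg_data_t *pgdat, unsigned long pfn)
{
        return pfn >= pgdat->node_start_pfn && pfn < pgdat_end_pfn(pgdat);
}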

#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool zone_watermark_ok(struct zone *z, unsigned int order,
                unsigned long mark, int classzone_idx, int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
                unsigned long mark, int classzone_idx);
enum memmap_context {
        MEMMAP_EARLY,
        MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
                                     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct zone *lruvec_zone(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
        return lruvec->zone;
#else
        return container_of(lruvec, struct zone, lruvec);
#endif
}

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)          ((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
        return (!!zone->present_pages);
}

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
        return movable_zone == ZONE_HIGHMEM;
#else
        return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
        return (idx == ZONE_HIGHMEM ||
                (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
        return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
        int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
        return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
               (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
                zone_movable_is_highmem());
#else
        return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
                        void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
                        void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
                        void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16      /* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)          (&contig_page_data)
#define NODE_MEM_MAP(nid)       mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)                    \
        for (pgdat = first_online_pgdat();              \
             pgdat;                                     \
             pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)                             \
        for (zone = (first_online_pgdat())->node_zones; \
             zone;                                      \
             zone = next_zone(zone))

#define for_each_populated_zone(zone)                   \
        for (zone = (first_online_pgdat())->node_zones; \
             zone;                                      \
             zone = next_zone(zone))                    \
                if (!populated_zone(zone))              \
                        ; /* do nothing */              \
                else

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
        return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
        return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
        /* zone_to_nid not available in this context */
        return zoneref->zone->node;
#else
        return 0;
#endif /* CONFIG_NUMA */
}

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
struct zoneref *next_zones_zonelist(struct zoneref *z,
                                        enum zone_type highest_zoneidx,
                                        nodemask_t *nodes);

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
                                        enum zone_type highest_zoneidx,
                                        nodemask_t *nodes,
                                        struct zone **zone)
{
        struct zoneref *z = next_zones_zonelist(zonelist->_zonerefs,
                                                        highest_zoneidx, nodes);
        *zone = zonelist_zone(z);
        return z;
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
        for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
                zone;                                                   \
                z = next_zones_zonelist(++z, highidx, nodemask),        \
                        zone = zonelist_zone(z))                        \

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
        for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
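
/*
 * Illustrative sketch (not part of the upstream header): counting the
 * populated zones a zonelist would consider for an allocation no higher
 * than @highidx.  The helper name below is hypothetical.
 */
static inline int __example_nr_usable_zones(struct zonelist *zlist,
                                            enum zone_type highidx)
{
        struct zoneref *z;
        struct zone *zone;
        int nr = 0;

        for_each_zone_zonelist(zone, z, zlist, highidx)
                if (populated_zone(zone))
                        nr++;
        return nr;
}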

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
        !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
        return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)         (0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT                #bits space required to store a section #
 *
 * PA_SECTION_SHIFT             physical address to/from section number
 * PFN_SECTION_SHIFT            pfn to/from section number
 */
#define PA_SECTION_SHIFT        (SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT       (SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS         (1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK       (~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
        ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#define SECTION_ALIGN_UP(pfn)   (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)

struct page;
struct page_ext;
struct mem_section {
        /*
         * This is, logically, a pointer to an array of struct
         * pages.  However, it is stored with some other magic.
         * (see sparse.c::sparse_init_one_section())
         *
         * Additionally during early boot we encode node id of
         * the location of the section here to guide allocation.
         * (see sparse.c::memory_present())
         *
         * Making it a UL at least makes someone do a cast
         * before using it wrong.
         */
        unsigned long section_mem_map;

        /* See declaration of similar field in struct zone */
        unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
        /*
         * If !SPARSEMEM, pgdat doesn't have page_ext pointer. We use
         * section. (see page_ext.h about this.)
         */
        struct page_ext *page_ext;
        unsigned long pad;
#endif
        /*
         * WARNING: mem_section must be a power-of-2 in size for the
         * calculation and use of SECTION_ROOT_MASK to make sense.
         */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT       1
#endif

#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS        DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK       (SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
        if (!mem_section[SECTION_NR_TO_ROOT(nr)])
                return NULL;
        return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT  (1UL<<0)
#define SECTION_HAS_MEM_MAP     (1UL<<1)
#define SECTION_MAP_LAST_BIT    (1UL<<2)
#define SECTION_MAP_MASK        (~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT       2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
        unsigned long map = section->section_mem_map;
        map &= SECTION_MAP_MASK;
        return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
        return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
        return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
        return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
        return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
        return __nr_to_section(pfn_to_section_nr(pfn));
}
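
/*
 * Illustrative sketch (not part of the upstream header): this is
 * essentially how SPARSEMEM's __pfn_to_page() is built on top of the
 * helpers above -- the encoded mem_map pointer is biased so that adding
 * the pfn directly yields the struct page.  The helper name below is
 * hypothetical.
 */
static inline struct page *__example_sparse_pfn_to_page(unsigned long pfn)
{
        return __section_mem_map_addr(__pfn_to_section(pfn)) + pfn;
}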

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;
        return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;
        return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)                                                 \
({                                                                      \
        unsigned long __pfn_to_nid_pfn = (pfn);                         \
        page_to_nid(pfn_to_page(__pfn_to_nid_pfn));                     \
})
#else
#define pfn_to_nid(pfn)         (0)
#endif

#define early_pfn_valid(pfn)    pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()   do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * may treat start/end as pfns or sections.
 */
struct mminit_pfnnid_cache {
        unsigned long last_start;
        unsigned long last_end;
        int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)    (1)
#endif
void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. In FLATMEM, it is expected that holes always
 * have valid memmap as long as there are valid PFNs either side of the hole.
 * In SPARSEMEM, it is assumed that a valid section has a memmap for the
 * entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future,
 * free memmap backing holes to save memory on the assumption the memmap is
 * never used. The page_zone linkages are then broken even though pfn_valid()
 * returns true. A walker of the full memmap must then do this additional
 * check to ensure the memmap they are looking at is sane by making sure
 * the zone and PFN linkages are still valid. This is expensive, but walkers
 * of the full memmap are extremely rare.
 */
int memmap_valid_within(unsigned long pfn,
                                        struct page *page, struct zone *zone);
#else
static inline int memmap_valid_within(unsigned long pfn,
                                        struct page *page, struct zone *zone)
{
        return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */