include/linux/mmzone.h
  #ifndef _LINUX_MMZONE_H
  #define _LINUX_MMZONE_H
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3
  #ifndef __ASSEMBLY__
97965478a   Christoph Lameter   mm: Get rid of __...
4
  #ifndef __GENERATING_BOUNDS_H
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
5

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
6
7
8
  #include <linux/spinlock.h>
  #include <linux/list.h>
  #include <linux/wait.h>
e815af95f   David Rientjes   oom: change all_u...
9
  #include <linux/bitops.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
10
11
12
13
  #include <linux/cache.h>
  #include <linux/threads.h>
  #include <linux/numa.h>
  #include <linux/init.h>
bdc8cb984   Dave Hansen   [PATCH] memory ho...
14
  #include <linux/seqlock.h>
8357f8695   KAMEZAWA Hiroyuki   [PATCH] define fo...
15
  #include <linux/nodemask.h>
835c134ec   Mel Gorman   Add a bitmap that...
16
  #include <linux/pageblock-flags.h>
01fc0ac19   Sam Ravnborg   kbuild: move boun...
17
  #include <generated/bounds.h>
60063497a   Arun Sharma   atomic: use <linu...
18
  #include <linux/atomic.h>
93ff66bf1   Ralf Baechle   [PATCH] Sparsemem...
19
  #include <asm/page.h>
  
  /* Free memory management - zoned buddy allocator.  */
  #ifndef CONFIG_FORCE_MAX_ZONEORDER
  #define MAX_ORDER 11
  #else
  #define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
  #endif
e984bb43f   Bob Picco   [PATCH] Align the...
27
  #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
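/*
 * Illustrative note, not part of this header: with the default MAX_ORDER of
 * 11 the buddy allocator manages blocks of order 0..10, so the largest
 * contiguous allocation is MAX_ORDER_NR_PAGES = 1 << 10 = 1024 pages, i.e.
 * 4MB with 4KB pages.  The hypothetical helper below only shows the
 * arithmetic.
 */
#if 0	/* example only */
static inline unsigned long example_order_to_bytes(unsigned int order)
{
	return (1UL << order) * PAGE_SIZE;	/* order MAX_ORDER - 1 -> 4MB on 4KB pages */
}
#endif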
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
28

  /*
   * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
   * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
   * will not.
   */
  #define PAGE_ALLOC_COSTLY_ORDER 3
b2a0ac887   Mel Gorman   Split the free li...
36
  #define MIGRATE_UNMOVABLE     0
e12ba74d8   Mel Gorman   Group short-lived...
37
38
  #define MIGRATE_RECLAIMABLE   1
  #define MIGRATE_MOVABLE       2
5f8dcc212   Mel Gorman   page-allocator: s...
39
  #define MIGRATE_PCPTYPES      3 /* the number of types on the pcp lists */
64c5e135b   Mel Gorman   don't group high ...
40
  #define MIGRATE_RESERVE       3
a5d76b54a   KAMEZAWA Hiroyuki   memory unplug: pa...
41
42
  #define MIGRATE_ISOLATE       4 /* can't allocate from here */
  #define MIGRATE_TYPES         5
b2a0ac887   Mel Gorman   Split the free li...
43
44
45
46
  
  #define for_each_migratetype_order(order, type) \
  	for (order = 0; order < MAX_ORDER; order++) \
  		for (type = 0; type < MIGRATE_TYPES; type++)
467c996c1   Mel Gorman   Print out statist...
47
48
49
50
  extern int page_group_by_mobility_disabled;
  
  static inline int get_pageblock_migratetype(struct page *page)
  {
467c996c1   Mel Gorman   Print out statist...
51
52
  	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
53
  struct free_area {
b2a0ac887   Mel Gorman   Split the free li...
54
  	struct list_head	free_list[MIGRATE_TYPES];
  	unsigned long		nr_free;
  };
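/*
 * Illustrative sketch, not part of this header: nr_free counts free blocks of
 * one order across all of that order's migrate-type lists, so a page count
 * scales each order by 1 << order.  "zone" and the zone->lock rule are
 * assumptions of this example, which loosely mirrors the statistics code in
 * mm/page_alloc.c.
 */
#if 0	/* example only */
static unsigned long example_zone_free_pages(struct zone *zone)
{
	unsigned int order;
	unsigned long pages = 0;

	/* caller is assumed to hold zone->lock */
	for (order = 0; order < MAX_ORDER; order++)
		pages += zone->free_area[order].nr_free << order;
	return pages;
}
#endif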
  
  struct pglist_data;
  
  /*
   * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
   * So add a wild amount of padding here to ensure that they fall into separate
   * cachelines.  There are very few zone structures in the machine, so space
   * consumption is not a concern here.
   */
  #if defined(CONFIG_SMP)
  struct zone_padding {
  	char x[0];
22fc6eccb   Ravikiran G Thirumalai   [PATCH] Change ma...
69
  } ____cacheline_internodealigned_in_smp;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
70
71
72
73
  #define ZONE_PADDING(name)	struct zone_padding name;
  #else
  #define ZONE_PADDING(name)
  #endif
2244b95a7   Christoph Lameter   [PATCH] zoned vm ...
74
  enum zone_stat_item {
51ed44912   Christoph Lameter   [PATCH] Reorder Z...
75
  	/* First 128 byte cacheline (assuming 64 bit words) */
d23ad4232   Christoph Lameter   [PATCH] Use ZVC f...
76
  	NR_FREE_PAGES,
b69408e88   Christoph Lameter   vmscan: Use an in...
77
  	NR_LRU_BASE,
4f98a2fee   Rik van Riel   vmscan: split LRU...
78
79
80
81
  	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
  	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
  	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
  	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
894bc3104   Lee Schermerhorn   Unevictable LRU I...
82
  	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
5344b7e64   Nick Piggin   vmstat: mlocked p...
83
  	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
f3dbd3446   Christoph Lameter   [PATCH] zoned vm ...
84
85
  	NR_ANON_PAGES,	/* Mapped anonymous pages */
  	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
65ba55f50   Christoph Lameter   [PATCH] zoned vm ...
86
  			   only modified from process context */
347ce434d   Christoph Lameter   [PATCH] zoned vm ...
87
  	NR_FILE_PAGES,
b1e7a8fd8   Christoph Lameter   [PATCH] zoned vm ...
88
  	NR_FILE_DIRTY,
ce866b34a   Christoph Lameter   [PATCH] zoned vm ...
89
  	NR_WRITEBACK,
51ed44912   Christoph Lameter   [PATCH] Reorder Z...
90
91
92
  	NR_SLAB_RECLAIMABLE,
  	NR_SLAB_UNRECLAIMABLE,
  	NR_PAGETABLE,		/* used for pagetables */
c6a7f5728   KOSAKI Motohiro   mm: oom analysis:...
93
94
  	NR_KERNEL_STACK,
  	/* Second 128 byte cacheline */
fd39fc856   Christoph Lameter   [PATCH] zoned vm ...
95
  	NR_UNSTABLE_NFS,	/* NFS unstable pages */
d2c5e30c9   Christoph Lameter   [PATCH] zoned vm ...
96
  	NR_BOUNCE,
e129b5c23   Andrew Morton   [PATCH] vm: add p...
97
  	NR_VMSCAN_WRITE,
49ea7eb65   Mel Gorman   mm: vmscan: immed...
98
  	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
fc3ba692a   Miklos Szeredi   mm: Add NR_WRITEB...
99
  	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
a731286de   KOSAKI Motohiro   mm: vmstat: add i...
100
101
  	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
  	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
4b02108ac   KOSAKI Motohiro   mm: oom analysis:...
102
	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
ea941f0e2   Michael Rubin   writeback: add nr...
103
104
  	NR_DIRTIED,		/* page dirtyings since bootup */
  	NR_WRITTEN,		/* page writings since bootup */
  #ifdef CONFIG_NUMA
  	NUMA_HIT,		/* allocated in intended node */
  	NUMA_MISS,		/* allocated in non intended node */
  	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
  	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
  	NUMA_LOCAL,		/* allocation from local node */
  	NUMA_OTHER,		/* allocation from other node */
  #endif
79134171d   Andrea Arcangeli   thp: transparent ...
113
  	NR_ANON_TRANSPARENT_HUGEPAGES,
2244b95a7   Christoph Lameter   [PATCH] zoned vm ...
114
  	NR_VM_ZONE_STAT_ITEMS };
  /*
   * We do arithmetic on the LRU lists in various places in the code,
   * so it is important to keep the active lists LRU_ACTIVE higher in
   * the array than the corresponding inactive lists, and to keep
   * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
   *
   * This has to be kept in sync with the statistics in zone_stat_item
   * above and the descriptions in vmstat_text in mm/vmstat.c
   */
  #define LRU_BASE 0
  #define LRU_ACTIVE 1
  #define LRU_FILE 2
b69408e88   Christoph Lameter   vmscan: Use an in...
127
  enum lru_list {
4f98a2fee   Rik van Riel   vmscan: split LRU...
128
129
130
131
  	LRU_INACTIVE_ANON = LRU_BASE,
  	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
  	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
  	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
894bc3104   Lee Schermerhorn   Unevictable LRU I...
132
  	LRU_UNEVICTABLE,
894bc3104   Lee Schermerhorn   Unevictable LRU I...
133
134
  	NR_LRU_LISTS
  };
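/*
 * Illustrative sketch, not part of this header: the LRU_ACTIVE and LRU_FILE
 * offsets make list relationships arithmetic, e.g. LRU_INACTIVE_FILE +
 * LRU_ACTIVE == LRU_ACTIVE_FILE and NR_LRU_BASE + lru indexes the matching
 * zone_stat_item.  The helpers below are hypothetical illustrations of that
 * arithmetic.
 */
#if 0	/* example only */
static inline enum lru_list example_active_of(enum lru_list inactive)
{
	return inactive + LRU_ACTIVE;		/* inactive list -> its active partner */
}

static inline enum zone_stat_item example_lru_stat(enum lru_list lru)
{
	return NR_LRU_BASE + lru;		/* e.g. NR_INACTIVE_ANON for LRU_INACTIVE_ANON */
}
#endif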
b69408e88   Christoph Lameter   vmscan: Use an in...
135

4111304da   Hugh Dickins   mm: enum lru_list...
136
  #define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
b69408e88   Christoph Lameter   vmscan: Use an in...
137

4111304da   Hugh Dickins   mm: enum lru_list...
138
  #define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
894bc3104   Lee Schermerhorn   Unevictable LRU I...
139

4111304da   Hugh Dickins   mm: enum lru_list...
140
  static inline int is_file_lru(enum lru_list lru)
4f98a2fee   Rik van Riel   vmscan: split LRU...
141
  {
4111304da   Hugh Dickins   mm: enum lru_list...
142
  	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
4f98a2fee   Rik van Riel   vmscan: split LRU...
143
  }
4111304da   Hugh Dickins   mm: enum lru_list...
144
  static inline int is_active_lru(enum lru_list lru)
b69408e88   Christoph Lameter   vmscan: Use an in...
145
  {
4111304da   Hugh Dickins   mm: enum lru_list...
146
  	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
b69408e88   Christoph Lameter   vmscan: Use an in...
147
  }
4111304da   Hugh Dickins   mm: enum lru_list...
148
  static inline int is_unevictable_lru(enum lru_list lru)
894bc3104   Lee Schermerhorn   Unevictable LRU I...
149
  {
4111304da   Hugh Dickins   mm: enum lru_list...
150
  	return (lru == LRU_UNEVICTABLE);
894bc3104   Lee Schermerhorn   Unevictable LRU I...
151
  }
6290df545   Johannes Weiner   mm: collect LRU l...
152
153
154
  struct lruvec {
  	struct list_head lists[NR_LRU_LISTS];
  };
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
155
156
157
158
159
  /* Mask used at gathering information at once (see memcontrol.c) */
  #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
  #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
  #define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
  #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
4356f21d0   Minchan Kim   mm: change isolat...
160
161
162
163
  /* Isolate inactive pages */
  #define ISOLATE_INACTIVE	((__force isolate_mode_t)0x1)
  /* Isolate active pages */
  #define ISOLATE_ACTIVE		((__force isolate_mode_t)0x2)
39deaf858   Minchan Kim   mm: compaction: m...
164
165
  /* Isolate clean file */
  #define ISOLATE_CLEAN		((__force isolate_mode_t)0x4)
f80c06736   Minchan Kim   mm: zone_reclaim:...
166
167
  /* Isolate unmapped file */
  #define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x8)
c82449352   Mel Gorman   mm: compaction: m...
168
169
  /* Isolate for asynchronous migration */
  #define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x10)
4356f21d0   Minchan Kim   mm: change isolat...
170
171
172
  
  /* LRU Isolation modes. */
  typedef unsigned __bitwise__ isolate_mode_t;
  enum zone_watermarks {
  	WMARK_MIN,
  	WMARK_LOW,
  	WMARK_HIGH,
  	NR_WMARK
  };
  
  #define min_wmark_pages(z) (z->watermark[WMARK_MIN])
  #define low_wmark_pages(z) (z->watermark[WMARK_LOW])
  #define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
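/*
 * Illustrative sketch, not part of this header: min/low/high drive reclaim.
 * Allocations fall back to direct reclaim around WMARK_MIN, kswapd is woken
 * once a zone drops below WMARK_LOW and balances it back up to WMARK_HIGH.
 * The helper below is a hypothetical simplification of zone_watermark_ok();
 * the real check also honours lowmem_reserve[] and allocation flags.
 */
#if 0	/* example only */
static bool example_should_wake_kswapd(struct zone *z, unsigned long free_pages)
{
	return free_pages < low_wmark_pages(z);
}
#endif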
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
183
184
  struct per_cpu_pages {
  	int count;		/* number of pages in the list */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
185
186
  	int high;		/* high watermark, emptying needed */
  	int batch;		/* chunk size for buddy add/remove */
5f8dcc212   Mel Gorman   page-allocator: s...
187
188
189
  
  	/* Lists of pages, one per migrate type stored on the pcp-lists */
  	struct list_head lists[MIGRATE_PCPTYPES];
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
190
191
192
  };
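/*
 * Illustrative sketch, not part of this header: order-0 frees first land on
 * these per-CPU lists; once count passes high, roughly batch pages are handed
 * back to the buddy allocator in one go, and allocations refill the list
 * batch pages at a time.  The logic below is a hypothetical simplification of
 * free_hot_cold_page(); free_to_buddy_bulk() is an assumed helper.
 */
#if 0	/* example only */
static void example_pcp_free(struct per_cpu_pages *pcp, struct page *page, int migratetype)
{
	list_add(&page->lru, &pcp->lists[migratetype]);
	pcp->count++;
	if (pcp->count >= pcp->high)
		free_to_buddy_bulk(pcp, pcp->batch);	/* hypothetical helper */
}
#endif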
  
  struct per_cpu_pageset {
3dfa5721f   Christoph Lameter   Page allocator: g...
193
  	struct per_cpu_pages pcp;
4037d4522   Christoph Lameter   Move remote node ...
194
195
196
  #ifdef CONFIG_NUMA
  	s8 expire;
  #endif
2244b95a7   Christoph Lameter   [PATCH] zoned vm ...
197
  #ifdef CONFIG_SMP
df9ecaba3   Christoph Lameter   [PATCH] ZVC: Scal...
198
  	s8 stat_threshold;
2244b95a7   Christoph Lameter   [PATCH] zoned vm ...
199
200
  	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
  #endif
99dcc3e5a   Christoph Lameter   this_cpu: Page al...
201
  };
e7c8d5c99   Christoph Lameter   [PATCH] node loca...
202

97965478a   Christoph Lameter   mm: Get rid of __...
203
#endif /* !__GENERATING_BOUNDS_H */
2f1b62486   Christoph Lameter   [PATCH] reduce MA...
204
  enum zone_type {
4b51d6698   Christoph Lameter   [PATCH] optional ...
205
  #ifdef CONFIG_ZONE_DMA
  	/*
  	 * ZONE_DMA is used when there are devices that are not able
  	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
  	 * carve out the portion of memory that is needed for these devices.
  	 * The range is arch specific.
  	 *
  	 * Some examples
  	 *
  	 * Architecture		Limit
  	 * ---------------------------
  	 * parisc, ia64, sparc	<4G
  	 * s390			<2G
  	 * arm			Various
  	 * alpha		Unlimited or 0-16MB.
  	 *
  	 * i386, x86_64 and multiple other arches
  	 * 			<16M.
  	 */
  	ZONE_DMA,
4b51d6698   Christoph Lameter   [PATCH] optional ...
225
  #endif
fb0e7942b   Christoph Lameter   [PATCH] reduce MA...
226
  #ifdef CONFIG_ZONE_DMA32
  	/*
  	 * x86_64 needs two ZONE_DMAs because it supports devices that are
  	 * only able to do DMA to the lower 16M but also 32 bit devices that
  	 * can only do DMA areas below 4G.
  	 */
  	ZONE_DMA32,
fb0e7942b   Christoph Lameter   [PATCH] reduce MA...
233
  #endif
  	/*
  	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
  	 * performed on pages in ZONE_NORMAL if the DMA devices support
  	 * transfers to all addressable memory.
  	 */
  	ZONE_NORMAL,
e53ef38d0   Christoph Lameter   [PATCH] reduce MA...
240
  #ifdef CONFIG_HIGHMEM
  	/*
  	 * A memory area that is only addressable by the kernel through
  	 * mapping portions into its own address space. This is for example
  	 * used by i386 to allow the kernel to address the memory beyond
  	 * 900MB. The kernel will set up special mappings (page
  	 * table entries on i386) for each page that the kernel needs to
  	 * access.
  	 */
  	ZONE_HIGHMEM,
e53ef38d0   Christoph Lameter   [PATCH] reduce MA...
250
  #endif
2a1e274ac   Mel Gorman   Create the ZONE_M...
251
  	ZONE_MOVABLE,
97965478a   Christoph Lameter   mm: Get rid of __...
252
  	__MAX_NR_ZONES
2f1b62486   Christoph Lameter   [PATCH] reduce MA...
253
  };
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
254

97965478a   Christoph Lameter   mm: Get rid of __...
255
  #ifndef __GENERATING_BOUNDS_H
  /*
   * When a memory allocation must conform to specific limitations (such
   * as being suitable for DMA) the caller will pass in hints to the
   * allocator in the gfp_mask, in the zone modifier bits.  These bits
   * are used to select a priority ordered list of memory zones which
19655d348   Christoph Lameter   [PATCH] linearly ...
261
   * match the requested limits. See gfp_zone() in include/linux/gfp.h
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
262
   */
fb0e7942b   Christoph Lameter   [PATCH] reduce MA...
263

97965478a   Christoph Lameter   mm: Get rid of __...
264
  #if MAX_NR_ZONES < 2
4b51d6698   Christoph Lameter   [PATCH] optional ...
265
  #define ZONES_SHIFT 0
97965478a   Christoph Lameter   mm: Get rid of __...
266
  #elif MAX_NR_ZONES <= 2
19655d348   Christoph Lameter   [PATCH] linearly ...
267
  #define ZONES_SHIFT 1
97965478a   Christoph Lameter   mm: Get rid of __...
268
  #elif MAX_NR_ZONES <= 4
19655d348   Christoph Lameter   [PATCH] linearly ...
269
  #define ZONES_SHIFT 2
4b51d6698   Christoph Lameter   [PATCH] optional ...
270
271
  #else
  #error ZONES_SHIFT -- too many zones configured adjust calculation
fb0e7942b   Christoph Lameter   [PATCH] reduce MA...
272
  #endif
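/*
 * Illustrative note, not part of this header: ZONES_SHIFT is just
 * ceil(log2(MAX_NR_ZONES)) and sizes the zone number that gets packed into
 * page->flags.  The hypothetical helpers below only demonstrate the packing
 * idea; the real bit layout lives in the page flags code, not here.
 */
#if 0	/* example only */
#define EXAMPLE_ZONES_MASK	((1UL << ZONES_SHIFT) - 1)

static inline unsigned long example_pack_zone(unsigned long flags, unsigned long zone_nr)
{
	return (flags & ~EXAMPLE_ZONES_MASK) | zone_nr;	/* zone in the low bits, for illustration */
}

static inline unsigned long example_unpack_zone(unsigned long flags)
{
	return flags & EXAMPLE_ZONES_MASK;
}
#endif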
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
273

  struct zone_reclaim_stat {
  	/*
  	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
  	 * The higher the rotated/scanned ratio, the more valuable
  	 * that cache is.
  	 *
  	 * The anon LRU stats live in [0], file LRU stats in [1]
  	 */
  	unsigned long		recent_rotated[2];
  	unsigned long		recent_scanned[2];
  };
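/*
 * Illustrative sketch, not part of this header: reclaim compares how often
 * recently scanned pages were rotated back onto the anon ([0]) and file ([1])
 * LRUs; a higher rotated/scanned ratio means that cache is being reused and
 * is worth keeping.  The helper below is a hypothetical reduction of the
 * logic in mm/vmscan.c.
 */
#if 0	/* example only */
static unsigned long example_rotated_pct(struct zone_reclaim_stat *rs, int file)
{
	if (!rs->recent_scanned[file])
		return 0;
	return rs->recent_rotated[file] * 100 / rs->recent_scanned[file];
}
#endif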
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
286
287
  struct zone {
  	/* Fields commonly accessed by the page allocator */
418589663   Mel Gorman   page allocator: u...
288
289
290
  
  	/* zone watermarks, access with *_wmark_pages(zone) macros */
  	unsigned long watermark[NR_WMARK];
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
291
  	/*
  	 * When free pages are below this point, additional steps are taken
  	 * when reading the number of free pages to avoid per-cpu counter
  	 * drift allowing watermarks to be breached
  	 */
  	unsigned long percpu_drift_mark;
  
  	/*
	 * We don't know if the memory that we're going to allocate will be freeable
	 * or whether it will be released eventually, so to avoid totally wasting
	 * several GB of ram we must reserve some of the lower zone memory (otherwise
	 * we risk running OOM on the lower zones despite there being tons of freeable ram
  	 * on the higher zones). This array is recalculated at runtime if the
  	 * sysctl_lowmem_reserve_ratio sysctl changes.
  	 */
  	unsigned long		lowmem_reserve[MAX_NR_ZONES];
  	/*
  	 * This is a per-zone reserve of pages that should not be
  	 * considered dirtyable memory.
  	 */
  	unsigned long		dirty_balance_reserve;
e7c8d5c99   Christoph Lameter   [PATCH] node loca...
312
  #ifdef CONFIG_NUMA
d5f541ed6   Christoph Lameter   [PATCH] Add node ...
313
  	int node;
9614634fe   Christoph Lameter   [PATCH] ZVC/zone_...
314
315
316
  	/*
  	 * zone reclaim becomes active if more unmapped pages exist.
  	 */
8417bba4b   Christoph Lameter   [PATCH] Replace m...
317
  	unsigned long		min_unmapped_pages;
0ff38490c   Christoph Lameter   [PATCH] zone_recl...
318
  	unsigned long		min_slab_pages;
e7c8d5c99   Christoph Lameter   [PATCH] node loca...
319
  #endif
43cf38eb5   Tejun Heo   percpu: add __per...
320
  	struct per_cpu_pageset __percpu *pageset;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
321
322
323
324
  	/*
  	 * free areas of different sizes
  	 */
  	spinlock_t		lock;
93e4a89a8   KOSAKI Motohiro   mm: restore zone-...
325
  	int                     all_unreclaimable; /* All pages pinned */
bdc8cb984   Dave Hansen   [PATCH] memory ho...
326
327
328
329
  #ifdef CONFIG_MEMORY_HOTPLUG
  	/* see spanned/present_pages for more description */
  	seqlock_t		span_seqlock;
  #endif
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
330
  	struct free_area	free_area[MAX_ORDER];
835c134ec   Mel Gorman   Add a bitmap that...
331
332
  #ifndef CONFIG_SPARSEMEM
  	/*
d9c234005   Mel Gorman   Do not depend on ...
333
  	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
835c134ec   Mel Gorman   Add a bitmap that...
334
335
336
337
  	 * In SPARSEMEM, this map is stored in struct mem_section
  	 */
  	unsigned long		*pageblock_flags;
  #endif /* CONFIG_SPARSEMEM */
  #ifdef CONFIG_COMPACTION
  	/*
  	 * On compaction failure, 1<<compact_defer_shift compactions
  	 * are skipped before trying again. The number attempted since
  	 * last failure is tracked with compact_considered.
  	 */
  	unsigned int		compact_considered;
  	unsigned int		compact_defer_shift;
  #endif
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
347
348
349
350
  
  	ZONE_PADDING(_pad1_)
  
  	/* Fields commonly accessed by the page reclaim scanner */
6290df545   Johannes Weiner   mm: collect LRU l...
351
352
  	spinlock_t		lru_lock;
  	struct lruvec		lruvec;
4f98a2fee   Rik van Riel   vmscan: split LRU...
353

6e9015716   KOSAKI Motohiro   mm: introduce zon...
354
  	struct zone_reclaim_stat reclaim_stat;
4f98a2fee   Rik van Riel   vmscan: split LRU...
355

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
356
  	unsigned long		pages_scanned;	   /* since last reclaim */
e815af95f   David Rientjes   oom: change all_u...
357
  	unsigned long		flags;		   /* zone flags, see below */
753ee7289   Martin Hicks   [PATCH] VM: early...
358

2244b95a7   Christoph Lameter   [PATCH] zoned vm ...
359
360
  	/* Zone statistics */
  	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
9eeff2395   Christoph Lameter   [PATCH] Zone recl...
361
362
  
  	/*
556adecba   Rik van Riel   vmscan: second ch...
363
364
365
366
  	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
  	 * this zone's LRU.  Maintained by the pageout code.
  	 */
  	unsigned int inactive_ratio;
  
  	ZONE_PADDING(_pad2_)
  	/* Rarely used or read-mostly fields */
  
  	/*
  	 * wait_table		-- the array holding the hash table
02b694dea   Yasunori Goto   [PATCH] wait_tabl...
373
  	 * wait_table_hash_nr_entries	-- the size of the hash table array
  	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
  	 *
  	 * The purpose of all these is to keep track of the people
  	 * waiting for a page to become available and make them
  	 * runnable again when possible. The trouble is that this
  	 * consumes a lot of space, especially when so few things
  	 * wait on pages at a given time. So instead of using
  	 * per-page waitqueues, we use a waitqueue hash table.
  	 *
  	 * The bucket discipline is to sleep on the same queue when
  	 * colliding and wake all in that wait queue when removing.
  	 * When something wakes, it must check to be sure its page is
  	 * truly available, a la thundering herd. The cost of a
  	 * collision is great, but given the expected load of the
  	 * table, they should be so rare as to be outweighed by the
  	 * benefits from the saved space.
  	 *
  	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
  	 * primary users of these fields, and in mm/page_alloc.c
  	 * free_area_init_core() performs the initialization of them.
  	 */
  	wait_queue_head_t	* wait_table;
02b694dea   Yasunori Goto   [PATCH] wait_tabl...
396
  	unsigned long		wait_table_hash_nr_entries;
  	unsigned long		wait_table_bits;
  
  	/*
  	 * Discontig memory support fields.
  	 */
  	struct pglist_data	*zone_pgdat;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
403
404
  	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
  	unsigned long		zone_start_pfn;
  	/*
  	 * zone_start_pfn, spanned_pages and present_pages are all
  	 * protected by span_seqlock.  It is a seqlock because it has
  	 * to be read outside of zone->lock, and it is done in the main
  	 * allocator path.  But, it is written quite infrequently.
  	 *
  	 * The lock is declared along with zone->lock because it is
  	 * frequently read in proximity to zone->lock.  It's good to
  	 * give them a chance of being in the same cacheline.
  	 */
  	unsigned long		spanned_pages;	/* total size, including holes */
  	unsigned long		present_pages;	/* amount of memory (excluding holes) */
  
  	/*
  	 * rarely used fields:
  	 */
15ad7cdcf   Helge Deller   [PATCH] struct se...
421
  	const char		*name;
22fc6eccb   Ravikiran G Thirumalai   [PATCH] Change ma...
422
  } ____cacheline_internodealigned_in_smp;
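/*
 * Illustrative sketch, not part of this header: lowmem_reserve[] keeps a
 * higher-zone allocation from exhausting a lower zone.  The helper below is a
 * hypothetical, stripped-down version of the core test in
 * __zone_watermark_ok(): free pages must cover the watermark plus the reserve
 * held back from allocations that could have used classzone_idx.
 */
#if 0	/* example only */
static bool example_watermark_ok(struct zone *z, unsigned long mark,
				 int classzone_idx, unsigned long free_pages)
{
	return free_pages >= mark + z->lowmem_reserve[classzone_idx];
}
#endif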
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
423

e815af95f   David Rientjes   oom: change all_u...
424
  typedef enum {
e815af95f   David Rientjes   oom: change all_u...
425
  	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
098d7f128   David Rientjes   oom: add per-zone...
426
  	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
0e093d997   Mel Gorman   writeback: do not...
427
428
429
  	ZONE_CONGESTED,			/* zone has many dirty pages backed by
  					 * a congested BDI
  					 */
  } zone_flags_t;
  
  static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
  {
  	set_bit(flag, &zone->flags);
  }
  
  static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
  {
  	return test_and_set_bit(flag, &zone->flags);
  }
e815af95f   David Rientjes   oom: change all_u...
441
442
443
444
  static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
  {
  	clear_bit(flag, &zone->flags);
  }
0e093d997   Mel Gorman   writeback: do not...
445
446
447
448
  static inline int zone_is_reclaim_congested(const struct zone *zone)
  {
  	return test_bit(ZONE_CONGESTED, &zone->flags);
  }
e815af95f   David Rientjes   oom: change all_u...
449
450
451
452
  static inline int zone_is_reclaim_locked(const struct zone *zone)
  {
  	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
  }
d773ed6b8   David Rientjes   mm: test and set ...
453

098d7f128   David Rientjes   oom: add per-zone...
454
455
456
457
  static inline int zone_is_oom_locked(const struct zone *zone)
  {
  	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
  }
e815af95f   David Rientjes   oom: change all_u...
458

  /*
   * The "priority" of VM scanning is how much of the queues we will scan in one
   * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
   * queues ("queue_length >> 12") during an aging round.
   */
  #define DEF_PRIORITY 12
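/*
 * Illustrative sketch, not part of this header: reclaim walks priorities from
 * DEF_PRIORITY down to 0, scanning lru_size >> priority pages per pass, so
 * each pass looks at twice as much as the previous one.  This skeleton only
 * shows the shape of that loop; example_shrink_list() and
 * example_enough_reclaimed() are hypothetical.
 */
#if 0	/* example only */
static void example_reclaim(unsigned long lru_size)
{
	int priority;

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		/* first pass: lru_size >> 12 == 1/4096th; final pass: everything */
		example_shrink_list(lru_size >> priority);
		if (example_enough_reclaimed())
			break;
	}
}
#endif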
9276b1bc9   Paul Jackson   [PATCH] memory pa...
465
466
467
468
  /* Maximum number of zones on a zonelist */
  #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
  
  #ifdef CONFIG_NUMA
523b94585   Christoph Lameter   Memoryless nodes:...
469
470
  
  /*
25a64ec1e   Pete Zaitcev   fix comment spell...
471
   * The NUMA zonelists are doubled because we need zonelists that restrict the
523b94585   Christoph Lameter   Memoryless nodes:...
472
473
   * allocations to a single node for GFP_THISNODE.
   *
54a6eb5c4   Mel Gorman   mm: use two zonel...
474
475
   * [0]	: Zonelist with fallback
   * [1]	: No fallback (GFP_THISNODE)
523b94585   Christoph Lameter   Memoryless nodes:...
476
   */
54a6eb5c4   Mel Gorman   mm: use two zonel...
477
  #define MAX_ZONELISTS 2
523b94585   Christoph Lameter   Memoryless nodes:...
478

  /*
   * We cache key information from each zonelist for smaller cache
   * footprint when scanning for free pages in get_page_from_freelist().
   *
   * 1) The BITMAP fullzones tracks which zones in a zonelist have come
   *    up short of free memory since the last time (last_fullzone_zap)
   *    we zero'd fullzones.
   * 2) The array z_to_n[] maps each zone in the zonelist to its node
   *    id, so that we can efficiently evaluate whether that node is
 *    set in the current task's mems_allowed.
   *
   * Both fullzones and z_to_n[] are one-to-one with the zonelist,
   * indexed by a zones offset in the zonelist zones[] array.
   *
   * The get_page_from_freelist() routine does two scans.  During the
   * first scan, we skip zones whose corresponding bit in 'fullzones'
   * is set or whose corresponding node in current->mems_allowed (which
   * comes from cpusets) is not set.  During the second scan, we bypass
   * this zonelist_cache, to ensure we look methodically at each zone.
   *
   * Once per second, we zero out (zap) fullzones, forcing us to
   * reconsider nodes that might have regained more free memory.
   * The field last_full_zap is the time we last zapped fullzones.
   *
   * This mechanism reduces the amount of time we waste repeatedly
 * reexamining zones for free memory when they just came up low on
 * memory a moment ago.
   *
   * The zonelist_cache struct members logically belong in struct
   * zonelist.  However, the mempolicy zonelists constructed for
   * MPOL_BIND are intentionally variable length (and usually much
   * shorter).  A general purpose mechanism for handling structs with
   * multiple variable length members is more mechanism than we want
   * here.  We resort to some special case hackery instead.
   *
   * The MPOL_BIND zonelists don't need this zonelist_cache (in good
   * part because they are shorter), so we put the fixed length stuff
   * at the front of the zonelist struct, ending in a variable length
   * zones[], as is needed by MPOL_BIND.
   *
   * Then we put the optional zonelist cache on the end of the zonelist
   * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in
   * the fixed length portion at the front of the struct.  This pointer
   * both enables us to find the zonelist cache, and in the case of
   * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL)
   * to know that the zonelist cache is not there.
   *
   * The end result is that struct zonelists come in two flavors:
   *  1) The full, fixed length version, shown below, and
   *  2) The custom zonelists for MPOL_BIND.
   * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
   *
   * Even though there may be multiple CPU cores on a node modifying
   * fullzones or last_full_zap in the same zonelist_cache at the same
   * time, we don't lock it.  This is just hint data - if it is wrong now
   * and then, the allocator will still function, perhaps a bit slower.
   */
  
  
  struct zonelist_cache {
9276b1bc9   Paul Jackson   [PATCH] memory pa...
539
  	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
7253f4ef0   Paul Jackson   [PATCH] memory pa...
540
  	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
9276b1bc9   Paul Jackson   [PATCH] memory pa...
541
542
543
  	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
  };
  #else
54a6eb5c4   Mel Gorman   mm: use two zonel...
544
  #define MAX_ZONELISTS 1
9276b1bc9   Paul Jackson   [PATCH] memory pa...
545
546
  struct zonelist_cache;
  #endif
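/*
 * Illustrative sketch, not part of this header: during the first scan of a
 * zonelist the allocator consults this cache so zones that recently ran dry,
 * or that sit on nodes outside the task's mems_allowed, are skipped without
 * touching the zone itself.  The helper below is a hypothetical reduction of
 * the zlc_* functions in mm/page_alloc.c.
 */
#if 0	/* example only */
static bool example_zlc_worth_trying(struct zonelist_cache *zlc, int z_offset,
				     nodemask_t *allowed)
{
	if (time_after(jiffies, zlc->last_full_zap + HZ))
		return true;				/* cache stale; due for a zap */
	if (test_bit(z_offset, zlc->fullzones))
		return false;				/* zone came up short recently */
	return node_isset(zlc->z_to_n[z_offset], *allowed);
}
#endif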
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
547
  /*
   * This struct contains information about a zone in a zonelist. It is stored
   * here to avoid dereferences into large structures and lookups of tables
   */
  struct zoneref {
  	struct zone *zone;	/* Pointer to actual zone */
  	int zone_idx;		/* zone_idx(zoneref->zone) */
  };
  
  /*
   * One allocation request operates on a zonelist. A zonelist
   * is a list of zones, the first one is the 'goal' of the
   * allocation, the other zones are fallback zones, in decreasing
   * priority.
   *
9276b1bc9   Paul Jackson   [PATCH] memory pa...
562
563
   * If zlcache_ptr is not NULL, then it is just the address of zlcache,
   * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
 *
   * To speed the reading of the zonelist, the zonerefs contain the zone index
   * of the entry being read. Helper functions to access information given
   * a struct zoneref are
   *
   * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
   * zonelist_zone_idx()	- Return the index of the zone for an entry
   * zonelist_node_idx()	- Return the index of the node for an entry
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
572
573
   */
  struct zonelist {
9276b1bc9   Paul Jackson   [PATCH] memory pa...
574
  	struct zonelist_cache *zlcache_ptr;		     // NULL or &zlcache
dd1a239f6   Mel Gorman   mm: have zonelist...
575
  	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
9276b1bc9   Paul Jackson   [PATCH] memory pa...
576
577
578
  #ifdef CONFIG_NUMA
  	struct zonelist_cache zlcache;			     // optional ...
  #endif
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
579
  };
0ee332c14   Tejun Heo   memblock: Kill ea...
580
  #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
c713216de   Mel Gorman   [PATCH] Introduce...
581
582
583
584
585
  struct node_active_region {
  	unsigned long start_pfn;
  	unsigned long end_pfn;
  	int nid;
  };
0ee332c14   Tejun Heo   memblock: Kill ea...
586
  #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
587

5b99cd0ef   Heiko Carstens   [PATCH] own heade...
588
589
590
591
  #ifndef CONFIG_DISCONTIGMEM
  /* The array of struct pages - for discontigmem use pgdat->lmem_map */
  extern struct page *mem_map;
  #endif
  /*
   * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
   * (mostly NUMA machines?) to denote a higher-level memory zone than the
   * zone denotes.
   *
   * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
   *
   * Memory statistics and page replacement data structures are maintained on a
   * per-zone basis.
   */
  struct bootmem_data;
  typedef struct pglist_data {
  	struct zone node_zones[MAX_NR_ZONES];
523b94585   Christoph Lameter   Memoryless nodes:...
606
  	struct zonelist node_zonelists[MAX_ZONELISTS];
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
607
  	int nr_zones;
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
608
  #ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
609
  	struct page *node_mem_map;
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
610
611
612
  #ifdef CONFIG_CGROUP_MEM_RES_CTLR
  	struct page_cgroup *node_page_cgroup;
  #endif
d41dee369   Andy Whitcroft   [PATCH] sparsemem...
613
  #endif
08677214e   Yinghai Lu   x86: Make 64 bit ...
614
  #ifndef CONFIG_NO_BOOTMEM
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
615
  	struct bootmem_data *bdata;
08677214e   Yinghai Lu   x86: Make 64 bit ...
616
  #endif
  #ifdef CONFIG_MEMORY_HOTPLUG
  	/*
  	 * Must be held any time you expect node_start_pfn, node_present_pages
  	 * or node_spanned_pages stay constant.  Holding this will also
  	 * guarantee that any pfn_valid() stays that way.
  	 *
  	 * Nests above zone->lock and zone->size_seqlock.
  	 */
  	spinlock_t node_size_lock;
  #endif
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
627
628
629
630
631
  	unsigned long node_start_pfn;
  	unsigned long node_present_pages; /* total number of physical pages */
  	unsigned long node_spanned_pages; /* total size of physical page
  					     range, including holes */
  	int node_id;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
632
633
634
  	wait_queue_head_t kswapd_wait;
  	struct task_struct *kswapd;
  	int kswapd_max_order;
995047488   Mel Gorman   mm: kswapd: stop ...
635
  	enum zone_type classzone_idx;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
636
637
638
639
  } pg_data_t;
  
  #define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
  #define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
d41dee369   Andy Whitcroft   [PATCH] sparsemem...
640
  #ifdef CONFIG_FLAT_NODE_MEM_MAP
408fde81c   Dave Hansen   [PATCH] remove no...
641
  #define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
d41dee369   Andy Whitcroft   [PATCH] sparsemem...
642
643
644
  #else
  #define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
  #endif
408fde81c   Dave Hansen   [PATCH] remove no...
645
  #define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
646

  #define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
  
  #define node_end_pfn(nid) ({\
  	pg_data_t *__pgdat = NODE_DATA(nid);\
  	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
  })
208d54e55   Dave Hansen   [PATCH] memory ho...
653
  #include <linux/memory_hotplug.h>
4eaf3f643   Haicheng Li   mem-hotplug: fix ...
654
  extern struct mutex zonelists_mutex;
1f522509c   Haicheng Li   mem-hotplug: avoi...
655
  void build_all_zonelists(void *data);
995047488   Mel Gorman   mm: kswapd: stop ...
656
  void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
88f5acf88   Mel Gorman   mm: page allocato...
657
658
659
  bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
  		int classzone_idx, int alloc_flags);
  bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
7fb1d9fca   Rohit Seth   [PATCH] mm: __all...
660
  		int classzone_idx, int alloc_flags);
a2f3aa025   Dave Hansen   [PATCH] Fix spars...
661
662
663
664
  enum memmap_context {
  	MEMMAP_EARLY,
  	MEMMAP_HOTPLUG,
  };
718127cc3   Yasunori Goto   [PATCH] wait_tabl...
665
  extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
a2f3aa025   Dave Hansen   [PATCH] Fix spars...
666
667
  				     unsigned long size,
  				     enum memmap_context context);
718127cc3   Yasunori Goto   [PATCH] wait_tabl...
668

  #ifdef CONFIG_HAVE_MEMORY_PRESENT
  void memory_present(int nid, unsigned long start, unsigned long end);
  #else
  static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
  #endif
  #ifdef CONFIG_HAVE_MEMORYLESS_NODES
  int local_memory_node(int node_id);
  #else
  static inline int local_memory_node(int node_id) { return node_id; };
  #endif
  #ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
  unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
  #endif
  
  /*
   * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
   */
  #define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)
f3fe65122   Con Kolivas   [PATCH] mm: add p...
687
688
689
690
  static inline int populated_zone(struct zone *zone)
  {
  	return (!!zone->present_pages);
  }
2a1e274ac   Mel Gorman   Create the ZONE_M...
691
692
693
694
  extern int movable_zone;
  
  static inline int zone_movable_is_highmem(void)
  {
0ee332c14   Tejun Heo   memblock: Kill ea...
695
  #if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE)
  	return movable_zone == ZONE_HIGHMEM;
  #else
  	return 0;
  #endif
  }
2f1b62486   Christoph Lameter   [PATCH] reduce MA...
701
  static inline int is_highmem_idx(enum zone_type idx)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
702
  {
e53ef38d0   Christoph Lameter   [PATCH] reduce MA...
703
  #ifdef CONFIG_HIGHMEM
2a1e274ac   Mel Gorman   Create the ZONE_M...
704
705
  	return (idx == ZONE_HIGHMEM ||
  		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
e53ef38d0   Christoph Lameter   [PATCH] reduce MA...
706
707
708
  #else
  	return 0;
  #endif
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
709
  }
2f1b62486   Christoph Lameter   [PATCH] reduce MA...
710
  static inline int is_normal_idx(enum zone_type idx)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
711
712
713
  {
  	return (idx == ZONE_NORMAL);
  }
9328b8faa   Nick Piggin   [PATCH] mm: dma32...
714

  /**
   * is_highmem - helper function to quickly check if a struct zone is a 
   *              highmem zone or not.  This is an attempt to keep references
   *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
   * @zone - pointer to struct zone variable
   */
  static inline int is_highmem(struct zone *zone)
  {
e53ef38d0   Christoph Lameter   [PATCH] reduce MA...
723
  #ifdef CONFIG_HIGHMEM
ddc81ed2c   Harvey Harrison   remove sparse war...
724
725
726
727
  	int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
  	return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
  	       (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
  		zone_movable_is_highmem());
e53ef38d0   Christoph Lameter   [PATCH] reduce MA...
728
729
730
  #else
  	return 0;
  #endif
  }
  
  static inline int is_normal(struct zone *zone)
  {
  	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
  }
9328b8faa   Nick Piggin   [PATCH] mm: dma32...
737
738
  static inline int is_dma32(struct zone *zone)
  {
fb0e7942b   Christoph Lameter   [PATCH] reduce MA...
739
  #ifdef CONFIG_ZONE_DMA32
9328b8faa   Nick Piggin   [PATCH] mm: dma32...
740
  	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
fb0e7942b   Christoph Lameter   [PATCH] reduce MA...
741
742
743
  #else
  	return 0;
  #endif
9328b8faa   Nick Piggin   [PATCH] mm: dma32...
744
745
746
747
  }
  
  static inline int is_dma(struct zone *zone)
  {
4b51d6698   Christoph Lameter   [PATCH] optional ...
748
  #ifdef CONFIG_ZONE_DMA
9328b8faa   Nick Piggin   [PATCH] mm: dma32...
749
  	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
4b51d6698   Christoph Lameter   [PATCH] optional ...
750
751
752
  #else
  	return 0;
  #endif
9328b8faa   Nick Piggin   [PATCH] mm: dma32...
753
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
754
755
  /* These two functions are used to setup the per zone pages min values */
  struct ctl_table;
8d65af789   Alexey Dobriyan   sysctl: remove "s...
756
  int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
757
758
  					void __user *, size_t *, loff_t *);
  extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
8d65af789   Alexey Dobriyan   sysctl: remove "s...
759
  int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
760
  					void __user *, size_t *, loff_t *);
8d65af789   Alexey Dobriyan   sysctl: remove "s...
761
  int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
8ad4b1fb8   Rohit Seth   [PATCH] Make high...
762
  					void __user *, size_t *, loff_t *);
9614634fe   Christoph Lameter   [PATCH] ZVC/zone_...
763
  int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
8d65af789   Alexey Dobriyan   sysctl: remove "s...
764
  			void __user *, size_t *, loff_t *);
0ff38490c   Christoph Lameter   [PATCH] zone_recl...
765
  int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
8d65af789   Alexey Dobriyan   sysctl: remove "s...
766
  			void __user *, size_t *, loff_t *);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
767

f0c0b2b80   KAMEZAWA Hiroyuki   change zonelist o...
768
  extern int numa_zonelist_order_handler(struct ctl_table *, int,
8d65af789   Alexey Dobriyan   sysctl: remove "s...
769
  			void __user *, size_t *, loff_t *);
f0c0b2b80   KAMEZAWA Hiroyuki   change zonelist o...
770
771
  extern char numa_zonelist_order[];
  #define NUMA_ZONELIST_ORDER_LEN 16	/* string buffer size */
93b7504e3   Dave Hansen   [PATCH] Introduce...
772
  #ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
773
774
775
776
  
  extern struct pglist_data contig_page_data;
  #define NODE_DATA(nid)		(&contig_page_data)
  #define NODE_MEM_MAP(nid)	mem_map
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
777

93b7504e3   Dave Hansen   [PATCH] Introduce...
778
  #else /* CONFIG_NEED_MULTIPLE_NODES */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
779
780
  
  #include <asm/mmzone.h>
93b7504e3   Dave Hansen   [PATCH] Introduce...
781
  #endif /* !CONFIG_NEED_MULTIPLE_NODES */
348f8b6c4   Dave Hansen   [PATCH] sparsemem...
782

95144c788   KAMEZAWA Hiroyuki   [PATCH] uninline ...
783
784
785
  extern struct pglist_data *first_online_pgdat(void);
  extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
  extern struct zone *next_zone(struct zone *zone);
8357f8695   KAMEZAWA Hiroyuki   [PATCH] define fo...
786
787
  
  /**
12d15f0d5   Fernando Luis Vazquez Cao   for_each_online_p...
788
   * for_each_online_pgdat - helper macro to iterate over all online nodes
   * @pgdat - pointer to a pg_data_t variable
   */
  #define for_each_online_pgdat(pgdat)			\
  	for (pgdat = first_online_pgdat();		\
  	     pgdat;					\
  	     pgdat = next_online_pgdat(pgdat))
  /**
   * for_each_zone - helper macro to iterate over all memory zones
   * @zone - pointer to struct zone variable
   *
   * The user only needs to declare the zone variable, for_each_zone
   * fills it in.
   */
  #define for_each_zone(zone)			        \
  	for (zone = (first_online_pgdat())->node_zones; \
  	     zone;					\
  	     zone = next_zone(zone))
  #define for_each_populated_zone(zone)		        \
  	for (zone = (first_online_pgdat())->node_zones; \
  	     zone;					\
  	     zone = next_zone(zone))			\
  		if (!populated_zone(zone))		\
  			; /* do nothing */		\
  		else
  static inline struct zone *zonelist_zone(struct zoneref *zoneref)
  {
  	return zoneref->zone;
  }
  
  static inline int zonelist_zone_idx(struct zoneref *zoneref)
  {
  	return zoneref->zone_idx;
  }
  
  static inline int zonelist_node_idx(struct zoneref *zoneref)
  {
  #ifdef CONFIG_NUMA
  	/* zone_to_nid not available in this context */
  	return zoneref->zone->node;
  #else
  	return 0;
  #endif /* CONFIG_NUMA */
  }
  /**
   * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
   * @z - The cursor used as a starting point for the search
   * @highest_zoneidx - The zone index of the highest zone to return
   * @nodes - An optional nodemask to filter the zonelist with
   * @zone - The first suitable zone found is returned via this parameter
   *
   * This function returns the next zone at or below a given zone index that is
   * within the allowed nodemask using a cursor as the starting point for the
5bead2a06   Mel Gorman   mm: mark the corr...
841
842
843
   * search. The zoneref returned is a cursor that represents the current zone
   * being examined. It should be advanced by one before calling
   * next_zones_zonelist again.
   */
  struct zoneref *next_zones_zonelist(struct zoneref *z,
  					enum zone_type highest_zoneidx,
  					nodemask_t *nodes,
  					struct zone **zone);
dd1a239f6   Mel Gorman   mm: have zonelist...
849

  /**
   * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
   * @zonelist - The zonelist to search for a suitable zone
   * @highest_zoneidx - The zone index of the highest zone to return
   * @nodes - An optional nodemask to filter the zonelist with
   * @zone - The first suitable zone found is returned via this parameter
   *
   * This function returns the first zone at or below a given zone index that is
   * within the allowed nodemask. The zoneref returned is a cursor that can be
5bead2a06   Mel Gorman   mm: mark the corr...
859
860
   * used to iterate the zonelist with next_zones_zonelist by advancing it by
   * one before calling.
19770b326   Mel Gorman   mm: filter based ...
861
   */
dd1a239f6   Mel Gorman   mm: have zonelist...
862
  static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
19770b326   Mel Gorman   mm: filter based ...
863
864
865
  					enum zone_type highest_zoneidx,
  					nodemask_t *nodes,
  					struct zone **zone)
54a6eb5c4   Mel Gorman   mm: use two zonel...
866
  {
19770b326   Mel Gorman   mm: filter based ...
867
868
  	return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
  								zone);
54a6eb5c4   Mel Gorman   mm: use two zonel...
869
  }
  /**
   * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
   * @zone - The current zone in the iterator
   * @z - The current pointer within zonelist->zones being iterated
   * @zlist - The zonelist being iterated
   * @highidx - The zone index of the highest zone to return
   * @nodemask - Nodemask allowed by the allocator
   *
 * This iterator iterates through all zones at or below a given zone index and
   * within a given nodemask
   */
  #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
  	for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone);	\
  		zone;							\
5bead2a06   Mel Gorman   mm: mark the corr...
884
  		z = next_zones_zonelist(++z, highidx, nodemask, &zone))	\
  
  /**
   * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
   * @zone - The current zone in the iterator
   * @z - The current pointer within zonelist->zones being iterated
   * @zlist - The zonelist being iterated
   * @highidx - The zone index of the highest zone to return
   *
 * This iterator iterates through all zones at or below a given zone index.
   */
  #define for_each_zone_zonelist(zone, z, zlist, highidx) \
19770b326   Mel Gorman   mm: filter based ...
896
  	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
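/*
 * Illustrative sketch, not part of this header: a typical user walks the
 * zonelist from the highest allowed zone downwards and takes the first zone
 * that passes its checks.  This is a hypothetical skeleton in the spirit of
 * get_page_from_freelist(); the real code also checks watermarks, cpusets
 * and the zonelist cache.
 */
#if 0	/* example only */
static struct zone *example_pick_zone(struct zonelist *zlist,
				      enum zone_type high_zoneidx,
				      nodemask_t *nodemask)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist_nodemask(zone, z, zlist, high_zoneidx, nodemask)
		if (populated_zone(zone))
			return zone;
	return NULL;
}
#endif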
54a6eb5c4   Mel Gorman   mm: use two zonel...
897

d41dee369   Andy Whitcroft   [PATCH] sparsemem...
898
899
900
  #ifdef CONFIG_SPARSEMEM
  #include <asm/sparsemem.h>
  #endif
c713216de   Mel Gorman   [PATCH] Introduce...
901
  #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
0ee332c14   Tejun Heo   memblock: Kill ea...
902
  	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
b45445684   Andrew Morton   mm: make early_pf...
903
904
905
906
  static inline unsigned long early_pfn_to_nid(unsigned long pfn)
  {
  	return 0;
  }
b159d43fb   Andy Whitcroft   [PATCH] generify ...
907
  #endif
2bdaf115b   Andy Whitcroft   [PATCH] flatmem s...
908
909
910
  #ifdef CONFIG_FLATMEM
  #define pfn_to_nid(pfn)		(0)
  #endif
  #ifdef CONFIG_SPARSEMEM
  
  /*
   * SECTION_SHIFT    		#bits space required to store a section #
   *
   * PA_SECTION_SHIFT		physical address to/from section number
   * PFN_SECTION_SHIFT		pfn to/from section number
   */
  #define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)
  
  #define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
  #define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)
  
  #define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)
  
  #define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
  #define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))
835c134ec   Mel Gorman   Add a bitmap that...
928
  #define SECTION_BLOCKFLAGS_BITS \
d9c234005   Mel Gorman   Do not depend on ...
929
  	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
835c134ec   Mel Gorman   Add a bitmap that...
930

d41dee369   Andy Whitcroft   [PATCH] sparsemem...
931
932
933
  #if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
  #error Allocator MAX_ORDER exceeds SECTION_SIZE
  #endif
e3c40f379   Daniel Kiper   mm: pfn_to_sectio...
934
935
  #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
  #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
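/*
 * Illustrative note, not part of this header: assuming a hypothetical
 * SECTION_SIZE_BITS of 27 and PAGE_SHIFT of 12, PFN_SECTION_SHIFT is 15, so
 * every section spans 32768 pages (128MB) and pfn_to_section_nr(0x12345)
 * evaluates to 2.  The helper below only demonstrates the conversion.
 */
#if 0	/* example only */
static inline int example_same_section(unsigned long pfn1, unsigned long pfn2)
{
	return pfn_to_section_nr(pfn1) == pfn_to_section_nr(pfn2);
}
#endif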
a539f3533   Daniel Kiper   mm: add SECTION_A...
936
937
  #define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
  #define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
d41dee369   Andy Whitcroft   [PATCH] sparsemem...
938
  struct page;
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
939
  struct page_cgroup;
d41dee369   Andy Whitcroft   [PATCH] sparsemem...
940
  struct mem_section {
  	/*
  	 * This is, logically, a pointer to an array of struct
  	 * pages.  However, it is stored with some other magic.
  	 * (see sparse.c::sparse_init_one_section())
  	 *
30c253e6d   Andy Whitcroft   [PATCH] sparsemem...
946
947
948
949
  	 * Additionally during early boot we encode node id of
  	 * the location of the section here to guide allocation.
  	 * (see sparse.c::memory_present())
  	 *
29751f699   Andy Whitcroft   [PATCH] sparsemem...
950
951
952
953
  	 * Making it a UL at least makes someone do a cast
  	 * before using it wrong.
  	 */
  	unsigned long section_mem_map;
5c0e30664   Mel Gorman   Fix corruption of...
954
955
956
  
  	/* See declaration of similar field in struct zone */
  	unsigned long *pageblock_flags;
  #ifdef CONFIG_CGROUP_MEM_RES_CTLR
  	/*
  	 * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use
  	 * section. (see memcontrol.h/page_cgroup.h about this.)
  	 */
  	struct page_cgroup *page_cgroup;
  	unsigned long pad;
  #endif
d41dee369   Andy Whitcroft   [PATCH] sparsemem...
965
  };
  #ifdef CONFIG_SPARSEMEM_EXTREME
  #define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
  #else
  #define SECTIONS_PER_ROOT	1
  #endif
802f192e4   Bob Picco   [PATCH] SPARSEMEM...
971

3e347261a   Bob Picco   [PATCH] sparsemem...
972
  #define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
0faa56389   Marcelo Roberto Jimenez   mm: fix NR_SECTIO...
973
  #define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
3e347261a   Bob Picco   [PATCH] sparsemem...
974
  #define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)
802f192e4   Bob Picco   [PATCH] SPARSEMEM...
975

3e347261a   Bob Picco   [PATCH] sparsemem...
976
977
  #ifdef CONFIG_SPARSEMEM_EXTREME
  extern struct mem_section *mem_section[NR_SECTION_ROOTS];
802f192e4   Bob Picco   [PATCH] SPARSEMEM...
978
  #else
3e347261a   Bob Picco   [PATCH] sparsemem...
979
980
  extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
  #endif
d41dee369   Andy Whitcroft   [PATCH] sparsemem...
981

29751f699   Andy Whitcroft   [PATCH] sparsemem...
982
983
  static inline struct mem_section *__nr_to_section(unsigned long nr)
  {
3e347261a   Bob Picco   [PATCH] sparsemem...
984
985
986
  	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
  		return NULL;
  	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
29751f699   Andy Whitcroft   [PATCH] sparsemem...
987
  }
4ca644d97   Dave Hansen   [PATCH] memory ho...
988
  extern int __section_nr(struct mem_section* ms);
047532787   Yasunori Goto   memory hotplug: r...
989
  extern unsigned long usemap_size(void);
  
  /*
   * We use the lower bits of the mem_map pointer to store
   * a little bit of information.  There should be at least
   * 3 bits here due to 32-bit alignment.
   */
  #define	SECTION_MARKED_PRESENT	(1UL<<0)
  #define SECTION_HAS_MEM_MAP	(1UL<<1)
  #define SECTION_MAP_LAST_BIT	(1UL<<2)
  #define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
30c253e6d   Andy Whitcroft   [PATCH] sparsemem...
1000
  #define SECTION_NID_SHIFT	2
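/*
 * Illustrative sketch, not part of this header: before the real mem_map is
 * hooked up, early boot parks the node id in section_mem_map above the flag
 * bits.  The hypothetical helpers below mirror what mm/sparse.c does with
 * SECTION_NID_SHIFT when encoding and decoding that early node id.
 */
#if 0	/* example only */
static inline unsigned long example_encode_early_nid(int nid)
{
	return (unsigned long)nid << SECTION_NID_SHIFT;
}

static inline int example_early_nid(struct mem_section *section)
{
	return section->section_mem_map >> SECTION_NID_SHIFT;
}
#endif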
  
  static inline struct page *__section_mem_map_addr(struct mem_section *section)
  {
  	unsigned long map = section->section_mem_map;
  	map &= SECTION_MAP_MASK;
  	return (struct page *)map;
  }
540557b94   Andy Whitcroft   sparsemem: record...
1008
  static inline int present_section(struct mem_section *section)
29751f699   Andy Whitcroft   [PATCH] sparsemem...
1009
  {
802f192e4   Bob Picco   [PATCH] SPARSEMEM...
1010
  	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
29751f699   Andy Whitcroft   [PATCH] sparsemem...
1011
  }
540557b94   Andy Whitcroft   sparsemem: record...
1012
1013
1014
1015
1016
1017
  static inline int present_section_nr(unsigned long nr)
  {
  	return present_section(__nr_to_section(nr));
  }
  
  static inline int valid_section(struct mem_section *section)
29751f699   Andy Whitcroft   [PATCH] sparsemem...
1018
  {
802f192e4   Bob Picco   [PATCH] SPARSEMEM...
1019
  	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
  }
  
  static inline int valid_section_nr(unsigned long nr)
  {
  	return valid_section(__nr_to_section(nr));
  }
d41dee369   Andy Whitcroft   [PATCH] sparsemem...
1026
1027
  static inline struct mem_section *__pfn_to_section(unsigned long pfn)
  {
29751f699   Andy Whitcroft   [PATCH] sparsemem...
1028
  	return __nr_to_section(pfn_to_section_nr(pfn));
d41dee369   Andy Whitcroft   [PATCH] sparsemem...
1029
  }
7b7bf499f   Will Deacon   ARM: 6913/1: spar...
1030
  #ifndef CONFIG_HAVE_ARCH_PFN_VALID
d41dee369   Andy Whitcroft   [PATCH] sparsemem...
1031
1032
1033
1034
  static inline int pfn_valid(unsigned long pfn)
  {
  	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
  		return 0;
29751f699   Andy Whitcroft   [PATCH] sparsemem...
1035
  	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
d41dee369   Andy Whitcroft   [PATCH] sparsemem...
1036
  }
7b7bf499f   Will Deacon   ARM: 6913/1: spar...
1037
  #endif
d41dee369   Andy Whitcroft   [PATCH] sparsemem...
1038

  static inline int pfn_present(unsigned long pfn)
  {
  	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
  		return 0;
  	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
  }
d41dee369   Andy Whitcroft   [PATCH] sparsemem...
1045
1046
1047
1048
1049
1050
  /*
   * These are _only_ used during initialisation, therefore they
   * can use __initdata ...  They could have names to indicate
   * this restriction.
   */
  #ifdef CONFIG_NUMA
  #define pfn_to_nid(pfn)							\
  ({									\
  	unsigned long __pfn_to_nid_pfn = (pfn);				\
  	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
  })
2bdaf115b   Andy Whitcroft   [PATCH] flatmem s...
1056
1057
  #else
  #define pfn_to_nid(pfn)		(0)
d41dee369   Andy Whitcroft   [PATCH] sparsemem...
1058
  #endif
d41dee369   Andy Whitcroft   [PATCH] sparsemem...
1059
1060
1061
1062
  #define early_pfn_valid(pfn)	pfn_valid(pfn)
  void sparse_init(void);
  #else
  #define sparse_init()	do {} while (0)
28ae55c98   Dave Hansen   [PATCH] sparsemem...
1063
  #define sparse_index_init(_sec, _nid)  do {} while (0)
d41dee369   Andy Whitcroft   [PATCH] sparsemem...
1064
  #endif /* CONFIG_SPARSEMEM */
751679573   Andy Whitcroft   [PATCH] Reintrodu...
1065
  #ifdef CONFIG_NODES_SPAN_OTHER_NODES
cc2559bcc   KAMEZAWA Hiroyuki   mm: fix memmap in...
1066
  bool early_pfn_in_nid(unsigned long pfn, int nid);
751679573   Andy Whitcroft   [PATCH] Reintrodu...
1067
1068
1069
  #else
  #define early_pfn_in_nid(pfn, nid)	(1)
  #endif
  #ifndef early_pfn_valid
  #define early_pfn_valid(pfn)	(1)
  #endif
  
  void memory_present(int nid, unsigned long start, unsigned long end);
  unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
  /*
   * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
   * pfn_valid_within() should be used in this case; we optimise this away
   * when we have no holes within a MAX_ORDER_NR_PAGES block.
   */
  #ifdef CONFIG_HOLES_IN_ZONE
  #define pfn_valid_within(pfn) pfn_valid(pfn)
  #else
  #define pfn_valid_within(pfn) (1)
  #endif
  #ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
  /*
   * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
   * associated with it or not. In FLATMEM, it is expected that holes always
 * have valid memmap as long as there are valid PFNs on either side of the hole.
   * In SPARSEMEM, it is assumed that a valid section has a memmap for the
   * entire section.
   *
 * However, ARM, and maybe other embedded architectures in the future,
   * free memmap backing holes to save memory on the assumption the memmap is
   * never used. The page_zone linkages are then broken even though pfn_valid()
   * returns true. A walker of the full memmap must then do this additional
   * check to ensure the memmap they are looking at is sane by making sure
   * the zone and PFN linkages are still valid. This is expensive, but walkers
   * of the full memmap are extremely rare.
   */
  int memmap_valid_within(unsigned long pfn,
  					struct page *page, struct zone *zone);
  #else
  static inline int memmap_valid_within(unsigned long pfn,
  					struct page *page, struct zone *zone)
  {
  	return 1;
  }
  #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
97965478a   Christoph Lameter   mm: Get rid of __...
1112
#endif /* !__GENERATING_BOUNDS_H */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1113
  #endif /* !__ASSEMBLY__ */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1114
  #endif /* _LINUX_MMZONE_H */