include/linux/mm_types.h

  #ifndef _LINUX_MM_TYPES_H
  #define _LINUX_MM_TYPES_H
  #include <linux/auxvec.h>
  #include <linux/types.h>
  #include <linux/threads.h>
  #include <linux/list.h>
  #include <linux/spinlock.h>
  #include <linux/prio_tree.h>
  #include <linux/rbtree.h>
  #include <linux/rwsem.h>
  #include <linux/completion.h>
  #include <linux/cpumask.h>
  #include <linux/page-debug-flags.h>
  #include <asm/page.h>
  #include <asm/mmu.h>

  #ifndef AT_VECTOR_SIZE_ARCH
  #define AT_VECTOR_SIZE_ARCH 0
  #endif
  #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
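
/*
 * Worked example (illustrative; the real constants live in <linux/auxvec.h>
 * and the arch's <asm/auxvec.h>): each auxv entry is an (a_type, a_val)
 * pair, hence the factor of two, and the "+ 1" reserves room for the
 * terminating AT_NULL pair.  If AT_VECTOR_SIZE_BASE were 19 and the
 * architecture added nothing, saved_auxv below would hold
 * 2 * (0 + 19 + 1) = 40 unsigned longs.
 */
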
  struct address_space;
  #define USE_SPLIT_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
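
/*
 * Illustrative sketch (not part of the original header): when the above
 * evaluates true, each page-table page carries its own spinlock (the "ptl"
 * field of struct page below) instead of every PTE update serializing on
 * mm->page_table_lock.  Lock selection in the split case looks roughly
 * like pte_lockptr() in <linux/mm.h>:
 *
 *	spinlock_t *ptl = &pmd_page(*pmd)->ptl;
 *	spin_lock(ptl);
 *	... install or clear PTEs under this lock ...
 *	spin_unlock(ptl);
 */
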
  /*
   * Each physical page in the system has a struct page associated with
   * it to keep track of whatever it is we are using the page for at the
   * moment. Note that we have no way to track which tasks are using
   * a page, though if it is a pagecache page, rmap structures can tell us
   * who is mapping it.
   */
  struct page {
  	unsigned long flags;		/* Atomic flags, some possibly
  					 * updated asynchronously */
  	atomic_t _count;		/* Usage count, see below. */
  	union {
  		atomic_t _mapcount;	/* Count of ptes mapped in mms,
  					 * to show when page is mapped
  					 * & limit reverse map searches.
  					 */
  		struct {		/* SLUB */
  			u16 inuse;
  			u16 objects;
  		};
  	};
  	union {
  	    struct {
  		unsigned long private;		/* Mapping-private opaque data:
  					 	 * usually used for buffer_heads
  						 * if PagePrivate set; used for
  						 * swp_entry_t if PageSwapCache;
  						 * indicates order in the buddy
  						 * system if PG_buddy is set.
  						 */
  		struct address_space *mapping;	/* If low bit clear, points to
  						 * inode address_space, or NULL.
  						 * If page mapped as anonymous
  						 * memory, low bit is set, and
  						 * it points to anon_vma object:
  						 * see PAGE_MAPPING_ANON below.
  						 */
  	    };
  #if USE_SPLIT_PTLOCKS
  	    spinlock_t ptl;
  #endif
  	    struct kmem_cache *slab;	/* SLUB: Pointer to slab */
  	    struct page *first_page;	/* Compound tail pages */
  	};
  	union {
  		pgoff_t index;		/* Our offset within mapping. */
  		void *freelist;		/* SLUB: freelist req. slab lock */
  	};
  	struct list_head lru;		/* Pageout list, eg. active_list
  					 * protected by zone->lru_lock !
  					 */
  	/*
  	 * On machines where all RAM is mapped into kernel address space,
  	 * we can simply calculate the virtual address. On machines with
  	 * highmem some memory is mapped into kernel virtual memory
  	 * dynamically, so we need a place to store that address.
  	 * Note that this field could be 16 bits on x86 ... ;)
  	 *
  	 * Architectures with slow multiplication can define
  	 * WANT_PAGE_VIRTUAL in asm/page.h
  	 */
  #if defined(WANT_PAGE_VIRTUAL)
  	void *virtual;			/* Kernel virtual address (NULL if
  					   not kmapped, ie. highmem) */
  #endif /* WANT_PAGE_VIRTUAL */
  #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
  	unsigned long debug_flags;	/* Use atomic bitops on this */
  #endif
  
  #ifdef CONFIG_KMEMCHECK
  	/*
  	 * kmemcheck wants to track the status of each byte in a page; this
  	 * is a pointer to such a status block. NULL if not tracked.
  	 */
  	void *shadow;
  #endif
  };
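
/*
 * Example (illustrative sketch, not part of the original header): ordinary
 * code never touches _count or _mapcount directly but goes through the
 * accessors in <linux/mm.h>: get_page()/put_page() adjust the usage count,
 * page_count() and page_mapcount() read the two counters, and the
 * PageFoo() macros test bits in "flags".  The function name inspect_page()
 * is made up for the example.
 *
 *	static void inspect_page(struct page *page)
 *	{
 *		get_page(page);
 *		printk(KERN_DEBUG "count=%d mapcount=%d\n",
 *		       page_count(page), page_mapcount(page));
 *		put_page(page);
 *	}
 */
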
  /*
   * A region containing a mapping of a non-memory backed file under NOMMU
   * conditions.  These are held in a global tree and are pinned by the VMAs that
   * map parts of them.
   */
  struct vm_region {
  	struct rb_node	vm_rb;		/* link in global region tree */
  	unsigned long	vm_flags;	/* VMA vm_flags */
  	unsigned long	vm_start;	/* start address of region */
  	unsigned long	vm_end;		/* region initialised to here */
  	unsigned long	vm_top;		/* region allocated to here */
  	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
  	struct file	*vm_file;	/* the backing file or NULL */
  	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
  	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
  						* this region */
  };
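
/*
 * Illustrative note (not part of the original header): vm_end records how
 * far the region has been initialised for its mapping, while vm_top records
 * how far backing pages were actually allocated, so
 * vm_start <= vm_end <= vm_top always holds and the slack between vm_end
 * and vm_top may let a NOMMU mapping grow in place.  A hypothetical helper:
 *
 *	static unsigned long region_slack(const struct vm_region *region)
 *	{
 *		return region->vm_top - region->vm_end;
 *	}
 */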
  
  /*
   * This struct defines a virtual memory area (VMA). There is one of these
   * per VM-area per task.  A VM area is any part of the process virtual memory
   * space that has a special rule for the page-fault handlers (ie a shared
   * library, the executable area etc).
   */
  struct vm_area_struct {
  	struct mm_struct * vm_mm;	/* The address space we belong to. */
  	unsigned long vm_start;		/* Our start address within vm_mm. */
  	unsigned long vm_end;		/* The first byte after our end address
  					   within vm_mm. */
  
  	/* linked list of VM areas per task, sorted by address */
  	struct vm_area_struct *vm_next, *vm_prev;
  
  	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
  	unsigned long vm_flags;		/* Flags, see mm.h. */
  
  	struct rb_node vm_rb;
  
  	/*
  	 * For areas with an address space and backing store,
  	 * linkage into the address_space->i_mmap prio tree, or
  	 * linkage to the list of like vmas hanging off its node, or
  	 * linkage of vma in the address_space->i_mmap_nonlinear list.
  	 */
  	union {
  		struct {
  			struct list_head list;
  			void *parent;	/* aligns with prio_tree_node parent */
  			struct vm_area_struct *head;
  		} vm_set;
  
  		struct raw_prio_tree_node prio_tree_node;
  	} shared;
  
  	/*
  	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
  	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
  	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
  	 * or brk vma (with NULL file) can only be in an anon_vma list.
  	 */
  	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
  					  * page_table_lock */
  	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */
  
  	/* Function pointers to deal with this struct. */
  	const struct vm_operations_struct *vm_ops;
  
  	/* Information about our backing store: */
  	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
  					   units, *not* PAGE_CACHE_SIZE */
  	struct file * vm_file;		/* File we map to (can be NULL). */
  	void * vm_private_data;		/* was vm_pte (shared mem) */
  	unsigned long vm_truncate_count;/* truncate_count or restart_addr */
  
  #ifndef CONFIG_MMU
  	struct vm_region *vm_region;	/* NOMMU mapping region */
  #endif
  #ifdef CONFIG_NUMA
  	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
  #endif
  };
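
/*
 * Example (illustrative, not part of the original header): the per-mm VMA
 * list is walked under mmap_sem; vm_next/vm_prev keep it sorted by address.
 * The function name dump_vmas() is made up for the example.
 *
 *	static void dump_vmas(struct mm_struct *mm)
 *	{
 *		struct vm_area_struct *vma;
 *
 *		down_read(&mm->mmap_sem);
 *		for (vma = mm->mmap; vma; vma = vma->vm_next)
 *			printk(KERN_DEBUG "%08lx-%08lx flags=%lx\n",
 *			       vma->vm_start, vma->vm_end, vma->vm_flags);
 *		up_read(&mm->mmap_sem);
 *	}
 */
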
  struct core_thread {
  	struct task_struct *task;
  	struct core_thread *next;
  };
  struct core_state {
  	atomic_t nr_threads;
  	struct core_thread dumper;
  	struct completion startup;
  };
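
/*
 * Rough outline (illustrative; see fs/exec.c and kernel/exit.c for the
 * real code): the thread taking a core dump publishes a core_state in
 * mm->core_state with nr_threads set to the number of other threads still
 * using the mm.  Each of those threads, on its way out of the mm, chains a
 * core_thread onto "dumper" and decrements nr_threads; the last one to do
 * so completes "startup", letting the dumping thread proceed.
 */
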
  enum {
  	MM_FILEPAGES,
  	MM_ANONPAGES,
  	MM_SWAPENTS,
  	NR_MM_COUNTERS
  };
  #if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU)
  #define SPLIT_RSS_COUNTING
  struct mm_rss_stat {
  	atomic_long_t count[NR_MM_COUNTERS];
  };
  /* per-thread cached information, */
  struct task_rss_stat {
  	int events;	/* for synchronization threshold */
  	int count[NR_MM_COUNTERS];
  };
  #else  /* !USE_SPLIT_PTLOCKS */
  struct mm_rss_stat {
  	unsigned long count[NR_MM_COUNTERS];
  };
  #endif /* !USE_SPLIT_PTLOCKS */
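
/*
 * Illustrative sketch (the real accessors are get_mm_counter() and friends
 * in <linux/mm.h>; this helper is made up, assumes the split (atomic)
 * configuration above, and ignores the per-thread batching done under
 * SPLIT_RSS_COUNTING):
 *
 *	static unsigned long mm_rss_pages(struct mm_struct *mm)
 *	{
 *		return atomic_long_read(&mm->rss_stat.count[MM_FILEPAGES]) +
 *		       atomic_long_read(&mm->rss_stat.count[MM_ANONPAGES]);
 *	}
 */
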
  struct mm_struct {
  	struct vm_area_struct * mmap;		/* list of VMAs */
  	struct rb_root mm_rb;
  	struct vm_area_struct * mmap_cache;	/* last find_vma result */
  #ifdef CONFIG_MMU
  	unsigned long (*get_unmapped_area) (struct file *filp,
  				unsigned long addr, unsigned long len,
  				unsigned long pgoff, unsigned long flags);
  	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
  #endif
  	unsigned long mmap_base;		/* base of mmap area */
  	unsigned long task_size;		/* size of task vm space */
  	unsigned long cached_hole_size; 	/* if non-zero, the largest hole below free_area_cache */
  	unsigned long free_area_cache;		/* first hole of size cached_hole_size or larger */
  	pgd_t * pgd;
  	atomic_t mm_users;			/* How many users with user space? */
  	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
  	int map_count;				/* number of VMAs */
  	struct rw_semaphore mmap_sem;
  	spinlock_t page_table_lock;		/* Protects page tables and some counters */
  
  	struct list_head mmlist;		/* List of maybe swapped mm's.	These are globally strung
  						 * together off init_mm.mmlist, and are protected
  						 * by mmlist_lock
  						 */
  
  	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
  	unsigned long hiwater_vm;	/* High-water virtual memory usage */
  
  	unsigned long total_vm, locked_vm, shared_vm, exec_vm;
  	unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
  	unsigned long start_code, end_code, start_data, end_data;
  	unsigned long start_brk, brk, start_stack;
  	unsigned long arg_start, arg_end, env_start, env_end;
  
  	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
  	/*
  	 * Special counters, in some configurations protected by the
  	 * page_table_lock, in other configurations by being atomic.
  	 */
  	struct mm_rss_stat rss_stat;
  	struct linux_binfmt *binfmt;
  	cpumask_t cpu_vm_mask;
  
  	/* Architecture-specific MM context */
  	mm_context_t context;
  
  	/* Swap token stuff */
  	/*
  	 * Last value of global fault stamp as seen by this process.
  	 * In other words, this value gives an indication of how long
  	 * it has been since this task got the token.
  	 * Look at mm/thrash.c
  	 */
  	unsigned int faultstamp;
  	unsigned int token_priority;
  	unsigned int last_interval;
  
  	unsigned long flags; /* Must use atomic bitops to access the bits */
  	struct core_state *core_state; /* coredumping support */
  #ifdef CONFIG_AIO
  	spinlock_t		ioctx_lock;
  	struct hlist_head	ioctx_list;
  #endif
  #ifdef CONFIG_MM_OWNER
  	/*
  	 * "owner" points to a task that is regarded as the canonical
  	 * user/owner of this mm. All of the following must be true in
  	 * order for it to be changed:
  	 *
  	 * current == mm->owner
  	 * current->mm != mm
  	 * new_owner->mm == mm
  	 * new_owner->alloc_lock is held
  	 */
  	struct task_struct *owner;
  #endif
  
  #ifdef CONFIG_PROC_FS
  	/* store ref to file /proc/<pid>/exe symlink points to */
  	struct file *exe_file;
  	unsigned long num_exe_file_vmas;
  #endif
  #ifdef CONFIG_MMU_NOTIFIER
  	struct mmu_notifier_mm *mmu_notifier_mm;
  #endif
  };
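
/*
 * Example (illustrative, not part of the original header): mm_users counts
 * users of the address space (threads, get_task_mm() callers, ...), while
 * mm_count counts references to the structure itself; all live users
 * together hold exactly one mm_count reference.  Temporary access from
 * another task therefore looks roughly like:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		... inspect the address space ...
 *		mmput(mm);
 *	}
 */
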
  /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
  #define mm_cpumask(mm) (&(mm)->cpu_vm_mask)
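
/*
 * Illustrative use (not part of the original header): TLB shootdown code
 * checks which CPUs may still hold translations for an address space, e.g.
 *
 *	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
 *		... send that CPU a flush IPI ...
 *
 * and a CPU adds itself with cpumask_set_cpu(cpu, mm_cpumask(mm)) when it
 * switches to the mm.
 */
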
  #endif /* _LINUX_MM_TYPES_H */