/* include/linux/mm_types.h */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/auxvec.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/prio_tree.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/page-debug-flags.h>
#include <asm/page.h>
#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
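
/*
 * Editorial note (not in the original header): AT_VECTOR_SIZE sizes the
 * saved_auxv[] array in struct mm_struct below.  The auxiliary vector is a
 * list of (id, value) pairs terminated by AT_NULL, hence the 2*(... + 1).
 * As a worked example, assuming AT_VECTOR_SIZE_BASE were 20 (its real value
 * comes from <linux/auxvec.h>) and AT_VECTOR_SIZE_ARCH were 0:
 *
 *      AT_VECTOR_SIZE = 2 * (0 + 20 + 1) = 42 unsigned longs,
 *
 * i.e. room for 21 pairs including the terminating AT_NULL entry.
 */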

struct address_space;

#define USE_SPLIT_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
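
/*
 * Editorial note (not in the original header): USE_SPLIT_PTLOCKS selects
 * per-page-table-page locking.  Assuming the common Kconfig default of
 * CONFIG_SPLIT_PTLOCK_CPUS=4, a kernel built with NR_CPUS >= 4 evaluates
 * this to 1 and uses the spinlock_t ptl embedded in struct page below to
 * lock each page-table page individually, instead of funnelling every
 * update through mm->page_table_lock.
 */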

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * The objects in struct page are organized in double word blocks in
 * order to allow us to use atomic double word operations on portions
 * of struct page. That is currently only used by slub but the arrangement
 * allows the use of atomic double word operations on the flags/mapping
 * and lru list pointers also.
 */
struct page {
	/* First double word block */
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	struct address_space *mapping;	/* If low bit clear, points to
					 * inode address_space, or NULL.
					 * If page mapped as anonymous
					 * memory, low bit is set, and
					 * it points to anon_vma object:
					 * see PAGE_MAPPING_ANON below.
					 */
	/* Second double word */
	struct {
		union {
			pgoff_t index;		/* Our offset within mapping. */
			void *freelist;		/* slub first free object */
		};

		union {
			/* Used for cmpxchg_double in slub */
			unsigned long counters;

			struct {
				union {
					/*
					 * Count of ptes mapped in
					 * mms, to show when page is
					 * mapped & limit reverse map
					 * searches.
					 *
					 * Used also for tail pages
					 * refcounting instead of
					 * _count. Tail pages cannot
					 * be mapped and keeping the
					 * tail page _count zero at
					 * all times guarantees
					 * get_page_unless_zero() will
					 * never succeed on tail
					 * pages.
					 */
					atomic_t _mapcount;

					struct {
						unsigned inuse:16;
						unsigned objects:15;
						unsigned frozen:1;
					};
				};
				atomic_t _count;	/* Usage count, see below. */
			};
		};
	};

	/* Third double word block */
	union {
		struct list_head lru;	/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 */
		struct {		/* slub per cpu partial pages */
			struct page *next;	/* Next partial slab */
#ifdef CONFIG_64BIT
			int pages;	/* Nr of partial slabs left */
			int pobjects;	/* Approximate # of objects */
#else
			short int pages;
			short int pobjects;
#endif
		};
	};

	/* Remainder is not double word aligned */
	union {
		unsigned long private;		/* Mapping-private opaque data:
						 * usually used for buffer_heads
						 * if PagePrivate set; used for
						 * swp_entry_t if PageSwapCache;
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
#if USE_SPLIT_PTLOCKS
		spinlock_t ptl;
#endif
		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
		struct page *first_page;	/* Compound tail pages */
	};

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
	unsigned long debug_flags;	/* Use atomic bitops on this */
#endif

#ifdef CONFIG_KMEMCHECK
	/*
	 * kmemcheck wants to track the status of each byte in a page; this
	 * is a pointer to such a status block. NULL if not tracked.
	 */
	void *shadow;
#endif
}
/*
 * The struct page can be forced to be double word aligned so that atomic ops
 * on double words work. The SLUB allocator can make use of such a feature.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
	__aligned(2 * sizeof(unsigned long))
#endif
;
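
#if 0
/*
 * Editorial sketch, not part of the original mm_types.h: the point of the
 * double word layout above is that 'freelist' and 'counters' share one
 * aligned double word, so SLUB can update both with a single atomic
 * cmpxchg_double().  The real code lives in __cmpxchg_double_slab() in
 * mm/slub.c and only takes this path on configs that provide the primitive;
 * this helper name is purely illustrative.
 */
static inline bool page_swap_freelist(struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new)
{
	return cmpxchg_double(&page->freelist, &page->counters,
			      freelist_old, counters_old,
			      freelist_new, counters_new);
}
#endif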

struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};
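
/*
 * Editorial sketch, not part of the original mm_types.h: a page_frag lets a
 * producer carve many small buffers out of a single page instead of
 * allocating a page per buffer.  Conceptually (hypothetical caller):
 *
 *      void *buf = page_address(frag->page) + frag->offset;
 *      frag->offset += len;     with 'size' bounding how far offset may go
 *
 * The networking code keeps a frag of this kind per socket/CPU for small
 * buffer allocations.
 */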

typedef unsigned long __nocast vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};

/*
 * This struct defines a VMM memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	struct mm_struct * vm_mm;	/* The address space we belong to. */
	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, see mm.h. */

	struct rb_node vm_rb;

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap prio tree, or
	 * linkage to the list of like vmas hanging off its node, or
	 * linkage of vma in the address_space->i_mmap_nonlinear list.
	 */
	union {
		struct {
			struct list_head list;
			void *parent;	/* aligns with prio_tree_node parent */
			struct vm_area_struct *head;
		} vm_set;

		struct raw_prio_tree_node prio_tree_node;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units, *not* PAGE_CACHE_SIZE */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
};
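
#if 0
/*
 * Editorial sketch, not part of the original mm_types.h: the vm_next/vm_prev
 * links above chain all of a process's VMAs in address order off mm->mmap
 * (mm->mm_rb holds the same VMAs in an rbtree for faster lookup), so a
 * read-side walk looks roughly like this (illustrative function name):
 */
static void print_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);	/* the list is protected by mmap_sem */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		printk(KERN_DEBUG "vma %lx-%lx flags %lx\n",
		       vma->vm_start, vma->vm_end, vma->vm_flags);
	up_read(&mm->mmap_sem);
}
#endif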

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};

enum {
	MM_FILEPAGES,
	MM_ANONPAGES,
	MM_SWAPENTS,
	NR_MM_COUNTERS
};

#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* per-thread cached information */
struct task_rss_stat {
	int events;	/* for synchronization threshold */
	int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTLOCKS */

struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];
};
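
/*
 * Editorial note, not part of the original mm_types.h: with
 * SPLIT_RSS_COUNTING the fault path bumps current->rss_stat.count[] locally
 * and only folds the deltas into mm->rss_stat (with atomic_long_add()) once
 * task_rss_stat.events crosses a small threshold (see sync_mm_rss() in
 * mm/memory.c), which avoids cache-line bouncing on the shared counters.
 * Readers such as /proc/<pid>/statm may therefore lag slightly; roughly:
 *
 *      file_rss = atomic_long_read(&mm->rss_stat.count[MM_FILEPAGES]);
 */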

struct mm_struct {
	struct vm_area_struct * mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	struct vm_area_struct * mmap_cache;	/* last find_vma result */
#ifdef CONFIG_MMU
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
#endif
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long task_size;		/* size of task vm space */
	unsigned long cached_hole_size;		/* if non-zero, the largest hole below free_area_cache */
	unsigned long free_area_cache;		/* first hole of size cached_hole_size or larger */
	pgd_t * pgd;
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;		/* List of maybe swapped mm's.  These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */

	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
	unsigned long shared_vm;	/* Shared pages (files) */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE */
	unsigned long stack_vm;		/* VM_GROWSUP/DOWN */
	unsigned long reserved_vm;	/* VM_RESERVED|VM_IO pages */
	unsigned long def_flags;
	unsigned long nr_ptes;		/* Page table pages */
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;

	/* Architecture-specific MM context */
	mm_context_t context;

	/* Swap token stuff */
	/*
	 * Last value of global fault stamp as seen by this process.
	 * In other words, this value gives an indication of how long
	 * it has been since this task got the token.
	 * Look at mm/thrash.c
	 */
	unsigned int faultstamp;
	unsigned int token_priority;
	unsigned int last_interval;

	unsigned long flags; /* Must use atomic bitops to access the bits */

	struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
	spinlock_t		ioctx_lock;
	struct hlist_head	ioctx_list;
#endif
#ifdef CONFIG_MM_OWNER
	/*
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm. All of the following must be true in
	 * order for it to be changed:
	 *
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	 */
	struct task_struct __rcu *owner;
#endif

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file *exe_file;
	unsigned long num_exe_file_vmas;
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;
#endif
};
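
#if 0
/*
 * Editorial sketch, not part of the original mm_types.h: the usual lifetime
 * pattern for the two counters in struct mm_struct.  mm_users counts users
 * of the address space and is dropped by mmput(); mm_count pins the struct
 * itself (all mm_users together hold one mm_count reference) and is dropped
 * by mmdrop(), e.g. by kernel threads that kept a lazy-TLB reference after
 * the last user exited.  Illustrative function name only:
 */
static void mm_ref_example(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);	/* takes an mm_users ref */

	if (mm) {
		/* ... inspect the address space, typically under mmap_sem ... */
		mmput(mm);				/* releases mm_users */
	}
}
#endif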

static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return mm->cpu_vm_mask_var;
}
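
#if 0
/*
 * Editorial sketch, not part of the original mm_types.h: mm_cpumask() is the
 * set of CPUs that may be caching translations for this mm, so TLB-shootdown
 * style code typically iterates it along these lines (illustrative name):
 */
static void flush_mm_everywhere(struct mm_struct *mm)
{
	int cpu;

	for_each_cpu(cpu, mm_cpumask(mm)) {
		/* send an IPI or queue a TLB flush for this CPU */
	}
}
#endif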

#endif /* _LINUX_MM_TYPES_H */