include/linux/mempolicy.h
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
        atomic_t refcnt;
        unsigned short mode;    /* See MPOL_* above */
        unsigned short flags;   /* See set_mempolicy() MPOL_F_* above */
        union {
                short            preferred_node; /* preferred */
                nodemask_t       nodes;          /* interleave/bind */
                /* undefined for default */
        } v;
        union {
                nodemask_t cpuset_mems_allowed; /* relative to these nodes */
                nodemask_t user_nodemask;       /* nodemask passed by user */
        } w;
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */
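
/*
 * Illustrative sketch, not part of the original header: which member of
 * the 'v' union is live depends on 'mode'.  The helper name
 * mpol_example_first_node() is hypothetical.
 */
static inline int mpol_example_first_node(struct mempolicy *pol)
{
        switch (pol->mode) {
        case MPOL_PREFERRED:
                /* single preferred node */
                return pol->v.preferred_node;
        case MPOL_INTERLEAVE:
        case MPOL_BIND:
                /* nodemask-based policies use v.nodes */
                return first_node(pol->v.nodes);
        default:
                /* MPOL_DEFAULT: v is undefined, fall back to local */
                return numa_node_id();
        }
}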

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
        if (pol)
                __mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
        return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
        if (mpol_needs_cond_ref(pol))
                __mpol_put(pol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
        if (pol)
                pol = __mpol_dup(pol);
        return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
        if (pol)
                atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        if (a == b)
                return true;
        return __mpol_equal(a, b);
}
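
/*
 * Illustrative sketch, not part of the original header: the usual
 * duplicate/use/release pattern.  mpol_dup() passes NULL through and,
 * in this era, __mpol_dup() reports allocation failure via ERR_PTR(),
 * so IS_ERR() (from <linux/err.h>) is the error check.  The function
 * name is hypothetical.
 */
static inline int mpol_example_clone(struct mempolicy *src,
                                     struct mempolicy **dst)
{
        struct mempolicy *new = mpol_dup(src);

        if (IS_ERR(new))
                return PTR_ERR(new);
        *dst = new;     /* caller owns the new reference; mpol_put() it later */
        return 0;
}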

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
        struct rb_node nd;
        unsigned long start, end;
        struct mempolicy *policy;
};

struct shared_policy {
        struct rb_root root;
        spinlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
                                struct vm_area_struct *vma,
                                struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
                                            unsigned long idx);
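
/*
 * Illustrative sketch, not part of the original header: the lookup is
 * indexed by page offset, and a hit returns a policy carrying
 * MPOL_F_SHARED plus an extra reference, which mpol_cond_put() drops.
 * The function name and NUMA_NO_NODE fallback are illustrative.
 */
static inline int mpol_example_shared_node(struct shared_policy *sp,
                                           unsigned long pgoff)
{
        struct mempolicy *pol = mpol_shared_policy_lookup(sp, pgoff);
        int nid = NUMA_NO_NODE;

        if (pol && pol->mode == MPOL_PREFERRED)
                nid = pol->v.preferred_node;
        mpol_cond_put(pol);     /* safe on NULL and on non-shared policies */
        return nid;
}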

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
                unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
                                enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask);
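
/*
 * Illustrative sketch, not part of the original header, modelled on the
 * hugetlb fault path: resolve the policy at @addr into a zonelist and an
 * optional nodemask, then drop the conditional policy reference once the
 * allocation attempt is over.  GFP_HIGHUSER stands in for the real
 * hugetlb gfp mask; the function name is hypothetical.
 */
static inline struct zonelist *mpol_example_zonelist(struct vm_area_struct *vma,
                                                     unsigned long addr)
{
        struct mempolicy *mpol;
        nodemask_t *nodemask;   /* NULL unless the policy restricts nodes */
        struct zonelist *zl;

        zl = huge_zonelist(vma, addr, GFP_HIGHUSER, &mpol, &nodemask);
        /* ... walk zl here, skipping zones outside *nodemask ... */
        mpol_cond_put(mpol);    /* release the reference huge_zonelist took */
        return zl;
}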

extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
                                const nodemask_t *mask);
extern unsigned int mempolicy_slab_node(void);
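
/*
 * Illustrative sketch, not part of the original header: the slab
 * allocator asks mempolicy_slab_node() which node the current task's
 * policy favours, e.g. to steer kmalloc_node().  The function name is
 * hypothetical.
 */
static inline void *mpol_example_kmalloc(size_t size, gfp_t flags)
{
        return kmalloc_node(size, flags, mempolicy_slab_node());
}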

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
        if (k > policy_zone && k != ZONE_MOVABLE)
                policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags);
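
/*
 * Illustrative sketch, not part of the original header, modelled on the
 * cpuset code: migrate a task's pages between nodemasks.  MPOL_MF_MOVE
 * moves only pages private to the process; MPOL_MF_MOVE_ALL also moves
 * shared pages and requires CAP_SYS_NICE.  The function name is
 * hypothetical; the caller must hold a reference on @mm.
 */
static inline int mpol_example_migrate(struct mm_struct *mm,
                                       const nodemask_t *from,
                                       const nodemask_t *to)
{
        return do_migrate_pages(mm, from, to, MPOL_MF_MOVE);
}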

#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
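
/*
 * Illustrative sketch, not part of the original header: round-tripping a
 * tmpfs mount-option string such as "interleave:0-3".  mpol_parse_str()
 * returns 0 on success and modifies @str in place, so it needs a
 * writable copy.  The function name and buffer size are illustrative.
 */
static inline void mpol_example_roundtrip(char *str)
{
        struct mempolicy *mpol;
        char buf[64];

        if (!mpol_parse_str(str, &mpol)) {              /* 0 == success */
                mpol_to_str(buf, sizeof(buf), mpol);    /* format it back */
                mpol_put(mpol);                         /* drop the parse reference */
        }
}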

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                return 0;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
        if (vma->vm_flags & VM_HUGETLB)
                return 0;
#endif

        /*
         * Migration allocates pages in the highest zone. If we cannot
         * do so then migration (at least from node to node) is not
         * possible.
         */
        if (vma->vm_file &&
                gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
                                                        < policy_zone)
                return 0;
        return 1;
}
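
/*
 * Illustrative sketch, not part of the original header, modelled on the
 * mbind() page walker: skip VMAs that vma_migratable() rejects before
 * queueing their pages.  The function name is hypothetical, and mmap_sem
 * must be held as the comment at the top of this file requires.
 */
static inline unsigned long mpol_example_count_migratable(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long nr = 0;

        for (vma = mm->mmap; vma; vma = vma->vm_next)
                if (vma_migratable(vma))
                        nr++;
        return nr;
}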

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
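
/*
 * Illustrative sketch, not part of the original header, modelled on the
 * NUMA-hinting fault path: mpol_misplaced() names the node a page should
 * live on, or returns -1 when it is already well placed.  The function
 * name is hypothetical.
 */
static inline bool mpol_example_needs_move(struct page *page,
                                           struct vm_area_struct *vma,
                                           unsigned long addr)
{
        int target_nid = mpol_misplaced(page, vma, addr);

        return target_nid != -1;        /* true: migrate towards target_nid */
}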

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
                                                struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
        return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
                        const nodemask_t *new, enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask)
{
        *mpol = NULL;
        *nodemask = NULL;
        return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
        return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                                   const nodemask_t *to, int flags)
{
        return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
        return 1;       /* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
                                 unsigned long address)
{
        return -1; /* no node preference */
}

#endif /* CONFIG_NUMA */
#endif