Commit a1bc5a4eee990a1f290735c8694d0aebdad095fa
Committed by: Linus Torvalds
Parent: 7f81b1ae18
cpusets: replace zone allowed functions with node allowed
The cpuset_zone_allowed() variants are actually only a function of the
zone's node.

Cc: Paul Menage <menage@google.com>
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
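As a rough sketch of what the node-based API allows at a call site
(hypothetical example, not part of this commit; for_each_online_node() and
GFP_KERNEL are standard kernel facilities): code that already holds a node
id can query the cpuset directly instead of going through one of the
node's zones.

	/*
	 * Hypothetical caller, for illustration only: with the node-based
	 * API, a walk over online nodes can check each node id directly
	 * instead of fabricating a struct zone to pass in.
	 */
	int nid;

	for_each_online_node(nid) {
		if (!cpuset_node_allowed_hardwall(nid, GFP_KERNEL))
			continue;	/* node not in this task's mems_allowed */
		/* ... consider allocating from nid ... */
	}

The zone-based names survive as static inline wrappers over zone_to_nid(),
so existing callers keep compiling unchanged.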
Showing 2 changed files with 52 additions and 40 deletions
include/linux/cpuset.h
@@ -12,6 +12,7 @@
 #include <linux/cpumask.h>
 #include <linux/nodemask.h>
 #include <linux/cgroup.h>
+#include <linux/mm.h>
 
 #ifdef CONFIG_CPUSETS
 
@@ -29,21 +30,31 @@
 void cpuset_update_task_memory_state(void);
 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
 
-extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
-extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
+extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
+extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
 
-static int inline cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 {
 	return number_of_cpusets <= 1 ||
-		__cpuset_zone_allowed_softwall(z, gfp_mask);
+		__cpuset_node_allowed_softwall(node, gfp_mask);
 }
 
-static int inline cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
 {
 	return number_of_cpusets <= 1 ||
-		__cpuset_zone_allowed_hardwall(z, gfp_mask);
+		__cpuset_node_allowed_hardwall(node, gfp_mask);
 }
 
+static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+{
+	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
+}
+
+static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+{
+	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
+}
+
 extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
 					  const struct task_struct *tsk2);
 
@@ -108,6 +119,16 @@
 static inline void cpuset_update_task_memory_state(void) {}
 
 static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
+{
+	return 1;
+}
+
+static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
+{
+	return 1;
+}
+
+static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
 {
 	return 1;
 }
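For illustration, a sketch (not code from this commit) of what the inline
layer above effectively compiles to at a call site, in both configurations:

	/* CONFIG_CPUSETS=y: the inline wrapper short-circuits when only the
	 * root cpuset exists, so the out-of-line helper in kernel/cpuset.c
	 * is reached only when cpusets are actually partitioning memory.
	 */
	allowed = number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(nid, gfp_mask);

	/* CONFIG_CPUSETS=n: the stubs above reduce the whole check to a
	 * constant, so the compiler can drop it entirely.
	 */
	allowed = 1;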
kernel/cpuset.c
@@ -2181,26 +2181,24 @@
 }
 
 /**
- * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node?
- * @z: is this zone on an allowed node?
+ * cpuset_node_allowed_softwall - Can we allocate on a memory node?
+ * @node: is this an allowed node?
  * @gfp_mask: memory allocation flags
  *
- * If we're in interrupt, yes, we can always allocate.  If
- * __GFP_THISNODE is set, yes, we can always allocate.  If zone
- * z's node is in our tasks mems_allowed, yes.  If it's not a
- * __GFP_HARDWALL request and this zone's nodes is in the nearest
- * hardwalled cpuset ancestor to this tasks cpuset, yes.
- * If the task has been OOM killed and has access to memory reserves
- * as specified by the TIF_MEMDIE flag, yes.
+ * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
+ * set, yes, we can always allocate.  If node is in our task's mems_allowed,
+ * yes.  If it's not a __GFP_HARDWALL request and this node is in the nearest
+ * hardwalled cpuset ancestor to this task's cpuset, yes.  If the task has been
+ * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
+ * flag, yes.
  * Otherwise, no.
 *
- * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall()
- * reduces to cpuset_zone_allowed_hardwall().  Otherwise,
- * cpuset_zone_allowed_softwall() might sleep, and might allow a zone
- * from an enclosing cpuset.
+ * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
+ * cpuset_node_allowed_hardwall().  Otherwise, cpuset_node_allowed_softwall()
+ * might sleep, and might allow a node from an enclosing cpuset.
 *
- * cpuset_zone_allowed_hardwall() only handles the simpler case of
- * hardwall cpusets, and never sleeps.
+ * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
+ * cpusets, and never sleeps.
 *
 * The __GFP_THISNODE placement logic is really handled elsewhere,
 * by forcibly using a zonelist starting at a specified node, and by
@@ -2239,20 +2237,17 @@
 * GFP_USER - only nodes in current tasks mems allowed ok.
 *
 * Rule:
- *    Don't call cpuset_zone_allowed_softwall if you can't sleep, unless you
+ *    Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
 *    pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
 *    the code that might scan up ancestor cpusets and sleep.
 */
-
-int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 {
-	int node;			/* node that zone z is on */
 	const struct cpuset *cs;	/* current cpuset ancestors */
 	int allowed;			/* is allocation in zone z allowed? */
 
 	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
 		return 1;
-	node = zone_to_nid(z);
 	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
 	if (node_isset(node, current->mems_allowed))
 		return 1;
@@ -2281,15 +2276,15 @@
 }
 
 /*
- * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node?
- * @z: is this zone on an allowed node?
+ * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
+ * @node: is this an allowed node?
 * @gfp_mask: memory allocation flags
 *
- * If we're in interrupt, yes, we can always allocate.
- * If __GFP_THISNODE is set, yes, we can always allocate.  If zone
- * z's node is in our tasks mems_allowed, yes.  If the task has been
- * OOM killed and has access to memory reserves as specified by the
- * TIF_MEMDIE flag, yes.  Otherwise, no.
+ * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
+ * set, yes, we can always allocate.  If node is in our task's mems_allowed,
+ * yes.  If the task has been OOM killed and has access to memory reserves as
+ * specified by the TIF_MEMDIE flag, yes.
+ * Otherwise, no.
 *
 * The __GFP_THISNODE placement logic is really handled elsewhere,
 * by forcibly using a zonelist starting at a specified node, and by
@@ -2297,20 +2292,16 @@
 * any node on the zonelist except the first.  By the time any such
 * calls get to this routine, we should just shut up and say 'yes'.
 *
- * Unlike the cpuset_zone_allowed_softwall() variant, above,
- * this variant requires that the zone be in the current tasks
+ * Unlike the cpuset_node_allowed_softwall() variant, above,
+ * this variant requires that the node be in the current task's
 * mems_allowed or that we're in interrupt.  It does not scan up the
 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
 * It never sleeps.
 */
-
-int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
 {
-	int node;			/* node that zone z is on */
-
 	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
 		return 1;
-	node = zone_to_nid(z);
 	if (node_isset(node, current->mems_allowed))
 		return 1;
 	/*