Commit f64dc58c5412233d4d44b0275eaebdc11bde23b3

Authored by Christoph Lameter
Committed by Linus Torvalds
1 parent 04231b3002

Memoryless nodes: SLUB support

Simply switch all for_each_online_node() iterations to
for_each_node_state(node, N_NORMAL_MEMORY).  That way SLUB only operates on
nodes with regular memory.  Any allocation attempt on a memoryless node or a
node with just highmem will fail, whereupon SLUB will fetch memory from a
nearby node (depending on how memory policies and cpusets describe fallback).
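
As a point of reference, here is a minimal sketch of the iterator this patch
switches to.  It is simplified from include/linux/nodemask.h, and the helper
function below is hypothetical, not part of the patch:

    #include <linux/nodemask.h>

    /*
     * Hypothetical helper (illustration only): count the NUMA nodes
     * that carry regular memory.  for_each_node_state() visits only
     * the nodes whose bit is set in node_states[N_NORMAL_MEMORY], so
     * memoryless and highmem-only nodes are skipped, whereas
     * for_each_online_node() visits every online node.
     */
    static int count_normal_memory_nodes(void)
    {
    	int node, count = 0;

    	for_each_node_state(node, N_NORMAL_MEMORY)
    		count++;

    	return count;
    }

Because slab pages must come from regular (non-highmem) memory, restricting
SLUB's per-node loops to this mask means every node it touches can actually
back its slabs.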

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Tested-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Bob Picco <bob.picco@hp.com>
Cc: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@skynet.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 8 additions and 8 deletions

diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1921,7 +1921,7 @@
 {
 	int node;
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = s->node[node];
 		if (n && n != &s->local_node)
 			kmem_cache_free(kmalloc_caches, n);
@@ -1939,7 +1939,7 @@
 	else
 		local_node = 0;
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n;
 
 		if (local_node == node)
@@ -2192,7 +2192,7 @@
 	flush_all(s);
 
 	/* Attempt to free all objects */
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
 		n->nr_partial -= free_list(s, n, &n->partial);
@@ -2521,7 +2521,7 @@
 		return -ENOMEM;
 
 	flush_all(s);
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		n = get_node(s, node);
 
 		if (!n->nr_partial)
@@ -2916,7 +2916,7 @@
 		return -ENOMEM;
 
 	flush_all(s);
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
 		count += validate_slab_node(s, n, map);
@@ -3136,7 +3136,7 @@
 	/* Push back cpu slabs */
 	flush_all(s);
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 		unsigned long flags;
 		struct page *page;
@@ -3263,7 +3263,7 @@
 		}
 	}
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
 		if (flags & SO_PARTIAL) {
@@ -3291,7 +3291,7 @@
 
 	x = sprintf(buf, "%lu", total);
 #ifdef CONFIG_NUMA
-	for_each_online_node(node)
+	for_each_node_state(node, N_NORMAL_MEMORY)
 		if (nodes[node])
 			x += sprintf(buf + x, " N%d=%lu",
 				node, nodes[node]);
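
As a hypothetical example of the final hunk's effect: the per-node breakdown
emitted through SLUB's sysfs interface (e.g. /sys/kernel/slab/kmalloc-64/partial)
keeps its "total N<node>=<count>" format, so a two-node reading might look like:

    91 N0=45 N1=46

Since the nodes[node] check already skipped nodes holding no slabs, the visible
output is typically unchanged; the switch simply keeps this reporting loop
consistent with the allocation-side iterators above.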