Blame view

mm/mmzone.c 2.4 KB
95144c788   KAMEZAWA Hiroyuki   [PATCH] uninline ...
1
2
3
/*
 * linux/mm/mmzone.c
 *
 * management code for pgdats, zones and page flags
 */
95144c788   KAMEZAWA Hiroyuki   [PATCH] uninline ...
6
  #include <linux/stddef.h>
eb33575cf   Mel Gorman   [ARM] Double chec...
7
  #include <linux/mm.h>
95144c788   KAMEZAWA Hiroyuki   [PATCH] uninline ...
8
  #include <linux/mmzone.h>
95144c788   KAMEZAWA Hiroyuki   [PATCH] uninline ...
9
10
11
12
13
  
  struct pglist_data *first_online_pgdat(void)
  {
  	return NODE_DATA(first_online_node);
  }
95144c788   KAMEZAWA Hiroyuki   [PATCH] uninline ...
14
15
16
17
18
19
20
21
  struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
  {
  	int nid = next_online_node(pgdat->node_id);
  
  	if (nid == MAX_NUMNODES)
  		return NULL;
  	return NODE_DATA(nid);
  }
95144c788   KAMEZAWA Hiroyuki   [PATCH] uninline ...
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
  
  /*
   * next_zone - helper magic for for_each_zone()
   */
  struct zone *next_zone(struct zone *zone)
  {
  	pg_data_t *pgdat = zone->zone_pgdat;
  
  	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
  		zone++;
  	else {
  		pgdat = next_online_pgdat(pgdat);
  		if (pgdat)
  			zone = pgdat->node_zones;
  		else
  			zone = NULL;
  	}
  	return zone;
  }
95144c788   KAMEZAWA Hiroyuki   [PATCH] uninline ...
41

19770b326   Mel Gorman   mm: filter based ...
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
  static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
  {
  #ifdef CONFIG_NUMA
  	return node_isset(zonelist_node_idx(zref), *nodes);
  #else
  	return 1;
  #endif /* CONFIG_NUMA */
  }
  
  /* Returns the next zone at or below highest_zoneidx in a zonelist */
  struct zoneref *next_zones_zonelist(struct zoneref *z,
  					enum zone_type highest_zoneidx,
  					nodemask_t *nodes,
  					struct zone **zone)
  {
  	/*
  	 * Find the next suitable zone to use for the allocation.
  	 * Only filter based on nodemask if it's set
  	 */
  	if (likely(nodes == NULL))
  		while (zonelist_zone_idx(z) > highest_zoneidx)
  			z++;
  	else
  		while (zonelist_zone_idx(z) > highest_zoneidx ||
  				(z->zone && !zref_in_nodemask(z, nodes)))
  			z++;
5bead2a06   Mel Gorman   mm: mark the corr...
68
  	*zone = zonelist_zone(z);
19770b326   Mel Gorman   mm: filter based ...
69
70
  	return z;
  }
eb33575cf   Mel Gorman   [ARM] Double chec...
71
72
73
74
75
76
77
78
79
80
81
82
83
84
  
  #ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
  int memmap_valid_within(unsigned long pfn,
  					struct page *page, struct zone *zone)
  {
  	if (page_to_pfn(page) != pfn)
  		return 0;
  
  	if (page_zone(page) != zone)
  		return 0;
  
  	return 1;
  }
  #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
7f5e86c2c   Konstantin Khlebnikov   mm: add link from...
85

bea8c150a   Hugh Dickins   memcg: fix hotplu...
86
  void lruvec_init(struct lruvec *lruvec)
7f5e86c2c   Konstantin Khlebnikov   mm: add link from...
87
88
89
90
91
92
93
  {
  	enum lru_list lru;
  
  	memset(lruvec, 0, sizeof(struct lruvec));
  
  	for_each_lru(lru)
  		INIT_LIST_HEAD(&lruvec->lists[lru]);
7f5e86c2c   Konstantin Khlebnikov   mm: add link from...
94
  }
4468b8f1e   Mel Gorman   mm: uninline page...
95

90572890d   Peter Zijlstra   mm: numa: Change ...
96
97
  #if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
  int page_cpupid_xchg_last(struct page *page, int cpupid)
4468b8f1e   Mel Gorman   mm: uninline page...
98
99
  {
  	unsigned long old_flags, flags;
90572890d   Peter Zijlstra   mm: numa: Change ...
100
  	int last_cpupid;
4468b8f1e   Mel Gorman   mm: uninline page...
101
102
103
  
  	do {
  		old_flags = flags = page->flags;
90572890d   Peter Zijlstra   mm: numa: Change ...
104
  		last_cpupid = page_cpupid_last(page);
4468b8f1e   Mel Gorman   mm: uninline page...
105

90572890d   Peter Zijlstra   mm: numa: Change ...
106
107
  		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
  		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
4468b8f1e   Mel Gorman   mm: uninline page...
108
  	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));
90572890d   Peter Zijlstra   mm: numa: Change ...
109
  	return last_cpupid;
4468b8f1e   Mel Gorman   mm: uninline page...
110
111
  }
  #endif