mm/mmzone.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * linux/mm/mmzone.c
   *
 * management code for pgdats, zones and page flags
   */
  #include <linux/stddef.h>
  #include <linux/mm.h>
  #include <linux/mmzone.h>
  #include <trace/hooks/mm.h>
  
  struct pglist_data *first_online_pgdat(void)
  {
  	return NODE_DATA(first_online_node);
  }
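
/*
 * Editor's note, not in the original file: first_online_pgdat() returns
 * the pgdat of the lowest-numbered online NUMA node.  Together with
 * next_online_pgdat() below, it backs the for_each_online_pgdat()
 * iterator defined in <linux/mmzone.h>.
 */
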
  struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
  {
  	int nid = next_online_node(pgdat->node_id);
  
  	if (nid == MAX_NUMNODES)
  		return NULL;
  	return NODE_DATA(nid);
  }
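
/*
 * Usage sketch (editor's illustration, assuming the standard
 * for_each_online_pgdat() wrapper from <linux/mmzone.h>):
 *
 *	struct pglist_data *pgdat;
 *
 *	for_each_online_pgdat(pgdat)
 *		pr_info("node %d spans %lu pages\n",
 *			pgdat->node_id, pgdat->node_spanned_pages);
 */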
  
  /*
   * next_zone - helper magic for for_each_zone()
   */
  struct zone *next_zone(struct zone *zone)
  {
  	pg_data_t *pgdat = zone->zone_pgdat;
  
  	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
  		zone++;
  	else {
  		pgdat = next_online_pgdat(pgdat);
  		if (pgdat)
  			zone = pgdat->node_zones;
  		else
  			zone = NULL;
  	}
  	return zone;
  }
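
/*
 * Usage sketch (editor's illustration): next_zone() is normally reached
 * via the for_each_zone() macro from <linux/mmzone.h>, which visits
 * every zone of every online node:
 *
 *	struct zone *zone;
 *
 *	for_each_zone(zone)
 *		if (populated_zone(zone))
 *			pr_info("zone %s on node %d\n",
 *				zone->name, zone_to_nid(zone));
 */
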
  static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
  {
  #ifdef CONFIG_NUMA
  	return node_isset(zonelist_node_idx(zref), *nodes);
  #else
  	return 1;
  #endif /* CONFIG_NUMA */
  }
  
  /* Returns the next zone at or below highest_zoneidx in a zonelist */
  struct zoneref *__next_zones_zonelist(struct zoneref *z,
  					enum zone_type highest_zoneidx,
  					nodemask_t *nodes)
  {
  	/*
  	 * Find the next suitable zone to use for the allocation.
  	 * Only filter based on nodemask if it's set
  	 */
  	if (unlikely(nodes == NULL))
  		while (zonelist_zone_idx(z) > highest_zoneidx)
  			z++;
  	else
  		while (zonelist_zone_idx(z) > highest_zoneidx ||
  				(z->zone && !zref_in_nodemask(z, nodes)))
  			z++;
  	return z;
  }
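
/*
 * Usage sketch (editor's illustration): callers normally reach this
 * through first_zones_zonelist() or for_each_zone_zonelist_nodemask()
 * from <linux/mmzone.h> rather than calling it directly; zonelist,
 * highidx and nodemask below are caller-supplied:
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist_nodemask(zone, z, zonelist, highidx, nodemask)
 *		pr_info("candidate zone %s\n", zone->name);
 */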
  
  #ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
  bool memmap_valid_within(unsigned long pfn,
  					struct page *page, struct zone *zone)
  {
  	if (page_to_pfn(page) != pfn)
  		return false;
  
  	if (page_zone(page) != zone)
  		return false;

	return true;
  }
  #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
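
/*
 * Usage sketch (editor's illustration): on architectures with holes in
 * the memmap, pfn walkers are expected to recheck each page before
 * touching it, since pfn_valid() can be true for a section that still
 * contains holes:
 *
 *	struct page *page = pfn_to_page(pfn);
 *
 *	if (!memmap_valid_within(pfn, page, zone))
 *		continue;
 */
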
  void lruvec_init(struct lruvec *lruvec)
  {
  	enum lru_list lru;
  
  	memset(lruvec, 0, sizeof(struct lruvec));
  
  	for_each_lru(lru)
  		INIT_LIST_HEAD(&lruvec->lists[lru]);
  }
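
/*
 * Editor's note (illustration): lruvec_init() leaves its caller with an
 * empty set of LRU lists.  For example, after
 *
 *	struct lruvec lruvec;
 *
 *	lruvec_init(&lruvec);
 *
 * every list head, such as lruvec.lists[LRU_INACTIVE_ANON], is an empty
 * list ready for pages to be added.
 */
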
  #if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
  int page_cpupid_xchg_last(struct page *page, int cpupid)
  {
  	unsigned long old_flags, flags;
  	int last_cpupid;
  
  	do {
  		old_flags = flags = page->flags;
  		last_cpupid = page_cpupid_last(page);

		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
  		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
  	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));
  	return last_cpupid;
  }
  #endif
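
/*
 * Editor's note (illustration): the cmpxchg() loop above atomically
 * swaps the last_cpupid field packed into page->flags while leaving all
 * other flag bits intact, and returns the previous value.  A caller can
 * use that return value to detect repeated access, e.g. (this_cpupid is
 * a hypothetical caller-computed value):
 *
 *	int last = page_cpupid_xchg_last(page, this_cpupid);
 *
 * and compare last with this_cpupid to see whether the same CPU/task
 * pair touched the page on consecutive faults, as NUMA balancing does.
 */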
  
  enum zone_type gfp_zone(gfp_t flags)
  {
  	enum zone_type z;
  	gfp_t local_flags = flags;
  	int bit;
  
  	trace_android_rvh_set_gfp_zone_flags(&local_flags);
  
  	bit = (__force int) ((local_flags) & GFP_ZONEMASK);
  
  	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
  					 ((1 << GFP_ZONES_SHIFT) - 1);
  	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
  	return z;
  }
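
/*
 * Editor's note (illustration): GFP_ZONE_TABLE packs, for every
 * combination of the zone modifier bits in GFP_ZONEMASK, the resulting
 * zone index into GFP_ZONES_SHIFT-wide fields, and GFP_ZONE_BAD flags
 * the invalid combinations.  The Android restricted vendor hook above
 * lets a vendor module rewrite the zone bits before the lookup.  For
 * example:
 *
 *	gfp_zone(GFP_KERNEL) is ZONE_NORMAL (no zone bits set);
 *	gfp_zone(GFP_KERNEL | __GFP_DMA) is ZONE_DMA where configured.
 */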