Blame view

include/linux/page_cgroup.h 4.4 KB
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
1
2
  #ifndef __LINUX_PAGE_CGROUP_H
  #define __LINUX_PAGE_CGROUP_H
6b3ae58ef   Johannes Weiner   memcg: remove dir...
3
4
5
6
7
8
9
10
11
  /*
   * Bit numbers used in page_cgroup->flags.  An enum (rather than a set
   * of #defines) so the __GENERATING_BOUNDS_H machinery below can pick
   * up __NR_PCG_FLAGS at build time.
   */
  enum {
  	/* bits protected by PCG_LOCK (together with pc->mem_cgroup) */
  	PCG_LOCK,		/* lock bit for pc->mem_cgroup and the bits below */
  	PCG_CACHE,		/* page was charged as page cache */
  	PCG_USED,		/* this page_cgroup is in use */
  	PCG_MIGRATION,		/* page is under page migration */
  	/* bits for mem_cgroup and file/IO status */
  	PCG_MOVE_LOCK,		/* guards against races with move_account */
  	PCG_FILE_MAPPED,	/* page is accounted as "mapped" */
  	__NR_PCG_FLAGS,		/* number of flag bits; keep last */
  };
  
  #ifndef __GENERATING_BOUNDS_H
  #include <generated/bounds.h>
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
17
18
  #ifdef CONFIG_CGROUP_MEM_RES_CTLR
  #include <linux/bit_spinlock.h>
6b3ae58ef   Johannes Weiner   memcg: remove dir...
19

52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
20
21
22
23
24
25
26
27
28
29
  /*
   * Page Cgroup can be seen as an extension of mem_map: a page_cgroup is
   * associated with every page descriptor and identifies the cgroup the
   * page belongs to.  All page_cgroups are allocated at boot or on memory
   * hotplug, so the page_cgroup for a valid pfn always exists.
   */
  struct page_cgroup {
  	unsigned long flags;		/* PCG_* bits */
  	struct mem_cgroup *mem_cgroup;	/* owning cgroup; guarded by PCG_LOCK */
  };
31168481c   Al Viro   meminit section w...
31
  /* Per-node page_cgroup setup hook (meminit section); presumably defined in mm/page_cgroup.c — confirm there. */
  void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
ca371c0d7   KAMEZAWA Hiroyuki   memcg: fix page_c...
32
33
34
35
36
37
38
39
40
41
42
43
  
  /*
   * Boot-time setup entry points.  Exactly one of the two is a real
   * (out-of-line) function per memory model; the other is a no-op stub.
   */
  #ifndef CONFIG_SPARSEMEM
  void __init page_cgroup_init_flatmem(void);
  static inline void __init page_cgroup_init(void)
  {
  }
  #else
  extern void __init page_cgroup_init(void);
  static inline void __init page_cgroup_init_flatmem(void)
  {
  }
  #endif
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
44
  /* Map a struct page to its page_cgroup (implementation not visible in this header). */
  struct page_cgroup *lookup_page_cgroup(struct page *page);
6b3ae58ef   Johannes Weiner   memcg: remove dir...
45
  /* Presumably the inverse of lookup_page_cgroup(): page_cgroup -> page — confirm in mm/page_cgroup.c. */
  struct page *lookup_cgroup_page(struct page_cgroup *pc);
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
46
47
48
49
50
51
52
53
54
55
56
57
  
  /*
   * Generators for the PageCgroupXXX() accessors; each expands to a
   * static inline operating on the matching PCG_* bit in pc->flags.
   */
  #define TESTPCGFLAG(uname, lname)				\
  static inline int PageCgroup##uname(struct page_cgroup *pc)	\
  	{ return test_bit(PCG_##lname, &pc->flags); }
  
  #define SETPCGFLAG(uname, lname)				\
  static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
  	{ set_bit(PCG_##lname, &pc->flags); }
  
  #define CLEARPCGFLAG(uname, lname)				\
  static inline void ClearPageCgroup##uname(struct page_cgroup *pc)	\
  	{ clear_bit(PCG_##lname, &pc->flags); }
  
  #define TESTCLEARPCGFLAG(uname, lname)				\
  static inline int TestClearPageCgroup##uname(struct page_cgroup *pc)	\
  	{ return test_and_clear_bit(PCG_##lname, &pc->flags); }
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
61
62
  /* Cache flag is set only once (at allocation) */
  TESTPCGFLAG(Cache, CACHE)
4b3bde4c9   Balbir Singh   memcg: remove the...
63
64
  CLEARPCGFLAG(Cache, CACHE)
  SETPCGFLAG(Cache, CACHE)
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
65
66
67
  
  TESTPCGFLAG(Used, USED)
  CLEARPCGFLAG(Used, USED)
4b3bde4c9   Balbir Singh   memcg: remove the...
68
  SETPCGFLAG(Used, USED)
8725d5416   KAMEZAWA Hiroyuki   memcg: fix race i...
69
70
71
  SETPCGFLAG(FileMapped, FILE_MAPPED)
  CLEARPCGFLAG(FileMapped, FILE_MAPPED)
  TESTPCGFLAG(FileMapped, FILE_MAPPED)
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
72
73
74
  SETPCGFLAG(Migration, MIGRATION)
  CLEARPCGFLAG(Migration, MIGRATION)
  TESTPCGFLAG(Migration, MIGRATION)
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
75
76
  static inline void lock_page_cgroup(struct page_cgroup *pc)
  {
dbd4ea78f   KAMEZAWA Hiroyuki   memcg: add lock t...
77
78
79
80
  	/*
  	 * Don't take this lock in IRQ context.
  	 * This lock is for pc->mem_cgroup, USED, CACHE, MIGRATION
  	 */
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
81
82
  	bit_spin_lock(PCG_LOCK, &pc->flags);
  }
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
83
84
85
86
  /* Release the bit spinlock taken by lock_page_cgroup(). */
  static inline void unlock_page_cgroup(struct page_cgroup *pc)
  {
  	bit_spin_unlock(PCG_LOCK, &pc->flags);
  }
dbd4ea78f   KAMEZAWA Hiroyuki   memcg: add lock t...
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
  static inline void move_lock_page_cgroup(struct page_cgroup *pc,
  	unsigned long *flags)
  {
  	/*
  	 * Page-cache statistics kept in pc->flags are updated from both
  	 * normal and IRQ context, so IRQs must be disabled while the
  	 * move lock is held to avoid deadlock.
  	 */
  	local_irq_save(*flags);
  	bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
  }
  
  static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
  	unsigned long *flags)
  {
  	/* Drop the move lock, then restore the IRQ state saved at lock time. */
  	bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
  	local_irq_restore(*flags);
  }
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
104
105
  #else /* CONFIG_CGROUP_MEM_RES_CTLR */
  struct page_cgroup;
31168481c   Al Viro   meminit section w...
106
  static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
  {
  	/* nothing to do when memory cgroups are not configured */
  }
  
  /* Without memcg there is no page_cgroup to look up. */
  static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
  {
  	return NULL;
  }
94b6da5ab   KAMEZAWA Hiroyuki   memcg: fix page_c...
114
115
116
117
  
  /* !CONFIG_CGROUP_MEM_RES_CTLR: boot-time setup is a no-op. */
  static inline void page_cgroup_init(void)
  {
  }
ca371c0d7   KAMEZAWA Hiroyuki   memcg: fix page_c...
118
119
120
  /* !CONFIG_CGROUP_MEM_RES_CTLR: FLATMEM setup is a no-op as well. */
  static inline void __init page_cgroup_init_flatmem(void)
  {
  }
6b3ae58ef   Johannes Weiner   memcg: remove dir...
121
  #endif /* CONFIG_CGROUP_MEM_RES_CTLR */
27a7faa07   KAMEZAWA Hiroyuki   memcg: swap cgrou...
122

27a7faa07   KAMEZAWA Hiroyuki   memcg: swap cgrou...
123
  #include <linux/swap.h>
97572751d   Jaswinder Singh Rajput   includecheck fix:...
124
125
  
  #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
  /*
   * Swap accounting: out-of-line accessors for the per-swap-entry cgroup
   * id mapping, plus swapon/swapoff bookkeeping hooks (implementation
   * not visible in this header — see mm/page_cgroup.c).
   */
  extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
  					unsigned short old, unsigned short new);
  extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
  extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
  extern int swap_cgroup_swapon(int type, unsigned long max_pages);
  extern void swap_cgroup_swapoff(int type);
  #else
27a7faa07   KAMEZAWA Hiroyuki   memcg: swap cgrou...
133
134
  
  static inline
a3b2d6926   KAMEZAWA Hiroyuki   cgroups: use css ...
135
  unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
27a7faa07   KAMEZAWA Hiroyuki   memcg: swap cgrou...
136
  {
a3b2d6926   KAMEZAWA Hiroyuki   cgroups: use css ...
137
  	return 0;
27a7faa07   KAMEZAWA Hiroyuki   memcg: swap cgrou...
138
139
140
  }
  
  static inline
9fb4b7cc0   Bob Liu   page_cgroup: add ...
141
  unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
27a7faa07   KAMEZAWA Hiroyuki   memcg: swap cgrou...
142
  {
a3b2d6926   KAMEZAWA Hiroyuki   cgroups: use css ...
143
  	return 0;
27a7faa07   KAMEZAWA Hiroyuki   memcg: swap cgrou...
144
145
146
147
148
149
150
151
152
153
154
155
  }
  
  /* Stub: report success, there is no swap-cgroup state to set up. */
  static inline int
  swap_cgroup_swapon(int type, unsigned long max_pages)
  {
  	return 0;
  }
  
  /* Stub: nothing to tear down at swapoff. */
  static inline void swap_cgroup_swapoff(int type)
  {
  }
6b3ae58ef   Johannes Weiner   memcg: remove dir...
156
157
158
159
160
  #endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */
  
  #endif /* !__GENERATING_BOUNDS_H */
  
  #endif /* __LINUX_PAGE_CGROUP_H */