mm/percpu-internal.h

  /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _MM_PERCPU_INTERNAL_H
  #define _MM_PERCPU_INTERNAL_H
  
  #include <linux/types.h>
  #include <linux/percpu.h>
  /*
 * There are two chunk types: root and memcg-aware.
 * Chunks of each type are kept on separate slot lists.
 *
 * Memcg-aware chunks have an attached vector of obj_cgroup pointers, which is
 * used to store memcg membership data of a percpu object.  Obj_cgroups are
 * ref-counted pointers to a memory cgroup which can be dynamically switched
 * to point at the parent memory cgroup.  This allows reclaiming a deleted
 * memory cgroup without having to reclaim all outstanding objects that hold
 * a reference to it.
 */
  enum pcpu_chunk_type {
  	PCPU_CHUNK_ROOT,
  #ifdef CONFIG_MEMCG_KMEM
  	PCPU_CHUNK_MEMCG,
  #endif
  	PCPU_NR_CHUNK_TYPES,
  	PCPU_FAIL_ALLOC = PCPU_NR_CHUNK_TYPES
  };
  
  /*
   * pcpu_block_md is the metadata block struct.
   * Each chunk's bitmap is split into a number of full blocks.
   * All units are in terms of bits.
   *
 * The scan hint is the largest known contiguous free area before the contig
 * hint.  It is not necessarily the actual largest contiguous free area,
 * though.  There is an invariant that scan_hint_start > contig_hint_start
 * iff scan_hint == contig_hint.  This is necessary because, when scanning
 * forward, we don't know whether a newly found contiguous area would be
 * better than the current contig hint.
   */
  struct pcpu_block_md {
  	int			scan_hint;	/* scan hint for block */
  	int			scan_hint_start; /* block relative starting
  						    position of the scan hint */
  	int                     contig_hint;    /* contig hint for block */
  	int                     contig_hint_start; /* block relative starting
  						      position of the contig hint */
  	int                     left_free;      /* size of free space along
  						   the left side of the block */
  	int                     right_free;     /* size of free space along
  						   the right side of the block */
  	int                     first_free;     /* block position of first free */
	int			nr_bits;	/* total number of bits the
						   block is responsible for */
  };
  struct pcpu_chunk {
  #ifdef CONFIG_PERCPU_STATS
  	int			nr_alloc;	/* # of allocations */
  	size_t			max_alloc_size; /* largest allocation size */
  #endif
  	struct list_head	list;		/* linked to pcpu_slot lists */
  	int			free_bytes;	/* free bytes in the chunk */
  	struct pcpu_block_md	chunk_md;
  	void			*base_addr;	/* base address of this chunk */
  	unsigned long		*alloc_map;	/* allocation map */
  	unsigned long		*bound_map;	/* boundary map */
  	struct pcpu_block_md	*md_blocks;	/* metadata blocks */
  
  	void			*data;		/* chunk data */
  	bool			immutable;	/* no [de]population allowed */
  	int			start_offset;	/* the overlap with the previous
  						   region to have a page aligned
  						   base_addr */
  	int			end_offset;	/* additional area required to
  						   have the region end page
  						   aligned */
  #ifdef CONFIG_MEMCG_KMEM
  	struct obj_cgroup	**obj_cgroups;	/* vector of object cgroups */
  #endif
  
  	int			nr_pages;	/* # of pages served by this chunk */
  	int			nr_populated;	/* # of populated pages */
  	int                     nr_empty_pop_pages; /* # of empty populated pages */
  	unsigned long		populated[];	/* populated bitmap */
  };
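
/*
 * populated[] is a flexible array member, so struct pcpu_chunk must be
 * allocated with enough trailing space for it.  Presumably it carries one
 * bit per page of the region the chunk serves; the actual sizing is done in
 * mm/percpu.c.
 */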
  
  extern spinlock_t pcpu_lock;
  extern struct list_head *pcpu_chunk_lists;
  extern int pcpu_nr_slots;
  extern int pcpu_nr_empty_pop_pages;
  
  extern struct pcpu_chunk *pcpu_first_chunk;
  extern struct pcpu_chunk *pcpu_reserved_chunk;
  /**
   * pcpu_chunk_nr_blocks - converts nr_pages to # of md_blocks
   * @chunk: chunk of interest
   *
   * This conversion is from the number of physical pages that the chunk
   * serves to the number of bitmap blocks used.
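 *
 * For example, assuming PCPU_BITMAP_BLOCK_SIZE is defined as PAGE_SIZE (as
 * it currently is), a chunk serving 4 pages is covered by 4 metadata blocks.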
   */
  static inline int pcpu_chunk_nr_blocks(struct pcpu_chunk *chunk)
  {
  	return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
  }
  
  /**
   * pcpu_nr_pages_to_map_bits - converts the pages to size of bitmap
   * @pages: number of physical pages
   *
   * This conversion is from physical pages to the number of bits
   * required in the bitmap.
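 *
 * For example, assuming a 4 KiB PAGE_SIZE and the 4-byte PCPU_MIN_ALLOC_SIZE,
 * each page contributes 4096 / 4 = 1024 bits to the bitmap.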
   */
  static inline int pcpu_nr_pages_to_map_bits(int pages)
  {
  	return pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
  }
  
  /**
   * pcpu_chunk_map_bits - helper to convert nr_pages to size of bitmap
   * @chunk: chunk of interest
   *
   * This conversion is from the number of physical pages that the chunk
   * serves to the number of bits in the bitmap.
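 *
 * With the current PCPU_BITMAP_BLOCK_SIZE == PAGE_SIZE definition, this is
 * also pcpu_chunk_nr_blocks(chunk) * PCPU_BITMAP_BLOCK_BITS.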
   */
  static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
  {
  	return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
  }
  #ifdef CONFIG_MEMCG_KMEM
  static inline enum pcpu_chunk_type pcpu_chunk_type(struct pcpu_chunk *chunk)
  {
  	if (chunk->obj_cgroups)
  		return PCPU_CHUNK_MEMCG;
  	return PCPU_CHUNK_ROOT;
  }
  
  static inline bool pcpu_is_memcg_chunk(enum pcpu_chunk_type chunk_type)
  {
  	return chunk_type == PCPU_CHUNK_MEMCG;
  }
  
  #else
  static inline enum pcpu_chunk_type pcpu_chunk_type(struct pcpu_chunk *chunk)
  {
  	return PCPU_CHUNK_ROOT;
  }
  
  static inline bool pcpu_is_memcg_chunk(enum pcpu_chunk_type chunk_type)
  {
  	return false;
  }
  #endif
  
  static inline struct list_head *pcpu_chunk_list(enum pcpu_chunk_type chunk_type)
  {
  	return &pcpu_chunk_lists[pcpu_nr_slots *
  				 pcpu_is_memcg_chunk(chunk_type)];
  }
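
/*
 * A minimal usage sketch, assuming the caller has already computed the
 * chunk's slot index (mm/percpu.c derives it from the chunk's free space;
 * "slot" below is only illustrative):
 *
 *	struct list_head *head =
 *		&pcpu_chunk_list(pcpu_chunk_type(chunk))[slot];
 *
 *	list_move(&chunk->list, head);
 *
 * The per-type lists are laid out back to back, pcpu_nr_slots entries per
 * chunk type, which is why the memcg-aware lists begin at index
 * pcpu_nr_slots.
 */
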
  #ifdef CONFIG_PERCPU_STATS
  
  #include <linux/spinlock.h>
  
  struct percpu_stats {
  	u64 nr_alloc;		/* lifetime # of allocations */
  	u64 nr_dealloc;		/* lifetime # of deallocations */
  	u64 nr_cur_alloc;	/* current # of allocations */
  	u64 nr_max_alloc;	/* max # of live allocations */
  	u32 nr_chunks;		/* current # of live chunks */
  	u32 nr_max_chunks;	/* max # of live chunks */
	size_t min_alloc_size;	/* min allocation size */
  	size_t max_alloc_size;	/* max allocation size */
  };
  
  extern struct percpu_stats pcpu_stats;
  extern struct pcpu_alloc_info pcpu_stats_ai;
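
/*
 * These counters are updated by the inline helpers below; they are presumably
 * read and reported by mm/percpu-stats.c when CONFIG_PERCPU_STATS is enabled.
 */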
  
/*
 * Saved for debug purposes.  The flexible array member at the end of
 * struct pcpu_alloc_info is deliberately not copied.
 */
  static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
  {
  	memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info));
  
  	/* initialize min_alloc_size to unit_size */
  	pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size;
  }
  
  /*
   * pcpu_stats_area_alloc - increment area allocation stats
   * @chunk: the location of the area being allocated
   * @size: size of area to allocate in bytes
   *
   * CONTEXT:
   * pcpu_lock.
   */
  static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
  {
  	lockdep_assert_held(&pcpu_lock);
  
  	pcpu_stats.nr_alloc++;
  	pcpu_stats.nr_cur_alloc++;
  	pcpu_stats.nr_max_alloc =
  		max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);
  	pcpu_stats.min_alloc_size =
  		min(pcpu_stats.min_alloc_size, size);
  	pcpu_stats.max_alloc_size =
  		max(pcpu_stats.max_alloc_size, size);
  
  	chunk->nr_alloc++;
  	chunk->max_alloc_size = max(chunk->max_alloc_size, size);
  }
  
  /*
   * pcpu_stats_area_dealloc - decrement allocation stats
   * @chunk: the location of the area being deallocated
   *
   * CONTEXT:
   * pcpu_lock.
   */
  static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
  {
  	lockdep_assert_held(&pcpu_lock);
  
  	pcpu_stats.nr_dealloc++;
  	pcpu_stats.nr_cur_alloc--;
  
  	chunk->nr_alloc--;
  }
  
  /*
   * pcpu_stats_chunk_alloc - increment chunk stats
   */
  static inline void pcpu_stats_chunk_alloc(void)
  {
  	unsigned long flags;
  	spin_lock_irqsave(&pcpu_lock, flags);
  
  	pcpu_stats.nr_chunks++;
  	pcpu_stats.nr_max_chunks =
  		max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);
  	spin_unlock_irqrestore(&pcpu_lock, flags);
  }
  
  /*
   * pcpu_stats_chunk_dealloc - decrement chunk stats
   */
  static inline void pcpu_stats_chunk_dealloc(void)
  {
  	unsigned long flags;
  	spin_lock_irqsave(&pcpu_lock, flags);
  
  	pcpu_stats.nr_chunks--;
  	spin_unlock_irqrestore(&pcpu_lock, flags);
  }
  
  #else
  
  static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
  {
  }
  
  static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
  {
  }
  
  static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
  {
  }
  
  static inline void pcpu_stats_chunk_alloc(void)
  {
  }
  
  static inline void pcpu_stats_chunk_dealloc(void)
  {
  }
  
  #endif /* !CONFIG_PERCPU_STATS */
  #endif