include/linux/hugetlb.h

  /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _LINUX_HUGETLB_H
  #define _LINUX_HUGETLB_H
  #include <linux/mm_types.h>
  #include <linux/mmdebug.h>
  #include <linux/fs.h>
  #include <linux/hugetlb_inline.h>
  #include <linux/cgroup.h>
  #include <linux/list.h>
  #include <linux/kref.h>
  #include <linux/pgtable.h>
  #include <linux/gfp.h>

  struct ctl_table;
  struct user_struct;
  struct mmu_gather;

  #ifndef is_hugepd
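/*
 * Fallback for architectures without hugepage directory (hugepd)
 * support: is_hugepd() always evaluates to false.
 */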
  typedef struct { unsigned long pd; } hugepd_t;
  #define is_hugepd(hugepd) (0)
  #define __hugepd(x) ((hugepd_t) { (x) })
  #endif
  #ifdef CONFIG_HUGETLB_PAGE
  
  #include <linux/mempolicy.h>
  #include <linux/shm.h>
  #include <asm/tlbflush.h>

  struct hugepage_subpool {
	spinlock_t lock;	/* protects the counters below */
	long count;		/* usage (reference) count of the subpool */
  	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
  	long used_hpages;	/* Used count against maximum, includes */
  				/* both alloced and reserved pages. */
  	struct hstate *hstate;
  	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
  	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
  };
  struct resv_map {
  	struct kref refs;
  	spinlock_t lock;
  	struct list_head regions;
	/*
	 * Cache of preallocated file_region entries, kept so that region
	 * adds in progress never need to allocate while holding the lock.
	 */
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
  #ifdef CONFIG_CGROUP_HUGETLB
  	/*
  	 * On private mappings, the counter to uncharge reservations is stored
  	 * here. If these fields are 0, then either the mapping is shared, or
  	 * cgroup accounting is disabled for this resv_map.
  	 */
  	struct page_counter *reservation_counter;
  	unsigned long pages_per_hpage;
  	struct cgroup_subsys_state *css;
  #endif
  };
  
  /*
   * Region tracking -- allows tracking of reservations and instantiated pages
   *                    across the pages in a mapping.
   *
   * The region data structures are embedded into a resv_map and protected
   * by a resv_map's lock.  The set of regions within the resv_map represent
   * reservations for huge pages, or huge pages that have already been
   * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
   *
   * For example, a file region structure with from == 0 and to == 4 represents
   * four huge pages in a mapping.  It is important to note that the to element
   * represents the first element past the end of the region. This is used in
   * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
   *
   * Interval notation of the form [from, to) will be used to indicate that
   * the endpoint from is inclusive and to is exclusive.
   */
  struct file_region {
  	struct list_head link;
  	long from;
  	long to;
  #ifdef CONFIG_CGROUP_HUGETLB
  	/*
  	 * On shared mappings, each reserved region appears as a struct
  	 * file_region in resv_map. These fields hold the info needed to
  	 * uncharge each reservation.
  	 */
  	struct page_counter *reservation_counter;
  	struct cgroup_subsys_state *css;
  #endif
  };
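
/*
 * Illustrative example: a resv_map whose region list holds [0, 4) and
 * [6, 10) tracks 4 + 4 = 8 huge pages; index 5 falls in the gap, so no
 * reservation or instantiated page is recorded for it.
 */
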
  extern struct resv_map *resv_map_alloc(void);
  void resv_map_release(struct kref *ref);
  extern spinlock_t hugetlb_lock;
  extern int hugetlb_max_hstate __read_mostly;
  #define for_each_hstate(h) \
  	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
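
/*
 * Example (illustrative) iteration over all registered huge page sizes:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: order %u\n", h->name, huge_page_order(h));
 */
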
  struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
  						long min_hpages);
  void hugepage_put_subpool(struct hugepage_subpool *spool);
  void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
  int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
  int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
  		loff_t *);
  int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
  		loff_t *);
  int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
  		loff_t *);

  int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
  long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
  			 struct page **, struct vm_area_struct **,
  			 unsigned long *, unsigned long *, long, unsigned int,
  			 int *);
  void unmap_hugepage_range(struct vm_area_struct *,
  			  unsigned long, unsigned long, struct page *);
  void __unmap_hugepage_range_final(struct mmu_gather *tlb,
  			  struct vm_area_struct *vma,
  			  unsigned long start, unsigned long end,
  			  struct page *ref_page);
  void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
  				unsigned long start, unsigned long end,
  				struct page *ref_page);
  void hugetlb_report_meminfo(struct seq_file *);
  int hugetlb_report_node_meminfo(char *buf, int len, int nid);
  void hugetlb_show_meminfo(void);
  unsigned long hugetlb_total_pages(void);
  vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  			unsigned long address, unsigned int flags);
  int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
  				struct vm_area_struct *dst_vma,
  				unsigned long dst_addr,
  				unsigned long src_addr,
  				struct page **pagep);
  int hugetlb_reserve_pages(struct inode *inode, long from, long to,
  						struct vm_area_struct *vma,
  						vm_flags_t vm_flags);
  long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
  						long freed);
  bool isolate_huge_page(struct page *page, struct list_head *list);
  void putback_active_hugepage(struct page *page);
  void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
  void free_huge_page(struct page *page);
  void hugetlb_fix_reserve_counts(struct inode *inode);
  extern struct mutex *hugetlb_fault_mutex_table;
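/*
 * Faults on the same (mapping, index) pair are serialized: callers hash
 * into hugetlb_fault_mutex_table and hold that mutex while handling a
 * hugetlb fault on that range.
 */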
  u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

  pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

  struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
  extern int sysctl_hugetlb_shm_group;
  extern struct list_head huge_boot_pages;

  /* arch callbacks */
  pte_t *huge_pte_alloc(struct mm_struct *mm,
  			unsigned long addr, unsigned long sz);
  pte_t *huge_pte_offset(struct mm_struct *mm,
  		       unsigned long addr, unsigned long sz);
  int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
  				unsigned long *addr, pte_t *ptep);
  void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
  				unsigned long *start, unsigned long *end);
  struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
  			      int write);
  struct page *follow_huge_pd(struct vm_area_struct *vma,
  			    unsigned long address, hugepd_t hpd,
  			    int flags, int pdshift);
  struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
  				pmd_t *pmd, int flags);
  struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
  				pud_t *pud, int flags);
  struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
  			     pgd_t *pgd, int flags);
  int pmd_huge(pmd_t pmd);
  int pud_huge(pud_t pud);
  unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
  		unsigned long address, unsigned long end, pgprot_t newprot);

  bool is_hugetlb_entry_migration(pte_t pte);

  #else /* !CONFIG_HUGETLB_PAGE */
  static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
  {
  }
  static inline unsigned long hugetlb_total_pages(void)
  {
  	return 0;
  }
  static inline struct address_space *hugetlb_page_mapping_lock_write(
  							struct page *hpage)
  {
  	return NULL;
  }
  static inline int huge_pmd_unshare(struct mm_struct *mm,
  					struct vm_area_struct *vma,
  					unsigned long *addr, pte_t *ptep)
  {
  	return 0;
  }
  
  static inline void adjust_range_if_pmd_sharing_possible(
  				struct vm_area_struct *vma,
  				unsigned long *start, unsigned long *end)
  {
  }
  static inline long follow_hugetlb_page(struct mm_struct *mm,
  			struct vm_area_struct *vma, struct page **pages,
  			struct vm_area_struct **vmas, unsigned long *position,
  			unsigned long *nr_pages, long i, unsigned int flags,
  			int *nonblocking)
  {
  	BUG();
  	return 0;
  }
  
  static inline struct page *follow_huge_addr(struct mm_struct *mm,
  					unsigned long address, int write)
  {
  	return ERR_PTR(-EINVAL);
  }
  
  static inline int copy_hugetlb_page_range(struct mm_struct *dst,
  			struct mm_struct *src, struct vm_area_struct *vma)
  {
  	BUG();
  	return 0;
  }
  static inline void hugetlb_report_meminfo(struct seq_file *m)
  {
  }

  static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
  {
  	return 0;
  }
  static inline void hugetlb_show_meminfo(void)
  {
  }
  
  static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
  				unsigned long address, hugepd_t hpd, int flags,
  				int pdshift)
  {
  	return NULL;
  }
  
  static inline struct page *follow_huge_pmd(struct mm_struct *mm,
  				unsigned long address, pmd_t *pmd, int flags)
  {
  	return NULL;
  }
  
  static inline struct page *follow_huge_pud(struct mm_struct *mm,
  				unsigned long address, pud_t *pud, int flags)
  {
  	return NULL;
  }
  
  static inline struct page *follow_huge_pgd(struct mm_struct *mm,
  				unsigned long address, pgd_t *pgd, int flags)
  {
  	return NULL;
  }
  
  static inline int prepare_hugepage_range(struct file *file,
  				unsigned long addr, unsigned long len)
  {
  	return -EINVAL;
  }
  
  static inline int pmd_huge(pmd_t pmd)
  {
  	return 0;
  }
  
  static inline int pud_huge(pud_t pud)
  {
  	return 0;
  }
  
  static inline int is_hugepage_only_range(struct mm_struct *mm,
  					unsigned long addr, unsigned long len)
  {
  	return 0;
  }
  
  static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
  				unsigned long addr, unsigned long end,
  				unsigned long floor, unsigned long ceiling)
  {
  	BUG();
  }
  
  static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
  						pte_t *dst_pte,
  						struct vm_area_struct *dst_vma,
  						unsigned long dst_addr,
  						unsigned long src_addr,
  						struct page **pagep)
  {
  	BUG();
  	return 0;
  }
  
  static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
  					unsigned long sz)
  {
  	return NULL;
  }

  static inline bool isolate_huge_page(struct page *page, struct list_head *list)
  {
  	return false;
  }

  static inline void putback_active_hugepage(struct page *page)
  {
  }
  
  static inline void move_hugetlb_state(struct page *oldpage,
  					struct page *newpage, int reason)
  {
  }
  
  static inline unsigned long hugetlb_change_protection(
  			struct vm_area_struct *vma, unsigned long address,
  			unsigned long end, pgprot_t newprot)
  {
  	return 0;
  }

  static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
  			struct vm_area_struct *vma, unsigned long start,
  			unsigned long end, struct page *ref_page)
  {
  	BUG();
  }
  static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
  			struct vm_area_struct *vma, unsigned long start,
  			unsigned long end, struct page *ref_page)
  {
  	BUG();
  }

  static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
  			struct vm_area_struct *vma, unsigned long address,
  			unsigned int flags)
  {
  	BUG();
  	return 0;
  }

  #endif /* !CONFIG_HUGETLB_PAGE */
/*
 * Hugepages at the page global directory.  If the arch supports
 * hugepages at pgd level, it needs to define this.
 */
  #ifndef pgd_huge
  #define pgd_huge(x)	0
  #endif
  #ifndef p4d_huge
  #define p4d_huge(x)	0
  #endif
  
  #ifndef pgd_write
  static inline int pgd_write(pgd_t pgd)
  {
  	BUG();
  	return 0;
  }
  #endif
  #define HUGETLB_ANON_FILE "anon_hugepage"
  enum {
  	/*
	 * The file will be used as a shm file, so shmfs accounting rules
	 * apply.
  	 */
  	HUGETLB_SHMFS_INODE     = 1,
  	/*
  	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
  	 */
  	HUGETLB_ANONHUGE_INODE  = 2,
  };
  #ifdef CONFIG_HUGETLBFS
  struct hugetlbfs_sb_info {
  	long	max_inodes;   /* inodes allowed */
  	long	free_inodes;  /* inodes free */
  	spinlock_t	stat_lock;
  	struct hstate *hstate;
  	struct hugepage_subpool *spool;
  	kuid_t	uid;
  	kgid_t	gid;
  	umode_t mode;
  };
  static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
  {
  	return sb->s_fs_info;
  }
  struct hugetlbfs_inode_info {
  	struct shared_policy policy;
  	struct inode vfs_inode;
  	unsigned int seals;
  };
  
  static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
  {
  	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
  }
  extern const struct file_operations hugetlbfs_file_operations;
  extern const struct vm_operations_struct hugetlb_vm_ops;
  struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
  				struct user_struct **user, int creat_flags,
  				int page_size_log);

  static inline bool is_file_hugepages(struct file *file)
  {
  	if (file->f_op == &hugetlbfs_file_operations)
  		return true;

  	return is_file_shm_hugepages(file);
  }
  static inline struct hstate *hstate_inode(struct inode *i)
  {
  	return HUGETLBFS_SB(i->i_sb)->hstate;
  }
  #else /* !CONFIG_HUGETLBFS */
  #define is_file_hugepages(file)			false
  static inline struct file *
  hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
  		struct user_struct **user, int creat_flags,
  		int page_size_log)
  {
  	return ERR_PTR(-ENOSYS);
  }

  static inline struct hstate *hstate_inode(struct inode *i)
  {
  	return NULL;
  }
  #endif /* !CONFIG_HUGETLBFS */
  #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
  unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  					unsigned long len, unsigned long pgoff,
  					unsigned long flags);
  #endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
  #ifdef CONFIG_HUGETLB_PAGE
  #define HSTATE_NAME_LEN 32
  /* Defines one hugetlb page size */
  struct hstate {
  	int next_nid_to_alloc;
  	int next_nid_to_free;
	unsigned int order;			/* huge page order: size == PAGE_SIZE << order */
	unsigned long mask;			/* huge page address mask */
	unsigned long max_huge_pages;		/* configured size of the persistent pool */
	unsigned long nr_huge_pages;		/* current number of huge pages */
	unsigned long free_huge_pages;		/* pages not yet handed out */
	unsigned long resv_huge_pages;		/* pages reserved but not yet faulted in */
	unsigned long surplus_huge_pages;	/* temporary, overcommitted pages */
	unsigned long nr_overcommit_huge_pages;	/* limit on surplus pages */
  	struct list_head hugepage_activelist;
  	struct list_head hugepage_freelists[MAX_NUMNODES];
  	unsigned int nr_huge_pages_node[MAX_NUMNODES];
  	unsigned int free_huge_pages_node[MAX_NUMNODES];
  	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
  #ifdef CONFIG_CGROUP_HUGETLB
  	/* cgroup control files */
  	struct cftype cgroup_files_dfl[7];
  	struct cftype cgroup_files_legacy[9];
  #endif
  	char name[HSTATE_NAME_LEN];
  };
  struct huge_bootmem_page {
  	struct list_head list;
  	struct hstate *hstate;
  };
  struct page *alloc_huge_page(struct vm_area_struct *vma,
  				unsigned long addr, int avoid_reserve);
  struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
  				nodemask_t *nmask, gfp_t gfp_mask);
  struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
  				unsigned long address);
  int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
  			pgoff_t idx);

  /* arch callback */
  int __init __alloc_bootmem_huge_page(struct hstate *h);
  int __init alloc_bootmem_huge_page(struct hstate *h);
  void __init hugetlb_add_hstate(unsigned order);
  bool __init arch_hugetlb_valid_size(unsigned long size);
  struct hstate *size_to_hstate(unsigned long size);
  
  #ifndef HUGE_MAX_HSTATE
  #define HUGE_MAX_HSTATE 1
  #endif
  
  extern struct hstate hstates[HUGE_MAX_HSTATE];
  extern unsigned int default_hstate_idx;
  
  #define default_hstate (hstates[default_hstate_idx])
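/*
 * hstates[] is populated in registration order; default_hstate is the
 * pool used when no explicit size is requested (e.g. mmap() without a
 * MAP_HUGE_* size).
 */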

  static inline struct hstate *hstate_file(struct file *f)
  {
  	return hstate_inode(file_inode(f));
  }
  static inline struct hstate *hstate_sizelog(int page_size_log)
  {
  	if (!page_size_log)
  		return &default_hstate;
  
  	return size_to_hstate(1UL << page_size_log);
  }
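
/*
 * Example (illustrative): hstate_sizelog(21) looks up
 * size_to_hstate(1UL << 21), i.e. the 2MB hstate on x86-64, while
 * hstate_sizelog(0) selects the default huge page size.
 */
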
  static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
  {
  	return hstate_file(vma->vm_file);
  }
  
  static inline unsigned long huge_page_size(struct hstate *h)
  {
  	return (unsigned long)PAGE_SIZE << h->order;
  }
  extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
  extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
  static inline unsigned long huge_page_mask(struct hstate *h)
  {
  	return h->mask;
  }
  
  static inline unsigned int huge_page_order(struct hstate *h)
  {
  	return h->order;
  }
  
  static inline unsigned huge_page_shift(struct hstate *h)
  {
  	return h->order + PAGE_SHIFT;
  }
  static inline bool hstate_is_gigantic(struct hstate *h)
  {
  	return huge_page_order(h) >= MAX_ORDER;
  }
  static inline unsigned int pages_per_huge_page(struct hstate *h)
  {
  	return 1 << h->order;
  }
  
/* i_blocks is accounted in 512-byte units; report huge pages in that unit. */
static inline unsigned int blocks_per_huge_page(struct hstate *h)
  {
  	return huge_page_size(h) / 512;
  }
  
  #include <asm/hugetlb.h>
  #ifndef is_hugepage_only_range
  static inline int is_hugepage_only_range(struct mm_struct *mm,
  					unsigned long addr, unsigned long len)
  {
  	return 0;
  }
  #define is_hugepage_only_range is_hugepage_only_range
  #endif
  #ifndef arch_clear_hugepage_flags
  static inline void arch_clear_hugepage_flags(struct page *page) { }
  #define arch_clear_hugepage_flags arch_clear_hugepage_flags
  #endif
  #ifndef arch_make_huge_pte
  static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
  				       struct page *page, int writable)
  {
  	return entry;
  }
  #endif
  static inline struct hstate *page_hstate(struct page *page)
  {
  	VM_BUG_ON_PAGE(!PageHuge(page), page);
  	return size_to_hstate(page_size(page));
  }
  static inline unsigned hstate_index_to_shift(unsigned index)
  {
  	return hstates[index].order + PAGE_SHIFT;
  }
  static inline int hstate_index(struct hstate *h)
  {
  	return h - hstates;
  }
  pgoff_t __basepage_index(struct page *page);
  
  /* Return page->index in PAGE_SIZE units */
  static inline pgoff_t basepage_index(struct page *page)
  {
  	if (!PageCompound(page))
  		return page->index;
  
  	return __basepage_index(page);
  }
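
/*
 * Example (illustrative): for a 2MB huge page whose head page has index
 * 0 in a hugetlbfs file, the third 4KB constituent page has
 * basepage_index() == 2, i.e. its file offset in PAGE_SIZE units.
 */
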
  extern int dissolve_free_huge_page(struct page *page);
  extern int dissolve_free_huge_pages(unsigned long start_pfn,
  				    unsigned long end_pfn);

  #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
  #ifndef arch_hugetlb_migration_supported
  static inline bool arch_hugetlb_migration_supported(struct hstate *h)
  {
	return huge_page_shift(h) == PMD_SHIFT ||
	       huge_page_shift(h) == PUD_SHIFT ||
	       huge_page_shift(h) == PGDIR_SHIFT;
  }
  #endif
  #else
  static inline bool arch_hugetlb_migration_supported(struct hstate *h)
  {
  	return false;
  }
  #endif
  
  static inline bool hugepage_migration_supported(struct hstate *h)
  {
  	return arch_hugetlb_migration_supported(h);
  }

/*
 * The movability check is different from the migration check: it
 * determines whether or not a huge page should be placed in a
 * movable zone.  Movability of any huge page needs to be checked
 * only if the huge page size is supported for migration; there
 * won't be any reason for the huge page to be movable if it is
 * not migratable to start with.  The huge page should also be
 * large enough to be placed under a movable zone and still
 * feasible enough to be migratable; mere presence in a movable
 * zone does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable because it's not feasible
 * to migrate them from a movable zone.
 */
  static inline bool hugepage_movable_supported(struct hstate *h)
  {
  	if (!hugepage_migration_supported(h))
  		return false;
  
  	if (hstate_is_gigantic(h))
  		return false;
  	return true;
  }
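
/*
 * Example (illustrative): on x86-64, 2MB pages are both migratable and
 * movable, while 1GB gigantic pages are migratable but not movable and
 * are therefore never placed in ZONE_MOVABLE.
 */
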
  /* Movability of hugepages depends on migration support. */
  static inline gfp_t htlb_alloc_mask(struct hstate *h)
  {
  	if (hugepage_movable_supported(h))
  		return GFP_HIGHUSER_MOVABLE;
  	else
  		return GFP_HIGHUSER;
  }
  static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
  {
  	gfp_t modified_mask = htlb_alloc_mask(h);
  
  	/* Some callers might want to enforce node */
  	modified_mask |= (gfp_mask & __GFP_THISNODE);
  	modified_mask |= (gfp_mask & __GFP_NOWARN);
  	return modified_mask;
  }
/*
 * PMD-sized huge pages use the split PMD page table lock; all other
 * sizes fall back to the per-mm page_table_lock.
 */
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
  					   struct mm_struct *mm, pte_t *pte)
  {
  	if (huge_page_size(h) == PMD_SIZE)
  		return pmd_lockptr(mm, (pmd_t *) pte);
  	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
  	return &mm->page_table_lock;
  }
  #ifndef hugepages_supported
  /*
 * Some platforms decide whether they support huge pages at boot
 * time.  Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
   */
  #define hugepages_supported() (HPAGE_SHIFT != 0)
  #endif

  void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
  
  static inline void hugetlb_count_add(long l, struct mm_struct *mm)
  {
  	atomic_long_add(l, &mm->hugetlb_usage);
  }
  
  static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
  {
  	atomic_long_sub(l, &mm->hugetlb_usage);
  }
  
#ifndef set_huge_swap_pte_at
/*
 * Architectures where a huge page is mapped by several contiguous page
 * table entries (e.g. arm64's contiguous PTEs) override this to write a
 * swap entry to all of them; the generic version degrades to
 * set_huge_pte_at().
 */
  static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
  					pte_t *ptep, pte_t pte, unsigned long sz)
  {
  	set_huge_pte_at(mm, addr, ptep, pte);
  }
  #endif
  
  #ifndef huge_ptep_modify_prot_start
  #define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
  static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
  						unsigned long addr, pte_t *ptep)
  {
  	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
  }
  #endif
  
  #ifndef huge_ptep_modify_prot_commit
  #define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
  static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
  						unsigned long addr, pte_t *ptep,
  						pte_t old_pte, pte_t pte)
  {
  	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
  }
  #endif
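
/*
 * Sketch of the assumed start/commit usage (cf. hugetlb_change_protection()
 * in mm/hugetlb.c):
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
 */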
  #else	/* CONFIG_HUGETLB_PAGE */
  struct hstate {};
  
  static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
  					   unsigned long addr,
  					   int avoid_reserve)
  {
  	return NULL;
  }
  static inline struct page *
  alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
  			nodemask_t *nmask, gfp_t gfp_mask)
  {
  	return NULL;
  }
  
  static inline struct page *alloc_huge_page_vma(struct hstate *h,
  					       struct vm_area_struct *vma,
  					       unsigned long address)
  {
  	return NULL;
  }
  
  static inline int __alloc_bootmem_huge_page(struct hstate *h)
  {
  	return 0;
  }
  
  static inline struct hstate *hstate_file(struct file *f)
  {
  	return NULL;
  }
  
  static inline struct hstate *hstate_sizelog(int page_size_log)
  {
  	return NULL;
  }
  
  static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
  {
  	return NULL;
  }
  
  static inline struct hstate *page_hstate(struct page *page)
  {
  	return NULL;
  }
  
  static inline unsigned long huge_page_size(struct hstate *h)
  {
  	return PAGE_SIZE;
  }
  
  static inline unsigned long huge_page_mask(struct hstate *h)
  {
  	return PAGE_MASK;
  }
  
  static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
  {
  	return PAGE_SIZE;
  }
  
  static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
  {
  	return PAGE_SIZE;
  }
  
  static inline unsigned int huge_page_order(struct hstate *h)
  {
  	return 0;
  }
  
  static inline unsigned int huge_page_shift(struct hstate *h)
  {
  	return PAGE_SHIFT;
  }
  static inline bool hstate_is_gigantic(struct hstate *h)
  {
  	return false;
  }
  static inline unsigned int pages_per_huge_page(struct hstate *h)
  {
  	return 1;
  }
  
  static inline unsigned hstate_index_to_shift(unsigned index)
  {
  	return 0;
  }
  
  static inline int hstate_index(struct hstate *h)
  {
  	return 0;
  }
  
  static inline pgoff_t basepage_index(struct page *page)
  {
  	return page->index;
  }
  
  static inline int dissolve_free_huge_page(struct page *page)
  {
  	return 0;
  }
  
  static inline int dissolve_free_huge_pages(unsigned long start_pfn,
  					   unsigned long end_pfn)
  {
  	return 0;
  }
  
  static inline bool hugepage_migration_supported(struct hstate *h)
  {
  	return false;
  }

  static inline bool hugepage_movable_supported(struct hstate *h)
  {
  	return false;
  }
  static inline gfp_t htlb_alloc_mask(struct hstate *h)
  {
  	return 0;
  }
  static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
  {
  	return 0;
  }
  static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
  					   struct mm_struct *mm, pte_t *pte)
  {
  	return &mm->page_table_lock;
  }
  
  static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
  {
  }
  
  static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
  {
  }
  
  static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
  					pte_t *ptep, pte_t pte, unsigned long sz)
  {
  }
  #endif	/* CONFIG_HUGETLB_PAGE */

  static inline spinlock_t *huge_pte_lock(struct hstate *h,
  					struct mm_struct *mm, pte_t *pte)
  {
  	spinlock_t *ptl;
  
  	ptl = huge_pte_lockptr(h, mm, pte);
  	spin_lock(ptl);
  	return ptl;
  }
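
/*
 * Example (illustrative) caller pattern:
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *	... inspect or update the huge PTE ...
 *	spin_unlock(ptl);
 */
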
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
/*
 * Reserve/verify a CMA area for gigantic page allocation, driven by
 * the "hugetlb_cma=" early boot parameter (see mm/hugetlb.c).
 */
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
  #else
  static inline __init void hugetlb_cma_reserve(int order)
  {
  }
  static inline __init void hugetlb_cma_check(void)
  {
  }
  #endif
  #endif /* _LINUX_HUGETLB_H */