include/linux/swapops.h

  #ifndef _LINUX_SWAPOPS_H
  #define _LINUX_SWAPOPS_H
  
#include <linux/radix-tree.h>

  /*
   * swapcache pages are stored in the swapper_space radix tree.  We want to
   * get good packing density in that tree, so the index should be dense in
   * the low-order bits.
   *
   * We arrange the `type' and `offset' fields so that `type' is at the five
   * high-order bits of the swp_entry_t and `offset' is right-aligned in the
   * remaining bits.
   *
   * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
   */
  #define SWP_TYPE_SHIFT(e)	(sizeof(e.val) * 8 - MAX_SWAPFILES_SHIFT)
  #define SWP_OFFSET_MASK(e)	((1UL << SWP_TYPE_SHIFT(e)) - 1)
  
  /*
   * Store a type+offset into a swp_entry_t in an arch-independent format
   */
  static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
  {
  	swp_entry_t ret;
  
  	ret.val = (type << SWP_TYPE_SHIFT(ret)) |
  			(offset & SWP_OFFSET_MASK(ret));
  	return ret;
  }
  
  /*
   * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
   * arch-independent format
   */
  static inline unsigned swp_type(swp_entry_t entry)
  {
  	return (entry.val >> SWP_TYPE_SHIFT(entry));
  }
  
  /*
   * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
   * arch-independent format
   */
  static inline pgoff_t swp_offset(swp_entry_t entry)
  {
  	return entry.val & SWP_OFFSET_MASK(entry);
  }
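
/*
 * Illustrative sketch, not part of the original header: swp_entry() and the
 * two extractors above are inverses, so a (type, offset) pair survives a
 * round trip through the arch-independent format as long as the offset fits
 * in the low SWP_TYPE_SHIFT() bits.  swp_entry_roundtrip_ok() is a
 * hypothetical helper used only for demonstration.
 */
static inline int swp_entry_roundtrip_ok(unsigned long type, pgoff_t offset)
{
	swp_entry_t entry = swp_entry(type, offset);

	return swp_type(entry) == type && swp_offset(entry) == offset;
}
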
  #ifdef CONFIG_MMU
  /* check whether a pte points to a swap entry */
  static inline int is_swap_pte(pte_t pte)
  {
  	return !pte_none(pte) && !pte_present(pte) && !pte_file(pte);
  }
  #endif

  /*
   * Convert the arch-dependent pte representation of a swp_entry_t into an
   * arch-independent swp_entry_t.
   */
  static inline swp_entry_t pte_to_swp_entry(pte_t pte)
  {
  	swp_entry_t arch_entry;
  
  	BUG_ON(pte_file(pte));
  	arch_entry = __pte_to_swp_entry(pte);
  	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
  }
  
  /*
   * Convert the arch-independent representation of a swp_entry_t into the
   * arch-dependent pte representation.
   */
  static inline pte_t swp_entry_to_pte(swp_entry_t entry)
  {
  	swp_entry_t arch_entry;
  
  	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
  	BUG_ON(pte_file(__swp_entry_to_pte(arch_entry)));
  	return __swp_entry_to_pte(arch_entry);
  }
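
/*
 * Illustrative sketch, not part of the original header: callers normally
 * test is_swap_pte() before converting, since pte_to_swp_entry() BUG_ONs on
 * file ptes and only yields a meaningful entry for non-present swap ptes.
 * pte_contains_swap_type() is a hypothetical helper, for demonstration only.
 */
#ifdef CONFIG_MMU
static inline int pte_contains_swap_type(pte_t pte, unsigned type)
{
	if (!is_swap_pte(pte))
		return 0;
	return swp_type(pte_to_swp_entry(pte)) == type;
}
#endif
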

  static inline swp_entry_t radix_to_swp_entry(void *arg)
  {
  	swp_entry_t entry;
  
  	entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
  	return entry;
  }
  
  static inline void *swp_to_radix_entry(swp_entry_t entry)
  {
  	unsigned long value;
  
  	value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
  	return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
  }
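
/*
 * Illustrative sketch, not part of the original header: shmem keeps swap
 * entries in its radix tree as exceptional entries.  swp_to_radix_entry()
 * shifts the value up to make room for the RADIX_TREE_EXCEPTIONAL_ENTRY bit
 * and radix_to_swp_entry() shifts it back down, so the round trip preserves
 * the entry.  radix_swp_roundtrip_ok() is hypothetical, for demonstration.
 */
static inline int radix_swp_roundtrip_ok(swp_entry_t entry)
{
	void *slot = swp_to_radix_entry(entry);

	return radix_tree_exceptional_entry(slot) &&
		radix_to_swp_entry(slot).val == entry.val;
}
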
  #ifdef CONFIG_MIGRATION
  static inline swp_entry_t make_migration_entry(struct page *page, int write)
  {
  	BUG_ON(!PageLocked(page));
  	return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
  			page_to_pfn(page));
  }
  
  static inline int is_migration_entry(swp_entry_t entry)
  {
  	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
  			swp_type(entry) == SWP_MIGRATION_WRITE);
  }
  
  static inline int is_write_migration_entry(swp_entry_t entry)
  {
  	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
  }
  
  static inline struct page *migration_entry_to_page(swp_entry_t entry)
  {
  	struct page *p = pfn_to_page(swp_offset(entry));
  	/*
  	 * Any use of migration entries may only occur while the
  	 * corresponding page is locked
  	 */
  	BUG_ON(!PageLocked(p));
  	return p;
  }
  
  static inline void make_migration_entry_read(swp_entry_t *entry)
  {
  	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
  }
  
  extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
  					unsigned long address);
  #else
  
  #define make_migration_entry(page, write) swp_entry(0, 0)
  static inline int is_migration_entry(swp_entry_t swp)
  {
  	return 0;
  }
  #define migration_entry_to_page(swp) NULL
  static inline void make_migration_entry_read(swp_entry_t *entryp) { }
  static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
  					 unsigned long address) { }
  static inline int is_write_migration_entry(swp_entry_t entry)
  {
  	return 0;
  }
  
  #endif
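
/*
 * Illustrative sketch, not part of the original header: while a page is
 * being migrated, its ptes are temporarily replaced by migration entries
 * that record the pfn and whether the mapping was writable.  The
 * hypothetical helper below (for demonstration only) creates such an entry
 * for a locked page and maps it back to the same page.
 */
#ifdef CONFIG_MIGRATION
static inline struct page *migration_entry_example(struct page *page,
						   int write)
{
	/* page must be locked, as make_migration_entry() asserts */
	swp_entry_t entry = make_migration_entry(page, write);

	BUG_ON(!is_migration_entry(entry));
	BUG_ON(write && !is_write_migration_entry(entry));
	return migration_entry_to_page(entry);	/* same page */
}
#endif
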
  #ifdef CONFIG_MEMORY_FAILURE
  /*
   * Support for hardware poisoned pages
   */
  static inline swp_entry_t make_hwpoison_entry(struct page *page)
  {
  	BUG_ON(!PageLocked(page));
  	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
  }
  
  static inline int is_hwpoison_entry(swp_entry_t entry)
  {
  	return swp_type(entry) == SWP_HWPOISON;
  }
  #else
  
  static inline swp_entry_t make_hwpoison_entry(struct page *page)
  {
  	return swp_entry(0, 0);
  }
  
  static inline int is_hwpoison_entry(swp_entry_t swp)
  {
  	return 0;
  }
  #endif
  
  #if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
  static inline int non_swap_entry(swp_entry_t entry)
  {
  	return swp_type(entry) >= MAX_SWAPFILES;
  }
  #else
  static inline int non_swap_entry(swp_entry_t entry)
  {
  	return 0;
  }
  #endif
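
/*
 * Illustrative sketch, not part of the original header: migration and
 * hwpoison entries reuse swp_entry_t with pseudo-types at or above
 * MAX_SWAPFILES, which is what non_swap_entry() detects.  swp_entry_kind()
 * is a hypothetical helper used only to demonstrate the classification.
 */
static inline const char *swp_entry_kind(swp_entry_t entry)
{
	if (is_migration_entry(entry))
		return "migration";
	if (is_hwpoison_entry(entry))
		return "hwpoison";
	if (non_swap_entry(entry))
		return "unknown non-swap";
	return "swap";
}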
  
  #endif /* _LINUX_SWAPOPS_H */