mm/ioremap.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>

#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;
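
/*
 * "nohugeiomap" on the kernel command line disables huge I/O mappings:
 * ioremap_huge_init() then leaves all the *_capable flags clear, so every
 * ioremap falls back to base-page (PTE) mappings.
 */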
static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
  
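/*
 * Probe, once at early boot, which page-table levels this architecture
 * can map with a single block entry.
 */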
void __init ioremap_huge_init(void)
{
	if (!ioremap_huge_disabled) {
		if (arch_ioremap_p4d_supported())
			ioremap_p4d_capable = 1;
		if (arch_ioremap_pud_supported())
			ioremap_pud_capable = 1;
		if (arch_ioremap_pmd_supported())
			ioremap_pmd_capable = 1;
	}
}

static inline int ioremap_p4d_enabled(void)
{
	return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
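
/*
 * Lowest level of the walk: populate one PTE per page in [addr, end),
 * mapping the physical range starting at phys_addr with protection prot.
 */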
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}
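
/*
 * Try to cover [addr, end) with a single huge PMD. This only succeeds if
 * huge PMD mappings are enabled, the range is exactly PMD_SIZE, virtual
 * and physical addresses are both PMD-aligned, and any page table that
 * already hangs off this PMD can be freed. Returns nonzero on success.
 */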
static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pmd_enabled())
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}
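
/*
 * PMD level of the walk: use a huge PMD per chunk when possible,
 * otherwise descend and fill in the PTEs.
 */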
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}
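
/* As above, but for a single huge PUD covering a PUD_SIZE chunk. */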
static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pud_enabled())
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}
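
/* PUD level of the walk: try huge PUDs first, then fall back to PMDs. */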
static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}
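
/* As above, but for a single huge P4D covering a P4D_SIZE chunk. */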
static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_p4d_enabled())
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}
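
/* P4D level of the walk: try huge P4Ds first, then fall back to PUDs. */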
static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (ioremap_pud_range(p4d, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}
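
/*
 * Top-level entry: map the physical range starting at phys_addr into the
 * kernel virtual range [addr, end). On return the cache has been flushed
 * for the new mapping, and arch_sync_kernel_mappings() has been called if
 * a page-table level the architecture cares about was modified.
 */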
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot,
					&mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	flush_cache_vmap(start, end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}
EXPORT_SYMBOL_GPL(ioremap_page_range);
  
#ifdef CONFIG_GENERIC_IOREMAP
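/*
 * Generic ioremap_prot(): reserve a virtual area, map the page-aligned
 * physical range into it with the caller-supplied protection bits, and
 * return the virtual address at the original sub-page offset.
 */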
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	unsigned long offset, vaddr;
	phys_addr_t last_addr;
	struct vm_struct *area;

	/* Disallow wrap-around or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/* Page-align mappings */
	offset = addr & (~PAGE_MASK);
	addr -= offset;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area_caller(size, VM_IOREMAP,
			__builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);
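
/*
 * Usage sketch (illustrative, not from this file): with the generic
 * implementation, ioremap() is built on ioremap_prot(), and a driver
 * uses it like this (phys_base and CTRL_OFFSET are hypothetical):
 *
 *	void __iomem *regs = ioremap(phys_base, SZ_4K);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + CTRL_OFFSET);
 *	iounmap(regs);
 */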
  
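/* Tear down a mapping created by ioremap_prot() and free its VA range. */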
void iounmap(volatile void __iomem *addr)
{
	vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
#endif /* CONFIG_GENERIC_IOREMAP */