Blame view

lib/ioremap.c 5.2 KB
b24413180   Greg Kroah-Hartman   License cleanup: ...
1
  // SPDX-License-Identifier: GPL-2.0
74588d8ba   Haavard Skinnemoen   [PATCH] Generic i...
2
3
4
5
6
7
8
  /*
   * Re-map IO memory to kernel address space so that we can access it.
   * This is needed for high PCI addresses that aren't mapped in the
   * 640k-1MB IO memory area on PC's
   *
   * (C) Copyright 1995 1996 Linus Torvalds
   */
74588d8ba   Haavard Skinnemoen   [PATCH] Generic i...
9
10
  #include <linux/vmalloc.h>
  #include <linux/mm.h>
e8edc6e03   Alexey Dobriyan   Detach sched.h fr...
11
  #include <linux/sched.h>
53fa66452   Adrian Bunk   lib/ioremap.c sho...
12
  #include <linux/io.h>
8bc3bcc93   Paul Gortmaker   lib: reduce the u...
13
  #include <linux/export.h>
74588d8ba   Haavard Skinnemoen   [PATCH] Generic i...
14
15
  #include <asm/cacheflush.h>
  #include <asm/pgtable.h>
0ddab1d2e   Toshi Kani   lib/ioremap.c: ad...
16
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
/*
 * Per-level huge-mapping capability flags.  Written once at boot by
 * ioremap_huge_init() and read on every ioremap_page_range() call
 * afterwards, hence __read_mostly.
 */
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
/* Set by the "nohugeiomap" early parameter to force PAGE_SIZE mappings. */
static int __read_mostly ioremap_huge_disabled;
0ddab1d2e   Toshi Kani   lib/ioremap.c: ad...
21
22
23
24
25
26
27
28
29
30
31
  
/*
 * "nohugeiomap" kernel parameter: opt out of huge-page ioremap mappings
 * even when the architecture supports them.
 */
static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
  
  void __init ioremap_huge_init(void)
  {
  	if (!ioremap_huge_disabled) {
0f472d04f   Anshuman Khandual   mm/ioremap: probe...
32
33
  		if (arch_ioremap_p4d_supported())
  			ioremap_p4d_capable = 1;
0ddab1d2e   Toshi Kani   lib/ioremap.c: ad...
34
35
36
37
38
39
  		if (arch_ioremap_pud_supported())
  			ioremap_pud_capable = 1;
  		if (arch_ioremap_pmd_supported())
  			ioremap_pmd_capable = 1;
  	}
  }
c2febafc6   Kirill A. Shutemov   mm: convert gener...
40
41
42
43
/* Non-zero when P4D-sized huge ioremap mappings may be attempted. */
static inline int ioremap_p4d_enabled(void)
{
	return ioremap_p4d_capable;
}
0ddab1d2e   Toshi Kani   lib/ioremap.c: ad...
44
45
46
47
48
49
50
51
52
53
54
/* Non-zero when PUD-sized huge ioremap mappings may be attempted. */
static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}
  
/* Non-zero when PMD-sized huge ioremap mappings may be attempted. */
static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}
  
#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
/* Without arch support, huge ioremap is never enabled at any level. */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
74588d8ba   Haavard Skinnemoen   [PATCH] Generic i...
59
/*
 * Populate the PTEs covering [addr, end) under @pmd with a linear
 * mapping of @phys_addr using protection bits @prot.
 *
 * Returns 0 on success or -ENOMEM if a PTE page cannot be allocated.
 */
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		/* The range must be unmapped; a live PTE here is a caller bug. */
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}
d239865ac   Will Deacon   ioremap: rework p...
76
77
78
79
80
81
82
83
84
  static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
  				unsigned long end, phys_addr_t phys_addr,
  				pgprot_t prot)
  {
  	if (!ioremap_pmd_enabled())
  		return 0;
  
  	if ((end - addr) != PMD_SIZE)
  		return 0;
6b95ab421   Anshuman Khandual   mm/ioremap: check...
85
86
  	if (!IS_ALIGNED(addr, PMD_SIZE))
  		return 0;
d239865ac   Will Deacon   ioremap: rework p...
87
88
89
90
91
92
93
94
  	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
  		return 0;
  
  	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
  		return 0;
  
  	return pmd_set_huge(pmd, phys_addr, prot);
  }
74588d8ba   Haavard Skinnemoen   [PATCH] Generic i...
95
/*
 * Walk the PMDs under @pud for [addr, end), installing huge PMD
 * mappings where possible and falling back to PTE population
 * otherwise.  Returns 0 on success or -ENOMEM.
 */
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		/* Non-zero means a huge PMD was installed for this slot. */
		if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot))
			continue;

		if (ioremap_pte_range(pmd, addr, next, phys_addr, prot))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}
d239865ac   Will Deacon   ioremap: rework p...
114
115
116
117
118
119
120
121
122
  static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
  				unsigned long end, phys_addr_t phys_addr,
  				pgprot_t prot)
  {
  	if (!ioremap_pud_enabled())
  		return 0;
  
  	if ((end - addr) != PUD_SIZE)
  		return 0;
6b95ab421   Anshuman Khandual   mm/ioremap: check...
123
124
  	if (!IS_ALIGNED(addr, PUD_SIZE))
  		return 0;
d239865ac   Will Deacon   ioremap: rework p...
125
126
127
128
129
130
131
132
  	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
  		return 0;
  
  	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
  		return 0;
  
  	return pud_set_huge(pud, phys_addr, prot);
  }
c2febafc6   Kirill A. Shutemov   mm: convert gener...
133
/*
 * Walk the PUDs under @p4d for [addr, end), installing huge PUD
 * mappings where possible and descending to PMD level otherwise.
 * Returns 0 on success or -ENOMEM.
 */
static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, p4d, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		/* Non-zero means a huge PUD was installed for this slot. */
		if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot))
			continue;

		if (ioremap_pmd_range(pud, addr, next, phys_addr, prot))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}
8e2d43405   Will Deacon   lib/ioremap: ensu...
152
153
154
155
156
157
158
159
160
  static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
  				unsigned long end, phys_addr_t phys_addr,
  				pgprot_t prot)
  {
  	if (!ioremap_p4d_enabled())
  		return 0;
  
  	if ((end - addr) != P4D_SIZE)
  		return 0;
6b95ab421   Anshuman Khandual   mm/ioremap: check...
161
162
  	if (!IS_ALIGNED(addr, P4D_SIZE))
  		return 0;
8e2d43405   Will Deacon   lib/ioremap: ensu...
163
164
165
166
167
168
169
170
  	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
  		return 0;
  
  	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
  		return 0;
  
  	return p4d_set_huge(p4d, phys_addr, prot);
  }
c2febafc6   Kirill A. Shutemov   mm: convert gener...
171
172
173
174
175
/*
 * Walk the P4Ds under @pgd for [addr, end), installing huge P4D
 * mappings where possible and descending to PUD level otherwise.
 * Returns 0 on success or -ENOMEM.
 */
static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc(&init_mm, pgd, addr);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		/* Non-zero means a huge P4D was installed for this slot. */
		if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot))
			continue;

		if (ioremap_pud_range(p4d, addr, next, phys_addr, prot))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}
74588d8ba   Haavard Skinnemoen   [PATCH] Generic i...
189
/*
 * Map the physical range starting at @phys_addr into the kernel
 * virtual range [addr, end) with protection @prot, walking the page
 * tables from the PGD down.  May sleep (page-table allocation).
 *
 * Returns 0 on success or -ENOMEM if an intermediate table could not
 * be allocated; on failure, already-installed entries are NOT undone
 * here — the caller is responsible for cleanup.
 */
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	/* Flush even on partial failure: some entries may have landed. */
	flush_cache_vmap(start, end);

	return err;
}
EXPORT_SYMBOL_GPL(ioremap_page_range);