lib/ioremap.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * Re-map IO memory to kernel address space so that we can access it.
   * This is needed for high PCI addresses that aren't mapped in the
   * 640k-1MB IO memory area on PC's
   *
   * (C) Copyright 1995 1996 Linus Torvalds
   */
  #include <linux/vmalloc.h>
  #include <linux/mm.h>
  #include <linux/sched.h>
  #include <linux/io.h>
  #include <linux/export.h>
  #include <asm/cacheflush.h>
#include <asm/pgtable.h>

  #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
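/*
 * Capability flags, filled in once at boot by ioremap_huge_init():
 * the page-table levels at which this architecture can create huge
 * I/O mappings.
 */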
  static int __read_mostly ioremap_p4d_capable;
  static int __read_mostly ioremap_pud_capable;
  static int __read_mostly ioremap_pmd_capable;
  static int __read_mostly ioremap_huge_disabled;
  
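/* Handler for the early "nohugeiomap" parameter: force base-page mappings. */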
  static int __init set_nohugeiomap(char *str)
  {
  	ioremap_huge_disabled = 1;
  	return 0;
  }
  early_param("nohugeiomap", set_nohugeiomap);
  
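/*
 * Record at early boot which huge-mapping levels the architecture
 * supports, unless "nohugeiomap" was given on the command line.
 */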
  void __init ioremap_huge_init(void)
  {
  	if (!ioremap_huge_disabled) {
  		if (arch_ioremap_pud_supported())
  			ioremap_pud_capable = 1;
  		if (arch_ioremap_pmd_supported())
  			ioremap_pmd_capable = 1;
  	}
}

  static inline int ioremap_p4d_enabled(void)
  {
  	return ioremap_p4d_capable;
}

  static inline int ioremap_pud_enabled(void)
  {
  	return ioremap_pud_capable;
  }
  
  static inline int ioremap_pmd_enabled(void)
  {
  	return ioremap_pmd_capable;
  }
  
  #else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
  static inline int ioremap_p4d_enabled(void) { return 0; }
  static inline int ioremap_pud_enabled(void) { return 0; }
  static inline int ioremap_pmd_enabled(void) { return 0; }
  #endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
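
/*
 * The helpers below walk the kernel page tables top-down (pgd -> p4d ->
 * pud -> pmd -> pte), allocating intermediate tables as needed and
 * mapping the virtual range [addr, end) to the physical range that
 * starts at phys_addr.
 */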
  static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
  		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
  {
  	pte_t *pte;
  	u64 pfn;
  
  	pfn = phys_addr >> PAGE_SHIFT;
  	pte = pte_alloc_kernel(pmd, addr);
  	if (!pte)
  		return -ENOMEM;
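	/*
	 * Fill in leaf PTEs one base page at a time; an already-present
	 * entry would mean the range is being mapped twice, hence the
	 * BUG_ON().
	 */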
  	do {
  		BUG_ON(!pte_none(*pte));
  		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
  		pfn++;
  	} while (pte++, addr += PAGE_SIZE, addr != end);
  	return 0;
  }
  
  static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
  		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
  {
  	pmd_t *pmd;
  	unsigned long next;
  
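	/*
	 * Bias phys_addr by -addr so that (phys_addr + addr) always yields
	 * the physical address backing the current virtual address.
	 */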
  	phys_addr -= addr;
  	pmd = pmd_alloc(&init_mm, pud, addr);
  	if (!pmd)
  		return -ENOMEM;
  	do {
  		next = pmd_addr_end(addr, end);
  
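		/*
		 * Map a whole PMD-sized block with one huge entry when the
		 * remaining range and the physical address are both aligned
		 * and any stale PTE table from an earlier mapping can be
		 * freed first.
		 */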
  		if (ioremap_pmd_enabled() &&
  		    ((next - addr) == PMD_SIZE) &&
  		    IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
  		    pmd_free_pte_page(pmd, addr)) {
  			if (pmd_set_huge(pmd, phys_addr + addr, prot))
  				continue;
  		}
  		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
  			return -ENOMEM;
  	} while (pmd++, addr = next, addr != end);
  	return 0;
}

  static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
  		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
  {
  	pud_t *pud;
  	unsigned long next;
  
  	phys_addr -= addr;
  	pud = pud_alloc(&init_mm, p4d, addr);
  	if (!pud)
  		return -ENOMEM;
  	do {
  		next = pud_addr_end(addr, end);
  
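		/*
		 * Likewise at the PUD level: one huge entry covers the whole
		 * block when size and alignment permit and any stale PMD
		 * table can be freed.
		 */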
  		if (ioremap_pud_enabled() &&
  		    ((next - addr) == PUD_SIZE) &&
  		    IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
  		    pud_free_pmd_page(pud, addr)) {
  			if (pud_set_huge(pud, phys_addr + addr, prot))
  				continue;
  		}
  		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
  			return -ENOMEM;
  	} while (pud++, addr = next, addr != end);
  	return 0;
}

  static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
  		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
  {
  	p4d_t *p4d;
  	unsigned long next;
  
  	phys_addr -= addr;
  	p4d = p4d_alloc(&init_mm, pgd, addr);
  	if (!p4d)
  		return -ENOMEM;
  	do {
  		next = p4d_addr_end(addr, end);
  
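		/* Same idea one level up: use a single P4D-sized entry when
		 * size and alignment allow.
		 */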
  		if (ioremap_p4d_enabled() &&
  		    ((next - addr) == P4D_SIZE) &&
  		    IS_ALIGNED(phys_addr + addr, P4D_SIZE)) {
  			if (p4d_set_huge(p4d, phys_addr + addr, prot))
  				continue;
  		}
  
  		if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot))
  			return -ENOMEM;
  	} while (p4d++, addr = next, addr != end);
  	return 0;
  }
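
/*
 * Top-level entry: map the physical range starting at phys_addr onto
 * the kernel virtual range [addr, end) with protection 'prot'.  May
 * sleep while allocating page tables (note the might_sleep() below).
 *
 * A typical caller is an arch ioremap() implementation; roughly
 * (a sketch, not code from this file):
 *
 *	area = get_vm_area(size, VM_IOREMAP);
 *	ioremap_page_range((unsigned long)area->addr,
 *			   (unsigned long)area->addr + size,
 *			   phys_addr, pgprot_noncached(PAGE_KERNEL));
 */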
  int ioremap_page_range(unsigned long addr,
  		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
  {
  	pgd_t *pgd;
  	unsigned long start;
  	unsigned long next;
	int err;

  	might_sleep();
	BUG_ON(addr >= end);

  	start = addr;
  	phys_addr -= addr;
  	pgd = pgd_offset_k(addr);
  	do {
  		next = pgd_addr_end(addr, end);
  		err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot);
  		if (err)
  			break;
  	} while (pgd++, addr = next, addr != end);
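
	/*
	 * Tell architectures with virtually-tagged or aliasing caches about
	 * the new mappings (a no-op on most architectures).
	 */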
  	flush_cache_vmap(start, end);
  
  	return err;
  }