lib/ioremap.c

  /*
   * Re-map IO memory to kernel address space so that we can access it.
   * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
   *
   * (C) Copyright 1995 1996 Linus Torvalds
   */
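
/*
 * Usage sketch, not part of the original file: roughly how a driver
 * consumes the ioremap() interface this file backs. "res" is an MMIO
 * struct resource the driver already owns, and the 0x04 status-register
 * offset is a hypothetical placeholder.
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + 0x04);
 *	iounmap(regs);
 */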
  #include <linux/vmalloc.h>
  #include <linux/mm.h>
  #include <linux/sched.h>
  #include <linux/io.h>
  #include <linux/export.h>
  #include <asm/cacheflush.h>
  #include <asm/pgtable.h>
  #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
  static int __read_mostly ioremap_pud_capable;
  static int __read_mostly ioremap_pmd_capable;
  static int __read_mostly ioremap_huge_disabled;
  
  static int __init set_nohugeiomap(char *str)
  {
  	ioremap_huge_disabled = 1;
  	return 0;
  }
  early_param("nohugeiomap", set_nohugeiomap);
  
  void __init ioremap_huge_init(void)
  {
  	if (!ioremap_huge_disabled) {
  		if (arch_ioremap_pud_supported())
  			ioremap_pud_capable = 1;
  		if (arch_ioremap_pmd_supported())
  			ioremap_pmd_capable = 1;
  	}
  }
  
  static inline int ioremap_pud_enabled(void)
  {
  	return ioremap_pud_capable;
  }
  
  static inline int ioremap_pmd_enabled(void)
  {
  	return ioremap_pmd_capable;
  }
  
  #else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
  static inline int ioremap_pud_enabled(void) { return 0; }
  static inline int ioremap_pmd_enabled(void) { return 0; }
  #endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
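
/*
 * Sketch of the two arch hooks consulted above, as x86 implements them
 * in this era (simplified from arch/x86/mm/ioremap.c; the PUD case only
 * holds on x86-64, and the exact feature checks are an assumption for
 * any other architecture):
 *
 *	int arch_ioremap_pud_supported(void)
 *	{
 *		return boot_cpu_has(X86_FEATURE_GBPAGES);
 *	}
 *
 *	int arch_ioremap_pmd_supported(void)
 *	{
 *		return boot_cpu_has(X86_FEATURE_PSE);
 *	}
 */
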
  static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
  		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
  {
  	pte_t *pte;
  	u64 pfn;
  
  	pfn = phys_addr >> PAGE_SHIFT;
  	pte = pte_alloc_kernel(pmd, addr);
  	if (!pte)
  		return -ENOMEM;
  	do {
  		BUG_ON(!pte_none(*pte));
  		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
  		pfn++;
  	} while (pte++, addr += PAGE_SIZE, addr != end);
  	return 0;
  }
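
/*
 * Worked example with illustrative numbers: mapping 16 KiB of MMIO at
 * physical 0xfed00000 with 4 KiB pages runs the loop above four times,
 * installing PTEs for pfns 0xfed00 through 0xfed03 at addr, addr + 4K,
 * addr + 8K and addr + 12K.
 */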
  
  static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
  		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
  {
  	pmd_t *pmd;
  	unsigned long next;
  
  	phys_addr -= addr;
  	pmd = pmd_alloc(&init_mm, pud, addr);
  	if (!pmd)
  		return -ENOMEM;
  	do {
  		next = pmd_addr_end(addr, end);
  
  		if (ioremap_pmd_enabled() &&
  		    ((next - addr) == PMD_SIZE) &&
  		    IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
  			if (pmd_set_huge(pmd, phys_addr + addr, prot))
  				continue;
  		}
  		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
  			return -ENOMEM;
  	} while (pmd++, addr = next, addr != end);
  	return 0;
  }
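
/*
 * Note on the arithmetic above: phys_addr was biased by -addr on entry,
 * so "phys_addr + addr" recovers the physical address backing any
 * virtual addr the loop visits. A huge entry is attempted only when
 * [addr, next) spans exactly one PMD (2 MiB on x86-64, which also
 * implies addr is PMD-aligned) and the physical side is PMD-aligned
 * too; anything else falls through to base-page PTEs.
 * ioremap_pud_range() below applies the same logic at PUD_SIZE
 * granularity.
 */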
  
  static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
  		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
  {
  	pud_t *pud;
  	unsigned long next;
  
  	phys_addr -= addr;
  	pud = pud_alloc(&init_mm, pgd, addr);
  	if (!pud)
  		return -ENOMEM;
  	do {
  		next = pud_addr_end(addr, end);
  
  		if (ioremap_pud_enabled() &&
  		    ((next - addr) == PUD_SIZE) &&
  		    IS_ALIGNED(phys_addr + addr, PUD_SIZE)) {
  			if (pud_set_huge(pud, phys_addr + addr, prot))
  				continue;
  		}
  		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
  			return -ENOMEM;
  	} while (pud++, addr = next, addr != end);
  	return 0;
  }
  
  int ioremap_page_range(unsigned long addr,
  		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
  {
  	pgd_t *pgd;
  	unsigned long start;
  	unsigned long next;
  	int err;
  
  	BUG_ON(addr >= end);
  	start = addr;
  	phys_addr -= addr;
  	pgd = pgd_offset_k(addr);
  	do {
  		next = pgd_addr_end(addr, end);
  		err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot);
  		if (err)
  			break;
  	} while (pgd++, addr = next, addr != end);
  	flush_cache_vmap(start, end);
  
  	return err;
  }
  EXPORT_SYMBOL_GPL(ioremap_page_range);
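
/*
 * Caller sketch, an assumption modelled on the arch-level ioremap
 * implementations of this era rather than code from this file. "size"
 * is page-aligned, "prot" is the caching attribute the caller chose,
 * and "caller" is the usual __builtin_return_address(0) cookie:
 *
 *	struct vm_struct *area;
 *	unsigned long vaddr;
 *
 *	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 *	if (!area)
 *		return NULL;
 *	area->phys_addr = phys_addr;
 *	vaddr = (unsigned long)area->addr;
 *	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
 *		free_vm_area(area);
 *		return NULL;
 *	}
 *	return (void __iomem *)vaddr;
 */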