Blame view

kernel/iomem.c 4.69 KB
5981690dd   Dan Williams   memremap: split d...
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
  /* SPDX-License-Identifier: GPL-2.0 */
  #include <linux/device.h>
  #include <linux/types.h>
  #include <linux/io.h>
  #include <linux/mm.h>
  
  #ifndef ioremap_cache
  /* temporary while we convert existing ioremap_cache users to memremap */
  /*
   * Generic fallback: map with the default ioremap() attributes.  Defined
   * __weak so an architecture that supplies a real cacheable ioremap_cache()
   * overrides this definition at link time.
   */
  __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
  {
  	return ioremap(offset, size);
  }
  #endif
  
  #ifndef arch_memremap_wb
  /*
   * Default write-back mapping helper used by memremap(MEMREMAP_WB): reuse
   * ioremap_cache() and strip the __iomem annotation, since memremap() hands
   * out plain pointers.  An architecture can provide its own version by
   * defining arch_memremap_wb before this point.
   */
  static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
  {
  	return (__force void *)ioremap_cache(offset, size);
  }
  #endif
  
  #ifndef arch_memremap_can_ram_remap
  /*
   * Arch hook: may System RAM at @offset be handed out via the existing
   * linear mapping for the requested @flags?  The default says yes;
   * architectures with special RAM constraints can override this by
   * defining arch_memremap_can_ram_remap.
   */
  static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
  					unsigned long flags)
  {
  	return true;
  }
  #endif
  
  /*
   * Try to satisfy a remap request for System RAM straight from the
   * direct map.  Returns the linear address on success, or NULL so the
   * caller can fall back to arch_memremap_wb().
   */
  static void *try_ram_remap(resource_size_t offset, size_t size,
  			   unsigned long flags)
  {
  	unsigned long pfn = PHYS_PFN(offset);
  
  	/* Only pages that actually live in the linear mapping qualify. */
  	if (!pfn_valid(pfn))
  		return NULL;
  	if (PageHighMem(pfn_to_page(pfn)))
  		return NULL;
  	/* ...and only if the architecture permits reusing the direct map. */
  	if (!arch_memremap_can_ram_remap(offset, size, flags))
  		return NULL;
  
  	return __va(offset);
  }
  
  /**
   * memremap() - remap an iomem_resource as cacheable memory
   * @offset: iomem resource start address
   * @size: size of remap
   * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
   *		  MEMREMAP_ENC, MEMREMAP_DEC
   *
   * memremap() is "ioremap" for cases where it is known that the resource
   * being mapped does not have i/o side effects and the __iomem
   * annotation is not applicable. In the case of multiple flags, the different
   * mapping types will be attempted in the order listed below until one of
   * them succeeds.
   *
   * MEMREMAP_WB - matches the default mapping for System RAM on
   * the architecture.  This is usually a read-allocate write-back cache.
   * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM
   * memremap() will bypass establishing a new mapping and instead return
   * a pointer into the direct map.
   *
   * MEMREMAP_WT - establish a mapping whereby writes either bypass the
   * cache or are written through to memory and never exist in a
   * cache-dirty state with respect to program visibility.  Attempts to
   * map System RAM with this mapping type will fail.
   *
   * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
   * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
   * uncached. Attempts to map System RAM with this mapping type will fail.
   */
  void *memremap(resource_size_t offset, size_t size, unsigned long flags)
  {
  	int is_ram = region_intersects(offset, size,
  				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
  	void *addr = NULL;
  
  	if (!flags)
  		return NULL;
  
  	/* A range that is partly RAM and partly not cannot be mapped sanely. */
  	if (is_ram == REGION_MIXED) {
  		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
  				&offset, (unsigned long) size);
  		return NULL;
  	}
  
  	/* Try all mapping types requested until one returns non-NULL */
  	if (flags & MEMREMAP_WB) {
  		/*
  		 * MEMREMAP_WB is special in that it can be satisfied
  		 * from the direct map.  Some archs depend on the
  		 * capability of memremap() to autodetect cases where
  		 * the requested range is potentially in System RAM.
  		 */
  		if (is_ram == REGION_INTERSECTS)
  			addr = try_ram_remap(offset, size, flags);
  		if (!addr)
  			addr = arch_memremap_wb(offset, size);
  	}
  
  	/*
  	 * If we don't have a mapping yet and other request flags are
  	 * present then we will be attempting to establish a new virtual
  	 * address mapping.  Enforce that this mapping is not aliasing
  	 * System RAM.
  	 */
  	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
  		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
  				&offset, (unsigned long) size);
  		return NULL;
  	}
  
  	if (!addr && (flags & MEMREMAP_WT))
  		addr = ioremap_wt(offset, size);
  
  	if (!addr && (flags & MEMREMAP_WC))
  		addr = ioremap_wc(offset, size);
  
  	return addr;
  }
  EXPORT_SYMBOL(memremap);
  
  void memunmap(void *addr)
  {
9bd3bb670   Aneesh Kumar K.V   mm/nvdimm: add is...
126
  	if (is_ioremap_addr(addr))
5981690dd   Dan Williams   memremap: split d...
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
  		iounmap((void __iomem *) addr);
  }
  EXPORT_SYMBOL(memunmap);
  
  /* devres destructor: tear down the mapping stored in the devres slot */
  static void devm_memremap_release(struct device *dev, void *res)
  {
  	void **slot = res;
  
  	memunmap(*slot);
  }
  
  /* devres match callback: nonzero when the slot holds @match_data */
  static int devm_memremap_match(struct device *dev, void *res, void *match_data)
  {
  	void **slot = res;
  
  	return *slot == match_data;
  }
  
  /**
   * devm_memremap - device-managed memremap()
   * @dev: device that owns the mapping
   * @offset: iomem resource start address
   * @size: size of remap
   * @flags: MEMREMAP_* flags, as for memremap()
   *
   * Like memremap(), but the mapping is released automatically (via
   * devm_memremap_release()) when @dev goes away.  Returns the mapped
   * address on success, ERR_PTR(-ENOMEM) when the devres node cannot be
   * allocated, or ERR_PTR(-ENXIO) when memremap() fails.
   */
  void *devm_memremap(struct device *dev, resource_size_t offset,
  		size_t size, unsigned long flags)
  {
  	void **ptr, *addr;
  
  	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
  			dev_to_node(dev));
  	if (!ptr)
  		return ERR_PTR(-ENOMEM);
  
  	addr = memremap(offset, size, flags);
  	if (!addr) {
  		devres_free(ptr);
  		return ERR_PTR(-ENXIO);
  	}
  
  	/* Record the mapping so release can find and undo it. */
  	*ptr = addr;
  	devres_add(dev, ptr);
  	return addr;
  }
  EXPORT_SYMBOL(devm_memremap);
  
  /**
   * devm_memunmap - drop a devm_memremap() mapping ahead of device teardown
   * @dev: device that owns the mapping
   * @addr: address previously returned by devm_memremap()
   *
   * Releases the matching devres entry, which unmaps @addr; warns when no
   * matching entry exists.
   */
  void devm_memunmap(struct device *dev, void *addr)
  {
  	int missing = devres_release(dev, devm_memremap_release,
  				devm_memremap_match, addr);
  
  	WARN_ON(missing);
  }
  EXPORT_SYMBOL(devm_memunmap);