Commit d57d64080ddc0ff13fcffc898b6251074a482ba1
1 parent: af1415314a (exists in master and in 7 other branches)
sh: Prevent 64-bit pgprot clobbering across ioremap implementations.
Presently 'flags' gets passed around a lot between the various ioremap helpers and implementations, and it is only 32 bits wide. In the X2TLB case we use 64-bit pgprots, which presently results in the upper 32 bits being chopped off (and those upper bits handily include our read/write/exec permissions).

As such, we convert everything internally to using pgprot_t directly and simply convert over with pgprot_val() where needed. With this in place, transparent fixmap utilization for early ioremap works as expected.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
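The truncation is easiest to see in isolation. The sketch below is a minimal, self-contained illustration (plain userspace C, not kernel code): the pgprot_t/pgprot_val()/__pgprot() stand-ins mirror the kernel's names, while the ioremap_helper_*() functions and the _PAGE_EXT_* bit positions are hypothetical. On a 32-bit build with 64-bit pgprots, squeezing the value through a 32-bit 'flags' parameter drops the upper word and the permission bits with it; passing the pgprot_t itself keeps them intact.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Stand-in for the kernel's pgprot machinery on an X2TLB build: the
     * protection value is 64 bits wide, while 'unsigned long' is only
     * 32 bits on a 32-bit SuperH kernel (modelled here with uint32_t).
     */
    typedef struct { uint64_t pgprot; } pgprot_t;
    #define pgprot_val(x)   ((x).pgprot)
    #define __pgprot(x)     ((pgprot_t) { (x) })

    /* Hypothetical extended permission bits living in the upper 32 bits. */
    #define _PAGE_EXT_READ  (1ULL << 32)
    #define _PAGE_EXT_WRITE (1ULL << 33)

    /*
     * Old style: the helper takes a 32-bit 'flags' word, so the caller's
     * 64-bit protection value is silently truncated at the call boundary.
     */
    static void ioremap_helper_flags(uint32_t flags)
    {
            printf("flags path:  %#018llx\n", (unsigned long long)flags);
    }

    /*
     * New style (what the commit switches to): pass pgprot_t through
     * intact and only unwrap it with pgprot_val() at the point of use.
     */
    static void ioremap_helper_prot(pgprot_t prot)
    {
            printf("pgprot path: %#018llx\n",
                   (unsigned long long)pgprot_val(prot));
    }

    int main(void)
    {
            pgprot_t prot = __pgprot(_PAGE_EXT_READ | _PAGE_EXT_WRITE | 0x1ffULL);

            ioremap_helper_flags(pgprot_val(prot)); /* upper 32 bits lost */
            ioremap_helper_prot(prot);              /* permissions survive */
            return 0;
    }

On the caller side this is why the board files below now hand p3_ioremap() the pgprot_t itself (prot) instead of prot.pgprot, and why __ioremap_caller() in asm/io.h gains a pgprot_t prot parameter in place of unsigned long flags.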
Showing 6 changed files with 41 additions and 33 deletions
arch/sh/boards/board-sh7785lcr.c
1 | /* | 1 | /* |
2 | * Renesas Technology Corp. R0P7785LC0011RL Support. | 2 | * Renesas Technology Corp. R0P7785LC0011RL Support. |
3 | * | 3 | * |
4 | * Copyright (C) 2008 Yoshihiro Shimoda | 4 | * Copyright (C) 2008 Yoshihiro Shimoda |
5 | * Copyright (C) 2009 Paul Mundt | 5 | * Copyright (C) 2009 Paul Mundt |
6 | * | 6 | * |
7 | * This file is subject to the terms and conditions of the GNU General Public | 7 | * This file is subject to the terms and conditions of the GNU General Public |
8 | * License. See the file "COPYING" in the main directory of this archive | 8 | * License. See the file "COPYING" in the main directory of this archive |
9 | * for more details. | 9 | * for more details. |
10 | */ | 10 | */ |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/platform_device.h> | 12 | #include <linux/platform_device.h> |
13 | #include <linux/sm501.h> | 13 | #include <linux/sm501.h> |
14 | #include <linux/sm501-regs.h> | 14 | #include <linux/sm501-regs.h> |
15 | #include <linux/fb.h> | 15 | #include <linux/fb.h> |
16 | #include <linux/mtd/physmap.h> | 16 | #include <linux/mtd/physmap.h> |
17 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
18 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
19 | #include <linux/i2c.h> | 19 | #include <linux/i2c.h> |
20 | #include <linux/i2c-pca-platform.h> | 20 | #include <linux/i2c-pca-platform.h> |
21 | #include <linux/i2c-algo-pca.h> | 21 | #include <linux/i2c-algo-pca.h> |
22 | #include <linux/usb/r8a66597.h> | 22 | #include <linux/usb/r8a66597.h> |
23 | #include <linux/irq.h> | 23 | #include <linux/irq.h> |
24 | #include <linux/io.h> | ||
24 | #include <linux/clk.h> | 25 | #include <linux/clk.h> |
25 | #include <linux/errno.h> | 26 | #include <linux/errno.h> |
26 | #include <mach/sh7785lcr.h> | 27 | #include <mach/sh7785lcr.h> |
27 | #include <cpu/sh7785.h> | 28 | #include <cpu/sh7785.h> |
28 | #include <asm/heartbeat.h> | 29 | #include <asm/heartbeat.h> |
29 | #include <asm/clock.h> | 30 | #include <asm/clock.h> |
30 | 31 | ||
31 | /* | 32 | /* |
32 | * NOTE: This board has 2 physical memory maps. | 33 | * NOTE: This board has 2 physical memory maps. |
33 | * Please look at include/asm-sh/sh7785lcr.h or hardware manual. | 34 | * Please look at include/asm-sh/sh7785lcr.h or hardware manual. |
34 | */ | 35 | */ |
35 | static struct resource heartbeat_resource = { | 36 | static struct resource heartbeat_resource = { |
36 | .start = PLD_LEDCR, | 37 | .start = PLD_LEDCR, |
37 | .end = PLD_LEDCR, | 38 | .end = PLD_LEDCR, |
38 | .flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT, | 39 | .flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT, |
39 | }; | 40 | }; |
40 | 41 | ||
41 | static struct platform_device heartbeat_device = { | 42 | static struct platform_device heartbeat_device = { |
42 | .name = "heartbeat", | 43 | .name = "heartbeat", |
43 | .id = -1, | 44 | .id = -1, |
44 | .num_resources = 1, | 45 | .num_resources = 1, |
45 | .resource = &heartbeat_resource, | 46 | .resource = &heartbeat_resource, |
46 | }; | 47 | }; |
47 | 48 | ||
48 | static struct mtd_partition nor_flash_partitions[] = { | 49 | static struct mtd_partition nor_flash_partitions[] = { |
49 | { | 50 | { |
50 | .name = "loader", | 51 | .name = "loader", |
51 | .offset = 0x00000000, | 52 | .offset = 0x00000000, |
52 | .size = 512 * 1024, | 53 | .size = 512 * 1024, |
53 | }, | 54 | }, |
54 | { | 55 | { |
55 | .name = "bootenv", | 56 | .name = "bootenv", |
56 | .offset = MTDPART_OFS_APPEND, | 57 | .offset = MTDPART_OFS_APPEND, |
57 | .size = 512 * 1024, | 58 | .size = 512 * 1024, |
58 | }, | 59 | }, |
59 | { | 60 | { |
60 | .name = "kernel", | 61 | .name = "kernel", |
61 | .offset = MTDPART_OFS_APPEND, | 62 | .offset = MTDPART_OFS_APPEND, |
62 | .size = 4 * 1024 * 1024, | 63 | .size = 4 * 1024 * 1024, |
63 | }, | 64 | }, |
64 | { | 65 | { |
65 | .name = "data", | 66 | .name = "data", |
66 | .offset = MTDPART_OFS_APPEND, | 67 | .offset = MTDPART_OFS_APPEND, |
67 | .size = MTDPART_SIZ_FULL, | 68 | .size = MTDPART_SIZ_FULL, |
68 | }, | 69 | }, |
69 | }; | 70 | }; |
70 | 71 | ||
71 | static struct physmap_flash_data nor_flash_data = { | 72 | static struct physmap_flash_data nor_flash_data = { |
72 | .width = 4, | 73 | .width = 4, |
73 | .parts = nor_flash_partitions, | 74 | .parts = nor_flash_partitions, |
74 | .nr_parts = ARRAY_SIZE(nor_flash_partitions), | 75 | .nr_parts = ARRAY_SIZE(nor_flash_partitions), |
75 | }; | 76 | }; |
76 | 77 | ||
77 | static struct resource nor_flash_resources[] = { | 78 | static struct resource nor_flash_resources[] = { |
78 | [0] = { | 79 | [0] = { |
79 | .start = NOR_FLASH_ADDR, | 80 | .start = NOR_FLASH_ADDR, |
80 | .end = NOR_FLASH_ADDR + NOR_FLASH_SIZE - 1, | 81 | .end = NOR_FLASH_ADDR + NOR_FLASH_SIZE - 1, |
81 | .flags = IORESOURCE_MEM, | 82 | .flags = IORESOURCE_MEM, |
82 | } | 83 | } |
83 | }; | 84 | }; |
84 | 85 | ||
85 | static struct platform_device nor_flash_device = { | 86 | static struct platform_device nor_flash_device = { |
86 | .name = "physmap-flash", | 87 | .name = "physmap-flash", |
87 | .dev = { | 88 | .dev = { |
88 | .platform_data = &nor_flash_data, | 89 | .platform_data = &nor_flash_data, |
89 | }, | 90 | }, |
90 | .num_resources = ARRAY_SIZE(nor_flash_resources), | 91 | .num_resources = ARRAY_SIZE(nor_flash_resources), |
91 | .resource = nor_flash_resources, | 92 | .resource = nor_flash_resources, |
92 | }; | 93 | }; |
93 | 94 | ||
94 | static struct r8a66597_platdata r8a66597_data = { | 95 | static struct r8a66597_platdata r8a66597_data = { |
95 | .xtal = R8A66597_PLATDATA_XTAL_12MHZ, | 96 | .xtal = R8A66597_PLATDATA_XTAL_12MHZ, |
96 | .vif = 1, | 97 | .vif = 1, |
97 | }; | 98 | }; |
98 | 99 | ||
99 | static struct resource r8a66597_usb_host_resources[] = { | 100 | static struct resource r8a66597_usb_host_resources[] = { |
100 | [0] = { | 101 | [0] = { |
101 | .start = R8A66597_ADDR, | 102 | .start = R8A66597_ADDR, |
102 | .end = R8A66597_ADDR + R8A66597_SIZE - 1, | 103 | .end = R8A66597_ADDR + R8A66597_SIZE - 1, |
103 | .flags = IORESOURCE_MEM, | 104 | .flags = IORESOURCE_MEM, |
104 | }, | 105 | }, |
105 | [1] = { | 106 | [1] = { |
106 | .start = 2, | 107 | .start = 2, |
107 | .end = 2, | 108 | .end = 2, |
108 | .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, | 109 | .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, |
109 | }, | 110 | }, |
110 | }; | 111 | }; |
111 | 112 | ||
112 | static struct platform_device r8a66597_usb_host_device = { | 113 | static struct platform_device r8a66597_usb_host_device = { |
113 | .name = "r8a66597_hcd", | 114 | .name = "r8a66597_hcd", |
114 | .id = -1, | 115 | .id = -1, |
115 | .dev = { | 116 | .dev = { |
116 | .dma_mask = NULL, | 117 | .dma_mask = NULL, |
117 | .coherent_dma_mask = 0xffffffff, | 118 | .coherent_dma_mask = 0xffffffff, |
118 | .platform_data = &r8a66597_data, | 119 | .platform_data = &r8a66597_data, |
119 | }, | 120 | }, |
120 | .num_resources = ARRAY_SIZE(r8a66597_usb_host_resources), | 121 | .num_resources = ARRAY_SIZE(r8a66597_usb_host_resources), |
121 | .resource = r8a66597_usb_host_resources, | 122 | .resource = r8a66597_usb_host_resources, |
122 | }; | 123 | }; |
123 | 124 | ||
124 | static struct resource sm501_resources[] = { | 125 | static struct resource sm501_resources[] = { |
125 | [0] = { | 126 | [0] = { |
126 | .start = SM107_MEM_ADDR, | 127 | .start = SM107_MEM_ADDR, |
127 | .end = SM107_MEM_ADDR + SM107_MEM_SIZE - 1, | 128 | .end = SM107_MEM_ADDR + SM107_MEM_SIZE - 1, |
128 | .flags = IORESOURCE_MEM, | 129 | .flags = IORESOURCE_MEM, |
129 | }, | 130 | }, |
130 | [1] = { | 131 | [1] = { |
131 | .start = SM107_REG_ADDR, | 132 | .start = SM107_REG_ADDR, |
132 | .end = SM107_REG_ADDR + SM107_REG_SIZE - 1, | 133 | .end = SM107_REG_ADDR + SM107_REG_SIZE - 1, |
133 | .flags = IORESOURCE_MEM, | 134 | .flags = IORESOURCE_MEM, |
134 | }, | 135 | }, |
135 | [2] = { | 136 | [2] = { |
136 | .start = 10, | 137 | .start = 10, |
137 | .flags = IORESOURCE_IRQ, | 138 | .flags = IORESOURCE_IRQ, |
138 | }, | 139 | }, |
139 | }; | 140 | }; |
140 | 141 | ||
141 | static struct fb_videomode sm501_default_mode_crt = { | 142 | static struct fb_videomode sm501_default_mode_crt = { |
142 | .pixclock = 35714, /* 28MHz */ | 143 | .pixclock = 35714, /* 28MHz */ |
143 | .xres = 640, | 144 | .xres = 640, |
144 | .yres = 480, | 145 | .yres = 480, |
145 | .left_margin = 105, | 146 | .left_margin = 105, |
146 | .right_margin = 16, | 147 | .right_margin = 16, |
147 | .upper_margin = 33, | 148 | .upper_margin = 33, |
148 | .lower_margin = 10, | 149 | .lower_margin = 10, |
149 | .hsync_len = 39, | 150 | .hsync_len = 39, |
150 | .vsync_len = 2, | 151 | .vsync_len = 2, |
151 | .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, | 152 | .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, |
152 | }; | 153 | }; |
153 | 154 | ||
154 | static struct fb_videomode sm501_default_mode_pnl = { | 155 | static struct fb_videomode sm501_default_mode_pnl = { |
155 | .pixclock = 40000, /* 25MHz */ | 156 | .pixclock = 40000, /* 25MHz */ |
156 | .xres = 640, | 157 | .xres = 640, |
157 | .yres = 480, | 158 | .yres = 480, |
158 | .left_margin = 2, | 159 | .left_margin = 2, |
159 | .right_margin = 16, | 160 | .right_margin = 16, |
160 | .upper_margin = 33, | 161 | .upper_margin = 33, |
161 | .lower_margin = 10, | 162 | .lower_margin = 10, |
162 | .hsync_len = 39, | 163 | .hsync_len = 39, |
163 | .vsync_len = 2, | 164 | .vsync_len = 2, |
164 | .sync = 0, | 165 | .sync = 0, |
165 | }; | 166 | }; |
166 | 167 | ||
167 | static struct sm501_platdata_fbsub sm501_pdata_fbsub_pnl = { | 168 | static struct sm501_platdata_fbsub sm501_pdata_fbsub_pnl = { |
168 | .def_bpp = 16, | 169 | .def_bpp = 16, |
169 | .def_mode = &sm501_default_mode_pnl, | 170 | .def_mode = &sm501_default_mode_pnl, |
170 | .flags = SM501FB_FLAG_USE_INIT_MODE | | 171 | .flags = SM501FB_FLAG_USE_INIT_MODE | |
171 | SM501FB_FLAG_USE_HWCURSOR | | 172 | SM501FB_FLAG_USE_HWCURSOR | |
172 | SM501FB_FLAG_USE_HWACCEL | | 173 | SM501FB_FLAG_USE_HWACCEL | |
173 | SM501FB_FLAG_DISABLE_AT_EXIT | | 174 | SM501FB_FLAG_DISABLE_AT_EXIT | |
174 | SM501FB_FLAG_PANEL_NO_VBIASEN, | 175 | SM501FB_FLAG_PANEL_NO_VBIASEN, |
175 | }; | 176 | }; |
176 | 177 | ||
177 | static struct sm501_platdata_fbsub sm501_pdata_fbsub_crt = { | 178 | static struct sm501_platdata_fbsub sm501_pdata_fbsub_crt = { |
178 | .def_bpp = 16, | 179 | .def_bpp = 16, |
179 | .def_mode = &sm501_default_mode_crt, | 180 | .def_mode = &sm501_default_mode_crt, |
180 | .flags = SM501FB_FLAG_USE_INIT_MODE | | 181 | .flags = SM501FB_FLAG_USE_INIT_MODE | |
181 | SM501FB_FLAG_USE_HWCURSOR | | 182 | SM501FB_FLAG_USE_HWCURSOR | |
182 | SM501FB_FLAG_USE_HWACCEL | | 183 | SM501FB_FLAG_USE_HWACCEL | |
183 | SM501FB_FLAG_DISABLE_AT_EXIT, | 184 | SM501FB_FLAG_DISABLE_AT_EXIT, |
184 | }; | 185 | }; |
185 | 186 | ||
186 | static struct sm501_platdata_fb sm501_fb_pdata = { | 187 | static struct sm501_platdata_fb sm501_fb_pdata = { |
187 | .fb_route = SM501_FB_OWN, | 188 | .fb_route = SM501_FB_OWN, |
188 | .fb_crt = &sm501_pdata_fbsub_crt, | 189 | .fb_crt = &sm501_pdata_fbsub_crt, |
189 | .fb_pnl = &sm501_pdata_fbsub_pnl, | 190 | .fb_pnl = &sm501_pdata_fbsub_pnl, |
190 | }; | 191 | }; |
191 | 192 | ||
192 | static struct sm501_initdata sm501_initdata = { | 193 | static struct sm501_initdata sm501_initdata = { |
193 | .gpio_high = { | 194 | .gpio_high = { |
194 | .set = 0x00001fe0, | 195 | .set = 0x00001fe0, |
195 | .mask = 0x0, | 196 | .mask = 0x0, |
196 | }, | 197 | }, |
197 | .devices = 0, | 198 | .devices = 0, |
198 | .mclk = 84 * 1000000, | 199 | .mclk = 84 * 1000000, |
199 | .m1xclk = 112 * 1000000, | 200 | .m1xclk = 112 * 1000000, |
200 | }; | 201 | }; |
201 | 202 | ||
202 | static struct sm501_platdata sm501_platform_data = { | 203 | static struct sm501_platdata sm501_platform_data = { |
203 | .init = &sm501_initdata, | 204 | .init = &sm501_initdata, |
204 | .fb = &sm501_fb_pdata, | 205 | .fb = &sm501_fb_pdata, |
205 | }; | 206 | }; |
206 | 207 | ||
207 | static struct platform_device sm501_device = { | 208 | static struct platform_device sm501_device = { |
208 | .name = "sm501", | 209 | .name = "sm501", |
209 | .id = -1, | 210 | .id = -1, |
210 | .dev = { | 211 | .dev = { |
211 | .platform_data = &sm501_platform_data, | 212 | .platform_data = &sm501_platform_data, |
212 | }, | 213 | }, |
213 | .num_resources = ARRAY_SIZE(sm501_resources), | 214 | .num_resources = ARRAY_SIZE(sm501_resources), |
214 | .resource = sm501_resources, | 215 | .resource = sm501_resources, |
215 | }; | 216 | }; |
216 | 217 | ||
217 | static struct resource i2c_proto_resources[] = { | 218 | static struct resource i2c_proto_resources[] = { |
218 | [0] = { | 219 | [0] = { |
219 | .start = PCA9564_PROTO_32BIT_ADDR, | 220 | .start = PCA9564_PROTO_32BIT_ADDR, |
220 | .end = PCA9564_PROTO_32BIT_ADDR + PCA9564_SIZE - 1, | 221 | .end = PCA9564_PROTO_32BIT_ADDR + PCA9564_SIZE - 1, |
221 | .flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT, | 222 | .flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT, |
222 | }, | 223 | }, |
223 | [1] = { | 224 | [1] = { |
224 | .start = 12, | 225 | .start = 12, |
225 | .end = 12, | 226 | .end = 12, |
226 | .flags = IORESOURCE_IRQ, | 227 | .flags = IORESOURCE_IRQ, |
227 | }, | 228 | }, |
228 | }; | 229 | }; |
229 | 230 | ||
230 | static struct resource i2c_resources[] = { | 231 | static struct resource i2c_resources[] = { |
231 | [0] = { | 232 | [0] = { |
232 | .start = PCA9564_ADDR, | 233 | .start = PCA9564_ADDR, |
233 | .end = PCA9564_ADDR + PCA9564_SIZE - 1, | 234 | .end = PCA9564_ADDR + PCA9564_SIZE - 1, |
234 | .flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT, | 235 | .flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT, |
235 | }, | 236 | }, |
236 | [1] = { | 237 | [1] = { |
237 | .start = 12, | 238 | .start = 12, |
238 | .end = 12, | 239 | .end = 12, |
239 | .flags = IORESOURCE_IRQ, | 240 | .flags = IORESOURCE_IRQ, |
240 | }, | 241 | }, |
241 | }; | 242 | }; |
242 | 243 | ||
243 | static struct i2c_pca9564_pf_platform_data i2c_platform_data = { | 244 | static struct i2c_pca9564_pf_platform_data i2c_platform_data = { |
244 | .gpio = 0, | 245 | .gpio = 0, |
245 | .i2c_clock_speed = I2C_PCA_CON_330kHz, | 246 | .i2c_clock_speed = I2C_PCA_CON_330kHz, |
246 | .timeout = HZ, | 247 | .timeout = HZ, |
247 | }; | 248 | }; |
248 | 249 | ||
249 | static struct platform_device i2c_device = { | 250 | static struct platform_device i2c_device = { |
250 | .name = "i2c-pca-platform", | 251 | .name = "i2c-pca-platform", |
251 | .id = -1, | 252 | .id = -1, |
252 | .dev = { | 253 | .dev = { |
253 | .platform_data = &i2c_platform_data, | 254 | .platform_data = &i2c_platform_data, |
254 | }, | 255 | }, |
255 | .num_resources = ARRAY_SIZE(i2c_resources), | 256 | .num_resources = ARRAY_SIZE(i2c_resources), |
256 | .resource = i2c_resources, | 257 | .resource = i2c_resources, |
257 | }; | 258 | }; |
258 | 259 | ||
259 | static struct platform_device *sh7785lcr_devices[] __initdata = { | 260 | static struct platform_device *sh7785lcr_devices[] __initdata = { |
260 | &heartbeat_device, | 261 | &heartbeat_device, |
261 | &nor_flash_device, | 262 | &nor_flash_device, |
262 | &r8a66597_usb_host_device, | 263 | &r8a66597_usb_host_device, |
263 | &sm501_device, | 264 | &sm501_device, |
264 | &i2c_device, | 265 | &i2c_device, |
265 | }; | 266 | }; |
266 | 267 | ||
267 | static struct i2c_board_info __initdata sh7785lcr_i2c_devices[] = { | 268 | static struct i2c_board_info __initdata sh7785lcr_i2c_devices[] = { |
268 | { | 269 | { |
269 | I2C_BOARD_INFO("r2025sd", 0x32), | 270 | I2C_BOARD_INFO("r2025sd", 0x32), |
270 | }, | 271 | }, |
271 | }; | 272 | }; |
272 | 273 | ||
273 | static int __init sh7785lcr_devices_setup(void) | 274 | static int __init sh7785lcr_devices_setup(void) |
274 | { | 275 | { |
275 | i2c_register_board_info(0, sh7785lcr_i2c_devices, | 276 | i2c_register_board_info(0, sh7785lcr_i2c_devices, |
276 | ARRAY_SIZE(sh7785lcr_i2c_devices)); | 277 | ARRAY_SIZE(sh7785lcr_i2c_devices)); |
277 | 278 | ||
278 | if (mach_is_sh7785lcr_pt()) { | 279 | if (mach_is_sh7785lcr_pt()) { |
279 | i2c_device.resource = i2c_proto_resources; | 280 | i2c_device.resource = i2c_proto_resources; |
280 | i2c_device.num_resources = ARRAY_SIZE(i2c_proto_resources); | 281 | i2c_device.num_resources = ARRAY_SIZE(i2c_proto_resources); |
281 | } | 282 | } |
282 | 283 | ||
283 | return platform_add_devices(sh7785lcr_devices, | 284 | return platform_add_devices(sh7785lcr_devices, |
284 | ARRAY_SIZE(sh7785lcr_devices)); | 285 | ARRAY_SIZE(sh7785lcr_devices)); |
285 | } | 286 | } |
286 | __initcall(sh7785lcr_devices_setup); | 287 | __initcall(sh7785lcr_devices_setup); |
287 | 288 | ||
288 | /* Initialize IRQ setting */ | 289 | /* Initialize IRQ setting */ |
289 | void __init init_sh7785lcr_IRQ(void) | 290 | void __init init_sh7785lcr_IRQ(void) |
290 | { | 291 | { |
291 | plat_irq_setup_pins(IRQ_MODE_IRQ7654); | 292 | plat_irq_setup_pins(IRQ_MODE_IRQ7654); |
292 | plat_irq_setup_pins(IRQ_MODE_IRQ3210); | 293 | plat_irq_setup_pins(IRQ_MODE_IRQ3210); |
293 | } | 294 | } |
294 | 295 | ||
295 | static int sh7785lcr_clk_init(void) | 296 | static int sh7785lcr_clk_init(void) |
296 | { | 297 | { |
297 | struct clk *clk; | 298 | struct clk *clk; |
298 | int ret; | 299 | int ret; |
299 | 300 | ||
300 | clk = clk_get(NULL, "extal"); | 301 | clk = clk_get(NULL, "extal"); |
301 | if (!clk || IS_ERR(clk)) | 302 | if (!clk || IS_ERR(clk)) |
302 | return PTR_ERR(clk); | 303 | return PTR_ERR(clk); |
303 | ret = clk_set_rate(clk, 33333333); | 304 | ret = clk_set_rate(clk, 33333333); |
304 | clk_put(clk); | 305 | clk_put(clk); |
305 | 306 | ||
306 | return ret; | 307 | return ret; |
307 | } | 308 | } |
308 | 309 | ||
309 | static void sh7785lcr_power_off(void) | 310 | static void sh7785lcr_power_off(void) |
310 | { | 311 | { |
311 | unsigned char *p; | 312 | unsigned char *p; |
312 | 313 | ||
313 | p = ioremap(PLD_POFCR, PLD_POFCR + 1); | 314 | p = ioremap(PLD_POFCR, PLD_POFCR + 1); |
314 | if (!p) { | 315 | if (!p) { |
315 | printk(KERN_ERR "%s: ioremap error.\n", __func__); | 316 | printk(KERN_ERR "%s: ioremap error.\n", __func__); |
316 | return; | 317 | return; |
317 | } | 318 | } |
318 | *p = 0x01; | 319 | *p = 0x01; |
319 | iounmap(p); | 320 | iounmap(p); |
320 | set_bl_bit(); | 321 | set_bl_bit(); |
321 | while (1) | 322 | while (1) |
322 | cpu_relax(); | 323 | cpu_relax(); |
323 | } | 324 | } |
324 | 325 | ||
325 | /* Initialize the board */ | 326 | /* Initialize the board */ |
326 | static void __init sh7785lcr_setup(char **cmdline_p) | 327 | static void __init sh7785lcr_setup(char **cmdline_p) |
327 | { | 328 | { |
328 | void __iomem *sm501_reg; | 329 | void __iomem *sm501_reg; |
329 | 330 | ||
330 | printk(KERN_INFO "Renesas Technology Corp. R0P7785LC0011RL support.\n"); | 331 | printk(KERN_INFO "Renesas Technology Corp. R0P7785LC0011RL support.\n"); |
331 | 332 | ||
332 | pm_power_off = sh7785lcr_power_off; | 333 | pm_power_off = sh7785lcr_power_off; |
333 | 334 | ||
334 | /* sm501 DRAM configuration */ | 335 | /* sm501 DRAM configuration */ |
335 | sm501_reg = ioremap_fixed(SM107_REG_ADDR, SM501_DRAM_CONTROL, | 336 | sm501_reg = ioremap_nocache(SM107_REG_ADDR, SM501_DRAM_CONTROL); |
336 | PAGE_KERNEL); | ||
337 | if (!sm501_reg) { | 337 | if (!sm501_reg) { |
338 | printk(KERN_ERR "%s: ioremap error.\n", __func__); | 338 | printk(KERN_ERR "%s: ioremap error.\n", __func__); |
339 | return; | 339 | return; |
340 | } | 340 | } |
341 | 341 | ||
342 | writel(0x000307c2, sm501_reg + SM501_DRAM_CONTROL); | 342 | writel(0x000307c2, sm501_reg + SM501_DRAM_CONTROL); |
343 | iounmap_fixed(sm501_reg); | 343 | iounmap(sm501_reg); |
344 | } | 344 | } |
345 | 345 | ||
346 | /* Return the board specific boot mode pin configuration */ | 346 | /* Return the board specific boot mode pin configuration */ |
347 | static int sh7785lcr_mode_pins(void) | 347 | static int sh7785lcr_mode_pins(void) |
348 | { | 348 | { |
349 | int value = 0; | 349 | int value = 0; |
350 | 350 | ||
351 | /* These are the factory default settings of S1 and S2. | 351 | /* These are the factory default settings of S1 and S2. |
352 | * If you change these dip switches then you will need to | 352 | * If you change these dip switches then you will need to |
353 | * adjust the values below as well. | 353 | * adjust the values below as well. |
354 | */ | 354 | */ |
355 | value |= MODE_PIN4; /* Clock Mode 16 */ | 355 | value |= MODE_PIN4; /* Clock Mode 16 */ |
356 | value |= MODE_PIN5; /* 32-bit Area0 bus width */ | 356 | value |= MODE_PIN5; /* 32-bit Area0 bus width */ |
357 | value |= MODE_PIN6; /* 32-bit Area0 bus width */ | 357 | value |= MODE_PIN6; /* 32-bit Area0 bus width */ |
358 | value |= MODE_PIN7; /* Area 0 SRAM interface [fixed] */ | 358 | value |= MODE_PIN7; /* Area 0 SRAM interface [fixed] */ |
359 | value |= MODE_PIN8; /* Little Endian */ | 359 | value |= MODE_PIN8; /* Little Endian */ |
360 | value |= MODE_PIN9; /* Master Mode */ | 360 | value |= MODE_PIN9; /* Master Mode */ |
361 | value |= MODE_PIN14; /* No PLL step-up */ | 361 | value |= MODE_PIN14; /* No PLL step-up */ |
362 | 362 | ||
363 | return value; | 363 | return value; |
364 | } | 364 | } |
365 | 365 | ||
366 | /* | 366 | /* |
367 | * The Machine Vector | 367 | * The Machine Vector |
368 | */ | 368 | */ |
369 | static struct sh_machine_vector mv_sh7785lcr __initmv = { | 369 | static struct sh_machine_vector mv_sh7785lcr __initmv = { |
370 | .mv_name = "SH7785LCR", | 370 | .mv_name = "SH7785LCR", |
371 | .mv_setup = sh7785lcr_setup, | 371 | .mv_setup = sh7785lcr_setup, |
372 | .mv_clk_init = sh7785lcr_clk_init, | 372 | .mv_clk_init = sh7785lcr_clk_init, |
373 | .mv_init_irq = init_sh7785lcr_IRQ, | 373 | .mv_init_irq = init_sh7785lcr_IRQ, |
374 | .mv_mode_pins = sh7785lcr_mode_pins, | 374 | .mv_mode_pins = sh7785lcr_mode_pins, |
375 | }; | 375 | }; |
376 | 376 |
arch/sh/boards/mach-landisk/setup.c
1 | /* | 1 | /* |
2 | * arch/sh/boards/landisk/setup.c | 2 | * arch/sh/boards/landisk/setup.c |
3 | * | 3 | * |
4 | * I-O DATA Device, Inc. LANDISK Support. | 4 | * I-O DATA Device, Inc. LANDISK Support. |
5 | * | 5 | * |
6 | * Copyright (C) 2000 Kazumoto Kojima | 6 | * Copyright (C) 2000 Kazumoto Kojima |
7 | * Copyright (C) 2002 Paul Mundt | 7 | * Copyright (C) 2002 Paul Mundt |
8 | * Copylight (C) 2002 Atom Create Engineering Co., Ltd. | 8 | * Copylight (C) 2002 Atom Create Engineering Co., Ltd. |
9 | * Copyright (C) 2005-2007 kogiidena | 9 | * Copyright (C) 2005-2007 kogiidena |
10 | * | 10 | * |
11 | * This file is subject to the terms and conditions of the GNU General Public | 11 | * This file is subject to the terms and conditions of the GNU General Public |
12 | * License. See the file "COPYING" in the main directory of this archive | 12 | * License. See the file "COPYING" in the main directory of this archive |
13 | * for more details. | 13 | * for more details. |
14 | */ | 14 | */ |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
17 | #include <linux/ata_platform.h> | 17 | #include <linux/ata_platform.h> |
18 | #include <linux/pm.h> | 18 | #include <linux/pm.h> |
19 | #include <linux/mm.h> | 19 | #include <linux/mm.h> |
20 | #include <asm/machvec.h> | 20 | #include <asm/machvec.h> |
21 | #include <mach-landisk/mach/iodata_landisk.h> | 21 | #include <mach-landisk/mach/iodata_landisk.h> |
22 | #include <asm/io.h> | 22 | #include <asm/io.h> |
23 | 23 | ||
24 | void init_landisk_IRQ(void); | 24 | void init_landisk_IRQ(void); |
25 | 25 | ||
26 | static void landisk_power_off(void) | 26 | static void landisk_power_off(void) |
27 | { | 27 | { |
28 | ctrl_outb(0x01, PA_SHUTDOWN); | 28 | ctrl_outb(0x01, PA_SHUTDOWN); |
29 | } | 29 | } |
30 | 30 | ||
31 | static struct resource cf_ide_resources[3]; | 31 | static struct resource cf_ide_resources[3]; |
32 | 32 | ||
33 | static struct pata_platform_info pata_info = { | 33 | static struct pata_platform_info pata_info = { |
34 | .ioport_shift = 1, | 34 | .ioport_shift = 1, |
35 | }; | 35 | }; |
36 | 36 | ||
37 | static struct platform_device cf_ide_device = { | 37 | static struct platform_device cf_ide_device = { |
38 | .name = "pata_platform", | 38 | .name = "pata_platform", |
39 | .id = -1, | 39 | .id = -1, |
40 | .num_resources = ARRAY_SIZE(cf_ide_resources), | 40 | .num_resources = ARRAY_SIZE(cf_ide_resources), |
41 | .resource = cf_ide_resources, | 41 | .resource = cf_ide_resources, |
42 | .dev = { | 42 | .dev = { |
43 | .platform_data = &pata_info, | 43 | .platform_data = &pata_info, |
44 | }, | 44 | }, |
45 | }; | 45 | }; |
46 | 46 | ||
47 | static struct platform_device rtc_device = { | 47 | static struct platform_device rtc_device = { |
48 | .name = "rs5c313", | 48 | .name = "rs5c313", |
49 | .id = -1, | 49 | .id = -1, |
50 | }; | 50 | }; |
51 | 51 | ||
52 | static struct platform_device *landisk_devices[] __initdata = { | 52 | static struct platform_device *landisk_devices[] __initdata = { |
53 | &cf_ide_device, | 53 | &cf_ide_device, |
54 | &rtc_device, | 54 | &rtc_device, |
55 | }; | 55 | }; |
56 | 56 | ||
57 | static int __init landisk_devices_setup(void) | 57 | static int __init landisk_devices_setup(void) |
58 | { | 58 | { |
59 | pgprot_t prot; | 59 | pgprot_t prot; |
60 | unsigned long paddrbase; | 60 | unsigned long paddrbase; |
61 | void *cf_ide_base; | 61 | void *cf_ide_base; |
62 | 62 | ||
63 | /* open I/O area window */ | 63 | /* open I/O area window */ |
64 | paddrbase = virt_to_phys((void *)PA_AREA5_IO); | 64 | paddrbase = virt_to_phys((void *)PA_AREA5_IO); |
65 | prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16); | 65 | prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16); |
66 | cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot.pgprot); | 66 | cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot); |
67 | if (!cf_ide_base) { | 67 | if (!cf_ide_base) { |
68 | printk("allocate_cf_area : can't open CF I/O window!\n"); | 68 | printk("allocate_cf_area : can't open CF I/O window!\n"); |
69 | return -ENOMEM; | 69 | return -ENOMEM; |
70 | } | 70 | } |
71 | 71 | ||
72 | /* IDE cmd address : 0x1f0-0x1f7 and 0x3f6 */ | 72 | /* IDE cmd address : 0x1f0-0x1f7 and 0x3f6 */ |
73 | cf_ide_resources[0].start = (unsigned long)cf_ide_base + 0x40; | 73 | cf_ide_resources[0].start = (unsigned long)cf_ide_base + 0x40; |
74 | cf_ide_resources[0].end = (unsigned long)cf_ide_base + 0x40 + 0x0f; | 74 | cf_ide_resources[0].end = (unsigned long)cf_ide_base + 0x40 + 0x0f; |
75 | cf_ide_resources[0].flags = IORESOURCE_IO; | 75 | cf_ide_resources[0].flags = IORESOURCE_IO; |
76 | cf_ide_resources[1].start = (unsigned long)cf_ide_base + 0x2c; | 76 | cf_ide_resources[1].start = (unsigned long)cf_ide_base + 0x2c; |
77 | cf_ide_resources[1].end = (unsigned long)cf_ide_base + 0x2c + 0x03; | 77 | cf_ide_resources[1].end = (unsigned long)cf_ide_base + 0x2c + 0x03; |
78 | cf_ide_resources[1].flags = IORESOURCE_IO; | 78 | cf_ide_resources[1].flags = IORESOURCE_IO; |
79 | cf_ide_resources[2].start = IRQ_FATA; | 79 | cf_ide_resources[2].start = IRQ_FATA; |
80 | cf_ide_resources[2].flags = IORESOURCE_IRQ; | 80 | cf_ide_resources[2].flags = IORESOURCE_IRQ; |
81 | 81 | ||
82 | return platform_add_devices(landisk_devices, | 82 | return platform_add_devices(landisk_devices, |
83 | ARRAY_SIZE(landisk_devices)); | 83 | ARRAY_SIZE(landisk_devices)); |
84 | } | 84 | } |
85 | 85 | ||
86 | __initcall(landisk_devices_setup); | 86 | __initcall(landisk_devices_setup); |
87 | 87 | ||
88 | static void __init landisk_setup(char **cmdline_p) | 88 | static void __init landisk_setup(char **cmdline_p) |
89 | { | 89 | { |
90 | /* LED ON */ | 90 | /* LED ON */ |
91 | ctrl_outb(ctrl_inb(PA_LED) | 0x03, PA_LED); | 91 | ctrl_outb(ctrl_inb(PA_LED) | 0x03, PA_LED); |
92 | 92 | ||
93 | printk(KERN_INFO "I-O DATA DEVICE, INC. \"LANDISK Series\" support.\n"); | 93 | printk(KERN_INFO "I-O DATA DEVICE, INC. \"LANDISK Series\" support.\n"); |
94 | pm_power_off = landisk_power_off; | 94 | pm_power_off = landisk_power_off; |
95 | } | 95 | } |
96 | 96 | ||
97 | /* | 97 | /* |
98 | * The Machine Vector | 98 | * The Machine Vector |
99 | */ | 99 | */ |
100 | static struct sh_machine_vector mv_landisk __initmv = { | 100 | static struct sh_machine_vector mv_landisk __initmv = { |
101 | .mv_name = "LANDISK", | 101 | .mv_name = "LANDISK", |
102 | .mv_nr_irqs = 72, | 102 | .mv_nr_irqs = 72, |
103 | .mv_setup = landisk_setup, | 103 | .mv_setup = landisk_setup, |
104 | .mv_init_irq = init_landisk_IRQ, | 104 | .mv_init_irq = init_landisk_IRQ, |
105 | }; | 105 | }; |
106 | 106 |
arch/sh/boards/mach-lboxre2/setup.c
1 | /* | 1 | /* |
2 | * linux/arch/sh/boards/lbox/setup.c | 2 | * linux/arch/sh/boards/lbox/setup.c |
3 | * | 3 | * |
4 | * Copyright (C) 2007 Nobuhiro Iwamatsu | 4 | * Copyright (C) 2007 Nobuhiro Iwamatsu |
5 | * | 5 | * |
6 | * NTT COMWARE L-BOX RE2 Support | 6 | * NTT COMWARE L-BOX RE2 Support |
7 | * | 7 | * |
8 | * This file is subject to the terms and conditions of the GNU General Public | 8 | * This file is subject to the terms and conditions of the GNU General Public |
9 | * License. See the file "COPYING" in the main directory of this archive | 9 | * License. See the file "COPYING" in the main directory of this archive |
10 | * for more details. | 10 | * for more details. |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/ata_platform.h> | 16 | #include <linux/ata_platform.h> |
17 | #include <asm/machvec.h> | 17 | #include <asm/machvec.h> |
18 | #include <asm/addrspace.h> | 18 | #include <asm/addrspace.h> |
19 | #include <mach/lboxre2.h> | 19 | #include <mach/lboxre2.h> |
20 | #include <asm/io.h> | 20 | #include <asm/io.h> |
21 | 21 | ||
22 | static struct resource cf_ide_resources[] = { | 22 | static struct resource cf_ide_resources[] = { |
23 | [0] = { | 23 | [0] = { |
24 | .start = 0x1f0, | 24 | .start = 0x1f0, |
25 | .end = 0x1f0 + 8 , | 25 | .end = 0x1f0 + 8 , |
26 | .flags = IORESOURCE_IO, | 26 | .flags = IORESOURCE_IO, |
27 | }, | 27 | }, |
28 | [1] = { | 28 | [1] = { |
29 | .start = 0x1f0 + 0x206, | 29 | .start = 0x1f0 + 0x206, |
30 | .end = 0x1f0 +8 + 0x206 + 8, | 30 | .end = 0x1f0 +8 + 0x206 + 8, |
31 | .flags = IORESOURCE_IO, | 31 | .flags = IORESOURCE_IO, |
32 | }, | 32 | }, |
33 | [2] = { | 33 | [2] = { |
34 | .start = IRQ_CF0, | 34 | .start = IRQ_CF0, |
35 | .flags = IORESOURCE_IRQ, | 35 | .flags = IORESOURCE_IRQ, |
36 | }, | 36 | }, |
37 | }; | 37 | }; |
38 | 38 | ||
39 | static struct platform_device cf_ide_device = { | 39 | static struct platform_device cf_ide_device = { |
40 | .name = "pata_platform", | 40 | .name = "pata_platform", |
41 | .id = -1, | 41 | .id = -1, |
42 | .num_resources = ARRAY_SIZE(cf_ide_resources), | 42 | .num_resources = ARRAY_SIZE(cf_ide_resources), |
43 | .resource = cf_ide_resources, | 43 | .resource = cf_ide_resources, |
44 | }; | 44 | }; |
45 | 45 | ||
46 | static struct platform_device *lboxre2_devices[] __initdata = { | 46 | static struct platform_device *lboxre2_devices[] __initdata = { |
47 | &cf_ide_device, | 47 | &cf_ide_device, |
48 | }; | 48 | }; |
49 | 49 | ||
50 | static int __init lboxre2_devices_setup(void) | 50 | static int __init lboxre2_devices_setup(void) |
51 | { | 51 | { |
52 | u32 cf0_io_base; /* Boot CF base address */ | 52 | u32 cf0_io_base; /* Boot CF base address */ |
53 | pgprot_t prot; | 53 | pgprot_t prot; |
54 | unsigned long paddrbase, psize; | 54 | unsigned long paddrbase, psize; |
55 | 55 | ||
56 | /* open I/O area window */ | 56 | /* open I/O area window */ |
57 | paddrbase = virt_to_phys((void*)PA_AREA5_IO); | 57 | paddrbase = virt_to_phys((void*)PA_AREA5_IO); |
58 | psize = PAGE_SIZE; | 58 | psize = PAGE_SIZE; |
59 | prot = PAGE_KERNEL_PCC( 1 , _PAGE_PCC_IO16); | 59 | prot = PAGE_KERNEL_PCC( 1 , _PAGE_PCC_IO16); |
60 | cf0_io_base = (u32)p3_ioremap(paddrbase, psize, prot.pgprot); | 60 | cf0_io_base = (u32)p3_ioremap(paddrbase, psize, prot); |
61 | if (!cf0_io_base) { | 61 | if (!cf0_io_base) { |
62 | printk(KERN_ERR "%s : can't open CF I/O window!\n" , __func__ ); | 62 | printk(KERN_ERR "%s : can't open CF I/O window!\n" , __func__ ); |
63 | return -ENOMEM; | 63 | return -ENOMEM; |
64 | } | 64 | } |
65 | 65 | ||
66 | cf_ide_resources[0].start += cf0_io_base ; | 66 | cf_ide_resources[0].start += cf0_io_base ; |
67 | cf_ide_resources[0].end += cf0_io_base ; | 67 | cf_ide_resources[0].end += cf0_io_base ; |
68 | cf_ide_resources[1].start += cf0_io_base ; | 68 | cf_ide_resources[1].start += cf0_io_base ; |
69 | cf_ide_resources[1].end += cf0_io_base ; | 69 | cf_ide_resources[1].end += cf0_io_base ; |
70 | 70 | ||
71 | return platform_add_devices(lboxre2_devices, | 71 | return platform_add_devices(lboxre2_devices, |
72 | ARRAY_SIZE(lboxre2_devices)); | 72 | ARRAY_SIZE(lboxre2_devices)); |
73 | 73 | ||
74 | } | 74 | } |
75 | device_initcall(lboxre2_devices_setup); | 75 | device_initcall(lboxre2_devices_setup); |
76 | 76 | ||
77 | /* | 77 | /* |
78 | * The Machine Vector | 78 | * The Machine Vector |
79 | */ | 79 | */ |
80 | static struct sh_machine_vector mv_lboxre2 __initmv = { | 80 | static struct sh_machine_vector mv_lboxre2 __initmv = { |
81 | .mv_name = "L-BOX RE2", | 81 | .mv_name = "L-BOX RE2", |
82 | .mv_nr_irqs = 72, | 82 | .mv_nr_irqs = 72, |
83 | .mv_init_irq = init_lboxre2_IRQ, | 83 | .mv_init_irq = init_lboxre2_IRQ, |
84 | }; | 84 | }; |
85 | 85 |
arch/sh/boards/mach-sh03/setup.c
1 | /* | 1 | /* |
2 | * linux/arch/sh/boards/sh03/setup.c | 2 | * linux/arch/sh/boards/sh03/setup.c |
3 | * | 3 | * |
4 | * Copyright (C) 2004 Interface Co.,Ltd. Saito.K | 4 | * Copyright (C) 2004 Interface Co.,Ltd. Saito.K |
5 | * | 5 | * |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/init.h> | 8 | #include <linux/init.h> |
9 | #include <linux/irq.h> | 9 | #include <linux/irq.h> |
10 | #include <linux/pci.h> | 10 | #include <linux/pci.h> |
11 | #include <linux/platform_device.h> | 11 | #include <linux/platform_device.h> |
12 | #include <linux/ata_platform.h> | 12 | #include <linux/ata_platform.h> |
13 | #include <asm/io.h> | 13 | #include <asm/io.h> |
14 | #include <asm/rtc.h> | 14 | #include <asm/rtc.h> |
15 | #include <mach-sh03/mach/io.h> | 15 | #include <mach-sh03/mach/io.h> |
16 | #include <mach-sh03/mach/sh03.h> | 16 | #include <mach-sh03/mach/sh03.h> |
17 | #include <asm/addrspace.h> | 17 | #include <asm/addrspace.h> |
18 | 18 | ||
19 | static void __init init_sh03_IRQ(void) | 19 | static void __init init_sh03_IRQ(void) |
20 | { | 20 | { |
21 | plat_irq_setup_pins(IRQ_MODE_IRQ); | 21 | plat_irq_setup_pins(IRQ_MODE_IRQ); |
22 | } | 22 | } |
23 | 23 | ||
24 | /* arch/sh/boards/sh03/rtc.c */ | 24 | /* arch/sh/boards/sh03/rtc.c */ |
25 | void sh03_time_init(void); | 25 | void sh03_time_init(void); |
26 | 26 | ||
27 | static void __init sh03_setup(char **cmdline_p) | 27 | static void __init sh03_setup(char **cmdline_p) |
28 | { | 28 | { |
29 | board_time_init = sh03_time_init; | 29 | board_time_init = sh03_time_init; |
30 | } | 30 | } |
31 | 31 | ||
32 | static struct resource cf_ide_resources[] = { | 32 | static struct resource cf_ide_resources[] = { |
33 | [0] = { | 33 | [0] = { |
34 | .start = 0x1f0, | 34 | .start = 0x1f0, |
35 | .end = 0x1f0 + 8, | 35 | .end = 0x1f0 + 8, |
36 | .flags = IORESOURCE_IO, | 36 | .flags = IORESOURCE_IO, |
37 | }, | 37 | }, |
38 | [1] = { | 38 | [1] = { |
39 | .start = 0x1f0 + 0x206, | 39 | .start = 0x1f0 + 0x206, |
40 | .end = 0x1f0 +8 + 0x206 + 8, | 40 | .end = 0x1f0 +8 + 0x206 + 8, |
41 | .flags = IORESOURCE_IO, | 41 | .flags = IORESOURCE_IO, |
42 | }, | 42 | }, |
43 | [2] = { | 43 | [2] = { |
44 | .start = IRL2_IRQ, | 44 | .start = IRL2_IRQ, |
45 | .flags = IORESOURCE_IRQ, | 45 | .flags = IORESOURCE_IRQ, |
46 | }, | 46 | }, |
47 | }; | 47 | }; |
48 | 48 | ||
49 | static struct platform_device cf_ide_device = { | 49 | static struct platform_device cf_ide_device = { |
50 | .name = "pata_platform", | 50 | .name = "pata_platform", |
51 | .id = -1, | 51 | .id = -1, |
52 | .num_resources = ARRAY_SIZE(cf_ide_resources), | 52 | .num_resources = ARRAY_SIZE(cf_ide_resources), |
53 | .resource = cf_ide_resources, | 53 | .resource = cf_ide_resources, |
54 | }; | 54 | }; |
55 | 55 | ||
56 | static struct resource heartbeat_resources[] = { | 56 | static struct resource heartbeat_resources[] = { |
57 | [0] = { | 57 | [0] = { |
58 | .start = 0xa0800000, | 58 | .start = 0xa0800000, |
59 | .end = 0xa0800000, | 59 | .end = 0xa0800000, |
60 | .flags = IORESOURCE_MEM, | 60 | .flags = IORESOURCE_MEM, |
61 | }, | 61 | }, |
62 | }; | 62 | }; |
63 | 63 | ||
64 | static struct platform_device heartbeat_device = { | 64 | static struct platform_device heartbeat_device = { |
65 | .name = "heartbeat", | 65 | .name = "heartbeat", |
66 | .id = -1, | 66 | .id = -1, |
67 | .num_resources = ARRAY_SIZE(heartbeat_resources), | 67 | .num_resources = ARRAY_SIZE(heartbeat_resources), |
68 | .resource = heartbeat_resources, | 68 | .resource = heartbeat_resources, |
69 | }; | 69 | }; |
70 | 70 | ||
71 | static struct platform_device *sh03_devices[] __initdata = { | 71 | static struct platform_device *sh03_devices[] __initdata = { |
72 | &heartbeat_device, | 72 | &heartbeat_device, |
73 | &cf_ide_device, | 73 | &cf_ide_device, |
74 | }; | 74 | }; |
75 | 75 | ||
76 | static int __init sh03_devices_setup(void) | 76 | static int __init sh03_devices_setup(void) |
77 | { | 77 | { |
78 | pgprot_t prot; | 78 | pgprot_t prot; |
79 | unsigned long paddrbase; | 79 | unsigned long paddrbase; |
80 | void *cf_ide_base; | 80 | void *cf_ide_base; |
81 | 81 | ||
82 | /* open I/O area window */ | 82 | /* open I/O area window */ |
83 | paddrbase = virt_to_phys((void *)PA_AREA5_IO); | 83 | paddrbase = virt_to_phys((void *)PA_AREA5_IO); |
84 | prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16); | 84 | prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16); |
85 | cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot.pgprot); | 85 | cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot); |
86 | if (!cf_ide_base) { | 86 | if (!cf_ide_base) { |
87 | printk("allocate_cf_area : can't open CF I/O window!\n"); | 87 | printk("allocate_cf_area : can't open CF I/O window!\n"); |
88 | return -ENOMEM; | 88 | return -ENOMEM; |
89 | } | 89 | } |
90 | 90 | ||
91 | /* IDE cmd address : 0x1f0-0x1f7 and 0x3f6 */ | 91 | /* IDE cmd address : 0x1f0-0x1f7 and 0x3f6 */ |
92 | cf_ide_resources[0].start += (unsigned long)cf_ide_base; | 92 | cf_ide_resources[0].start += (unsigned long)cf_ide_base; |
93 | cf_ide_resources[0].end += (unsigned long)cf_ide_base; | 93 | cf_ide_resources[0].end += (unsigned long)cf_ide_base; |
94 | cf_ide_resources[1].start += (unsigned long)cf_ide_base; | 94 | cf_ide_resources[1].start += (unsigned long)cf_ide_base; |
95 | cf_ide_resources[1].end += (unsigned long)cf_ide_base; | 95 | cf_ide_resources[1].end += (unsigned long)cf_ide_base; |
96 | 96 | ||
97 | return platform_add_devices(sh03_devices, ARRAY_SIZE(sh03_devices)); | 97 | return platform_add_devices(sh03_devices, ARRAY_SIZE(sh03_devices)); |
98 | } | 98 | } |
99 | __initcall(sh03_devices_setup); | 99 | __initcall(sh03_devices_setup); |
100 | 100 | ||
101 | static struct sh_machine_vector mv_sh03 __initmv = { | 101 | static struct sh_machine_vector mv_sh03 __initmv = { |
102 | .mv_name = "Interface (CTP/PCI-SH03)", | 102 | .mv_name = "Interface (CTP/PCI-SH03)", |
103 | .mv_setup = sh03_setup, | 103 | .mv_setup = sh03_setup, |
104 | .mv_nr_irqs = 48, | 104 | .mv_nr_irqs = 48, |
105 | .mv_init_irq = init_sh03_IRQ, | 105 | .mv_init_irq = init_sh03_IRQ, |
106 | }; | 106 | }; |
107 | 107 |
arch/sh/include/asm/io.h
1 | #ifndef __ASM_SH_IO_H | 1 | #ifndef __ASM_SH_IO_H |
2 | #define __ASM_SH_IO_H | 2 | #define __ASM_SH_IO_H |
3 | /* | 3 | /* |
4 | * Convention: | 4 | * Convention: |
5 | * read{b,w,l,q}/write{b,w,l,q} are for PCI, | 5 | * read{b,w,l,q}/write{b,w,l,q} are for PCI, |
6 | * while in{b,w,l}/out{b,w,l} are for ISA | 6 | * while in{b,w,l}/out{b,w,l} are for ISA |
7 | * | 7 | * |
8 | * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p | 8 | * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p |
9 | * and 'string' versions: ins{b,w,l}/outs{b,w,l} | 9 | * and 'string' versions: ins{b,w,l}/outs{b,w,l} |
10 | * | 10 | * |
11 | * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers | 11 | * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers |
12 | * automatically, there are also __raw versions, which do not. | 12 | * automatically, there are also __raw versions, which do not. |
13 | * | 13 | * |
14 | * Historically, we have also had ctrl_in{b,w,l,q}/ctrl_out{b,w,l,q} for | 14 | * Historically, we have also had ctrl_in{b,w,l,q}/ctrl_out{b,w,l,q} for |
15 | * SuperH specific I/O (raw I/O to on-chip CPU peripherals). In practice | 15 | * SuperH specific I/O (raw I/O to on-chip CPU peripherals). In practice |
16 | * these have the same semantics as the __raw variants, and as such, all | 16 | * these have the same semantics as the __raw variants, and as such, all |
17 | * new code should be using the __raw versions. | 17 | * new code should be using the __raw versions. |
18 | * | 18 | * |
19 | * All ISA I/O routines are wrapped through the machine vector. If a | 19 | * All ISA I/O routines are wrapped through the machine vector. If a |
20 | * board does not provide overrides, a generic set that are copied in | 20 | * board does not provide overrides, a generic set that are copied in |
21 | * from the default machine vector are used instead. These are largely | 21 | * from the default machine vector are used instead. These are largely |
22 | * for old compat code for I/O offseting to SuperIOs, all of which are | 22 | * for old compat code for I/O offseting to SuperIOs, all of which are |
23 | * better handled through the machvec ioport mapping routines these days. | 23 | * better handled through the machvec ioport mapping routines these days. |
24 | */ | 24 | */ |
25 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
26 | #include <asm/cache.h> | 26 | #include <asm/cache.h> |
27 | #include <asm/system.h> | 27 | #include <asm/system.h> |
28 | #include <asm/addrspace.h> | 28 | #include <asm/addrspace.h> |
29 | #include <asm/machvec.h> | 29 | #include <asm/machvec.h> |
30 | #include <asm/pgtable.h> | 30 | #include <asm/pgtable.h> |
31 | #include <asm-generic/iomap.h> | 31 | #include <asm-generic/iomap.h> |
32 | 32 | ||
33 | #ifdef __KERNEL__ | 33 | #ifdef __KERNEL__ |
34 | /* | 34 | /* |
35 | * Depending on which platform we are running on, we need different | 35 | * Depending on which platform we are running on, we need different |
36 | * I/O functions. | 36 | * I/O functions. |
37 | */ | 37 | */ |
38 | #define __IO_PREFIX generic | 38 | #define __IO_PREFIX generic |
39 | #include <asm/io_generic.h> | 39 | #include <asm/io_generic.h> |
40 | #include <asm/io_trapped.h> | 40 | #include <asm/io_trapped.h> |
41 | 41 | ||
42 | #define inb(p) sh_mv.mv_inb((p)) | 42 | #define inb(p) sh_mv.mv_inb((p)) |
43 | #define inw(p) sh_mv.mv_inw((p)) | 43 | #define inw(p) sh_mv.mv_inw((p)) |
44 | #define inl(p) sh_mv.mv_inl((p)) | 44 | #define inl(p) sh_mv.mv_inl((p)) |
45 | #define outb(x,p) sh_mv.mv_outb((x),(p)) | 45 | #define outb(x,p) sh_mv.mv_outb((x),(p)) |
46 | #define outw(x,p) sh_mv.mv_outw((x),(p)) | 46 | #define outw(x,p) sh_mv.mv_outw((x),(p)) |
47 | #define outl(x,p) sh_mv.mv_outl((x),(p)) | 47 | #define outl(x,p) sh_mv.mv_outl((x),(p)) |
48 | 48 | ||
49 | #define inb_p(p) sh_mv.mv_inb_p((p)) | 49 | #define inb_p(p) sh_mv.mv_inb_p((p)) |
50 | #define inw_p(p) sh_mv.mv_inw_p((p)) | 50 | #define inw_p(p) sh_mv.mv_inw_p((p)) |
51 | #define inl_p(p) sh_mv.mv_inl_p((p)) | 51 | #define inl_p(p) sh_mv.mv_inl_p((p)) |
52 | #define outb_p(x,p) sh_mv.mv_outb_p((x),(p)) | 52 | #define outb_p(x,p) sh_mv.mv_outb_p((x),(p)) |
53 | #define outw_p(x,p) sh_mv.mv_outw_p((x),(p)) | 53 | #define outw_p(x,p) sh_mv.mv_outw_p((x),(p)) |
54 | #define outl_p(x,p) sh_mv.mv_outl_p((x),(p)) | 54 | #define outl_p(x,p) sh_mv.mv_outl_p((x),(p)) |
55 | 55 | ||
56 | #define insb(p,b,c) sh_mv.mv_insb((p), (b), (c)) | 56 | #define insb(p,b,c) sh_mv.mv_insb((p), (b), (c)) |
57 | #define insw(p,b,c) sh_mv.mv_insw((p), (b), (c)) | 57 | #define insw(p,b,c) sh_mv.mv_insw((p), (b), (c)) |
58 | #define insl(p,b,c) sh_mv.mv_insl((p), (b), (c)) | 58 | #define insl(p,b,c) sh_mv.mv_insl((p), (b), (c)) |
59 | #define outsb(p,b,c) sh_mv.mv_outsb((p), (b), (c)) | 59 | #define outsb(p,b,c) sh_mv.mv_outsb((p), (b), (c)) |
60 | #define outsw(p,b,c) sh_mv.mv_outsw((p), (b), (c)) | 60 | #define outsw(p,b,c) sh_mv.mv_outsw((p), (b), (c)) |
61 | #define outsl(p,b,c) sh_mv.mv_outsl((p), (b), (c)) | 61 | #define outsl(p,b,c) sh_mv.mv_outsl((p), (b), (c)) |
62 | 62 | ||
63 | #define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v)) | 63 | #define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v)) |
64 | #define __raw_writew(v,a) (__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v)) | 64 | #define __raw_writew(v,a) (__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v)) |
65 | #define __raw_writel(v,a) (__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v)) | 65 | #define __raw_writel(v,a) (__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v)) |
66 | #define __raw_writeq(v,a) (__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v)) | 66 | #define __raw_writeq(v,a) (__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v)) |
67 | 67 | ||
68 | #define __raw_readb(a) (__chk_io_ptr(a), *(volatile u8 __force *)(a)) | 68 | #define __raw_readb(a) (__chk_io_ptr(a), *(volatile u8 __force *)(a)) |
69 | #define __raw_readw(a) (__chk_io_ptr(a), *(volatile u16 __force *)(a)) | 69 | #define __raw_readw(a) (__chk_io_ptr(a), *(volatile u16 __force *)(a)) |
70 | #define __raw_readl(a) (__chk_io_ptr(a), *(volatile u32 __force *)(a)) | 70 | #define __raw_readl(a) (__chk_io_ptr(a), *(volatile u32 __force *)(a)) |
71 | #define __raw_readq(a) (__chk_io_ptr(a), *(volatile u64 __force *)(a)) | 71 | #define __raw_readq(a) (__chk_io_ptr(a), *(volatile u64 __force *)(a)) |
72 | 72 | ||
73 | #define readb(a) ({ u8 r_ = __raw_readb(a); mb(); r_; }) | 73 | #define readb(a) ({ u8 r_ = __raw_readb(a); mb(); r_; }) |
74 | #define readw(a) ({ u16 r_ = __raw_readw(a); mb(); r_; }) | 74 | #define readw(a) ({ u16 r_ = __raw_readw(a); mb(); r_; }) |
75 | #define readl(a) ({ u32 r_ = __raw_readl(a); mb(); r_; }) | 75 | #define readl(a) ({ u32 r_ = __raw_readl(a); mb(); r_; }) |
76 | #define readq(a) ({ u64 r_ = __raw_readq(a); mb(); r_; }) | 76 | #define readq(a) ({ u64 r_ = __raw_readq(a); mb(); r_; }) |
77 | 77 | ||
78 | #define writeb(v,a) ({ __raw_writeb((v),(a)); mb(); }) | 78 | #define writeb(v,a) ({ __raw_writeb((v),(a)); mb(); }) |
79 | #define writew(v,a) ({ __raw_writew((v),(a)); mb(); }) | 79 | #define writew(v,a) ({ __raw_writew((v),(a)); mb(); }) |
80 | #define writel(v,a) ({ __raw_writel((v),(a)); mb(); }) | 80 | #define writel(v,a) ({ __raw_writel((v),(a)); mb(); }) |
81 | #define writeq(v,a) ({ __raw_writeq((v),(a)); mb(); }) | 81 | #define writeq(v,a) ({ __raw_writeq((v),(a)); mb(); }) |
82 | 82 | ||
83 | /* SuperH on-chip I/O functions */ | 83 | /* SuperH on-chip I/O functions */ |
84 | #define ctrl_inb __raw_readb | 84 | #define ctrl_inb __raw_readb |
85 | #define ctrl_inw __raw_readw | 85 | #define ctrl_inw __raw_readw |
86 | #define ctrl_inl __raw_readl | 86 | #define ctrl_inl __raw_readl |
87 | #define ctrl_inq __raw_readq | 87 | #define ctrl_inq __raw_readq |
88 | 88 | ||
89 | #define ctrl_outb __raw_writeb | 89 | #define ctrl_outb __raw_writeb |
90 | #define ctrl_outw __raw_writew | 90 | #define ctrl_outw __raw_writew |
91 | #define ctrl_outl __raw_writel | 91 | #define ctrl_outl __raw_writel |
92 | #define ctrl_outq __raw_writeq | 92 | #define ctrl_outq __raw_writeq |
93 | 93 | ||
94 | extern unsigned long generic_io_base; | 94 | extern unsigned long generic_io_base; |
95 | 95 | ||
96 | static inline void ctrl_delay(void) | 96 | static inline void ctrl_delay(void) |
97 | { | 97 | { |
98 | __raw_readw(generic_io_base); | 98 | __raw_readw(generic_io_base); |
99 | } | 99 | } |
100 | 100 | ||
101 | #define __BUILD_MEMORY_STRING(bwlq, type) \ | 101 | #define __BUILD_MEMORY_STRING(bwlq, type) \ |
102 | \ | 102 | \ |
103 | static inline void __raw_writes##bwlq(volatile void __iomem *mem, \ | 103 | static inline void __raw_writes##bwlq(volatile void __iomem *mem, \ |
104 | const void *addr, unsigned int count) \ | 104 | const void *addr, unsigned int count) \ |
105 | { \ | 105 | { \ |
106 | const volatile type *__addr = addr; \ | 106 | const volatile type *__addr = addr; \ |
107 | \ | 107 | \ |
108 | while (count--) { \ | 108 | while (count--) { \ |
109 | __raw_write##bwlq(*__addr, mem); \ | 109 | __raw_write##bwlq(*__addr, mem); \ |
110 | __addr++; \ | 110 | __addr++; \ |
111 | } \ | 111 | } \ |
112 | } \ | 112 | } \ |
113 | \ | 113 | \ |
114 | static inline void __raw_reads##bwlq(volatile void __iomem *mem, \ | 114 | static inline void __raw_reads##bwlq(volatile void __iomem *mem, \ |
115 | void *addr, unsigned int count) \ | 115 | void *addr, unsigned int count) \ |
116 | { \ | 116 | { \ |
117 | volatile type *__addr = addr; \ | 117 | volatile type *__addr = addr; \ |
118 | \ | 118 | \ |
119 | while (count--) { \ | 119 | while (count--) { \ |
120 | *__addr = __raw_read##bwlq(mem); \ | 120 | *__addr = __raw_read##bwlq(mem); \ |
121 | __addr++; \ | 121 | __addr++; \ |
122 | } \ | 122 | } \ |
123 | } | 123 | } |
124 | 124 | ||
125 | __BUILD_MEMORY_STRING(b, u8) | 125 | __BUILD_MEMORY_STRING(b, u8) |
126 | __BUILD_MEMORY_STRING(w, u16) | 126 | __BUILD_MEMORY_STRING(w, u16) |
127 | 127 | ||
128 | #ifdef CONFIG_SUPERH32 | 128 | #ifdef CONFIG_SUPERH32 |
129 | void __raw_writesl(void __iomem *addr, const void *data, int longlen); | 129 | void __raw_writesl(void __iomem *addr, const void *data, int longlen); |
130 | void __raw_readsl(const void __iomem *addr, void *data, int longlen); | 130 | void __raw_readsl(const void __iomem *addr, void *data, int longlen); |
131 | #else | 131 | #else |
132 | __BUILD_MEMORY_STRING(l, u32) | 132 | __BUILD_MEMORY_STRING(l, u32) |
133 | #endif | 133 | #endif |
134 | 134 | ||
135 | __BUILD_MEMORY_STRING(q, u64) | 135 | __BUILD_MEMORY_STRING(q, u64) |
136 | 136 | ||
137 | #define writesb __raw_writesb | 137 | #define writesb __raw_writesb |
138 | #define writesw __raw_writesw | 138 | #define writesw __raw_writesw |
139 | #define writesl __raw_writesl | 139 | #define writesl __raw_writesl |
140 | 140 | ||
141 | #define readsb __raw_readsb | 141 | #define readsb __raw_readsb |
142 | #define readsw __raw_readsw | 142 | #define readsw __raw_readsw |
143 | #define readsl __raw_readsl | 143 | #define readsl __raw_readsl |
144 | 144 | ||
145 | #define readb_relaxed(a) readb(a) | 145 | #define readb_relaxed(a) readb(a) |
146 | #define readw_relaxed(a) readw(a) | 146 | #define readw_relaxed(a) readw(a) |
147 | #define readl_relaxed(a) readl(a) | 147 | #define readl_relaxed(a) readl(a) |
148 | #define readq_relaxed(a) readq(a) | 148 | #define readq_relaxed(a) readq(a) |
149 | 149 | ||
150 | #ifndef CONFIG_GENERIC_IOMAP | 150 | #ifndef CONFIG_GENERIC_IOMAP |
151 | /* Simple MMIO */ | 151 | /* Simple MMIO */ |
152 | #define ioread8(a) __raw_readb(a) | 152 | #define ioread8(a) __raw_readb(a) |
153 | #define ioread16(a) __raw_readw(a) | 153 | #define ioread16(a) __raw_readw(a) |
154 | #define ioread16be(a) be16_to_cpu(__raw_readw((a))) | 154 | #define ioread16be(a) be16_to_cpu(__raw_readw((a))) |
155 | #define ioread32(a) __raw_readl(a) | 155 | #define ioread32(a) __raw_readl(a) |
156 | #define ioread32be(a) be32_to_cpu(__raw_readl((a))) | 156 | #define ioread32be(a) be32_to_cpu(__raw_readl((a))) |
157 | 157 | ||
158 | #define iowrite8(v,a) __raw_writeb((v),(a)) | 158 | #define iowrite8(v,a) __raw_writeb((v),(a)) |
159 | #define iowrite16(v,a) __raw_writew((v),(a)) | 159 | #define iowrite16(v,a) __raw_writew((v),(a)) |
160 | #define iowrite16be(v,a) __raw_writew(cpu_to_be16((v)),(a)) | 160 | #define iowrite16be(v,a) __raw_writew(cpu_to_be16((v)),(a)) |
161 | #define iowrite32(v,a) __raw_writel((v),(a)) | 161 | #define iowrite32(v,a) __raw_writel((v),(a)) |
162 | #define iowrite32be(v,a) __raw_writel(cpu_to_be32((v)),(a)) | 162 | #define iowrite32be(v,a) __raw_writel(cpu_to_be32((v)),(a)) |
163 | 163 | ||
164 | #define ioread8_rep(a, d, c) __raw_readsb((a), (d), (c)) | 164 | #define ioread8_rep(a, d, c) __raw_readsb((a), (d), (c)) |
165 | #define ioread16_rep(a, d, c) __raw_readsw((a), (d), (c)) | 165 | #define ioread16_rep(a, d, c) __raw_readsw((a), (d), (c)) |
166 | #define ioread32_rep(a, d, c) __raw_readsl((a), (d), (c)) | 166 | #define ioread32_rep(a, d, c) __raw_readsl((a), (d), (c)) |
167 | 167 | ||
168 | #define iowrite8_rep(a, s, c) __raw_writesb((a), (s), (c)) | 168 | #define iowrite8_rep(a, s, c) __raw_writesb((a), (s), (c)) |
169 | #define iowrite16_rep(a, s, c) __raw_writesw((a), (s), (c)) | 169 | #define iowrite16_rep(a, s, c) __raw_writesw((a), (s), (c)) |
170 | #define iowrite32_rep(a, s, c) __raw_writesl((a), (s), (c)) | 170 | #define iowrite32_rep(a, s, c) __raw_writesl((a), (s), (c)) |
171 | #endif | 171 | #endif |
172 | 172 | ||
173 | #define mmio_insb(p,d,c) __raw_readsb(p,d,c) | 173 | #define mmio_insb(p,d,c) __raw_readsb(p,d,c) |
174 | #define mmio_insw(p,d,c) __raw_readsw(p,d,c) | 174 | #define mmio_insw(p,d,c) __raw_readsw(p,d,c) |
175 | #define mmio_insl(p,d,c) __raw_readsl(p,d,c) | 175 | #define mmio_insl(p,d,c) __raw_readsl(p,d,c) |
176 | 176 | ||
177 | #define mmio_outsb(p,s,c) __raw_writesb(p,s,c) | 177 | #define mmio_outsb(p,s,c) __raw_writesb(p,s,c) |
178 | #define mmio_outsw(p,s,c) __raw_writesw(p,s,c) | 178 | #define mmio_outsw(p,s,c) __raw_writesw(p,s,c) |
179 | #define mmio_outsl(p,s,c) __raw_writesl(p,s,c) | 179 | #define mmio_outsl(p,s,c) __raw_writesl(p,s,c) |
180 | 180 | ||
181 | /* synco on SH-4A, otherwise a nop */ | 181 | /* synco on SH-4A, otherwise a nop */ |
182 | #define mmiowb() wmb() | 182 | #define mmiowb() wmb() |
183 | 183 | ||
184 | #define IO_SPACE_LIMIT 0xffffffff | 184 | #define IO_SPACE_LIMIT 0xffffffff |
185 | 185 | ||
186 | /* | 186 | /* |
187 | * This function provides a method for the generic case where a | 187 | * This function provides a method for the generic case where a |
188 | * board-specific ioport_map simply needs to return the port + some | 188 | * board-specific ioport_map simply needs to return the port + some |
189 | * arbitrary port base. | 189 | * arbitrary port base. |
190 | * | 190 | * |
191 | * We use this at board setup time to implicitly set the port base, and | 191 | * We use this at board setup time to implicitly set the port base, and |
192 | * as a result, we can use the generic ioport_map. | 192 | * as a result, we can use the generic ioport_map. |
193 | */ | 193 | */ |
194 | static inline void __set_io_port_base(unsigned long pbase) | 194 | static inline void __set_io_port_base(unsigned long pbase) |
195 | { | 195 | { |
196 | generic_io_base = pbase; | 196 | generic_io_base = pbase; |
197 | } | 197 | } |
198 | 198 | ||
199 | #define __ioport_map(p, n) sh_mv.mv_ioport_map((p), (n)) | 199 | #define __ioport_map(p, n) sh_mv.mv_ioport_map((p), (n)) |
200 | 200 | ||
201 | /* We really want to try and get these to memcpy etc */ | 201 | /* We really want to try and get these to memcpy etc */ |
202 | void memcpy_fromio(void *, const volatile void __iomem *, unsigned long); | 202 | void memcpy_fromio(void *, const volatile void __iomem *, unsigned long); |
203 | void memcpy_toio(volatile void __iomem *, const void *, unsigned long); | 203 | void memcpy_toio(volatile void __iomem *, const void *, unsigned long); |
204 | void memset_io(volatile void __iomem *, int, unsigned long); | 204 | void memset_io(volatile void __iomem *, int, unsigned long); |
205 | 205 | ||
206 | /* Quad-word real-mode I/O, don't ask.. */ | 206 | /* Quad-word real-mode I/O, don't ask.. */ |
207 | unsigned long long peek_real_address_q(unsigned long long addr); | 207 | unsigned long long peek_real_address_q(unsigned long long addr); |
208 | unsigned long long poke_real_address_q(unsigned long long addr, | 208 | unsigned long long poke_real_address_q(unsigned long long addr, |
209 | unsigned long long val); | 209 | unsigned long long val); |
210 | 210 | ||
211 | #if !defined(CONFIG_MMU) | 211 | #if !defined(CONFIG_MMU) |
212 | #define virt_to_phys(address) ((unsigned long)(address)) | 212 | #define virt_to_phys(address) ((unsigned long)(address)) |
213 | #define phys_to_virt(address) ((void *)(address)) | 213 | #define phys_to_virt(address) ((void *)(address)) |
214 | #else | 214 | #else |
215 | #define virt_to_phys(address) (__pa(address)) | 215 | #define virt_to_phys(address) (__pa(address)) |
216 | #define phys_to_virt(address) (__va(address)) | 216 | #define phys_to_virt(address) (__va(address)) |
217 | #endif | 217 | #endif |
218 | 218 | ||
219 | /* | 219 | /* |
220 | * On 32-bit SH, we traditionally have the whole physical address space | 220 | * On 32-bit SH, we traditionally have the whole physical address space |
221 | * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do | 221 | * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do |
222 | * not need to do anything but place the address in the proper segment. | 222 | * not need to do anything but place the address in the proper segment. |
223 | * This is true for P1 and P2 addresses, as well as some P3 ones. | 223 | * This is true for P1 and P2 addresses, as well as some P3 ones. |
224 | * However, most of the P3 addresses and newer cores using extended | 224 | * However, most of the P3 addresses and newer cores using extended |
225 | * addressing need to map through page tables, so the ioremap() | 225 | * addressing need to map through page tables, so the ioremap() |
226 | * implementation becomes a bit more complicated. | 226 | * implementation becomes a bit more complicated. |
227 | * | 227 | * |
228 | * See arch/sh/mm/ioremap.c for additional notes on this. | 228 | * See arch/sh/mm/ioremap.c for additional notes on this. |
229 | * | 229 | * |
230 | * We cheat a bit and always return uncachable areas until we've fixed | 230 | * We cheat a bit and always return uncachable areas until we've fixed |
231 | * the drivers to handle caching properly. | 231 | * the drivers to handle caching properly. |
232 | * | 232 | * |
233 | * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply | 233 | * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply |
234 | * doesn't exist, so everything must go through page tables. | 234 | * doesn't exist, so everything must go through page tables. |
235 | */ | 235 | */ |
236 | #ifdef CONFIG_MMU | 236 | #ifdef CONFIG_MMU |
237 | void __iomem *__ioremap_caller(unsigned long offset, unsigned long size, | 237 | void __iomem *__ioremap_caller(unsigned long offset, unsigned long size, |
238 | unsigned long flags, void *caller); | 238 | pgprot_t prot, void *caller); |
239 | void __iounmap(void __iomem *addr); | 239 | void __iounmap(void __iomem *addr); |
240 | 240 | ||
241 | #ifdef CONFIG_IOREMAP_FIXED | 241 | #ifdef CONFIG_IOREMAP_FIXED |
242 | extern void __iomem *ioremap_fixed(resource_size_t, unsigned long, pgprot_t); | 242 | extern void __iomem *ioremap_fixed(resource_size_t, unsigned long, pgprot_t); |
243 | extern int iounmap_fixed(void __iomem *); | 243 | extern int iounmap_fixed(void __iomem *); |
244 | extern void ioremap_fixed_init(void); | 244 | extern void ioremap_fixed_init(void); |
245 | #else | 245 | #else |
246 | static inline void __iomem * | 246 | static inline void __iomem * |
247 | ioremap_fixed(resource_size_t phys_addr, unsigned long size, pgprot_t prot) | 247 | ioremap_fixed(resource_size_t phys_addr, unsigned long size, pgprot_t prot) |
248 | { | 248 | { |
249 | BUG(); | 249 | BUG(); |
250 | } | 250 | } |
251 | 251 | ||
252 | static inline void ioremap_fixed_init(void) { } | 252 | static inline void ioremap_fixed_init(void) { } |
253 | static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; } | 253 | static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; } |
254 | #endif | 254 | #endif |
255 | 255 | ||
256 | static inline void __iomem * | 256 | static inline void __iomem * |
257 | __ioremap(unsigned long offset, unsigned long size, unsigned long flags) | 257 | __ioremap(unsigned long offset, unsigned long size, pgprot_t prot) |
258 | { | 258 | { |
259 | return __ioremap_caller(offset, size, flags, __builtin_return_address(0)); | 259 | return __ioremap_caller(offset, size, prot, __builtin_return_address(0)); |
260 | } | 260 | } |
261 | 261 | ||
262 | static inline void __iomem * | 262 | static inline void __iomem * |
263 | __ioremap_29bit(unsigned long offset, unsigned long size, unsigned long flags) | 263 | __ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot) |
264 | { | 264 | { |
265 | #ifdef CONFIG_29BIT | 265 | #ifdef CONFIG_29BIT |
266 | unsigned long last_addr = offset + size - 1; | 266 | unsigned long last_addr = offset + size - 1; |
267 | 267 | ||
268 | /* | 268 | /* |
269 | * For P1 and P2 space this is trivial, as everything is already | 269 | * For P1 and P2 space this is trivial, as everything is already |
270 | * mapped. Uncached access for P1 addresses are done through P2. | 270 | * mapped. Uncached access for P1 addresses are done through P2. |
271 | * In the P3 case or for addresses outside of the 29-bit space, | 271 | * In the P3 case or for addresses outside of the 29-bit space, |
272 | * mapping must be done by the PMB or by using page tables. | 272 | * mapping must be done by the PMB or by using page tables. |
273 | */ | 273 | */ |
274 | if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) { | 274 | if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) { |
275 | if (unlikely(flags & _PAGE_CACHABLE)) | 275 | if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE)) |
276 | return (void __iomem *)P1SEGADDR(offset); | 276 | return (void __iomem *)P1SEGADDR(offset); |
277 | 277 | ||
278 | return (void __iomem *)P2SEGADDR(offset); | 278 | return (void __iomem *)P2SEGADDR(offset); |
279 | } | 279 | } |
280 | 280 | ||
281 | /* P4 above the store queues are always mapped. */ | 281 | /* P4 above the store queues are always mapped. */ |
282 | if (unlikely(offset >= P3_ADDR_MAX)) | 282 | if (unlikely(offset >= P3_ADDR_MAX)) |
283 | return (void __iomem *)P4SEGADDR(offset); | 283 | return (void __iomem *)P4SEGADDR(offset); |
284 | #endif | 284 | #endif |
285 | 285 | ||
286 | return NULL; | 286 | return NULL; |
287 | } | 287 | } |
288 | 288 | ||
289 | static inline void __iomem * | 289 | static inline void __iomem * |
290 | __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) | 290 | __ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot) |
291 | { | 291 | { |
292 | void __iomem *ret; | 292 | void __iomem *ret; |
293 | 293 | ||
294 | ret = __ioremap_trapped(offset, size); | 294 | ret = __ioremap_trapped(offset, size); |
295 | if (ret) | 295 | if (ret) |
296 | return ret; | 296 | return ret; |
297 | 297 | ||
298 | ret = __ioremap_29bit(offset, size, flags); | 298 | ret = __ioremap_29bit(offset, size, prot); |
299 | if (ret) | 299 | if (ret) |
300 | return ret; | 300 | return ret; |
301 | 301 | ||
302 | return __ioremap(offset, size, flags); | 302 | return __ioremap(offset, size, prot); |
303 | } | 303 | } |
304 | #else | 304 | #else |
305 | #define __ioremap(offset, size, flags) ((void __iomem *)(offset)) | 305 | #define __ioremap(offset, size, prot) ((void __iomem *)(offset)) |
306 | #define __ioremap_mode(offset, size, flags) ((void __iomem *)(offset)) | 306 | #define __ioremap_mode(offset, size, prot) ((void __iomem *)(offset)) |
307 | #define __iounmap(addr) do { } while (0) | 307 | #define __iounmap(addr) do { } while (0) |
308 | #endif /* CONFIG_MMU */ | 308 | #endif /* CONFIG_MMU */ |
309 | 309 | ||
310 | #define ioremap(offset, size) \ | 310 | static inline void __iomem * |
311 | __ioremap_mode((offset), (size), 0) | 311 | ioremap(unsigned long offset, unsigned long size) |
312 | #define ioremap_nocache(offset, size) \ | 312 | { |
313 | __ioremap_mode((offset), (size), 0) | 313 | return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE); |
314 | #define ioremap_cache(offset, size) \ | 314 | } |
315 | __ioremap_mode((offset), (size), _PAGE_CACHABLE) | 315 | |
316 | #define p3_ioremap(offset, size, flags) \ | 316 | static inline void __iomem * |
317 | __ioremap((offset), (size), (flags)) | 317 | ioremap_cache(unsigned long offset, unsigned long size) |
318 | #define ioremap_prot(offset, size, flags) \ | 318 | { |
319 | __ioremap_mode((offset), (size), (flags)) | 319 | return __ioremap_mode(offset, size, PAGE_KERNEL); |
320 | #define iounmap(addr) \ | 320 | } |
321 | __iounmap((addr)) | 321 | |
322 | static inline void __iomem * | ||
323 | ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags) | ||
324 | { | ||
325 | return __ioremap_mode(offset, size, __pgprot(flags)); | ||
326 | } | ||
327 | |||
328 | #define ioremap_nocache ioremap | ||
329 | #define p3_ioremap __ioremap | ||
330 | #define iounmap __iounmap | ||
322 | 331 | ||
323 | #define maybebadio(port) \ | 332 | #define maybebadio(port) \ |
324 | printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \ | 333 | printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \ |
325 | __func__, __LINE__, (port), (u32)__builtin_return_address(0)) | 334 | __func__, __LINE__, (port), (u32)__builtin_return_address(0)) |
326 | 335 | ||
327 | /* | 336 | /* |
328 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem | 337 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem |
329 | * access | 338 | * access |
330 | */ | 339 | */ |
331 | #define xlate_dev_mem_ptr(p) __va(p) | 340 | #define xlate_dev_mem_ptr(p) __va(p) |
332 | 341 | ||
333 | /* | 342 | /* |
334 | * Convert a virtual cached pointer to an uncached pointer | 343 | * Convert a virtual cached pointer to an uncached pointer |
335 | */ | 344 | */ |
336 | #define xlate_dev_kmem_ptr(p) p | 345 | #define xlate_dev_kmem_ptr(p) p |
337 | 346 | ||
338 | #define ARCH_HAS_VALID_PHYS_ADDR_RANGE | 347 | #define ARCH_HAS_VALID_PHYS_ADDR_RANGE |
339 | int valid_phys_addr_range(unsigned long addr, size_t size); | 348 | int valid_phys_addr_range(unsigned long addr, size_t size); |
340 | int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); | 349 | int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); |
341 | 350 | ||
342 | #endif /* __KERNEL__ */ | 351 | #endif /* __KERNEL__ */ |
343 | 352 | ||
344 | #endif /* __ASM_SH_IO_H */ | 353 | #endif /* __ASM_SH_IO_H */ |
345 | 354 |
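Note: as a minimal user-space sketch (not kernel code) of the problem the header conversion above addresses, the program below shows how a 64-bit pgprot loses any protection bits above bit 31 when it is funnelled through a 32-bit integer, while the struct wrapper in the pgprot_t style keeps the full value. The pgprot_demo_t type, demo_pgprot_val() and the DEMO_PAGE_RW bit are illustrative stand-ins, not the kernel's definitions or the real X2TLB layout.

/*
 * Standalone sketch: on 32-bit sh, "unsigned long" is 32 bits wide, so a
 * 64-bit pgprot passed through it has its upper half chopped off.  The
 * struct wrapper models pgprot_t/pgprot_val(); the bit position used here
 * is made up purely for demonstration.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t pgprot; } pgprot_demo_t;      /* models pgprot_t   */
#define demo_pgprot_val(p)  ((p).pgprot)                /* models pgprot_val */

#define DEMO_PAGE_RW  (1ULL << 33)   /* made-up permission bit above bit 31 */

/* Old-style helper: protection bits squeezed through a 32-bit type. */
static uint64_t map_with_flags(uint32_t flags)
{
	return flags;                /* upper 32 bits are already gone */
}

/* New-style helper: the full pgprot travels intact. */
static uint64_t map_with_pgprot(pgprot_demo_t prot)
{
	return demo_pgprot_val(prot);
}

int main(void)
{
	pgprot_demo_t prot = { DEMO_PAGE_RW };

	printf("via 32-bit flags: %#llx\n",
	       (unsigned long long)map_with_flags(demo_pgprot_val(prot)));
	printf("via pgprot_t:     %#llx\n",
	       (unsigned long long)map_with_pgprot(prot));
	return 0;
}

Running it prints 0 for the flags path and 0x200000000 for the pgprot path, which is exactly the clobbering the commit removes by carrying pgprot_t end to end.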
arch/sh/mm/ioremap.c
1 | /* | 1 | /* |
2 | * arch/sh/mm/ioremap.c | 2 | * arch/sh/mm/ioremap.c |
3 | * | 3 | * |
4 | * (C) Copyright 1995 1996 Linus Torvalds | 4 | * (C) Copyright 1995 1996 Linus Torvalds |
5 | * (C) Copyright 2005 - 2010 Paul Mundt | 5 | * (C) Copyright 2005 - 2010 Paul Mundt |
6 | * | 6 | * |
7 | * Re-map IO memory to kernel address space so that we can access it. | 7 | * Re-map IO memory to kernel address space so that we can access it. |
8 | * This is needed for high PCI addresses that aren't mapped in the | 8 | * This is needed for high PCI addresses that aren't mapped in the |
9 | * 640k-1MB IO memory area on PC's | 9 | * 640k-1MB IO memory area on PC's |
10 | * | 10 | * |
11 | * This file is subject to the terms and conditions of the GNU General | 11 | * This file is subject to the terms and conditions of the GNU General |
12 | * Public License. See the file "COPYING" in the main directory of this | 12 | * Public License. See the file "COPYING" in the main directory of this |
13 | * archive for more details. | 13 | * archive for more details. |
14 | */ | 14 | */ |
15 | #include <linux/vmalloc.h> | 15 | #include <linux/vmalloc.h> |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <linux/pci.h> | 18 | #include <linux/pci.h> |
19 | #include <linux/io.h> | 19 | #include <linux/io.h> |
20 | #include <asm/page.h> | 20 | #include <asm/page.h> |
21 | #include <asm/pgalloc.h> | 21 | #include <asm/pgalloc.h> |
22 | #include <asm/addrspace.h> | 22 | #include <asm/addrspace.h> |
23 | #include <asm/cacheflush.h> | 23 | #include <asm/cacheflush.h> |
24 | #include <asm/tlbflush.h> | 24 | #include <asm/tlbflush.h> |
25 | #include <asm/mmu.h> | 25 | #include <asm/mmu.h> |
26 | 26 | ||
27 | /* | 27 | /* |
28 | * Remap an arbitrary physical address space into the kernel virtual | 28 | * Remap an arbitrary physical address space into the kernel virtual |
29 | * address space. Needed when the kernel wants to access high addresses | 29 | * address space. Needed when the kernel wants to access high addresses |
30 | * directly. | 30 | * directly. |
31 | * | 31 | * |
32 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously | 32 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously |
33 | * have to convert them into an offset in a page-aligned mapping, but the | 33 | * have to convert them into an offset in a page-aligned mapping, but the |
34 | * caller shouldn't need to know that small detail. | 34 | * caller shouldn't need to know that small detail. |
35 | */ | 35 | */ |
36 | void __iomem * __init_refok | 36 | void __iomem * __init_refok |
37 | __ioremap_caller(unsigned long phys_addr, unsigned long size, | 37 | __ioremap_caller(unsigned long phys_addr, unsigned long size, |
38 | unsigned long flags, void *caller) | 38 | pgprot_t pgprot, void *caller) |
39 | { | 39 | { |
40 | struct vm_struct *area; | 40 | struct vm_struct *area; |
41 | unsigned long offset, last_addr, addr, orig_addr; | 41 | unsigned long offset, last_addr, addr, orig_addr; |
42 | pgprot_t pgprot; | ||
43 | 42 | ||
44 | /* Don't allow wraparound or zero size */ | 43 | /* Don't allow wraparound or zero size */ |
45 | last_addr = phys_addr + size - 1; | 44 | last_addr = phys_addr + size - 1; |
46 | if (!size || last_addr < phys_addr) | 45 | if (!size || last_addr < phys_addr) |
47 | return NULL; | 46 | return NULL; |
48 | 47 | ||
49 | /* | 48 | /* |
50 | * If we're in the fixed PCI memory range, mapping through page | 49 | * If we're in the fixed PCI memory range, mapping through page |
51 | * tables is not only pointless, but also fundamentally broken. | 50 | * tables is not only pointless, but also fundamentally broken. |
52 | * Just return the physical address instead. | 51 | * Just return the physical address instead. |
53 | * | 52 | * |
54 | * For boards that map a small PCI memory aperture somewhere in | 53 | * For boards that map a small PCI memory aperture somewhere in |
55 | * P1/P2 space, ioremap() will already do the right thing, | 54 | * P1/P2 space, ioremap() will already do the right thing, |
56 | * and we'll never get this far. | 55 | * and we'll never get this far. |
57 | */ | 56 | */ |
58 | if (is_pci_memory_fixed_range(phys_addr, size)) | 57 | if (is_pci_memory_fixed_range(phys_addr, size)) |
59 | return (void __iomem *)phys_addr; | 58 | return (void __iomem *)phys_addr; |
60 | 59 | ||
61 | /* | 60 | /* |
62 | * Mappings have to be page-aligned | 61 | * Mappings have to be page-aligned |
63 | */ | 62 | */ |
64 | offset = phys_addr & ~PAGE_MASK; | 63 | offset = phys_addr & ~PAGE_MASK; |
65 | phys_addr &= PAGE_MASK; | 64 | phys_addr &= PAGE_MASK; |
66 | size = PAGE_ALIGN(last_addr+1) - phys_addr; | 65 | size = PAGE_ALIGN(last_addr+1) - phys_addr; |
67 | 66 | ||
68 | /* | 67 | /* |
69 | * If we can't yet use the regular approach, go the fixmap route. | 68 | * If we can't yet use the regular approach, go the fixmap route. |
70 | */ | 69 | */ |
71 | if (!mem_init_done) | 70 | if (!mem_init_done) |
72 | return ioremap_fixed(phys_addr, size, __pgprot(flags)); | 71 | return ioremap_fixed(phys_addr, size, pgprot); |
73 | 72 | ||
74 | /* | 73 | /* |
75 | * Ok, go for it.. | 74 | * Ok, go for it.. |
76 | */ | 75 | */ |
77 | area = get_vm_area_caller(size, VM_IOREMAP, caller); | 76 | area = get_vm_area_caller(size, VM_IOREMAP, caller); |
78 | if (!area) | 77 | if (!area) |
79 | return NULL; | 78 | return NULL; |
80 | area->phys_addr = phys_addr; | 79 | area->phys_addr = phys_addr; |
81 | orig_addr = addr = (unsigned long)area->addr; | 80 | orig_addr = addr = (unsigned long)area->addr; |
82 | 81 | ||
83 | #ifdef CONFIG_PMB | 82 | #ifdef CONFIG_PMB |
84 | /* | 83 | /* |
85 | * First try to remap through the PMB once a valid VMA has been | 84 | * First try to remap through the PMB once a valid VMA has been |
86 | * established. Smaller allocations (or the rest of the size | 85 | * established. Smaller allocations (or the rest of the size |
87 | * remaining after a PMB mapping due to the size not being | 86 | * remaining after a PMB mapping due to the size not being |
88 | * perfectly aligned on a PMB size boundary) are then mapped | 87 | * perfectly aligned on a PMB size boundary) are then mapped |
89 | * through the UTLB using conventional page tables. | 88 | * through the UTLB using conventional page tables. |
90 | * | 89 | * |
91 | * PMB entries are all pre-faulted. | 90 | * PMB entries are all pre-faulted. |
92 | */ | 91 | */ |
93 | if (unlikely(phys_addr >= P1SEG)) { | 92 | if (unlikely(phys_addr >= P1SEG)) { |
94 | unsigned long mapped = pmb_remap(addr, phys_addr, size, flags); | 93 | unsigned long mapped; |
95 | 94 | ||
95 | mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot)); | ||
96 | if (likely(mapped)) { | 96 | if (likely(mapped)) { |
97 | addr += mapped; | 97 | addr += mapped; |
98 | phys_addr += mapped; | 98 | phys_addr += mapped; |
99 | size -= mapped; | 99 | size -= mapped; |
100 | } | 100 | } |
101 | } | 101 | } |
102 | #endif | 102 | #endif |
103 | 103 | ||
104 | pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags); | ||
105 | if (likely(size)) | 104 | if (likely(size)) |
106 | if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { | 105 | if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { |
107 | vunmap((void *)orig_addr); | 106 | vunmap((void *)orig_addr); |
108 | return NULL; | 107 | return NULL; |
109 | } | 108 | } |
110 | 109 | ||
111 | return (void __iomem *)(offset + (char *)orig_addr); | 110 | return (void __iomem *)(offset + (char *)orig_addr); |
112 | } | 111 | } |
113 | EXPORT_SYMBOL(__ioremap_caller); | 112 | EXPORT_SYMBOL(__ioremap_caller); |
114 | 113 | ||
115 | /* | 114 | /* |
116 | * Simple checks for non-translatable mappings. | 115 | * Simple checks for non-translatable mappings. |
117 | */ | 116 | */ |
118 | static inline int iomapping_nontranslatable(unsigned long offset) | 117 | static inline int iomapping_nontranslatable(unsigned long offset) |
119 | { | 118 | { |
120 | #ifdef CONFIG_29BIT | 119 | #ifdef CONFIG_29BIT |
121 | /* | 120 | /* |
122 | * In 29-bit mode this includes the fixed P1/P2 areas, as well as | 121 | * In 29-bit mode this includes the fixed P1/P2 areas, as well as |
123 | * parts of P3. | 122 | * parts of P3. |
124 | */ | 123 | */ |
125 | if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX) | 124 | if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX) |
126 | return 1; | 125 | return 1; |
127 | #endif | 126 | #endif |
128 | 127 | ||
129 | if (is_pci_memory_fixed_range(offset, 0)) | 128 | if (is_pci_memory_fixed_range(offset, 0)) |
130 | return 1; | 129 | return 1; |
131 | 130 | ||
132 | return 0; | 131 | return 0; |
133 | } | 132 | } |
134 | 133 | ||
135 | void __iounmap(void __iomem *addr) | 134 | void __iounmap(void __iomem *addr) |
136 | { | 135 | { |
137 | unsigned long vaddr = (unsigned long __force)addr; | 136 | unsigned long vaddr = (unsigned long __force)addr; |
138 | struct vm_struct *p; | 137 | struct vm_struct *p; |
139 | 138 | ||
140 | /* | 139 | /* |
141 | * Nothing to do if there is no translatable mapping. | 140 | * Nothing to do if there is no translatable mapping. |
142 | */ | 141 | */ |
143 | if (iomapping_nontranslatable(vaddr)) | 142 | if (iomapping_nontranslatable(vaddr)) |
144 | return; | 143 | return; |
145 | 144 | ||
146 | /* | 145 | /* |
147 | * There's no VMA if it's from an early fixed mapping. | 146 | * There's no VMA if it's from an early fixed mapping. |
148 | */ | 147 | */ |
149 | if (iounmap_fixed(addr) == 0) | 148 | if (iounmap_fixed(addr) == 0) |
150 | return; | 149 | return; |
151 | 150 | ||
152 | #ifdef CONFIG_PMB | 151 | #ifdef CONFIG_PMB |
153 | /* | 152 | /* |
154 | * Purge any PMB entries that may have been established for this | 153 | * Purge any PMB entries that may have been established for this |
155 | * mapping, then proceed with conventional VMA teardown. | 154 | * mapping, then proceed with conventional VMA teardown. |
156 | * | 155 | * |
157 | * XXX: Note that due to the way that remove_vm_area() does | 156 | * XXX: Note that due to the way that remove_vm_area() does |
158 | * matching of the resultant VMA, we aren't able to fast-forward | 157 | * matching of the resultant VMA, we aren't able to fast-forward |
159 | * the address past the PMB space until the end of the VMA where | 158 | * the address past the PMB space until the end of the VMA where |
160 | * the page tables reside. As such, unmap_vm_area() will be | 159 | * the page tables reside. As such, unmap_vm_area() will be |
161 | * forced to linearly scan over the area until it finds the page | 160 | * forced to linearly scan over the area until it finds the page |
162 | * tables where PTEs that need to be unmapped actually reside, | 161 | * tables where PTEs that need to be unmapped actually reside, |
163 | * which is far from optimal. Perhaps we need to use a separate | 162 | * which is far from optimal. Perhaps we need to use a separate |
164 | * VMA for the PMB mappings? | 163 | * VMA for the PMB mappings? |
165 | * -- PFM. | 164 | * -- PFM. |
166 | */ | 165 | */ |
167 | pmb_unmap(vaddr); | 166 | pmb_unmap(vaddr); |
168 | #endif | 167 | #endif |
169 | 168 | ||
170 | p = remove_vm_area((void *)(vaddr & PAGE_MASK)); | 169 | p = remove_vm_area((void *)(vaddr & PAGE_MASK)); |
171 | if (!p) { | 170 | if (!p) { |
172 | printk(KERN_ERR "%s: bad address %p\n", __func__, addr); | 171 | printk(KERN_ERR "%s: bad address %p\n", __func__, addr); |
173 | return; | 172 | return; |
174 | } | 173 | } |
175 | 174 | ||
176 | kfree(p); | 175 | kfree(p); |
177 | } | 176 | } |
178 | EXPORT_SYMBOL(__iounmap); | 177 | EXPORT_SYMBOL(__iounmap); |
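Note: the sketch below is a standalone user-space model (not kernel code) of the dispatch that the converted __ioremap_29bit() performs after this change: with the protection arriving as a pgprot_t, the cacheability test reads pgprot_val(prot) & _PAGE_CACHABLE and selects the cached P1 alias or the uncached P2 alias for addresses inside the identity-mapped 29-bit space, otherwise falling through to the page-table path. The demo_* names, the DEMO_PAGE_CACHABLE bit position and the simplified range check are illustrative assumptions, not the kernel's actual definitions.

/*
 * Standalone model of the 29-bit fast path: cacheable mappings fold the
 * physical address into the cached P1 segment, everything else into the
 * uncached P2 segment; addresses outside the 29-bit space return NULL,
 * standing in for the fall-through to __ioremap()/page tables.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t pgprot; } pgprot_demo_t;
#define demo_pgprot_val(p)   ((p).pgprot)

#define DEMO_PAGE_CACHABLE   (1ULL << 2)     /* stand-in for _PAGE_CACHABLE  */
#define DEMO_P1SEG           0x80000000UL    /* cached identity segment      */
#define DEMO_P2SEG           0xa0000000UL    /* uncached identity segment    */
#define DEMO_29BIT_LIMIT     0x20000000UL    /* size of the 29-bit phys space */

static void *demo_ioremap_29bit(uint32_t phys, uint32_t size, pgprot_demo_t prot)
{
	uint32_t last = phys + size - 1;

	/* Everything inside the 29-bit space is already identity-mapped. */
	if (phys < DEMO_29BIT_LIMIT && last < DEMO_29BIT_LIMIT) {
		if (demo_pgprot_val(prot) & DEMO_PAGE_CACHABLE)
			return (void *)(uintptr_t)(DEMO_P1SEG | phys);
		return (void *)(uintptr_t)(DEMO_P2SEG | phys);
	}
	return NULL;    /* would fall through to the page-table ioremap */
}

int main(void)
{
	pgprot_demo_t nocache = { 0 };
	pgprot_demo_t cached  = { DEMO_PAGE_CACHABLE };

	printf("ioremap-style (uncached): %p\n",
	       demo_ioremap_29bit(0x1000, 0x100, nocache));
	printf("ioremap_cache-style:      %p\n",
	       demo_ioremap_29bit(0x1000, 0x100, cached));
	return 0;
}

This mirrors how the new inline ioremap()/ioremap_cache() wrappers in the header simply pass PAGE_KERNEL_NOCACHE or PAGE_KERNEL down as a pgprot_t and let the helpers inspect the bits, instead of re-encoding them as a 32-bit flags word.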