Commit 5d01e6ce785884a5db5792cd2e5bb36fa82fe23c
Committed by Linus Torvalds
1 parent dcf1310b72
Exists in master and in 7 other branches
[PATCH] CRIS update: updates for 2.6.12
Patches to make CRIS work with 2.6.12.

Signed-off-by: Mikael Starvik <starvik@axis.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Showing 15 changed files with 88 additions and 119 deletions
- arch/cris/arch-v10/kernel/ptrace.c
- arch/cris/kernel/module.c
- arch/cris/kernel/process.c
- include/asm-cris/arch-v10/bitops.h
- include/asm-cris/arch-v10/offset.h
- include/asm-cris/bitops.h
- include/asm-cris/kmap_types.h
- include/asm-cris/page.h
- include/asm-cris/pgalloc.h
- include/asm-cris/pgtable.h
- include/asm-cris/processor.h
- include/asm-cris/thread_info.h
- include/asm-cris/timex.h
- include/asm-cris/types.h
- include/asm-cris/unistd.h
arch/cris/arch-v10/kernel/ptrace.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2000-2003, Axis Communications AB. | 2 | * Copyright (C) 2000-2003, Axis Communications AB. |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #include <linux/kernel.h> | 5 | #include <linux/kernel.h> |
6 | #include <linux/sched.h> | 6 | #include <linux/sched.h> |
7 | #include <linux/mm.h> | 7 | #include <linux/mm.h> |
8 | #include <linux/smp.h> | 8 | #include <linux/smp.h> |
9 | #include <linux/smp_lock.h> | 9 | #include <linux/smp_lock.h> |
10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
11 | #include <linux/ptrace.h> | 11 | #include <linux/ptrace.h> |
12 | #include <linux/user.h> | 12 | #include <linux/user.h> |
13 | #include <linux/signal.h> | 13 | #include <linux/signal.h> |
14 | #include <linux/security.h> | ||
14 | 15 | ||
15 | #include <asm/uaccess.h> | 16 | #include <asm/uaccess.h> |
16 | #include <asm/page.h> | 17 | #include <asm/page.h> |
17 | #include <asm/pgtable.h> | 18 | #include <asm/pgtable.h> |
18 | #include <asm/system.h> | 19 | #include <asm/system.h> |
19 | #include <asm/processor.h> | 20 | #include <asm/processor.h> |
20 | 21 | ||
21 | /* | 22 | /* |
22 | * Determines which bits in DCCR the user has access to. | 23 | * Determines which bits in DCCR the user has access to. |
23 | * 1 = access, 0 = no access. | 24 | * 1 = access, 0 = no access. |
24 | */ | 25 | */ |
25 | #define DCCR_MASK 0x0000001f /* XNZVC */ | 26 | #define DCCR_MASK 0x0000001f /* XNZVC */ |
26 | 27 | ||
27 | /* | 28 | /* |
28 | * Get contents of register REGNO in task TASK. | 29 | * Get contents of register REGNO in task TASK. |
29 | */ | 30 | */ |
30 | inline long get_reg(struct task_struct *task, unsigned int regno) | 31 | inline long get_reg(struct task_struct *task, unsigned int regno) |
31 | { | 32 | { |
32 | /* USP is a special case, it's not in the pt_regs struct but | 33 | /* USP is a special case, it's not in the pt_regs struct but |
33 | * in the tasks thread struct | 34 | * in the tasks thread struct |
34 | */ | 35 | */ |
35 | 36 | ||
36 | if (regno == PT_USP) | 37 | if (regno == PT_USP) |
37 | return task->thread.usp; | 38 | return task->thread.usp; |
38 | else if (regno < PT_MAX) | 39 | else if (regno < PT_MAX) |
39 | return ((unsigned long *)user_regs(task->thread_info))[regno]; | 40 | return ((unsigned long *)user_regs(task->thread_info))[regno]; |
40 | else | 41 | else |
41 | return 0; | 42 | return 0; |
42 | } | 43 | } |
43 | 44 | ||
44 | /* | 45 | /* |
45 | * Write contents of register REGNO in task TASK. | 46 | * Write contents of register REGNO in task TASK. |
46 | */ | 47 | */ |
47 | inline int put_reg(struct task_struct *task, unsigned int regno, | 48 | inline int put_reg(struct task_struct *task, unsigned int regno, |
48 | unsigned long data) | 49 | unsigned long data) |
49 | { | 50 | { |
50 | if (regno == PT_USP) | 51 | if (regno == PT_USP) |
51 | task->thread.usp = data; | 52 | task->thread.usp = data; |
52 | else if (regno < PT_MAX) | 53 | else if (regno < PT_MAX) |
53 | ((unsigned long *)user_regs(task->thread_info))[regno] = data; | 54 | ((unsigned long *)user_regs(task->thread_info))[regno] = data; |
54 | else | 55 | else |
55 | return -1; | 56 | return -1; |
56 | return 0; | 57 | return 0; |
57 | } | 58 | } |
58 | 59 | ||
59 | /* | 60 | /* |
60 | * Called by kernel/ptrace.c when detaching. | 61 | * Called by kernel/ptrace.c when detaching. |
61 | * | 62 | * |
62 | * Make sure the single step bit is not set. | 63 | * Make sure the single step bit is not set. |
63 | */ | 64 | */ |
64 | void | 65 | void |
65 | ptrace_disable(struct task_struct *child) | 66 | ptrace_disable(struct task_struct *child) |
66 | { | 67 | { |
67 | /* Todo - pending singlesteps? */ | 68 | /* Todo - pending singlesteps? */ |
68 | } | 69 | } |
69 | 70 | ||
70 | /* | 71 | /* |
71 | * Note that this implementation of ptrace behaves differently from vanilla | 72 | * Note that this implementation of ptrace behaves differently from vanilla |
72 | * ptrace. Contrary to what the man page says, in the PTRACE_PEEKTEXT, | 73 | * ptrace. Contrary to what the man page says, in the PTRACE_PEEKTEXT, |
73 | * PTRACE_PEEKDATA, and PTRACE_PEEKUSER requests the data variable is not | 74 | * PTRACE_PEEKDATA, and PTRACE_PEEKUSER requests the data variable is not |
74 | * ignored. Instead, the data variable is expected to point at a location | 75 | * ignored. Instead, the data variable is expected to point at a location |
75 | * (in user space) where the result of the ptrace call is written (instead of | 76 | * (in user space) where the result of the ptrace call is written (instead of |
76 | * being returned). | 77 | * being returned). |
77 | */ | 78 | */ |
78 | asmlinkage int | 79 | asmlinkage int |
79 | sys_ptrace(long request, long pid, long addr, long data) | 80 | sys_ptrace(long request, long pid, long addr, long data) |
80 | { | 81 | { |
81 | struct task_struct *child; | 82 | struct task_struct *child; |
82 | int ret; | 83 | int ret; |
83 | unsigned long __user *datap = (unsigned long __user *)data; | 84 | unsigned long __user *datap = (unsigned long __user *)data; |
84 | 85 | ||
85 | lock_kernel(); | 86 | lock_kernel(); |
86 | ret = -EPERM; | 87 | ret = -EPERM; |
87 | 88 | ||
88 | if (request == PTRACE_TRACEME) { | 89 | if (request == PTRACE_TRACEME) { |
90 | /* are we already being traced? */ | ||
89 | if (current->ptrace & PT_PTRACED) | 91 | if (current->ptrace & PT_PTRACED) |
90 | goto out; | 92 | goto out; |
91 | 93 | ret = security_ptrace(current->parent, current); | |
94 | if (ret) | ||
95 | goto out; | ||
96 | /* set the ptrace bit in the process flags. */ | ||
92 | current->ptrace |= PT_PTRACED; | 97 | current->ptrace |= PT_PTRACED; |
93 | ret = 0; | 98 | ret = 0; |
94 | goto out; | 99 | goto out; |
95 | } | 100 | } |
96 | 101 | ||
97 | ret = -ESRCH; | 102 | ret = -ESRCH; |
98 | read_lock(&tasklist_lock); | 103 | read_lock(&tasklist_lock); |
99 | child = find_task_by_pid(pid); | 104 | child = find_task_by_pid(pid); |
100 | 105 | ||
101 | if (child) | 106 | if (child) |
102 | get_task_struct(child); | 107 | get_task_struct(child); |
103 | 108 | ||
104 | read_unlock(&tasklist_lock); | 109 | read_unlock(&tasklist_lock); |
105 | 110 | ||
106 | if (!child) | 111 | if (!child) |
107 | goto out; | 112 | goto out; |
108 | 113 | ||
109 | ret = -EPERM; | 114 | ret = -EPERM; |
110 | 115 | ||
111 | if (pid == 1) /* Leave the init process alone! */ | 116 | if (pid == 1) /* Leave the init process alone! */ |
112 | goto out_tsk; | 117 | goto out_tsk; |
113 | 118 | ||
114 | if (request == PTRACE_ATTACH) { | 119 | if (request == PTRACE_ATTACH) { |
115 | ret = ptrace_attach(child); | 120 | ret = ptrace_attach(child); |
116 | goto out_tsk; | 121 | goto out_tsk; |
117 | } | 122 | } |
118 | 123 | ||
119 | ret = ptrace_check_attach(child, request == PTRACE_KILL); | 124 | ret = ptrace_check_attach(child, request == PTRACE_KILL); |
120 | if (ret < 0) | 125 | if (ret < 0) |
121 | goto out_tsk; | 126 | goto out_tsk; |
122 | 127 | ||
123 | switch (request) { | 128 | switch (request) { |
124 | /* Read word at location address. */ | 129 | /* Read word at location address. */ |
125 | case PTRACE_PEEKTEXT: | 130 | case PTRACE_PEEKTEXT: |
126 | case PTRACE_PEEKDATA: { | 131 | case PTRACE_PEEKDATA: { |
127 | unsigned long tmp; | 132 | unsigned long tmp; |
128 | int copied; | 133 | int copied; |
129 | 134 | ||
130 | copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); | 135 | copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); |
131 | ret = -EIO; | 136 | ret = -EIO; |
132 | 137 | ||
133 | if (copied != sizeof(tmp)) | 138 | if (copied != sizeof(tmp)) |
134 | break; | 139 | break; |
135 | 140 | ||
136 | ret = put_user(tmp,datap); | 141 | ret = put_user(tmp,datap); |
137 | break; | 142 | break; |
138 | } | 143 | } |
139 | 144 | ||
140 | /* Read the word at location address in the USER area. */ | 145 | /* Read the word at location address in the USER area. */ |
141 | case PTRACE_PEEKUSR: { | 146 | case PTRACE_PEEKUSR: { |
142 | unsigned long tmp; | 147 | unsigned long tmp; |
143 | 148 | ||
144 | ret = -EIO; | 149 | ret = -EIO; |
145 | if ((addr & 3) || addr < 0 || addr > PT_MAX << 2) | 150 | if ((addr & 3) || addr < 0 || addr > PT_MAX << 2) |
146 | break; | 151 | break; |
147 | 152 | ||
148 | tmp = get_reg(child, addr >> 2); | 153 | tmp = get_reg(child, addr >> 2); |
149 | ret = put_user(tmp, datap); | 154 | ret = put_user(tmp, datap); |
150 | break; | 155 | break; |
151 | } | 156 | } |
152 | 157 | ||
153 | /* Write the word at location address. */ | 158 | /* Write the word at location address. */ |
154 | case PTRACE_POKETEXT: | 159 | case PTRACE_POKETEXT: |
155 | case PTRACE_POKEDATA: | 160 | case PTRACE_POKEDATA: |
156 | ret = 0; | 161 | ret = 0; |
157 | 162 | ||
158 | if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data)) | 163 | if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data)) |
159 | break; | 164 | break; |
160 | 165 | ||
161 | ret = -EIO; | 166 | ret = -EIO; |
162 | break; | 167 | break; |
163 | 168 | ||
164 | /* Write the word at location address in the USER area. */ | 169 | /* Write the word at location address in the USER area. */ |
165 | case PTRACE_POKEUSR: | 170 | case PTRACE_POKEUSR: |
166 | ret = -EIO; | 171 | ret = -EIO; |
167 | if ((addr & 3) || addr < 0 || addr > PT_MAX << 2) | 172 | if ((addr & 3) || addr < 0 || addr > PT_MAX << 2) |
168 | break; | 173 | break; |
169 | 174 | ||
170 | addr >>= 2; | 175 | addr >>= 2; |
171 | 176 | ||
172 | if (addr == PT_DCCR) { | 177 | if (addr == PT_DCCR) { |
173 | /* don't allow the tracing process to change stuff like | 178 | /* don't allow the tracing process to change stuff like |
174 | * interrupt enable, kernel/user bit, dma enables etc. | 179 | * interrupt enable, kernel/user bit, dma enables etc. |
175 | */ | 180 | */ |
176 | data &= DCCR_MASK; | 181 | data &= DCCR_MASK; |
177 | data |= get_reg(child, PT_DCCR) & ~DCCR_MASK; | 182 | data |= get_reg(child, PT_DCCR) & ~DCCR_MASK; |
178 | } | 183 | } |
179 | if (put_reg(child, addr, data)) | 184 | if (put_reg(child, addr, data)) |
180 | break; | 185 | break; |
181 | ret = 0; | 186 | ret = 0; |
182 | break; | 187 | break; |
183 | 188 | ||
184 | case PTRACE_SYSCALL: | 189 | case PTRACE_SYSCALL: |
185 | case PTRACE_CONT: | 190 | case PTRACE_CONT: |
186 | ret = -EIO; | 191 | ret = -EIO; |
187 | 192 | ||
188 | if (!valid_signal(data)) | 193 | if (!valid_signal(data)) |
189 | break; | 194 | break; |
190 | 195 | ||
191 | if (request == PTRACE_SYSCALL) { | 196 | if (request == PTRACE_SYSCALL) { |
192 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 197 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
193 | } | 198 | } |
194 | else { | 199 | else { |
195 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 200 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
196 | } | 201 | } |
197 | 202 | ||
198 | child->exit_code = data; | 203 | child->exit_code = data; |
199 | 204 | ||
200 | /* TODO: make sure any pending breakpoint is killed */ | 205 | /* TODO: make sure any pending breakpoint is killed */ |
201 | wake_up_process(child); | 206 | wake_up_process(child); |
202 | ret = 0; | 207 | ret = 0; |
203 | 208 | ||
204 | break; | 209 | break; |
205 | 210 | ||
206 | /* Make the child exit by sending it a sigkill. */ | 211 | /* Make the child exit by sending it a sigkill. */ |
207 | case PTRACE_KILL: | 212 | case PTRACE_KILL: |
208 | ret = 0; | 213 | ret = 0; |
209 | 214 | ||
210 | if (child->state == TASK_ZOMBIE) | 215 | if (child->exit_state == EXIT_ZOMBIE) |
211 | break; | 216 | break; |
212 | 217 | ||
213 | child->exit_code = SIGKILL; | 218 | child->exit_code = SIGKILL; |
214 | 219 | ||
215 | /* TODO: make sure any pending breakpoint is killed */ | 220 | /* TODO: make sure any pending breakpoint is killed */ |
216 | wake_up_process(child); | 221 | wake_up_process(child); |
217 | break; | 222 | break; |
218 | 223 | ||
219 | /* Set the trap flag. */ | 224 | /* Set the trap flag. */ |
220 | case PTRACE_SINGLESTEP: | 225 | case PTRACE_SINGLESTEP: |
221 | ret = -EIO; | 226 | ret = -EIO; |
222 | 227 | ||
223 | if (!valid_signal(data)) | 228 | if (!valid_signal(data)) |
224 | break; | 229 | break; |
225 | 230 | ||
226 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 231 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
227 | 232 | ||
228 | /* TODO: set some clever breakpoint mechanism... */ | 233 | /* TODO: set some clever breakpoint mechanism... */ |
229 | 234 | ||
230 | child->exit_code = data; | 235 | child->exit_code = data; |
231 | wake_up_process(child); | 236 | wake_up_process(child); |
232 | ret = 0; | 237 | ret = 0; |
233 | break; | 238 | break; |
234 | 239 | ||
235 | case PTRACE_DETACH: | 240 | case PTRACE_DETACH: |
236 | ret = ptrace_detach(child, data); | 241 | ret = ptrace_detach(child, data); |
237 | break; | 242 | break; |
238 | 243 | ||
239 | /* Get all GP registers from the child. */ | 244 | /* Get all GP registers from the child. */ |
240 | case PTRACE_GETREGS: { | 245 | case PTRACE_GETREGS: { |
241 | int i; | 246 | int i; |
242 | unsigned long tmp; | 247 | unsigned long tmp; |
243 | 248 | ||
244 | for (i = 0; i <= PT_MAX; i++) { | 249 | for (i = 0; i <= PT_MAX; i++) { |
245 | tmp = get_reg(child, i); | 250 | tmp = get_reg(child, i); |
246 | 251 | ||
247 | if (put_user(tmp, datap)) { | 252 | if (put_user(tmp, datap)) { |
248 | ret = -EFAULT; | 253 | ret = -EFAULT; |
249 | goto out_tsk; | 254 | goto out_tsk; |
250 | } | 255 | } |
251 | 256 | ||
252 | data += sizeof(long); | 257 | data += sizeof(long); |
253 | } | 258 | } |
254 | 259 | ||
255 | ret = 0; | 260 | ret = 0; |
256 | break; | 261 | break; |
257 | } | 262 | } |
258 | 263 | ||
259 | /* Set all GP registers in the child. */ | 264 | /* Set all GP registers in the child. */ |
260 | case PTRACE_SETREGS: { | 265 | case PTRACE_SETREGS: { |
261 | int i; | 266 | int i; |
262 | unsigned long tmp; | 267 | unsigned long tmp; |
263 | 268 | ||
264 | for (i = 0; i <= PT_MAX; i++) { | 269 | for (i = 0; i <= PT_MAX; i++) { |
265 | if (get_user(tmp, datap)) { | 270 | if (get_user(tmp, datap)) { |
266 | ret = -EFAULT; | 271 | ret = -EFAULT; |
267 | goto out_tsk; | 272 | goto out_tsk; |
268 | } | 273 | } |
269 | 274 | ||
270 | if (i == PT_DCCR) { | 275 | if (i == PT_DCCR) { |
271 | tmp &= DCCR_MASK; | 276 | tmp &= DCCR_MASK; |
272 | tmp |= get_reg(child, PT_DCCR) & ~DCCR_MASK; | 277 | tmp |= get_reg(child, PT_DCCR) & ~DCCR_MASK; |
273 | } | 278 | } |
274 | 279 | ||
275 | put_reg(child, i, tmp); | 280 | put_reg(child, i, tmp); |
276 | data += sizeof(long); | 281 | data += sizeof(long); |
277 | } | 282 | } |
278 | 283 | ||
279 | ret = 0; | 284 | ret = 0; |
280 | break; | 285 | break; |
281 | } | 286 | } |
282 | 287 | ||
283 | default: | 288 | default: |
284 | ret = ptrace_request(child, request, addr, data); | 289 | ret = ptrace_request(child, request, addr, data); |
285 | break; | 290 | break; |
286 | } | 291 | } |
287 | out_tsk: | 292 | out_tsk: |
288 | put_task_struct(child); | 293 | put_task_struct(child); |
289 | out: | 294 | out: |
290 | unlock_kernel(); | 295 | unlock_kernel(); |
291 | return ret; | 296 | return ret; |
292 | } | 297 | } |
293 | 298 | ||
294 | void do_syscall_trace(void) | 299 | void do_syscall_trace(void) |
295 | { | 300 | { |
296 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | 301 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) |
297 | return; | 302 | return; |
298 | 303 | ||
299 | if (!(current->ptrace & PT_PTRACED)) | 304 | if (!(current->ptrace & PT_PTRACED)) |
300 | return; | 305 | return; |
301 | 306 | ||
302 | /* the 0x80 provides a way for the tracing parent to distinguish | 307 | /* the 0x80 provides a way for the tracing parent to distinguish |
303 | between a syscall stop and SIGTRAP delivery */ | 308 | between a syscall stop and SIGTRAP delivery */ |
304 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) | 309 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) |
305 | ? 0x80 : 0)); | 310 | ? 0x80 : 0)); |
306 | 311 | ||
307 | /* | 312 | /* |
308 | * This isn't the same as continuing with a signal, but it will do for | 313 | * This isn't the same as continuing with a signal, but it will do for |
309 | * normal use. | 314 | * normal use. |
310 | */ | 315 | */ |
311 | if (current->exit_code) { | 316 | if (current->exit_code) { |
312 | send_sig(current->exit_code, current, 1); | 317 | send_sig(current->exit_code, current, 1); |
313 | current->exit_code = 0; | 318 | current->exit_code = 0; |
314 | } | 319 | } |
315 | } | 320 | } |
316 | 321 |
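Note on the comment in this file about PTRACE_PEEKTEXT/PTRACE_PEEKDATA/PTRACE_PEEKUSER: the result is written through the user-space pointer passed in data rather than being returned. As a minimal, hypothetical sketch (not part of this commit; the helper name and error handling are illustrative, and a libc ptrace() wrapper may hide this detail), a tracer using the raw syscall could read one word of the child's memory like this:

    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Sketch only: read one word from the traced child via the raw ptrace
     * syscall, relying on the convention documented in the comment above --
     * the kernel stores the peeked word through the fourth argument instead
     * of returning it. */
    static unsigned long peek_word(pid_t child, unsigned long addr)
    {
            unsigned long word = 0;

            if (syscall(SYS_ptrace, PTRACE_PEEKDATA, child, addr, &word) < 0)
                    perror("ptrace(PTRACE_PEEKDATA)");
            return word;
    }

This only illustrates the calling convention the comment describes; the kernel code in the diff remains the authoritative reference.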
arch/cris/kernel/module.c
1 | /* Kernel module help for i386. | 1 | /* Kernel module help for i386. |
2 | Copyright (C) 2001 Rusty Russell. | 2 | Copyright (C) 2001 Rusty Russell. |
3 | 3 | ||
4 | This program is free software; you can redistribute it and/or modify | 4 | This program is free software; you can redistribute it and/or modify |
5 | it under the terms of the GNU General Public License as published by | 5 | it under the terms of the GNU General Public License as published by |
6 | the Free Software Foundation; either version 2 of the License, or | 6 | the Free Software Foundation; either version 2 of the License, or |
7 | (at your option) any later version. | 7 | (at your option) any later version. |
8 | 8 | ||
9 | This program is distributed in the hope that it will be useful, | 9 | This program is distributed in the hope that it will be useful, |
10 | but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | GNU General Public License for more details. | 12 | GNU General Public License for more details. |
13 | 13 | ||
14 | You should have received a copy of the GNU General Public License | 14 | You should have received a copy of the GNU General Public License |
15 | along with this program; if not, write to the Free Software | 15 | along with this program; if not, write to the Free Software |
16 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 16 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
17 | */ | 17 | */ |
18 | #include <linux/moduleloader.h> | 18 | #include <linux/moduleloader.h> |
19 | #include <linux/elf.h> | 19 | #include <linux/elf.h> |
20 | #include <linux/vmalloc.h> | 20 | #include <linux/vmalloc.h> |
21 | #include <linux/fs.h> | 21 | #include <linux/fs.h> |
22 | #include <linux/string.h> | 22 | #include <linux/string.h> |
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | 24 | ||
25 | #if 0 | 25 | #if 0 |
26 | #define DEBUGP printk | 26 | #define DEBUGP printk |
27 | #else | 27 | #else |
28 | #define DEBUGP(fmt , ...) | 28 | #define DEBUGP(fmt , ...) |
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | void *module_alloc(unsigned long size) | 31 | void *module_alloc(unsigned long size) |
32 | { | 32 | { |
33 | if (size == 0) | 33 | if (size == 0) |
34 | return NULL; | 34 | return NULL; |
35 | return vmalloc(size); | 35 | return vmalloc_exec(size); |
36 | } | 36 | } |
37 | 37 | ||
38 | 38 | ||
39 | /* Free memory returned from module_alloc */ | 39 | /* Free memory returned from module_alloc */ |
40 | void module_free(struct module *mod, void *module_region) | 40 | void module_free(struct module *mod, void *module_region) |
41 | { | 41 | { |
42 | vfree(module_region); | 42 | vfree(module_region); |
43 | /* FIXME: If module_region == mod->init_region, trim exception | 43 | /* FIXME: If module_region == mod->init_region, trim exception |
44 | table entries. */ | 44 | table entries. */ |
45 | } | 45 | } |
46 | 46 | ||
47 | /* We don't need anything special. */ | 47 | /* We don't need anything special. */ |
48 | int module_frob_arch_sections(Elf_Ehdr *hdr, | 48 | int module_frob_arch_sections(Elf_Ehdr *hdr, |
49 | Elf_Shdr *sechdrs, | 49 | Elf_Shdr *sechdrs, |
50 | char *secstrings, | 50 | char *secstrings, |
51 | struct module *mod) | 51 | struct module *mod) |
52 | { | 52 | { |
53 | return 0; | 53 | return 0; |
54 | } | 54 | } |
55 | 55 | ||
56 | int apply_relocate(Elf32_Shdr *sechdrs, | 56 | int apply_relocate(Elf32_Shdr *sechdrs, |
57 | const char *strtab, | 57 | const char *strtab, |
58 | unsigned int symindex, | 58 | unsigned int symindex, |
59 | unsigned int relsec, | 59 | unsigned int relsec, |
60 | struct module *me) | 60 | struct module *me) |
61 | { | 61 | { |
62 | unsigned int i; | 62 | printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name); |
63 | Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr; | 63 | return -ENOEXEC; |
64 | Elf32_Sym *sym; | ||
65 | uint32_t *location; | ||
66 | |||
67 | DEBUGP("Applying relocate section %u to %u\n", relsec, | ||
68 | sechdrs[relsec].sh_info); | ||
69 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { | ||
70 | /* This is where to make the change */ | ||
71 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_offset | ||
72 | + rel[i].r_offset; | ||
73 | /* This is the symbol it is referring to. Note that all | ||
74 | undefined symbols have been resolved. */ | ||
75 | sym = (Elf32_Sym *)sechdrs[symindex].sh_addr | ||
76 | + ELF32_R_SYM(rel[i].r_info); | ||
77 | |||
78 | /* We add the value into the location given */ | ||
79 | *location += sym->st_value; | ||
80 | } | ||
81 | return 0; | ||
82 | } | 64 | } |
83 | 65 | ||
84 | int apply_relocate_add(Elf32_Shdr *sechdrs, | 66 | int apply_relocate_add(Elf32_Shdr *sechdrs, |
85 | const char *strtab, | 67 | const char *strtab, |
86 | unsigned int symindex, | 68 | unsigned int symindex, |
87 | unsigned int relsec, | 69 | unsigned int relsec, |
88 | struct module *me) | 70 | struct module *me) |
89 | { | 71 | { |
90 | unsigned int i; | 72 | unsigned int i; |
91 | Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr; | 73 | Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr; |
92 | 74 | ||
93 | DEBUGP ("Applying relocate section %u to %u\n", relsec, | 75 | DEBUGP ("Applying add relocate section %u to %u\n", relsec, |
94 | sechdrs[relsec].sh_info); | 76 | sechdrs[relsec].sh_info); |
95 | 77 | ||
96 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof (*rela); i++) { | 78 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof (*rela); i++) { |
97 | /* This is where to make the change */ | 79 | /* This is where to make the change */ |
98 | uint32_t *loc | 80 | uint32_t *loc |
99 | = ((void *)sechdrs[sechdrs[relsec].sh_info].sh_addr | 81 | = ((void *)sechdrs[sechdrs[relsec].sh_info].sh_addr |
100 | + rela[i].r_offset); | 82 | + rela[i].r_offset); |
101 | /* This is the symbol it is referring to. Note that all | 83 | /* This is the symbol it is referring to. Note that all |
102 | undefined symbols have been resolved. */ | 84 | undefined symbols have been resolved. */ |
103 | Elf32_Sym *sym | 85 | Elf32_Sym *sym |
104 | = ((Elf32_Sym *)sechdrs[symindex].sh_addr | 86 | = ((Elf32_Sym *)sechdrs[symindex].sh_addr |
105 | + ELF32_R_SYM (rela[i].r_info)); | 87 | + ELF32_R_SYM (rela[i].r_info)); |
106 | *loc = sym->st_value + rela[i].r_addend; | 88 | switch (ELF32_R_TYPE(rela[i].r_info)) { |
89 | case R_CRIS_32: | ||
90 | *loc = sym->st_value + rela[i].r_addend; | ||
91 | break; | ||
92 | case R_CRIS_32_PCREL: | ||
93 | *loc = sym->st_value - (unsigned)loc + rela[i].r_addend - 4; | ||
94 | break; | ||
95 | default: | ||
96 | printk(KERN_ERR "module %s: Unknown relocation: %u\n", | ||
97 | me->name, ELF32_R_TYPE(rela[i].r_info)); | ||
98 | return -ENOEXEC; | ||
99 | } | ||
107 | } | 100 | } |
108 | 101 | ||
109 | return 0; | 102 | return 0; |
110 | } | 103 | } |
111 | 104 |
arch/cris/kernel/process.c
1 | /* $Id: process.c,v 1.17 2004/04/05 13:53:48 starvik Exp $ | 1 | /* $Id: process.c,v 1.21 2005/03/04 08:16:17 starvik Exp $ |
2 | * | 2 | * |
3 | * linux/arch/cris/kernel/process.c | 3 | * linux/arch/cris/kernel/process.c |
4 | * | 4 | * |
5 | * Copyright (C) 1995 Linus Torvalds | 5 | * Copyright (C) 1995 Linus Torvalds |
6 | * Copyright (C) 2000-2002 Axis Communications AB | 6 | * Copyright (C) 2000-2002 Axis Communications AB |
7 | * | 7 | * |
8 | * Authors: Bjorn Wesen (bjornw@axis.com) | 8 | * Authors: Bjorn Wesen (bjornw@axis.com) |
9 | * | 9 | * |
10 | * $Log: process.c,v $ | 10 | * $Log: process.c,v $ |
11 | * Revision 1.21 2005/03/04 08:16:17 starvik | ||
12 | * Merge of Linux 2.6.11. | ||
13 | * | ||
14 | * Revision 1.20 2005/01/18 05:57:22 starvik | ||
15 | * Renamed hlt_counter to cris_hlt_counter and made it global. | ||
16 | * | ||
17 | * Revision 1.19 2004/10/19 13:07:43 starvik | ||
18 | * Merge of Linux 2.6.9 | ||
19 | * | ||
20 | * Revision 1.18 2004/08/16 12:37:23 starvik | ||
21 | * Merge of Linux 2.6.8 | ||
22 | * | ||
11 | * Revision 1.17 2004/04/05 13:53:48 starvik | 23 | * Revision 1.17 2004/04/05 13:53:48 starvik |
12 | * Merge of Linux 2.6.5 | 24 | * Merge of Linux 2.6.5 |
13 | * | 25 | * |
14 | * Revision 1.16 2003/10/27 08:04:33 starvik | 26 | * Revision 1.16 2003/10/27 08:04:33 starvik |
15 | * Merge of Linux 2.6.0-test9 | 27 | * Merge of Linux 2.6.0-test9 |
16 | * | 28 | * |
17 | * Revision 1.15 2003/09/11 07:29:52 starvik | 29 | * Revision 1.15 2003/09/11 07:29:52 starvik |
18 | * Merge of Linux 2.6.0-test5 | 30 | * Merge of Linux 2.6.0-test5 |
19 | * | 31 | * |
20 | * Revision 1.14 2003/06/10 10:21:12 johana | 32 | * Revision 1.14 2003/06/10 10:21:12 johana |
21 | * Moved thread_saved_pc() from arch/cris/kernel/process.c to | 33 | * Moved thread_saved_pc() from arch/cris/kernel/process.c to |
22 | * subarch specific process.c. arch-v32 has an erp, no irp. | 34 | * subarch specific process.c. arch-v32 has an erp, no irp. |
23 | * | 35 | * |
24 | * Revision 1.13 2003/04/09 05:20:47 starvik | 36 | * Revision 1.13 2003/04/09 05:20:47 starvik |
25 | * Merge of Linux 2.5.67 | 37 | * Merge of Linux 2.5.67 |
26 | * | 38 | * |
27 | * Revision 1.12 2002/12/11 15:41:11 starvik | 39 | * Revision 1.12 2002/12/11 15:41:11 starvik |
28 | * Extracted v10 (ETRAX 100LX) specific stuff to arch/cris/arch-v10/kernel | 40 | * Extracted v10 (ETRAX 100LX) specific stuff to arch/cris/arch-v10/kernel |
29 | * | 41 | * |
30 | * Revision 1.11 2002/12/10 09:00:10 starvik | 42 | * Revision 1.11 2002/12/10 09:00:10 starvik |
31 | * Merge of Linux 2.5.51 | 43 | * Merge of Linux 2.5.51 |
32 | * | 44 | * |
33 | * Revision 1.10 2002/11/27 08:42:34 starvik | 45 | * Revision 1.10 2002/11/27 08:42:34 starvik |
34 | * Argument to user_regs() is thread_info* | 46 | * Argument to user_regs() is thread_info* |
35 | * | 47 | * |
36 | * Revision 1.9 2002/11/26 09:44:21 starvik | 48 | * Revision 1.9 2002/11/26 09:44:21 starvik |
37 | * New threads exits through ret_from_fork (necessary for preemptive scheduling) | 49 | * New threads exits through ret_from_fork (necessary for preemptive scheduling) |
38 | * | 50 | * |
39 | * Revision 1.8 2002/11/19 14:35:24 starvik | 51 | * Revision 1.8 2002/11/19 14:35:24 starvik |
40 | * Changes from linux 2.4 | 52 | * Changes from linux 2.4 |
41 | * Changed struct initializer syntax to the currently prefered notation | 53 | * Changed struct initializer syntax to the currently prefered notation |
42 | * | 54 | * |
43 | * Revision 1.7 2002/11/18 07:39:42 starvik | 55 | * Revision 1.7 2002/11/18 07:39:42 starvik |
44 | * thread_saved_pc moved here from processor.h | 56 | * thread_saved_pc moved here from processor.h |
45 | * | 57 | * |
46 | * Revision 1.6 2002/11/14 06:51:27 starvik | 58 | * Revision 1.6 2002/11/14 06:51:27 starvik |
47 | * Made cpu_idle more similar with other archs | 59 | * Made cpu_idle more similar with other archs |
48 | * init_task_union -> init_thread_union | 60 | * init_task_union -> init_thread_union |
49 | * Updated for new interrupt macros | 61 | * Updated for new interrupt macros |
50 | * sys_clone and do_fork have a new argument, user_tid | 62 | * sys_clone and do_fork have a new argument, user_tid |
51 | * | 63 | * |
52 | * Revision 1.5 2002/11/05 06:45:11 starvik | 64 | * Revision 1.5 2002/11/05 06:45:11 starvik |
53 | * Merge of Linux 2.5.45 | 65 | * Merge of Linux 2.5.45 |
54 | * | 66 | * |
55 | * Revision 1.4 2002/02/05 15:37:44 bjornw | 67 | * Revision 1.4 2002/02/05 15:37:44 bjornw |
56 | * Need init_task.h | 68 | * Need init_task.h |
57 | * | 69 | * |
58 | * Revision 1.3 2002/01/21 15:22:49 bjornw | 70 | * Revision 1.3 2002/01/21 15:22:49 bjornw |
59 | * current->counter is gone | 71 | * current->counter is gone |
60 | * | 72 | * |
61 | * Revision 1.22 2001/11/13 09:40:43 orjanf | 73 | * Revision 1.22 2001/11/13 09:40:43 orjanf |
62 | * Added dump_fpu (needed for core dumps). | 74 | * Added dump_fpu (needed for core dumps). |
63 | * | 75 | * |
64 | * Revision 1.21 2001/11/12 18:26:21 pkj | 76 | * Revision 1.21 2001/11/12 18:26:21 pkj |
65 | * Fixed compiler warnings. | 77 | * Fixed compiler warnings. |
66 | * | 78 | * |
67 | * Revision 1.20 2001/10/03 08:21:39 jonashg | 79 | * Revision 1.20 2001/10/03 08:21:39 jonashg |
68 | * cause_of_death does not exist if CONFIG_SVINTO_SIM is defined. | 80 | * cause_of_death does not exist if CONFIG_SVINTO_SIM is defined. |
69 | * | 81 | * |
70 | * Revision 1.19 2001/09/26 11:52:54 bjornw | 82 | * Revision 1.19 2001/09/26 11:52:54 bjornw |
71 | * INIT_MMAP is gone in 2.4.10 | 83 | * INIT_MMAP is gone in 2.4.10 |
72 | * | 84 | * |
73 | * Revision 1.18 2001/08/21 21:43:51 hp | 85 | * Revision 1.18 2001/08/21 21:43:51 hp |
74 | * Move last watchdog fix inside #ifdef CONFIG_ETRAX_WATCHDOG | 86 | * Move last watchdog fix inside #ifdef CONFIG_ETRAX_WATCHDOG |
75 | * | 87 | * |
76 | * Revision 1.17 2001/08/21 13:48:01 jonashg | 88 | * Revision 1.17 2001/08/21 13:48:01 jonashg |
77 | * Added fix by HP to avoid oops when doing a hard_reset_now. | 89 | * Added fix by HP to avoid oops when doing a hard_reset_now. |
78 | * | 90 | * |
79 | * Revision 1.16 2001/06/21 02:00:40 hp | 91 | * Revision 1.16 2001/06/21 02:00:40 hp |
80 | * * entry.S: Include asm/unistd.h. | 92 | * * entry.S: Include asm/unistd.h. |
81 | * (_sys_call_table): Use section .rodata, not .data. | 93 | * (_sys_call_table): Use section .rodata, not .data. |
82 | * (_kernel_thread): Move from... | 94 | * (_kernel_thread): Move from... |
83 | * * process.c: ... here. | 95 | * * process.c: ... here. |
84 | * * entryoffsets.c (VAL): Break out from... | 96 | * * entryoffsets.c (VAL): Break out from... |
85 | * (OF): Use VAL. | 97 | * (OF): Use VAL. |
86 | * (LCLONE_VM): New asmified value from CLONE_VM. | 98 | * (LCLONE_VM): New asmified value from CLONE_VM. |
87 | * | 99 | * |
88 | * Revision 1.15 2001/06/20 16:31:57 hp | 100 | * Revision 1.15 2001/06/20 16:31:57 hp |
89 | * Add comments to describe empty functions according to review. | 101 | * Add comments to describe empty functions according to review. |
90 | * | 102 | * |
91 | * Revision 1.14 2001/05/29 11:27:59 markusl | 103 | * Revision 1.14 2001/05/29 11:27:59 markusl |
92 | * Fixed so that hard_reset_now will do reset even if watchdog wasn't enabled | 104 | * Fixed so that hard_reset_now will do reset even if watchdog wasn't enabled |
93 | * | 105 | * |
94 | * Revision 1.13 2001/03/20 19:44:06 bjornw | 106 | * Revision 1.13 2001/03/20 19:44:06 bjornw |
95 | * Use the 7th syscall argument for regs instead of current_regs | 107 | * Use the 7th syscall argument for regs instead of current_regs |
96 | * | 108 | * |
97 | */ | 109 | */ |
98 | 110 | ||
99 | /* | 111 | /* |
100 | * This file handles the architecture-dependent parts of process handling.. | 112 | * This file handles the architecture-dependent parts of process handling.. |
101 | */ | 113 | */ |
102 | 114 | ||
103 | #include <asm/atomic.h> | 115 | #include <asm/atomic.h> |
104 | #include <asm/pgtable.h> | 116 | #include <asm/pgtable.h> |
105 | #include <asm/uaccess.h> | 117 | #include <asm/uaccess.h> |
106 | #include <asm/irq.h> | 118 | #include <asm/irq.h> |
107 | #include <linux/module.h> | 119 | #include <linux/module.h> |
108 | #include <linux/spinlock.h> | 120 | #include <linux/spinlock.h> |
109 | #include <linux/fs_struct.h> | 121 | #include <linux/fs_struct.h> |
110 | #include <linux/init_task.h> | 122 | #include <linux/init_task.h> |
111 | #include <linux/sched.h> | 123 | #include <linux/sched.h> |
112 | #include <linux/fs.h> | 124 | #include <linux/fs.h> |
113 | #include <linux/user.h> | 125 | #include <linux/user.h> |
114 | #include <linux/elfcore.h> | 126 | #include <linux/elfcore.h> |
115 | #include <linux/mqueue.h> | 127 | #include <linux/mqueue.h> |
116 | #include <linux/reboot.h> | 128 | #include <linux/reboot.h> |
117 | 129 | ||
118 | //#define DEBUG | 130 | //#define DEBUG |
119 | 131 | ||
120 | /* | 132 | /* |
121 | * Initial task structure. Make this a per-architecture thing, | 133 | * Initial task structure. Make this a per-architecture thing, |
122 | * because different architectures tend to have different | 134 | * because different architectures tend to have different |
123 | * alignment requirements and potentially different initial | 135 | * alignment requirements and potentially different initial |
124 | * setup. | 136 | * setup. |
125 | */ | 137 | */ |
126 | 138 | ||
127 | static struct fs_struct init_fs = INIT_FS; | 139 | static struct fs_struct init_fs = INIT_FS; |
128 | static struct files_struct init_files = INIT_FILES; | 140 | static struct files_struct init_files = INIT_FILES; |
129 | static struct signal_struct init_signals = INIT_SIGNALS(init_signals); | 141 | static struct signal_struct init_signals = INIT_SIGNALS(init_signals); |
130 | static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); | 142 | static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); |
131 | struct mm_struct init_mm = INIT_MM(init_mm); | 143 | struct mm_struct init_mm = INIT_MM(init_mm); |
132 | 144 | ||
133 | EXPORT_SYMBOL(init_mm); | 145 | EXPORT_SYMBOL(init_mm); |
134 | 146 | ||
135 | /* | 147 | /* |
136 | * Initial thread structure. | 148 | * Initial thread structure. |
137 | * | 149 | * |
138 | * We need to make sure that this is 8192-byte aligned due to the | 150 | * We need to make sure that this is 8192-byte aligned due to the |
139 | * way process stacks are handled. This is done by having a special | 151 | * way process stacks are handled. This is done by having a special |
140 | * "init_task" linker map entry.. | 152 | * "init_task" linker map entry.. |
141 | */ | 153 | */ |
142 | union thread_union init_thread_union | 154 | union thread_union init_thread_union |
143 | __attribute__((__section__(".data.init_task"))) = | 155 | __attribute__((__section__(".data.init_task"))) = |
144 | { INIT_THREAD_INFO(init_task) }; | 156 | { INIT_THREAD_INFO(init_task) }; |
145 | 157 | ||
146 | /* | 158 | /* |
147 | * Initial task structure. | 159 | * Initial task structure. |
148 | * | 160 | * |
149 | * All other task structs will be allocated on slabs in fork.c | 161 | * All other task structs will be allocated on slabs in fork.c |
150 | */ | 162 | */ |
151 | struct task_struct init_task = INIT_TASK(init_task); | 163 | struct task_struct init_task = INIT_TASK(init_task); |
152 | 164 | ||
153 | EXPORT_SYMBOL(init_task); | 165 | EXPORT_SYMBOL(init_task); |
154 | 166 | ||
155 | /* | 167 | /* |
156 | * The hlt_counter, disable_hlt and enable_hlt is just here as a hook if | 168 | * The hlt_counter, disable_hlt and enable_hlt is just here as a hook if |
157 | * there would ever be a halt sequence (for power save when idle) with | 169 | * there would ever be a halt sequence (for power save when idle) with |
158 | * some largish delay when halting or resuming *and* a driver that can't | 170 | * some largish delay when halting or resuming *and* a driver that can't |
159 | * afford that delay. The hlt_counter would then be checked before | 171 | * afford that delay. The hlt_counter would then be checked before |
160 | * executing the halt sequence, and the driver marks the unhaltable | 172 | * executing the halt sequence, and the driver marks the unhaltable |
161 | * region by enable_hlt/disable_hlt. | 173 | * region by enable_hlt/disable_hlt. |
162 | */ | 174 | */ |
163 | 175 | ||
164 | static int hlt_counter=0; | 176 | int cris_hlt_counter=0; |
165 | 177 | ||
166 | void disable_hlt(void) | 178 | void disable_hlt(void) |
167 | { | 179 | { |
168 | hlt_counter++; | 180 | cris_hlt_counter++; |
169 | } | 181 | } |
170 | 182 | ||
171 | EXPORT_SYMBOL(disable_hlt); | 183 | EXPORT_SYMBOL(disable_hlt); |
172 | 184 | ||
173 | void enable_hlt(void) | 185 | void enable_hlt(void) |
174 | { | 186 | { |
175 | hlt_counter--; | 187 | cris_hlt_counter--; |
176 | } | 188 | } |
177 | 189 | ||
178 | EXPORT_SYMBOL(enable_hlt); | 190 | EXPORT_SYMBOL(enable_hlt); |
179 | 191 | ||
180 | /* | 192 | /* |
181 | * The following aren't currently used. | 193 | * The following aren't currently used. |
182 | */ | 194 | */ |
183 | void (*pm_idle)(void); | 195 | void (*pm_idle)(void); |
184 | 196 | ||
185 | extern void default_idle(void); | 197 | extern void default_idle(void); |
186 | 198 | ||
187 | /* | 199 | /* |
188 | * The idle thread. There's no useful work to be | 200 | * The idle thread. There's no useful work to be |
189 | * done, so just try to conserve power and have a | 201 | * done, so just try to conserve power and have a |
190 | * low exit latency (ie sit in a loop waiting for | 202 | * low exit latency (ie sit in a loop waiting for |
191 | * somebody to say that they'd like to reschedule) | 203 | * somebody to say that they'd like to reschedule) |
192 | */ | 204 | */ |
193 | void cpu_idle (void) | 205 | void cpu_idle (void) |
194 | { | 206 | { |
195 | /* endless idle loop with no priority at all */ | 207 | /* endless idle loop with no priority at all */ |
196 | while (1) { | 208 | while (1) { |
197 | while (!need_resched()) { | 209 | while (!need_resched()) { |
198 | void (*idle)(void) = pm_idle; | 210 | void (*idle)(void); |
199 | 211 | /* | |
212 | * Mark this as an RCU critical section so that | ||
213 | * synchronize_kernel() in the unload path waits | ||
214 | * for our completion. | ||
215 | */ | ||
216 | idle = pm_idle; | ||
200 | if (!idle) | 217 | if (!idle) |
201 | idle = default_idle; | 218 | idle = default_idle; |
202 | |||
203 | idle(); | 219 | idle(); |
204 | } | 220 | } |
205 | schedule(); | 221 | schedule(); |
206 | } | 222 | } |
207 | |||
208 | } | 223 | } |
209 | 224 | ||
210 | void hard_reset_now (void); | 225 | void hard_reset_now (void); |
211 | 226 | ||
212 | void machine_restart(char *cmd) | 227 | void machine_restart(char *cmd) |
213 | { | 228 | { |
214 | hard_reset_now(); | 229 | hard_reset_now(); |
215 | } | 230 | } |
216 | 231 | ||
217 | /* | 232 | /* |
218 | * Similar to machine_power_off, but don't shut off power. Add code | 233 | * Similar to machine_power_off, but don't shut off power. Add code |
219 | * here to freeze the system for e.g. post-mortem debug purpose when | 234 | * here to freeze the system for e.g. post-mortem debug purpose when |
220 | * possible. This halt has nothing to do with the idle halt. | 235 | * possible. This halt has nothing to do with the idle halt. |
221 | */ | 236 | */ |
222 | 237 | ||
223 | void machine_halt(void) | 238 | void machine_halt(void) |
224 | { | 239 | { |
225 | } | 240 | } |
226 | 241 | ||
227 | /* If or when software power-off is implemented, add code here. */ | 242 | /* If or when software power-off is implemented, add code here. */ |
228 | 243 | ||
229 | void machine_power_off(void) | 244 | void machine_power_off(void) |
230 | { | 245 | { |
231 | } | 246 | } |
232 | 247 | ||
233 | /* | 248 | /* |
234 | * When a process does an "exec", machine state like FPU and debug | 249 | * When a process does an "exec", machine state like FPU and debug |
235 | * registers need to be reset. This is a hook function for that. | 250 | * registers need to be reset. This is a hook function for that. |
236 | * Currently we don't have any such state to reset, so this is empty. | 251 | * Currently we don't have any such state to reset, so this is empty. |
237 | */ | 252 | */ |
238 | 253 | ||
239 | void flush_thread(void) | 254 | void flush_thread(void) |
240 | { | 255 | { |
241 | } | 256 | } |
242 | 257 | ||
243 | /* | 258 | /* |
244 | * fill in the user structure for a core dump.. | 259 | * fill in the user structure for a core dump.. |
245 | */ | 260 | */ |
246 | void dump_thread(struct pt_regs * regs, struct user * dump) | 261 | void dump_thread(struct pt_regs * regs, struct user * dump) |
247 | { | 262 | { |
248 | #if 0 | 263 | #if 0 |
249 | int i; | 264 | int i; |
250 | 265 | ||
251 | /* changed the size calculations - should hopefully work better. lbt */ | 266 | /* changed the size calculations - should hopefully work better. lbt */ |
252 | dump->magic = CMAGIC; | 267 | dump->magic = CMAGIC; |
253 | dump->start_code = 0; | 268 | dump->start_code = 0; |
254 | dump->start_stack = regs->esp & ~(PAGE_SIZE - 1); | 269 | dump->start_stack = regs->esp & ~(PAGE_SIZE - 1); |
255 | dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; | 270 | dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; |
256 | dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT; | 271 | dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT; |
257 | dump->u_dsize -= dump->u_tsize; | 272 | dump->u_dsize -= dump->u_tsize; |
258 | dump->u_ssize = 0; | 273 | dump->u_ssize = 0; |
259 | for (i = 0; i < 8; i++) | 274 | for (i = 0; i < 8; i++) |
260 | dump->u_debugreg[i] = current->debugreg[i]; | 275 | dump->u_debugreg[i] = current->debugreg[i]; |
261 | 276 | ||
262 | if (dump->start_stack < TASK_SIZE) | 277 | if (dump->start_stack < TASK_SIZE) |
263 | dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT; | 278 | dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT; |
264 | 279 | ||
265 | dump->regs = *regs; | 280 | dump->regs = *regs; |
266 | 281 | ||
267 | dump->u_fpvalid = dump_fpu (regs, &dump->i387); | 282 | dump->u_fpvalid = dump_fpu (regs, &dump->i387); |
268 | #endif | 283 | #endif |
269 | } | 284 | } |
270 | 285 | ||
271 | /* Fill in the fpu structure for a core dump. */ | 286 | /* Fill in the fpu structure for a core dump. */ |
272 | int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) | 287 | int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) |
273 | { | 288 | { |
274 | return 0; | 289 | return 0; |
include/asm-cris/arch-v10/bitops.h
1 | /* asm/arch/bitops.h for Linux/CRISv10 */ | 1 | /* asm/arch/bitops.h for Linux/CRISv10 */ |
2 | 2 | ||
3 | #ifndef _CRIS_ARCH_BITOPS_H | 3 | #ifndef _CRIS_ARCH_BITOPS_H |
4 | #define _CRIS_ARCH_BITOPS_H | 4 | #define _CRIS_ARCH_BITOPS_H |
5 | 5 | ||
6 | /* | 6 | /* |
7 | * Helper functions for the core of the ff[sz] functions, wrapping the | 7 | * Helper functions for the core of the ff[sz] functions, wrapping the |
8 | * syntactically awkward asms. The asms compute the number of leading | 8 | * syntactically awkward asms. The asms compute the number of leading |
9 | * zeroes of a bits-in-byte and byte-in-word and word-in-dword-swapped | 9 | * zeroes of a bits-in-byte and byte-in-word and word-in-dword-swapped |
10 | * number. They differ in that the first function also inverts all bits | 10 | * number. They differ in that the first function also inverts all bits |
11 | * in the input. | 11 | * in the input. |
12 | */ | 12 | */ |
13 | extern inline unsigned long cris_swapnwbrlz(unsigned long w) | 13 | extern inline unsigned long cris_swapnwbrlz(unsigned long w) |
14 | { | 14 | { |
15 | /* Let's just say we return the result in the same register as the | 15 | /* Let's just say we return the result in the same register as the |
16 | input. Saying we clobber the input but can return the result | 16 | input. Saying we clobber the input but can return the result |
17 | in another register: | 17 | in another register: |
18 | ! __asm__ ("swapnwbr %2\n\tlz %2,%0" | 18 | ! __asm__ ("swapnwbr %2\n\tlz %2,%0" |
19 | ! : "=r,r" (res), "=r,X" (dummy) : "1,0" (w)); | 19 | ! : "=r,r" (res), "=r,X" (dummy) : "1,0" (w)); |
20 | confuses gcc (sched.c, gcc from cris-dist-1.14). */ | 20 | confuses gcc (sched.c, gcc from cris-dist-1.14). */ |
21 | 21 | ||
22 | unsigned long res; | 22 | unsigned long res; |
23 | __asm__ ("swapnwbr %0 \n\t" | 23 | __asm__ ("swapnwbr %0 \n\t" |
24 | "lz %0,%0" | 24 | "lz %0,%0" |
25 | : "=r" (res) : "0" (w)); | 25 | : "=r" (res) : "0" (w)); |
26 | return res; | 26 | return res; |
27 | } | 27 | } |
28 | 28 | ||
29 | extern inline unsigned long cris_swapwbrlz(unsigned long w) | 29 | extern inline unsigned long cris_swapwbrlz(unsigned long w) |
30 | { | 30 | { |
31 | unsigned res; | 31 | unsigned res; |
32 | __asm__ ("swapwbr %0 \n\t" | 32 | __asm__ ("swapwbr %0 \n\t" |
33 | "lz %0,%0" | 33 | "lz %0,%0" |
34 | : "=r" (res) | 34 | : "=r" (res) |
35 | : "0" (w)); | 35 | : "0" (w)); |
36 | return res; | 36 | return res; |
37 | } | 37 | } |
38 | 38 | ||
39 | /* | 39 | /* |
40 | * ffz = Find First Zero in word. Undefined if no zero exists, | 40 | * ffz = Find First Zero in word. Undefined if no zero exists, |
41 | * so code should check against ~0UL first.. | 41 | * so code should check against ~0UL first.. |
42 | */ | 42 | */ |
43 | extern inline unsigned long ffz(unsigned long w) | 43 | extern inline unsigned long ffz(unsigned long w) |
44 | { | 44 | { |
45 | return cris_swapnwbrlz(w); | 45 | return cris_swapnwbrlz(w); |
46 | } | 46 | } |
47 | 47 | ||
48 | /** | 48 | /** |
49 | * __ffs - find first bit in word. | 49 | * __ffs - find first bit in word. |
50 | * @word: The word to search | 50 | * @word: The word to search |
51 | * | 51 | * |
52 | * Undefined if no bit exists, so code should check against 0 first. | 52 | * Undefined if no bit exists, so code should check against 0 first. |
53 | */ | 53 | */ |
54 | extern __inline__ unsigned long __ffs(unsigned long word) | 54 | extern inline unsigned long __ffs(unsigned long word) |
55 | { | 55 | { |
56 | return cris_swapnwbrlz(~word); | 56 | return cris_swapnwbrlz(~word); |
57 | } | 57 | } |
58 | 58 | ||
59 | /** | 59 | /** |
60 | * ffs - find first bit set | 60 | * ffs - find first bit set |
61 | * @x: the word to search | 61 | * @x: the word to search |
62 | * | 62 | * |
63 | * This is defined the same way as | 63 | * This is defined the same way as |
64 | * the libc and compiler builtin ffs routines, therefore | 64 | * the libc and compiler builtin ffs routines, therefore |
65 | * differs in spirit from the above ffz (man ffs). | 65 | * differs in spirit from the above ffz (man ffs). |
66 | */ | 66 | */ |
67 | 67 | ||
68 | extern inline unsigned long kernel_ffs(unsigned long w) | 68 | extern inline unsigned long kernel_ffs(unsigned long w) |
69 | { | 69 | { |
70 | return w ? cris_swapwbrlz (w) + 1 : 0; | 70 | return w ? cris_swapwbrlz (w) + 1 : 0; |
71 | } | 71 | } |
72 | 72 | ||
73 | #endif | 73 | #endif |
74 | 74 |
include/asm-cris/arch-v10/offset.h
1 | #ifndef __ASM_OFFSETS_H__ | 1 | #ifndef __ASM_OFFSETS_H__ |
2 | #define __ASM_OFFSETS_H__ | 2 | #define __ASM_OFFSETS_H__ |
3 | /* | 3 | /* |
4 | * DO NOT MODIFY. | 4 | * DO NOT MODIFY. |
5 | * | 5 | * |
6 | * This file was generated by arch/cris/Makefile | 6 | * This file was generated by arch/cris/Makefile |
7 | * | 7 | * |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define PT_orig_r10 4 /* offsetof(struct pt_regs, orig_r10) */ | 10 | #define PT_orig_r10 4 /* offsetof(struct pt_regs, orig_r10) */ |
11 | #define PT_r13 8 /* offsetof(struct pt_regs, r13) */ | 11 | #define PT_r13 8 /* offsetof(struct pt_regs, r13) */ |
12 | #define PT_r12 12 /* offsetof(struct pt_regs, r12) */ | 12 | #define PT_r12 12 /* offsetof(struct pt_regs, r12) */ |
13 | #define PT_r11 16 /* offsetof(struct pt_regs, r11) */ | 13 | #define PT_r11 16 /* offsetof(struct pt_regs, r11) */ |
14 | #define PT_r10 20 /* offsetof(struct pt_regs, r10) */ | 14 | #define PT_r10 20 /* offsetof(struct pt_regs, r10) */ |
15 | #define PT_r9 24 /* offsetof(struct pt_regs, r9) */ | 15 | #define PT_r9 24 /* offsetof(struct pt_regs, r9) */ |
16 | #define PT_mof 64 /* offsetof(struct pt_regs, mof) */ | 16 | #define PT_mof 64 /* offsetof(struct pt_regs, mof) */ |
17 | #define PT_dccr 68 /* offsetof(struct pt_regs, dccr) */ | 17 | #define PT_dccr 68 /* offsetof(struct pt_regs, dccr) */ |
18 | #define PT_srp 72 /* offsetof(struct pt_regs, srp) */ | 18 | #define PT_srp 72 /* offsetof(struct pt_regs, srp) */ |
19 | 19 | ||
20 | #define TI_task 0 /* offsetof(struct thread_info, task) */ | 20 | #define TI_task 0 /* offsetof(struct thread_info, task) */ |
21 | #define TI_flags 8 /* offsetof(struct thread_info, flags) */ | 21 | #define TI_flags 8 /* offsetof(struct thread_info, flags) */ |
22 | #define TI_preempt_count 16 /* offsetof(struct thread_info, preempt_count) */ | 22 | #define TI_preempt_count 16 /* offsetof(struct thread_info, preempt_count) */ |
23 | 23 | ||
24 | #define THREAD_ksp 0 /* offsetof(struct thread_struct, ksp) */ | 24 | #define THREAD_ksp 0 /* offsetof(struct thread_struct, ksp) */ |
25 | #define THREAD_usp 4 /* offsetof(struct thread_struct, usp) */ | 25 | #define THREAD_usp 4 /* offsetof(struct thread_struct, usp) */ |
26 | #define THREAD_dccr 8 /* offsetof(struct thread_struct, dccr) */ | 26 | #define THREAD_dccr 8 /* offsetof(struct thread_struct, dccr) */ |
27 | 27 | ||
28 | #define TASK_pid 133 /* offsetof(struct task_struct, pid) */ | 28 | #define TASK_pid 141 /* offsetof(struct task_struct, pid) */ |
29 | 29 | ||
30 | #define LCLONE_VM 256 /* CLONE_VM */ | 30 | #define LCLONE_VM 256 /* CLONE_VM */ |
31 | #define LCLONE_UNTRACED 8388608 /* CLONE_UNTRACED */ | 31 | #define LCLONE_UNTRACED 8388608 /* CLONE_UNTRACED */ |
32 | 32 | ||
33 | #endif | 33 | #endif |
34 | 34 |
include/asm-cris/bitops.h
1 | /* asm/bitops.h for Linux/CRIS | 1 | /* asm/bitops.h for Linux/CRIS |
2 | * | 2 | * |
3 | * TODO: asm versions if speed is needed | 3 | * TODO: asm versions if speed is needed |
4 | * | 4 | * |
5 | * All bit operations return 0 if the bit was cleared before the | 5 | * All bit operations return 0 if the bit was cleared before the |
6 | * operation and != 0 if it was not. | 6 | * operation and != 0 if it was not. |
7 | * | 7 | * |
8 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | 8 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef _CRIS_BITOPS_H | 11 | #ifndef _CRIS_BITOPS_H |
12 | #define _CRIS_BITOPS_H | 12 | #define _CRIS_BITOPS_H |
13 | 13 | ||
14 | /* Currently this is unsuitable for consumption outside the kernel. */ | 14 | /* Currently this is unsuitable for consumption outside the kernel. */ |
15 | #ifdef __KERNEL__ | 15 | #ifdef __KERNEL__ |
16 | 16 | ||
17 | #include <asm/arch/bitops.h> | 17 | #include <asm/arch/bitops.h> |
18 | #include <asm/system.h> | 18 | #include <asm/system.h> |
19 | #include <asm/atomic.h> | ||
19 | #include <linux/compiler.h> | 20 | #include <linux/compiler.h> |
20 | 21 | ||
21 | /* | 22 | /* |
22 | * Some hacks to defeat gcc over-optimizations.. | 23 | * Some hacks to defeat gcc over-optimizations.. |
23 | */ | 24 | */ |
24 | struct __dummy { unsigned long a[100]; }; | 25 | struct __dummy { unsigned long a[100]; }; |
25 | #define ADDR (*(struct __dummy *) addr) | 26 | #define ADDR (*(struct __dummy *) addr) |
26 | #define CONST_ADDR (*(const struct __dummy *) addr) | 27 | #define CONST_ADDR (*(const struct __dummy *) addr) |
27 | 28 | ||
28 | /* | 29 | /* |
29 | * set_bit - Atomically set a bit in memory | 30 | * set_bit - Atomically set a bit in memory |
30 | * @nr: the bit to set | 31 | * @nr: the bit to set |
31 | * @addr: the address to start counting from | 32 | * @addr: the address to start counting from |
32 | * | 33 | * |
33 | * This function is atomic and may not be reordered. See __set_bit() | 34 | * This function is atomic and may not be reordered. See __set_bit() |
34 | * if you do not require the atomic guarantees. | 35 | * if you do not require the atomic guarantees. |
35 | * Note that @nr may be almost arbitrarily large; this function is not | 36 | * Note that @nr may be almost arbitrarily large; this function is not |
36 | * restricted to acting on a single-word quantity. | 37 | * restricted to acting on a single-word quantity. |
37 | */ | 38 | */ |
38 | 39 | ||
39 | #define set_bit(nr, addr) (void)test_and_set_bit(nr, addr) | 40 | #define set_bit(nr, addr) (void)test_and_set_bit(nr, addr) |
40 | 41 | ||
41 | #define __set_bit(nr, addr) (void)__test_and_set_bit(nr, addr) | 42 | #define __set_bit(nr, addr) (void)__test_and_set_bit(nr, addr) |
42 | 43 | ||
43 | /* | 44 | /* |
44 | * clear_bit - Clears a bit in memory | 45 | * clear_bit - Clears a bit in memory |
45 | * @nr: Bit to clear | 46 | * @nr: Bit to clear |
46 | * @addr: Address to start counting from | 47 | * @addr: Address to start counting from |
47 | * | 48 | * |
48 | * clear_bit() is atomic and may not be reordered. However, it does | 49 | * clear_bit() is atomic and may not be reordered. However, it does |
49 | * not contain a memory barrier, so if it is used for locking purposes, | 50 | * not contain a memory barrier, so if it is used for locking purposes, |
50 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 51 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
51 | * in order to ensure changes are visible on other processors. | 52 | * in order to ensure changes are visible on other processors. |
52 | */ | 53 | */ |
53 | 54 | ||
54 | #define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr) | 55 | #define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr) |
55 | 56 | ||
56 | #define __clear_bit(nr, addr) (void)__test_and_clear_bit(nr, addr) | 57 | #define __clear_bit(nr, addr) (void)__test_and_clear_bit(nr, addr) |
57 | 58 | ||
58 | /* | 59 | /* |
59 | * change_bit - Toggle a bit in memory | 60 | * change_bit - Toggle a bit in memory |
60 | * @nr: Bit to change | 61 | * @nr: Bit to change |
61 | * @addr: Address to start counting from | 62 | * @addr: Address to start counting from |
62 | * | 63 | * |
63 | * change_bit() is atomic and may not be reordered. | 64 | * change_bit() is atomic and may not be reordered. |
64 | * Note that @nr may be almost arbitrarily large; this function is not | 65 | * Note that @nr may be almost arbitrarily large; this function is not |
65 | * restricted to acting on a single-word quantity. | 66 | * restricted to acting on a single-word quantity. |
66 | */ | 67 | */ |
67 | 68 | ||
68 | #define change_bit(nr, addr) (void)test_and_change_bit(nr, addr) | 69 | #define change_bit(nr, addr) (void)test_and_change_bit(nr, addr) |
69 | 70 | ||
70 | /* | 71 | /* |
71 | * __change_bit - Toggle a bit in memory | 72 | * __change_bit - Toggle a bit in memory |
72 | * @nr: the bit to change | 73 | * @nr: the bit to change |
73 | * @addr: the address to start counting from | 74 | * @addr: the address to start counting from |
74 | * | 75 | * |
75 | * Unlike change_bit(), this function is non-atomic and may be reordered. | 76 | * Unlike change_bit(), this function is non-atomic and may be reordered. |
76 | * If it's called on the same region of memory simultaneously, the effect | 77 | * If it's called on the same region of memory simultaneously, the effect |
77 | * may be that only one operation succeeds. | 78 | * may be that only one operation succeeds. |
78 | */ | 79 | */ |
79 | 80 | ||
80 | #define __change_bit(nr, addr) (void)__test_and_change_bit(nr, addr) | 81 | #define __change_bit(nr, addr) (void)__test_and_change_bit(nr, addr) |
81 | 82 | ||
82 | /** | 83 | /** |
83 | * test_and_set_bit - Set a bit and return its old value | 84 | * test_and_set_bit - Set a bit and return its old value |
84 | * @nr: Bit to set | 85 | * @nr: Bit to set |
85 | * @addr: Address to count from | 86 | * @addr: Address to count from |
86 | * | 87 | * |
87 | * This operation is atomic and cannot be reordered. | 88 | * This operation is atomic and cannot be reordered. |
88 | * It also implies a memory barrier. | 89 | * It also implies a memory barrier. |
89 | */ | 90 | */ |
90 | 91 | ||
91 | extern inline int test_and_set_bit(int nr, void *addr) | 92 | extern inline int test_and_set_bit(int nr, volatile unsigned long *addr) |
92 | { | 93 | { |
93 | unsigned int mask, retval; | 94 | unsigned int mask, retval; |
94 | unsigned long flags; | 95 | unsigned long flags; |
95 | unsigned int *adr = (unsigned int *)addr; | 96 | unsigned int *adr = (unsigned int *)addr; |
96 | 97 | ||
97 | adr += nr >> 5; | 98 | adr += nr >> 5; |
98 | mask = 1 << (nr & 0x1f); | 99 | mask = 1 << (nr & 0x1f); |
99 | local_save_flags(flags); | 100 | cris_atomic_save(addr, flags); |
100 | local_irq_disable(); | ||
101 | retval = (mask & *adr) != 0; | 101 | retval = (mask & *adr) != 0; |
102 | *adr |= mask; | 102 | *adr |= mask; |
103 | cris_atomic_restore(addr, flags); | ||
103 | local_irq_restore(flags); | 104 | local_irq_restore(flags); |
104 | return retval; | 105 | return retval; |
105 | } | 106 | } |
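The new test_and_set_bit() above follows the usual uniprocessor pattern for an atomic read-modify-write: compute the word and mask from the bit number, bracket the update with cris_atomic_save()/cris_atomic_restore(), read the old value, then set the bit. (Note that the new body also keeps the old trailing local_irq_restore(flags) after cris_atomic_restore(addr, flags); if cris_atomic_save()/cris_atomic_restore() simply wrap local_irq_save()/local_irq_restore() on a non-SMP build, which is an assumption since their definitions are not part of this patch, restoring the same saved flags twice is redundant but harmless.) A minimal standalone sketch of the word/mask arithmetic, with the interrupt bracketing stubbed out because it only makes sense inside the kernel:

#include <stdio.h>

/* Standalone illustration of the nr -> word/mask arithmetic used by the
 * CRIS bitops above.  The real functions bracket the read-modify-write
 * with cris_atomic_save()/cris_atomic_restore(); that part is omitted
 * here because it is kernel-only. */
static int demo_test_and_set_bit(int nr, unsigned int *addr)
{
        unsigned int *adr = addr + (nr >> 5);     /* word holding the bit */
        unsigned int mask = 1u << (nr & 0x1f);    /* bit within that word */
        int retval = (*adr & mask) != 0;          /* old value */

        *adr |= mask;                             /* set it */
        return retval;
}

int main(void)
{
        unsigned int bitmap[4] = { 0 };           /* 128 bits, all clear */

        printf("%d\n", demo_test_and_set_bit(40, bitmap)); /* 0: was clear */
        printf("%d\n", demo_test_and_set_bit(40, bitmap)); /* 1: now set  */
        printf("%#x\n", bitmap[1]);               /* 0x100 */
        return 0;
}

Bit 40 lands in word 1 (40 >> 5) under mask 1 << 8 (40 & 0x1f), which is why the second word reads back as 0x100.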
106 | 107 | ||
107 | extern inline int __test_and_set_bit(int nr, void *addr) | 108 | extern inline int __test_and_set_bit(int nr, volatile unsigned long *addr) |
108 | { | 109 | { |
109 | unsigned int mask, retval; | 110 | unsigned int mask, retval; |
110 | unsigned int *adr = (unsigned int *)addr; | 111 | unsigned int *adr = (unsigned int *)addr; |
111 | 112 | ||
112 | adr += nr >> 5; | 113 | adr += nr >> 5; |
113 | mask = 1 << (nr & 0x1f); | 114 | mask = 1 << (nr & 0x1f); |
114 | retval = (mask & *adr) != 0; | 115 | retval = (mask & *adr) != 0; |
115 | *adr |= mask; | 116 | *adr |= mask; |
116 | return retval; | 117 | return retval; |
117 | } | 118 | } |
118 | 119 | ||
119 | /* | 120 | /* |
120 | * clear_bit() doesn't provide any barrier for the compiler. | 121 | * clear_bit() doesn't provide any barrier for the compiler. |
121 | */ | 122 | */ |
122 | #define smp_mb__before_clear_bit() barrier() | 123 | #define smp_mb__before_clear_bit() barrier() |
123 | #define smp_mb__after_clear_bit() barrier() | 124 | #define smp_mb__after_clear_bit() barrier() |
124 | 125 | ||
125 | /** | 126 | /** |
126 | * test_and_clear_bit - Clear a bit and return its old value | 127 | * test_and_clear_bit - Clear a bit and return its old value |
127 | * @nr: Bit to clear | 128 | * @nr: Bit to clear |
128 | * @addr: Address to count from | 129 | * @addr: Address to count from |
129 | * | 130 | * |
130 | * This operation is atomic and cannot be reordered. | 131 | * This operation is atomic and cannot be reordered. |
131 | * It also implies a memory barrier. | 132 | * It also implies a memory barrier. |
132 | */ | 133 | */ |
133 | 134 | ||
134 | extern inline int test_and_clear_bit(int nr, void *addr) | 135 | extern inline int test_and_clear_bit(int nr, volatile unsigned long *addr) |
135 | { | 136 | { |
136 | unsigned int mask, retval; | 137 | unsigned int mask, retval; |
137 | unsigned long flags; | 138 | unsigned long flags; |
138 | unsigned int *adr = (unsigned int *)addr; | 139 | unsigned int *adr = (unsigned int *)addr; |
139 | 140 | ||
140 | adr += nr >> 5; | 141 | adr += nr >> 5; |
141 | mask = 1 << (nr & 0x1f); | 142 | mask = 1 << (nr & 0x1f); |
142 | local_save_flags(flags); | 143 | cris_atomic_save(addr, flags); |
143 | local_irq_disable(); | ||
144 | retval = (mask & *adr) != 0; | 144 | retval = (mask & *adr) != 0; |
145 | *adr &= ~mask; | 145 | *adr &= ~mask; |
146 | local_irq_restore(flags); | 146 | cris_atomic_restore(addr, flags); |
147 | return retval; | 147 | return retval; |
148 | } | 148 | } |
149 | 149 | ||
150 | /** | 150 | /** |
151 | * __test_and_clear_bit - Clear a bit and return its old value | 151 | * __test_and_clear_bit - Clear a bit and return its old value |
152 | * @nr: Bit to clear | 152 | * @nr: Bit to clear |
153 | * @addr: Address to count from | 153 | * @addr: Address to count from |
154 | * | 154 | * |
155 | * This operation is non-atomic and can be reordered. | 155 | * This operation is non-atomic and can be reordered. |
156 | * If two examples of this operation race, one can appear to succeed | 156 | * If two examples of this operation race, one can appear to succeed |
157 | * but actually fail. You must protect multiple accesses with a lock. | 157 | * but actually fail. You must protect multiple accesses with a lock. |
158 | */ | 158 | */ |
159 | 159 | ||
160 | extern inline int __test_and_clear_bit(int nr, void *addr) | 160 | extern inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) |
161 | { | 161 | { |
162 | unsigned int mask, retval; | 162 | unsigned int mask, retval; |
163 | unsigned int *adr = (unsigned int *)addr; | 163 | unsigned int *adr = (unsigned int *)addr; |
164 | 164 | ||
165 | adr += nr >> 5; | 165 | adr += nr >> 5; |
166 | mask = 1 << (nr & 0x1f); | 166 | mask = 1 << (nr & 0x1f); |
167 | retval = (mask & *adr) != 0; | 167 | retval = (mask & *adr) != 0; |
168 | *adr &= ~mask; | 168 | *adr &= ~mask; |
169 | return retval; | 169 | return retval; |
170 | } | 170 | } |
171 | /** | 171 | /** |
172 | * test_and_change_bit - Change a bit and return its old value | 172 | * test_and_change_bit - Change a bit and return its old value |
173 | * @nr: Bit to change | 173 | * @nr: Bit to change |
174 | * @addr: Address to count from | 174 | * @addr: Address to count from |
175 | * | 175 | * |
176 | * This operation is atomic and cannot be reordered. | 176 | * This operation is atomic and cannot be reordered. |
177 | * It also implies a memory barrier. | 177 | * It also implies a memory barrier. |
178 | */ | 178 | */ |
179 | 179 | ||
180 | extern inline int test_and_change_bit(int nr, void *addr) | 180 | extern inline int test_and_change_bit(int nr, volatile unsigned long *addr) |
181 | { | 181 | { |
182 | unsigned int mask, retval; | 182 | unsigned int mask, retval; |
183 | unsigned long flags; | 183 | unsigned long flags; |
184 | unsigned int *adr = (unsigned int *)addr; | 184 | unsigned int *adr = (unsigned int *)addr; |
185 | adr += nr >> 5; | 185 | adr += nr >> 5; |
186 | mask = 1 << (nr & 0x1f); | 186 | mask = 1 << (nr & 0x1f); |
187 | local_save_flags(flags); | 187 | cris_atomic_save(addr, flags); |
188 | local_irq_disable(); | ||
189 | retval = (mask & *adr) != 0; | 188 | retval = (mask & *adr) != 0; |
190 | *adr ^= mask; | 189 | *adr ^= mask; |
191 | local_irq_restore(flags); | 190 | cris_atomic_restore(addr, flags); |
192 | return retval; | 191 | return retval; |
193 | } | 192 | } |
194 | 193 | ||
195 | /* WARNING: non-atomic and it can be reordered! */ | 194 | /* WARNING: non-atomic and it can be reordered! */ |
196 | 195 | ||
197 | extern inline int __test_and_change_bit(int nr, void *addr) | 196 | extern inline int __test_and_change_bit(int nr, volatile unsigned long *addr) |
198 | { | 197 | { |
199 | unsigned int mask, retval; | 198 | unsigned int mask, retval; |
200 | unsigned int *adr = (unsigned int *)addr; | 199 | unsigned int *adr = (unsigned int *)addr; |
201 | 200 | ||
202 | adr += nr >> 5; | 201 | adr += nr >> 5; |
203 | mask = 1 << (nr & 0x1f); | 202 | mask = 1 << (nr & 0x1f); |
204 | retval = (mask & *adr) != 0; | 203 | retval = (mask & *adr) != 0; |
205 | *adr ^= mask; | 204 | *adr ^= mask; |
206 | 205 | ||
207 | return retval; | 206 | return retval; |
208 | } | 207 | } |
209 | 208 | ||
210 | /** | 209 | /** |
211 | * test_bit - Determine whether a bit is set | 210 | * test_bit - Determine whether a bit is set |
212 | * @nr: bit number to test | 211 | * @nr: bit number to test |
213 | * @addr: Address to start counting from | 212 | * @addr: Address to start counting from |
214 | * | 213 | * |
215 | * This routine doesn't need to be atomic. | 214 | * This routine doesn't need to be atomic. |
216 | */ | 215 | */ |
217 | 216 | ||
218 | extern inline int test_bit(int nr, const void *addr) | 217 | extern inline int test_bit(int nr, const volatile unsigned long *addr) |
219 | { | 218 | { |
220 | unsigned int mask; | 219 | unsigned int mask; |
221 | unsigned int *adr = (unsigned int *)addr; | 220 | unsigned int *adr = (unsigned int *)addr; |
222 | 221 | ||
223 | adr += nr >> 5; | 222 | adr += nr >> 5; |
224 | mask = 1 << (nr & 0x1f); | 223 | mask = 1 << (nr & 0x1f); |
225 | return ((mask & *adr) != 0); | 224 | return ((mask & *adr) != 0); |
226 | } | 225 | } |
227 | 226 | ||
228 | /* | 227 | /* |
229 | * Find-bit routines.. | 228 | * Find-bit routines.. |
230 | */ | 229 | */ |
231 | 230 | ||
232 | /* | 231 | /* |
233 | * Since we define it "external", it collides with the built-in | 232 | * Since we define it "external", it collides with the built-in |
234 | * definition, which doesn't have the same semantics. We don't want to | 233 | * definition, which doesn't have the same semantics. We don't want to |
235 | * use -fno-builtin, so just hide the name ffs. | 234 | * use -fno-builtin, so just hide the name ffs. |
236 | */ | 235 | */ |
237 | #define ffs kernel_ffs | 236 | #define ffs kernel_ffs |
238 | 237 | ||
239 | /* | 238 | /* |
240 | * fls: find last bit set. | 239 | * fls: find last bit set. |
241 | */ | 240 | */ |
242 | 241 | ||
243 | #define fls(x) generic_fls(x) | 242 | #define fls(x) generic_fls(x) |
244 | 243 | ||
245 | /* | 244 | /* |
246 | * hweightN - returns the hamming weight of an N-bit word | 245 | * hweightN - returns the hamming weight of an N-bit word |
247 | * @x: the word to weigh | 246 | * @x: the word to weigh |
248 | * | 247 | * |
249 | * The Hamming Weight of a number is the total number of bits set in it. | 248 | * The Hamming Weight of a number is the total number of bits set in it. |
250 | */ | 249 | */ |
251 | 250 | ||
252 | #define hweight32(x) generic_hweight32(x) | 251 | #define hweight32(x) generic_hweight32(x) |
253 | #define hweight16(x) generic_hweight16(x) | 252 | #define hweight16(x) generic_hweight16(x) |
254 | #define hweight8(x) generic_hweight8(x) | 253 | #define hweight8(x) generic_hweight8(x) |
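The hweight macros and fls() all fall back to the generic C helpers. As a quick illustration of the semantics: hweight8(0xb3) is 5, because 0xb3 = 1011 0011 has five bits set, and fls(0x10) is 5, because the highest set bit of 0x10 is bit 4 when positions are counted from 1. A tiny standalone check, using GCC builtins as stand-ins for the generic_* helpers (that substitution is only an assumption made for this demo):

#include <stdio.h>

/* Semantic check of the hweight/fls fallbacks referenced above, with
 * __builtin_popcount()/__builtin_clz() standing in for the generic_*
 * helpers (an assumption for the demo, not something the header does). */
int main(void)
{
        unsigned int x = 0xb3;                                      /* 1011 0011 */

        printf("hweight8(0x%x) = %d\n", x, __builtin_popcount(x));  /* 5 */
        printf("fls(0x10)      = %d\n", 32 - __builtin_clz(0x10u)); /* 5 */
        return 0;
}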
255 | 254 | ||
256 | /** | 255 | /** |
257 | * find_next_zero_bit - find the first zero bit in a memory region | 256 | * find_next_zero_bit - find the first zero bit in a memory region |
258 | * @addr: The address to base the search on | 257 | * @addr: The address to base the search on |
259 | * @offset: The bitnumber to start searching at | 258 | * @offset: The bitnumber to start searching at |
260 | * @size: The maximum size to search | 259 | * @size: The maximum size to search |
261 | */ | 260 | */ |
262 | extern inline int find_next_zero_bit (void * addr, int size, int offset) | 261 | extern inline int find_next_zero_bit (const unsigned long * addr, int size, int offset) |
263 | { | 262 | { |
264 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | 263 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); |
265 | unsigned long result = offset & ~31UL; | 264 | unsigned long result = offset & ~31UL; |
266 | unsigned long tmp; | 265 | unsigned long tmp; |
267 | 266 | ||
268 | if (offset >= size) | 267 | if (offset >= size) |
269 | return size; | 268 | return size; |
270 | size -= result; | 269 | size -= result; |
271 | offset &= 31UL; | 270 | offset &= 31UL; |
272 | if (offset) { | 271 | if (offset) { |
273 | tmp = *(p++); | 272 | tmp = *(p++); |
274 | tmp |= ~0UL >> (32-offset); | 273 | tmp |= ~0UL >> (32-offset); |
275 | if (size < 32) | 274 | if (size < 32) |
276 | goto found_first; | 275 | goto found_first; |
277 | if (~tmp) | 276 | if (~tmp) |
278 | goto found_middle; | 277 | goto found_middle; |
279 | size -= 32; | 278 | size -= 32; |
280 | result += 32; | 279 | result += 32; |
281 | } | 280 | } |
282 | while (size & ~31UL) { | 281 | while (size & ~31UL) { |
283 | if (~(tmp = *(p++))) | 282 | if (~(tmp = *(p++))) |
284 | goto found_middle; | 283 | goto found_middle; |
285 | result += 32; | 284 | result += 32; |
286 | size -= 32; | 285 | size -= 32; |
287 | } | 286 | } |
288 | if (!size) | 287 | if (!size) |
289 | return result; | 288 | return result; |
290 | tmp = *p; | 289 | tmp = *p; |
291 | 290 | ||
292 | found_first: | 291 | found_first: |
293 | tmp |= ~0UL >> size; | 292 | tmp |= ~0UL >> size; |
294 | found_middle: | 293 | found_middle: |
295 | return result + ffz(tmp); | 294 | return result + ffz(tmp); |
296 | } | 295 | } |
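find_next_zero_bit() works a word at a time: it ORs low ones into the first, partially covered word so bits below the starting offset are ignored, skips any word that is all ones, and hands the first word containing a zero to ffz(). A small behavioural model that checks one bit per iteration gives the same answers and is easier to follow; it mirrors the header's hard-coded 32-bit word size:

#include <stdio.h>

/* Behavioural model of find_next_zero_bit(): return the index of the
 * first clear bit at or after 'offset', or 'size' if every bit in
 * [offset, size) is set.  The header above does this a word at a time
 * with ffz(); this reference version checks one bit per iteration and
 * only ever uses the low 32 bits of each word, like the CRIS code. */
static int model_find_next_zero_bit(const unsigned long *addr, int size, int offset)
{
        int i;

        for (i = offset; i < size; i++)
                if (!(addr[i >> 5] & (1ul << (i & 0x1f))))
                        return i;
        return size;
}

int main(void)
{
        unsigned long map[2] = { 0xffffffff, 0x0000000f }; /* bits 0..35 set */

        printf("%d\n", model_find_next_zero_bit(map, 64, 0));  /* 36 */
        printf("%d\n", model_find_next_zero_bit(map, 64, 40)); /* 40 */
        printf("%d\n", model_find_next_zero_bit(map, 33, 0));  /* 33: none clear */
        return 0;
}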
297 | 296 | ||
298 | /** | 297 | /** |
299 | * find_next_bit - find the first set bit in a memory region | 298 | * find_next_bit - find the first set bit in a memory region |
300 | * @addr: The address to base the search on | 299 | * @addr: The address to base the search on |
301 | * @offset: The bitnumber to start searching at | 300 | * @offset: The bitnumber to start searching at |
302 | * @size: The maximum size to search | 301 | * @size: The maximum size to search |
303 | */ | 302 | */ |
304 | static __inline__ int find_next_bit(void *addr, int size, int offset) | 303 | static __inline__ int find_next_bit(const unsigned long *addr, int size, int offset) |
305 | { | 304 | { |
306 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | 305 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); |
307 | unsigned long result = offset & ~31UL; | 306 | unsigned long result = offset & ~31UL; |
308 | unsigned long tmp; | 307 | unsigned long tmp; |
309 | 308 | ||
310 | if (offset >= size) | 309 | if (offset >= size) |
311 | return size; | 310 | return size; |
312 | size -= result; | 311 | size -= result; |
313 | offset &= 31UL; | 312 | offset &= 31UL; |
314 | if (offset) { | 313 | if (offset) { |
315 | tmp = *(p++); | 314 | tmp = *(p++); |
316 | tmp &= (~0UL << offset); | 315 | tmp &= (~0UL << offset); |
317 | if (size < 32) | 316 | if (size < 32) |
318 | goto found_first; | 317 | goto found_first; |
319 | if (tmp) | 318 | if (tmp) |
320 | goto found_middle; | 319 | goto found_middle; |
321 | size -= 32; | 320 | size -= 32; |
322 | result += 32; | 321 | result += 32; |
323 | } | 322 | } |
324 | while (size & ~31UL) { | 323 | while (size & ~31UL) { |
325 | if ((tmp = *(p++))) | 324 | if ((tmp = *(p++))) |
326 | goto found_middle; | 325 | goto found_middle; |
327 | result += 32; | 326 | result += 32; |
328 | size -= 32; | 327 | size -= 32; |
329 | } | 328 | } |
330 | if (!size) | 329 | if (!size) |
331 | return result; | 330 | return result; |
332 | tmp = *p; | 331 | tmp = *p; |
333 | 332 | ||
334 | found_first: | 333 | found_first: |
335 | tmp &= (~0UL >> (32 - size)); | 334 | tmp &= (~0UL >> (32 - size)); |
336 | if (tmp == 0UL) /* Are any bits set? */ | 335 | if (tmp == 0UL) /* Are any bits set? */ |
337 | return result + size; /* Nope. */ | 336 | return result + size; /* Nope. */ |
338 | found_middle: | 337 | found_middle: |
339 | return result + __ffs(tmp); | 338 | return result + __ffs(tmp); |
340 | } | 339 | } |
341 | 340 | ||
342 | /** | 341 | /** |
343 | * find_first_zero_bit - find the first zero bit in a memory region | 342 | * find_first_zero_bit - find the first zero bit in a memory region |
344 | * @addr: The address to start the search at | 343 | * @addr: The address to start the search at |
345 | * @size: The maximum size to search | 344 | * @size: The maximum size to search |
346 | * | 345 | * |
347 | * Returns the bit-number of the first zero bit, not the number of the byte | 346 | * Returns the bit-number of the first zero bit, not the number of the byte |
348 | * containing a bit. | 347 | * containing a bit. |
349 | */ | 348 | */ |
350 | 349 | ||
351 | #define find_first_zero_bit(addr, size) \ | 350 | #define find_first_zero_bit(addr, size) \ |
352 | find_next_zero_bit((addr), (size), 0) | 351 | find_next_zero_bit((addr), (size), 0) |
353 | #define find_first_bit(addr, size) \ | 352 | #define find_first_bit(addr, size) \ |
354 | find_next_bit((addr), (size), 0) | 353 | find_next_bit((addr), (size), 0) |
355 | 354 | ||
356 | #define ext2_set_bit test_and_set_bit | 355 | #define ext2_set_bit test_and_set_bit |
357 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) | 356 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) |
358 | #define ext2_clear_bit test_and_clear_bit | 357 | #define ext2_clear_bit test_and_clear_bit |
359 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) | 358 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) |
360 | #define ext2_test_bit test_bit | 359 | #define ext2_test_bit test_bit |
361 | #define ext2_find_first_zero_bit find_first_zero_bit | 360 | #define ext2_find_first_zero_bit find_first_zero_bit |
362 | #define ext2_find_next_zero_bit find_next_zero_bit | 361 | #define ext2_find_next_zero_bit find_next_zero_bit |
363 | 362 | ||
364 | /* Bitmap functions for the minix filesystem. */ | 363 | /* Bitmap functions for the minix filesystem. */ |
365 | #define minix_set_bit(nr,addr) test_and_set_bit(nr,addr) | 364 | #define minix_set_bit(nr,addr) test_and_set_bit(nr,addr) |
366 | #define minix_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | 365 | #define minix_clear_bit(nr,addr) test_and_clear_bit(nr,addr) |
367 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | 366 | #define minix_test_bit(nr,addr) test_bit(nr,addr) |
368 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | 367 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) |
369 | 368 | ||
370 | extern inline int sched_find_first_bit(unsigned long *b) | 369 | extern inline int sched_find_first_bit(const unsigned long *b) |
371 | { | 370 | { |
372 | if (unlikely(b[0])) | 371 | if (unlikely(b[0])) |
373 | return __ffs(b[0]); | 372 | return __ffs(b[0]); |
374 | if (unlikely(b[1])) | 373 | if (unlikely(b[1])) |
375 | return __ffs(b[1]) + 32; | 374 | return __ffs(b[1]) + 32; |
376 | if (unlikely(b[2])) | 375 | if (unlikely(b[2])) |
377 | return __ffs(b[2]) + 64; | 376 | return __ffs(b[2]) + 64; |
378 | if (unlikely(b[3])) | 377 | if (unlikely(b[3])) |
379 | return __ffs(b[3]) + 96; | 378 | return __ffs(b[3]) + 96; |
380 | if (b[4]) | 379 | if (b[4]) |
381 | return __ffs(b[4]) + 128; | 380 | return __ffs(b[4]) + 128; |
382 | return __ffs(b[5]) + 32 + 128; | 381 | return __ffs(b[5]) + 32 + 128; |
383 | } | 382 | } |
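sched_find_first_bit() is an unrolled first-set-bit search over a small fixed bitmap (six 32-bit words, enough for the 2.6 scheduler's priority bitmap), so the common case of a bit in the first word costs a single __ffs(). A standalone loop version with the same result, using __builtin_ctz() as a stand-in for __ffs() (an assumption made for the demo, not something the header relies on):

#include <stdio.h>

/* Loop equivalent of sched_find_first_bit() above: scan six 32-bit
 * words and return the index of the lowest set bit.  Unlike the kernel
 * version, which assumes some bit is set, this one returns 192 when
 * the bitmap is empty. */
static int demo_sched_find_first_bit(const unsigned int b[6])
{
        int i;

        for (i = 0; i < 6; i++)
                if (b[i])
                        return __builtin_ctz(b[i]) + 32 * i;
        return 32 * 6;
}

int main(void)
{
        unsigned int b[6] = { 0, 0, 0x40, 0, 0, 0 };  /* bit 70 set */

        printf("%d\n", demo_sched_find_first_bit(b)); /* prints 70 */
        return 0;
}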
384 | 383 | ||
385 | #endif /* __KERNEL__ */ | 384 | #endif /* __KERNEL__ */ |
386 | 385 |
include/asm-cris/kmap_types.h
1 | #ifndef _ASM_KMAP_TYPES_H | 1 | #ifndef _ASM_KMAP_TYPES_H |
2 | #define _ASM_KMAP_TYPES_H | 2 | #define _ASM_KMAP_TYPES_H |
3 | 3 | ||
4 | /* Dummy header just to define km_type. None of this | 4 | /* Dummy header just to define km_type. None of this |
5 | * is actually used on cris. | 5 | * is actually used on cris. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | enum km_type { | 8 | enum km_type { |
9 | KM_BOUNCE_READ, | 9 | KM_BOUNCE_READ, |
10 | KM_SKB_SUNRPC_DATA, | 10 | KM_SKB_SUNRPC_DATA, |
11 | KM_SKB_DATA_SOFTIRQ, | 11 | KM_SKB_DATA_SOFTIRQ, |
12 | KM_USER0, | 12 | KM_USER0, |
13 | KM_USER1, | 13 | KM_USER1, |
14 | KM_BIO_SRC_IRQ, | 14 | KM_BIO_SRC_IRQ, |
15 | KM_BIO_DST_IRQ, | 15 | KM_BIO_DST_IRQ, |
16 | KM_PTE0, | 16 | KM_PTE0, |
17 | KM_PTE1, | 17 | KM_PTE1, |
18 | KM_IRQ0, | 18 | KM_IRQ0, |
19 | KM_IRQ1, | 19 | KM_IRQ1, |
20 | KM_CRYPTO_USER, | 20 | KM_SOFTIRQ0, |
21 | KM_CRYPTO_SOFTIRQ, | 21 | KM_SOFTIRQ1, |
22 | KM_TYPE_NR | 22 | KM_TYPE_NR |
23 | }; | 23 | }; |
24 | 24 | ||
25 | #endif | 25 | #endif |
26 | 26 |
include/asm-cris/page.h
1 | #ifndef _CRIS_PAGE_H | 1 | #ifndef _CRIS_PAGE_H |
2 | #define _CRIS_PAGE_H | 2 | #define _CRIS_PAGE_H |
3 | 3 | ||
4 | #include <linux/config.h> | 4 | #include <linux/config.h> |
5 | #include <asm/arch/page.h> | 5 | #include <asm/arch/page.h> |
6 | 6 | ||
7 | /* PAGE_SHIFT determines the page size */ | 7 | /* PAGE_SHIFT determines the page size */ |
8 | #define PAGE_SHIFT 13 | 8 | #define PAGE_SHIFT 13 |
9 | #ifndef __ASSEMBLY__ | 9 | #ifndef __ASSEMBLY__ |
10 | #define PAGE_SIZE (1UL << PAGE_SHIFT) | 10 | #define PAGE_SIZE (1UL << PAGE_SHIFT) |
11 | #else | 11 | #else |
12 | #define PAGE_SIZE (1 << PAGE_SHIFT) | 12 | #define PAGE_SIZE (1 << PAGE_SHIFT) |
13 | #endif | 13 | #endif |
14 | #define PAGE_MASK (~(PAGE_SIZE-1)) | 14 | #define PAGE_MASK (~(PAGE_SIZE-1)) |
15 | 15 | ||
16 | #ifdef __KERNEL__ | 16 | #ifdef __KERNEL__ |
17 | 17 | ||
18 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) | 18 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) |
19 | #define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE) | 19 | #define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE) |
20 | 20 | ||
21 | #define clear_user_page(page, vaddr, pg) clear_page(page) | 21 | #define clear_user_page(page, vaddr, pg) clear_page(page) |
22 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) | 22 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) |
23 | 23 | ||
24 | #define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) | 24 | #define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) |
25 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE | 25 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE |
26 | 26 | ||
27 | /* | 27 | /* |
28 | * These are used to make use of C type-checking.. | 28 | * These are used to make use of C type-checking.. |
29 | */ | 29 | */ |
30 | #ifndef __ASSEMBLY__ | 30 | #ifndef __ASSEMBLY__ |
31 | typedef struct { unsigned long pte; } pte_t; | 31 | typedef struct { unsigned long pte; } pte_t; |
32 | typedef struct { unsigned long pmd; } pmd_t; | ||
33 | typedef struct { unsigned long pgd; } pgd_t; | 32 | typedef struct { unsigned long pgd; } pgd_t; |
34 | typedef struct { unsigned long pgprot; } pgprot_t; | 33 | typedef struct { unsigned long pgprot; } pgprot_t; |
35 | #endif | 34 | #endif |
36 | 35 | ||
37 | #define pte_val(x) ((x).pte) | 36 | #define pte_val(x) ((x).pte) |
38 | #define pmd_val(x) ((x).pmd) | ||
39 | #define pgd_val(x) ((x).pgd) | 37 | #define pgd_val(x) ((x).pgd) |
40 | #define pgprot_val(x) ((x).pgprot) | 38 | #define pgprot_val(x) ((x).pgprot) |
41 | 39 | ||
42 | #define __pte(x) ((pte_t) { (x) } ) | 40 | #define __pte(x) ((pte_t) { (x) } ) |
43 | #define __pmd(x) ((pmd_t) { (x) } ) | ||
44 | #define __pgd(x) ((pgd_t) { (x) } ) | 41 | #define __pgd(x) ((pgd_t) { (x) } ) |
45 | #define __pgprot(x) ((pgprot_t) { (x) } ) | 42 | #define __pgprot(x) ((pgprot_t) { (x) } ) |
46 | 43 | ||
47 | /* On CRIS the PFN numbers don't start at 0 so we have to compensate */ | 44 | /* On CRIS the PFN numbers don't start at 0 so we have to compensate */ |
48 | /* for that before indexing into the page table starting at mem_map */ | 45 | /* for that before indexing into the page table starting at mem_map */ |
49 | #define pfn_to_page(pfn) (mem_map + ((pfn) - (PAGE_OFFSET >> PAGE_SHIFT))) | 46 | #define pfn_to_page(pfn) (mem_map + ((pfn) - (PAGE_OFFSET >> PAGE_SHIFT))) |
50 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map) + (PAGE_OFFSET >> PAGE_SHIFT)) | 47 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map) + (PAGE_OFFSET >> PAGE_SHIFT)) |
51 | #define pfn_valid(pfn) (((pfn) - (PAGE_OFFSET >> PAGE_SHIFT)) < max_mapnr) | 48 | #define pfn_valid(pfn) (((pfn) - (PAGE_OFFSET >> PAGE_SHIFT)) < max_mapnr) |
52 | 49 | ||
53 | /* to index into the page map. our pages all start at physical addr PAGE_OFFSET so | 50 | /* to index into the page map. our pages all start at physical addr PAGE_OFFSET so |
54 | * we can let the map start there. notice that we subtract PAGE_OFFSET because | 51 | * we can let the map start there. notice that we subtract PAGE_OFFSET because |
55 | * we start our mem_map there - in other ports they map mem_map physically and | 52 | * we start our mem_map there - in other ports they map mem_map physically and |
56 | * use __pa instead. in our system both the physical and virtual address of DRAM | 53 | * use __pa instead. in our system both the physical and virtual address of DRAM |
57 | * is too high to let mem_map start at 0, so we do it this way instead (similar | 54 | * is too high to let mem_map start at 0, so we do it this way instead (similar |
58 | * to arm and m68k I think) | 55 | * to arm and m68k I think) |
59 | */ | 56 | */ |
60 | 57 | ||
61 | #define virt_to_page(kaddr) (mem_map + (((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT)) | 58 | #define virt_to_page(kaddr) (mem_map + (((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT)) |
62 | #define VALID_PAGE(page) (((page) - mem_map) < max_mapnr) | 59 | #define VALID_PAGE(page) (((page) - mem_map) < max_mapnr) |
63 | #define virt_addr_valid(kaddr) pfn_valid((unsigned)(kaddr) >> PAGE_SHIFT) | 60 | #define virt_addr_valid(kaddr) pfn_valid((unsigned)(kaddr) >> PAGE_SHIFT) |
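Because CRIS DRAM does not sit at physical address 0, mem_map[0] corresponds to pfn PAGE_OFFSET >> PAGE_SHIFT, and pfn_to_page()/page_to_pfn() above subtract or add that constant. Assuming PAGE_OFFSET is 0xc0000000 (the value lives in the arch page.h, which this patch does not show, so treat it as an assumption), the compensation is 0xc0000000 >> 13 = 0x60000, so pfn 0x60000 maps to mem_map[0], pfn 0x60005 to mem_map[5], and so on:

#include <stdio.h>

/* Worked arithmetic for the pfn <-> mem_map index compensation above.
 * DEMO_PAGE_OFFSET = 0xc0000000 is an assumption; PAGE_SHIFT = 13 is
 * taken from this file. */
#define DEMO_PAGE_OFFSET 0xc0000000ul
#define DEMO_PAGE_SHIFT  13

int main(void)
{
        unsigned long first_pfn = DEMO_PAGE_OFFSET >> DEMO_PAGE_SHIFT;
        unsigned long pfn = first_pfn + 5;

        printf("first pfn     = %#lx\n", first_pfn);       /* 0x60000 */
        printf("mem_map index = %lu\n", pfn - first_pfn);  /* 5 */
        return 0;
}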
64 | 61 | ||
65 | /* convert a page (based on mem_map and forward) to a physical address | 62 | /* convert a page (based on mem_map and forward) to a physical address |
66 | * do this by figuring out the virtual address and then use __pa | 63 | * do this by figuring out the virtual address and then use __pa |
67 | */ | 64 | */ |
68 | 65 | ||
69 | #define page_to_phys(page) __pa((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) | 66 | #define page_to_phys(page) __pa((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) |
70 | 67 | ||
71 | /* to align the pointer to the (next) page boundary */ | 68 | /* to align the pointer to the (next) page boundary */ |
72 | #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) | 69 | #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) |
73 | 70 | ||
74 | #ifndef __ASSEMBLY__ | 71 | #ifndef __ASSEMBLY__ |
75 | |||
76 | #define BUG() do { \ | ||
77 | printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ | ||
78 | } while (0) | ||
79 | 72 | ||
80 | /* Pure 2^n version of get_order */ | 73 | /* Pure 2^n version of get_order */ |
81 | static inline int get_order(unsigned long size) | 74 | static inline int get_order(unsigned long size) |
82 | { | 75 | { |
83 | int order; | 76 | int order; |
84 | 77 | ||
85 | size = (size-1) >> (PAGE_SHIFT-1); | 78 | size = (size-1) >> (PAGE_SHIFT-1); |
86 | order = -1; | 79 | order = -1; |
87 | do { | 80 | do { |
88 | size >>= 1; | 81 | size >>= 1; |
89 | order++; | 82 | order++; |
90 | } while (size); | 83 | } while (size); |
91 | return order; | 84 | return order; |
92 | } | 85 | } |
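get_order() returns the smallest order such that 2^order pages of 8 KB cover the requested byte count; with PAGE_SHIFT = 13 the loop gives get_order(1) = 0, get_order(8192) = 0, get_order(8193) = 1 and get_order(20000) = 2 (four 8 KB pages). A standalone copy of the same loop with the shift hard-coded reproduces this:

#include <stdio.h>

#define DEMO_PAGE_SHIFT 13   /* CRIS page size: 8 KB, as defined above */

/* Standalone copy of the get_order() loop above, for checking the
 * arithmetic outside the kernel. */
static int demo_get_order(unsigned long size)
{
        int order = -1;

        size = (size - 1) >> (DEMO_PAGE_SHIFT - 1);
        do {
                size >>= 1;
                order++;
        } while (size);
        return order;
}

int main(void)
{
        printf("%d %d %d %d\n",
               demo_get_order(1),      /* 0 */
               demo_get_order(8192),   /* 0 */
               demo_get_order(8193),   /* 1 */
               demo_get_order(20000)); /* 2 */
        return 0;
}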
93 | #endif /* __ASSEMBLY__ */ | 86 | #endif /* __ASSEMBLY__ */ |
94 | 87 | ||
95 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ | 88 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ |
96 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | 89 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) |
97 | 90 | ||
98 | #endif /* __KERNEL__ */ | 91 | #endif /* __KERNEL__ */ |
99 | 92 | ||
100 | #endif /* _CRIS_PAGE_H */ | 93 | #endif /* _CRIS_PAGE_H */ |
101 | 94 | ||
102 | 95 |
include/asm-cris/pgalloc.h
1 | #ifndef _CRIS_PGALLOC_H | 1 | #ifndef _CRIS_PGALLOC_H |
2 | #define _CRIS_PGALLOC_H | 2 | #define _CRIS_PGALLOC_H |
3 | 3 | ||
4 | #include <linux/threads.h> | 4 | #include <linux/threads.h> |
5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
6 | 6 | ||
7 | #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte) | 7 | #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte) |
8 | #define pmd_populate(mm, pmd, pte) pmd_set(pmd, page_address(pte)) | 8 | #define pmd_populate(mm, pmd, pte) pmd_set(pmd, page_address(pte)) |
9 | 9 | ||
10 | /* | 10 | /* |
11 | * Allocate and free page tables. | 11 | * Allocate and free page tables. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | extern inline pgd_t *pgd_alloc (struct mm_struct *mm) | 14 | extern inline pgd_t *pgd_alloc (struct mm_struct *mm) |
15 | { | 15 | { |
16 | return (pgd_t *)get_zeroed_page(GFP_KERNEL); | 16 | return (pgd_t *)get_zeroed_page(GFP_KERNEL); |
17 | } | 17 | } |
18 | 18 | ||
19 | extern inline void pgd_free (pgd_t *pgd) | 19 | extern inline void pgd_free (pgd_t *pgd) |
20 | { | 20 | { |
21 | free_page((unsigned long)pgd); | 21 | free_page((unsigned long)pgd); |
22 | } | 22 | } |
23 | 23 | ||
24 | extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | 24 | extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) |
25 | { | 25 | { |
26 | pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); | 26 | pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); |
27 | return pte; | 27 | return pte; |
28 | } | 28 | } |
29 | 29 | ||
30 | extern inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) | 30 | extern inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) |
31 | { | 31 | { |
32 | struct page *pte; | 32 | struct page *pte; |
33 | pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); | 33 | pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); |
34 | return pte; | 34 | return pte; |
35 | } | 35 | } |
36 | 36 | ||
37 | extern inline void pte_free_kernel(pte_t *pte) | 37 | extern inline void pte_free_kernel(pte_t *pte) |
38 | { | 38 | { |
39 | free_page((unsigned long)pte); | 39 | free_page((unsigned long)pte); |
40 | } | 40 | } |
41 | 41 | ||
42 | extern inline void pte_free(struct page *pte) | 42 | extern inline void pte_free(struct page *pte) |
43 | { | 43 | { |
44 | __free_page(pte); | 44 | __free_page(pte); |
45 | } | 45 | } |
46 | 46 | ||
47 | 47 | ||
48 | #define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) | 48 | #define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) |
49 | 49 | ||
50 | /* | ||
51 | * We don't have any real pmd's, and this code never triggers because | ||
52 | * the pgd will always be present.. | ||
53 | */ | ||
54 | |||
55 | #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) | ||
56 | #define pmd_free(x) do { } while (0) | ||
57 | #define __pmd_free_tlb(tlb,x) do { } while (0) | ||
58 | #define pgd_populate(mm, pmd, pte) BUG() | ||
59 | |||
60 | #define check_pgt_cache() do { } while (0) | 50 | #define check_pgt_cache() do { } while (0) |
61 | 51 | ||
62 | #endif | 52 | #endif |
63 | 53 |
include/asm-cris/pgtable.h
1 | /* | 1 | /* |
2 | * CRIS pgtable.h - macros and functions to manipulate page tables. | 2 | * CRIS pgtable.h - macros and functions to manipulate page tables. |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #ifndef _CRIS_PGTABLE_H | 5 | #ifndef _CRIS_PGTABLE_H |
6 | #define _CRIS_PGTABLE_H | 6 | #define _CRIS_PGTABLE_H |
7 | 7 | ||
8 | #include <asm-generic/4level-fixup.h> | 8 | #include <asm/page.h> |
9 | #include <asm-generic/pgtable-nopmd.h> | ||
9 | 10 | ||
10 | #ifndef __ASSEMBLY__ | 11 | #ifndef __ASSEMBLY__ |
11 | #include <linux/config.h> | 12 | #include <linux/config.h> |
12 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
13 | #include <asm/mmu.h> | 14 | #include <asm/mmu.h> |
14 | #endif | 15 | #endif |
15 | #include <asm/arch/pgtable.h> | 16 | #include <asm/arch/pgtable.h> |
16 | 17 | ||
17 | /* | 18 | /* |
18 | * The Linux memory management assumes a three-level page table setup. On | 19 | * The Linux memory management assumes a three-level page table setup. On |
19 | * CRIS, we use that, but "fold" the mid level into the top-level page | 20 | * CRIS, we use that, but "fold" the mid level into the top-level page |
20 | * table. Since the MMU TLB is software loaded through an interrupt, it | 21 | * table. Since the MMU TLB is software loaded through an interrupt, it |
21 | * supports any page table structure, so we could have used a three-level | 22 | * supports any page table structure, so we could have used a three-level |
22 | * setup, but for the amounts of memory we normally use, a two-level is | 23 | * setup, but for the amounts of memory we normally use, a two-level is |
23 | * probably more efficient. | 24 | * probably more efficient. |
24 | * | 25 | * |
25 | * This file contains the functions and defines necessary to modify and use | 26 | * This file contains the functions and defines necessary to modify and use |
26 | * the CRIS page table tree. | 27 | * the CRIS page table tree. |
27 | */ | 28 | */ |
28 | #ifndef __ASSEMBLY__ | 29 | #ifndef __ASSEMBLY__ |
29 | extern void paging_init(void); | 30 | extern void paging_init(void); |
30 | #endif | 31 | #endif |
31 | 32 | ||
32 | /* Certain architectures need to do special things when pte's | 33 | /* Certain architectures need to do special things when pte's |
33 | * within a page table are directly modified. Thus, the following | 34 | * within a page table are directly modified. Thus, the following |
34 | * hook is made available. | 35 | * hook is made available. |
35 | */ | 36 | */ |
36 | #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) | 37 | #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) |
37 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) | 38 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) |
38 | 39 | ||
39 | /* | 40 | /* |
40 | * (pmds are folded into pgds so this doesn't get actually called, | 41 | * (pmds are folded into pgds so this doesn't get actually called, |
41 | * but the define is needed for a generic inline function.) | 42 | * but the define is needed for a generic inline function.) |
42 | */ | 43 | */ |
43 | #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) | 44 | #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) |
44 | #define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval) | 45 | #define set_pgu(pudptr, pudval) (*(pudptr) = pudval) |
45 | 46 | ||
46 | /* PMD_SHIFT determines the size of the area a second-level page table can | 47 | /* PGDIR_SHIFT determines the size of the area a second-level page table can |
47 | * map. It is equal to the page size times the number of PTE's that fit in | 48 | * map. It is equal to the page size times the number of PTE's that fit in |
48 | * a PMD page. A PTE is 4-bytes in CRIS. Hence the following number. | 49 | * a PMD page. A PTE is 4-bytes in CRIS. Hence the following number. |
49 | */ | 50 | */ |
50 | 51 | ||
51 | #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2)) | 52 | #define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2)) |
52 | #define PMD_SIZE (1UL << PMD_SHIFT) | ||
53 | #define PMD_MASK (~(PMD_SIZE-1)) | ||
54 | |||
55 | /* PGDIR_SHIFT determines what a third-level page table entry can map. | ||
56 | * Since we fold into a two-level structure, this is the same as PMD_SHIFT. | ||
57 | */ | ||
58 | |||
59 | #define PGDIR_SHIFT PMD_SHIFT | ||
60 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | 53 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
61 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 54 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) |
62 | 55 | ||
63 | /* | 56 | /* |
64 | * entries per page directory level: we use a two-level, so | 57 | * entries per page directory level: we use a two-level, so |
65 | * we don't really have any PMD directory physically. | 58 | * we don't really have any PMD directory physically. |
66 | * pointers are 4 bytes so we can use the page size and | 59 | * pointers are 4 bytes so we can use the page size and |
67 | * divide it by 4 (shift by 2). | 60 | * divide it by 4 (shift by 2). |
68 | */ | 61 | */ |
69 | #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2)) | 62 | #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2)) |
70 | #define PTRS_PER_PMD 1 | ||
71 | #define PTRS_PER_PGD (1UL << (PAGE_SHIFT-2)) | 63 | #define PTRS_PER_PGD (1UL << (PAGE_SHIFT-2)) |
72 | 64 | ||
73 | /* calculate how many PGD entries a user-level program can use | 65 | /* calculate how many PGD entries a user-level program can use |
74 | * the first mappable virtual address is 0 | 66 | * the first mappable virtual address is 0 |
75 | * (TASK_SIZE is the maximum virtual address space) | 67 | * (TASK_SIZE is the maximum virtual address space) |
76 | */ | 68 | */ |
77 | 69 | ||
78 | #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) | 70 | #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) |
79 | #define FIRST_USER_ADDRESS 0 | 71 | #define FIRST_USER_ADDRESS 0 |
80 | 72 | ||
81 | /* zero page used for uninitialized stuff */ | 73 | /* zero page used for uninitialized stuff */ |
82 | #ifndef __ASSEMBLY__ | 74 | #ifndef __ASSEMBLY__ |
83 | extern unsigned long empty_zero_page; | 75 | extern unsigned long empty_zero_page; |
84 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | 76 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) |
85 | #endif | 77 | #endif |
86 | 78 | ||
87 | /* number of bits that fit into a memory pointer */ | 79 | /* number of bits that fit into a memory pointer */ |
88 | #define BITS_PER_PTR (8*sizeof(unsigned long)) | 80 | #define BITS_PER_PTR (8*sizeof(unsigned long)) |
89 | 81 | ||
90 | /* to align the pointer to a pointer address */ | 82 | /* to align the pointer to a pointer address */ |
91 | #define PTR_MASK (~(sizeof(void*)-1)) | 83 | #define PTR_MASK (~(sizeof(void*)-1)) |
92 | 84 | ||
93 | /* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */ | 85 | /* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */ |
94 | /* 64-bit machines, beware! SRB. */ | 86 | /* 64-bit machines, beware! SRB. */ |
95 | #define SIZEOF_PTR_LOG2 2 | 87 | #define SIZEOF_PTR_LOG2 2 |
96 | 88 | ||
97 | /* to find an entry in a page-table */ | 89 | /* to find an entry in a page-table */ |
98 | #define PAGE_PTR(address) \ | 90 | #define PAGE_PTR(address) \ |
99 | ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK) | 91 | ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK) |
100 | 92 | ||
101 | /* to set the page-dir */ | 93 | /* to set the page-dir */ |
102 | #define SET_PAGE_DIR(tsk,pgdir) | 94 | #define SET_PAGE_DIR(tsk,pgdir) |
103 | 95 | ||
104 | #define pte_none(x) (!pte_val(x)) | 96 | #define pte_none(x) (!pte_val(x)) |
105 | #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) | 97 | #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) |
106 | #define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0) | 98 | #define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0) |
107 | 99 | ||
108 | #define pmd_none(x) (!pmd_val(x)) | 100 | #define pmd_none(x) (!pmd_val(x)) |
109 | /* by removing the _PAGE_KERNEL bit from the comparision, the same pmd_bad | 101 | /* by removing the _PAGE_KERNEL bit from the comparision, the same pmd_bad |
110 | * works for both _PAGE_TABLE and _KERNPG_TABLE pmd entries. | 102 | * works for both _PAGE_TABLE and _KERNPG_TABLE pmd entries. |
111 | */ | 103 | */ |
112 | #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_KERNEL)) != _PAGE_TABLE) | 104 | #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_KERNEL)) != _PAGE_TABLE) |
113 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) | 105 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) |
114 | #define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0) | 106 | #define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0) |
115 | 107 | ||
116 | #ifndef __ASSEMBLY__ | 108 | #ifndef __ASSEMBLY__ |
117 | 109 | ||
118 | /* | 110 | /* |
119 | * The "pgd_xxx()" functions here are trivial for a folded two-level | ||
120 | * setup: the pgd is never bad, and a pmd always exists (as it's folded | ||
121 | * into the pgd entry) | ||
122 | */ | ||
123 | extern inline int pgd_none(pgd_t pgd) { return 0; } | ||
124 | extern inline int pgd_bad(pgd_t pgd) { return 0; } | ||
125 | extern inline int pgd_present(pgd_t pgd) { return 1; } | ||
126 | extern inline void pgd_clear(pgd_t * pgdp) { } | ||
127 | |||
128 | /* | ||
129 | * The following only work if pte_present() is true. | 111 | * The following only work if pte_present() is true. |
130 | * Undefined behaviour if not.. | 112 | * Undefined behaviour if not.. |
131 | */ | 113 | */ |
132 | 114 | ||
133 | extern inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_READ; } | 115 | extern inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_READ; } |
134 | extern inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } | 116 | extern inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } |
135 | extern inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_READ; } | 117 | extern inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_READ; } |
136 | extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; } | 118 | extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; } |
137 | extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } | 119 | extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } |
138 | extern inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } | 120 | extern inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } |
139 | 121 | ||
140 | extern inline pte_t pte_wrprotect(pte_t pte) | 122 | extern inline pte_t pte_wrprotect(pte_t pte) |
141 | { | 123 | { |
142 | pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); | 124 | pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); |
143 | return pte; | 125 | return pte; |
144 | } | 126 | } |
145 | 127 | ||
146 | extern inline pte_t pte_rdprotect(pte_t pte) | 128 | extern inline pte_t pte_rdprotect(pte_t pte) |
147 | { | 129 | { |
148 | pte_val(pte) &= ~(_PAGE_READ | _PAGE_SILENT_READ); | 130 | pte_val(pte) &= ~(_PAGE_READ | _PAGE_SILENT_READ); |
149 | return pte; | 131 | return pte; |
150 | } | 132 | } |
151 | 133 | ||
152 | extern inline pte_t pte_exprotect(pte_t pte) | 134 | extern inline pte_t pte_exprotect(pte_t pte) |
153 | { | 135 | { |
154 | pte_val(pte) &= ~(_PAGE_READ | _PAGE_SILENT_READ); | 136 | pte_val(pte) &= ~(_PAGE_READ | _PAGE_SILENT_READ); |
155 | return pte; | 137 | return pte; |
156 | } | 138 | } |
157 | 139 | ||
158 | extern inline pte_t pte_mkclean(pte_t pte) | 140 | extern inline pte_t pte_mkclean(pte_t pte) |
159 | { | 141 | { |
160 | pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE); | 142 | pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE); |
161 | return pte; | 143 | return pte; |
162 | } | 144 | } |
163 | 145 | ||
164 | extern inline pte_t pte_mkold(pte_t pte) | 146 | extern inline pte_t pte_mkold(pte_t pte) |
165 | { | 147 | { |
166 | pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ); | 148 | pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ); |
167 | return pte; | 149 | return pte; |
168 | } | 150 | } |
169 | 151 | ||
170 | extern inline pte_t pte_mkwrite(pte_t pte) | 152 | extern inline pte_t pte_mkwrite(pte_t pte) |
171 | { | 153 | { |
172 | pte_val(pte) |= _PAGE_WRITE; | 154 | pte_val(pte) |= _PAGE_WRITE; |
173 | if (pte_val(pte) & _PAGE_MODIFIED) | 155 | if (pte_val(pte) & _PAGE_MODIFIED) |
174 | pte_val(pte) |= _PAGE_SILENT_WRITE; | 156 | pte_val(pte) |= _PAGE_SILENT_WRITE; |
175 | return pte; | 157 | return pte; |
176 | } | 158 | } |
177 | 159 | ||
178 | extern inline pte_t pte_mkread(pte_t pte) | 160 | extern inline pte_t pte_mkread(pte_t pte) |
179 | { | 161 | { |
180 | pte_val(pte) |= _PAGE_READ; | 162 | pte_val(pte) |= _PAGE_READ; |
181 | if (pte_val(pte) & _PAGE_ACCESSED) | 163 | if (pte_val(pte) & _PAGE_ACCESSED) |
182 | pte_val(pte) |= _PAGE_SILENT_READ; | 164 | pte_val(pte) |= _PAGE_SILENT_READ; |
183 | return pte; | 165 | return pte; |
184 | } | 166 | } |
185 | 167 | ||
186 | extern inline pte_t pte_mkexec(pte_t pte) | 168 | extern inline pte_t pte_mkexec(pte_t pte) |
187 | { | 169 | { |
188 | pte_val(pte) |= _PAGE_READ; | 170 | pte_val(pte) |= _PAGE_READ; |
189 | if (pte_val(pte) & _PAGE_ACCESSED) | 171 | if (pte_val(pte) & _PAGE_ACCESSED) |
190 | pte_val(pte) |= _PAGE_SILENT_READ; | 172 | pte_val(pte) |= _PAGE_SILENT_READ; |
191 | return pte; | 173 | return pte; |
192 | } | 174 | } |
193 | 175 | ||
194 | extern inline pte_t pte_mkdirty(pte_t pte) | 176 | extern inline pte_t pte_mkdirty(pte_t pte) |
195 | { | 177 | { |
196 | pte_val(pte) |= _PAGE_MODIFIED; | 178 | pte_val(pte) |= _PAGE_MODIFIED; |
197 | if (pte_val(pte) & _PAGE_WRITE) | 179 | if (pte_val(pte) & _PAGE_WRITE) |
198 | pte_val(pte) |= _PAGE_SILENT_WRITE; | 180 | pte_val(pte) |= _PAGE_SILENT_WRITE; |
199 | return pte; | 181 | return pte; |
200 | } | 182 | } |
201 | 183 | ||
202 | extern inline pte_t pte_mkyoung(pte_t pte) | 184 | extern inline pte_t pte_mkyoung(pte_t pte) |
203 | { | 185 | { |
204 | pte_val(pte) |= _PAGE_ACCESSED; | 186 | pte_val(pte) |= _PAGE_ACCESSED; |
205 | if (pte_val(pte) & _PAGE_READ) | 187 | if (pte_val(pte) & _PAGE_READ) |
206 | { | 188 | { |
207 | pte_val(pte) |= _PAGE_SILENT_READ; | 189 | pte_val(pte) |= _PAGE_SILENT_READ; |
208 | if ((pte_val(pte) & (_PAGE_WRITE | _PAGE_MODIFIED)) == | 190 | if ((pte_val(pte) & (_PAGE_WRITE | _PAGE_MODIFIED)) == |
209 | (_PAGE_WRITE | _PAGE_MODIFIED)) | 191 | (_PAGE_WRITE | _PAGE_MODIFIED)) |
210 | pte_val(pte) |= _PAGE_SILENT_WRITE; | 192 | pte_val(pte) |= _PAGE_SILENT_WRITE; |
211 | } | 193 | } |
212 | return pte; | 194 | return pte; |
213 | } | 195 | } |
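A pattern worth noting in the pte_mk*() helpers above: the _PAGE_SILENT_READ/_PAGE_SILENT_WRITE bits are only turned on once both the permission bit and the corresponding accessed/modified bit are already set. That is the usual way of emulating accessed and dirty tracking with a software-loaded TLB: a writable but still-clean page keeps its silent-write bit clear, so the first store faults and pte_mkdirty() can record the modification. A sketch of that propagation with made-up bit values (the real _PAGE_* constants live in asm/arch/pgtable.h, which this patch does not touch):

#include <stdio.h>

/* Sketch of the dirty-bit propagation in pte_mkdirty() above.  The bit
 * values here are invented for the demo; the real _PAGE_* constants
 * come from asm/arch/pgtable.h. */
#define P_WRITE        0x02
#define P_MODIFIED     0x04
#define P_SILENT_WRITE 0x08

static unsigned long demo_pte_mkdirty(unsigned long pte)
{
        pte |= P_MODIFIED;
        if (pte & P_WRITE)
                pte |= P_SILENT_WRITE;   /* only now may the TLB allow writes */
        return pte;
}

int main(void)
{
        unsigned long pte = P_WRITE;                 /* writable but clean */

        printf("before: silent write = %d\n", !!(pte & P_SILENT_WRITE)); /* 0 */
        pte = demo_pte_mkdirty(pte);                 /* after the first write fault */
        printf("after:  silent write = %d\n", !!(pte & P_SILENT_WRITE)); /* 1 */
        return 0;
}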
214 | 196 | ||
215 | /* | 197 | /* |
216 | * Conversion functions: convert a page and protection to a page entry, | 198 | * Conversion functions: convert a page and protection to a page entry, |
217 | * and a page entry and page directory to the page they refer to. | 199 | * and a page entry and page directory to the page they refer to. |
218 | */ | 200 | */ |
219 | 201 | ||
220 | /* What actually goes as arguments to the various functions is less than | 202 | /* What actually goes as arguments to the various functions is less than |
221 | * obvious, but a rule of thumb is that struct pages go as struct page *, | 203 | * obvious, but a rule of thumb is that struct pages go as struct page *, |
222 | * really physical DRAM addresses are unsigned long's, and DRAM "virtual" | 204 | * really physical DRAM addresses are unsigned long's, and DRAM "virtual" |
223 | * addresses (the 0xc0xxxxxx's) go as void *'s. | 205 | * addresses (the 0xc0xxxxxx's) go as void *'s. |
224 | */ | 206 | */ |
225 | 207 | ||
226 | extern inline pte_t __mk_pte(void * page, pgprot_t pgprot) | 208 | extern inline pte_t __mk_pte(void * page, pgprot_t pgprot) |
227 | { | 209 | { |
228 | pte_t pte; | 210 | pte_t pte; |
229 | /* the PTE needs a physical address */ | 211 | /* the PTE needs a physical address */ |
230 | pte_val(pte) = __pa(page) | pgprot_val(pgprot); | 212 | pte_val(pte) = __pa(page) | pgprot_val(pgprot); |
231 | return pte; | 213 | return pte; |
232 | } | 214 | } |
233 | 215 | ||
234 | #define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot)) | 216 | #define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot)) |
235 | 217 | ||
236 | #define mk_pte_phys(physpage, pgprot) \ | 218 | #define mk_pte_phys(physpage, pgprot) \ |
237 | ({ \ | 219 | ({ \ |
238 | pte_t __pte; \ | 220 | pte_t __pte; \ |
239 | \ | 221 | \ |
240 | pte_val(__pte) = (physpage) + pgprot_val(pgprot); \ | 222 | pte_val(__pte) = (physpage) + pgprot_val(pgprot); \ |
241 | __pte; \ | 223 | __pte; \ |
242 | }) | 224 | }) |
243 | 225 | ||
244 | extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | 226 | extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
245 | { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; } | 227 | { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; } |
246 | 228 | ||
247 | 229 | ||
248 | /* pte_val refers to a page in the 0x4xxxxxxx physical DRAM interval | 230 | /* pte_val refers to a page in the 0x4xxxxxxx physical DRAM interval |
249 | * __pte_page(pte_val) refers to the "virtual" DRAM interval | 231 | * __pte_page(pte_val) refers to the "virtual" DRAM interval |
250 | * pte_pagenr refers to the page-number counted starting from the virtual DRAM start | 232 | * pte_pagenr refers to the page-number counted starting from the virtual DRAM start |
251 | */ | 233 | */ |
252 | 234 | ||
253 | extern inline unsigned long __pte_page(pte_t pte) | 235 | extern inline unsigned long __pte_page(pte_t pte) |
254 | { | 236 | { |
255 | /* the PTE contains a physical address */ | 237 | /* the PTE contains a physical address */ |
256 | return (unsigned long)__va(pte_val(pte) & PAGE_MASK); | 238 | return (unsigned long)__va(pte_val(pte) & PAGE_MASK); |
257 | } | 239 | } |
258 | 240 | ||
259 | #define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT) | 241 | #define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT) |
260 | 242 | ||
261 | /* permanent address of a page */ | 243 | /* permanent address of a page */ |
262 | 244 | ||
263 | #define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT)) | 245 | #define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT)) |
264 | #define pte_page(pte) (mem_map+pte_pagenr(pte)) | 246 | #define pte_page(pte) (mem_map+pte_pagenr(pte)) |
265 | 247 | ||
266 | /* only the pte's themselves need to point to physical DRAM (see above) | 248 | /* only the pte's themselves need to point to physical DRAM (see above) |
267 | * the pagetable links are purely handled within the kernel SW and thus | 249 | * the pagetable links are purely handled within the kernel SW and thus |
268 | * don't need the __pa and __va transformations. | 250 | * don't need the __pa and __va transformations. |
269 | */ | 251 | */ |
270 | 252 | ||
271 | extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep) | 253 | extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep) |
272 | { pmd_val(*pmdp) = _PAGE_TABLE | (unsigned long) ptep; } | 254 | { pmd_val(*pmdp) = _PAGE_TABLE | (unsigned long) ptep; } |
273 | 255 | ||
274 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) | 256 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) |
275 | #define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) | 257 | #define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) |
276 | 258 | ||
277 | /* to find an entry in a page-table-directory. */ | 259 | /* to find an entry in a page-table-directory. */ |
278 | #define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) | 260 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) |
279 | 261 | ||
280 | /* to find an entry in a page-table-directory */ | 262 | /* to find an entry in a page-table-directory */ |
281 | extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address) | 263 | extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address) |
282 | { | 264 | { |
283 | return mm->pgd + pgd_index(address); | 265 | return mm->pgd + pgd_index(address); |
284 | } | 266 | } |
285 | 267 | ||
286 | /* to find an entry in a kernel page-table-directory */ | 268 | /* to find an entry in a kernel page-table-directory */ |
287 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | 269 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) |
288 | 270 | ||
289 | /* Find an entry in the second-level page table.. */ | ||
290 | extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) | ||
291 | { | ||
292 | return (pmd_t *) dir; | ||
293 | } | ||
294 | |||
295 | /* Find an entry in the third-level page table.. */ | 271 | /* Find an entry in the third-level page table.. */ |
296 | #define __pte_offset(address) \ | 272 | #define __pte_offset(address) \ |
297 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | 273 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) |
298 | #define pte_offset_kernel(dir, address) \ | 274 | #define pte_offset_kernel(dir, address) \ |
299 | ((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(address)) | 275 | ((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(address)) |
300 | #define pte_offset_map(dir, address) \ | 276 | #define pte_offset_map(dir, address) \ |
301 | ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) | 277 | ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) |
302 | #define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) | 278 | #define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) |
303 | 279 | ||
304 | #define pte_unmap(pte) do { } while (0) | 280 | #define pte_unmap(pte) do { } while (0) |
305 | #define pte_unmap_nested(pte) do { } while (0) | 281 | #define pte_unmap_nested(pte) do { } while (0) |
306 | #define pte_pfn(x) ((unsigned long)(__va((x).pte)) >> PAGE_SHIFT) | 282 | #define pte_pfn(x) ((unsigned long)(__va((x).pte)) >> PAGE_SHIFT) |
307 | #define pfn_pte(pfn, prot) __pte((__pa((pfn) << PAGE_SHIFT)) | pgprot_val(prot)) | 283 | #define pfn_pte(pfn, prot) __pte((__pa((pfn) << PAGE_SHIFT)) | pgprot_val(prot)) |
308 | 284 | ||
309 | #define pte_ERROR(e) \ | 285 | #define pte_ERROR(e) \ |
310 | printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), pte_val(e)) | 286 | printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), pte_val(e)) |
311 | #define pmd_ERROR(e) \ | ||
312 | printk("%s:%d: bad pmd %p(%08lx).\n", __FILE__, __LINE__, &(e), pmd_val(e)) | ||
313 | #define pgd_ERROR(e) \ | 287 | #define pgd_ERROR(e) \ |
314 | printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) | 288 | printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) |
315 | 289 | ||
316 | 290 | ||
317 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */ | 291 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */ |
318 | 292 | ||
319 | /* | 293 | /* |
320 | * CRIS doesn't have any external MMU info: the kernel page | 294 | * CRIS doesn't have any external MMU info: the kernel page |
321 | * tables contain all the necessary information. | 295 | * tables contain all the necessary information. |
322 | * | 296 | * |
323 | * Actually I am not sure what this could be used for. | 297 | * Actually I am not sure what this could be used for. |
324 | */ | 298 | */ |
325 | extern inline void update_mmu_cache(struct vm_area_struct * vma, | 299 | extern inline void update_mmu_cache(struct vm_area_struct * vma, |
326 | unsigned long address, pte_t pte) | 300 | unsigned long address, pte_t pte) |
327 | { | 301 | { |
328 | } | 302 | } |
329 | 303 | ||
330 | /* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */ | 304 | /* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */ |
331 | /* Since the PAGE_PRESENT bit is bit 4, we can use the bits above */ | 305 | /* Since the PAGE_PRESENT bit is bit 4, we can use the bits above */ |
332 | 306 | ||
333 | #define __swp_type(x) (((x).val >> 5) & 0x7f) | 307 | #define __swp_type(x) (((x).val >> 5) & 0x7f) |
334 | #define __swp_offset(x) ((x).val >> 12) | 308 | #define __swp_offset(x) ((x).val >> 12) |
335 | #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 5) | ((offset) << 12) }) | 309 | #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 5) | ((offset) << 12) }) |
336 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | 310 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) |
337 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | 311 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) |
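As the comment says, _PAGE_PRESENT is bit 4, so the swap encoding packs the 7-bit swap type into bits 5..11 and the offset from bit 12 upwards, which guarantees that an encoded entry never looks like a present pte. A standalone check of the packing and unpacking:

#include <stdio.h>

/* Standalone check of the swap-entry packing above: type in bits 5..11,
 * offset in bits 12 and up, bit 4 (_PAGE_PRESENT) left clear. */
int main(void)
{
        unsigned long type = 3, offset = 0x1234;
        unsigned long val = (type << 5) | (offset << 12);   /* __swp_entry */

        printf("entry   = %#lx\n", val);                    /* 0x1234060 */
        printf("type    = %#lx\n", (val >> 5) & 0x7f);      /* 0x3 */
        printf("offset  = %#lx\n", val >> 12);              /* 0x1234 */
        printf("present = %lu\n", (val >> 4) & 1);          /* 0 */
        return 0;
}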
338 | 312 | ||
339 | #define kern_addr_valid(addr) (1) | 313 | #define kern_addr_valid(addr) (1) |
340 | 314 | ||
341 | #include <asm-generic/pgtable.h> | 315 | #include <asm-generic/pgtable.h> |
342 | 316 | ||
343 | /* | 317 | /* |
344 | * No page table caches to initialise | 318 | * No page table caches to initialise |
345 | */ | 319 | */ |
346 | #define pgtable_cache_init() do { } while (0) | 320 | #define pgtable_cache_init() do { } while (0) |
347 | 321 | ||
348 | #define pte_to_pgoff(x) (pte_val(x) >> 6) | 322 | #define pte_to_pgoff(x) (pte_val(x) >> 6) |
349 | #define pgoff_to_pte(x) __pte(((x) << 6) | _PAGE_FILE) | 323 | #define pgoff_to_pte(x) __pte(((x) << 6) | _PAGE_FILE) |
324 | |||
325 | typedef pte_t *pte_addr_t; | ||
350 | 326 |
include/asm-cris/processor.h
1 | /* | 1 | /* |
2 | * include/asm-cris/processor.h | 2 | * include/asm-cris/processor.h |
3 | * | 3 | * |
4 | * Copyright (C) 2000, 2001 Axis Communications AB | 4 | * Copyright (C) 2000, 2001 Axis Communications AB |
5 | * | 5 | * |
6 | * Authors: Bjorn Wesen Initial version | 6 | * Authors: Bjorn Wesen Initial version |
7 | * | 7 | * |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #ifndef __ASM_CRIS_PROCESSOR_H | 10 | #ifndef __ASM_CRIS_PROCESSOR_H |
11 | #define __ASM_CRIS_PROCESSOR_H | 11 | #define __ASM_CRIS_PROCESSOR_H |
12 | 12 | ||
13 | #include <linux/config.h> | 13 | #include <linux/config.h> |
14 | #include <asm/system.h> | 14 | #include <asm/system.h> |
15 | #include <asm/page.h> | 15 | #include <asm/page.h> |
16 | #include <asm/ptrace.h> | 16 | #include <asm/ptrace.h> |
17 | #include <asm/arch/processor.h> | 17 | #include <asm/arch/processor.h> |
18 | 18 | ||
19 | /* This decides where the kernel will search for a free chunk of vm | 19 | /* This decides where the kernel will search for a free chunk of vm |
20 | * space during mmap's. | 20 | * space during mmap's. |
21 | */ | 21 | */ |
22 | #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) | 22 | #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) |
23 | 23 | ||
24 | /* THREAD_SIZE is the size of the task_struct/kernel_stack combo. | 24 | /* THREAD_SIZE is the size of the task_struct/kernel_stack combo. |
25 | * normally, the stack is found by doing something like p + THREAD_SIZE | 25 | * normally, the stack is found by doing something like p + THREAD_SIZE |
26 | * in CRIS, a page is 8192 bytes, which seems like a sane size | 26 | * in CRIS, a page is 8192 bytes, which seems like a sane size |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #define THREAD_SIZE PAGE_SIZE | 29 | #define THREAD_SIZE PAGE_SIZE |
30 | #define KERNEL_STACK_SIZE PAGE_SIZE | 30 | #define KERNEL_STACK_SIZE PAGE_SIZE |
31 | 31 | ||
32 | /* | 32 | /* |
33 | * At user->kernel entry, the pt_regs struct is stacked on the top of the kernel-stack. | 33 | * At user->kernel entry, the pt_regs struct is stacked on the top of the kernel-stack. |
34 | * This macro allows us to find those regs for a task. | 34 | * This macro allows us to find those regs for a task. |
35 | * Notice that subsequent pt_regs stackings, like recursive interrupts occurring while | 35 | * Notice that subsequent pt_regs stackings, like recursive interrupts occurring while |
36 | * we're in the kernel, won't affect this - only the registers from the first | 36 | * we're in the kernel, won't affect this - only the registers from the first |
37 | * user->kernel transition are reached this way. | 37 | * user->kernel transition are reached this way. |
38 | */ | 38 | */ |
39 | 39 | ||
40 | #define user_regs(thread_info) (((struct pt_regs *)((unsigned long)(thread_info) + THREAD_SIZE)) - 1) | 40 | #define user_regs(thread_info) (((struct pt_regs *)((unsigned long)(thread_info) + THREAD_SIZE)) - 1) |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * Ditto, but for the currently running task | 43 | * Ditto, but for the currently running task |
44 | */ | 44 | */ |
45 | 45 | ||
46 | #define current_regs() user_regs(current->thread_info) | 46 | #define current_regs() user_regs(current->thread_info) |
47 | 47 | ||
48 | extern inline void prepare_to_copy(struct task_struct *tsk) | 48 | extern inline void prepare_to_copy(struct task_struct *tsk) |
49 | { | 49 | { |
50 | } | 50 | } |
51 | 51 | ||
52 | extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); | 52 | extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); |
53 | 53 | ||
54 | unsigned long get_wchan(struct task_struct *p); | 54 | unsigned long get_wchan(struct task_struct *p); |
55 | 55 | ||
56 | #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp) | 56 | #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp) |
57 | 57 | ||
58 | /* | ||
59 | * Free current thread data structures etc.. | ||
60 | */ | ||
61 | |||
62 | extern inline void exit_thread(void) | ||
63 | { | ||
64 | /* Nothing needs to be done. */ | ||
65 | } | ||
66 | |||
67 | extern unsigned long thread_saved_pc(struct task_struct *tsk); | 58 | extern unsigned long thread_saved_pc(struct task_struct *tsk); |
68 | 59 | ||
69 | /* Free all resources held by a thread. */ | 60 | /* Free all resources held by a thread. */ |
70 | extern inline void release_thread(struct task_struct *dead_task) | 61 | extern inline void release_thread(struct task_struct *dead_task) |
71 | { | 62 | { |
72 | /* Nothing needs to be done. */ | 63 | /* Nothing needs to be done. */ |
73 | } | 64 | } |
74 | 65 | ||
75 | #define init_stack (init_thread_union.stack) | 66 | #define init_stack (init_thread_union.stack) |
76 | 67 | ||
77 | #define cpu_relax() barrier() | 68 | #define cpu_relax() barrier() |
78 | 69 | ||
79 | #endif /* __ASM_CRIS_PROCESSOR_H */ | 70 | #endif /* __ASM_CRIS_PROCESSOR_H */ |
80 | 71 |
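user_regs() depends on the fixed stack layout: struct thread_info sits at the bottom of the THREAD_SIZE-byte kernel stack and the pt_regs frame pushed at the first user->kernel transition sits at the very top, so the macro adds THREAD_SIZE to the thread_info address and steps back one struct pt_regs. A hedged illustration of that pointer arithmetic (struct pt_regs here is a dummy layout, not the real CRIS register frame):

    #include <stdio.h>

    #define THREAD_SIZE 8192                      /* CRIS PAGE_SIZE, as noted in processor.h */

    struct pt_regs { unsigned long regs[16]; };   /* dummy register frame, for illustration only */

    #define user_regs(ti) (((struct pt_regs *)((unsigned long)(ti) + THREAD_SIZE)) - 1)

    int main(void)
    {
            static unsigned char stack[THREAD_SIZE];    /* pretend thread_info + kernel stack area */
            struct pt_regs *regs = user_regs(stack);

            /* The frame ends exactly at the top of the stack area. */
            printf("stack top %p, pt_regs frame %p..%p\n",
                   (void *)(stack + THREAD_SIZE), (void *)regs, (void *)(regs + 1));
            return 0;
    }

KSTK_ESP() follows the same idea for the user stack pointer: for the current task it reads USP directly with rdusp(), for any other task it uses the value saved in thread.usp.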
include/asm-cris/thread_info.h
1 | /* thread_info.h: CRIS low-level thread information | 1 | /* thread_info.h: CRIS low-level thread information |
2 | * | 2 | * |
3 | * Copyright (C) 2002 David Howells (dhowells@redhat.com) | 3 | * Copyright (C) 2002 David Howells (dhowells@redhat.com) |
4 | * - Incorporating suggestions made by Linus Torvalds and Dave Miller | 4 | * - Incorporating suggestions made by Linus Torvalds and Dave Miller |
5 | * | 5 | * |
6 | * CRIS port by Axis Communications | 6 | * CRIS port by Axis Communications |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #ifndef _ASM_THREAD_INFO_H | 9 | #ifndef _ASM_THREAD_INFO_H |
10 | #define _ASM_THREAD_INFO_H | 10 | #define _ASM_THREAD_INFO_H |
11 | 11 | ||
12 | #ifdef __KERNEL__ | 12 | #ifdef __KERNEL__ |
13 | 13 | ||
14 | #ifndef __ASSEMBLY__ | 14 | #ifndef __ASSEMBLY__ |
15 | #include <asm/types.h> | 15 | #include <asm/types.h> |
16 | #include <asm/processor.h> | 16 | #include <asm/processor.h> |
17 | #include <asm/arch/thread_info.h> | 17 | #include <asm/arch/thread_info.h> |
18 | #include <asm/segment.h> | 18 | #include <asm/segment.h> |
19 | #endif | 19 | #endif |
20 | 20 | ||
21 | 21 | ||
22 | /* | 22 | /* |
23 | * low level task data that entry.S needs immediate access to | 23 | * low level task data that entry.S needs immediate access to |
24 | * - this struct should fit entirely inside of one cache line | 24 | * - this struct should fit entirely inside of one cache line |
25 | * - this struct shares the supervisor stack pages | 25 | * - this struct shares the supervisor stack pages |
26 | * - if the contents of this structure are changed, the assembly constants must also be changed | 26 | * - if the contents of this structure are changed, the assembly constants must also be changed |
27 | */ | 27 | */ |
28 | #ifndef __ASSEMBLY__ | 28 | #ifndef __ASSEMBLY__ |
29 | struct thread_info { | 29 | struct thread_info { |
30 | struct task_struct *task; /* main task structure */ | 30 | struct task_struct *task; /* main task structure */ |
31 | struct exec_domain *exec_domain; /* execution domain */ | 31 | struct exec_domain *exec_domain; /* execution domain */ |
32 | unsigned long flags; /* low level flags */ | 32 | unsigned long flags; /* low level flags */ |
33 | __u32 cpu; /* current CPU */ | 33 | __u32 cpu; /* current CPU */ |
34 | int preempt_count; /* 0 => preemptable, <0 => BUG */ | 34 | int preempt_count; /* 0 => preemptable, <0 => BUG */ |
35 | 35 | ||
36 | mm_segment_t addr_limit; /* thread address space: | 36 | mm_segment_t addr_limit; /* thread address space: |
37 | 0-0xBFFFFFFF for user-thread | 37 | 0-0xBFFFFFFF for user-thread |
38 | 0-0xFFFFFFFF for kernel-thread | 38 | 0-0xFFFFFFFF for kernel-thread |
39 | */ | 39 | */ |
40 | struct restart_block restart_block; | 40 | struct restart_block restart_block; |
41 | __u8 supervisor_stack[0]; | 41 | __u8 supervisor_stack[0]; |
42 | }; | 42 | }; |
43 | 43 | ||
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | #define PREEMPT_ACTIVE 0x4000000 | 46 | #define PREEMPT_ACTIVE 0x10000000 |
47 | 47 | ||
48 | /* | 48 | /* |
49 | * macros/functions for gaining access to the thread information structure | 49 | * macros/functions for gaining access to the thread information structure |
50 | * | 50 | * |
51 | * preempt_count needs to be 1 initially, until the scheduler is functional. | 51 | * preempt_count needs to be 1 initially, until the scheduler is functional. |
52 | */ | 52 | */ |
53 | #ifndef __ASSEMBLY__ | 53 | #ifndef __ASSEMBLY__ |
54 | #define INIT_THREAD_INFO(tsk) \ | 54 | #define INIT_THREAD_INFO(tsk) \ |
55 | { \ | 55 | { \ |
56 | .task = &tsk, \ | 56 | .task = &tsk, \ |
57 | .exec_domain = &default_exec_domain, \ | 57 | .exec_domain = &default_exec_domain, \ |
58 | .flags = 0, \ | 58 | .flags = 0, \ |
59 | .cpu = 0, \ | 59 | .cpu = 0, \ |
60 | .preempt_count = 1, \ | 60 | .preempt_count = 1, \ |
61 | .addr_limit = KERNEL_DS, \ | 61 | .addr_limit = KERNEL_DS, \ |
62 | .restart_block = { \ | 62 | .restart_block = { \ |
63 | .fn = do_no_restart_syscall, \ | 63 | .fn = do_no_restart_syscall, \ |
64 | }, \ | 64 | }, \ |
65 | } | 65 | } |
66 | 66 | ||
67 | #define init_thread_info (init_thread_union.thread_info) | 67 | #define init_thread_info (init_thread_union.thread_info) |
68 | 68 | ||
69 | /* thread information allocation */ | 69 | /* thread information allocation */ |
70 | #define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1)) | 70 | #define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1)) |
71 | #define free_thread_info(ti) free_pages((unsigned long) (ti), 1) | 71 | #define free_thread_info(ti) free_pages((unsigned long) (ti), 1) |
72 | #define get_thread_info(ti) get_task_struct((ti)->task) | 72 | #define get_thread_info(ti) get_task_struct((ti)->task) |
73 | #define put_thread_info(ti) put_task_struct((ti)->task) | 73 | #define put_thread_info(ti) put_task_struct((ti)->task) |
74 | 74 | ||
75 | #endif /* !__ASSEMBLY__ */ | 75 | #endif /* !__ASSEMBLY__ */ |
76 | 76 | ||
77 | /* | 77 | /* |
78 | * thread information flags | 78 | * thread information flags |
79 | * - these are process state flags that various assembly files may need to access | 79 | * - these are process state flags that various assembly files may need to access |
80 | * - pending work-to-be-done flags are in LSW | 80 | * - pending work-to-be-done flags are in LSW |
81 | * - other flags in MSW | 81 | * - other flags in MSW |
82 | */ | 82 | */ |
83 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ | 83 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ |
84 | #define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ | 84 | #define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ |
85 | #define TIF_SIGPENDING 2 /* signal pending */ | 85 | #define TIF_SIGPENDING 2 /* signal pending */ |
86 | #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ | 86 | #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ |
87 | #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ | 87 | #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ |
88 | #define TIF_MEMDIE 17 | 88 | #define TIF_MEMDIE 17 |
89 | 89 | ||
90 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | 90 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) |
91 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) | 91 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) |
92 | #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) | 92 | #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) |
93 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) | 93 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) |
94 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) | 94 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) |
95 | 95 | ||
96 | #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ | 96 | #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ |
97 | #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ | 97 | #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ |
98 | 98 | ||
99 | #endif /* __KERNEL__ */ | 99 | #endif /* __KERNEL__ */ |
100 | 100 | ||
101 | #endif /* _ASM_THREAD_INFO_H */ | 101 | #endif /* _ASM_THREAD_INFO_H */ |
102 | 102 |
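The _TIF_* masks are simply 1 << TIF_*, and the LSW/MSW split documented in the comment shows up directly in the mask values: _TIF_WORK_MASK (0x0000FFFE), used on interrupt/exception return, covers every low-word flag except bit 0 (TIF_SYSCALL_TRACE), while _TIF_ALLWORK_MASK (0x0000FFFF), used on any return to user space, includes it; flags at bit 16 and above (TIF_POLLING_NRFLAG, TIF_MEMDIE) are status bits outside both masks. A small sketch that checks those relationships with the values defined above:

    #include <assert.h>
    #include <stdio.h>

    /* Values as in include/asm-cris/thread_info.h */
    #define TIF_SYSCALL_TRACE   0
    #define TIF_SIGPENDING      2
    #define TIF_NEED_RESCHED    3
    #define TIF_POLLING_NRFLAG  16

    #define _TIF_SYSCALL_TRACE  (1 << TIF_SYSCALL_TRACE)
    #define _TIF_SIGPENDING     (1 << TIF_SIGPENDING)
    #define _TIF_NEED_RESCHED   (1 << TIF_NEED_RESCHED)
    #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)

    #define _TIF_WORK_MASK      0x0000FFFE
    #define _TIF_ALLWORK_MASK   0x0000FFFF

    int main(void)
    {
            assert(_TIF_SIGPENDING   & _TIF_WORK_MASK);          /* caught on interrupt/exception return */
            assert(_TIF_NEED_RESCHED & _TIF_WORK_MASK);
            assert(!(_TIF_SYSCALL_TRACE & _TIF_WORK_MASK));      /* tracing: only on return to user space */
            assert(  _TIF_SYSCALL_TRACE & _TIF_ALLWORK_MASK);
            assert(!(_TIF_POLLING_NRFLAG & _TIF_ALLWORK_MASK));  /* MSW status bits are in neither mask */
            printf("TIF masks are consistent\n");
            return 0;
    }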
include/asm-cris/timex.h
1 | /* | 1 | /* |
2 | * linux/include/asm-cris/timex.h | 2 | * linux/include/asm-cris/timex.h |
3 | * | 3 | * |
4 | * CRIS architecture timex specifications | 4 | * CRIS architecture timex specifications |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #ifndef _ASM_CRIS_TIMEX_H | 7 | #ifndef _ASM_CRIS_TIMEX_H |
8 | #define _ASM_CRIS_TIMEX_H | 8 | #define _ASM_CRIS_TIMEX_H |
9 | 9 | ||
10 | #include <asm/arch/timex.h> | 10 | #include <asm/arch/timex.h> |
11 | 11 | ||
12 | /* | 12 | /* |
13 | * We don't have a cycle counter, but we do not support SMP (where this is | 13 | * We don't have a cycle counter, but we do not support SMP (where this is |
14 | * used) anyway, so it does not matter. | 14 | * used) anyway, so it does not matter. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | typedef unsigned int cycles_t; | 17 | typedef unsigned long long cycles_t; |
18 | 18 | ||
19 | extern inline cycles_t get_cycles(void) | 19 | extern inline cycles_t get_cycles(void) |
20 | { | 20 | { |
21 | return 0; | 21 | return 0; |
22 | } | 22 | } |
23 | 23 | ||
24 | #endif | 24 | #endif |
25 | 25 |
include/asm-cris/types.h
1 | #ifndef _ETRAX_TYPES_H | 1 | #ifndef _ETRAX_TYPES_H |
2 | #define _ETRAX_TYPES_H | 2 | #define _ETRAX_TYPES_H |
3 | 3 | ||
4 | #ifndef __ASSEMBLY__ | 4 | #ifndef __ASSEMBLY__ |
5 | 5 | ||
6 | typedef unsigned short umode_t; | 6 | typedef unsigned short umode_t; |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the | 9 | * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the |
10 | * header files exported to user space | 10 | * header files exported to user space |
11 | */ | 11 | */ |
12 | 12 | ||
13 | typedef __signed__ char __s8; | 13 | typedef __signed__ char __s8; |
14 | typedef unsigned char __u8; | 14 | typedef unsigned char __u8; |
15 | 15 | ||
16 | typedef __signed__ short __s16; | 16 | typedef __signed__ short __s16; |
17 | typedef unsigned short __u16; | 17 | typedef unsigned short __u16; |
18 | 18 | ||
19 | typedef __signed__ int __s32; | 19 | typedef __signed__ int __s32; |
20 | typedef unsigned int __u32; | 20 | typedef unsigned int __u32; |
21 | 21 | ||
22 | #if defined(__GNUC__) && !defined(__STRICT_ANSI__) | 22 | #if defined(__GNUC__) && !defined(__STRICT_ANSI__) |
23 | typedef __signed__ long long __s64; | 23 | typedef __signed__ long long __s64; |
24 | typedef unsigned long long __u64; | 24 | typedef unsigned long long __u64; |
25 | #endif | 25 | #endif |
26 | 26 | ||
27 | #endif /* __ASSEMBLY__ */ | 27 | #endif /* __ASSEMBLY__ */ |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * These aren't exported outside the kernel to avoid name space clashes | 30 | * These aren't exported outside the kernel to avoid name space clashes |
31 | */ | 31 | */ |
32 | #ifdef __KERNEL__ | 32 | #ifdef __KERNEL__ |
33 | 33 | ||
34 | #define BITS_PER_LONG 32 | 34 | #define BITS_PER_LONG 32 |
35 | 35 | ||
36 | #ifndef __ASSEMBLY__ | 36 | #ifndef __ASSEMBLY__ |
37 | 37 | ||
38 | typedef signed char s8; | 38 | typedef signed char s8; |
39 | typedef unsigned char u8; | 39 | typedef unsigned char u8; |
40 | 40 | ||
41 | typedef signed short s16; | 41 | typedef signed short s16; |
42 | typedef unsigned short u16; | 42 | typedef unsigned short u16; |
43 | 43 | ||
44 | typedef signed int s32; | 44 | typedef signed int s32; |
45 | typedef unsigned int u32; | 45 | typedef unsigned int u32; |
46 | 46 | ||
47 | typedef signed long long s64; | 47 | typedef signed long long s64; |
48 | typedef unsigned long long u64; | 48 | typedef unsigned long long u64; |
49 | 49 | ||
50 | /* Dma addresses are 32-bits wide, just like our other addresses. */ | 50 | /* Dma addresses are 32-bits wide, just like our other addresses. */ |
51 | 51 | ||
52 | typedef u32 dma_addr_t; | 52 | typedef u32 dma_addr_t; |
53 | typedef u32 dma64_addr_t; | 53 | typedef u32 dma64_addr_t; |
54 | 54 | ||
55 | typedef unsigned int kmem_bufctl_t; | 55 | typedef unsigned short kmem_bufctl_t; |
56 | 56 | ||
57 | #endif /* __ASSEMBLY__ */ | 57 | #endif /* __ASSEMBLY__ */ |
58 | 58 | ||
59 | #endif /* __KERNEL__ */ | 59 | #endif /* __KERNEL__ */ |
60 | 60 | ||
61 | #endif | 61 | #endif |
62 | 62 |
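The fixed-width typedefs above simply map onto the plain C types that have the right sizes on this 32-bit port (hence BITS_PER_LONG 32, and both dma_addr_t and dma64_addr_t being 32 bits wide). A compile-time sketch of those size expectations, using the same underlying C types; C11 _Static_assert is used here purely for illustration:

    /* Same underlying C types as in include/asm-cris/types.h */
    typedef signed char        s8;
    typedef unsigned char      u8;
    typedef signed short       s16;
    typedef unsigned short     u16;
    typedef signed int         s32;
    typedef unsigned int       u32;
    typedef signed long long   s64;
    typedef unsigned long long u64;

    _Static_assert(sizeof(s8)  == 1 && sizeof(u8)  == 1, "8-bit types");
    _Static_assert(sizeof(s16) == 2 && sizeof(u16) == 2, "16-bit types");
    _Static_assert(sizeof(s32) == 4 && sizeof(u32) == 4, "32-bit types");
    _Static_assert(sizeof(s64) == 8 && sizeof(u64) == 8, "64-bit types");

    int main(void) { return 0; }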
include/asm-cris/unistd.h
1 | #ifndef _ASM_CRIS_UNISTD_H_ | 1 | #ifndef _ASM_CRIS_UNISTD_H_ |
2 | #define _ASM_CRIS_UNISTD_H_ | 2 | #define _ASM_CRIS_UNISTD_H_ |
3 | 3 | ||
4 | #include <asm/arch/unistd.h> | 4 | #include <asm/arch/unistd.h> |
5 | 5 | ||
6 | /* | 6 | /* |
7 | * This file contains the system call numbers, and stub macros for libc. | 7 | * This file contains the system call numbers, and stub macros for libc. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define __NR_restart_syscall 0 | 10 | #define __NR_restart_syscall 0 |
11 | #define __NR_exit 1 | 11 | #define __NR_exit 1 |
12 | #define __NR_fork 2 | 12 | #define __NR_fork 2 |
13 | #define __NR_read 3 | 13 | #define __NR_read 3 |
14 | #define __NR_write 4 | 14 | #define __NR_write 4 |
15 | #define __NR_open 5 | 15 | #define __NR_open 5 |
16 | #define __NR_close 6 | 16 | #define __NR_close 6 |
17 | #define __NR_waitpid 7 | 17 | #define __NR_waitpid 7 |
18 | #define __NR_creat 8 | 18 | #define __NR_creat 8 |
19 | #define __NR_link 9 | 19 | #define __NR_link 9 |
20 | #define __NR_unlink 10 | 20 | #define __NR_unlink 10 |
21 | #define __NR_execve 11 | 21 | #define __NR_execve 11 |
22 | #define __NR_chdir 12 | 22 | #define __NR_chdir 12 |
23 | #define __NR_time 13 | 23 | #define __NR_time 13 |
24 | #define __NR_mknod 14 | 24 | #define __NR_mknod 14 |
25 | #define __NR_chmod 15 | 25 | #define __NR_chmod 15 |
26 | #define __NR_lchown 16 | 26 | #define __NR_lchown 16 |
27 | #define __NR_break 17 | 27 | #define __NR_break 17 |
28 | #define __NR_oldstat 18 | 28 | #define __NR_oldstat 18 |
29 | #define __NR_lseek 19 | 29 | #define __NR_lseek 19 |
30 | #define __NR_getpid 20 | 30 | #define __NR_getpid 20 |
31 | #define __NR_mount 21 | 31 | #define __NR_mount 21 |
32 | #define __NR_umount 22 | 32 | #define __NR_umount 22 |
33 | #define __NR_setuid 23 | 33 | #define __NR_setuid 23 |
34 | #define __NR_getuid 24 | 34 | #define __NR_getuid 24 |
35 | #define __NR_stime 25 | 35 | #define __NR_stime 25 |
36 | #define __NR_ptrace 26 | 36 | #define __NR_ptrace 26 |
37 | #define __NR_alarm 27 | 37 | #define __NR_alarm 27 |
38 | #define __NR_oldfstat 28 | 38 | #define __NR_oldfstat 28 |
39 | #define __NR_pause 29 | 39 | #define __NR_pause 29 |
40 | #define __NR_utime 30 | 40 | #define __NR_utime 30 |
41 | #define __NR_stty 31 | 41 | #define __NR_stty 31 |
42 | #define __NR_gtty 32 | 42 | #define __NR_gtty 32 |
43 | #define __NR_access 33 | 43 | #define __NR_access 33 |
44 | #define __NR_nice 34 | 44 | #define __NR_nice 34 |
45 | #define __NR_ftime 35 | 45 | #define __NR_ftime 35 |
46 | #define __NR_sync 36 | 46 | #define __NR_sync 36 |
47 | #define __NR_kill 37 | 47 | #define __NR_kill 37 |
48 | #define __NR_rename 38 | 48 | #define __NR_rename 38 |
49 | #define __NR_mkdir 39 | 49 | #define __NR_mkdir 39 |
50 | #define __NR_rmdir 40 | 50 | #define __NR_rmdir 40 |
51 | #define __NR_dup 41 | 51 | #define __NR_dup 41 |
52 | #define __NR_pipe 42 | 52 | #define __NR_pipe 42 |
53 | #define __NR_times 43 | 53 | #define __NR_times 43 |
54 | #define __NR_prof 44 | 54 | #define __NR_prof 44 |
55 | #define __NR_brk 45 | 55 | #define __NR_brk 45 |
56 | #define __NR_setgid 46 | 56 | #define __NR_setgid 46 |
57 | #define __NR_getgid 47 | 57 | #define __NR_getgid 47 |
58 | #define __NR_signal 48 | 58 | #define __NR_signal 48 |
59 | #define __NR_geteuid 49 | 59 | #define __NR_geteuid 49 |
60 | #define __NR_getegid 50 | 60 | #define __NR_getegid 50 |
61 | #define __NR_acct 51 | 61 | #define __NR_acct 51 |
62 | #define __NR_umount2 52 | 62 | #define __NR_umount2 52 |
63 | #define __NR_lock 53 | 63 | #define __NR_lock 53 |
64 | #define __NR_ioctl 54 | 64 | #define __NR_ioctl 54 |
65 | #define __NR_fcntl 55 | 65 | #define __NR_fcntl 55 |
66 | #define __NR_mpx 56 | 66 | #define __NR_mpx 56 |
67 | #define __NR_setpgid 57 | 67 | #define __NR_setpgid 57 |
68 | #define __NR_ulimit 58 | 68 | #define __NR_ulimit 58 |
69 | #define __NR_oldolduname 59 | 69 | #define __NR_oldolduname 59 |
70 | #define __NR_umask 60 | 70 | #define __NR_umask 60 |
71 | #define __NR_chroot 61 | 71 | #define __NR_chroot 61 |
72 | #define __NR_ustat 62 | 72 | #define __NR_ustat 62 |
73 | #define __NR_dup2 63 | 73 | #define __NR_dup2 63 |
74 | #define __NR_getppid 64 | 74 | #define __NR_getppid 64 |
75 | #define __NR_getpgrp 65 | 75 | #define __NR_getpgrp 65 |
76 | #define __NR_setsid 66 | 76 | #define __NR_setsid 66 |
77 | #define __NR_sigaction 67 | 77 | #define __NR_sigaction 67 |
78 | #define __NR_sgetmask 68 | 78 | #define __NR_sgetmask 68 |
79 | #define __NR_ssetmask 69 | 79 | #define __NR_ssetmask 69 |
80 | #define __NR_setreuid 70 | 80 | #define __NR_setreuid 70 |
81 | #define __NR_setregid 71 | 81 | #define __NR_setregid 71 |
82 | #define __NR_sigsuspend 72 | 82 | #define __NR_sigsuspend 72 |
83 | #define __NR_sigpending 73 | 83 | #define __NR_sigpending 73 |
84 | #define __NR_sethostname 74 | 84 | #define __NR_sethostname 74 |
85 | #define __NR_setrlimit 75 | 85 | #define __NR_setrlimit 75 |
86 | #define __NR_getrlimit 76 | 86 | #define __NR_getrlimit 76 |
87 | #define __NR_getrusage 77 | 87 | #define __NR_getrusage 77 |
88 | #define __NR_gettimeofday 78 | 88 | #define __NR_gettimeofday 78 |
89 | #define __NR_settimeofday 79 | 89 | #define __NR_settimeofday 79 |
90 | #define __NR_getgroups 80 | 90 | #define __NR_getgroups 80 |
91 | #define __NR_setgroups 81 | 91 | #define __NR_setgroups 81 |
92 | #define __NR_select 82 | 92 | #define __NR_select 82 |
93 | #define __NR_symlink 83 | 93 | #define __NR_symlink 83 |
94 | #define __NR_oldlstat 84 | 94 | #define __NR_oldlstat 84 |
95 | #define __NR_readlink 85 | 95 | #define __NR_readlink 85 |
96 | #define __NR_uselib 86 | 96 | #define __NR_uselib 86 |
97 | #define __NR_swapon 87 | 97 | #define __NR_swapon 87 |
98 | #define __NR_reboot 88 | 98 | #define __NR_reboot 88 |
99 | #define __NR_readdir 89 | 99 | #define __NR_readdir 89 |
100 | #define __NR_mmap 90 | 100 | #define __NR_mmap 90 |
101 | #define __NR_munmap 91 | 101 | #define __NR_munmap 91 |
102 | #define __NR_truncate 92 | 102 | #define __NR_truncate 92 |
103 | #define __NR_ftruncate 93 | 103 | #define __NR_ftruncate 93 |
104 | #define __NR_fchmod 94 | 104 | #define __NR_fchmod 94 |
105 | #define __NR_fchown 95 | 105 | #define __NR_fchown 95 |
106 | #define __NR_getpriority 96 | 106 | #define __NR_getpriority 96 |
107 | #define __NR_setpriority 97 | 107 | #define __NR_setpriority 97 |
108 | #define __NR_profil 98 | 108 | #define __NR_profil 98 |
109 | #define __NR_statfs 99 | 109 | #define __NR_statfs 99 |
110 | #define __NR_fstatfs 100 | 110 | #define __NR_fstatfs 100 |
111 | #define __NR_ioperm 101 | 111 | #define __NR_ioperm 101 |
112 | #define __NR_socketcall 102 | 112 | #define __NR_socketcall 102 |
113 | #define __NR_syslog 103 | 113 | #define __NR_syslog 103 |
114 | #define __NR_setitimer 104 | 114 | #define __NR_setitimer 104 |
115 | #define __NR_getitimer 105 | 115 | #define __NR_getitimer 105 |
116 | #define __NR_stat 106 | 116 | #define __NR_stat 106 |
117 | #define __NR_lstat 107 | 117 | #define __NR_lstat 107 |
118 | #define __NR_fstat 108 | 118 | #define __NR_fstat 108 |
119 | #define __NR_olduname 109 | 119 | #define __NR_olduname 109 |
120 | #define __NR_iopl 110 | 120 | #define __NR_iopl 110 |
121 | #define __NR_vhangup 111 | 121 | #define __NR_vhangup 111 |
122 | #define __NR_idle 112 | 122 | #define __NR_idle 112 |
123 | #define __NR_vm86 113 | 123 | #define __NR_vm86 113 |
124 | #define __NR_wait4 114 | 124 | #define __NR_wait4 114 |
125 | #define __NR_swapoff 115 | 125 | #define __NR_swapoff 115 |
126 | #define __NR_sysinfo 116 | 126 | #define __NR_sysinfo 116 |
127 | #define __NR_ipc 117 | 127 | #define __NR_ipc 117 |
128 | #define __NR_fsync 118 | 128 | #define __NR_fsync 118 |
129 | #define __NR_sigreturn 119 | 129 | #define __NR_sigreturn 119 |
130 | #define __NR_clone 120 | 130 | #define __NR_clone 120 |
131 | #define __NR_setdomainname 121 | 131 | #define __NR_setdomainname 121 |
132 | #define __NR_uname 122 | 132 | #define __NR_uname 122 |
133 | #define __NR_modify_ldt 123 | 133 | #define __NR_modify_ldt 123 |
134 | #define __NR_adjtimex 124 | 134 | #define __NR_adjtimex 124 |
135 | #define __NR_mprotect 125 | 135 | #define __NR_mprotect 125 |
136 | #define __NR_sigprocmask 126 | 136 | #define __NR_sigprocmask 126 |
137 | #define __NR_create_module 127 | 137 | #define __NR_create_module 127 |
138 | #define __NR_init_module 128 | 138 | #define __NR_init_module 128 |
139 | #define __NR_delete_module 129 | 139 | #define __NR_delete_module 129 |
140 | #define __NR_get_kernel_syms 130 | 140 | #define __NR_get_kernel_syms 130 |
141 | #define __NR_quotactl 131 | 141 | #define __NR_quotactl 131 |
142 | #define __NR_getpgid 132 | 142 | #define __NR_getpgid 132 |
143 | #define __NR_fchdir 133 | 143 | #define __NR_fchdir 133 |
144 | #define __NR_bdflush 134 | 144 | #define __NR_bdflush 134 |
145 | #define __NR_sysfs 135 | 145 | #define __NR_sysfs 135 |
146 | #define __NR_personality 136 | 146 | #define __NR_personality 136 |
147 | #define __NR_afs_syscall 137 /* Syscall for Andrew File System */ | 147 | #define __NR_afs_syscall 137 /* Syscall for Andrew File System */ |
148 | #define __NR_setfsuid 138 | 148 | #define __NR_setfsuid 138 |
149 | #define __NR_setfsgid 139 | 149 | #define __NR_setfsgid 139 |
150 | #define __NR__llseek 140 | 150 | #define __NR__llseek 140 |
151 | #define __NR_getdents 141 | 151 | #define __NR_getdents 141 |
152 | #define __NR__newselect 142 | 152 | #define __NR__newselect 142 |
153 | #define __NR_flock 143 | 153 | #define __NR_flock 143 |
154 | #define __NR_msync 144 | 154 | #define __NR_msync 144 |
155 | #define __NR_readv 145 | 155 | #define __NR_readv 145 |
156 | #define __NR_writev 146 | 156 | #define __NR_writev 146 |
157 | #define __NR_getsid 147 | 157 | #define __NR_getsid 147 |
158 | #define __NR_fdatasync 148 | 158 | #define __NR_fdatasync 148 |
159 | #define __NR__sysctl 149 | 159 | #define __NR__sysctl 149 |
160 | #define __NR_mlock 150 | 160 | #define __NR_mlock 150 |
161 | #define __NR_munlock 151 | 161 | #define __NR_munlock 151 |
162 | #define __NR_mlockall 152 | 162 | #define __NR_mlockall 152 |
163 | #define __NR_munlockall 153 | 163 | #define __NR_munlockall 153 |
164 | #define __NR_sched_setparam 154 | 164 | #define __NR_sched_setparam 154 |
165 | #define __NR_sched_getparam 155 | 165 | #define __NR_sched_getparam 155 |
166 | #define __NR_sched_setscheduler 156 | 166 | #define __NR_sched_setscheduler 156 |
167 | #define __NR_sched_getscheduler 157 | 167 | #define __NR_sched_getscheduler 157 |
168 | #define __NR_sched_yield 158 | 168 | #define __NR_sched_yield 158 |
169 | #define __NR_sched_get_priority_max 159 | 169 | #define __NR_sched_get_priority_max 159 |
170 | #define __NR_sched_get_priority_min 160 | 170 | #define __NR_sched_get_priority_min 160 |
171 | #define __NR_sched_rr_get_interval 161 | 171 | #define __NR_sched_rr_get_interval 161 |
172 | #define __NR_nanosleep 162 | 172 | #define __NR_nanosleep 162 |
173 | #define __NR_mremap 163 | 173 | #define __NR_mremap 163 |
174 | #define __NR_setresuid 164 | 174 | #define __NR_setresuid 164 |
175 | #define __NR_getresuid 165 | 175 | #define __NR_getresuid 165 |
176 | 176 | ||
177 | #define __NR_query_module 167 | 177 | #define __NR_query_module 167 |
178 | #define __NR_poll 168 | 178 | #define __NR_poll 168 |
179 | #define __NR_nfsservctl 169 | 179 | #define __NR_nfsservctl 169 |
180 | #define __NR_setresgid 170 | 180 | #define __NR_setresgid 170 |
181 | #define __NR_getresgid 171 | 181 | #define __NR_getresgid 171 |
182 | #define __NR_prctl 172 | 182 | #define __NR_prctl 172 |
183 | #define __NR_rt_sigreturn 173 | 183 | #define __NR_rt_sigreturn 173 |
184 | #define __NR_rt_sigaction 174 | 184 | #define __NR_rt_sigaction 174 |
185 | #define __NR_rt_sigprocmask 175 | 185 | #define __NR_rt_sigprocmask 175 |
186 | #define __NR_rt_sigpending 176 | 186 | #define __NR_rt_sigpending 176 |
187 | #define __NR_rt_sigtimedwait 177 | 187 | #define __NR_rt_sigtimedwait 177 |
188 | #define __NR_rt_sigqueueinfo 178 | 188 | #define __NR_rt_sigqueueinfo 178 |
189 | #define __NR_rt_sigsuspend 179 | 189 | #define __NR_rt_sigsuspend 179 |
190 | #define __NR_pread64 180 | 190 | #define __NR_pread64 180 |
191 | #define __NR_pwrite64 181 | 191 | #define __NR_pwrite64 181 |
192 | #define __NR_chown 182 | 192 | #define __NR_chown 182 |
193 | #define __NR_getcwd 183 | 193 | #define __NR_getcwd 183 |
194 | #define __NR_capget 184 | 194 | #define __NR_capget 184 |
195 | #define __NR_capset 185 | 195 | #define __NR_capset 185 |
196 | #define __NR_sigaltstack 186 | 196 | #define __NR_sigaltstack 186 |
197 | #define __NR_sendfile 187 | 197 | #define __NR_sendfile 187 |
198 | #define __NR_getpmsg 188 /* some people actually want streams */ | 198 | #define __NR_getpmsg 188 /* some people actually want streams */ |
199 | #define __NR_putpmsg 189 /* some people actually want streams */ | 199 | #define __NR_putpmsg 189 /* some people actually want streams */ |
200 | #define __NR_vfork 190 | 200 | #define __NR_vfork 190 |
201 | #define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ | 201 | #define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ |
202 | #define __NR_mmap2 192 | 202 | #define __NR_mmap2 192 |
203 | #define __NR_truncate64 193 | 203 | #define __NR_truncate64 193 |
204 | #define __NR_ftruncate64 194 | 204 | #define __NR_ftruncate64 194 |
205 | #define __NR_stat64 195 | 205 | #define __NR_stat64 195 |
206 | #define __NR_lstat64 196 | 206 | #define __NR_lstat64 196 |
207 | #define __NR_fstat64 197 | 207 | #define __NR_fstat64 197 |
208 | #define __NR_lchown32 198 | 208 | #define __NR_lchown32 198 |
209 | #define __NR_getuid32 199 | 209 | #define __NR_getuid32 199 |
210 | #define __NR_getgid32 200 | 210 | #define __NR_getgid32 200 |
211 | #define __NR_geteuid32 201 | 211 | #define __NR_geteuid32 201 |
212 | #define __NR_getegid32 202 | 212 | #define __NR_getegid32 202 |
213 | #define __NR_setreuid32 203 | 213 | #define __NR_setreuid32 203 |
214 | #define __NR_setregid32 204 | 214 | #define __NR_setregid32 204 |
215 | #define __NR_getgroups32 205 | 215 | #define __NR_getgroups32 205 |
216 | #define __NR_setgroups32 206 | 216 | #define __NR_setgroups32 206 |
217 | #define __NR_fchown32 207 | 217 | #define __NR_fchown32 207 |
218 | #define __NR_setresuid32 208 | 218 | #define __NR_setresuid32 208 |
219 | #define __NR_getresuid32 209 | 219 | #define __NR_getresuid32 209 |
220 | #define __NR_setresgid32 210 | 220 | #define __NR_setresgid32 210 |
221 | #define __NR_getresgid32 211 | 221 | #define __NR_getresgid32 211 |
222 | #define __NR_chown32 212 | 222 | #define __NR_chown32 212 |
223 | #define __NR_setuid32 213 | 223 | #define __NR_setuid32 213 |
224 | #define __NR_setgid32 214 | 224 | #define __NR_setgid32 214 |
225 | #define __NR_setfsuid32 215 | 225 | #define __NR_setfsuid32 215 |
226 | #define __NR_setfsgid32 216 | 226 | #define __NR_setfsgid32 216 |
227 | #define __NR_pivot_root 217 | 227 | #define __NR_pivot_root 217 |
228 | #define __NR_mincore 218 | 228 | #define __NR_mincore 218 |
229 | #define __NR_madvise 219 | 229 | #define __NR_madvise 219 |
230 | #define __NR_getdents64 220 | 230 | #define __NR_getdents64 220 |
231 | #define __NR_fcntl64 221 | 231 | #define __NR_fcntl64 221 |
232 | /* 223 is unused */ | 232 | /* 223 is unused */ |
233 | #define __NR_gettid 224 | 233 | #define __NR_gettid 224 |
234 | #define __NR_readahead 225 | 234 | #define __NR_readahead 225 |
235 | #define __NR_setxattr 226 | 235 | #define __NR_setxattr 226 |
236 | #define __NR_lsetxattr 227 | 236 | #define __NR_lsetxattr 227 |
237 | #define __NR_fsetxattr 228 | 237 | #define __NR_fsetxattr 228 |
238 | #define __NR_getxattr 229 | 238 | #define __NR_getxattr 229 |
239 | #define __NR_lgetxattr 230 | 239 | #define __NR_lgetxattr 230 |
240 | #define __NR_fgetxattr 231 | 240 | #define __NR_fgetxattr 231 |
241 | #define __NR_listxattr 232 | 241 | #define __NR_listxattr 232 |
242 | #define __NR_llistxattr 233 | 242 | #define __NR_llistxattr 233 |
243 | #define __NR_flistxattr 234 | 243 | #define __NR_flistxattr 234 |
244 | #define __NR_removexattr 235 | 244 | #define __NR_removexattr 235 |
245 | #define __NR_lremovexattr 236 | 245 | #define __NR_lremovexattr 236 |
246 | #define __NR_fremovexattr 237 | 246 | #define __NR_fremovexattr 237 |
247 | #define __NR_tkill 238 | 247 | #define __NR_tkill 238 |
248 | #define __NR_sendfile64 239 | 248 | #define __NR_sendfile64 239 |
249 | #define __NR_futex 240 | 249 | #define __NR_futex 240 |
250 | #define __NR_sched_setaffinity 241 | 250 | #define __NR_sched_setaffinity 241 |
251 | #define __NR_sched_getaffinity 242 | 251 | #define __NR_sched_getaffinity 242 |
252 | #define __NR_set_thread_area 243 | 252 | #define __NR_set_thread_area 243 |
253 | #define __NR_get_thread_area 244 | 253 | #define __NR_get_thread_area 244 |
254 | #define __NR_io_setup 245 | 254 | #define __NR_io_setup 245 |
255 | #define __NR_io_destroy 246 | 255 | #define __NR_io_destroy 246 |
256 | #define __NR_io_getevents 247 | 256 | #define __NR_io_getevents 247 |
257 | #define __NR_io_submit 248 | 257 | #define __NR_io_submit 248 |
258 | #define __NR_io_cancel 249 | 258 | #define __NR_io_cancel 249 |
259 | #define __NR_fadvise64 250 | 259 | #define __NR_fadvise64 250 |
260 | #define __NR_exit_group 252 | 260 | #define __NR_exit_group 252 |
261 | #define __NR_lookup_dcookie 253 | 261 | #define __NR_lookup_dcookie 253 |
262 | #define __NR_epoll_create 254 | 262 | #define __NR_epoll_create 254 |
263 | #define __NR_epoll_ctl 255 | 263 | #define __NR_epoll_ctl 255 |
264 | #define __NR_epoll_wait 256 | 264 | #define __NR_epoll_wait 256 |
265 | #define __NR_remap_file_pages 257 | 265 | #define __NR_remap_file_pages 257 |
266 | #define __NR_set_tid_address 258 | 266 | #define __NR_set_tid_address 258 |
267 | #define __NR_timer_create 259 | 267 | #define __NR_timer_create 259 |
268 | #define __NR_timer_settime (__NR_timer_create+1) | 268 | #define __NR_timer_settime (__NR_timer_create+1) |
269 | #define __NR_timer_gettime (__NR_timer_create+2) | 269 | #define __NR_timer_gettime (__NR_timer_create+2) |
270 | #define __NR_timer_getoverrun (__NR_timer_create+3) | 270 | #define __NR_timer_getoverrun (__NR_timer_create+3) |
271 | #define __NR_timer_delete (__NR_timer_create+4) | 271 | #define __NR_timer_delete (__NR_timer_create+4) |
272 | #define __NR_clock_settime (__NR_timer_create+5) | 272 | #define __NR_clock_settime (__NR_timer_create+5) |
273 | #define __NR_clock_gettime (__NR_timer_create+6) | 273 | #define __NR_clock_gettime (__NR_timer_create+6) |
274 | #define __NR_clock_getres (__NR_timer_create+7) | 274 | #define __NR_clock_getres (__NR_timer_create+7) |
275 | #define __NR_clock_nanosleep (__NR_timer_create+8) | 275 | #define __NR_clock_nanosleep (__NR_timer_create+8) |
276 | #define __NR_statfs64 268 | 276 | #define __NR_statfs64 268 |
277 | #define __NR_fstatfs64 269 | 277 | #define __NR_fstatfs64 269 |
278 | #define __NR_tgkill 270 | 278 | #define __NR_tgkill 270 |
279 | #define __NR_utimes 271 | 279 | #define __NR_utimes 271 |
280 | #define __NR_fadvise64_64 272 | 280 | #define __NR_fadvise64_64 272 |
281 | #define __NR_vserver 273 | 281 | #define __NR_vserver 273 |
282 | #define __NR_mbind 274 | 282 | #define __NR_mbind 274 |
283 | #define __NR_get_mempolicy 275 | 283 | #define __NR_get_mempolicy 275 |
284 | #define __NR_set_mempolicy 276 | 284 | #define __NR_set_mempolicy 276 |
285 | #define __NR_mq_open 277 | 285 | #define __NR_mq_open 277 |
286 | #define __NR_mq_unlink (__NR_mq_open+1) | 286 | #define __NR_mq_unlink (__NR_mq_open+1) |
287 | #define __NR_mq_timedsend (__NR_mq_open+2) | 287 | #define __NR_mq_timedsend (__NR_mq_open+2) |
288 | #define __NR_mq_timedreceive (__NR_mq_open+3) | 288 | #define __NR_mq_timedreceive (__NR_mq_open+3) |
289 | #define __NR_mq_notify (__NR_mq_open+4) | 289 | #define __NR_mq_notify (__NR_mq_open+4) |
290 | #define __NR_mq_getsetattr (__NR_mq_open+5) | 290 | #define __NR_mq_getsetattr (__NR_mq_open+5) |
291 | 291 | #define __NR_sys_kexec_load 283 | |
292 | #define NR_syscalls 283 | 292 | #define __NR_waitid 284 |
293 | /* #define __NR_sys_setaltroot 285 */ | ||
294 | #define __NR_add_key 286 | ||
295 | #define __NR_request_key 287 | ||
296 | #define __NR_keyctl 288 | ||
297 | |||
298 | #define NR_syscalls 289 | ||
299 | |||
293 | 300 | ||
294 | 301 | ||
295 | #ifdef __KERNEL__ | 302 | #ifdef __KERNEL__ |
296 | #define __ARCH_WANT_IPC_PARSE_VERSION | 303 | #define __ARCH_WANT_IPC_PARSE_VERSION |
297 | #define __ARCH_WANT_OLD_READDIR | 304 | #define __ARCH_WANT_OLD_READDIR |
298 | #define __ARCH_WANT_OLD_STAT | 305 | #define __ARCH_WANT_OLD_STAT |
299 | #define __ARCH_WANT_STAT64 | 306 | #define __ARCH_WANT_STAT64 |
300 | #define __ARCH_WANT_SYS_ALARM | 307 | #define __ARCH_WANT_SYS_ALARM |
301 | #define __ARCH_WANT_SYS_GETHOSTNAME | 308 | #define __ARCH_WANT_SYS_GETHOSTNAME |
302 | #define __ARCH_WANT_SYS_PAUSE | 309 | #define __ARCH_WANT_SYS_PAUSE |
303 | #define __ARCH_WANT_SYS_SGETMASK | 310 | #define __ARCH_WANT_SYS_SGETMASK |
304 | #define __ARCH_WANT_SYS_SIGNAL | 311 | #define __ARCH_WANT_SYS_SIGNAL |
305 | #define __ARCH_WANT_SYS_TIME | 312 | #define __ARCH_WANT_SYS_TIME |
306 | #define __ARCH_WANT_SYS_UTIME | 313 | #define __ARCH_WANT_SYS_UTIME |
307 | #define __ARCH_WANT_SYS_WAITPID | 314 | #define __ARCH_WANT_SYS_WAITPID |
308 | #define __ARCH_WANT_SYS_SOCKETCALL | 315 | #define __ARCH_WANT_SYS_SOCKETCALL |
309 | #define __ARCH_WANT_SYS_FADVISE64 | 316 | #define __ARCH_WANT_SYS_FADVISE64 |
310 | #define __ARCH_WANT_SYS_GETPGRP | 317 | #define __ARCH_WANT_SYS_GETPGRP |
311 | #define __ARCH_WANT_SYS_LLSEEK | 318 | #define __ARCH_WANT_SYS_LLSEEK |
312 | #define __ARCH_WANT_SYS_NICE | 319 | #define __ARCH_WANT_SYS_NICE |
313 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT | 320 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT |
314 | #define __ARCH_WANT_SYS_OLDUMOUNT | 321 | #define __ARCH_WANT_SYS_OLDUMOUNT |
315 | #define __ARCH_WANT_SYS_SIGPENDING | 322 | #define __ARCH_WANT_SYS_SIGPENDING |
316 | #define __ARCH_WANT_SYS_SIGPROCMASK | 323 | #define __ARCH_WANT_SYS_SIGPROCMASK |
317 | #define __ARCH_WANT_SYS_RT_SIGACTION | 324 | #define __ARCH_WANT_SYS_RT_SIGACTION |
318 | #endif | 325 | #endif |
319 | 326 | ||
320 | #ifdef __KERNEL_SYSCALLS__ | 327 | #ifdef __KERNEL_SYSCALLS__ |
321 | 328 | ||
322 | #include <linux/compiler.h> | 329 | #include <linux/compiler.h> |
323 | #include <linux/types.h> | 330 | #include <linux/types.h> |
324 | #include <linux/linkage.h> | 331 | #include <linux/linkage.h> |
325 | 332 | ||
326 | /* | 333 | /* |
327 | * we need this inline - forking from kernel space will result | 334 | * we need this inline - forking from kernel space will result |
328 | * in NO COPY ON WRITE (!!!), until an execve is executed. This | 335 | * in NO COPY ON WRITE (!!!), until an execve is executed. This |
329 | * is no problem, but for the stack. This is handled by not letting | 336 | * is no problem, but for the stack. This is handled by not letting |
330 | * main() use the stack at all after fork(). Thus, no function | 337 | * main() use the stack at all after fork(). Thus, no function |
331 | * calls - which means inline code for fork too, as otherwise we | 338 | * calls - which means inline code for fork too, as otherwise we |
332 | * would use the stack upon exit from 'fork()'. | 339 | * would use the stack upon exit from 'fork()'. |
333 | * | 340 | * |
334 | * Actually only pause and fork are needed inline, so that there | 341 | * Actually only pause and fork are needed inline, so that there |
335 | * won't be any messing with the stack from main(), but we define | 342 | * won't be any messing with the stack from main(), but we define |
336 | * some others too. | 343 | * some others too. |
337 | */ | 344 | */ |
338 | #define __NR__exit __NR_exit | 345 | #define __NR__exit __NR_exit |
339 | extern inline _syscall0(pid_t,setsid) | 346 | extern inline _syscall0(pid_t,setsid) |
340 | extern inline _syscall3(int,write,int,fd,const char *,buf,off_t,count) | 347 | extern inline _syscall3(int,write,int,fd,const char *,buf,off_t,count) |
341 | extern inline _syscall3(int,read,int,fd,char *,buf,off_t,count) | 348 | extern inline _syscall3(int,read,int,fd,char *,buf,off_t,count) |
342 | extern inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count) | 349 | extern inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count) |
343 | extern inline _syscall1(int,dup,int,fd) | 350 | extern inline _syscall1(int,dup,int,fd) |
344 | extern inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp) | 351 | extern inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp) |
345 | extern inline _syscall3(int,open,const char *,file,int,flag,int,mode) | 352 | extern inline _syscall3(int,open,const char *,file,int,flag,int,mode) |
346 | extern inline _syscall1(int,close,int,fd) | 353 | extern inline _syscall1(int,close,int,fd) |
347 | 354 | ||
348 | struct pt_regs; | 355 | struct pt_regs; |
349 | asmlinkage long sys_mmap2( | 356 | asmlinkage long sys_mmap2( |
350 | unsigned long addr, unsigned long len, | 357 | unsigned long addr, unsigned long len, |
351 | unsigned long prot, unsigned long flags, | 358 | unsigned long prot, unsigned long flags, |
352 | unsigned long fd, unsigned long pgoff); | 359 | unsigned long fd, unsigned long pgoff); |
353 | asmlinkage int sys_execve(const char *fname, char **argv, char **envp, | 360 | asmlinkage int sys_execve(const char *fname, char **argv, char **envp, |
354 | long r13, long mof, long srp, struct pt_regs *regs); | 361 | long r13, long mof, long srp, struct pt_regs *regs); |
355 | asmlinkage int sys_clone(unsigned long newusp, unsigned long flags, | 362 | asmlinkage int sys_clone(unsigned long newusp, unsigned long flags, |
356 | int* parent_tid, int* child_tid, long mof, long srp, | 363 | int* parent_tid, int* child_tid, long mof, long srp, |
357 | struct pt_regs *regs); | 364 | struct pt_regs *regs); |
358 | asmlinkage int sys_fork(long r10, long r11, long r12, long r13, | 365 | asmlinkage int sys_fork(long r10, long r11, long r12, long r13, |
359 | long mof, long srp, struct pt_regs *regs); | 366 | long mof, long srp, struct pt_regs *regs); |
360 | asmlinkage int sys_vfork(long r10, long r11, long r12, long r13, | 367 | asmlinkage int sys_vfork(long r10, long r11, long r12, long r13, |
361 | long mof, long srp, struct pt_regs *regs); | 368 | long mof, long srp, struct pt_regs *regs); |
362 | asmlinkage int sys_pipe(unsigned long __user *fildes); | 369 | asmlinkage int sys_pipe(unsigned long __user *fildes); |
363 | asmlinkage int sys_ptrace(long request, long pid, long addr, long data); | 370 | asmlinkage int sys_ptrace(long request, long pid, long addr, long data); |
364 | struct sigaction; | 371 | struct sigaction; |
365 | asmlinkage long sys_rt_sigaction(int sig, | 372 | asmlinkage long sys_rt_sigaction(int sig, |
366 | const struct sigaction __user *act, | 373 | const struct sigaction __user *act, |
367 | struct sigaction __user *oact, | 374 | struct sigaction __user *oact, |
368 | size_t sigsetsize); | 375 | size_t sigsetsize); |
369 | 376 | ||
370 | /* | 377 | /* |
371 | * Since we define it "external", it collides with the built-in | 378 | * Since we define it "external", it collides with the built-in |
372 | * definition, which has the "noreturn" attribute and will cause | 379 | * definition, which has the "noreturn" attribute and will cause |
373 | * complaints. We don't want to use -fno-builtin, so just use a | 380 | * complaints. We don't want to use -fno-builtin, so just use a |
374 | * different name when in the kernel. | 381 | * different name when in the kernel. |
375 | */ | 382 | */ |
376 | #ifdef __KERNEL__ | 383 | #ifdef __KERNEL__ |
377 | #define _exit kernel_syscall_exit | 384 | #define _exit kernel_syscall_exit |
378 | #endif | 385 | #endif |
379 | extern inline _syscall1(int,_exit,int,exitcode) | 386 | extern inline _syscall1(int,_exit,int,exitcode) |
380 | extern inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options) | 387 | extern inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options) |
381 | #endif | 388 | #endif |
382 | 389 | ||
383 | 390 | ||
384 | /* | 391 | /* |
385 | * "Conditional" syscalls | 392 | * "Conditional" syscalls |
386 | * | 393 | * |
387 | * What we want is __attribute__((weak,alias("sys_ni_syscall"))), | 394 | * What we want is __attribute__((weak,alias("sys_ni_syscall"))), |
388 | * but it doesn't work on all toolchains, so we just do it by hand | 395 | * but it doesn't work on all toolchains, so we just do it by hand |
389 | */ | 396 | */ |
390 | #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") | 397 | #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") |
391 | 398 | ||
392 | #endif /* _ASM_CRIS_UNISTD_H_ */ | 399 | #endif /* _ASM_CRIS_UNISTD_H_ */ |
393 | 400 |
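Each __NR_* constant above is the slot number the kernel's syscall dispatcher uses for that call, and NR_syscalls (289 after this patch, one past __NR_keyctl == 288) sizes the dispatch table; numbers outside the table fail with ENOSYS. Userspace normally goes through libc stubs built from macros like the _syscallN ones used above, but a call can also be made by number with glibc's generic syscall() wrapper. A hedged illustration (SYS_getpid comes from the host's headers; on CRIS the number behind it would be 20, per the list above):

    #define _GNU_SOURCE        /* for the syscall() declaration */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>   /* defines SYS_* from the host's __NR_* values */

    int main(void)
    {
            /* Trap into the kernel with an explicit syscall number. */
            long pid = syscall(SYS_getpid);
            printf("getpid() via raw syscall: %ld\n", pid);

            /* A number well past NR_syscalls is rejected by the dispatcher. */
            if (syscall(5000) == -1)
                    perror("syscall(5000)");
            return 0;
    }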