Commit f3411b16c790e52b5abe174b33c7d213dbf5c259

Authored by Mike Frysinger
1 parent 4bdef3bd7e

Blackfin: wire up new fanotify/prlimit64 syscalls

Signed-off-by: Mike Frysinger <vapier@gentoo.org>

Showing 2 changed files with 7 additions and 1 deletion (inline diff)
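Once a kernel with this change is running, the new entries can be exercised from userspace by number even before the C library grows wrappers. A minimal sketch (not part of this commit, and assuming a Blackfin toolchain whose libc has no prlimit64() wrapper yet) that probes the newly wired prlimit64 slot, syscall 373 per the header below:

/* sketch: probe the new Blackfin prlimit64 syscall directly by number */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/resource.h>

#ifndef __NR_prlimit64
# define __NR_prlimit64 373	/* number wired up by this commit */
#endif

int main(void)
{
	/* matches the kernel's struct rlimit64 layout: two 64-bit fields */
	struct { unsigned long long rlim_cur, rlim_max; } lim;

	/* pid 0 == calling process, new_rlim == NULL means "query only" */
	if (syscall(__NR_prlimit64, 0, RLIMIT_NOFILE, NULL, &lim) == 0)
		printf("RLIMIT_NOFILE: cur=%llu max=%llu\n",
		       lim.rlim_cur, lim.rlim_max);
	else
		printf("prlimit64: %s\n", strerror(errno));
	return 0;
}

On a kernel without this commit the same binary simply reports ENOSYS, since syscall numbers past NR_syscalls fall through to the .Lbadsys path in entry.S below.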

arch/blackfin/include/asm/unistd.h
/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef __ASM_BFIN_UNISTD_H
#define __ASM_BFIN_UNISTD_H
/*
 * This file contains the system call numbers.
 */
#define __NR_restart_syscall 0
#define __NR_exit 1
#define __NR_fork 2
#define __NR_read 3
#define __NR_write 4
#define __NR_open 5
#define __NR_close 6
/* 7 __NR_waitpid obsolete */
#define __NR_creat 8
#define __NR_link 9
#define __NR_unlink 10
#define __NR_execve 11
#define __NR_chdir 12
#define __NR_time 13
#define __NR_mknod 14
#define __NR_chmod 15
#define __NR_chown 16
/* 17 __NR_break obsolete */
/* 18 __NR_oldstat obsolete */
#define __NR_lseek 19
#define __NR_getpid 20
#define __NR_mount 21
/* 22 __NR_umount obsolete */
#define __NR_setuid 23
#define __NR_getuid 24
#define __NR_stime 25
#define __NR_ptrace 26
#define __NR_alarm 27
/* 28 __NR_oldfstat obsolete */
#define __NR_pause 29
/* 30 __NR_utime obsolete */
/* 31 __NR_stty obsolete */
/* 32 __NR_gtty obsolete */
#define __NR_access 33
#define __NR_nice 34
/* 35 __NR_ftime obsolete */
#define __NR_sync 36
#define __NR_kill 37
#define __NR_rename 38
#define __NR_mkdir 39
#define __NR_rmdir 40
#define __NR_dup 41
#define __NR_pipe 42
#define __NR_times 43
/* 44 __NR_prof obsolete */
#define __NR_brk 45
#define __NR_setgid 46
#define __NR_getgid 47
/* 48 __NR_signal obsolete */
#define __NR_geteuid 49
#define __NR_getegid 50
#define __NR_acct 51
#define __NR_umount2 52
/* 53 __NR_lock obsolete */
#define __NR_ioctl 54
#define __NR_fcntl 55
/* 56 __NR_mpx obsolete */
#define __NR_setpgid 57
/* 58 __NR_ulimit obsolete */
/* 59 __NR_oldolduname obsolete */
#define __NR_umask 60
#define __NR_chroot 61
#define __NR_ustat 62
#define __NR_dup2 63
#define __NR_getppid 64
#define __NR_getpgrp 65
#define __NR_setsid 66
/* 67 __NR_sigaction obsolete */
#define __NR_sgetmask 68
#define __NR_ssetmask 69
#define __NR_setreuid 70
#define __NR_setregid 71
/* 72 __NR_sigsuspend obsolete */
/* 73 __NR_sigpending obsolete */
#define __NR_sethostname 74
#define __NR_setrlimit 75
/* 76 __NR_old_getrlimit obsolete */
#define __NR_getrusage 77
#define __NR_gettimeofday 78
#define __NR_settimeofday 79
#define __NR_getgroups 80
#define __NR_setgroups 81
/* 82 __NR_select obsolete */
#define __NR_symlink 83
/* 84 __NR_oldlstat obsolete */
#define __NR_readlink 85
/* 86 __NR_uselib obsolete */
/* 87 __NR_swapon obsolete */
#define __NR_reboot 88
/* 89 __NR_readdir obsolete */
/* 90 __NR_mmap obsolete */
#define __NR_munmap 91
#define __NR_truncate 92
#define __NR_ftruncate 93
#define __NR_fchmod 94
#define __NR_fchown 95
#define __NR_getpriority 96
#define __NR_setpriority 97
/* 98 __NR_profil obsolete */
#define __NR_statfs 99
#define __NR_fstatfs 100
/* 101 __NR_ioperm */
/* 102 __NR_socketcall obsolete */
#define __NR_syslog 103
#define __NR_setitimer 104
#define __NR_getitimer 105
#define __NR_stat 106
#define __NR_lstat 107
#define __NR_fstat 108
/* 109 __NR_olduname obsolete */
/* 110 __NR_iopl obsolete */
#define __NR_vhangup 111
/* 112 __NR_idle obsolete */
/* 113 __NR_vm86old */
#define __NR_wait4 114
/* 115 __NR_swapoff obsolete */
#define __NR_sysinfo 116
/* 117 __NR_ipc obsolete */
#define __NR_fsync 118
/* 119 __NR_sigreturn obsolete */
#define __NR_clone 120
#define __NR_setdomainname 121
#define __NR_uname 122
/* 123 __NR_modify_ldt obsolete */
#define __NR_adjtimex 124
#define __NR_mprotect 125
/* 126 __NR_sigprocmask obsolete */
/* 127 __NR_create_module obsolete */
#define __NR_init_module 128
#define __NR_delete_module 129
/* 130 __NR_get_kernel_syms obsolete */
#define __NR_quotactl 131
#define __NR_getpgid 132
#define __NR_fchdir 133
#define __NR_bdflush 134
/* 135 was sysfs */
#define __NR_personality 136
/* 137 __NR_afs_syscall */
#define __NR_setfsuid 138
#define __NR_setfsgid 139
#define __NR__llseek 140
#define __NR_getdents 141
/* 142 __NR__newselect obsolete */
#define __NR_flock 143
/* 144 __NR_msync obsolete */
#define __NR_readv 145
#define __NR_writev 146
#define __NR_getsid 147
#define __NR_fdatasync 148
#define __NR__sysctl 149
/* 150 __NR_mlock */
/* 151 __NR_munlock */
/* 152 __NR_mlockall */
/* 153 __NR_munlockall */
#define __NR_sched_setparam 154
#define __NR_sched_getparam 155
#define __NR_sched_setscheduler 156
#define __NR_sched_getscheduler 157
#define __NR_sched_yield 158
#define __NR_sched_get_priority_max 159
#define __NR_sched_get_priority_min 160
#define __NR_sched_rr_get_interval 161
#define __NR_nanosleep 162
#define __NR_mremap 163
#define __NR_setresuid 164
#define __NR_getresuid 165
/* 166 __NR_vm86 */
/* 167 __NR_query_module */
/* 168 __NR_poll */
#define __NR_nfsservctl 169
#define __NR_setresgid 170
#define __NR_getresgid 171
#define __NR_prctl 172
#define __NR_rt_sigreturn 173
#define __NR_rt_sigaction 174
#define __NR_rt_sigprocmask 175
#define __NR_rt_sigpending 176
#define __NR_rt_sigtimedwait 177
#define __NR_rt_sigqueueinfo 178
#define __NR_rt_sigsuspend 179
#define __NR_pread 180
#define __NR_pwrite 181
#define __NR_lchown 182
#define __NR_getcwd 183
#define __NR_capget 184
#define __NR_capset 185
#define __NR_sigaltstack 186
#define __NR_sendfile 187
/* 188 __NR_getpmsg */
/* 189 __NR_putpmsg */
#define __NR_vfork 190
#define __NR_getrlimit 191
#define __NR_mmap2 192
#define __NR_truncate64 193
#define __NR_ftruncate64 194
#define __NR_stat64 195
#define __NR_lstat64 196
#define __NR_fstat64 197
#define __NR_chown32 198
#define __NR_getuid32 199
#define __NR_getgid32 200
#define __NR_geteuid32 201
#define __NR_getegid32 202
#define __NR_setreuid32 203
#define __NR_setregid32 204
#define __NR_getgroups32 205
#define __NR_setgroups32 206
#define __NR_fchown32 207
#define __NR_setresuid32 208
#define __NR_getresuid32 209
#define __NR_setresgid32 210
#define __NR_getresgid32 211
#define __NR_lchown32 212
#define __NR_setuid32 213
#define __NR_setgid32 214
#define __NR_setfsuid32 215
#define __NR_setfsgid32 216
#define __NR_pivot_root 217
/* 218 __NR_mincore */
/* 219 __NR_madvise */
#define __NR_getdents64 220
#define __NR_fcntl64 221
/* 222 reserved for TUX */
/* 223 reserved for TUX */
#define __NR_gettid 224
#define __NR_readahead 225
#define __NR_setxattr 226
#define __NR_lsetxattr 227
#define __NR_fsetxattr 228
#define __NR_getxattr 229
#define __NR_lgetxattr 230
#define __NR_fgetxattr 231
#define __NR_listxattr 232
#define __NR_llistxattr 233
#define __NR_flistxattr 234
#define __NR_removexattr 235
#define __NR_lremovexattr 236
#define __NR_fremovexattr 237
#define __NR_tkill 238
#define __NR_sendfile64 239
#define __NR_futex 240
#define __NR_sched_setaffinity 241
#define __NR_sched_getaffinity 242
/* 243 __NR_set_thread_area */
/* 244 __NR_get_thread_area */
#define __NR_io_setup 245
#define __NR_io_destroy 246
#define __NR_io_getevents 247
#define __NR_io_submit 248
#define __NR_io_cancel 249
/* 250 __NR_alloc_hugepages */
/* 251 __NR_free_hugepages */
#define __NR_exit_group 252
#define __NR_lookup_dcookie 253
#define __NR_bfin_spinlock 254

#define __NR_epoll_create 255
#define __NR_epoll_ctl 256
#define __NR_epoll_wait 257
/* 258 __NR_remap_file_pages */
#define __NR_set_tid_address 259
#define __NR_timer_create 260
#define __NR_timer_settime 261
#define __NR_timer_gettime 262
#define __NR_timer_getoverrun 263
#define __NR_timer_delete 264
#define __NR_clock_settime 265
#define __NR_clock_gettime 266
#define __NR_clock_getres 267
#define __NR_clock_nanosleep 268
#define __NR_statfs64 269
#define __NR_fstatfs64 270
#define __NR_tgkill 271
#define __NR_utimes 272
#define __NR_fadvise64_64 273
/* 274 __NR_vserver */
/* 275 __NR_mbind */
/* 276 __NR_get_mempolicy */
/* 277 __NR_set_mempolicy */
#define __NR_mq_open 278
#define __NR_mq_unlink 279
#define __NR_mq_timedsend 280
#define __NR_mq_timedreceive 281
#define __NR_mq_notify 282
#define __NR_mq_getsetattr 283
#define __NR_kexec_load 284
#define __NR_waitid 285
#define __NR_add_key 286
#define __NR_request_key 287
#define __NR_keyctl 288
#define __NR_ioprio_set 289
#define __NR_ioprio_get 290
#define __NR_inotify_init 291
#define __NR_inotify_add_watch 292
#define __NR_inotify_rm_watch 293
/* 294 __NR_migrate_pages */
#define __NR_openat 295
#define __NR_mkdirat 296
#define __NR_mknodat 297
#define __NR_fchownat 298
#define __NR_futimesat 299
#define __NR_fstatat64 300
#define __NR_unlinkat 301
#define __NR_renameat 302
#define __NR_linkat 303
#define __NR_symlinkat 304
#define __NR_readlinkat 305
#define __NR_fchmodat 306
#define __NR_faccessat 307
#define __NR_pselect6 308
#define __NR_ppoll 309
#define __NR_unshare 310

/* Blackfin private syscalls */
#define __NR_sram_alloc 311
#define __NR_sram_free 312
#define __NR_dma_memcpy 313

/* socket syscalls */
#define __NR_accept 314
#define __NR_bind 315
#define __NR_connect 316
#define __NR_getpeername 317
#define __NR_getsockname 318
#define __NR_getsockopt 319
#define __NR_listen 320
#define __NR_recv 321
#define __NR_recvfrom 322
#define __NR_recvmsg 323
#define __NR_send 324
#define __NR_sendmsg 325
#define __NR_sendto 326
#define __NR_setsockopt 327
#define __NR_shutdown 328
#define __NR_socket 329
#define __NR_socketpair 330

/* sysv ipc syscalls */
#define __NR_semctl 331
#define __NR_semget 332
#define __NR_semop 333
#define __NR_msgctl 334
#define __NR_msgget 335
#define __NR_msgrcv 336
#define __NR_msgsnd 337
#define __NR_shmat 338
#define __NR_shmctl 339
#define __NR_shmdt 340
#define __NR_shmget 341

#define __NR_splice 342
#define __NR_sync_file_range 343
#define __NR_tee 344
#define __NR_vmsplice 345

#define __NR_epoll_pwait 346
#define __NR_utimensat 347
#define __NR_signalfd 348
#define __NR_timerfd_create 349
#define __NR_eventfd 350
#define __NR_pread64 351
#define __NR_pwrite64 352
#define __NR_fadvise64 353
#define __NR_set_robust_list 354
#define __NR_get_robust_list 355
#define __NR_fallocate 356
#define __NR_semtimedop 357
#define __NR_timerfd_settime 358
#define __NR_timerfd_gettime 359
#define __NR_signalfd4 360
#define __NR_eventfd2 361
#define __NR_epoll_create1 362
#define __NR_dup3 363
#define __NR_pipe2 364
#define __NR_inotify_init1 365
#define __NR_preadv 366
#define __NR_pwritev 367
#define __NR_rt_tgsigqueueinfo 368
#define __NR_perf_event_open 369
#define __NR_recvmmsg 370
+#define __NR_fanotify_init 371
+#define __NR_fanotify_mark 372
+#define __NR_prlimit64 373

-#define __NR_syscall 371
+#define __NR_syscall 374
#define NR_syscalls __NR_syscall

/* Old optional stuff no one actually uses */
#define __IGNORE_sysfs
#define __IGNORE_uselib

/* Implement the newer interfaces */
#define __IGNORE_mmap
#define __IGNORE_poll
#define __IGNORE_select
#define __IGNORE_utime

/* Not relevant on no-mmu */
#define __IGNORE_swapon
#define __IGNORE_swapoff
#define __IGNORE_msync
#define __IGNORE_mlock
#define __IGNORE_munlock
#define __IGNORE_mlockall
#define __IGNORE_munlockall
#define __IGNORE_mincore
#define __IGNORE_madvise
#define __IGNORE_remap_file_pages
#define __IGNORE_mbind
#define __IGNORE_get_mempolicy
#define __IGNORE_set_mempolicy
#define __IGNORE_migrate_pages
#define __IGNORE_move_pages
#define __IGNORE_getcpu

#ifdef __KERNEL__
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
#define __ARCH_WANT_SYS_SGETMASK
#define __ARCH_WANT_SYS_TIME
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND

/*
 * "Conditional" syscalls
 *
 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
 * but it doesn't work on all toolchains, so we just do it by hand
 */
#define cond_syscall(x) asm(".weak\t_" #x "\n\t.set\t_" #x ",_sys_ni_syscall");

#endif /* __KERNEL__ */

#endif /* __ASM_BFIN_UNISTD_H */

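A side note on the cond_syscall() definition at the bottom of the header above: it is what lets the syscall table reference entries such as the new fanotify ones even when they are compiled out. For illustration only (assuming the usual cond_syscall(sys_fanotify_init) line in kernel/sys_ni.c), the macro expands to a weak assembly alias:

/* illustration, not part of the diff: expansion of the header's macro */
cond_syscall(sys_fanotify_init);
/* ... preprocesses to (note the Blackfin leading-underscore symbol names):
 *
 *	asm(".weak\t_sys_fanotify_init\n\t.set\t_sys_fanotify_init,_sys_ni_syscall");
 *
 * so when fanotify is not built in, _sys_fanotify_init resolves to
 * _sys_ni_syscall and the call returns -ENOSYS instead of failing to link.
 */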
arch/blackfin/mach-common/entry.S
/*
 * Contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all
 * interrupts and faults that can result in a task-switch.
 *
 * Copyright 2005-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

/* NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/unistd.h>
#include <asm/blackfin.h>
#include <asm/errno.h>
#include <asm/fixed_code.h>
#include <asm/thread_info.h> /* TIF_NEED_RESCHED */
#include <asm/asm-offsets.h>
#include <asm/trace.h>
#include <asm/traps.h>

#include <asm/context.S>

#if defined(CONFIG_BFIN_SCRATCH_REG_RETN)
# define EX_SCRATCH_REG RETN
#elif defined(CONFIG_BFIN_SCRATCH_REG_RETE)
# define EX_SCRATCH_REG RETE
#else
# define EX_SCRATCH_REG CYCLES
#endif

#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
.section .l1.text
#else
.text
#endif

/* Slightly simplified and streamlined entry point for CPLB misses.
 * This one does not lower the level to IRQ5, and thus can be used to
 * patch up CPLB misses on the kernel stack.
 */
#if ANOMALY_05000261
#define _ex_dviol _ex_workaround_261
#define _ex_dmiss _ex_workaround_261
#define _ex_dmult _ex_workaround_261

ENTRY(_ex_workaround_261)
	/*
	 * Work around an anomaly: if we see a new DCPLB fault, return
	 * without doing anything. Then, if we get the same fault again,
	 * handle it.
	 */
	P4 = R7; /* Store EXCAUSE */

	GET_PDA(p5, r7);
	r7 = [p5 + PDA_LFRETX];
	r6 = retx;
	[p5 + PDA_LFRETX] = r6;
	cc = r6 == r7;
	if !cc jump _bfin_return_from_exception;
	/* fall through */
	R7 = P4;
	R6 = VEC_CPLB_M; /* Data CPLB Miss */
	cc = R6 == R7;
	if cc jump _ex_dcplb_miss (BP);
#ifdef CONFIG_MPU
	R6 = VEC_CPLB_VL; /* Data CPLB Violation */
	cc = R6 == R7;
	if cc jump _ex_dcplb_viol (BP);
#endif
	/* Handle Data CPLB Protection Violation
	 * and Data CPLB Multiple Hits - Linux Trap Zero
	 */
	jump _ex_trap_c;
ENDPROC(_ex_workaround_261)

#else
#ifdef CONFIG_MPU
#define _ex_dviol _ex_dcplb_viol
#else
#define _ex_dviol _ex_trap_c
#endif
#define _ex_dmiss _ex_dcplb_miss
#define _ex_dmult _ex_trap_c
#endif


ENTRY(_ex_dcplb_viol)
ENTRY(_ex_dcplb_miss)
ENTRY(_ex_icplb_miss)
	(R7:6,P5:4) = [sp++];
	/* We leave the previously pushed ASTAT on the stack. */
	SAVE_CONTEXT_CPLB

	/* We must load R1 here, _before_ DEBUG_HWTRACE_SAVE, since that
	 * will change the stack pointer. */
	R0 = SEQSTAT;
	R1 = SP;

	DEBUG_HWTRACE_SAVE(p5, r7)

	sp += -12;
	call _cplb_hdr;
	sp += 12;
	CC = R0 == 0;
	IF !CC JUMP _handle_bad_cplb;

#ifdef CONFIG_DEBUG_DOUBLEFAULT
	/* While we were processing this, did we double fault? */
	r7 = SEQSTAT; /* reason code is in bit 5:0 */
	r6.l = lo(SEQSTAT_EXCAUSE);
	r6.h = hi(SEQSTAT_EXCAUSE);
	r7 = r7 & r6;
	r6 = 0x25;
	CC = R7 == R6;
	if CC JUMP _double_fault;
#endif

	DEBUG_HWTRACE_RESTORE(p5, r7)
	RESTORE_CONTEXT_CPLB
	ASTAT = [SP++];
	SP = EX_SCRATCH_REG;
	rtx;
ENDPROC(_ex_icplb_miss)

ENTRY(_ex_syscall)
	raise 15; /* invoked by TRAP #0, for sys call */
	jump.s _bfin_return_from_exception;
ENDPROC(_ex_syscall)

ENTRY(_ex_single_step)
	/* If we just returned from an interrupt, the single step event is
	   for the RTI instruction. */
	r7 = retx;
	r6 = reti;
	cc = r7 == r6;
	if cc jump _bfin_return_from_exception;

#ifdef CONFIG_KGDB
	/* Don't do single step in hardware exception handler */
	p5.l = lo(IPEND);
	p5.h = hi(IPEND);
	r6 = [p5];
	cc = bittst(r6, 4);
	if cc jump _bfin_return_from_exception;
	cc = bittst(r6, 5);
	if cc jump _bfin_return_from_exception;

	/* skip single step if current interrupt priority is higher than
	 * that of the first instruction, from which gdb starts single step */
	r6 >>= 6;
	r7 = 10;
.Lfind_priority_start:
	cc = bittst(r6, 0);
	if cc jump .Lfind_priority_done;
	r6 >>= 1;
	r7 += -1;
	cc = r7 == 0;
	if cc jump .Lfind_priority_done;
	jump.s .Lfind_priority_start;
.Lfind_priority_done:
	p4.l = _kgdb_single_step;
	p4.h = _kgdb_single_step;
	r6 = [p4];
	cc = r6 == 0;
	if cc jump .Ldo_single_step;
	r6 += -1;
	cc = r6 < r7;
	if cc jump 1f;
.Ldo_single_step:
#else
	/* If we were in user mode, do the single step normally. */
	p5.l = lo(IPEND);
	p5.h = hi(IPEND);
	r6 = [p5];
	r7 = 0xffe0 (z);
	r7 = r7 & r6;
	cc = r7 == 0;
	if !cc jump 1f;
#endif
#ifdef CONFIG_EXACT_HWERR
	/* Read the ILAT and check to see if the process we are
	 * single stepping caused a previous hardware error
	 * If so, do not single step, (which lowers to IRQ5, and makes
	 * us miss the error).
	 */
	p5.l = lo(ILAT);
	p5.h = hi(ILAT);
	r7 = [p5];
	cc = bittst(r7, EVT_IVHW_P);
	if cc jump 1f;
#endif
	/* Single stepping only a single instruction, so clear the trace
	 * bit here. */
	r7 = syscfg;
	bitclr (r7, SYSCFG_SSSTEP_P);
	syscfg = R7;
	jump _ex_trap_c;

1:
	/*
	 * We were in an interrupt handler. By convention, all of them save
	 * SYSCFG with their first instruction, so by checking whether our
	 * RETX points at the entry point, we can determine whether to allow
	 * a single step, or whether to clear SYSCFG.
	 *
	 * First, find out the interrupt level and the event vector for it.
	 */
	p5.l = lo(EVT0);
	p5.h = hi(EVT0);
	p5 += -4;
2:
	r7 = rot r7 by -1;
	p5 += 4;
	if !cc jump 2b;

	/* What we actually do is test for the _second_ instruction in the
	 * IRQ handler. That way, if there are insns following the restore
	 * of SYSCFG after leaving the handler, we will not turn off SYSCFG
	 * for them. */

	r7 = [p5];
	r7 += 2;
	r6 = RETX;
	cc = R7 == R6;
	if !cc jump _bfin_return_from_exception;

	r7 = syscfg;
	bitclr (r7, SYSCFG_SSSTEP_P); /* Turn off single step */
	syscfg = R7;

	/* Fall through to _bfin_return_from_exception. */
ENDPROC(_ex_single_step)

ENTRY(_bfin_return_from_exception)
#if ANOMALY_05000257
	R7=LC0;
	LC0=R7;
	R7=LC1;
	LC1=R7;
#endif

#ifdef CONFIG_DEBUG_DOUBLEFAULT
	/* While we were processing the current exception,
	 * did we cause another, and double fault?
	 */
	r7 = SEQSTAT; /* reason code is in bit 5:0 */
	r6.l = lo(SEQSTAT_EXCAUSE);
	r6.h = hi(SEQSTAT_EXCAUSE);
	r7 = r7 & r6;
	r6 = VEC_UNCOV;
	CC = R7 == R6;
	if CC JUMP _double_fault;
#endif

	(R7:6,P5:4) = [sp++];
	ASTAT = [sp++];
	sp = EX_SCRATCH_REG;
	rtx;
ENDPROC(_bfin_return_from_exception)

ENTRY(_handle_bad_cplb)
	DEBUG_HWTRACE_RESTORE(p5, r7)
	/* To get here, we just tried and failed to change a CPLB
	 * so, handle things in trap_c (C code), by lowering to
	 * IRQ5, just like we normally do. Since this is not a
	 * "normal" return path, we have to do a lot of stuff to
	 * the stack to get ready so we can fall through - we
	 * need to make a CPLB exception look like a normal exception
	 */
	RESTORE_CONTEXT_CPLB
	/* ASTAT is still on the stack, where it is needed. */
	[--sp] = (R7:6,P5:4);

ENTRY(_ex_replaceable)
	nop;

ENTRY(_ex_trap_c)
	/* The only thing that has been saved in this context is
	 * (R7:6,P5:4), ASTAT & SP - don't use anything else
	 */

	GET_PDA(p5, r6);

	/* Make sure we are not in a double fault */
	p4.l = lo(IPEND);
	p4.h = hi(IPEND);
	r7 = [p4];
	CC = BITTST (r7, 5);
	if CC jump _double_fault;
	[p5 + PDA_EXIPEND] = r7;

	/* Call C code (trap_c) to handle the exception, which most
	 * likely involves sending a signal to the current process.
	 * To avoid double faults, lower our priority to IRQ5 first.
	 */
	r7.h = _exception_to_level5;
	r7.l = _exception_to_level5;
	p4.l = lo(EVT5);
	p4.h = hi(EVT5);
	[p4] = r7;
	csync;

	/*
	 * Save these registers, as they are only valid in exception context
	 * (where we are now - as soon as we defer to IRQ5, they can change)
	 * DCPLB_STATUS and ICPLB_STATUS are also only valid in EVT3,
	 * but they are not very interesting, so don't save them
	 */

	p4.l = lo(DCPLB_FAULT_ADDR);
	p4.h = hi(DCPLB_FAULT_ADDR);
	r7 = [p4];
	[p5 + PDA_DCPLB] = r7;

	p4.l = lo(ICPLB_FAULT_ADDR);
	p4.h = hi(ICPLB_FAULT_ADDR);
	r6 = [p4];
	[p5 + PDA_ICPLB] = r6;

	r6 = retx;
	[p5 + PDA_RETX] = r6;

	r6 = SEQSTAT;
	[p5 + PDA_SEQSTAT] = r6;

	/* Save the state of single stepping */
	r6 = SYSCFG;
	[p5 + PDA_SYSCFG] = r6;
	/* Clear it while we handle the exception in IRQ5 mode */
	BITCLR(r6, SYSCFG_SSSTEP_P);
	SYSCFG = r6;

	/* Save the current IMASK, since we change in order to jump to level 5 */
	cli r6;
	[p5 + PDA_EXIMASK] = r6;

	p4.l = lo(SAFE_USER_INSTRUCTION);
	p4.h = hi(SAFE_USER_INSTRUCTION);
	retx = p4;

	/* Disable all interrupts, but make sure level 5 is enabled so
	 * we can switch to that level.
	 */
	r6 = 0x3f;
	sti r6;

	/* In case interrupts are disabled IPEND[4] (global interrupt disable bit)
	 * clear it (re-enabling interrupts again) by the special sequence of pushing
	 * RETI onto the stack. This way we can lower ourselves to IVG5 even if the
	 * exception was taken after the interrupt handler was called but before it
	 * got a chance to enable global interrupts itself.
	 */
	[--sp] = reti;
	sp += 4;

	raise 5;
	jump.s _bfin_return_from_exception;
ENDPROC(_ex_trap_c)

/* We just realized we got an exception, while we were processing a different
 * exception. This is an unrecoverable event, so crash.
 * Note: this cannot be ENTRY() as we jump here with "if cc jump" ...
 */
ENTRY(_double_fault)
	/* Turn caches & protection off, to ensure we don't get any more
	 * double exceptions
	 */

	P4.L = LO(IMEM_CONTROL);
	P4.H = HI(IMEM_CONTROL);

	R5 = [P4]; /* Control Register*/
	BITCLR(R5,ENICPLB_P);
	CSYNC; /* Disabling of CPLBs should be preceded by a CSYNC */
	[P4] = R5;
	SSYNC;

	P4.L = LO(DMEM_CONTROL);
	P4.H = HI(DMEM_CONTROL);
	R5 = [P4];
	BITCLR(R5,ENDCPLB_P);
	CSYNC; /* Disabling of CPLBs should be preceded by a CSYNC */
	[P4] = R5;
	SSYNC;

	/* Fix up the stack */
	(R7:6,P5:4) = [sp++];
	ASTAT = [sp++];
	SP = EX_SCRATCH_REG;

	/* We should be out of the exception stack, and back down into
	 * kernel or user space stack
	 */
	SAVE_ALL_SYS

	/* The dumping functions expect the return address in the RETI
	 * slot. */
	r6 = retx;
	[sp + PT_PC] = r6;

	r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
	SP += -12;
	pseudo_long_call _double_fault_c, p5;
	SP += 12;
.L_double_fault_panic:
	JUMP .L_double_fault_panic

ENDPROC(_double_fault)

ENTRY(_exception_to_level5)
	SAVE_ALL_SYS

	GET_PDA(p5, r7); /* Fetch current PDA */
	r6 = [p5 + PDA_RETX];
	[sp + PT_PC] = r6;

	r6 = [p5 + PDA_SYSCFG];
	[sp + PT_SYSCFG] = r6;

	r6 = [p5 + PDA_SEQSTAT]; /* Read back seqstat */
	[sp + PT_SEQSTAT] = r6;

	/* Restore the hardware error vector. */
	r7.h = _evt_ivhw;
	r7.l = _evt_ivhw;
	p4.l = lo(EVT5);
	p4.h = hi(EVT5);
	[p4] = r7;
	csync;

#ifdef CONFIG_DEBUG_DOUBLEFAULT
	/* Now that we have the hardware error vector programmed properly
	 * we can re-enable interrupts (IPEND[4]), so if the _trap_c causes
	 * another hardware error, we can catch it (self-nesting).
	 */
	[--sp] = reti;
	sp += 4;
#endif

	r7 = [p5 + PDA_EXIPEND] /* Read the IPEND from the Exception state */
	[sp + PT_IPEND] = r7; /* Store IPEND onto the stack */

	r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
	SP += -12;
	pseudo_long_call _trap_c, p4;
	SP += 12;

	/* If interrupts were off during the exception (IPEND[4] = 1), turn them off
	 * before we return.
	 */
	CC = BITTST(r7, EVT_IRPTEN_P)
	if !CC jump 1f;
	/* this will load a random value into the reti register - but that is OK,
	 * since we do restore it to the correct value in the 'RESTORE_ALL_SYS' macro
	 */
	sp += -4;
	reti = [sp++];
1:
	/* restore the interrupt mask (IMASK) */
	r6 = [p5 + PDA_EXIMASK];
	sti r6;

	call _ret_from_exception;
	RESTORE_ALL_SYS
	rti;
ENDPROC(_exception_to_level5)

ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
	/* Since the kernel stack can be anywhere, it's not guaranteed to be
	 * covered by a CPLB. Switch to an exception stack; use RETN as a
	 * scratch register (for want of a better option).
	 */
	EX_SCRATCH_REG = sp;
	GET_PDA_SAFE(sp);
	sp = [sp + PDA_EXSTACK];
	/* Try to deal with syscalls quickly. */
	[--sp] = ASTAT;
	[--sp] = (R7:6,P5:4);

	ANOMALY_283_315_WORKAROUND(p5, r7)

#ifdef CONFIG_EXACT_HWERR
	/* Make sure all pending read/writes complete. This will ensure any
	 * accesses which could cause hardware errors complete, and signal
	 * the hardware before we do something silly, like crash the
	 * kernel. We don't need to work around anomaly 05000312, since
	 * we are already atomic
	 */
	ssync;
#endif

#ifdef CONFIG_DEBUG_DOUBLEFAULT
	/*
	 * Save these registers, as they are only valid in exception context
	 * (where we are now - as soon as we defer to IRQ5, they can change)
	 * DCPLB_STATUS and ICPLB_STATUS are also only valid in EVT3,
	 * but they are not very interesting, so don't save them
	 */

	GET_PDA(p5, r7);
	p4.l = lo(DCPLB_FAULT_ADDR);
	p4.h = hi(DCPLB_FAULT_ADDR);
	r7 = [p4];
	[p5 + PDA_DF_DCPLB] = r7;

	p4.l = lo(ICPLB_FAULT_ADDR);
	p4.h = hi(ICPLB_FAULT_ADDR);
	r7 = [p4];
	[p5 + PDA_DF_ICPLB] = r7;

	r7 = retx;
	[p5 + PDA_DF_RETX] = r7;

	r7 = SEQSTAT; /* reason code is in bit 5:0 */
	[p5 + PDA_DF_SEQSTAT] = r7;
#else
	r7 = SEQSTAT; /* reason code is in bit 5:0 */
#endif
	r6.l = lo(SEQSTAT_EXCAUSE);
	r6.h = hi(SEQSTAT_EXCAUSE);
	r7 = r7 & r6;
	p5.h = _ex_table;
	p5.l = _ex_table;
	p4 = r7;
	p5 = p5 + (p4 << 2);
	p4 = [p5];
	jump (p4);

.Lbadsys:
	r7 = -ENOSYS; /* sign-extending enough */
536 [sp + PT_R0] = r7; /* return value from system call */ 536 [sp + PT_R0] = r7; /* return value from system call */
537 jump .Lsyscall_really_exit; 537 jump .Lsyscall_really_exit;
538 ENDPROC(_trap) 538 ENDPROC(_trap)
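The tail of _trap above masks SEQSTAT down to its EXCAUSE field (bits 5:0) and does an indexed jump through _ex_table, the per-cause handler table defined near the end of this file. A minimal C sketch of that dispatch follows; it is illustrative only, and every name in it (ex_table, dispatch_excause, the stub handlers) is a placeholder rather than a kernel symbol.

	/*
	 * Sketch of the EXCAUSE dispatch done in _trap: mask SEQSTAT to bits 5:0,
	 * index a 64-entry table of handler pointers, and jump ("jump (p4)").
	 */
	#include <stdio.h>

	typedef void (*excause_handler_t)(void);

	static void handle_trap_c(void)  { puts("default _ex_trap_c-style handler"); }
	static void handle_syscall(void) { puts("_ex_syscall-style handler"); }

	static excause_handler_t ex_table[64] = {
		[0x00] = handle_syscall,
		/* all other slots fall back to the default handler below */
	};

	static void dispatch_excause(unsigned long seqstat)
	{
		unsigned long cause = seqstat & 0x3f;	/* SEQSTAT_EXCAUSE is bits 5:0 */
		excause_handler_t h = ex_table[cause];

		(h ? h : handle_trap_c)();		/* the "jump (p4)" step */
	}

	int main(void)
	{
		dispatch_excause(0x00);			/* Linux syscall */
		dispatch_excause(0x26);			/* e.g. a data CPLB miss */
		return 0;
	}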
539 539
540 ENTRY(_kernel_execve) 540 ENTRY(_kernel_execve)
541 link SIZEOF_PTREGS; 541 link SIZEOF_PTREGS;
542 p0 = sp; 542 p0 = sp;
543 r3 = SIZEOF_PTREGS / 4; 543 r3 = SIZEOF_PTREGS / 4;
544 r4 = 0(x); 544 r4 = 0(x);
545 .Lclear_regs: 545 .Lclear_regs:
546 [p0++] = r4; 546 [p0++] = r4;
547 r3 += -1; 547 r3 += -1;
548 cc = r3 == 0; 548 cc = r3 == 0;
549 if !cc jump .Lclear_regs (bp); 549 if !cc jump .Lclear_regs (bp);
550 550
551 p0 = sp; 551 p0 = sp;
552 sp += -16; 552 sp += -16;
553 [sp + 12] = p0; 553 [sp + 12] = p0;
554 pseudo_long_call _do_execve, p5; 554 pseudo_long_call _do_execve, p5;
555 SP += 16; 555 SP += 16;
556 cc = r0 == 0; 556 cc = r0 == 0;
557 if ! cc jump .Lexecve_failed; 557 if ! cc jump .Lexecve_failed;
558 /* Success. Copy our temporary pt_regs to the top of the kernel 558 /* Success. Copy our temporary pt_regs to the top of the kernel
559 * stack and do a normal exception return. 559 * stack and do a normal exception return.
560 */ 560 */
561 r1 = sp; 561 r1 = sp;
562 r0 = (-KERNEL_STACK_SIZE) (x); 562 r0 = (-KERNEL_STACK_SIZE) (x);
563 r1 = r1 & r0; 563 r1 = r1 & r0;
564 p2 = r1; 564 p2 = r1;
565 p3 = [p2]; 565 p3 = [p2];
566 r0 = KERNEL_STACK_SIZE - 4 (z); 566 r0 = KERNEL_STACK_SIZE - 4 (z);
567 p1 = r0; 567 p1 = r0;
568 p1 = p1 + p2; 568 p1 = p1 + p2;
569 569
570 p0 = fp; 570 p0 = fp;
571 r4 = [p0--]; 571 r4 = [p0--];
572 r3 = SIZEOF_PTREGS / 4; 572 r3 = SIZEOF_PTREGS / 4;
573 .Lcopy_regs: 573 .Lcopy_regs:
574 r4 = [p0--]; 574 r4 = [p0--];
575 [p1--] = r4; 575 [p1--] = r4;
576 r3 += -1; 576 r3 += -1;
577 cc = r3 == 0; 577 cc = r3 == 0;
578 if ! cc jump .Lcopy_regs (bp); 578 if ! cc jump .Lcopy_regs (bp);
579 579
580 r0 = (KERNEL_STACK_SIZE - SIZEOF_PTREGS) (z); 580 r0 = (KERNEL_STACK_SIZE - SIZEOF_PTREGS) (z);
581 p1 = r0; 581 p1 = r0;
582 p1 = p1 + p2; 582 p1 = p1 + p2;
583 sp = p1; 583 sp = p1;
584 r0 = syscfg; 584 r0 = syscfg;
585 [SP + PT_SYSCFG] = r0; 585 [SP + PT_SYSCFG] = r0;
586 [p3 + (TASK_THREAD + THREAD_KSP)] = sp; 586 [p3 + (TASK_THREAD + THREAD_KSP)] = sp;
587 587
588 RESTORE_CONTEXT; 588 RESTORE_CONTEXT;
589 rti; 589 rti;
590 .Lexecve_failed: 590 .Lexecve_failed:
591 unlink; 591 unlink;
592 rts; 592 rts;
593 ENDPROC(_kernel_execve) 593 ENDPROC(_kernel_execve)
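Both _kernel_execve above and _system_call below locate the current thread_info / kernel stack base by masking the stack pointer with the stack-size alignment (ALIGN_PAGE_MASK, or the equivalent r1 & -KERNEL_STACK_SIZE used here). A tiny standalone illustration of that masking trick; the 8 KiB stack size is an assumption for the demo, not a statement about the Blackfin configuration.

	/* Demonstrates the "mask SP down to the stack base" trick used above. */
	#include <stdio.h>

	#define STACK_SIZE 8192ul		/* assumed power-of-two kernel stack size */

	static unsigned long stack_base(unsigned long sp)
	{
		return sp & ~(STACK_SIZE - 1);	/* same as sp & -STACK_SIZE */
	}

	int main(void)
	{
		unsigned long sp = 0x0072f9a4ul;	/* arbitrary address inside the stack */

		printf("base = 0x%08lx, top word = 0x%08lx\n",
		       stack_base(sp), stack_base(sp) + STACK_SIZE - 4);
		return 0;
	}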
594 594
595 ENTRY(_system_call) 595 ENTRY(_system_call)
596 /* Store IPEND */ 596 /* Store IPEND */
597 p2.l = lo(IPEND); 597 p2.l = lo(IPEND);
598 p2.h = hi(IPEND); 598 p2.h = hi(IPEND);
599 csync; 599 csync;
600 r0 = [p2]; 600 r0 = [p2];
601 [sp + PT_IPEND] = r0; 601 [sp + PT_IPEND] = r0;
602 602
603 /* Store RETS for now */ 603 /* Store RETS for now */
604 r0 = rets; 604 r0 = rets;
605 [sp + PT_RESERVED] = r0; 605 [sp + PT_RESERVED] = r0;
606 /* Set the stack for the current process */ 606 /* Set the stack for the current process */
607 r7 = sp; 607 r7 = sp;
608 r6.l = lo(ALIGN_PAGE_MASK); 608 r6.l = lo(ALIGN_PAGE_MASK);
609 r6.h = hi(ALIGN_PAGE_MASK); 609 r6.h = hi(ALIGN_PAGE_MASK);
610 r7 = r7 & r6; /* thread_info */ 610 r7 = r7 & r6; /* thread_info */
611 p2 = r7; 611 p2 = r7;
612 p2 = [p2]; 612 p2 = [p2];
613 613
614 [p2+(TASK_THREAD+THREAD_KSP)] = sp; 614 [p2+(TASK_THREAD+THREAD_KSP)] = sp;
615 #ifdef CONFIG_IPIPE 615 #ifdef CONFIG_IPIPE
616 r0 = sp; 616 r0 = sp;
617 SP += -12; 617 SP += -12;
618 call ___ipipe_syscall_root; 618 call ___ipipe_syscall_root;
619 SP += 12; 619 SP += 12;
620 cc = r0 == 1; 620 cc = r0 == 1;
621 if cc jump .Lsyscall_really_exit; 621 if cc jump .Lsyscall_really_exit;
622 cc = r0 == -1; 622 cc = r0 == -1;
623 if cc jump .Lresume_userspace; 623 if cc jump .Lresume_userspace;
624 r3 = [sp + PT_R3]; 624 r3 = [sp + PT_R3];
625 r4 = [sp + PT_R4]; 625 r4 = [sp + PT_R4];
626 p0 = [sp + PT_ORIG_P0]; 626 p0 = [sp + PT_ORIG_P0];
627 #endif /* CONFIG_IPIPE */ 627 #endif /* CONFIG_IPIPE */
628 628
629 /* are we tracing syscalls? */ 629 /* are we tracing syscalls? */
630 r7 = sp; 630 r7 = sp;
631 r6.l = lo(ALIGN_PAGE_MASK); 631 r6.l = lo(ALIGN_PAGE_MASK);
632 r6.h = hi(ALIGN_PAGE_MASK); 632 r6.h = hi(ALIGN_PAGE_MASK);
633 r7 = r7 & r6; 633 r7 = r7 & r6;
634 p2 = r7; 634 p2 = r7;
635 r7 = [p2+TI_FLAGS]; 635 r7 = [p2+TI_FLAGS];
636 CC = BITTST(r7,TIF_SYSCALL_TRACE); 636 CC = BITTST(r7,TIF_SYSCALL_TRACE);
637 if CC JUMP _sys_trace; 637 if CC JUMP _sys_trace;
638 CC = BITTST(r7,TIF_SINGLESTEP); 638 CC = BITTST(r7,TIF_SINGLESTEP);
639 if CC JUMP _sys_trace; 639 if CC JUMP _sys_trace;
640 640
641 /* Make sure the system call # is valid */ 641 /* Make sure the system call # is valid */
642 p4 = __NR_syscall; 642 p4 = __NR_syscall;
643 /* System call number is passed in P0 */ 643 /* System call number is passed in P0 */
644 cc = p4 <= p0; 644 cc = p4 <= p0;
645 if cc jump .Lbadsys; 645 if cc jump .Lbadsys;
646 646
647 /* Execute the appropriate system call */ 647 /* Execute the appropriate system call */
648 648
649 p4 = p0; 649 p4 = p0;
650 p5.l = _sys_call_table; 650 p5.l = _sys_call_table;
651 p5.h = _sys_call_table; 651 p5.h = _sys_call_table;
652 p5 = p5 + (p4 << 2); 652 p5 = p5 + (p4 << 2);
653 r0 = [sp + PT_R0]; 653 r0 = [sp + PT_R0];
654 r1 = [sp + PT_R1]; 654 r1 = [sp + PT_R1];
655 r2 = [sp + PT_R2]; 655 r2 = [sp + PT_R2];
656 p5 = [p5]; 656 p5 = [p5];
657 657
658 [--sp] = r5; 658 [--sp] = r5;
659 [--sp] = r4; 659 [--sp] = r4;
660 [--sp] = r3; 660 [--sp] = r3;
661 SP += -12; 661 SP += -12;
662 call (p5); 662 call (p5);
663 SP += 24; 663 SP += 24;
664 [sp + PT_R0] = r0; 664 [sp + PT_R0] = r0;
665 665
666 .Lresume_userspace: 666 .Lresume_userspace:
667 r7 = sp; 667 r7 = sp;
668 r4.l = lo(ALIGN_PAGE_MASK); 668 r4.l = lo(ALIGN_PAGE_MASK);
669 r4.h = hi(ALIGN_PAGE_MASK); 669 r4.h = hi(ALIGN_PAGE_MASK);
670 r7 = r7 & r4; /* thread_info->flags */ 670 r7 = r7 & r4; /* thread_info->flags */
671 p5 = r7; 671 p5 = r7;
672 .Lresume_userspace_1: 672 .Lresume_userspace_1:
673 /* Disable interrupts. */ 673 /* Disable interrupts. */
674 [--sp] = reti; 674 [--sp] = reti;
675 reti = [sp++]; 675 reti = [sp++];
676 676
677 r7 = [p5 + TI_FLAGS]; 677 r7 = [p5 + TI_FLAGS];
678 r4.l = lo(_TIF_WORK_MASK); 678 r4.l = lo(_TIF_WORK_MASK);
679 r4.h = hi(_TIF_WORK_MASK); 679 r4.h = hi(_TIF_WORK_MASK);
680 r7 = r7 & r4; 680 r7 = r7 & r4;
681 681
682 .Lsyscall_resched: 682 .Lsyscall_resched:
683 #ifdef CONFIG_IPIPE 683 #ifdef CONFIG_IPIPE
684 cc = BITTST(r7, TIF_IRQ_SYNC); 684 cc = BITTST(r7, TIF_IRQ_SYNC);
685 if !cc jump .Lsyscall_no_irqsync; 685 if !cc jump .Lsyscall_no_irqsync;
686 /* 686 /*
687 * Clear IPEND[4] manually to undo what resume_userspace_1 just did; 687 * Clear IPEND[4] manually to undo what resume_userspace_1 just did;
688 * we need this so that high priority domain interrupts may still 688 * we need this so that high priority domain interrupts may still
689 * preempt the current domain while the pipeline log is being played 689 * preempt the current domain while the pipeline log is being played
690 * back. 690 * back.
691 */ 691 */
692 [--sp] = reti; 692 [--sp] = reti;
693 SP += 4; /* don't merge with next insn to keep the pattern obvious */ 693 SP += 4; /* don't merge with next insn to keep the pattern obvious */
694 SP += -12; 694 SP += -12;
695 call ___ipipe_sync_root; 695 call ___ipipe_sync_root;
696 SP += 12; 696 SP += 12;
697 jump .Lresume_userspace_1; 697 jump .Lresume_userspace_1;
698 .Lsyscall_no_irqsync: 698 .Lsyscall_no_irqsync:
699 #endif 699 #endif
700 cc = BITTST(r7, TIF_NEED_RESCHED); 700 cc = BITTST(r7, TIF_NEED_RESCHED);
701 if !cc jump .Lsyscall_sigpending; 701 if !cc jump .Lsyscall_sigpending;
702 702
703 /* Reenable interrupts. */ 703 /* Reenable interrupts. */
704 [--sp] = reti; 704 [--sp] = reti;
705 sp += 4; 705 sp += 4;
706 706
707 SP += -12; 707 SP += -12;
708 pseudo_long_call _schedule, p4; 708 pseudo_long_call _schedule, p4;
709 SP += 12; 709 SP += 12;
710 710
711 jump .Lresume_userspace_1; 711 jump .Lresume_userspace_1;
712 712
713 .Lsyscall_sigpending: 713 .Lsyscall_sigpending:
714 cc = BITTST(r7, TIF_RESTORE_SIGMASK); 714 cc = BITTST(r7, TIF_RESTORE_SIGMASK);
715 if cc jump .Lsyscall_do_signals; 715 if cc jump .Lsyscall_do_signals;
716 cc = BITTST(r7, TIF_SIGPENDING); 716 cc = BITTST(r7, TIF_SIGPENDING);
717 if cc jump .Lsyscall_do_signals; 717 if cc jump .Lsyscall_do_signals;
718 cc = BITTST(r7, TIF_NOTIFY_RESUME); 718 cc = BITTST(r7, TIF_NOTIFY_RESUME);
719 if !cc jump .Lsyscall_really_exit; 719 if !cc jump .Lsyscall_really_exit;
720 .Lsyscall_do_signals: 720 .Lsyscall_do_signals:
721 /* Reenable interrupts. */ 721 /* Reenable interrupts. */
722 [--sp] = reti; 722 [--sp] = reti;
723 sp += 4; 723 sp += 4;
724 724
725 r0 = sp; 725 r0 = sp;
726 SP += -12; 726 SP += -12;
727 pseudo_long_call _do_notify_resume, p5; 727 pseudo_long_call _do_notify_resume, p5;
728 SP += 12; 728 SP += 12;
729 729
730 .Lsyscall_really_exit: 730 .Lsyscall_really_exit:
731 r5 = [sp + PT_RESERVED]; 731 r5 = [sp + PT_RESERVED];
732 rets = r5; 732 rets = r5;
733 rts; 733 rts;
734 ENDPROC(_system_call) 734 ENDPROC(_system_call)
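In C terms the fast path of _system_call is a bounds check of the number in P0 against __NR_syscall followed by an indirect call through _sys_call_table with the six argument registers saved in pt_regs (r0-r2 passed in registers, r3-r5 pushed before the call). The sketch below only illustrates that shape; the table, struct layout, and stub are placeholders, not the kernel's declarations.

	/* Hedged, self-contained sketch of the syscall dispatch above. */
	#include <errno.h>
	#include <stdio.h>

	typedef long (*syscall_fn_t)(long, long, long, long, long, long);

	static long stub_getpid(long a, long b, long c, long d, long e, long f)
	{
		(void)a; (void)b; (void)c; (void)d; (void)e; (void)f;
		return 1234;				/* pretend pid */
	}

	/* Tiny stand-in for _sys_call_table; the real table has one slot per __NR_*. */
	static syscall_fn_t sys_call_table_sketch[] = { stub_getpid };
	#define NR_SYSCALL_SKETCH \
		(sizeof(sys_call_table_sketch) / sizeof(sys_call_table_sketch[0]))

	struct regs_sketch { long r0, r1, r2, r3, r4, r5; };

	static long dispatch_syscall(unsigned long nr, const struct regs_sketch *regs)
	{
		if (nr >= NR_SYSCALL_SKETCH)		/* the "cc = p4 <= p0" bounds check */
			return -ENOSYS;			/* .Lbadsys path */

		/* "call (p5)": r0-r2 in registers, r3-r5 pushed onto the stack first */
		return sys_call_table_sketch[nr](regs->r0, regs->r1, regs->r2,
						 regs->r3, regs->r4, regs->r5);
	}

	int main(void)
	{
		struct regs_sketch regs = { 0, 0, 0, 0, 0, 0 };

		printf("nr=0  -> %ld\n", dispatch_syscall(0, &regs));
		printf("nr=99 -> %ld\n", dispatch_syscall(99, &regs));
		return 0;
	}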
735 735
736 /* Do not mark as ENTRY() to avoid error in assembler ... 736 /* Do not mark as ENTRY() to avoid error in assembler ...
737 * this symbol need not be global anyway, so ... 737 * this symbol need not be global anyway, so ...
738 */ 738 */
739 _sys_trace: 739 _sys_trace:
740 r0 = sp; 740 r0 = sp;
741 pseudo_long_call _syscall_trace_enter, p5; 741 pseudo_long_call _syscall_trace_enter, p5;
742 742
743 /* Make sure the system call # is valid */ 743 /* Make sure the system call # is valid */
744 p4 = [SP + PT_P0]; 744 p4 = [SP + PT_P0];
745 p3 = __NR_syscall; 745 p3 = __NR_syscall;
746 cc = p3 <= p4; 746 cc = p3 <= p4;
747 r0 = -ENOSYS; 747 r0 = -ENOSYS;
748 if cc jump .Lsys_trace_badsys; 748 if cc jump .Lsys_trace_badsys;
749 749
750 /* Execute the appropriate system call */ 750 /* Execute the appropriate system call */
751 p5.l = _sys_call_table; 751 p5.l = _sys_call_table;
752 p5.h = _sys_call_table; 752 p5.h = _sys_call_table;
753 p5 = p5 + (p4 << 2); 753 p5 = p5 + (p4 << 2);
754 r0 = [sp + PT_R0]; 754 r0 = [sp + PT_R0];
755 r1 = [sp + PT_R1]; 755 r1 = [sp + PT_R1];
756 r2 = [sp + PT_R2]; 756 r2 = [sp + PT_R2];
757 r3 = [sp + PT_R3]; 757 r3 = [sp + PT_R3];
758 r4 = [sp + PT_R4]; 758 r4 = [sp + PT_R4];
759 r5 = [sp + PT_R5]; 759 r5 = [sp + PT_R5];
760 p5 = [p5]; 760 p5 = [p5];
761 761
762 [--sp] = r5; 762 [--sp] = r5;
763 [--sp] = r4; 763 [--sp] = r4;
764 [--sp] = r3; 764 [--sp] = r3;
765 SP += -12; 765 SP += -12;
766 call (p5); 766 call (p5);
767 SP += 24; 767 SP += 24;
768 .Lsys_trace_badsys: 768 .Lsys_trace_badsys:
769 [sp + PT_R0] = r0; 769 [sp + PT_R0] = r0;
770 770
771 r0 = sp; 771 r0 = sp;
772 pseudo_long_call _syscall_trace_leave, p5; 772 pseudo_long_call _syscall_trace_leave, p5;
773 jump .Lresume_userspace; 773 jump .Lresume_userspace;
774 ENDPROC(_sys_trace) 774 ENDPROC(_sys_trace)
775 775
776 ENTRY(_resume) 776 ENTRY(_resume)
777 /* 777 /*
778 * Beware - when entering resume, prev (the current task) is 778 * Beware - when entering resume, prev (the current task) is
779 * in r0, next (the new task) is in r1. 779 * in r0, next (the new task) is in r1.
780 */ 780 */
781 p0 = r0; 781 p0 = r0;
782 p1 = r1; 782 p1 = r1;
783 [--sp] = rets; 783 [--sp] = rets;
784 [--sp] = fp; 784 [--sp] = fp;
785 [--sp] = (r7:4, p5:3); 785 [--sp] = (r7:4, p5:3);
786 786
787 /* save usp */ 787 /* save usp */
788 p2 = usp; 788 p2 = usp;
789 [p0+(TASK_THREAD+THREAD_USP)] = p2; 789 [p0+(TASK_THREAD+THREAD_USP)] = p2;
790 790
791 /* save current kernel stack pointer */ 791 /* save current kernel stack pointer */
792 [p0+(TASK_THREAD+THREAD_KSP)] = sp; 792 [p0+(TASK_THREAD+THREAD_KSP)] = sp;
793 793
794 /* save program counter */ 794 /* save program counter */
795 r1.l = _new_old_task; 795 r1.l = _new_old_task;
796 r1.h = _new_old_task; 796 r1.h = _new_old_task;
797 [p0+(TASK_THREAD+THREAD_PC)] = r1; 797 [p0+(TASK_THREAD+THREAD_PC)] = r1;
798 798
799 /* restore the kernel stack pointer */ 799 /* restore the kernel stack pointer */
800 sp = [p1+(TASK_THREAD+THREAD_KSP)]; 800 sp = [p1+(TASK_THREAD+THREAD_KSP)];
801 801
802 /* restore user stack pointer */ 802 /* restore user stack pointer */
803 p0 = [p1+(TASK_THREAD+THREAD_USP)]; 803 p0 = [p1+(TASK_THREAD+THREAD_USP)];
804 usp = p0; 804 usp = p0;
805 805
806 /* restore pc */ 806 /* restore pc */
807 p0 = [p1+(TASK_THREAD+THREAD_PC)]; 807 p0 = [p1+(TASK_THREAD+THREAD_PC)];
808 jump (p0); 808 jump (p0);
809 809
810 /* 810 /*
811 * The following code actually ends up in the new (old) task. 811 * The following code actually ends up in the new (old) task.
812 */ 812 */
813 813
814 _new_old_task: 814 _new_old_task:
815 (r7:4, p5:3) = [sp++]; 815 (r7:4, p5:3) = [sp++];
816 fp = [sp++]; 816 fp = [sp++];
817 rets = [sp++]; 817 rets = [sp++];
818 818
819 /* 819 /*
820 * When we come out of resume, r0 carries the "old" task, because we are 820 * When we come out of resume, r0 carries the "old" task, because we are
821 * in the "new" task. 821 * in the "new" task.
822 */ 822 */
823 rts; 823 rts;
824 ENDPROC(_resume) 824 ENDPROC(_resume)
825 825
826 ENTRY(_ret_from_exception) 826 ENTRY(_ret_from_exception)
827 #ifdef CONFIG_IPIPE 827 #ifdef CONFIG_IPIPE
828 p2.l = _ipipe_percpu_domain; 828 p2.l = _ipipe_percpu_domain;
829 p2.h = _ipipe_percpu_domain; 829 p2.h = _ipipe_percpu_domain;
830 r0.l = _ipipe_root; 830 r0.l = _ipipe_root;
831 r0.h = _ipipe_root; 831 r0.h = _ipipe_root;
832 r2 = [p2]; 832 r2 = [p2];
833 cc = r0 == r2; 833 cc = r0 == r2;
834 if !cc jump 4f; /* not on behalf of the root domain, get out */ 834 if !cc jump 4f; /* not on behalf of the root domain, get out */
835 #endif /* CONFIG_IPIPE */ 835 #endif /* CONFIG_IPIPE */
836 p2.l = lo(IPEND); 836 p2.l = lo(IPEND);
837 p2.h = hi(IPEND); 837 p2.h = hi(IPEND);
838 838
839 csync; 839 csync;
840 r0 = [p2]; 840 r0 = [p2];
841 [sp + PT_IPEND] = r0; 841 [sp + PT_IPEND] = r0;
842 842
843 1: 843 1:
844 r2 = LO(~0x37) (Z); 844 r2 = LO(~0x37) (Z);
845 r0 = r2 & r0; 845 r0 = r2 & r0;
846 cc = r0 == 0; 846 cc = r0 == 0;
847 if !cc jump 4f; /* if not return to user mode, get out */ 847 if !cc jump 4f; /* if not return to user mode, get out */
848 848
849 /* Make sure any pending system call or deferred exception 849 /* Make sure any pending system call or deferred exception
850 * return in ILAT for this process gets executed; otherwise, 850 * return in ILAT for this process gets executed; otherwise,
851 * if a context switch happens, the system call of the 851 * if a context switch happens, the system call of the
852 * first process (i.e. in ILAT) will be carried 852 * first process (i.e. in ILAT) will be carried
853 * forward to the switched process. 853 * forward to the switched process.
854 */ 854 */
855 855
856 p2.l = lo(ILAT); 856 p2.l = lo(ILAT);
857 p2.h = hi(ILAT); 857 p2.h = hi(ILAT);
858 r0 = [p2]; 858 r0 = [p2];
859 r1 = (EVT_IVG14 | EVT_IVG15) (z); 859 r1 = (EVT_IVG14 | EVT_IVG15) (z);
860 r0 = r0 & r1; 860 r0 = r0 & r1;
861 cc = r0 == 0; 861 cc = r0 == 0;
862 if !cc jump 5f; 862 if !cc jump 5f;
863 863
864 /* Set the stack for the current process */ 864 /* Set the stack for the current process */
865 r7 = sp; 865 r7 = sp;
866 r4.l = lo(ALIGN_PAGE_MASK); 866 r4.l = lo(ALIGN_PAGE_MASK);
867 r4.h = hi(ALIGN_PAGE_MASK); 867 r4.h = hi(ALIGN_PAGE_MASK);
868 r7 = r7 & r4; /* thread_info->flags */ 868 r7 = r7 & r4; /* thread_info->flags */
869 p5 = r7; 869 p5 = r7;
870 r7 = [p5 + TI_FLAGS]; 870 r7 = [p5 + TI_FLAGS];
871 r4.l = lo(_TIF_WORK_MASK); 871 r4.l = lo(_TIF_WORK_MASK);
872 r4.h = hi(_TIF_WORK_MASK); 872 r4.h = hi(_TIF_WORK_MASK);
873 r7 = r7 & r4; 873 r7 = r7 & r4;
874 cc = r7 == 0; 874 cc = r7 == 0;
875 if cc jump 4f; 875 if cc jump 4f;
876 876
877 p0.l = lo(EVT15); 877 p0.l = lo(EVT15);
878 p0.h = hi(EVT15); 878 p0.h = hi(EVT15);
879 p1.l = _schedule_and_signal; 879 p1.l = _schedule_and_signal;
880 p1.h = _schedule_and_signal; 880 p1.h = _schedule_and_signal;
881 [p0] = p1; 881 [p0] = p1;
882 csync; 882 csync;
883 raise 15; /* raise evt15 to do signal or reschedule */ 883 raise 15; /* raise evt15 to do signal or reschedule */
884 4: 884 4:
885 r0 = syscfg; 885 r0 = syscfg;
886 bitclr(r0, SYSCFG_SSSTEP_P); /* Turn off single step */ 886 bitclr(r0, SYSCFG_SSSTEP_P); /* Turn off single step */
887 syscfg = r0; 887 syscfg = r0;
888 5: 888 5:
889 rts; 889 rts;
890 ENDPROC(_ret_from_exception) 890 ENDPROC(_ret_from_exception)
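The decision logic in _ret_from_exception is easier to follow in C: it raises EVT15 (redirected to _schedule_and_signal) only when IPEND says we would drop back to user mode, nothing is already latched in ILAT for IVG14/IVG15, and the thread_info flags contain work from _TIF_WORK_MASK. The sketch below is purely illustrative; the mask values mirror the constants used above, and everything else is a placeholder.

	/* Illustrative sketch of the "should we raise EVT15?" decision above. */
	#include <stdbool.h>
	#include <stdio.h>

	#define IPEND_IGNORE_MASK 0x37u			/* the ~0x37 test in the code */
	#define ILAT_IVG14_IVG15  ((1u << 14) | (1u << 15))

	static bool should_raise_evt15(unsigned ipend, unsigned ilat,
				       unsigned ti_flags, unsigned work_mask)
	{
		if (ipend & ~IPEND_IGNORE_MASK)		/* not returning to user mode */
			return false;
		if (ilat & ILAT_IVG14_IVG15)		/* a deferred return is already latched */
			return false;
		return (ti_flags & work_mask) != 0;	/* reschedule/signal work queued */
	}

	int main(void)
	{
		printf("%d\n", should_raise_evt15(0x00, 0x0, 0x2, 0xff));	/* work pending: 1 */
		printf("%d\n", should_raise_evt15(0x40, 0x0, 0x2, 0xff));	/* nested IRQ:   0 */
		return 0;
	}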
891 891
892 #ifdef CONFIG_IPIPE 892 #ifdef CONFIG_IPIPE
893 893
894 _resume_kernel_from_int: 894 _resume_kernel_from_int:
895 r0.l = ___ipipe_sync_root; 895 r0.l = ___ipipe_sync_root;
896 r0.h = ___ipipe_sync_root; 896 r0.h = ___ipipe_sync_root;
897 [--sp] = rets; 897 [--sp] = rets;
898 [--sp] = ( r7:4, p5:3 ); 898 [--sp] = ( r7:4, p5:3 );
899 SP += -12; 899 SP += -12;
900 call ___ipipe_call_irqtail 900 call ___ipipe_call_irqtail
901 SP += 12; 901 SP += 12;
902 ( r7:4, p5:3 ) = [sp++]; 902 ( r7:4, p5:3 ) = [sp++];
903 rets = [sp++]; 903 rets = [sp++];
904 rts 904 rts
905 #else 905 #else
906 #define _resume_kernel_from_int 2f 906 #define _resume_kernel_from_int 2f
907 #endif 907 #endif
908 908
909 ENTRY(_return_from_int) 909 ENTRY(_return_from_int)
910 /* If someone else already raised IRQ 15, do nothing. */ 910 /* If someone else already raised IRQ 15, do nothing. */
911 csync; 911 csync;
912 p2.l = lo(ILAT); 912 p2.l = lo(ILAT);
913 p2.h = hi(ILAT); 913 p2.h = hi(ILAT);
914 r0 = [p2]; 914 r0 = [p2];
915 cc = bittst (r0, EVT_IVG15_P); 915 cc = bittst (r0, EVT_IVG15_P);
916 if cc jump 2f; 916 if cc jump 2f;
917 917
918 /* if not return to user mode, get out */ 918 /* if not return to user mode, get out */
919 p2.l = lo(IPEND); 919 p2.l = lo(IPEND);
920 p2.h = hi(IPEND); 920 p2.h = hi(IPEND);
921 r0 = [p2]; 921 r0 = [p2];
922 r1 = 0x17(Z); 922 r1 = 0x17(Z);
923 r2 = ~r1; 923 r2 = ~r1;
924 r2.h = 0; 924 r2.h = 0;
925 r0 = r2 & r0; 925 r0 = r2 & r0;
926 r1 = 1; 926 r1 = 1;
927 r1 = r0 - r1; 927 r1 = r0 - r1;
928 r2 = r0 & r1; 928 r2 = r0 & r1;
929 cc = r2 == 0; 929 cc = r2 == 0;
930 if !cc jump _resume_kernel_from_int; 930 if !cc jump _resume_kernel_from_int;
931 931
932 /* Lower the interrupt level to 15. */ 932 /* Lower the interrupt level to 15. */
933 p0.l = lo(EVT15); 933 p0.l = lo(EVT15);
934 p0.h = hi(EVT15); 934 p0.h = hi(EVT15);
935 p1.l = _schedule_and_signal_from_int; 935 p1.l = _schedule_and_signal_from_int;
936 p1.h = _schedule_and_signal_from_int; 936 p1.h = _schedule_and_signal_from_int;
937 [p0] = p1; 937 [p0] = p1;
938 csync; 938 csync;
939 #if ANOMALY_05000281 || ANOMALY_05000461 939 #if ANOMALY_05000281 || ANOMALY_05000461
940 r0.l = lo(SAFE_USER_INSTRUCTION); 940 r0.l = lo(SAFE_USER_INSTRUCTION);
941 r0.h = hi(SAFE_USER_INSTRUCTION); 941 r0.h = hi(SAFE_USER_INSTRUCTION);
942 reti = r0; 942 reti = r0;
943 #endif 943 #endif
944 r0 = 0x801f (z); 944 r0 = 0x801f (z);
945 STI r0; 945 STI r0;
946 raise 15; /* raise evt15 to do signal or reschedule */ 946 raise 15; /* raise evt15 to do signal or reschedule */
947 rti; 947 rti;
948 2: 948 2:
949 rts; 949 rts;
950 ENDPROC(_return_from_int) 950 ENDPROC(_return_from_int)
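The bit trick near the top of _return_from_int deserves a note: after masking IPEND with ~0x17, the sequence r1 = r0 - 1; r2 = r0 & r1 is the classic x & (x - 1) test, which clears the lowest set bit and therefore yields zero exactly when at most one interrupt level is pending, i.e. when we are not nested inside another handler. A small standalone illustration (the 0xffe8 mask is taken from the code above; the rest is a placeholder for the demo):

	/* Demonstrates the x & (x - 1) "more than one bit set?" test used above. */
	#include <stdbool.h>
	#include <stdio.h>

	static bool nested_in_another_irq(unsigned ipend)
	{
		unsigned x = ipend & 0xffe8u;	/* ~0x17 with the upper half cleared */

		return (x & (x - 1)) != 0;	/* non-zero => at least two bits set */
	}

	int main(void)
	{
		printf("%d\n", nested_in_another_irq(0x0040));		/* one level:    0 */
		printf("%d\n", nested_in_another_irq(0x0040 | 0x2000));	/* nested level: 1 */
		return 0;
	}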
951 951
952 ENTRY(_lower_to_irq14) 952 ENTRY(_lower_to_irq14)
953 #if ANOMALY_05000281 || ANOMALY_05000461 953 #if ANOMALY_05000281 || ANOMALY_05000461
954 r0.l = lo(SAFE_USER_INSTRUCTION); 954 r0.l = lo(SAFE_USER_INSTRUCTION);
955 r0.h = hi(SAFE_USER_INSTRUCTION); 955 r0.h = hi(SAFE_USER_INSTRUCTION);
956 reti = r0; 956 reti = r0;
957 #endif 957 #endif
958 958
959 #ifdef CONFIG_DEBUG_HWERR 959 #ifdef CONFIG_DEBUG_HWERR
960 /* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */ 960 /* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
961 r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU); 961 r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
962 #else 962 #else
963 /* Only enable irq14 interrupt, until we transition to _evt_evt14 */ 963 /* Only enable irq14 interrupt, until we transition to _evt_evt14 */
964 r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU); 964 r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
965 #endif 965 #endif
966 sti r0; 966 sti r0;
967 raise 14; 967 raise 14;
968 rti; 968 rti;
969 ENDPROC(_lower_to_irq14) 969 ENDPROC(_lower_to_irq14)
970 970
971 ENTRY(_evt_evt14) 971 ENTRY(_evt_evt14)
972 #ifdef CONFIG_DEBUG_HWERR 972 #ifdef CONFIG_DEBUG_HWERR
973 r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU); 973 r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
974 sti r0; 974 sti r0;
975 #else 975 #else
976 cli r0; 976 cli r0;
977 #endif 977 #endif
978 #ifdef CONFIG_TRACE_IRQFLAGS 978 #ifdef CONFIG_TRACE_IRQFLAGS
979 [--sp] = rets; 979 [--sp] = rets;
980 sp += -12; 980 sp += -12;
981 call _trace_hardirqs_off; 981 call _trace_hardirqs_off;
982 sp += 12; 982 sp += 12;
983 rets = [sp++]; 983 rets = [sp++];
984 #endif 984 #endif
985 [--sp] = RETI; 985 [--sp] = RETI;
986 SP += 4; 986 SP += 4;
987 rts; 987 rts;
988 ENDPROC(_evt_evt14) 988 ENDPROC(_evt_evt14)
989 989
990 ENTRY(_schedule_and_signal_from_int) 990 ENTRY(_schedule_and_signal_from_int)
991 /* To end up here, vector 15 was changed - so we have to change it 991 /* To end up here, vector 15 was changed - so we have to change it
992 * back. 992 * back.
993 */ 993 */
994 p0.l = lo(EVT15); 994 p0.l = lo(EVT15);
995 p0.h = hi(EVT15); 995 p0.h = hi(EVT15);
996 p1.l = _evt_system_call; 996 p1.l = _evt_system_call;
997 p1.h = _evt_system_call; 997 p1.h = _evt_system_call;
998 [p0] = p1; 998 [p0] = p1;
999 csync; 999 csync;
1000 1000
1001 /* Set orig_p0 to -1 to indicate this isn't the end of a syscall. */ 1001 /* Set orig_p0 to -1 to indicate this isn't the end of a syscall. */
1002 r0 = -1 (x); 1002 r0 = -1 (x);
1003 [sp + PT_ORIG_P0] = r0; 1003 [sp + PT_ORIG_P0] = r0;
1004 1004
1005 p1 = rets; 1005 p1 = rets;
1006 [sp + PT_RESERVED] = p1; 1006 [sp + PT_RESERVED] = p1;
1007 1007
1008 #ifdef CONFIG_TRACE_IRQFLAGS 1008 #ifdef CONFIG_TRACE_IRQFLAGS
1009 /* trace_hardirqs_on() checks if all irqs are disabled. But here IRQ 15 1009 /* trace_hardirqs_on() checks if all irqs are disabled. But here IRQ 15
1010 * is turned on, so disable all irqs. */ 1010 * is turned on, so disable all irqs. */
1011 cli r0; 1011 cli r0;
1012 sp += -12; 1012 sp += -12;
1013 call _trace_hardirqs_on; 1013 call _trace_hardirqs_on;
1014 sp += 12; 1014 sp += 12;
1015 #endif 1015 #endif
1016 #ifdef CONFIG_SMP 1016 #ifdef CONFIG_SMP
1017 GET_PDA(p0, r0); /* Fetch current PDA (can't migrate to other CPU here) */ 1017 GET_PDA(p0, r0); /* Fetch current PDA (can't migrate to other CPU here) */
1018 r0 = [p0 + PDA_IRQFLAGS]; 1018 r0 = [p0 + PDA_IRQFLAGS];
1019 #else 1019 #else
1020 p0.l = _bfin_irq_flags; 1020 p0.l = _bfin_irq_flags;
1021 p0.h = _bfin_irq_flags; 1021 p0.h = _bfin_irq_flags;
1022 r0 = [p0]; 1022 r0 = [p0];
1023 #endif 1023 #endif
1024 sti r0; 1024 sti r0;
1025 1025
1026 /* finish the userspace "atomic" functions for it */ 1026 /* finish the userspace "atomic" functions for it */
1027 r1 = FIXED_CODE_END; 1027 r1 = FIXED_CODE_END;
1028 r2 = [sp + PT_PC]; 1028 r2 = [sp + PT_PC];
1029 cc = r1 <= r2; 1029 cc = r1 <= r2;
1030 if cc jump .Lresume_userspace (bp); 1030 if cc jump .Lresume_userspace (bp);
1031 1031
1032 r0 = sp; 1032 r0 = sp;
1033 sp += -12; 1033 sp += -12;
1034 1034
1035 pseudo_long_call _finish_atomic_sections, p5; 1035 pseudo_long_call _finish_atomic_sections, p5;
1036 sp += 12; 1036 sp += 12;
1037 jump.s .Lresume_userspace; 1037 jump.s .Lresume_userspace;
1038 ENDPROC(_schedule_and_signal_from_int) 1038 ENDPROC(_schedule_and_signal_from_int)
1039 1039
1040 ENTRY(_schedule_and_signal) 1040 ENTRY(_schedule_and_signal)
1041 SAVE_CONTEXT_SYSCALL 1041 SAVE_CONTEXT_SYSCALL
1042 /* To end up here, vector 15 was changed - so we have to change it 1042 /* To end up here, vector 15 was changed - so we have to change it
1043 * back. 1043 * back.
1044 */ 1044 */
1045 p0.l = lo(EVT15); 1045 p0.l = lo(EVT15);
1046 p0.h = hi(EVT15); 1046 p0.h = hi(EVT15);
1047 p1.l = _evt_system_call; 1047 p1.l = _evt_system_call;
1048 p1.h = _evt_system_call; 1048 p1.h = _evt_system_call;
1049 [p0] = p1; 1049 [p0] = p1;
1050 csync; 1050 csync;
1051 p0.l = 1f; 1051 p0.l = 1f;
1052 p0.h = 1f; 1052 p0.h = 1f;
1053 [sp + PT_RESERVED] = P0; 1053 [sp + PT_RESERVED] = P0;
1054 call .Lresume_userspace; 1054 call .Lresume_userspace;
1055 1: 1055 1:
1056 RESTORE_CONTEXT 1056 RESTORE_CONTEXT
1057 rti; 1057 rti;
1058 ENDPROC(_schedule_and_signal) 1058 ENDPROC(_schedule_and_signal)
1059 1059
1060 /* We handle this 100% in exception space - to reduce overhead 1060 /* We handle this 100% in exception space - to reduce overhead
1061 * The only potential problem is if the software buffer gets swapped out of the 1061 * The only potential problem is if the software buffer gets swapped out of the
1062 * CPLB table - then we double fault - so we don't let this happen in other places 1062 * CPLB table - then we double fault - so we don't let this happen in other places
1063 */ 1063 */
1064 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND 1064 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
1065 ENTRY(_ex_trace_buff_full) 1065 ENTRY(_ex_trace_buff_full)
1066 [--sp] = P3; 1066 [--sp] = P3;
1067 [--sp] = P2; 1067 [--sp] = P2;
1068 [--sp] = LC0; 1068 [--sp] = LC0;
1069 [--sp] = LT0; 1069 [--sp] = LT0;
1070 [--sp] = LB0; 1070 [--sp] = LB0;
1071 P5.L = _trace_buff_offset; 1071 P5.L = _trace_buff_offset;
1072 P5.H = _trace_buff_offset; 1072 P5.H = _trace_buff_offset;
1073 P3 = [P5]; /* trace_buff_offset */ 1073 P3 = [P5]; /* trace_buff_offset */
1074 P5.L = lo(TBUFSTAT); 1074 P5.L = lo(TBUFSTAT);
1075 P5.H = hi(TBUFSTAT); 1075 P5.H = hi(TBUFSTAT);
1076 R7 = [P5]; 1076 R7 = [P5];
1077 R7 <<= 1; /* double, since we need to read twice */ 1077 R7 <<= 1; /* double, since we need to read twice */
1078 LC0 = R7; 1078 LC0 = R7;
1079 R7 <<= 2; /* need to shift over again, 1079 R7 <<= 2; /* need to shift over again,
1080 * to get the number of bytes */ 1080 * to get the number of bytes */
1081 P5.L = lo(TBUF); 1081 P5.L = lo(TBUF);
1082 P5.H = hi(TBUF); 1082 P5.H = hi(TBUF);
1083 R6 = ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN)*1024) - 1; 1083 R6 = ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN)*1024) - 1;
1084 1084
1085 P2 = R7; 1085 P2 = R7;
1086 P3 = P3 + P2; 1086 P3 = P3 + P2;
1087 R7 = P3; 1087 R7 = P3;
1088 R7 = R7 & R6; 1088 R7 = R7 & R6;
1089 P3 = R7; 1089 P3 = R7;
1090 P2.L = _trace_buff_offset; 1090 P2.L = _trace_buff_offset;
1091 P2.H = _trace_buff_offset; 1091 P2.H = _trace_buff_offset;
1092 [P2] = P3; 1092 [P2] = P3;
1093 1093
1094 P2.L = _software_trace_buff; 1094 P2.L = _software_trace_buff;
1095 P2.H = _software_trace_buff; 1095 P2.H = _software_trace_buff;
1096 1096
1097 LSETUP (.Lstart, .Lend) LC0; 1097 LSETUP (.Lstart, .Lend) LC0;
1098 .Lstart: 1098 .Lstart:
1099 R7 = [P5]; /* read TBUF */ 1099 R7 = [P5]; /* read TBUF */
1100 P4 = P3 + P2; 1100 P4 = P3 + P2;
1101 [P4] = R7; 1101 [P4] = R7;
1102 P3 += -4; 1102 P3 += -4;
1103 R7 = P3; 1103 R7 = P3;
1104 R7 = R7 & R6; 1104 R7 = R7 & R6;
1105 .Lend: 1105 .Lend:
1106 P3 = R7; 1106 P3 = R7;
1107 1107
1108 LB0 = [sp++]; 1108 LB0 = [sp++];
1109 LT0 = [sp++]; 1109 LT0 = [sp++];
1110 LC0 = [sp++]; 1110 LC0 = [sp++];
1111 P2 = [sp++]; 1111 P2 = [sp++];
1112 P3 = [sp++]; 1112 P3 = [sp++];
1113 jump _bfin_return_from_exception; 1113 jump _bfin_return_from_exception;
1114 ENDPROC(_ex_trace_buff_full) 1114 ENDPROC(_ex_trace_buff_full)
1115 1115
1116 #if CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN == 4 1116 #if CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN == 4
1117 .data 1117 .data
1118 #else 1118 #else
1119 .section .l1.data.B 1119 .section .l1.data.B
1120 #endif /* CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN */ 1120 #endif /* CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN */
1121 ENTRY(_trace_buff_offset) 1121 ENTRY(_trace_buff_offset)
1122 .long 0; 1122 .long 0;
1123 ALIGN 1123 ALIGN
1124 ENTRY(_software_trace_buff) 1124 ENTRY(_software_trace_buff)
1125 .rept ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN)*256); 1125 .rept ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN)*256);
1126 .long 0 1126 .long 0
1127 .endr 1127 .endr
1128 #endif /* CONFIG_DEBUG_BFIN_HWTRACE_EXPAND */ 1128 #endif /* CONFIG_DEBUG_BFIN_HWTRACE_EXPAND */
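_ex_trace_buff_full above drains the hardware trace buffer (TBUF) into _software_trace_buff, treating it as a power-of-two ring: the running byte offset in _trace_buff_offset is wrapped with an AND against ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 1024) - 1, which is also why the .rept above reserves ((1 << LEN) * 256) longs, the same buffer measured in 32-bit words. A hedged C sketch of the same ring arithmetic; EXPAND_LEN and the other names are placeholders for the demo.

	/* Illustrative sketch of the power-of-two software trace ring used above. */
	#include <stdio.h>

	#define EXPAND_LEN  1				/* stand-in for the Kconfig value */
	#define BUF_WORDS   ((1u << EXPAND_LEN) * 256)	/* matches the .rept count above  */
	#define BUF_BYTES   (BUF_WORDS * 4)		/* ((1 << LEN) * 1024) bytes      */
	#define BUF_MASK    (BUF_BYTES - 1)		/* the R6 wrap mask above         */

	static unsigned int trace_buf[BUF_WORDS];	/* _software_trace_buff           */
	static unsigned int trace_off;			/* _trace_buff_offset, in bytes   */

	/* Store one 32-bit trace word, wrapping the byte offset like "R7 & R6". */
	static void trace_push(unsigned int word)
	{
		trace_off = (trace_off + 4) & BUF_MASK;
		trace_buf[trace_off / 4] = word;
	}

	int main(void)
	{
		for (unsigned int i = 0; i < BUF_WORDS + 8; i++)
			trace_push(i);			/* wraps cleanly past the end */
		printf("offset after wrap: %u bytes\n", trace_off);
		return 0;
	}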
1129 1129
1130 #if CONFIG_EARLY_PRINTK 1130 #if CONFIG_EARLY_PRINTK
1131 __INIT 1131 __INIT
1132 ENTRY(_early_trap) 1132 ENTRY(_early_trap)
1133 SAVE_ALL_SYS 1133 SAVE_ALL_SYS
1134 trace_buffer_stop(p0,r0); 1134 trace_buffer_stop(p0,r0);
1135 1135
1136 ANOMALY_283_315_WORKAROUND(p4, r5) 1136 ANOMALY_283_315_WORKAROUND(p4, r5)
1137 1137
1138 /* Turn caches off, to ensure we don't get double exceptions */ 1138 /* Turn caches off, to ensure we don't get double exceptions */
1139 1139
1140 P4.L = LO(IMEM_CONTROL); 1140 P4.L = LO(IMEM_CONTROL);
1141 P4.H = HI(IMEM_CONTROL); 1141 P4.H = HI(IMEM_CONTROL);
1142 1142
1143 R5 = [P4]; /* Control Register*/ 1143 R5 = [P4]; /* Control Register*/
1144 BITCLR(R5,ENICPLB_P); 1144 BITCLR(R5,ENICPLB_P);
1145 CSYNC; /* Disabling of CPLBs should be preceded by a CSYNC */ 1145 CSYNC; /* Disabling of CPLBs should be preceded by a CSYNC */
1146 [P4] = R5; 1146 [P4] = R5;
1147 SSYNC; 1147 SSYNC;
1148 1148
1149 P4.L = LO(DMEM_CONTROL); 1149 P4.L = LO(DMEM_CONTROL);
1150 P4.H = HI(DMEM_CONTROL); 1150 P4.H = HI(DMEM_CONTROL);
1151 R5 = [P4]; 1151 R5 = [P4];
1152 BITCLR(R5,ENDCPLB_P); 1152 BITCLR(R5,ENDCPLB_P);
1153 CSYNC; /* Disabling of CPLBs should be preceded by a CSYNC */ 1153 CSYNC; /* Disabling of CPLBs should be preceded by a CSYNC */
1154 [P4] = R5; 1154 [P4] = R5;
1155 SSYNC; 1155 SSYNC;
1156 1156
1157 r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */ 1157 r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
1158 r1 = RETX; 1158 r1 = RETX;
1159 1159
1160 SP += -12; 1160 SP += -12;
1161 call _early_trap_c; 1161 call _early_trap_c;
1162 SP += 12; 1162 SP += 12;
1163 ENDPROC(_early_trap) 1163 ENDPROC(_early_trap)
1164 __FINIT 1164 __FINIT
1165 #endif /* CONFIG_EARLY_PRINTK */ 1165 #endif /* CONFIG_EARLY_PRINTK */
1166 1166
1167 /* 1167 /*
1168 * Put these in the kernel data section - that should always be covered by 1168 * Put these in the kernel data section - that should always be covered by
1169 * a CPLB. This is needed to ensure we don't get double fault conditions 1169 * a CPLB. This is needed to ensure we don't get double fault conditions
1170 */ 1170 */
1171 1171
1172 #ifdef CONFIG_SYSCALL_TAB_L1 1172 #ifdef CONFIG_SYSCALL_TAB_L1
1173 .section .l1.data 1173 .section .l1.data
1174 #else 1174 #else
1175 .data 1175 .data
1176 #endif 1176 #endif
1177 1177
1178 ENTRY(_ex_table) 1178 ENTRY(_ex_table)
1179 /* entry for each EXCAUSE[5:0] 1179 /* entry for each EXCAUSE[5:0]
1180 * This table must be in sync with the table in ./kernel/traps.c 1180 * This table must be in sync with the table in ./kernel/traps.c
1181 * EXCPT instruction can provide 4 bits of EXCAUSE, allowing 16 to be user defined 1181 * EXCPT instruction can provide 4 bits of EXCAUSE, allowing 16 to be user defined
1182 */ 1182 */
1183 .long _ex_syscall /* 0x00 - User Defined - Linux Syscall */ 1183 .long _ex_syscall /* 0x00 - User Defined - Linux Syscall */
1184 .long _ex_trap_c /* 0x01 - User Defined - Software breakpoint */ 1184 .long _ex_trap_c /* 0x01 - User Defined - Software breakpoint */
1185 #ifdef CONFIG_KGDB 1185 #ifdef CONFIG_KGDB
1186 .long _ex_trap_c /* 0x02 - User Defined - KGDB initial connection 1186 .long _ex_trap_c /* 0x02 - User Defined - KGDB initial connection
1187 and break signal trap */ 1187 and break signal trap */
1188 #else 1188 #else
1189 .long _ex_replaceable /* 0x02 - User Defined */ 1189 .long _ex_replaceable /* 0x02 - User Defined */
1190 #endif 1190 #endif
1191 .long _ex_trap_c /* 0x03 - User Defined - userspace stack overflow */ 1191 .long _ex_trap_c /* 0x03 - User Defined - userspace stack overflow */
1192 .long _ex_trap_c /* 0x04 - User Defined - dump trace buffer */ 1192 .long _ex_trap_c /* 0x04 - User Defined - dump trace buffer */
1193 .long _ex_replaceable /* 0x05 - User Defined */ 1193 .long _ex_replaceable /* 0x05 - User Defined */
1194 .long _ex_replaceable /* 0x06 - User Defined */ 1194 .long _ex_replaceable /* 0x06 - User Defined */
1195 .long _ex_replaceable /* 0x07 - User Defined */ 1195 .long _ex_replaceable /* 0x07 - User Defined */
1196 .long _ex_replaceable /* 0x08 - User Defined */ 1196 .long _ex_replaceable /* 0x08 - User Defined */
1197 .long _ex_replaceable /* 0x09 - User Defined */ 1197 .long _ex_replaceable /* 0x09 - User Defined */
1198 .long _ex_replaceable /* 0x0A - User Defined */ 1198 .long _ex_replaceable /* 0x0A - User Defined */
1199 .long _ex_replaceable /* 0x0B - User Defined */ 1199 .long _ex_replaceable /* 0x0B - User Defined */
1200 .long _ex_replaceable /* 0x0C - User Defined */ 1200 .long _ex_replaceable /* 0x0C - User Defined */
1201 .long _ex_replaceable /* 0x0D - User Defined */ 1201 .long _ex_replaceable /* 0x0D - User Defined */
1202 .long _ex_replaceable /* 0x0E - User Defined */ 1202 .long _ex_replaceable /* 0x0E - User Defined */
1203 .long _ex_replaceable /* 0x0F - User Defined */ 1203 .long _ex_replaceable /* 0x0F - User Defined */
1204 .long _ex_single_step /* 0x10 - HW Single step */ 1204 .long _ex_single_step /* 0x10 - HW Single step */
1205 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND 1205 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
1206 .long _ex_trace_buff_full /* 0x11 - Trace Buffer Full */ 1206 .long _ex_trace_buff_full /* 0x11 - Trace Buffer Full */
1207 #else 1207 #else
1208 .long _ex_trap_c /* 0x11 - Trace Buffer Full */ 1208 .long _ex_trap_c /* 0x11 - Trace Buffer Full */
1209 #endif 1209 #endif
1210 .long _ex_trap_c /* 0x12 - Reserved */ 1210 .long _ex_trap_c /* 0x12 - Reserved */
1211 .long _ex_trap_c /* 0x13 - Reserved */ 1211 .long _ex_trap_c /* 0x13 - Reserved */
1212 .long _ex_trap_c /* 0x14 - Reserved */ 1212 .long _ex_trap_c /* 0x14 - Reserved */
1213 .long _ex_trap_c /* 0x15 - Reserved */ 1213 .long _ex_trap_c /* 0x15 - Reserved */
1214 .long _ex_trap_c /* 0x16 - Reserved */ 1214 .long _ex_trap_c /* 0x16 - Reserved */
1215 .long _ex_trap_c /* 0x17 - Reserved */ 1215 .long _ex_trap_c /* 0x17 - Reserved */
1216 .long _ex_trap_c /* 0x18 - Reserved */ 1216 .long _ex_trap_c /* 0x18 - Reserved */
1217 .long _ex_trap_c /* 0x19 - Reserved */ 1217 .long _ex_trap_c /* 0x19 - Reserved */
1218 .long _ex_trap_c /* 0x1A - Reserved */ 1218 .long _ex_trap_c /* 0x1A - Reserved */
1219 .long _ex_trap_c /* 0x1B - Reserved */ 1219 .long _ex_trap_c /* 0x1B - Reserved */
1220 .long _ex_trap_c /* 0x1C - Reserved */ 1220 .long _ex_trap_c /* 0x1C - Reserved */
1221 .long _ex_trap_c /* 0x1D - Reserved */ 1221 .long _ex_trap_c /* 0x1D - Reserved */
1222 .long _ex_trap_c /* 0x1E - Reserved */ 1222 .long _ex_trap_c /* 0x1E - Reserved */
1223 .long _ex_trap_c /* 0x1F - Reserved */ 1223 .long _ex_trap_c /* 0x1F - Reserved */
1224 .long _ex_trap_c /* 0x20 - Reserved */ 1224 .long _ex_trap_c /* 0x20 - Reserved */
1225 .long _ex_trap_c /* 0x21 - Undefined Instruction */ 1225 .long _ex_trap_c /* 0x21 - Undefined Instruction */
1226 .long _ex_trap_c /* 0x22 - Illegal Instruction Combination */ 1226 .long _ex_trap_c /* 0x22 - Illegal Instruction Combination */
1227 .long _ex_dviol /* 0x23 - Data CPLB Protection Violation */ 1227 .long _ex_dviol /* 0x23 - Data CPLB Protection Violation */
1228 .long _ex_trap_c /* 0x24 - Data access misaligned */ 1228 .long _ex_trap_c /* 0x24 - Data access misaligned */
1229 .long _ex_trap_c /* 0x25 - Unrecoverable Event */ 1229 .long _ex_trap_c /* 0x25 - Unrecoverable Event */
1230 .long _ex_dmiss /* 0x26 - Data CPLB Miss */ 1230 .long _ex_dmiss /* 0x26 - Data CPLB Miss */
1231 .long _ex_dmult /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero */ 1231 .long _ex_dmult /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero */
1232 .long _ex_trap_c /* 0x28 - Emulation Watchpoint */ 1232 .long _ex_trap_c /* 0x28 - Emulation Watchpoint */
1233 .long _ex_trap_c /* 0x29 - Instruction fetch access error (535 only) */ 1233 .long _ex_trap_c /* 0x29 - Instruction fetch access error (535 only) */
1234 .long _ex_trap_c /* 0x2A - Instruction fetch misaligned */ 1234 .long _ex_trap_c /* 0x2A - Instruction fetch misaligned */
1235 .long _ex_trap_c /* 0x2B - Instruction CPLB protection Violation */ 1235 .long _ex_trap_c /* 0x2B - Instruction CPLB protection Violation */
1236 .long _ex_icplb_miss /* 0x2C - Instruction CPLB miss */ 1236 .long _ex_icplb_miss /* 0x2C - Instruction CPLB miss */
1237 .long _ex_trap_c /* 0x2D - Instruction CPLB Multiple Hits */ 1237 .long _ex_trap_c /* 0x2D - Instruction CPLB Multiple Hits */
1238 .long _ex_trap_c /* 0x2E - Illegal use of Supervisor Resource */ 1238 .long _ex_trap_c /* 0x2E - Illegal use of Supervisor Resource */
1239 .long _ex_trap_c /* 0x2E - Illegal use of Supervisor Resource */ 1239 .long _ex_trap_c /* 0x2E - Illegal use of Supervisor Resource */
1240 .long _ex_trap_c /* 0x2F - Reserved */ 1240 .long _ex_trap_c /* 0x2F - Reserved */
1241 .long _ex_trap_c /* 0x30 - Reserved */ 1241 .long _ex_trap_c /* 0x30 - Reserved */
1242 .long _ex_trap_c /* 0x31 - Reserved */ 1242 .long _ex_trap_c /* 0x31 - Reserved */
1243 .long _ex_trap_c /* 0x32 - Reserved */ 1243 .long _ex_trap_c /* 0x32 - Reserved */
1244 .long _ex_trap_c /* 0x33 - Reserved */ 1244 .long _ex_trap_c /* 0x33 - Reserved */
1245 .long _ex_trap_c /* 0x34 - Reserved */ 1245 .long _ex_trap_c /* 0x34 - Reserved */
1246 .long _ex_trap_c /* 0x35 - Reserved */ 1246 .long _ex_trap_c /* 0x35 - Reserved */
1247 .long _ex_trap_c /* 0x36 - Reserved */ 1247 .long _ex_trap_c /* 0x36 - Reserved */
1248 .long _ex_trap_c /* 0x37 - Reserved */ 1248 .long _ex_trap_c /* 0x37 - Reserved */
1249 .long _ex_trap_c /* 0x38 - Reserved */ 1249 .long _ex_trap_c /* 0x38 - Reserved */
1250 .long _ex_trap_c /* 0x39 - Reserved */ 1250 .long _ex_trap_c /* 0x39 - Reserved */
1251 .long _ex_trap_c /* 0x3A - Reserved */ 1251 .long _ex_trap_c /* 0x3A - Reserved */
1252 .long _ex_trap_c /* 0x3B - Reserved */ 1252 .long _ex_trap_c /* 0x3B - Reserved */
1253 .long _ex_trap_c /* 0x3C - Reserved */ 1253 .long _ex_trap_c /* 0x3C - Reserved */
1254 .long _ex_trap_c /* 0x3D - Reserved */ 1254 .long _ex_trap_c /* 0x3D - Reserved */
1255 .long _ex_trap_c /* 0x3E - Reserved */ 1255 .long _ex_trap_c /* 0x3E - Reserved */
1256 .long _ex_trap_c /* 0x3F - Reserved */ 1256 .long _ex_trap_c /* 0x3F - Reserved */
1257 END(_ex_table) 1257 END(_ex_table)
1258 1258
1259 ENTRY(_sys_call_table) 1259 ENTRY(_sys_call_table)
1260 .long _sys_restart_syscall /* 0 */ 1260 .long _sys_restart_syscall /* 0 */
1261 .long _sys_exit 1261 .long _sys_exit
1262 .long _sys_fork 1262 .long _sys_fork
1263 .long _sys_read 1263 .long _sys_read
1264 .long _sys_write 1264 .long _sys_write
1265 .long _sys_open /* 5 */ 1265 .long _sys_open /* 5 */
1266 .long _sys_close 1266 .long _sys_close
1267 .long _sys_ni_syscall /* old waitpid */ 1267 .long _sys_ni_syscall /* old waitpid */
1268 .long _sys_creat 1268 .long _sys_creat
1269 .long _sys_link 1269 .long _sys_link
1270 .long _sys_unlink /* 10 */ 1270 .long _sys_unlink /* 10 */
1271 .long _sys_execve 1271 .long _sys_execve
1272 .long _sys_chdir 1272 .long _sys_chdir
1273 .long _sys_time 1273 .long _sys_time
1274 .long _sys_mknod 1274 .long _sys_mknod
1275 .long _sys_chmod /* 15 */ 1275 .long _sys_chmod /* 15 */
1276 .long _sys_chown /* chown16 */ 1276 .long _sys_chown /* chown16 */
1277 .long _sys_ni_syscall /* old break syscall holder */ 1277 .long _sys_ni_syscall /* old break syscall holder */
1278 .long _sys_ni_syscall /* old stat */ 1278 .long _sys_ni_syscall /* old stat */
1279 .long _sys_lseek 1279 .long _sys_lseek
1280 .long _sys_getpid /* 20 */ 1280 .long _sys_getpid /* 20 */
1281 .long _sys_mount 1281 .long _sys_mount
1282 .long _sys_ni_syscall /* old umount */ 1282 .long _sys_ni_syscall /* old umount */
1283 .long _sys_setuid 1283 .long _sys_setuid
1284 .long _sys_getuid 1284 .long _sys_getuid
1285 .long _sys_stime /* 25 */ 1285 .long _sys_stime /* 25 */
1286 .long _sys_ptrace 1286 .long _sys_ptrace
1287 .long _sys_alarm 1287 .long _sys_alarm
1288 .long _sys_ni_syscall /* old fstat */ 1288 .long _sys_ni_syscall /* old fstat */
1289 .long _sys_pause 1289 .long _sys_pause
1290 .long _sys_ni_syscall /* old utime */ /* 30 */ 1290 .long _sys_ni_syscall /* old utime */ /* 30 */
1291 .long _sys_ni_syscall /* old stty syscall holder */ 1291 .long _sys_ni_syscall /* old stty syscall holder */
1292 .long _sys_ni_syscall /* old gtty syscall holder */ 1292 .long _sys_ni_syscall /* old gtty syscall holder */
1293 .long _sys_access 1293 .long _sys_access
1294 .long _sys_nice 1294 .long _sys_nice
1295 .long _sys_ni_syscall /* 35 */ /* old ftime syscall holder */ 1295 .long _sys_ni_syscall /* 35 */ /* old ftime syscall holder */
1296 .long _sys_sync 1296 .long _sys_sync
1297 .long _sys_kill 1297 .long _sys_kill
1298 .long _sys_rename 1298 .long _sys_rename
1299 .long _sys_mkdir 1299 .long _sys_mkdir
1300 .long _sys_rmdir /* 40 */ 1300 .long _sys_rmdir /* 40 */
1301 .long _sys_dup 1301 .long _sys_dup
1302 .long _sys_pipe 1302 .long _sys_pipe
1303 .long _sys_times 1303 .long _sys_times
1304 .long _sys_ni_syscall /* old prof syscall holder */ 1304 .long _sys_ni_syscall /* old prof syscall holder */
1305 .long _sys_brk /* 45 */ 1305 .long _sys_brk /* 45 */
1306 .long _sys_setgid 1306 .long _sys_setgid
1307 .long _sys_getgid 1307 .long _sys_getgid
1308 .long _sys_ni_syscall /* old sys_signal */ 1308 .long _sys_ni_syscall /* old sys_signal */
1309 .long _sys_geteuid /* geteuid16 */ 1309 .long _sys_geteuid /* geteuid16 */
1310 .long _sys_getegid /* getegid16 */ /* 50 */ 1310 .long _sys_getegid /* getegid16 */ /* 50 */
1311 .long _sys_acct 1311 .long _sys_acct
1312 .long _sys_umount /* recycled never used phys() */ 1312 .long _sys_umount /* recycled never used phys() */
1313 .long _sys_ni_syscall /* old lock syscall holder */ 1313 .long _sys_ni_syscall /* old lock syscall holder */
1314 .long _sys_ioctl 1314 .long _sys_ioctl
1315 .long _sys_fcntl /* 55 */ 1315 .long _sys_fcntl /* 55 */
1316 .long _sys_ni_syscall /* old mpx syscall holder */ 1316 .long _sys_ni_syscall /* old mpx syscall holder */
1317 .long _sys_setpgid 1317 .long _sys_setpgid
1318 .long _sys_ni_syscall /* old ulimit syscall holder */ 1318 .long _sys_ni_syscall /* old ulimit syscall holder */
1319 .long _sys_ni_syscall /* old old uname */ 1319 .long _sys_ni_syscall /* old old uname */
1320 .long _sys_umask /* 60 */ 1320 .long _sys_umask /* 60 */
1321 .long _sys_chroot 1321 .long _sys_chroot
1322 .long _sys_ustat 1322 .long _sys_ustat
1323 .long _sys_dup2 1323 .long _sys_dup2
1324 .long _sys_getppid 1324 .long _sys_getppid
1325 .long _sys_getpgrp /* 65 */ 1325 .long _sys_getpgrp /* 65 */
1326 .long _sys_setsid 1326 .long _sys_setsid
1327 .long _sys_ni_syscall /* old sys_sigaction */ 1327 .long _sys_ni_syscall /* old sys_sigaction */
1328 .long _sys_sgetmask 1328 .long _sys_sgetmask
1329 .long _sys_ssetmask 1329 .long _sys_ssetmask
1330 .long _sys_setreuid /* setreuid16 */ /* 70 */ 1330 .long _sys_setreuid /* setreuid16 */ /* 70 */
1331 .long _sys_setregid /* setregid16 */ 1331 .long _sys_setregid /* setregid16 */
1332 .long _sys_ni_syscall /* old sys_sigsuspend */ 1332 .long _sys_ni_syscall /* old sys_sigsuspend */
1333 .long _sys_ni_syscall /* old sys_sigpending */ 1333 .long _sys_ni_syscall /* old sys_sigpending */
1334 .long _sys_sethostname 1334 .long _sys_sethostname
1335 .long _sys_setrlimit /* 75 */ 1335 .long _sys_setrlimit /* 75 */
1336 .long _sys_ni_syscall /* old getrlimit */ 1336 .long _sys_ni_syscall /* old getrlimit */
1337 .long _sys_getrusage 1337 .long _sys_getrusage
1338 .long _sys_gettimeofday 1338 .long _sys_gettimeofday
1339 .long _sys_settimeofday 1339 .long _sys_settimeofday
1340 .long _sys_getgroups /* getgroups16 */ /* 80 */ 1340 .long _sys_getgroups /* getgroups16 */ /* 80 */
1341 .long _sys_setgroups /* setgroups16 */ 1341 .long _sys_setgroups /* setgroups16 */
1342 .long _sys_ni_syscall /* old_select */ 1342 .long _sys_ni_syscall /* old_select */
1343 .long _sys_symlink 1343 .long _sys_symlink
1344 .long _sys_ni_syscall /* old lstat */ 1344 .long _sys_ni_syscall /* old lstat */
1345 .long _sys_readlink /* 85 */ 1345 .long _sys_readlink /* 85 */
1346 .long _sys_uselib 1346 .long _sys_uselib
1347 .long _sys_ni_syscall /* sys_swapon */ 1347 .long _sys_ni_syscall /* sys_swapon */
1348 .long _sys_reboot 1348 .long _sys_reboot
1349 .long _sys_ni_syscall /* old_readdir */ 1349 .long _sys_ni_syscall /* old_readdir */
1350 .long _sys_ni_syscall /* sys_mmap */ /* 90 */ 1350 .long _sys_ni_syscall /* sys_mmap */ /* 90 */
1351 .long _sys_munmap 1351 .long _sys_munmap
1352 .long _sys_truncate 1352 .long _sys_truncate
1353 .long _sys_ftruncate 1353 .long _sys_ftruncate
1354 .long _sys_fchmod 1354 .long _sys_fchmod
1355 .long _sys_fchown /* fchown16 */ /* 95 */ 1355 .long _sys_fchown /* fchown16 */ /* 95 */
1356 .long _sys_getpriority 1356 .long _sys_getpriority
1357 .long _sys_setpriority 1357 .long _sys_setpriority
1358 .long _sys_ni_syscall /* old profil syscall holder */ 1358 .long _sys_ni_syscall /* old profil syscall holder */
1359 .long _sys_statfs 1359 .long _sys_statfs
1360 .long _sys_fstatfs /* 100 */ 1360 .long _sys_fstatfs /* 100 */
1361 .long _sys_ni_syscall 1361 .long _sys_ni_syscall
1362 .long _sys_ni_syscall /* old sys_socketcall */ 1362 .long _sys_ni_syscall /* old sys_socketcall */
1363 .long _sys_syslog 1363 .long _sys_syslog
1364 .long _sys_setitimer 1364 .long _sys_setitimer
1365 .long _sys_getitimer /* 105 */ 1365 .long _sys_getitimer /* 105 */
1366 .long _sys_newstat 1366 .long _sys_newstat
1367 .long _sys_newlstat 1367 .long _sys_newlstat
1368 .long _sys_newfstat 1368 .long _sys_newfstat
1369 .long _sys_ni_syscall /* old uname */ 1369 .long _sys_ni_syscall /* old uname */
1370 .long _sys_ni_syscall /* iopl for i386 */ /* 110 */ 1370 .long _sys_ni_syscall /* iopl for i386 */ /* 110 */
1371 .long _sys_vhangup 1371 .long _sys_vhangup
1372 .long _sys_ni_syscall /* obsolete idle() syscall */ 1372 .long _sys_ni_syscall /* obsolete idle() syscall */
1373 .long _sys_ni_syscall /* vm86old for i386 */ 1373 .long _sys_ni_syscall /* vm86old for i386 */
1374 .long _sys_wait4 1374 .long _sys_wait4
1375 .long _sys_ni_syscall /* 115 */ /* sys_swapoff */ 1375 .long _sys_ni_syscall /* 115 */ /* sys_swapoff */
1376 .long _sys_sysinfo 1376 .long _sys_sysinfo
1377 .long _sys_ni_syscall /* old sys_ipc */ 1377 .long _sys_ni_syscall /* old sys_ipc */
1378 .long _sys_fsync 1378 .long _sys_fsync
1379 .long _sys_ni_syscall /* old sys_sigreturn */ 1379 .long _sys_ni_syscall /* old sys_sigreturn */
1380 .long _sys_clone /* 120 */ 1380 .long _sys_clone /* 120 */
1381 .long _sys_setdomainname 1381 .long _sys_setdomainname
1382 .long _sys_newuname 1382 .long _sys_newuname
1383 .long _sys_ni_syscall /* old sys_modify_ldt */ 1383 .long _sys_ni_syscall /* old sys_modify_ldt */
1384 .long _sys_adjtimex 1384 .long _sys_adjtimex
1385 .long _sys_mprotect /* 125 */ 1385 .long _sys_mprotect /* 125 */
1386 .long _sys_ni_syscall /* old sys_sigprocmask */ 1386 .long _sys_ni_syscall /* old sys_sigprocmask */
1387 .long _sys_ni_syscall /* old "creat_module" */ 1387 .long _sys_ni_syscall /* old "creat_module" */
1388 .long _sys_init_module 1388 .long _sys_init_module
1389 .long _sys_delete_module 1389 .long _sys_delete_module
1390 .long _sys_ni_syscall /* 130: old "get_kernel_syms" */ 1390 .long _sys_ni_syscall /* 130: old "get_kernel_syms" */
1391 .long _sys_quotactl 1391 .long _sys_quotactl
1392 .long _sys_getpgid 1392 .long _sys_getpgid
1393 .long _sys_fchdir 1393 .long _sys_fchdir
1394 .long _sys_bdflush 1394 .long _sys_bdflush
1395 .long _sys_ni_syscall /* 135 */ /* sys_sysfs */ 1395 .long _sys_ni_syscall /* 135 */ /* sys_sysfs */
1396 .long _sys_personality 1396 .long _sys_personality
1397 .long _sys_ni_syscall /* for afs_syscall */ 1397 .long _sys_ni_syscall /* for afs_syscall */
1398 .long _sys_setfsuid /* setfsuid16 */ 1398 .long _sys_setfsuid /* setfsuid16 */
1399 .long _sys_setfsgid /* setfsgid16 */ 1399 .long _sys_setfsgid /* setfsgid16 */
1400 .long _sys_llseek /* 140 */ 1400 .long _sys_llseek /* 140 */
1401 .long _sys_getdents 1401 .long _sys_getdents
1402 .long _sys_ni_syscall /* sys_select */ 1402 .long _sys_ni_syscall /* sys_select */
1403 .long _sys_flock 1403 .long _sys_flock
1404 .long _sys_msync 1404 .long _sys_msync
1405 .long _sys_readv /* 145 */ 1405 .long _sys_readv /* 145 */
1406 .long _sys_writev 1406 .long _sys_writev
1407 .long _sys_getsid 1407 .long _sys_getsid
1408 .long _sys_fdatasync 1408 .long _sys_fdatasync
1409 .long _sys_sysctl 1409 .long _sys_sysctl
1410 .long _sys_mlock /* 150 */ 1410 .long _sys_mlock /* 150 */
1411 .long _sys_munlock 1411 .long _sys_munlock
1412 .long _sys_mlockall 1412 .long _sys_mlockall
1413 .long _sys_munlockall 1413 .long _sys_munlockall
1414 .long _sys_sched_setparam 1414 .long _sys_sched_setparam
1415 .long _sys_sched_getparam /* 155 */ 1415 .long _sys_sched_getparam /* 155 */
1416 .long _sys_sched_setscheduler 1416 .long _sys_sched_setscheduler
1417 .long _sys_sched_getscheduler 1417 .long _sys_sched_getscheduler
1418 .long _sys_sched_yield 1418 .long _sys_sched_yield
1419 .long _sys_sched_get_priority_max 1419 .long _sys_sched_get_priority_max
1420 .long _sys_sched_get_priority_min /* 160 */ 1420 .long _sys_sched_get_priority_min /* 160 */
1421 .long _sys_sched_rr_get_interval 1421 .long _sys_sched_rr_get_interval
1422 .long _sys_nanosleep 1422 .long _sys_nanosleep
1423 .long _sys_mremap 1423 .long _sys_mremap
1424 .long _sys_setresuid /* setresuid16 */ 1424 .long _sys_setresuid /* setresuid16 */
1425 .long _sys_getresuid /* getresuid16 */ /* 165 */ 1425 .long _sys_getresuid /* getresuid16 */ /* 165 */
1426 .long _sys_ni_syscall /* for vm86 */ 1426 .long _sys_ni_syscall /* for vm86 */
1427 .long _sys_ni_syscall /* old "query_module" */ 1427 .long _sys_ni_syscall /* old "query_module" */
1428 .long _sys_ni_syscall /* sys_poll */ 1428 .long _sys_ni_syscall /* sys_poll */
1429 .long _sys_nfsservctl 1429 .long _sys_nfsservctl
1430 .long _sys_setresgid /* setresgid16 */ /* 170 */ 1430 .long _sys_setresgid /* setresgid16 */ /* 170 */
1431 .long _sys_getresgid /* getresgid16 */ 1431 .long _sys_getresgid /* getresgid16 */
1432 .long _sys_prctl 1432 .long _sys_prctl
1433 .long _sys_rt_sigreturn 1433 .long _sys_rt_sigreturn
1434 .long _sys_rt_sigaction 1434 .long _sys_rt_sigaction
1435 .long _sys_rt_sigprocmask /* 175 */ 1435 .long _sys_rt_sigprocmask /* 175 */
1436 .long _sys_rt_sigpending 1436 .long _sys_rt_sigpending
1437 .long _sys_rt_sigtimedwait 1437 .long _sys_rt_sigtimedwait
1438 .long _sys_rt_sigqueueinfo 1438 .long _sys_rt_sigqueueinfo
1439 .long _sys_rt_sigsuspend 1439 .long _sys_rt_sigsuspend
1440 .long _sys_pread64 /* 180 */ 1440 .long _sys_pread64 /* 180 */
1441 .long _sys_pwrite64 1441 .long _sys_pwrite64
1442 .long _sys_lchown /* lchown16 */ 1442 .long _sys_lchown /* lchown16 */
1443 .long _sys_getcwd 1443 .long _sys_getcwd
1444 .long _sys_capget 1444 .long _sys_capget
1445 .long _sys_capset /* 185 */ 1445 .long _sys_capset /* 185 */
1446 .long _sys_sigaltstack 1446 .long _sys_sigaltstack
1447 .long _sys_sendfile 1447 .long _sys_sendfile
1448 .long _sys_ni_syscall /* streams1 */ 1448 .long _sys_ni_syscall /* streams1 */
1449 .long _sys_ni_syscall /* streams2 */ 1449 .long _sys_ni_syscall /* streams2 */
1450 .long _sys_vfork /* 190 */ 1450 .long _sys_vfork /* 190 */
1451 .long _sys_getrlimit 1451 .long _sys_getrlimit
1452 .long _sys_mmap_pgoff 1452 .long _sys_mmap_pgoff
1453 .long _sys_truncate64 1453 .long _sys_truncate64
1454 .long _sys_ftruncate64 1454 .long _sys_ftruncate64
1455 .long _sys_stat64 /* 195 */ 1455 .long _sys_stat64 /* 195 */
1456 .long _sys_lstat64 1456 .long _sys_lstat64
1457 .long _sys_fstat64 1457 .long _sys_fstat64
1458 .long _sys_chown 1458 .long _sys_chown
1459 .long _sys_getuid 1459 .long _sys_getuid
1460 .long _sys_getgid /* 200 */ 1460 .long _sys_getgid /* 200 */
1461 .long _sys_geteuid 1461 .long _sys_geteuid
1462 .long _sys_getegid 1462 .long _sys_getegid
1463 .long _sys_setreuid 1463 .long _sys_setreuid
1464 .long _sys_setregid 1464 .long _sys_setregid
1465 .long _sys_getgroups /* 205 */ 1465 .long _sys_getgroups /* 205 */
1466 .long _sys_setgroups 1466 .long _sys_setgroups
1467 .long _sys_fchown 1467 .long _sys_fchown
1468 .long _sys_setresuid 1468 .long _sys_setresuid
1469 .long _sys_getresuid 1469 .long _sys_getresuid
1470 .long _sys_setresgid /* 210 */ 1470 .long _sys_setresgid /* 210 */
1471 .long _sys_getresgid 1471 .long _sys_getresgid
1472 .long _sys_lchown 1472 .long _sys_lchown
1473 .long _sys_setuid 1473 .long _sys_setuid
1474 .long _sys_setgid 1474 .long _sys_setgid
1475 .long _sys_setfsuid /* 215 */ 1475 .long _sys_setfsuid /* 215 */
1476 .long _sys_setfsgid 1476 .long _sys_setfsgid
1477 .long _sys_pivot_root 1477 .long _sys_pivot_root
1478 .long _sys_mincore 1478 .long _sys_mincore
1479 .long _sys_madvise 1479 .long _sys_madvise
1480 .long _sys_getdents64 /* 220 */ 1480 .long _sys_getdents64 /* 220 */
1481 .long _sys_fcntl64 1481 .long _sys_fcntl64
1482 .long _sys_ni_syscall /* reserved for TUX */ 1482 .long _sys_ni_syscall /* reserved for TUX */
1483 .long _sys_ni_syscall 1483 .long _sys_ni_syscall
1484 .long _sys_gettid 1484 .long _sys_gettid
1485 .long _sys_readahead /* 225 */ 1485 .long _sys_readahead /* 225 */
1486 .long _sys_setxattr 1486 .long _sys_setxattr
1487 .long _sys_lsetxattr 1487 .long _sys_lsetxattr
1488 .long _sys_fsetxattr 1488 .long _sys_fsetxattr
1489 .long _sys_getxattr 1489 .long _sys_getxattr
1490 .long _sys_lgetxattr /* 230 */ 1490 .long _sys_lgetxattr /* 230 */
1491 .long _sys_fgetxattr 1491 .long _sys_fgetxattr
1492 .long _sys_listxattr 1492 .long _sys_listxattr
1493 .long _sys_llistxattr 1493 .long _sys_llistxattr
1494 .long _sys_flistxattr 1494 .long _sys_flistxattr
1495 .long _sys_removexattr /* 235 */ 1495 .long _sys_removexattr /* 235 */
1496 .long _sys_lremovexattr 1496 .long _sys_lremovexattr
1497 .long _sys_fremovexattr 1497 .long _sys_fremovexattr
1498 .long _sys_tkill 1498 .long _sys_tkill
1499 .long _sys_sendfile64 1499 .long _sys_sendfile64
1500 .long _sys_futex /* 240 */ 1500 .long _sys_futex /* 240 */
1501 .long _sys_sched_setaffinity 1501 .long _sys_sched_setaffinity
1502 .long _sys_sched_getaffinity 1502 .long _sys_sched_getaffinity
1503 .long _sys_ni_syscall /* sys_set_thread_area */ 1503 .long _sys_ni_syscall /* sys_set_thread_area */
1504 .long _sys_ni_syscall /* sys_get_thread_area */ 1504 .long _sys_ni_syscall /* sys_get_thread_area */
1505 .long _sys_io_setup /* 245 */ 1505 .long _sys_io_setup /* 245 */
1506 .long _sys_io_destroy 1506 .long _sys_io_destroy
1507 .long _sys_io_getevents 1507 .long _sys_io_getevents
1508 .long _sys_io_submit 1508 .long _sys_io_submit
1509 .long _sys_io_cancel 1509 .long _sys_io_cancel
1510 .long _sys_ni_syscall /* 250 */ /* sys_alloc_hugepages */ 1510 .long _sys_ni_syscall /* 250 */ /* sys_alloc_hugepages */
1511 .long _sys_ni_syscall /* sys_free_hugepages */ 1511 .long _sys_ni_syscall /* sys_free_hugepages */
1512 .long _sys_exit_group 1512 .long _sys_exit_group
1513 .long _sys_lookup_dcookie 1513 .long _sys_lookup_dcookie
1514 .long _sys_bfin_spinlock 1514 .long _sys_bfin_spinlock
1515 .long _sys_epoll_create /* 255 */ 1515 .long _sys_epoll_create /* 255 */
1516 .long _sys_epoll_ctl 1516 .long _sys_epoll_ctl
1517 .long _sys_epoll_wait 1517 .long _sys_epoll_wait
1518 .long _sys_ni_syscall /* remap_file_pages */ 1518 .long _sys_ni_syscall /* remap_file_pages */
1519 .long _sys_set_tid_address 1519 .long _sys_set_tid_address
1520 .long _sys_timer_create /* 260 */ 1520 .long _sys_timer_create /* 260 */
1521 .long _sys_timer_settime 1521 .long _sys_timer_settime
1522 .long _sys_timer_gettime 1522 .long _sys_timer_gettime
1523 .long _sys_timer_getoverrun 1523 .long _sys_timer_getoverrun
1524 .long _sys_timer_delete 1524 .long _sys_timer_delete
1525 .long _sys_clock_settime /* 265 */ 1525 .long _sys_clock_settime /* 265 */
1526 .long _sys_clock_gettime 1526 .long _sys_clock_gettime
1527 .long _sys_clock_getres 1527 .long _sys_clock_getres
1528 .long _sys_clock_nanosleep 1528 .long _sys_clock_nanosleep
1529 .long _sys_statfs64 1529 .long _sys_statfs64
1530 .long _sys_fstatfs64 /* 270 */ 1530 .long _sys_fstatfs64 /* 270 */
1531 .long _sys_tgkill 1531 .long _sys_tgkill
1532 .long _sys_utimes 1532 .long _sys_utimes
1533 .long _sys_fadvise64_64 1533 .long _sys_fadvise64_64
1534 .long _sys_ni_syscall /* vserver */ 1534 .long _sys_ni_syscall /* vserver */
1535 .long _sys_mbind /* 275 */ 1535 .long _sys_mbind /* 275 */
1536 .long _sys_ni_syscall /* get_mempolicy */ 1536 .long _sys_ni_syscall /* get_mempolicy */
1537 .long _sys_ni_syscall /* set_mempolicy */ 1537 .long _sys_ni_syscall /* set_mempolicy */
1538 .long _sys_mq_open 1538 .long _sys_mq_open
1539 .long _sys_mq_unlink 1539 .long _sys_mq_unlink
1540 .long _sys_mq_timedsend /* 280 */ 1540 .long _sys_mq_timedsend /* 280 */
1541 .long _sys_mq_timedreceive 1541 .long _sys_mq_timedreceive
1542 .long _sys_mq_notify 1542 .long _sys_mq_notify
1543 .long _sys_mq_getsetattr 1543 .long _sys_mq_getsetattr
1544 .long _sys_ni_syscall /* kexec_load */ 1544 .long _sys_ni_syscall /* kexec_load */
1545 .long _sys_waitid /* 285 */ 1545 .long _sys_waitid /* 285 */
1546 .long _sys_add_key 1546 .long _sys_add_key
1547 .long _sys_request_key 1547 .long _sys_request_key
1548 .long _sys_keyctl 1548 .long _sys_keyctl
1549 .long _sys_ioprio_set 1549 .long _sys_ioprio_set
1550 .long _sys_ioprio_get /* 290 */ 1550 .long _sys_ioprio_get /* 290 */
1551 .long _sys_inotify_init 1551 .long _sys_inotify_init
1552 .long _sys_inotify_add_watch 1552 .long _sys_inotify_add_watch
1553 .long _sys_inotify_rm_watch 1553 .long _sys_inotify_rm_watch
1554 .long _sys_ni_syscall /* migrate_pages */ 1554 .long _sys_ni_syscall /* migrate_pages */
1555 .long _sys_openat /* 295 */ 1555 .long _sys_openat /* 295 */
1556 .long _sys_mkdirat 1556 .long _sys_mkdirat
1557 .long _sys_mknodat 1557 .long _sys_mknodat
1558 .long _sys_fchownat 1558 .long _sys_fchownat
1559 .long _sys_futimesat 1559 .long _sys_futimesat
1560 .long _sys_fstatat64 /* 300 */ 1560 .long _sys_fstatat64 /* 300 */
1561 .long _sys_unlinkat 1561 .long _sys_unlinkat
1562 .long _sys_renameat 1562 .long _sys_renameat
1563 .long _sys_linkat 1563 .long _sys_linkat
1564 .long _sys_symlinkat 1564 .long _sys_symlinkat
1565 .long _sys_readlinkat /* 305 */ 1565 .long _sys_readlinkat /* 305 */
1566 .long _sys_fchmodat 1566 .long _sys_fchmodat
1567 .long _sys_faccessat 1567 .long _sys_faccessat
1568 .long _sys_pselect6 1568 .long _sys_pselect6
1569 .long _sys_ppoll 1569 .long _sys_ppoll
1570 .long _sys_unshare /* 310 */ 1570 .long _sys_unshare /* 310 */
1571 .long _sys_sram_alloc 1571 .long _sys_sram_alloc
1572 .long _sys_sram_free 1572 .long _sys_sram_free
1573 .long _sys_dma_memcpy 1573 .long _sys_dma_memcpy
1574 .long _sys_accept 1574 .long _sys_accept
1575 .long _sys_bind /* 315 */ 1575 .long _sys_bind /* 315 */
1576 .long _sys_connect 1576 .long _sys_connect
1577 .long _sys_getpeername 1577 .long _sys_getpeername
1578 .long _sys_getsockname 1578 .long _sys_getsockname
1579 .long _sys_getsockopt 1579 .long _sys_getsockopt
1580 .long _sys_listen /* 320 */ 1580 .long _sys_listen /* 320 */
1581 .long _sys_recv 1581 .long _sys_recv
1582 .long _sys_recvfrom 1582 .long _sys_recvfrom
1583 .long _sys_recvmsg 1583 .long _sys_recvmsg
1584 .long _sys_send 1584 .long _sys_send
1585 .long _sys_sendmsg /* 325 */ 1585 .long _sys_sendmsg /* 325 */
1586 .long _sys_sendto 1586 .long _sys_sendto
1587 .long _sys_setsockopt 1587 .long _sys_setsockopt
1588 .long _sys_shutdown 1588 .long _sys_shutdown
1589 .long _sys_socket 1589 .long _sys_socket
1590 .long _sys_socketpair /* 330 */ 1590 .long _sys_socketpair /* 330 */
1591 .long _sys_semctl 1591 .long _sys_semctl
1592 .long _sys_semget 1592 .long _sys_semget
1593 .long _sys_semop 1593 .long _sys_semop
1594 .long _sys_msgctl 1594 .long _sys_msgctl
1595 .long _sys_msgget /* 335 */ 1595 .long _sys_msgget /* 335 */
1596 .long _sys_msgrcv 1596 .long _sys_msgrcv
1597 .long _sys_msgsnd 1597 .long _sys_msgsnd
1598 .long _sys_shmat 1598 .long _sys_shmat
1599 .long _sys_shmctl 1599 .long _sys_shmctl
1600 .long _sys_shmdt /* 340 */ 1600 .long _sys_shmdt /* 340 */
1601 .long _sys_shmget 1601 .long _sys_shmget
1602 .long _sys_splice 1602 .long _sys_splice
1603 .long _sys_sync_file_range 1603 .long _sys_sync_file_range
1604 .long _sys_tee 1604 .long _sys_tee
1605 .long _sys_vmsplice /* 345 */ 1605 .long _sys_vmsplice /* 345 */
1606 .long _sys_epoll_pwait 1606 .long _sys_epoll_pwait
1607 .long _sys_utimensat 1607 .long _sys_utimensat
1608 .long _sys_signalfd 1608 .long _sys_signalfd
1609 .long _sys_timerfd_create 1609 .long _sys_timerfd_create
1610 .long _sys_eventfd /* 350 */ 1610 .long _sys_eventfd /* 350 */
1611 .long _sys_pread64 1611 .long _sys_pread64
1612 .long _sys_pwrite64 1612 .long _sys_pwrite64
1613 .long _sys_fadvise64 1613 .long _sys_fadvise64
1614 .long _sys_set_robust_list 1614 .long _sys_set_robust_list
1615 .long _sys_get_robust_list /* 355 */ 1615 .long _sys_get_robust_list /* 355 */
1616 .long _sys_fallocate 1616 .long _sys_fallocate
1617 .long _sys_semtimedop 1617 .long _sys_semtimedop
1618 .long _sys_timerfd_settime 1618 .long _sys_timerfd_settime
1619 .long _sys_timerfd_gettime 1619 .long _sys_timerfd_gettime
1620 .long _sys_signalfd4 /* 360 */ 1620 .long _sys_signalfd4 /* 360 */
1621 .long _sys_eventfd2 1621 .long _sys_eventfd2
1622 .long _sys_epoll_create1 1622 .long _sys_epoll_create1
1623 .long _sys_dup3 1623 .long _sys_dup3
1624 .long _sys_pipe2 1624 .long _sys_pipe2
1625 .long _sys_inotify_init1 /* 365 */ 1625 .long _sys_inotify_init1 /* 365 */
1626 .long _sys_preadv 1626 .long _sys_preadv
1627 .long _sys_pwritev 1627 .long _sys_pwritev
1628 .long _sys_rt_tgsigqueueinfo 1628 .long _sys_rt_tgsigqueueinfo
1629 .long _sys_perf_event_open 1629 .long _sys_perf_event_open
1630 .long _sys_recvmmsg /* 370 */ 1630 .long _sys_recvmmsg /* 370 */
1631 .long _sys_fanotify_init
1632 .long _sys_fanotify_mark
1633 .long _sys_prlimit64
1631 1634
1632 .rept NR_syscalls-(.-_sys_call_table)/4 1635 .rept NR_syscalls-(.-_sys_call_table)/4
1633 .long _sys_ni_syscall 1636 .long _sys_ni_syscall
1634 .endr 1637 .endr
1635 END(_sys_call_table) 1638 END(_sys_call_table)
1636 1639
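
The three entries added above fill the next free slots in the table: _sys_recvmmsg sits at slot 370, so _sys_fanotify_init, _sys_fanotify_mark and _sys_prlimit64 land at 371, 372 and 373 (matching the __NR_* defines added in unistd.h by this commit). A minimal userspace sketch of exercising the new prlimit64 slot is shown below; it is not part of the commit, the fallback define of 373 is inferred from the table, and struct krlimit64 is a hypothetical local mirror of the kernel's two-64-bit-field rlimit64 layout for toolchains whose headers predate the syscall.

/*
 * Sketch: query RLIMIT_NOFILE of the calling process through the raw
 * prlimit64 syscall, for C libraries that do not yet provide a wrapper.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/resource.h>

#ifndef __NR_prlimit64
# define __NR_prlimit64 373	/* slot inferred from the table above */
#endif

/* Mirrors the kernel's struct rlimit64 layout: two 64-bit values. */
struct krlimit64 {
	unsigned long long rlim_cur;
	unsigned long long rlim_max;
};

int main(void)
{
	struct krlimit64 old = { 0, 0 };

	/* pid 0 = calling process, new limit = NULL: read-only query. */
	if (syscall(__NR_prlimit64, 0, RLIMIT_NOFILE, NULL, &old) != 0) {
		perror("prlimit64");
		return 1;
	}
	printf("RLIMIT_NOFILE: cur=%llu max=%llu\n",
	       old.rlim_cur, old.rlim_max);
	return 0;
}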