Commit dfcf753bd3fb09f336659d07b1c48db7e62772e0

Authored by Kyle McMartin
Parent: 5dd34572ad

Revert "parisc: fix trivial section name warnings"

This reverts commit bd3bb8c15b9a80dbddfb7905b237a4a11a4725b4.

Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>

Showing 11 changed files with 18 additions and 27 deletions

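The reverted commit had pulled these entry paths out of .text by tagging them with the __HEAD marker from <linux/init.h>; the revert below restores the plain .text directives and drops the now-unneeded include. For reference, a sketch of what that marker expands to on the assembler side (as defined in include/linux/init.h in kernels of this vintage):

    /* include/linux/init.h, assembler-visible part (sketch) */
    #define __HEAD	.section ".head.text","ax"

Code assembled after __HEAD therefore lands in .head.text instead of .text, which changes where the linker script places it in the final image; that placement is what this revert undoes.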
arch/parisc/hpux/gate.S
@@ -13,10 +13,9 @@
 #include <asm/unistd.h>
 #include <asm/errno.h>
 #include <linux/linkage.h>
-#include <linux/init.h>
 
 .level LEVEL
-__HEAD
+.text
 
 .import hpux_call_table
 .import hpux_syscall_exit,code
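
If you want to confirm the effect on a built object, objdump can list its sections and symbols; a hypothetical spot-check (object path assumed, and use the hppa cross objdump when building on another host):

    $ objdump -h arch/parisc/hpux/gate.o | grep -i text
    $ objdump -t arch/parisc/hpux/gate.o | grep hpux_gateway_page

With the revert applied, hpux_gateway_page should show up in .text rather than .head.text.
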
arch/parisc/hpux/wrappers.S
@@ -28,10 +28,9 @@
 #include <asm/assembly.h>
 #include <asm/signal.h>
 #include <linux/linkage.h>
-#include <linux/init.h>
 
 .level LEVEL
-__HEAD
+.text
 
 /* These should probably go in a header file somewhere.
  * They are duplicated in kernel/wrappers.S

arch/parisc/kernel/entry.S
@@ -38,7 +38,6 @@
 #include <asm/thread_info.h>
 
 #include <linux/linkage.h>
-#include <linux/init.h>
 
 #ifdef CONFIG_64BIT
 .level 2.0w
@@ -622,7 +621,7 @@
  * the static part of the kernel address space.
  */
 
-__HEAD
+.text
 
 .align PAGE_SIZE
 
743 #ifdef CONFIG_64BIT 742 #ifdef CONFIG_64BIT
744 /* Yo, function pointers in wide mode are little structs... -PB */ 743 /* Yo, function pointers in wide mode are little structs... -PB */
745 ldd 24(%r26), %r2 744 ldd 24(%r26), %r2
746 STREG %r2, PT_GR27(%r1) /* Store child's %dp */ 745 STREG %r2, PT_GR27(%r1) /* Store child's %dp */
747 ldd 16(%r26), %r26 746 ldd 16(%r26), %r26
748 747
749 STREG %r22, PT_GR22(%r1) /* save r22 (arg5) */ 748 STREG %r22, PT_GR22(%r1) /* save r22 (arg5) */
750 copy %r0, %r22 /* user_tid */ 749 copy %r0, %r22 /* user_tid */
751 #endif 750 #endif
752 STREG %r26, PT_GR26(%r1) /* Store function & argument for child */ 751 STREG %r26, PT_GR26(%r1) /* Store function & argument for child */
753 STREG %r25, PT_GR25(%r1) 752 STREG %r25, PT_GR25(%r1)
754 ldil L%CLONE_UNTRACED, %r26 753 ldil L%CLONE_UNTRACED, %r26
755 ldo CLONE_VM(%r26), %r26 /* Force CLONE_VM since only init_mm */ 754 ldo CLONE_VM(%r26), %r26 /* Force CLONE_VM since only init_mm */
756 or %r26, %r24, %r26 /* will have kernel mappings. */ 755 or %r26, %r24, %r26 /* will have kernel mappings. */
757 ldi 1, %r25 /* stack_start, signals kernel thread */ 756 ldi 1, %r25 /* stack_start, signals kernel thread */
758 stw %r0, -52(%r30) /* user_tid */ 757 stw %r0, -52(%r30) /* user_tid */
759 #ifdef CONFIG_64BIT 758 #ifdef CONFIG_64BIT
760 ldo -16(%r30),%r29 /* Reference param save area */ 759 ldo -16(%r30),%r29 /* Reference param save area */
761 #endif 760 #endif
762 BL do_fork, %r2 761 BL do_fork, %r2
763 copy %r1, %r24 /* pt_regs */ 762 copy %r1, %r24 /* pt_regs */
764 763
765 /* Parent Returns here */ 764 /* Parent Returns here */
766 765
767 LDREG -PT_SZ_ALGN-RP_OFFSET(%r30), %r2 766 LDREG -PT_SZ_ALGN-RP_OFFSET(%r30), %r2
768 ldo -PT_SZ_ALGN(%r30), %r30 767 ldo -PT_SZ_ALGN(%r30), %r30
769 bv %r0(%r2) 768 bv %r0(%r2)
770 nop 769 nop
771 ENDPROC(__kernel_thread) 770 ENDPROC(__kernel_thread)
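Functionally, __kernel_thread is the assembly half of the old kernel_thread() helper: it forces CLONE_VM (a kernel thread has no mm of its own, so it must share init_mm's kernel mappings) and CLONE_UNTRACED into the caller's flags, then calls do_fork() with stack_start = 1 as the kernel-thread marker. A sketch of the flag computation, assuming the constants #defined above:

    /* Values copied from the #defines above; they must agree with
     * <linux/sched.h>. */
    #define CLONE_VM       0x00000100UL
    #define CLONE_UNTRACED 0x00800000UL

    /* The flags word built in %r26 before the BL do_fork above. */
    static unsigned long kernel_thread_flags(unsigned long caller_flags)
    {
        return caller_flags | CLONE_VM | CLONE_UNTRACED;
    }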
772 771
773 /* 772 /*
774 * Child Returns here 773 * Child Returns here
775 * 774 *
776 * copy_thread moved args from temp save area set up above 775 * copy_thread moved args from temp save area set up above
777 * into task save area. 776 * into task save area.
778 */ 777 */
779 778
780 ENTRY(ret_from_kernel_thread) 779 ENTRY(ret_from_kernel_thread)
781 780
782 /* Call schedule_tail first though */ 781 /* Call schedule_tail first though */
783 BL schedule_tail, %r2 782 BL schedule_tail, %r2
784 nop 783 nop
785 784
786 LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1 785 LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1
787 LDREG TASK_PT_GR25(%r1), %r26 786 LDREG TASK_PT_GR25(%r1), %r26
788 #ifdef CONFIG_64BIT 787 #ifdef CONFIG_64BIT
789 LDREG TASK_PT_GR27(%r1), %r27 788 LDREG TASK_PT_GR27(%r1), %r27
790 LDREG TASK_PT_GR22(%r1), %r22 789 LDREG TASK_PT_GR22(%r1), %r22
791 #endif 790 #endif
792 LDREG TASK_PT_GR26(%r1), %r1 791 LDREG TASK_PT_GR26(%r1), %r1
793 ble 0(%sr7, %r1) 792 ble 0(%sr7, %r1)
794 copy %r31, %r2 793 copy %r31, %r2
795 794
796 #ifdef CONFIG_64BIT 795 #ifdef CONFIG_64BIT
797 ldo -16(%r30),%r29 /* Reference param save area */ 796 ldo -16(%r30),%r29 /* Reference param save area */
798 loadgp /* Thread could have been in a module */ 797 loadgp /* Thread could have been in a module */
799 #endif 798 #endif
800 #ifndef CONFIG_64BIT 799 #ifndef CONFIG_64BIT
801 b sys_exit 800 b sys_exit
802 #else 801 #else
803 load32 sys_exit, %r1 802 load32 sys_exit, %r1
804 bv %r0(%r1) 803 bv %r0(%r1)
805 #endif 804 #endif
806 ldi 0, %r26 805 ldi 0, %r26
807 ENDPROC(ret_from_kernel_thread) 806 ENDPROC(ret_from_kernel_thread)
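Reduced to C, the child-side path above is: run the scheduler's post-switch bookkeeping, call the thread function with its argument (reloaded from the pt_regs slots that __kernel_thread filled in), and exit. A sketch with the kernel calls passed in as stand-ins:

    /* Illustrative shape of ret_from_kernel_thread; fn/arg correspond
     * to the TASK_PT_GR26/TASK_PT_GR25 slots loaded above. */
    static void ret_from_kernel_thread_sketch(void (*schedule_tail_fn)(void),
                                              int (*fn)(void *), void *arg,
                                              void (*sys_exit_fn)(int))
    {
        schedule_tail_fn();   /* BL schedule_tail           */
        fn(arg);              /* ble 0(%sr7,%r1)            */
        sys_exit_fn(0);       /* b sys_exit with %r26 = 0   */
    }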
808 807
809 .import sys_execve, code 808 .import sys_execve, code
810 ENTRY(__execve) 809 ENTRY(__execve)
811 copy %r2, %r15 810 copy %r2, %r15
812 copy %r30, %r16 811 copy %r30, %r16
813 ldo PT_SZ_ALGN(%r30), %r30 812 ldo PT_SZ_ALGN(%r30), %r30
814 STREG %r26, PT_GR26(%r16) 813 STREG %r26, PT_GR26(%r16)
815 STREG %r25, PT_GR25(%r16) 814 STREG %r25, PT_GR25(%r16)
816 STREG %r24, PT_GR24(%r16) 815 STREG %r24, PT_GR24(%r16)
817 #ifdef CONFIG_64BIT 816 #ifdef CONFIG_64BIT
818 ldo -16(%r30),%r29 /* Reference param save area */ 817 ldo -16(%r30),%r29 /* Reference param save area */
819 #endif 818 #endif
820 BL sys_execve, %r2 819 BL sys_execve, %r2
821 copy %r16, %r26 820 copy %r16, %r26
822 821
823 cmpib,=,n 0,%r28,intr_return /* forward */ 822 cmpib,=,n 0,%r28,intr_return /* forward */
824 823
825 /* yes, this will trap and die. */ 824 /* yes, this will trap and die. */
826 copy %r15, %r2 825 copy %r15, %r2
827 copy %r16, %r30 826 copy %r16, %r30
828 bv %r0(%r2) 827 bv %r0(%r2)
829 nop 828 nop
830 ENDPROC(__execve) 829 ENDPROC(__execve)
831 830
832 831
833 /* 832 /*
834 * struct task_struct *_switch_to(struct task_struct *prev, 833 * struct task_struct *_switch_to(struct task_struct *prev,
835 * struct task_struct *next) 834 * struct task_struct *next)
836 * 835 *
837 * switch kernel stacks and return prev */ 836 * switch kernel stacks and return prev */
838 ENTRY(_switch_to) 837 ENTRY(_switch_to)
839 STREG %r2, -RP_OFFSET(%r30) 838 STREG %r2, -RP_OFFSET(%r30)
840 839
841 callee_save_float 840 callee_save_float
842 callee_save 841 callee_save
843 842
844 load32 _switch_to_ret, %r2 843 load32 _switch_to_ret, %r2
845 844
846 STREG %r2, TASK_PT_KPC(%r26) 845 STREG %r2, TASK_PT_KPC(%r26)
847 LDREG TASK_PT_KPC(%r25), %r2 846 LDREG TASK_PT_KPC(%r25), %r2
848 847
849 STREG %r30, TASK_PT_KSP(%r26) 848 STREG %r30, TASK_PT_KSP(%r26)
850 LDREG TASK_PT_KSP(%r25), %r30 849 LDREG TASK_PT_KSP(%r25), %r30
851 LDREG TASK_THREAD_INFO(%r25), %r25 850 LDREG TASK_THREAD_INFO(%r25), %r25
852 bv %r0(%r2) 851 bv %r0(%r2)
853 mtctl %r25,%cr30 852 mtctl %r25,%cr30
854 853
855 _switch_to_ret: 854 _switch_to_ret:
856 mtctl %r0, %cr0 /* Needed for single stepping */ 855 mtctl %r0, %cr0 /* Needed for single stepping */
857 callee_rest 856 callee_rest
858 callee_rest_float 857 callee_rest_float
859 858
860 LDREG -RP_OFFSET(%r30), %r2 859 LDREG -RP_OFFSET(%r30), %r2
861 bv %r0(%r2) 860 bv %r0(%r2)
862 copy %r26, %r28 861 copy %r26, %r28
863 ENDPROC(_switch_to) 862 ENDPROC(_switch_to)
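The contract of _switch_to is easiest to see by analogy: saving TASK_PT_KPC/TASK_PT_KSP for prev and loading them for next behaves like a setjmp on prev followed by a longjmp to next (plus the cr30 thread_info swap). A purely illustrative sketch, not a model of the PA-RISC mechanics:

    #include <setjmp.h>

    struct task_ctx { jmp_buf cpu; };  /* stands in for KPC/KSP + callee-saves */

    static void switch_to_sketch(struct task_ctx *prev, struct task_ctx *next)
    {
        if (setjmp(prev->cpu) == 0)    /* STREG ... TASK_PT_KPC/KSP(prev) */
            longjmp(next->cpu, 1);     /* LDREG ... TASK_PT_KPC/KSP(next) */
        /* execution resumes here when a later switch returns to prev */
    }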
864 863
865 /* 864 /*
866 * Common rfi return path for interruptions, kernel execve, and 865 * Common rfi return path for interruptions, kernel execve, and
867 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will 866 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
868 * return via this path if the signal was received when the process 867 * return via this path if the signal was received when the process
869 * was running; if the process was blocked on a syscall then the 868 * was running; if the process was blocked on a syscall then the
870 * normal syscall_exit path is used. All syscalls for traced 869 * normal syscall_exit path is used. All syscalls for traced
871 * processes exit via intr_restore. 870 * processes exit via intr_restore.
872 * 871 *
873 * XXX If any syscalls that change a process's space id ever exit 872 * XXX If any syscalls that change a process's space id ever exit
874 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and 873 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
875 * adjust IASQ[0..1]. 874 * adjust IASQ[0..1].
876 * 875 *
877 */ 876 */
878 877
879 .align PAGE_SIZE 878 .align PAGE_SIZE
880 879
881 ENTRY(syscall_exit_rfi) 880 ENTRY(syscall_exit_rfi)
882 mfctl %cr30,%r16 881 mfctl %cr30,%r16
883 LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */ 882 LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */
884 ldo TASK_REGS(%r16),%r16 883 ldo TASK_REGS(%r16),%r16
885 /* Force iaoq to userspace, as the user has had access to our current 884 /* Force iaoq to userspace, as the user has had access to our current
886 * context via sigcontext. Also filter the PSW for the same reason. 885 * context via sigcontext. Also filter the PSW for the same reason.
887 */ 886 */
888 LDREG PT_IAOQ0(%r16),%r19 887 LDREG PT_IAOQ0(%r16),%r19
889 depi 3,31,2,%r19 888 depi 3,31,2,%r19
890 STREG %r19,PT_IAOQ0(%r16) 889 STREG %r19,PT_IAOQ0(%r16)
891 LDREG PT_IAOQ1(%r16),%r19 890 LDREG PT_IAOQ1(%r16),%r19
892 depi 3,31,2,%r19 891 depi 3,31,2,%r19
893 STREG %r19,PT_IAOQ1(%r16) 892 STREG %r19,PT_IAOQ1(%r16)
894 LDREG PT_PSW(%r16),%r19 893 LDREG PT_PSW(%r16),%r19
895 load32 USER_PSW_MASK,%r1 894 load32 USER_PSW_MASK,%r1
896 #ifdef CONFIG_64BIT 895 #ifdef CONFIG_64BIT
897 load32 USER_PSW_HI_MASK,%r20 896 load32 USER_PSW_HI_MASK,%r20
898 depd %r20,31,32,%r1 897 depd %r20,31,32,%r1
899 #endif 898 #endif
900 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */ 899 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
901 load32 USER_PSW,%r1 900 load32 USER_PSW,%r1
902 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */ 901 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
903 STREG %r19,PT_PSW(%r16) 902 STREG %r19,PT_PSW(%r16)
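In C terms, the fixup just performed is: force the two low bits of each IAOQ word to 3 (the user privilege level) and clamp the saved PSW to the user-visible bits. A sketch with the mask constants taken as parameters:

    #include <stdint.h>

    /* Sketch of the IAOQ/PSW sanitisation above; user_psw_mask and
     * user_psw stand in for USER_PSW_MASK/USER_PSW. */
    static uint32_t force_user_iaoq(uint32_t iaoq)
    {
        return iaoq | 3;                  /* depi 3,31,2,%r19 */
    }

    static uint32_t sanitize_psw(uint32_t psw, uint32_t user_psw_mask,
                                 uint32_t user_psw)
    {
        psw &= user_psw_mask;             /* drop privileged bits       */
        return psw | user_psw;            /* force default user bits on */
    }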
904 903
905 /* 904 /*
906 * If we aren't being traced, we never saved space registers 905 * If we aren't being traced, we never saved space registers
907 * (we don't store them in the sigcontext), so set them 906 * (we don't store them in the sigcontext), so set them
908 * to "proper" values now (otherwise we'll wind up restoring 907 * to "proper" values now (otherwise we'll wind up restoring
909 * whatever was last stored in the task structure, which might 908 * whatever was last stored in the task structure, which might
910 * be inconsistent if an interrupt occurred while on the gateway 909 * be inconsistent if an interrupt occurred while on the gateway
911 * page). Note that we may be "trashing" values the user put in 910 * page). Note that we may be "trashing" values the user put in
912 * them, but we don't support the user changing them. 911 * them, but we don't support the user changing them.
913 */ 912 */
914 913
915 STREG %r0,PT_SR2(%r16) 914 STREG %r0,PT_SR2(%r16)
916 mfsp %sr3,%r19 915 mfsp %sr3,%r19
917 STREG %r19,PT_SR0(%r16) 916 STREG %r19,PT_SR0(%r16)
918 STREG %r19,PT_SR1(%r16) 917 STREG %r19,PT_SR1(%r16)
919 STREG %r19,PT_SR3(%r16) 918 STREG %r19,PT_SR3(%r16)
920 STREG %r19,PT_SR4(%r16) 919 STREG %r19,PT_SR4(%r16)
921 STREG %r19,PT_SR5(%r16) 920 STREG %r19,PT_SR5(%r16)
922 STREG %r19,PT_SR6(%r16) 921 STREG %r19,PT_SR6(%r16)
923 STREG %r19,PT_SR7(%r16) 922 STREG %r19,PT_SR7(%r16)
924 923
925 intr_return: 924 intr_return:
926 /* NOTE: Need to enable interrupts in case we schedule. */ 925 /* NOTE: Need to enable interrupts in case we schedule. */
927 ssm PSW_SM_I, %r0 926 ssm PSW_SM_I, %r0
928 927
929 intr_check_resched: 928 intr_check_resched:
930 929
931 /* check for reschedule */ 930 /* check for reschedule */
932 mfctl %cr30,%r1 931 mfctl %cr30,%r1
933 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */ 932 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
934 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */ 933 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
935 934
936 .import do_notify_resume,code 935 .import do_notify_resume,code
937 intr_check_sig: 936 intr_check_sig:
938 /* As above */ 937 /* As above */
939 mfctl %cr30,%r1 938 mfctl %cr30,%r1
940 LDREG TI_FLAGS(%r1),%r19 939 LDREG TI_FLAGS(%r1),%r19
941 ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r20 940 ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r20
942 and,COND(<>) %r19, %r20, %r0 941 and,COND(<>) %r19, %r20, %r0
943 b,n intr_restore /* skip past if we've nothing to do */ 942 b,n intr_restore /* skip past if we've nothing to do */
944 943
945 /* This check is critical to having LWS 944 /* This check is critical to having LWS
946 * working. The IASQ is zero on the gateway 945 * working. The IASQ is zero on the gateway
947 * page and we cannot deliver any signals until 946 * page and we cannot deliver any signals until
948 * we get off the gateway page. 947 * we get off the gateway page.
949 * 948 *
950 * Only do signals if we are returning to user space 949 * Only do signals if we are returning to user space
951 */ 950 */
952 LDREG PT_IASQ0(%r16), %r20 951 LDREG PT_IASQ0(%r16), %r20
953 cmpib,COND(=),n 0,%r20,intr_restore /* backward */ 952 cmpib,COND(=),n 0,%r20,intr_restore /* backward */
954 LDREG PT_IASQ1(%r16), %r20 953 LDREG PT_IASQ1(%r16), %r20
955 cmpib,COND(=),n 0,%r20,intr_restore /* backward */ 954 cmpib,COND(=),n 0,%r20,intr_restore /* backward */
956 955
957 copy %r0, %r25 /* long in_syscall = 0 */ 956 copy %r0, %r25 /* long in_syscall = 0 */
958 #ifdef CONFIG_64BIT 957 #ifdef CONFIG_64BIT
959 ldo -16(%r30),%r29 /* Reference param save area */ 958 ldo -16(%r30),%r29 /* Reference param save area */
960 #endif 959 #endif
961 960
962 BL do_notify_resume,%r2 961 BL do_notify_resume,%r2
963 copy %r16, %r26 /* struct pt_regs *regs */ 962 copy %r16, %r26 /* struct pt_regs *regs */
964 963
965 b,n intr_check_sig 964 b,n intr_check_sig
966 965
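Seen from C, intr_check_resched/intr_check_sig form a loop: reschedule while TIF_NEED_RESCHED is set, fall through to intr_restore when no signal work is pending or when we are not returning to user space (an IASQ of zero means the gateway page or kernel), otherwise deliver via do_notify_resume and re-check. A slightly simplified control-flow sketch with stand-in names (intr_do_resched additionally routes kernel-space returns to the preemption check below):

    /* Stand-ins: ti_flags mirrors TI_FLAGS, iasq0/iasq1 mirror
     * PT_IASQ0/PT_IASQ1, the callbacks mirror schedule() and
     * do_notify_resume(). */
    struct intr_regs_sketch { unsigned long iasq0, iasq1; };

    static void intr_return_sketch(volatile unsigned long *ti_flags,
                                   struct intr_regs_sketch *regs,
                                   unsigned long tif_need_resched,
                                   unsigned long tif_signal_mask,
                                   void (*schedule_fn)(void),
                                   void (*notify_fn)(struct intr_regs_sketch *,
                                                     long))
    {
        for (;;) {
            if (*ti_flags & tif_need_resched) {   /* intr_do_resched */
                schedule_fn();
                continue;
            }
            if (!(*ti_flags & tif_signal_mask))
                break;                            /* nothing to do   */
            if (regs->iasq0 == 0 || regs->iasq1 == 0)
                break;                            /* not user space  */
            notify_fn(regs, 0);                   /* in_syscall = 0  */
        }
        /* fall through to intr_restore */
    }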
967 intr_restore: 966 intr_restore:
968 copy %r16,%r29 967 copy %r16,%r29
969 ldo PT_FR31(%r29),%r1 968 ldo PT_FR31(%r29),%r1
970 rest_fp %r1 969 rest_fp %r1
971 rest_general %r29 970 rest_general %r29
972 971
973 /* inverse of virt_map */ 972 /* inverse of virt_map */
974 pcxt_ssm_bug 973 pcxt_ssm_bug
975 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */ 974 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
976 tophys_r1 %r29 975 tophys_r1 %r29
977 976
978 /* Restore space id's and special cr's from PT_REGS 977 /* Restore space id's and special cr's from PT_REGS
979 * structure pointed to by r29 978 * structure pointed to by r29
980 */ 979 */
981 rest_specials %r29 980 rest_specials %r29
982 981
983 /* IMPORTANT: rest_stack restores r29 last (we are using it)! 982 /* IMPORTANT: rest_stack restores r29 last (we are using it)!
984 * It also restores r1 and r30. 983 * It also restores r1 and r30.
985 */ 984 */
986 rest_stack 985 rest_stack
987 986
988 rfi 987 rfi
989 nop 988 nop
990 nop 989 nop
991 nop 990 nop
992 nop 991 nop
993 nop 992 nop
994 nop 993 nop
995 nop 994 nop
996 nop 995 nop
997 996
998 #ifndef CONFIG_PREEMPT 997 #ifndef CONFIG_PREEMPT
999 # define intr_do_preempt intr_restore 998 # define intr_do_preempt intr_restore
1000 #endif /* !CONFIG_PREEMPT */ 999 #endif /* !CONFIG_PREEMPT */
1001 1000
1002 .import schedule,code 1001 .import schedule,code
1003 intr_do_resched: 1002 intr_do_resched:
1004 /* Only call schedule on return to userspace. If we're returning 1003 /* Only call schedule on return to userspace. If we're returning
1005 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise 1004 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
1006 * we jump back to intr_restore. 1005 * we jump back to intr_restore.
1007 */ 1006 */
1008 LDREG PT_IASQ0(%r16), %r20 1007 LDREG PT_IASQ0(%r16), %r20
1009 cmpib,COND(=) 0, %r20, intr_do_preempt 1008 cmpib,COND(=) 0, %r20, intr_do_preempt
1010 nop 1009 nop
1011 LDREG PT_IASQ1(%r16), %r20 1010 LDREG PT_IASQ1(%r16), %r20
1012 cmpib,COND(=) 0, %r20, intr_do_preempt 1011 cmpib,COND(=) 0, %r20, intr_do_preempt
1013 nop 1012 nop
1014 1013
1015 #ifdef CONFIG_64BIT 1014 #ifdef CONFIG_64BIT
1016 ldo -16(%r30),%r29 /* Reference param save area */ 1015 ldo -16(%r30),%r29 /* Reference param save area */
1017 #endif 1016 #endif
1018 1017
1019 ldil L%intr_check_sig, %r2 1018 ldil L%intr_check_sig, %r2
1020 #ifndef CONFIG_64BIT 1019 #ifndef CONFIG_64BIT
1021 b schedule 1020 b schedule
1022 #else 1021 #else
1023 load32 schedule, %r20 1022 load32 schedule, %r20
1024 bv %r0(%r20) 1023 bv %r0(%r20)
1025 #endif 1024 #endif
1026 ldo R%intr_check_sig(%r2), %r2 1025 ldo R%intr_check_sig(%r2), %r2
1027 1026
1028 /* preempt the current task on returning to kernel 1027 /* preempt the current task on returning to kernel
1029 * mode from an interrupt, iff need_resched is set, 1028 * mode from an interrupt, iff need_resched is set,
1030 * and preempt_count is 0. otherwise, we continue on 1029 * and preempt_count is 0. otherwise, we continue on
1031 * our merry way back to the current running task. 1030 * our merry way back to the current running task.
1032 */ 1031 */
1033 #ifdef CONFIG_PREEMPT 1032 #ifdef CONFIG_PREEMPT
1034 .import preempt_schedule_irq,code 1033 .import preempt_schedule_irq,code
1035 intr_do_preempt: 1034 intr_do_preempt:
1036 rsm PSW_SM_I, %r0 /* disable interrupts */ 1035 rsm PSW_SM_I, %r0 /* disable interrupts */
1037 1036
1038 /* current_thread_info()->preempt_count */ 1037 /* current_thread_info()->preempt_count */
1039 mfctl %cr30, %r1 1038 mfctl %cr30, %r1
1040 LDREG TI_PRE_COUNT(%r1), %r19 1039 LDREG TI_PRE_COUNT(%r1), %r19
1041 cmpib,COND(<>) 0, %r19, intr_restore /* if preempt_count > 0 */ 1040 cmpib,COND(<>) 0, %r19, intr_restore /* if preempt_count > 0 */
1042 nop /* prev insn branched backwards */ 1041 nop /* prev insn branched backwards */
1043 1042
1044 /* check if we interrupted a critical path */ 1043 /* check if we interrupted a critical path */
1045 LDREG PT_PSW(%r16), %r20 1044 LDREG PT_PSW(%r16), %r20
1046 bb,<,n %r20, 31 - PSW_SM_I, intr_restore 1045 bb,<,n %r20, 31 - PSW_SM_I, intr_restore
1047 nop 1046 nop
1048 1047
1049 BL preempt_schedule_irq, %r2 1048 BL preempt_schedule_irq, %r2
1050 nop 1049 nop
1051 1050
1052 b,n intr_restore /* ssm PSW_SM_I done by intr_restore */ 1051 b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
1053 #endif /* CONFIG_PREEMPT */ 1052 #endif /* CONFIG_PREEMPT */
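The preemption gate above reduces to two tests: preempt_count must be zero, and the interrupted context must not have been a critical (interrupts-off) section, which the bb instruction reads straight out of the saved PSW. Sketch:

    #include <stdbool.h>

    /* Guard of intr_do_preempt; interrupted_critical_path reflects
     * the bb,< test on the saved PSW's I bit above. */
    static bool should_preempt(int preempt_count,
                               bool interrupted_critical_path)
    {
        return preempt_count == 0 && !interrupted_critical_path;
    }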
1054 1053
1055 /* 1054 /*
1056 * External interrupts. 1055 * External interrupts.
1057 */ 1056 */
1058 1057
1059 intr_extint: 1058 intr_extint:
1060 cmpib,COND(=),n 0,%r16,1f 1059 cmpib,COND(=),n 0,%r16,1f
1061 1060
1062 get_stack_use_cr30 1061 get_stack_use_cr30
1063 b,n 2f 1062 b,n 2f
1064 1063
1065 1: 1064 1:
1066 get_stack_use_r30 1065 get_stack_use_r30
1067 2: 1066 2:
1068 save_specials %r29 1067 save_specials %r29
1069 virt_map 1068 virt_map
1070 save_general %r29 1069 save_general %r29
1071 1070
1072 ldo PT_FR0(%r29), %r24 1071 ldo PT_FR0(%r29), %r24
1073 save_fp %r24 1072 save_fp %r24
1074 1073
1075 loadgp 1074 loadgp
1076 1075
1077 copy %r29, %r26 /* arg0 is pt_regs */ 1076 copy %r29, %r26 /* arg0 is pt_regs */
1078 copy %r29, %r16 /* save pt_regs */ 1077 copy %r29, %r16 /* save pt_regs */
1079 1078
1080 ldil L%intr_return, %r2 1079 ldil L%intr_return, %r2
1081 1080
1082 #ifdef CONFIG_64BIT 1081 #ifdef CONFIG_64BIT
1083 ldo -16(%r30),%r29 /* Reference param save area */ 1082 ldo -16(%r30),%r29 /* Reference param save area */
1084 #endif 1083 #endif
1085 1084
1086 b do_cpu_irq_mask 1085 b do_cpu_irq_mask
1087 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */ 1086 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
1088 ENDPROC(syscall_exit_rfi) 1087 ENDPROC(syscall_exit_rfi)
1089 1088
1090 1089
1091 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */ 1090 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1092 1091
1093 ENTRY(intr_save) /* for os_hpmc */ 1092 ENTRY(intr_save) /* for os_hpmc */
1094 mfsp %sr7,%r16 1093 mfsp %sr7,%r16
1095 cmpib,COND(=),n 0,%r16,1f 1094 cmpib,COND(=),n 0,%r16,1f
1096 get_stack_use_cr30 1095 get_stack_use_cr30
1097 b 2f 1096 b 2f
1098 copy %r8,%r26 1097 copy %r8,%r26
1099 1098
1100 1: 1099 1:
1101 get_stack_use_r30 1100 get_stack_use_r30
1102 copy %r8,%r26 1101 copy %r8,%r26
1103 1102
1104 2: 1103 2:
1105 save_specials %r29 1104 save_specials %r29
1106 1105
1107 /* If this trap is an itlb miss, skip saving/adjusting isr/ior */ 1106 /* If this trap is an itlb miss, skip saving/adjusting isr/ior */
1108 1107
1109 /* 1108 /*
1110 * FIXME: 1) Use a #define for the hardwired "6" below (and in 1109 * FIXME: 1) Use a #define for the hardwired "6" below (and in
1111 * traps.c). 1110 * traps.c).
1112 * 2) Once we start executing code above 4 Gb, we need 1111 * 2) Once we start executing code above 4 Gb, we need
1113 * to adjust iasq/iaoq here in the same way we 1112 * to adjust iasq/iaoq here in the same way we
1114 * adjust isr/ior below. 1113 * adjust isr/ior below.
1115 */ 1114 */
1116 1115
1117 cmpib,COND(=),n 6,%r26,skip_save_ior 1116 cmpib,COND(=),n 6,%r26,skip_save_ior
1118 1117
1119 1118
1120 mfctl %cr20, %r16 /* isr */ 1119 mfctl %cr20, %r16 /* isr */
1121 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */ 1120 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1122 mfctl %cr21, %r17 /* ior */ 1121 mfctl %cr21, %r17 /* ior */
1123 1122
1124 1123
1125 #ifdef CONFIG_64BIT 1124 #ifdef CONFIG_64BIT
1126 /* 1125 /*
1127 * If the interrupted code was running with W bit off (32 bit), 1126 * If the interrupted code was running with W bit off (32 bit),
1128 * clear the b bits (bits 0 & 1) in the ior. 1127 * clear the b bits (bits 0 & 1) in the ior.
1129 * save_specials left ipsw value in r8 for us to test. 1128 * save_specials left ipsw value in r8 for us to test.
1130 */ 1129 */
1131 extrd,u,*<> %r8,PSW_W_BIT,1,%r0 1130 extrd,u,*<> %r8,PSW_W_BIT,1,%r0
1132 depdi 0,1,2,%r17 1131 depdi 0,1,2,%r17
1133 1132
1134 /* 1133 /*
1135 * FIXME: This code has hardwired assumptions about the split 1134 * FIXME: This code has hardwired assumptions about the split
1136 * between space bits and offset bits. This will change 1135 * between space bits and offset bits. This will change
1137 * when we allow alternate page sizes. 1136 * when we allow alternate page sizes.
1138 */ 1137 */
1139 1138
1140 /* adjust isr/ior. */ 1139 /* adjust isr/ior. */
1141 extrd,u %r16,63,SPACEID_SHIFT,%r1 /* get high bits from isr for ior */ 1140 extrd,u %r16,63,SPACEID_SHIFT,%r1 /* get high bits from isr for ior */
1142 depd %r1,31,SPACEID_SHIFT,%r17 /* deposit them into ior */ 1141 depd %r1,31,SPACEID_SHIFT,%r17 /* deposit them into ior */
1143 depdi 0,63,SPACEID_SHIFT,%r16 /* clear them from isr */ 1142 depdi 0,63,SPACEID_SHIFT,%r16 /* clear them from isr */
1144 #endif 1143 #endif
1145 STREG %r16, PT_ISR(%r29) 1144 STREG %r16, PT_ISR(%r29)
1146 STREG %r17, PT_IOR(%r29) 1145 STREG %r17, PT_IOR(%r29)
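The 64-bit isr/ior adjustment above splices the low SPACEID_SHIFT bits of the space register into bits 32 and up of the offset, then clears them from the space register. Equivalent C, with spaceid_shift as a parameter:

    #include <stdint.h>

    /* C form of the extrd,u/depd/depdi triple above. */
    static void adjust_isr_ior(uint64_t *isr, uint64_t *ior,
                               unsigned spaceid_shift)
    {
        uint64_t mask = (1ULL << spaceid_shift) - 1;
        uint64_t hi   = *isr & mask;      /* extrd,u %r16,63,SHIFT,%r1 */

        *ior = (*ior & ~(mask << 32)) | (hi << 32); /* depd %r1,31,SHIFT */
        *isr &= ~mask;                    /* depdi 0,63,SHIFT,%r16     */
    }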
1147 1146
1148 1147
1149 skip_save_ior: 1148 skip_save_ior:
1150 virt_map 1149 virt_map
1151 save_general %r29 1150 save_general %r29
1152 1151
1153 ldo PT_FR0(%r29), %r25 1152 ldo PT_FR0(%r29), %r25
1154 save_fp %r25 1153 save_fp %r25
1155 1154
1156 loadgp 1155 loadgp
1157 1156
1158 copy %r29, %r25 /* arg1 is pt_regs */ 1157 copy %r29, %r25 /* arg1 is pt_regs */
1159 #ifdef CONFIG_64BIT 1158 #ifdef CONFIG_64BIT
1160 ldo -16(%r30),%r29 /* Reference param save area */ 1159 ldo -16(%r30),%r29 /* Reference param save area */
1161 #endif 1160 #endif
1162 1161
1163 ldil L%intr_check_sig, %r2 1162 ldil L%intr_check_sig, %r2
1164 copy %r25, %r16 /* save pt_regs */ 1163 copy %r25, %r16 /* save pt_regs */
1165 1164
1166 b handle_interruption 1165 b handle_interruption
1167 ldo R%intr_check_sig(%r2), %r2 1166 ldo R%intr_check_sig(%r2), %r2
1168 ENDPROC(intr_save) 1167 ENDPROC(intr_save)
1169 1168
1170 1169
1171 /* 1170 /*
1172 * Note for all tlb miss handlers: 1171 * Note for all tlb miss handlers:
1173 * 1172 *
1174 * cr24 contains a pointer to the kernel address space 1173 * cr24 contains a pointer to the kernel address space
1175 * page directory. 1174 * page directory.
1176 * 1175 *
1177 * cr25 contains a pointer to the current user address 1176 * cr25 contains a pointer to the current user address
1178 * space page directory. 1177 * space page directory.
1179 * 1178 *
1180 * sr3 will contain the space id of the user address space 1179 * sr3 will contain the space id of the user address space
1181 * of the current running thread while that thread is 1180 * of the current running thread while that thread is
1182 * running in the kernel. 1181 * running in the kernel.
1183 */ 1182 */
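All the handlers below follow the same shape: adjust the space id, fetch the page directory, walk to the pte, mark it accessed, convert pte plus space into a hardware TLB entry, insert it, and rfir. A C-shaped sketch of that common flow, with each stand-in named after the assembler macro it represents:

    #include <stdint.h>

    /* Shape shared by the miss handlers below; every function pointer
     * is a stand-in for the corresponding assembler macro. */
    struct tlb_miss_ops {
        uint64_t *(*walk)(unsigned long spc, unsigned long va); /* L2/L3_ptep  */
        void (*mark_accessed)(uint64_t *pte);                   /* update_ptep */
        void (*insert)(unsigned long spc, uint64_t pte);        /* idtlbt etc. */
        void (*fault)(void);                                    /* slow path   */
    };

    static void tlb_miss_sketch(const struct tlb_miss_ops *ops,
                                unsigned long spc, unsigned long va)
    {
        uint64_t *ptep = ops->walk(spc, va);
        if (!ptep) {               /* no valid translation at this level */
            ops->fault();
            return;
        }
        ops->mark_accessed(ptep);  /* keep the pte's accessed bit honest */
        ops->insert(spc, *ptep);   /* make_insert_tlb + idtlbt/iitlbt    */
        /* rfir restarts the faulting instruction */
    }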
1184 1183
1185 /* 1184 /*
1186 * register number allocations. Note that these are all 1185 * register number allocations. Note that these are all
1187 * in the shadowed registers 1186 * in the shadowed registers
1188 */ 1187 */
1189 1188
1190 t0 = r1 /* temporary register 0 */ 1189 t0 = r1 /* temporary register 0 */
1191 va = r8 /* virtual address for which the trap occurred */ 1190 va = r8 /* virtual address for which the trap occurred */
1192 t1 = r9 /* temporary register 1 */ 1191 t1 = r9 /* temporary register 1 */
1193 pte = r16 /* pte/phys page # */ 1192 pte = r16 /* pte/phys page # */
1194 prot = r17 /* prot bits */ 1193 prot = r17 /* prot bits */
1195 spc = r24 /* space for which the trap occurred */ 1194 spc = r24 /* space for which the trap occurred */
1196 ptp = r25 /* page directory/page table pointer */ 1195 ptp = r25 /* page directory/page table pointer */
1197 1196
1198 #ifdef CONFIG_64BIT 1197 #ifdef CONFIG_64BIT
1199 1198
1200 dtlb_miss_20w: 1199 dtlb_miss_20w:
1201 space_adjust spc,va,t0 1200 space_adjust spc,va,t0
1202 get_pgd spc,ptp 1201 get_pgd spc,ptp
1203 space_check spc,t0,dtlb_fault 1202 space_check spc,t0,dtlb_fault
1204 1203
1205 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w 1204 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1206 1205
1207 update_ptep ptp,pte,t0,t1 1206 update_ptep ptp,pte,t0,t1
1208 1207
1209 make_insert_tlb spc,pte,prot 1208 make_insert_tlb spc,pte,prot
1210 1209
1211 idtlbt pte,prot 1210 idtlbt pte,prot
1212 1211
1213 rfir 1212 rfir
1214 nop 1213 nop
1215 1214
1216 dtlb_check_alias_20w: 1215 dtlb_check_alias_20w:
1217 do_alias spc,t0,t1,va,pte,prot,dtlb_fault 1216 do_alias spc,t0,t1,va,pte,prot,dtlb_fault
1218 1217
1219 idtlbt pte,prot 1218 idtlbt pte,prot
1220 1219
1221 rfir 1220 rfir
1222 nop 1221 nop
1223 1222
1224 nadtlb_miss_20w: 1223 nadtlb_miss_20w:
1225 space_adjust spc,va,t0 1224 space_adjust spc,va,t0
1226 get_pgd spc,ptp 1225 get_pgd spc,ptp
1227 space_check spc,t0,nadtlb_fault 1226 space_check spc,t0,nadtlb_fault
1228 1227
1229 L3_ptep ptp,pte,t0,va,nadtlb_check_flush_20w 1228 L3_ptep ptp,pte,t0,va,nadtlb_check_flush_20w
1230 1229
1231 update_ptep ptp,pte,t0,t1 1230 update_ptep ptp,pte,t0,t1
1232 1231
1233 make_insert_tlb spc,pte,prot 1232 make_insert_tlb spc,pte,prot
1234 1233
1235 idtlbt pte,prot 1234 idtlbt pte,prot
1236 1235
1237 rfir 1236 rfir
1238 nop 1237 nop
1239 1238
1240 nadtlb_check_flush_20w: 1239 nadtlb_check_flush_20w:
1241 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate 1240 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1242 1241
1243 /* Insert a "flush only" translation */ 1242 /* Insert a "flush only" translation */
1244 1243
1245 depdi,z 7,7,3,prot 1244 depdi,z 7,7,3,prot
1246 depdi 1,10,1,prot 1245 depdi 1,10,1,prot
1247 1246
1248 /* Get rid of prot bits and convert to page addr for idtlbt */ 1247 /* Get rid of prot bits and convert to page addr for idtlbt */
1249 1248
1250 depdi 0,63,12,pte 1249 depdi 0,63,12,pte
1251 extrd,u pte,56,52,pte 1250 extrd,u pte,56,52,pte
1252 idtlbt pte,prot 1251 idtlbt pte,prot
1253 1252
1254 rfir 1253 rfir
1255 nop 1254 nop
1256 1255
1257 #else 1256 #else
1258 1257
1259 dtlb_miss_11: 1258 dtlb_miss_11:
1260 get_pgd spc,ptp 1259 get_pgd spc,ptp
1261 1260
1262 space_check spc,t0,dtlb_fault 1261 space_check spc,t0,dtlb_fault
1263 1262
1264 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11 1263 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1265 1264
1266 update_ptep ptp,pte,t0,t1 1265 update_ptep ptp,pte,t0,t1
1267 1266
1268 make_insert_tlb_11 spc,pte,prot 1267 make_insert_tlb_11 spc,pte,prot
1269 1268
1270 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ 1269 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1271 mtsp spc,%sr1 1270 mtsp spc,%sr1
1272 1271
1273 idtlba pte,(%sr1,va) 1272 idtlba pte,(%sr1,va)
1274 idtlbp prot,(%sr1,va) 1273 idtlbp prot,(%sr1,va)
1275 1274
1276 mtsp t0, %sr1 /* Restore sr1 */ 1275 mtsp t0, %sr1 /* Restore sr1 */
1277 1276
1278 rfir 1277 rfir
1279 nop 1278 nop
1280 1279
1281 dtlb_check_alias_11: 1280 dtlb_check_alias_11:
1282 1281
1283 /* Check to see if fault is in the temporary alias region */ 1282 /* Check to see if fault is in the temporary alias region */
1284 1283
1285 cmpib,<>,n 0,spc,dtlb_fault /* forward */ 1284 cmpib,<>,n 0,spc,dtlb_fault /* forward */
1286 ldil L%(TMPALIAS_MAP_START),t0 1285 ldil L%(TMPALIAS_MAP_START),t0
1287 copy va,t1 1286 copy va,t1
1288 depwi 0,31,23,t1 1287 depwi 0,31,23,t1
1289 cmpb,<>,n t0,t1,dtlb_fault /* forward */ 1288 cmpb,<>,n t0,t1,dtlb_fault /* forward */
1290 ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot 1289 ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
1291 depw,z prot,8,7,prot 1290 depw,z prot,8,7,prot
1292 1291
1293 /* 1292 /*
1294 * OK, it is in the temp alias region, check whether "from" or "to". 1293 * OK, it is in the temp alias region, check whether "from" or "to".
1295 * Check "subtle" note in pacache.S re: r23/r26. 1294 * Check "subtle" note in pacache.S re: r23/r26.
1296 */ 1295 */
1297 1296
1298 extrw,u,= va,9,1,r0 1297 extrw,u,= va,9,1,r0
1299 or,tr %r23,%r0,pte /* If "from" use "from" page */ 1298 or,tr %r23,%r0,pte /* If "from" use "from" page */
1300 or %r26,%r0,pte /* else "to", use "to" page */ 1299 or %r26,%r0,pte /* else "to", use "to" page */
1301 1300
1302 idtlba pte,(va) 1301 idtlba pte,(va)
1303 idtlbp prot,(va) 1302 idtlbp prot,(va)
1304 1303
1305 rfir 1304 rfir
1306 nop 1305 nop
1307 1306
1308 nadtlb_miss_11: 1307 nadtlb_miss_11:
1309 get_pgd spc,ptp 1308 get_pgd spc,ptp
1310 1309
1311 space_check spc,t0,nadtlb_fault 1310 space_check spc,t0,nadtlb_fault
1312 1311
1313 L2_ptep ptp,pte,t0,va,nadtlb_check_flush_11 1312 L2_ptep ptp,pte,t0,va,nadtlb_check_flush_11
1314 1313
1315 update_ptep ptp,pte,t0,t1 1314 update_ptep ptp,pte,t0,t1
1316 1315
1317 make_insert_tlb_11 spc,pte,prot 1316 make_insert_tlb_11 spc,pte,prot
1318 1317
1319 1318
1320 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ 1319 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1321 mtsp spc,%sr1 1320 mtsp spc,%sr1
1322 1321
1323 idtlba pte,(%sr1,va) 1322 idtlba pte,(%sr1,va)
1324 idtlbp prot,(%sr1,va) 1323 idtlbp prot,(%sr1,va)
1325 1324
1326 mtsp t0, %sr1 /* Restore sr1 */ 1325 mtsp t0, %sr1 /* Restore sr1 */
1327 1326
1328 rfir 1327 rfir
1329 nop 1328 nop
1330 1329
1331 nadtlb_check_flush_11: 1330 nadtlb_check_flush_11:
1332 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate 1331 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1333 1332
1334 /* Insert a "flush only" translation */ 1333 /* Insert a "flush only" translation */
1335 1334
1336 zdepi 7,7,3,prot 1335 zdepi 7,7,3,prot
1337 depi 1,10,1,prot 1336 depi 1,10,1,prot
1338 1337
1339 /* Get rid of prot bits and convert to page addr for idtlba */ 1338 /* Get rid of prot bits and convert to page addr for idtlba */
1340 1339
1341 depi 0,31,12,pte 1340 depi 0,31,12,pte
1342 extru pte,24,25,pte 1341 extru pte,24,25,pte
1343 1342
1344 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ 1343 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1345 mtsp spc,%sr1 1344 mtsp spc,%sr1
1346 1345
1347 idtlba pte,(%sr1,va) 1346 idtlba pte,(%sr1,va)
1348 idtlbp prot,(%sr1,va) 1347 idtlbp prot,(%sr1,va)
1349 1348
1350 mtsp t0, %sr1 /* Restore sr1 */ 1349 mtsp t0, %sr1 /* Restore sr1 */
1351 1350
1352 rfir 1351 rfir
1353 nop 1352 nop
1354 1353
1355 dtlb_miss_20: 1354 dtlb_miss_20:
1356 space_adjust spc,va,t0 1355 space_adjust spc,va,t0
1357 get_pgd spc,ptp 1356 get_pgd spc,ptp
1358 space_check spc,t0,dtlb_fault 1357 space_check spc,t0,dtlb_fault
1359 1358
1360 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20 1359 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1361 1360
1362 update_ptep ptp,pte,t0,t1 1361 update_ptep ptp,pte,t0,t1
1363 1362
1364 make_insert_tlb spc,pte,prot 1363 make_insert_tlb spc,pte,prot
1365 1364
1366 f_extend pte,t0 1365 f_extend pte,t0
1367 1366
1368 idtlbt pte,prot 1367 idtlbt pte,prot
1369 1368
1370 rfir 1369 rfir
1371 nop 1370 nop
1372 1371
1373 dtlb_check_alias_20: 1372 dtlb_check_alias_20:
1374 do_alias spc,t0,t1,va,pte,prot,dtlb_fault 1373 do_alias spc,t0,t1,va,pte,prot,dtlb_fault
1375 1374
1376 idtlbt pte,prot 1375 idtlbt pte,prot
1377 1376
1378 rfir 1377 rfir
1379 nop 1378 nop
1380 1379
1381 nadtlb_miss_20: 1380 nadtlb_miss_20:
1382 get_pgd spc,ptp 1381 get_pgd spc,ptp
1383 1382
1384 space_check spc,t0,nadtlb_fault 1383 space_check spc,t0,nadtlb_fault
1385 1384
1386 L2_ptep ptp,pte,t0,va,nadtlb_check_flush_20 1385 L2_ptep ptp,pte,t0,va,nadtlb_check_flush_20
1387 1386
1388 update_ptep ptp,pte,t0,t1 1387 update_ptep ptp,pte,t0,t1
1389 1388
1390 make_insert_tlb spc,pte,prot 1389 make_insert_tlb spc,pte,prot
1391 1390
1392 f_extend pte,t0 1391 f_extend pte,t0
1393 1392
1394 idtlbt pte,prot 1393 idtlbt pte,prot
1395 1394
1396 rfir 1395 rfir
1397 nop 1396 nop
1398 1397
1399 nadtlb_check_flush_20: 1398 nadtlb_check_flush_20:
1400 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate 1399 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1401 1400
1402 /* Insert a "flush only" translation */ 1401 /* Insert a "flush only" translation */
1403 1402
1404 depdi,z 7,7,3,prot 1403 depdi,z 7,7,3,prot
1405 depdi 1,10,1,prot 1404 depdi 1,10,1,prot
1406 1405
1407 /* Get rid of prot bits and convert to page addr for idtlbt */ 1406 /* Get rid of prot bits and convert to page addr for idtlbt */
1408 1407
1409 depdi 0,63,12,pte 1408 depdi 0,63,12,pte
1410 extrd,u pte,56,32,pte 1409 extrd,u pte,56,32,pte
1411 idtlbt pte,prot 1410 idtlbt pte,prot
1412 1411
1413 rfir 1412 rfir
1414 nop 1413 nop
1415 #endif 1414 #endif
1416 1415
1417 nadtlb_emulate: 1416 nadtlb_emulate:
1418 1417
1419 /* 1418 /*
1420 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and 1419 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
1421 * probei instructions. We don't want to fault for these 1420 * probei instructions. We don't want to fault for these
1422 * instructions (not only does it not make sense, it can cause 1421 * instructions (not only does it not make sense, it can cause
1423 * deadlocks, since some flushes are done with the mmap 1422 * deadlocks, since some flushes are done with the mmap
1424 * semaphore held). If the translation doesn't exist, we can't 1423 * semaphore held). If the translation doesn't exist, we can't
1425 * insert a translation, so we have to emulate the side effects 1424 * insert a translation, so we have to emulate the side effects
1426 * of the instruction. Since we don't insert a translation 1425 * of the instruction. Since we don't insert a translation
1427 * we can get a lot of faults during a flush loop, so it makes 1426 * we can get a lot of faults during a flush loop, so it makes
1428 * sense to try to do it here with minimum overhead. We only 1427 * sense to try to do it here with minimum overhead. We only
1429 * emulate fdc,fic,pdc,probew,prober instructions whose base 1428 * emulate fdc,fic,pdc,probew,prober instructions whose base
1430 * and index registers are not shadowed. We defer everything 1429 * and index registers are not shadowed. We defer everything
1431 * else to the "slow" path. 1430 * else to the "slow" path.
1432 */ 1431 */
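A C rendering of the decode that follows may help: the index and base register numbers sit in the usual PA-RISC instruction fields (read back from the IIR), get_register refuses shadowed registers by returning -1, and the only architectural side effect emulated is the base-register update for the ,m completer. A sketch, with get_reg/set_reg as stand-ins for get_register/set_register:

    #include <stdbool.h>
    #include <stdint.h>

    /* Assumes the caller already matched the 0x280 opcode pattern
     * checked below; returns false when the slow path (nadtlb_fault)
     * must run instead. */
    static bool emulate_flush_insn(uint32_t iir,
                                   long (*get_reg)(unsigned int),
                                   bool (*set_reg)(unsigned int, long))
    {
        unsigned int rx = (iir >> 16) & 0x1f; /* index, extrw,u %r9,15,5 */
        unsigned int rb = (iir >> 21) & 0x1f; /* base,  extrw,u %r9,10,5 */
        long x, b;

        if (!(iir & (1u << 5)))               /* m bit clear: no update  */
            return true;                      /* just nullify the insn   */

        if ((x = get_reg(rx)) == -1 || (b = get_reg(rb)) == -1)
            return false;                     /* shadowed reg: slow path */

        return set_reg(rb, b + x);            /* add,l: base += index    */
    }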
1433 1432
1434 mfctl %cr19,%r9 /* Get iir */ 1433 mfctl %cr19,%r9 /* Get iir */
1435 1434
1436 /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits. 1435 /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
1437 Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew,probeiw 1436 Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew,probeiw
1438 1437
1439 /* Checks for fdc,fdce,pdc,"fic,4f" only */ 1438 /* Checks for fdc,fdce,pdc,"fic,4f" only */
1440 ldi 0x280,%r16 1439 ldi 0x280,%r16
1441 and %r9,%r16,%r17 1440 and %r9,%r16,%r17
1442 cmpb,<>,n %r16,%r17,nadtlb_probe_check 1441 cmpb,<>,n %r16,%r17,nadtlb_probe_check
1443 bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */ 1442 bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
1444 BL get_register,%r25 1443 BL get_register,%r25
1445 extrw,u %r9,15,5,%r8 /* Get index register # */ 1444 extrw,u %r9,15,5,%r8 /* Get index register # */
1446 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */ 1445 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1447 copy %r1,%r24 1446 copy %r1,%r24
1448 BL get_register,%r25 1447 BL get_register,%r25
1449 extrw,u %r9,10,5,%r8 /* Get base register # */ 1448 extrw,u %r9,10,5,%r8 /* Get base register # */
1450 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */ 1449 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1451 BL set_register,%r25 1450 BL set_register,%r25
1452 add,l %r1,%r24,%r1 /* doesn't affect c/b bits */ 1451 add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
1453 1452
1454 nadtlb_nullify: 1453 nadtlb_nullify:
1455 mfctl %ipsw,%r8 1454 mfctl %ipsw,%r8
1456 ldil L%PSW_N,%r9 1455 ldil L%PSW_N,%r9
1457 or %r8,%r9,%r8 /* Set PSW_N */ 1456 or %r8,%r9,%r8 /* Set PSW_N */
1458 mtctl %r8,%ipsw 1457 mtctl %r8,%ipsw
1459 1458
1460 rfir 1459 rfir
1461 nop 1460 nop
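Nullification itself is a single bit: setting PSW_N in the saved IPSW makes the instruction restarted by rfir complete as a no-op instead of re-faulting. In C:

    #include <stdint.h>

    /* psw_n stands in for the PSW_N constant loaded with ldil above. */
    static uint32_t nullify_on_return(uint32_t ipsw, uint32_t psw_n)
    {
        return ipsw | psw_n;  /* restarted insn is skipped, not re-run */
    }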
1462 1461
1463 /* 1462 /*
1464 When there is no translation for the probe address then we 1463 When there is no translation for the probe address then we
1465 must nullify the insn and return zero in the target register. 1464 must nullify the insn and return zero in the target register.
1466 This will indicate to the calling code that it does not have 1465 This will indicate to the calling code that it does not have
1467 write/read privileges to this address. 1466 write/read privileges to this address.
1468 1467
1469 This should technically work for prober and probew in PA 1.1, 1468 This should technically work for prober and probew in PA 1.1,
1470 and also probe,r and probe,w in PA 2.0 1469 and also probe,r and probe,w in PA 2.0
1471 1470
1472 WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN! 1471 WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1473 THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET. 1472 THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1474 1473
1475 */ 1474 */
1476 nadtlb_probe_check: 1475 nadtlb_probe_check:
1477 ldi 0x80,%r16 1476 ldi 0x80,%r16
1478 and %r9,%r16,%r17 1477 and %r9,%r16,%r17
1479 cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/ 1478 cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1480 BL get_register,%r25 /* Find the target register */ 1479 BL get_register,%r25 /* Find the target register */
1481 extrw,u %r9,31,5,%r8 /* Get target register */ 1480 extrw,u %r9,31,5,%r8 /* Get target register */
1482 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */ 1481 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1483 BL set_register,%r25 1482 BL set_register,%r25
1484 copy %r0,%r1 /* Write zero to target register */ 1483 copy %r0,%r1 /* Write zero to target register */
1485 b nadtlb_nullify /* Nullify return insn */ 1484 b nadtlb_nullify /* Nullify return insn */
1486 nop 1485 nop
1487 1486
1488 1487
1489 #ifdef CONFIG_64BIT 1488 #ifdef CONFIG_64BIT
1490 itlb_miss_20w: 1489 itlb_miss_20w:
1491 1490
1492 /* 1491 /*
1493 * I miss is a little different, since we allow users to fault 1492 * I miss is a little different, since we allow users to fault
1494 * on the gateway page which is in the kernel address space. 1493 * on the gateway page which is in the kernel address space.
1495 */ 1494 */
1496 1495
1497 space_adjust spc,va,t0 1496 space_adjust spc,va,t0
1498 get_pgd spc,ptp 1497 get_pgd spc,ptp
1499 space_check spc,t0,itlb_fault 1498 space_check spc,t0,itlb_fault
1500 1499
1501 L3_ptep ptp,pte,t0,va,itlb_fault 1500 L3_ptep ptp,pte,t0,va,itlb_fault
1502 1501
1503 update_ptep ptp,pte,t0,t1 1502 update_ptep ptp,pte,t0,t1
1504 1503
1505 make_insert_tlb spc,pte,prot 1504 make_insert_tlb spc,pte,prot
1506 1505
1507 iitlbt pte,prot 1506 iitlbt pte,prot
1508 1507
1509 rfir 1508 rfir
1510 nop 1509 nop
1511 1510
1512 #else 1511 #else
1513 1512
1514 itlb_miss_11: 1513 itlb_miss_11:
1515 get_pgd spc,ptp 1514 get_pgd spc,ptp
1516 1515
1517 space_check spc,t0,itlb_fault 1516 space_check spc,t0,itlb_fault
1518 1517
1519 L2_ptep ptp,pte,t0,va,itlb_fault 1518 L2_ptep ptp,pte,t0,va,itlb_fault
1520 1519
1521 update_ptep ptp,pte,t0,t1 1520 update_ptep ptp,pte,t0,t1
1522 1521
1523 make_insert_tlb_11 spc,pte,prot 1522 make_insert_tlb_11 spc,pte,prot
1524 1523
1525 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ 1524 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1526 mtsp spc,%sr1 1525 mtsp spc,%sr1
1527 1526
1528 iitlba pte,(%sr1,va) 1527 iitlba pte,(%sr1,va)
1529 iitlbp prot,(%sr1,va) 1528 iitlbp prot,(%sr1,va)
1530 1529
1531 mtsp t0, %sr1 /* Restore sr1 */ 1530 mtsp t0, %sr1 /* Restore sr1 */
1532 1531
1533 rfir 1532 rfir
1534 nop 1533 nop
1535 1534
1536 itlb_miss_20: 1535 itlb_miss_20:
1537 get_pgd spc,ptp 1536 get_pgd spc,ptp
1538 1537
1539 space_check spc,t0,itlb_fault 1538 space_check spc,t0,itlb_fault
1540 1539
1541 L2_ptep ptp,pte,t0,va,itlb_fault 1540 L2_ptep ptp,pte,t0,va,itlb_fault
1542 1541
1543 update_ptep ptp,pte,t0,t1 1542 update_ptep ptp,pte,t0,t1
1544 1543
1545 make_insert_tlb spc,pte,prot 1544 make_insert_tlb spc,pte,prot
1546 1545
1547 f_extend pte,t0 1546 f_extend pte,t0
1548 1547
1549 iitlbt pte,prot 1548 iitlbt pte,prot
1550 1549
1551 rfir 1550 rfir
1552 nop 1551 nop
1553 1552
1554 #endif 1553 #endif
1555 1554
1556 #ifdef CONFIG_64BIT 1555 #ifdef CONFIG_64BIT
1557 1556
1558 dbit_trap_20w: 1557 dbit_trap_20w:
1559 space_adjust spc,va,t0 1558 space_adjust spc,va,t0
1560 get_pgd spc,ptp 1559 get_pgd spc,ptp
1561 space_check spc,t0,dbit_fault 1560 space_check spc,t0,dbit_fault
1562 1561
1563 L3_ptep ptp,pte,t0,va,dbit_fault 1562 L3_ptep ptp,pte,t0,va,dbit_fault
1564 1563
1565 #ifdef CONFIG_SMP 1564 #ifdef CONFIG_SMP
1566 cmpib,COND(=),n 0,spc,dbit_nolock_20w 1565 cmpib,COND(=),n 0,spc,dbit_nolock_20w
1567 load32 PA(pa_dbit_lock),t0 1566 load32 PA(pa_dbit_lock),t0
1568 1567
1569 dbit_spin_20w: 1568 dbit_spin_20w:
1570 LDCW 0(t0),t1 1569 LDCW 0(t0),t1
1571 cmpib,COND(=) 0,t1,dbit_spin_20w 1570 cmpib,COND(=) 0,t1,dbit_spin_20w
1572 nop 1571 nop
1573 1572
1574 dbit_nolock_20w: 1573 dbit_nolock_20w:
1575 #endif 1574 #endif
1576 update_dirty ptp,pte,t1 1575 update_dirty ptp,pte,t1
1577 1576
1578 make_insert_tlb spc,pte,prot 1577 make_insert_tlb spc,pte,prot
1579 1578
1580 idtlbt pte,prot 1579 idtlbt pte,prot
1581 #ifdef CONFIG_SMP 1580 #ifdef CONFIG_SMP
1582 cmpib,COND(=),n 0,spc,dbit_nounlock_20w 1581 cmpib,COND(=),n 0,spc,dbit_nounlock_20w
1583 ldi 1,t1 1582 ldi 1,t1
1584 stw t1,0(t0) 1583 stw t1,0(t0)
1585 1584
1586 dbit_nounlock_20w: 1585 dbit_nounlock_20w:
1587 #endif 1586 #endif
1588 1587
1589 rfir 1588 rfir
1590 nop 1589 nop
1591 #else 1590 #else
1592 1591
1593 dbit_trap_11: 1592 dbit_trap_11:
1594 1593
1595 get_pgd spc,ptp 1594 get_pgd spc,ptp
1596 1595
1597 space_check spc,t0,dbit_fault 1596 space_check spc,t0,dbit_fault
1598 1597
1599 L2_ptep ptp,pte,t0,va,dbit_fault 1598 L2_ptep ptp,pte,t0,va,dbit_fault
1600 1599
1601 #ifdef CONFIG_SMP 1600 #ifdef CONFIG_SMP
1602 cmpib,COND(=),n 0,spc,dbit_nolock_11 1601 cmpib,COND(=),n 0,spc,dbit_nolock_11
1603 load32 PA(pa_dbit_lock),t0 1602 load32 PA(pa_dbit_lock),t0
1604 1603
1605 dbit_spin_11: 1604 dbit_spin_11:
1606 LDCW 0(t0),t1 1605 LDCW 0(t0),t1
1607 cmpib,= 0,t1,dbit_spin_11 1606 cmpib,= 0,t1,dbit_spin_11
1608 nop 1607 nop
1609 1608
1610 dbit_nolock_11: 1609 dbit_nolock_11:
1611 #endif 1610 #endif
1612 update_dirty ptp,pte,t1 1611 update_dirty ptp,pte,t1
1613 1612
1614 make_insert_tlb_11 spc,pte,prot 1613 make_insert_tlb_11 spc,pte,prot
1615 1614
1616 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */ 1615 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1617 mtsp spc,%sr1 1616 mtsp spc,%sr1
1618 1617
1619 idtlba pte,(%sr1,va) 1618 idtlba pte,(%sr1,va)
1620 idtlbp prot,(%sr1,va) 1619 idtlbp prot,(%sr1,va)
1621 1620
1622 mtsp t1, %sr1 /* Restore sr1 */ 1621 mtsp t1, %sr1 /* Restore sr1 */
1623 #ifdef CONFIG_SMP 1622 #ifdef CONFIG_SMP
1624 cmpib,COND(=),n 0,spc,dbit_nounlock_11 1623 cmpib,COND(=),n 0,spc,dbit_nounlock_11
1625 ldi 1,t1 1624 ldi 1,t1
1626 stw t1,0(t0) 1625 stw t1,0(t0)
1627 1626
1628 dbit_nounlock_11: 1627 dbit_nounlock_11:
1629 #endif 1628 #endif
1630 1629
1631 rfir 1630 rfir
1632 nop 1631 nop
1633 1632
1634 dbit_trap_20: 1633 dbit_trap_20:
1635 get_pgd spc,ptp 1634 get_pgd spc,ptp
1636 1635
1637 space_check spc,t0,dbit_fault 1636 space_check spc,t0,dbit_fault
1638 1637
1639 L2_ptep ptp,pte,t0,va,dbit_fault 1638 L2_ptep ptp,pte,t0,va,dbit_fault
1640 1639
1641 #ifdef CONFIG_SMP 1640 #ifdef CONFIG_SMP
1642 cmpib,COND(=),n 0,spc,dbit_nolock_20 1641 cmpib,COND(=),n 0,spc,dbit_nolock_20
1643 load32 PA(pa_dbit_lock),t0 1642 load32 PA(pa_dbit_lock),t0
1644 1643
1645 dbit_spin_20: 1644 dbit_spin_20:
1646 LDCW 0(t0),t1 1645 LDCW 0(t0),t1
1647 cmpib,= 0,t1,dbit_spin_20 1646 cmpib,= 0,t1,dbit_spin_20
1648 nop 1647 nop
1649 1648
1650 dbit_nolock_20: 1649 dbit_nolock_20:
1651 #endif 1650 #endif
1652 update_dirty ptp,pte,t1 1651 update_dirty ptp,pte,t1
1653 1652
1654 make_insert_tlb spc,pte,prot 1653 make_insert_tlb spc,pte,prot
1655 1654
1656 f_extend pte,t1 1655 f_extend pte,t1
1657 1656
1658 idtlbt pte,prot 1657 idtlbt pte,prot
1659 1658
1660 #ifdef CONFIG_SMP 1659 #ifdef CONFIG_SMP
1661 cmpib,COND(=),n 0,spc,dbit_nounlock_20 1660 cmpib,COND(=),n 0,spc,dbit_nounlock_20
1662 ldi 1,t1 1661 ldi 1,t1
1663 stw t1,0(t0) 1662 stw t1,0(t0)
1664 1663
1665 dbit_nounlock_20: 1664 dbit_nounlock_20:
1666 #endif 1665 #endif
1667 1666
1668 rfir 1667 rfir
1669 nop 1668 nop
1670 #endif 1669 #endif
1671 1670
1672 .import handle_interruption,code 1671 .import handle_interruption,code
1673 1672
1674 kernel_bad_space: 1673 kernel_bad_space:
1675 b intr_save 1674 b intr_save
1676 ldi 31,%r8 /* Use an unused code */ 1675 ldi 31,%r8 /* Use an unused code */
1677 1676
1678 dbit_fault: 1677 dbit_fault:
1679 b intr_save 1678 b intr_save
1680 ldi 20,%r8 1679 ldi 20,%r8
1681 1680
1682 itlb_fault: 1681 itlb_fault:
1683 b intr_save 1682 b intr_save
1684 ldi 6,%r8 1683 ldi 6,%r8
1685 1684
1686 nadtlb_fault: 1685 nadtlb_fault:
1687 b intr_save 1686 b intr_save
1688 ldi 17,%r8 1687 ldi 17,%r8
1689 1688
1690 dtlb_fault: 1689 dtlb_fault:
1691 b intr_save 1690 b intr_save
1692 ldi 15,%r8 1691 ldi 15,%r8
1693 1692
1694 /* Register saving semantics for system calls: 1693 /* Register saving semantics for system calls:
1695 1694
1696 %r1 clobbered by system call macro in userspace 1695 %r1 clobbered by system call macro in userspace
1697 %r2 saved in PT_REGS by gateway page 1696 %r2 saved in PT_REGS by gateway page
1698 %r3 - %r18 preserved by C code (saved by signal code) 1697 %r3 - %r18 preserved by C code (saved by signal code)
1699 %r19 - %r20 saved in PT_REGS by gateway page 1698 %r19 - %r20 saved in PT_REGS by gateway page
1700 %r21 - %r22 non-standard syscall args 1699 %r21 - %r22 non-standard syscall args
1701 stored in kernel stack by gateway page 1700 stored in kernel stack by gateway page
1702 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page 1701 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1703 %r27 - %r30 saved in PT_REGS by gateway page 1702 %r27 - %r30 saved in PT_REGS by gateway page
1704 %r31 syscall return pointer 1703 %r31 syscall return pointer
1705 */ 1704 */
1706 1705
1707 /* Floating point registers (FIXME: what do we do with these?) 1706 /* Floating point registers (FIXME: what do we do with these?)
1708 1707
1709 %fr0 - %fr3 status/exception, not preserved 1708 %fr0 - %fr3 status/exception, not preserved
1710 %fr4 - %fr7 arguments 1709 %fr4 - %fr7 arguments
1711 %fr8 - %fr11 not preserved by C code 1710 %fr8 - %fr11 not preserved by C code
1712 %fr12 - %fr21 preserved by C code 1711 %fr12 - %fr21 preserved by C code
1713 %fr22 - %fr31 not preserved by C code 1712 %fr22 - %fr31 not preserved by C code
1714 */ 1713 */
1715 1714
1716 .macro reg_save regs 1715 .macro reg_save regs
1717 STREG %r3, PT_GR3(\regs) 1716 STREG %r3, PT_GR3(\regs)
1718 STREG %r4, PT_GR4(\regs) 1717 STREG %r4, PT_GR4(\regs)
1719 STREG %r5, PT_GR5(\regs) 1718 STREG %r5, PT_GR5(\regs)
1720 STREG %r6, PT_GR6(\regs) 1719 STREG %r6, PT_GR6(\regs)
1721 STREG %r7, PT_GR7(\regs) 1720 STREG %r7, PT_GR7(\regs)
1722 STREG %r8, PT_GR8(\regs) 1721 STREG %r8, PT_GR8(\regs)
1723 STREG %r9, PT_GR9(\regs) 1722 STREG %r9, PT_GR9(\regs)
1724 STREG %r10,PT_GR10(\regs) 1723 STREG %r10,PT_GR10(\regs)
1725 STREG %r11,PT_GR11(\regs) 1724 STREG %r11,PT_GR11(\regs)
1726 STREG %r12,PT_GR12(\regs) 1725 STREG %r12,PT_GR12(\regs)
1727 STREG %r13,PT_GR13(\regs) 1726 STREG %r13,PT_GR13(\regs)
1728 STREG %r14,PT_GR14(\regs) 1727 STREG %r14,PT_GR14(\regs)
1729 STREG %r15,PT_GR15(\regs) 1728 STREG %r15,PT_GR15(\regs)
1730 STREG %r16,PT_GR16(\regs) 1729 STREG %r16,PT_GR16(\regs)
1731 STREG %r17,PT_GR17(\regs) 1730 STREG %r17,PT_GR17(\regs)
1732 STREG %r18,PT_GR18(\regs) 1731 STREG %r18,PT_GR18(\regs)
1733 .endm 1732 .endm
1734 1733
1735 .macro reg_restore regs 1734 .macro reg_restore regs
1736 LDREG PT_GR3(\regs), %r3 1735 LDREG PT_GR3(\regs), %r3
1737 LDREG PT_GR4(\regs), %r4 1736 LDREG PT_GR4(\regs), %r4
1738 LDREG PT_GR5(\regs), %r5 1737 LDREG PT_GR5(\regs), %r5
1739 LDREG PT_GR6(\regs), %r6 1738 LDREG PT_GR6(\regs), %r6
1740 LDREG PT_GR7(\regs), %r7 1739 LDREG PT_GR7(\regs), %r7
1741 LDREG PT_GR8(\regs), %r8 1740 LDREG PT_GR8(\regs), %r8
1742 LDREG PT_GR9(\regs), %r9 1741 LDREG PT_GR9(\regs), %r9
1743 LDREG PT_GR10(\regs),%r10 1742 LDREG PT_GR10(\regs),%r10
1744 LDREG PT_GR11(\regs),%r11 1743 LDREG PT_GR11(\regs),%r11
1745 LDREG PT_GR12(\regs),%r12 1744 LDREG PT_GR12(\regs),%r12
1746 LDREG PT_GR13(\regs),%r13 1745 LDREG PT_GR13(\regs),%r13
1747 LDREG PT_GR14(\regs),%r14 1746 LDREG PT_GR14(\regs),%r14
1748 LDREG PT_GR15(\regs),%r15 1747 LDREG PT_GR15(\regs),%r15
1749 LDREG PT_GR16(\regs),%r16 1748 LDREG PT_GR16(\regs),%r16
1750 LDREG PT_GR17(\regs),%r17 1749 LDREG PT_GR17(\regs),%r17
1751 LDREG PT_GR18(\regs),%r18 1750 LDREG PT_GR18(\regs),%r18
1752 .endm 1751 .endm
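reg_save/reg_restore cover exactly %r3-%r18, the PA-RISC callee-saved set: everything else is either already in PT_REGS (stored by the gateway page) or legitimately clobberable across the C call sandwiched between them. A sketch of the pair over a reduced pt_regs:

    #include <string.h>

    /* Only the PT_GR3..PT_GR18 slice that the macros above touch. */
    struct pt_regs_slice { unsigned long gr3_18[16]; };

    static void reg_save_sketch(struct pt_regs_slice *regs,
                                const unsigned long live[16])
    {
        memcpy(regs->gr3_18, live, sizeof regs->gr3_18);
    }

    static void reg_restore_sketch(unsigned long live[16],
                                   const struct pt_regs_slice *regs)
    {
        memcpy(live, regs->gr3_18, sizeof regs->gr3_18);
    }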
1753 1752
1754 ENTRY(sys_fork_wrapper) 1753 ENTRY(sys_fork_wrapper)
1755 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1 1754 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1756 ldo TASK_REGS(%r1),%r1 1755 ldo TASK_REGS(%r1),%r1
1757 reg_save %r1 1756 reg_save %r1
1758 mfctl %cr27, %r3 1757 mfctl %cr27, %r3
1759 STREG %r3, PT_CR27(%r1) 1758 STREG %r3, PT_CR27(%r1)
1760 1759
1761 STREG %r2,-RP_OFFSET(%r30) 1760 STREG %r2,-RP_OFFSET(%r30)
1762 ldo FRAME_SIZE(%r30),%r30 1761 ldo FRAME_SIZE(%r30),%r30
1763 #ifdef CONFIG_64BIT 1762 #ifdef CONFIG_64BIT
1764 ldo -16(%r30),%r29 /* Reference param save area */ 1763 ldo -16(%r30),%r29 /* Reference param save area */
1765 #endif 1764 #endif
1766 1765
1767 /* These are call-clobbered registers and therefore 1766 /* These are call-clobbered registers and therefore
1768 also syscall-clobbered (we hope). */ 1767 also syscall-clobbered (we hope). */
1769 STREG %r2,PT_GR19(%r1) /* save for child */ 1768 STREG %r2,PT_GR19(%r1) /* save for child */
1770 STREG %r30,PT_GR21(%r1) 1769 STREG %r30,PT_GR21(%r1)
1771 1770
1772 LDREG PT_GR30(%r1),%r25 1771 LDREG PT_GR30(%r1),%r25
1773 copy %r1,%r24 1772 copy %r1,%r24
1774 BL sys_clone,%r2 1773 BL sys_clone,%r2
1775 ldi SIGCHLD,%r26 1774 ldi SIGCHLD,%r26
1776 1775
1777 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2 1776 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1778 wrapper_exit: 1777 wrapper_exit:
1779 ldo -FRAME_SIZE(%r30),%r30 /* get the stackframe */ 1778 ldo -FRAME_SIZE(%r30),%r30 /* get the stackframe */
1780 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 1779 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1781 ldo TASK_REGS(%r1),%r1 /* get pt regs */ 1780 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1782 1781
1783 LDREG PT_CR27(%r1), %r3 1782 LDREG PT_CR27(%r1), %r3
1784 mtctl %r3, %cr27 1783 mtctl %r3, %cr27
1785 reg_restore %r1 1784 reg_restore %r1
1786 1785
1787 /* strace expects syscall # to be preserved in r20 */ 1786 /* strace expects syscall # to be preserved in r20 */
1788 ldi __NR_fork,%r20 1787 ldi __NR_fork,%r20
1789 bv %r0(%r2) 1788 bv %r0(%r2)
1790 STREG %r20,PT_GR20(%r1) 1789 STREG %r20,PT_GR20(%r1)
1791 ENDPROC(sys_fork_wrapper) 1790 ENDPROC(sys_fork_wrapper)
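At the C level the wrapper implements fork as a clone call: flags = SIGCHLD, child stack = the parent's saved usp, plus the pt_regs pointer, matching the three arguments loaded into %r26/%r25/%r24 above. A sketch with the syscall passed in as a stand-in (the parisc SIGCHLD value is deliberately not hard-coded here):

    /* fork == clone(SIGCHLD, parent_usp) under the sys_clone(flags,
     * usp, regs) calling convention used above. */
    struct pt_regs;   /* opaque for this sketch */

    static long fork_via_clone(long (*sys_clone_fn)(unsigned long flags,
                                                    unsigned long usp,
                                                    struct pt_regs *regs),
                               unsigned long sigchld,
                               unsigned long usp, struct pt_regs *regs)
    {
        return sys_clone_fn(sigchld, usp, regs);
    }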
1792 1791
1793 /* Set the return value for the child */ 1792 /* Set the return value for the child */
1794 ENTRY(child_return) 1793 ENTRY(child_return)
1795 BL schedule_tail, %r2 1794 BL schedule_tail, %r2
1796 nop 1795 nop
1797 1796
1798 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1 1797 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
1799 LDREG TASK_PT_GR19(%r1),%r2 1798 LDREG TASK_PT_GR19(%r1),%r2
1800 b wrapper_exit 1799 b wrapper_exit
1801 copy %r0,%r28 1800 copy %r0,%r28
1802 ENDPROC(child_return) 1801 ENDPROC(child_return)
1803 1802
1804 1803
1805 ENTRY(sys_clone_wrapper) 1804 ENTRY(sys_clone_wrapper)
1806 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 1805 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1807 ldo TASK_REGS(%r1),%r1 /* get pt regs */ 1806 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1808 reg_save %r1 1807 reg_save %r1
1809 mfctl %cr27, %r3 1808 mfctl %cr27, %r3
1810 STREG %r3, PT_CR27(%r1) 1809 STREG %r3, PT_CR27(%r1)
1811 1810
1812 STREG %r2,-RP_OFFSET(%r30) 1811 STREG %r2,-RP_OFFSET(%r30)
1813 ldo FRAME_SIZE(%r30),%r30 1812 ldo FRAME_SIZE(%r30),%r30
1814 #ifdef CONFIG_64BIT 1813 #ifdef CONFIG_64BIT
1815 ldo -16(%r30),%r29 /* Reference param save area */ 1814 ldo -16(%r30),%r29 /* Reference param save area */
1816 #endif 1815 #endif
1817 1816
1818 /* WARNING - Clobbers r19 and r21, userspace must save these! */ 1817 /* WARNING - Clobbers r19 and r21, userspace must save these! */
1819 STREG %r2,PT_GR19(%r1) /* save for child */ 1818 STREG %r2,PT_GR19(%r1) /* save for child */
1820 STREG %r30,PT_GR21(%r1) 1819 STREG %r30,PT_GR21(%r1)
1821 BL sys_clone,%r2 1820 BL sys_clone,%r2
1822 copy %r1,%r24 1821 copy %r1,%r24
1823 1822
1824 b wrapper_exit 1823 b wrapper_exit
1825 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2 1824 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1826 ENDPROC(sys_clone_wrapper) 1825 ENDPROC(sys_clone_wrapper)
1827 1826
1828 1827
1829 ENTRY(sys_vfork_wrapper) 1828 ENTRY(sys_vfork_wrapper)
1830 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 1829 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1831 ldo TASK_REGS(%r1),%r1 /* get pt regs */ 1830 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1832 reg_save %r1 1831 reg_save %r1
1833 mfctl %cr27, %r3 1832 mfctl %cr27, %r3
1834 STREG %r3, PT_CR27(%r1) 1833 STREG %r3, PT_CR27(%r1)
1835 1834
1836 STREG %r2,-RP_OFFSET(%r30) 1835 STREG %r2,-RP_OFFSET(%r30)
1837 ldo FRAME_SIZE(%r30),%r30 1836 ldo FRAME_SIZE(%r30),%r30
1838 #ifdef CONFIG_64BIT 1837 #ifdef CONFIG_64BIT
1839 ldo -16(%r30),%r29 /* Reference param save area */ 1838 ldo -16(%r30),%r29 /* Reference param save area */
1840 #endif 1839 #endif
1841 1840
1842 STREG %r2,PT_GR19(%r1) /* save for child */ 1841 STREG %r2,PT_GR19(%r1) /* save for child */
1843 STREG %r30,PT_GR21(%r1) 1842 STREG %r30,PT_GR21(%r1)
1844 1843
1845 BL sys_vfork,%r2 1844 BL sys_vfork,%r2
1846 copy %r1,%r26 1845 copy %r1,%r26
1847 1846
1848 b wrapper_exit 1847 b wrapper_exit
1849 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2 1848 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1850 ENDPROC(sys_vfork_wrapper) 1849 ENDPROC(sys_vfork_wrapper)
1851 1850
1852 1851
1853 .macro execve_wrapper execve 1852 .macro execve_wrapper execve
1854 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 1853 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1855 ldo TASK_REGS(%r1),%r1 /* get pt regs */ 1854 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1856 1855
1857 /* 1856 /*
1858 * Do we need to save/restore r3-r18 here? 1857 * Do we need to save/restore r3-r18 here?
1859 * I don't think so. Why would the new thread need the old 1858 * I don't think so. Why would the new thread need the old
1860 * thread's registers? 1859 * thread's registers?
1861 */ 1860 */
1862 1861
1863 /* %arg0 - %arg3 are already saved for us. */ 1862 /* %arg0 - %arg3 are already saved for us. */
1864 1863
1865 STREG %r2,-RP_OFFSET(%r30) 1864 STREG %r2,-RP_OFFSET(%r30)
1866 ldo FRAME_SIZE(%r30),%r30 1865 ldo FRAME_SIZE(%r30),%r30
1867 #ifdef CONFIG_64BIT 1866 #ifdef CONFIG_64BIT
1868 ldo -16(%r30),%r29 /* Reference param save area */ 1867 ldo -16(%r30),%r29 /* Reference param save area */
1869 #endif 1868 #endif
1870 BL \execve,%r2 1869 BL \execve,%r2
1871 copy %r1,%arg0 1870 copy %r1,%arg0
1872 1871
1873 ldo -FRAME_SIZE(%r30),%r30 1872 ldo -FRAME_SIZE(%r30),%r30
1874 LDREG -RP_OFFSET(%r30),%r2 1873 LDREG -RP_OFFSET(%r30),%r2
1875 1874
1876 /* If exec succeeded we need to load the args */ 1875 /* If exec succeeded we need to load the args */
1877 1876
1878 ldo -1024(%r0),%r1 1877 ldo -1024(%r0),%r1
1879 cmpb,>>= %r28,%r1,error_\execve 1878 cmpb,>>= %r28,%r1,error_\execve
1880 copy %r2,%r19 1879 copy %r2,%r19
1881 1880
1882 error_\execve: 1881 error_\execve:
1883 bv %r0(%r19) 1882 bv %r0(%r19)
1884 nop 1883 nop
1885 .endm 1884 .endm
1886 1885
1887 .import sys_execve 1886 .import sys_execve
1888 ENTRY(sys_execve_wrapper) 1887 ENTRY(sys_execve_wrapper)
1889 execve_wrapper sys_execve 1888 execve_wrapper sys_execve
1890 ENDPROC(sys_execve_wrapper) 1889 ENDPROC(sys_execve_wrapper)
1891 1890
1892 #ifdef CONFIG_64BIT 1891 #ifdef CONFIG_64BIT
1893 .import sys32_execve 1892 .import sys32_execve
1894 ENTRY(sys32_execve_wrapper) 1893 ENTRY(sys32_execve_wrapper)
1895 execve_wrapper sys32_execve 1894 execve_wrapper sys32_execve
1896 ENDPROC(sys32_execve_wrapper) 1895 ENDPROC(sys32_execve_wrapper)
1897 #endif 1896 #endif
1898 1897
1899 ENTRY(sys_rt_sigreturn_wrapper) 1898 ENTRY(sys_rt_sigreturn_wrapper)
1900 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 1899 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
1901 ldo TASK_REGS(%r26),%r26 /* get pt regs */ 1900 ldo TASK_REGS(%r26),%r26 /* get pt regs */
1902 /* Don't save regs, we are going to restore them from sigcontext. */ 1901 /* Don't save regs, we are going to restore them from sigcontext. */
1903 STREG %r2, -RP_OFFSET(%r30) 1902 STREG %r2, -RP_OFFSET(%r30)
1904 #ifdef CONFIG_64BIT 1903 #ifdef CONFIG_64BIT
1905 ldo FRAME_SIZE(%r30), %r30 1904 ldo FRAME_SIZE(%r30), %r30
1906 BL sys_rt_sigreturn,%r2 1905 BL sys_rt_sigreturn,%r2
1907 ldo -16(%r30),%r29 /* Reference param save area */ 1906 ldo -16(%r30),%r29 /* Reference param save area */
1908 #else 1907 #else
1909 BL sys_rt_sigreturn,%r2 1908 BL sys_rt_sigreturn,%r2
1910 ldo FRAME_SIZE(%r30), %r30 1909 ldo FRAME_SIZE(%r30), %r30
1911 #endif 1910 #endif
1912 1911
1913 ldo -FRAME_SIZE(%r30), %r30 1912 ldo -FRAME_SIZE(%r30), %r30
1914 LDREG -RP_OFFSET(%r30), %r2 1913 LDREG -RP_OFFSET(%r30), %r2
1915 1914
1916 /* FIXME: I think we need to restore a few more things here. */ 1915 /* FIXME: I think we need to restore a few more things here. */
1917 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 1916 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1918 ldo TASK_REGS(%r1),%r1 /* get pt regs */ 1917 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1919 reg_restore %r1 1918 reg_restore %r1
1920 1919
1921 /* If the signal was received while the process was blocked on a 1920 /* If the signal was received while the process was blocked on a
1922 * syscall, then r2 will take us to syscall_exit; otherwise r2 will 1921 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1923 * take us to syscall_exit_rfi and on to intr_return. 1922 * take us to syscall_exit_rfi and on to intr_return.
1924 */ 1923 */
1925 bv %r0(%r2) 1924 bv %r0(%r2)
1926 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */ 1925 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
1927 ENDPROC(sys_rt_sigreturn_wrapper) 1926 ENDPROC(sys_rt_sigreturn_wrapper)
1928 1927
1929 ENTRY(sys_sigaltstack_wrapper) 1928 ENTRY(sys_sigaltstack_wrapper)
1930 /* Get the user stack pointer */ 1929 /* Get the user stack pointer */
1931 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 1930 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1932 ldo TASK_REGS(%r1),%r24 /* get pt regs */ 1931 ldo TASK_REGS(%r1),%r24 /* get pt regs */
1933 LDREG TASK_PT_GR30(%r24),%r24 1932 LDREG TASK_PT_GR30(%r24),%r24
1934 STREG %r2, -RP_OFFSET(%r30) 1933 STREG %r2, -RP_OFFSET(%r30)
1935 #ifdef CONFIG_64BIT 1934 #ifdef CONFIG_64BIT
1936 ldo FRAME_SIZE(%r30), %r30 1935 ldo FRAME_SIZE(%r30), %r30
1937 BL do_sigaltstack,%r2 1936 BL do_sigaltstack,%r2
1938 ldo -16(%r30),%r29 /* Reference param save area */ 1937 ldo -16(%r30),%r29 /* Reference param save area */
1939 #else 1938 #else
1940 BL do_sigaltstack,%r2 1939 BL do_sigaltstack,%r2
1941 ldo FRAME_SIZE(%r30), %r30 1940 ldo FRAME_SIZE(%r30), %r30
1942 #endif 1941 #endif
1943 1942
1944 ldo -FRAME_SIZE(%r30), %r30 1943 ldo -FRAME_SIZE(%r30), %r30
1945 LDREG -RP_OFFSET(%r30), %r2 1944 LDREG -RP_OFFSET(%r30), %r2
1946 bv %r0(%r2) 1945 bv %r0(%r2)
1947 nop 1946 nop
1948 ENDPROC(sys_sigaltstack_wrapper) 1947 ENDPROC(sys_sigaltstack_wrapper)
1949 1948
1950 #ifdef CONFIG_64BIT 1949 #ifdef CONFIG_64BIT
1951 ENTRY(sys32_sigaltstack_wrapper) 1950 ENTRY(sys32_sigaltstack_wrapper)
1952 /* Get the user stack pointer */ 1951 /* Get the user stack pointer */
1953 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24 1952 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
1954 LDREG TASK_PT_GR30(%r24),%r24 1953 LDREG TASK_PT_GR30(%r24),%r24
1955 STREG %r2, -RP_OFFSET(%r30) 1954 STREG %r2, -RP_OFFSET(%r30)
1956 ldo FRAME_SIZE(%r30), %r30 1955 ldo FRAME_SIZE(%r30), %r30
1957 BL do_sigaltstack32,%r2 1956 BL do_sigaltstack32,%r2
1958 ldo -16(%r30),%r29 /* Reference param save area */ 1957 ldo -16(%r30),%r29 /* Reference param save area */
1959 1958
1960 ldo -FRAME_SIZE(%r30), %r30 1959 ldo -FRAME_SIZE(%r30), %r30
1961 LDREG -RP_OFFSET(%r30), %r2 1960 LDREG -RP_OFFSET(%r30), %r2
1962 bv %r0(%r2) 1961 bv %r0(%r2)
1963 nop 1962 nop
1964 ENDPROC(sys32_sigaltstack_wrapper) 1963 ENDPROC(sys32_sigaltstack_wrapper)
1965 #endif 1964 #endif
1966 1965
1967 ENTRY(syscall_exit) 1966 ENTRY(syscall_exit)
1968 /* NOTE: HP-UX syscalls also come through here 1967 /* NOTE: HP-UX syscalls also come through here
1969 * after hpux_syscall_exit fixes up return 1968 * after hpux_syscall_exit fixes up return
1970 * values. */ 1969 * values. */
1971 1970
1972 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit 1971 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
1973 * via syscall_exit_rfi if the signal was received while the process 1972 * via syscall_exit_rfi if the signal was received while the process
1974 * was running. 1973 * was running.
1975 */ 1974 */
1976 1975
1977 /* save return value now */ 1976 /* save return value now */
1978 1977
1979 mfctl %cr30, %r1 1978 mfctl %cr30, %r1
1980 LDREG TI_TASK(%r1),%r1 1979 LDREG TI_TASK(%r1),%r1
1981 STREG %r28,TASK_PT_GR28(%r1) 1980 STREG %r28,TASK_PT_GR28(%r1)
1982 1981
1983 #ifdef CONFIG_HPUX 1982 #ifdef CONFIG_HPUX
1984 /* <linux/personality.h> cannot be easily included */ 1983 /* <linux/personality.h> cannot be easily included */
1985 #define PER_HPUX 0x10 1984 #define PER_HPUX 0x10
1986 ldw TASK_PERSONALITY(%r1),%r19 1985 ldw TASK_PERSONALITY(%r1),%r19
1987 1986
1988 /* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */ 1987 /* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
1989 ldo -PER_HPUX(%r19), %r19 1988 ldo -PER_HPUX(%r19), %r19
1990 cmpib,COND(<>),n 0,%r19,1f 1989 cmpib,COND(<>),n 0,%r19,1f
1991 1990
1992 /* Save other hpux returns if personality is PER_HPUX */ 1991 /* Save other hpux returns if personality is PER_HPUX */
1993 STREG %r22,TASK_PT_GR22(%r1) 1992 STREG %r22,TASK_PT_GR22(%r1)
1994 STREG %r29,TASK_PT_GR29(%r1) 1993 STREG %r29,TASK_PT_GR29(%r1)
1995 1: 1994 1:
1996 1995
1997 #endif /* CONFIG_HPUX */ 1996 #endif /* CONFIG_HPUX */
1998 1997
1999 /* Seems to me that dp could be wrong here, if the syscall involved 1998 /* Seems to me that dp could be wrong here, if the syscall involved
2000 * calling a module, and nothing got round to restoring dp on return. 1999 * calling a module, and nothing got round to restoring dp on return.
2001 */ 2000 */
2002 loadgp 2001 loadgp
2003 2002
2004 syscall_check_resched: 2003 syscall_check_resched:
2005 2004
2006 /* check for reschedule */ 2005 /* check for reschedule */
2007 2006
2008 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */ 2007 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
2009 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */ 2008 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
2010 2009
2011 .import do_signal,code 2010 .import do_signal,code
2012 syscall_check_sig: 2011 syscall_check_sig:
2013 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 2012 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
2014 ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26 2013 ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
2015 and,COND(<>) %r19, %r26, %r0 2014 and,COND(<>) %r19, %r26, %r0
2016 b,n syscall_restore /* skip past if we've nothing to do */ 2015 b,n syscall_restore /* skip past if we've nothing to do */
2017 2016
2018 syscall_do_signal: 2017 syscall_do_signal:
2019 /* Save callee-save registers (for sigcontext). 2018 /* Save callee-save registers (for sigcontext).
2020 * FIXME: After this point the process structure should be 2019 * FIXME: After this point the process structure should be
2021 * consistent with all the relevant state of the process 2020 * consistent with all the relevant state of the process
2022 * before the syscall. We need to verify this. 2021 * before the syscall. We need to verify this.
2023 */ 2022 */
2024 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 2023 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2025 ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */ 2024 ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */
2026 reg_save %r26 2025 reg_save %r26
2027 2026
2028 #ifdef CONFIG_64BIT 2027 #ifdef CONFIG_64BIT
2029 ldo -16(%r30),%r29 /* Reference param save area */ 2028 ldo -16(%r30),%r29 /* Reference param save area */
2030 #endif 2029 #endif
2031 2030
2032 BL do_notify_resume,%r2 2031 BL do_notify_resume,%r2
2033 ldi 1, %r25 /* long in_syscall = 1 */ 2032 ldi 1, %r25 /* long in_syscall = 1 */
2034 2033
2035 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 2034 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2036 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */ 2035 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
2037 reg_restore %r20 2036 reg_restore %r20
2038 2037
2039 b,n syscall_check_sig 2038 b,n syscall_check_sig
2040 2039
2041 syscall_restore: 2040 syscall_restore:
2042 /* Are we being ptraced? */ 2041 /* Are we being ptraced? */
2043 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 2042 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2044 2043
2045 ldw TASK_PTRACE(%r1), %r19 2044 ldw TASK_PTRACE(%r1), %r19
2046 bb,< %r19,31,syscall_restore_rfi 2045 bb,< %r19,31,syscall_restore_rfi
2047 nop 2046 nop
2048 2047
2049 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */ 2048 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
2050 rest_fp %r19 2049 rest_fp %r19
2051 2050
2052 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */ 2051 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
2053 mtsar %r19 2052 mtsar %r19
2054 2053
2055 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */ 2054 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
2056 LDREG TASK_PT_GR19(%r1),%r19 2055 LDREG TASK_PT_GR19(%r1),%r19
2057 LDREG TASK_PT_GR20(%r1),%r20 2056 LDREG TASK_PT_GR20(%r1),%r20
2058 LDREG TASK_PT_GR21(%r1),%r21 2057 LDREG TASK_PT_GR21(%r1),%r21
2059 LDREG TASK_PT_GR22(%r1),%r22 2058 LDREG TASK_PT_GR22(%r1),%r22
2060 LDREG TASK_PT_GR23(%r1),%r23 2059 LDREG TASK_PT_GR23(%r1),%r23
2061 LDREG TASK_PT_GR24(%r1),%r24 2060 LDREG TASK_PT_GR24(%r1),%r24
2062 LDREG TASK_PT_GR25(%r1),%r25 2061 LDREG TASK_PT_GR25(%r1),%r25
2063 LDREG TASK_PT_GR26(%r1),%r26 2062 LDREG TASK_PT_GR26(%r1),%r26
2064 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */ 2063 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
2065 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */ 2064 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
2066 LDREG TASK_PT_GR29(%r1),%r29 2065 LDREG TASK_PT_GR29(%r1),%r29
2067 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */ 2066 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
2068 2067
2069 /* NOTE: We use rsm/ssm pair to make this operation atomic */ 2068 /* NOTE: We use rsm/ssm pair to make this operation atomic */
2070 rsm PSW_SM_I, %r0 2069 rsm PSW_SM_I, %r0
2071 LDREG TASK_PT_GR30(%r1),%r30 /* restore user sp */ 2070 LDREG TASK_PT_GR30(%r1),%r30 /* restore user sp */
2072 mfsp %sr3,%r1 /* Get user's space id */ 2071 mfsp %sr3,%r1 /* Get user's space id */
2073 mtsp %r1,%sr7 /* Restore sr7 */ 2072 mtsp %r1,%sr7 /* Restore sr7 */
2074 ssm PSW_SM_I, %r0 2073 ssm PSW_SM_I, %r0
2075 2074
2076 /* Set sr2 to zero for userspace syscalls to work. */ 2075 /* Set sr2 to zero for userspace syscalls to work. */
2077 mtsp %r0,%sr2 2076 mtsp %r0,%sr2
2078 mtsp %r1,%sr4 /* Restore sr4 */ 2077 mtsp %r1,%sr4 /* Restore sr4 */
2079 mtsp %r1,%sr5 /* Restore sr5 */ 2078 mtsp %r1,%sr5 /* Restore sr5 */
2080 mtsp %r1,%sr6 /* Restore sr6 */ 2079 mtsp %r1,%sr6 /* Restore sr6 */
2081 2080
2082 depi 3,31,2,%r31 /* ensure return to user mode. */ 2081 depi 3,31,2,%r31 /* ensure return to user mode. */
2083 2082
2084 #ifdef CONFIG_64BIT 2083 #ifdef CONFIG_64BIT
2085 /* decide whether to reset the wide mode bit 2084 /* decide whether to reset the wide mode bit
2086 * 2085 *
2087 * For a syscall, the W bit is stored in the lowest bit 2086 * For a syscall, the W bit is stored in the lowest bit
2088 * of sp. Extract it and reset W if it is zero */ 2087 * of sp. Extract it and reset W if it is zero */
2089 extrd,u,*<> %r30,63,1,%r1 2088 extrd,u,*<> %r30,63,1,%r1
2090 rsm PSW_SM_W, %r0 2089 rsm PSW_SM_W, %r0
2091 /* now reset the lowest bit of sp if it was set */ 2090 /* now reset the lowest bit of sp if it was set */
2092 xor %r30,%r1,%r30 2091 xor %r30,%r1,%r30
2093 #endif 2092 #endif
2094 be,n 0(%sr3,%r31) /* return to user space */ 2093 be,n 0(%sr3,%r31) /* return to user space */
2095 2094
2096 /* We have to return via an RFI, so that PSW T and R bits can be set 2095 /* We have to return via an RFI, so that PSW T and R bits can be set
2097 * appropriately. 2096 * appropriately.
2098 * This sets up pt_regs so we can return via intr_restore, which is not 2097 * This sets up pt_regs so we can return via intr_restore, which is not
2099 * the most efficient way of doing things, but it works. 2098 * the most efficient way of doing things, but it works.
2100 */ 2099 */
2101 syscall_restore_rfi: 2100 syscall_restore_rfi:
2102 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */ 2101 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
2103 mtctl %r2,%cr0 /* for immediate trap */ 2102 mtctl %r2,%cr0 /* for immediate trap */
2104 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */ 2103 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
2105 ldi 0x0b,%r20 /* Create new PSW */ 2104 ldi 0x0b,%r20 /* Create new PSW */
2106 depi -1,13,1,%r20 /* C, Q, D, and I bits */ 2105 depi -1,13,1,%r20 /* C, Q, D, and I bits */
2107 2106
2108 /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are 2107 /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
2109 * set in include/linux/ptrace.h and converted to PA bitmap 2108 * set in include/linux/ptrace.h and converted to PA bitmap
2110 * numbers in asm-offsets.c */ 2109 * numbers in asm-offsets.c */
2111 2110
2112 /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */ 2111 /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
2113 extru,= %r19,PA_SINGLESTEP_BIT,1,%r0 2112 extru,= %r19,PA_SINGLESTEP_BIT,1,%r0
2114 depi -1,27,1,%r20 /* R bit */ 2113 depi -1,27,1,%r20 /* R bit */
2115 2114
2116 /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */ 2115 /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
2117 extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0 2116 extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0
2118 depi -1,7,1,%r20 /* T bit */ 2117 depi -1,7,1,%r20 /* T bit */
2119 2118
2120 STREG %r20,TASK_PT_PSW(%r1) 2119 STREG %r20,TASK_PT_PSW(%r1)
2121 2120
2122 /* Always store space registers, since sr3 can be changed (e.g. fork) */ 2121 /* Always store space registers, since sr3 can be changed (e.g. fork) */
2123 2122
2124 mfsp %sr3,%r25 2123 mfsp %sr3,%r25
2125 STREG %r25,TASK_PT_SR3(%r1) 2124 STREG %r25,TASK_PT_SR3(%r1)
2126 STREG %r25,TASK_PT_SR4(%r1) 2125 STREG %r25,TASK_PT_SR4(%r1)
2127 STREG %r25,TASK_PT_SR5(%r1) 2126 STREG %r25,TASK_PT_SR5(%r1)
2128 STREG %r25,TASK_PT_SR6(%r1) 2127 STREG %r25,TASK_PT_SR6(%r1)
2129 STREG %r25,TASK_PT_SR7(%r1) 2128 STREG %r25,TASK_PT_SR7(%r1)
2130 STREG %r25,TASK_PT_IASQ0(%r1) 2129 STREG %r25,TASK_PT_IASQ0(%r1)
2131 STREG %r25,TASK_PT_IASQ1(%r1) 2130 STREG %r25,TASK_PT_IASQ1(%r1)
2132 2131
2133 /* XXX W bit??? */ 2132 /* XXX W bit??? */
2134 /* Now if old D bit is clear, it means we didn't save all registers 2133 /* Now if old D bit is clear, it means we didn't save all registers
2135 * on syscall entry, so do that now. This only happens on TRACEME 2134 * on syscall entry, so do that now. This only happens on TRACEME
2136 * calls, or if someone attached to us while we were on a syscall. 2135 * calls, or if someone attached to us while we were on a syscall.
2137 * We could make this more efficient by not saving r3-r18, but 2136 * We could make this more efficient by not saving r3-r18, but
2138 * then we wouldn't be able to use the common intr_restore path. 2137 * then we wouldn't be able to use the common intr_restore path.
2139 * It is only for traced processes anyway, so performance is not 2138 * It is only for traced processes anyway, so performance is not
2140 * an issue. 2139 * an issue.
2141 */ 2140 */
2142 bb,< %r2,30,pt_regs_ok /* Branch if D set */ 2141 bb,< %r2,30,pt_regs_ok /* Branch if D set */
2143 ldo TASK_REGS(%r1),%r25 2142 ldo TASK_REGS(%r1),%r25
2144 reg_save %r25 /* Save r3 to r18 */ 2143 reg_save %r25 /* Save r3 to r18 */
2145 2144
2146 /* Save the current sr */ 2145 /* Save the current sr */
2147 mfsp %sr0,%r2 2146 mfsp %sr0,%r2
2148 STREG %r2,TASK_PT_SR0(%r1) 2147 STREG %r2,TASK_PT_SR0(%r1)
2149 2148
2150 /* Save the scratch sr */ 2149 /* Save the scratch sr */
2151 mfsp %sr1,%r2 2150 mfsp %sr1,%r2
2152 STREG %r2,TASK_PT_SR1(%r1) 2151 STREG %r2,TASK_PT_SR1(%r1)
2153 2152
2154 /* sr2 should be set to zero for userspace syscalls */ 2153 /* sr2 should be set to zero for userspace syscalls */
2155 STREG %r0,TASK_PT_SR2(%r1) 2154 STREG %r0,TASK_PT_SR2(%r1)
2156 2155
2157 pt_regs_ok: 2156 pt_regs_ok:
2158 LDREG TASK_PT_GR31(%r1),%r2 2157 LDREG TASK_PT_GR31(%r1),%r2
2159 depi 3,31,2,%r2 /* ensure return to user mode. */ 2158 depi 3,31,2,%r2 /* ensure return to user mode. */
2160 STREG %r2,TASK_PT_IAOQ0(%r1) 2159 STREG %r2,TASK_PT_IAOQ0(%r1)
2161 ldo 4(%r2),%r2 2160 ldo 4(%r2),%r2
2162 STREG %r2,TASK_PT_IAOQ1(%r1) 2161 STREG %r2,TASK_PT_IAOQ1(%r1)
2163 copy %r25,%r16 2162 copy %r25,%r16
2164 b intr_restore 2163 b intr_restore
2165 nop 2164 nop
2166 2165
2167 .import schedule,code 2166 .import schedule,code
2168 syscall_do_resched: 2167 syscall_do_resched:
2169 BL schedule,%r2 2168 BL schedule,%r2
2170 #ifdef CONFIG_64BIT 2169 #ifdef CONFIG_64BIT
2171 ldo -16(%r30),%r29 /* Reference param save area */ 2170 ldo -16(%r30),%r29 /* Reference param save area */
2172 #else 2171 #else
2173 nop 2172 nop
2174 #endif 2173 #endif
2175 b syscall_check_resched /* if resched, we start over again */ 2174 b syscall_check_resched /* if resched, we start over again */
2176 nop 2175 nop
2177 ENDPROC(syscall_exit) 2176 ENDPROC(syscall_exit)
2178 2177
2179 2178
2180 get_register: 2179 get_register:
2181 /* 2180 /*
2182 * get_register is used by the non-access TLB miss handlers to 2181 * get_register is used by the non-access TLB miss handlers to
2183 * copy the value of the general register specified in r8 into 2182 * copy the value of the general register specified in r8 into
2184 * r1. This routine can't be used for shadowed registers, since 2183 * r1. This routine can't be used for shadowed registers, since
2185 * the rfir will restore the original value. So, for the shadowed 2184 * the rfir will restore the original value. So, for the shadowed
2186 * registers we put a -1 into r1 to indicate that the register 2185 * registers we put a -1 into r1 to indicate that the register
2187 * should not be used (the register being copied could also have 2186 * should not be used (the register being copied could also have
2188 * a -1 in it, but that is OK, it just means that we will have 2187 * a -1 in it, but that is OK, it just means that we will have
2189 * to use the slow path instead). 2188 * to use the slow path instead).
2190 */ 2189 */
2191 blr %r8,%r0 2190 blr %r8,%r0
2192 nop 2191 nop
2193 bv %r0(%r25) /* r0 */ 2192 bv %r0(%r25) /* r0 */
2194 copy %r0,%r1 2193 copy %r0,%r1
2195 bv %r0(%r25) /* r1 - shadowed */ 2194 bv %r0(%r25) /* r1 - shadowed */
2196 ldi -1,%r1 2195 ldi -1,%r1
2197 bv %r0(%r25) /* r2 */ 2196 bv %r0(%r25) /* r2 */
2198 copy %r2,%r1 2197 copy %r2,%r1
2199 bv %r0(%r25) /* r3 */ 2198 bv %r0(%r25) /* r3 */
2200 copy %r3,%r1 2199 copy %r3,%r1
2201 bv %r0(%r25) /* r4 */ 2200 bv %r0(%r25) /* r4 */
2202 copy %r4,%r1 2201 copy %r4,%r1
2203 bv %r0(%r25) /* r5 */ 2202 bv %r0(%r25) /* r5 */
2204 copy %r5,%r1 2203 copy %r5,%r1
2205 bv %r0(%r25) /* r6 */ 2204 bv %r0(%r25) /* r6 */
2206 copy %r6,%r1 2205 copy %r6,%r1
2207 bv %r0(%r25) /* r7 */ 2206 bv %r0(%r25) /* r7 */
2208 copy %r7,%r1 2207 copy %r7,%r1
2209 bv %r0(%r25) /* r8 - shadowed */ 2208 bv %r0(%r25) /* r8 - shadowed */
2210 ldi -1,%r1 2209 ldi -1,%r1
2211 bv %r0(%r25) /* r9 - shadowed */ 2210 bv %r0(%r25) /* r9 - shadowed */
2212 ldi -1,%r1 2211 ldi -1,%r1
2213 bv %r0(%r25) /* r10 */ 2212 bv %r0(%r25) /* r10 */
2214 copy %r10,%r1 2213 copy %r10,%r1
2215 bv %r0(%r25) /* r11 */ 2214 bv %r0(%r25) /* r11 */
2216 copy %r11,%r1 2215 copy %r11,%r1
2217 bv %r0(%r25) /* r12 */ 2216 bv %r0(%r25) /* r12 */
2218 copy %r12,%r1 2217 copy %r12,%r1
2219 bv %r0(%r25) /* r13 */ 2218 bv %r0(%r25) /* r13 */
2220 copy %r13,%r1 2219 copy %r13,%r1
2221 bv %r0(%r25) /* r14 */ 2220 bv %r0(%r25) /* r14 */
2222 copy %r14,%r1 2221 copy %r14,%r1
2223 bv %r0(%r25) /* r15 */ 2222 bv %r0(%r25) /* r15 */
2224 copy %r15,%r1 2223 copy %r15,%r1
2225 bv %r0(%r25) /* r16 - shadowed */ 2224 bv %r0(%r25) /* r16 - shadowed */
2226 ldi -1,%r1 2225 ldi -1,%r1
2227 bv %r0(%r25) /* r17 - shadowed */ 2226 bv %r0(%r25) /* r17 - shadowed */
2228 ldi -1,%r1 2227 ldi -1,%r1
2229 bv %r0(%r25) /* r18 */ 2228 bv %r0(%r25) /* r18 */
2230 copy %r18,%r1 2229 copy %r18,%r1
2231 bv %r0(%r25) /* r19 */ 2230 bv %r0(%r25) /* r19 */
2232 copy %r19,%r1 2231 copy %r19,%r1
2233 bv %r0(%r25) /* r20 */ 2232 bv %r0(%r25) /* r20 */
2234 copy %r20,%r1 2233 copy %r20,%r1
2235 bv %r0(%r25) /* r21 */ 2234 bv %r0(%r25) /* r21 */
2236 copy %r21,%r1 2235 copy %r21,%r1
2237 bv %r0(%r25) /* r22 */ 2236 bv %r0(%r25) /* r22 */
2238 copy %r22,%r1 2237 copy %r22,%r1
2239 bv %r0(%r25) /* r23 */ 2238 bv %r0(%r25) /* r23 */
2240 copy %r23,%r1 2239 copy %r23,%r1
2241 bv %r0(%r25) /* r24 - shadowed */ 2240 bv %r0(%r25) /* r24 - shadowed */
2242 ldi -1,%r1 2241 ldi -1,%r1
2243 bv %r0(%r25) /* r25 - shadowed */ 2242 bv %r0(%r25) /* r25 - shadowed */
2244 ldi -1,%r1 2243 ldi -1,%r1
2245 bv %r0(%r25) /* r26 */ 2244 bv %r0(%r25) /* r26 */
2246 copy %r26,%r1 2245 copy %r26,%r1
2247 bv %r0(%r25) /* r27 */ 2246 bv %r0(%r25) /* r27 */
2248 copy %r27,%r1 2247 copy %r27,%r1
2249 bv %r0(%r25) /* r28 */ 2248 bv %r0(%r25) /* r28 */
2250 copy %r28,%r1 2249 copy %r28,%r1
2251 bv %r0(%r25) /* r29 */ 2250 bv %r0(%r25) /* r29 */
2252 copy %r29,%r1 2251 copy %r29,%r1
2253 bv %r0(%r25) /* r30 */ 2252 bv %r0(%r25) /* r30 */
2254 copy %r30,%r1 2253 copy %r30,%r1
2255 bv %r0(%r25) /* r31 */ 2254 bv %r0(%r25) /* r31 */
2256 copy %r31,%r1 2255 copy %r31,%r1
2257 2256
2258 2257
2259 set_register: 2258 set_register:
2260 /* 2259 /*
2261 * set_register is used by the non-access TLB miss handlers to 2260 * set_register is used by the non-access TLB miss handlers to
2262 * copy the value of r1 into the general register specified in 2261 * copy the value of r1 into the general register specified in
2263 * r8. 2262 * r8.
2264 */ 2263 */
2265 blr %r8,%r0 2264 blr %r8,%r0
2266 nop 2265 nop
2267 bv %r0(%r25) /* r0 (silly, but it is a place holder) */ 2266 bv %r0(%r25) /* r0 (silly, but it is a place holder) */
2268 copy %r1,%r0 2267 copy %r1,%r0
2269 bv %r0(%r25) /* r1 */ 2268 bv %r0(%r25) /* r1 */
2270 copy %r1,%r1 2269 copy %r1,%r1
2271 bv %r0(%r25) /* r2 */ 2270 bv %r0(%r25) /* r2 */
2272 copy %r1,%r2 2271 copy %r1,%r2
2273 bv %r0(%r25) /* r3 */ 2272 bv %r0(%r25) /* r3 */
2274 copy %r1,%r3 2273 copy %r1,%r3
2275 bv %r0(%r25) /* r4 */ 2274 bv %r0(%r25) /* r4 */
2276 copy %r1,%r4 2275 copy %r1,%r4
2277 bv %r0(%r25) /* r5 */ 2276 bv %r0(%r25) /* r5 */
2278 copy %r1,%r5 2277 copy %r1,%r5
2279 bv %r0(%r25) /* r6 */ 2278 bv %r0(%r25) /* r6 */
2280 copy %r1,%r6 2279 copy %r1,%r6
2281 bv %r0(%r25) /* r7 */ 2280 bv %r0(%r25) /* r7 */
2282 copy %r1,%r7 2281 copy %r1,%r7
2283 bv %r0(%r25) /* r8 */ 2282 bv %r0(%r25) /* r8 */
2284 copy %r1,%r8 2283 copy %r1,%r8
2285 bv %r0(%r25) /* r9 */ 2284 bv %r0(%r25) /* r9 */
2286 copy %r1,%r9 2285 copy %r1,%r9
2287 bv %r0(%r25) /* r10 */ 2286 bv %r0(%r25) /* r10 */
2288 copy %r1,%r10 2287 copy %r1,%r10
2289 bv %r0(%r25) /* r11 */ 2288 bv %r0(%r25) /* r11 */
2290 copy %r1,%r11 2289 copy %r1,%r11
2291 bv %r0(%r25) /* r12 */ 2290 bv %r0(%r25) /* r12 */
2292 copy %r1,%r12 2291 copy %r1,%r12
2293 bv %r0(%r25) /* r13 */ 2292 bv %r0(%r25) /* r13 */
2294 copy %r1,%r13 2293 copy %r1,%r13
2295 bv %r0(%r25) /* r14 */ 2294 bv %r0(%r25) /* r14 */
2296 copy %r1,%r14 2295 copy %r1,%r14
2297 bv %r0(%r25) /* r15 */ 2296 bv %r0(%r25) /* r15 */
2298 copy %r1,%r15 2297 copy %r1,%r15
2299 bv %r0(%r25) /* r16 */ 2298 bv %r0(%r25) /* r16 */
2300 copy %r1,%r16 2299 copy %r1,%r16
2301 bv %r0(%r25) /* r17 */ 2300 bv %r0(%r25) /* r17 */
2302 copy %r1,%r17 2301 copy %r1,%r17
2303 bv %r0(%r25) /* r18 */ 2302 bv %r0(%r25) /* r18 */
2304 copy %r1,%r18 2303 copy %r1,%r18
2305 bv %r0(%r25) /* r19 */ 2304 bv %r0(%r25) /* r19 */
2306 copy %r1,%r19 2305 copy %r1,%r19
2307 bv %r0(%r25) /* r20 */ 2306 bv %r0(%r25) /* r20 */
2308 copy %r1,%r20 2307 copy %r1,%r20
2309 bv %r0(%r25) /* r21 */ 2308 bv %r0(%r25) /* r21 */
2310 copy %r1,%r21 2309 copy %r1,%r21
2311 bv %r0(%r25) /* r22 */ 2310 bv %r0(%r25) /* r22 */
2312 copy %r1,%r22 2311 copy %r1,%r22
2313 bv %r0(%r25) /* r23 */ 2312 bv %r0(%r25) /* r23 */
2314 copy %r1,%r23 2313 copy %r1,%r23
2315 bv %r0(%r25) /* r24 */ 2314 bv %r0(%r25) /* r24 */
2316 copy %r1,%r24 2315 copy %r1,%r24
2317 bv %r0(%r25) /* r25 */ 2316 bv %r0(%r25) /* r25 */
2318 copy %r1,%r25 2317 copy %r1,%r25
2319 bv %r0(%r25) /* r26 */ 2318 bv %r0(%r25) /* r26 */
2320 copy %r1,%r26 2319 copy %r1,%r26
2321 bv %r0(%r25) /* r27 */ 2320 bv %r0(%r25) /* r27 */
2322 copy %r1,%r27 2321 copy %r1,%r27
2323 bv %r0(%r25) /* r28 */ 2322 bv %r0(%r25) /* r28 */
2324 copy %r1,%r28 2323 copy %r1,%r28
2325 bv %r0(%r25) /* r29 */ 2324 bv %r0(%r25) /* r29 */
2326 copy %r1,%r29 2325 copy %r1,%r29
2327 bv %r0(%r25) /* r30 */ 2326 bv %r0(%r25) /* r30 */
2328 copy %r1,%r30 2327 copy %r1,%r30
2329 bv %r0(%r25) /* r31 */ 2328 bv %r0(%r25) /* r31 */
2330 copy %r1,%r31 2329 copy %r1,%r31
2331 2330
2332 2331
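The get_register/set_register stubs above form a computed-branch jump table: blr %r8,%r0 lands on a two-instruction entry selected by the register number in %r8, and each entry branches back through %r25. Below is a rough C sketch of the same dispatch (the function and the flat gr[] array are hypothetical, not part of this patch); shadowed registers report a -1 sentinel so the caller falls back to the slow path:

	/* Sketch only: mirrors the dispatch logic of get_register above.
	 * gr[] stands in for the live general registers; r1, r8, r9,
	 * r16, r17, r24 and r25 are shadowed by the interruption
	 * hardware, so their live values are unrecoverable here and we
	 * return -1 ("use the slow path") instead. */
	static long get_register_sketch(const long gr[32], unsigned int reg)
	{
		switch (reg) {
		case 1: case 8: case 9: case 16: case 17: case 24: case 25:
			return -1;	/* shadowed: rfir restores the original */
		case 0:
			return 0;	/* %r0 is hardwired to zero */
		default:
			return gr[reg];
		}
	}

As the comment above notes, a genuine -1 in the copied register is harmless; it merely forces the slow path as well.
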
arch/parisc/kernel/head.S
1 /* This file is subject to the terms and conditions of the GNU General Public 1 /* This file is subject to the terms and conditions of the GNU General Public
2 * License. See the file "COPYING" in the main directory of this archive 2 * License. See the file "COPYING" in the main directory of this archive
3 * for more details. 3 * for more details.
4 * 4 *
5 * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de> 5 * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
6 * Copyright 1999 SuSE GmbH (Philipp Rumpf) 6 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
7 * Copyright 1999 Philipp Rumpf (prumpf@tux.org) 7 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
8 * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com) 8 * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
9 * Copyright (C) 2001 Grant Grundler (Hewlett Packard) 9 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
10 * Copyright (C) 2004 Kyle McMartin <kyle@debian.org> 10 * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
11 * 11 *
12 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de> 12 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
13 */ 13 */
14 14
15 #include <asm/asm-offsets.h> 15 #include <asm/asm-offsets.h>
16 #include <asm/psw.h> 16 #include <asm/psw.h>
17 #include <asm/pdc.h> 17 #include <asm/pdc.h>
18 18
19 #include <asm/assembly.h> 19 #include <asm/assembly.h>
20 #include <asm/pgtable.h> 20 #include <asm/pgtable.h>
21 21
22 #include <linux/linkage.h> 22 #include <linux/linkage.h>
23 #include <linux/init.h> 23 #include <linux/init.h>
24 24
25 .level LEVEL 25 .level LEVEL
26 26
27 __INITDATA 27 __INITDATA
28 ENTRY(boot_args) 28 ENTRY(boot_args)
29 .word 0 /* arg0 */ 29 .word 0 /* arg0 */
30 .word 0 /* arg1 */ 30 .word 0 /* arg1 */
31 .word 0 /* arg2 */ 31 .word 0 /* arg2 */
32 .word 0 /* arg3 */ 32 .word 0 /* arg3 */
33 END(boot_args) 33 END(boot_args)
34 34
35 __HEAD 35 .section .text.head
36 .align 4 36 .align 4
37 .import init_thread_union,data 37 .import init_thread_union,data
38 .import fault_vector_20,code /* IVA parisc 2.0 32 bit */ 38 .import fault_vector_20,code /* IVA parisc 2.0 32 bit */
39 #ifndef CONFIG_64BIT 39 #ifndef CONFIG_64BIT
40 .import fault_vector_11,code /* IVA parisc 1.1 32 bit */ 40 .import fault_vector_11,code /* IVA parisc 1.1 32 bit */
41 .import $global$ /* forward declaration */ 41 .import $global$ /* forward declaration */
42 #endif /*!CONFIG_64BIT*/ 42 #endif /*!CONFIG_64BIT*/
43 .export _stext,data /* Kernel wants it this way! */ 43 .export _stext,data /* Kernel wants it this way! */
44 _stext: 44 _stext:
45 ENTRY(stext) 45 ENTRY(stext)
46 .proc 46 .proc
47 .callinfo 47 .callinfo
48 48
49 /* Make sure sr4-sr7 are set to zero for the kernel address space */ 49 /* Make sure sr4-sr7 are set to zero for the kernel address space */
50 mtsp %r0,%sr4 50 mtsp %r0,%sr4
51 mtsp %r0,%sr5 51 mtsp %r0,%sr5
52 mtsp %r0,%sr6 52 mtsp %r0,%sr6
53 mtsp %r0,%sr7 53 mtsp %r0,%sr7
54 54
55 /* Clear BSS (shouldn't the boot loader do this?) */ 55 /* Clear BSS (shouldn't the boot loader do this?) */
56 56
57 .import __bss_start,data 57 .import __bss_start,data
58 .import __bss_stop,data 58 .import __bss_stop,data
59 59
60 load32 PA(__bss_start),%r3 60 load32 PA(__bss_start),%r3
61 load32 PA(__bss_stop),%r4 61 load32 PA(__bss_stop),%r4
62 $bss_loop: 62 $bss_loop:
63 cmpb,<<,n %r3,%r4,$bss_loop 63 cmpb,<<,n %r3,%r4,$bss_loop
64 stw,ma %r0,4(%r3) 64 stw,ma %r0,4(%r3)
65 65
66 /* Save away the arguments the boot loader passed in (32 bit args) */ 66 /* Save away the arguments the boot loader passed in (32 bit args) */
67 load32 PA(boot_args),%r1 67 load32 PA(boot_args),%r1
68 stw,ma %arg0,4(%r1) 68 stw,ma %arg0,4(%r1)
69 stw,ma %arg1,4(%r1) 69 stw,ma %arg1,4(%r1)
70 stw,ma %arg2,4(%r1) 70 stw,ma %arg2,4(%r1)
71 stw,ma %arg3,4(%r1) 71 stw,ma %arg3,4(%r1)
72 72
73 /* Initialize startup VM. Just map first 8/16 MB of memory */ 73 /* Initialize startup VM. Just map first 8/16 MB of memory */
74 load32 PA(swapper_pg_dir),%r4 74 load32 PA(swapper_pg_dir),%r4
75 mtctl %r4,%cr24 /* Initialize kernel root pointer */ 75 mtctl %r4,%cr24 /* Initialize kernel root pointer */
76 mtctl %r4,%cr25 /* Initialize user root pointer */ 76 mtctl %r4,%cr25 /* Initialize user root pointer */
77 77
78 #if PT_NLEVELS == 3 78 #if PT_NLEVELS == 3
79 /* Set pmd in pgd */ 79 /* Set pmd in pgd */
80 load32 PA(pmd0),%r5 80 load32 PA(pmd0),%r5
81 shrd %r5,PxD_VALUE_SHIFT,%r3 81 shrd %r5,PxD_VALUE_SHIFT,%r3
82 ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3 82 ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
83 stw %r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4) 83 stw %r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
84 ldo ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4 84 ldo ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
85 #else 85 #else
86 /* 2-level page table, so pmd == pgd */ 86 /* 2-level page table, so pmd == pgd */
87 ldo ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4 87 ldo ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
88 #endif 88 #endif
89 89
90 /* Fill in pmd with enough pte directories */ 90 /* Fill in pmd with enough pte directories */
91 load32 PA(pg0),%r1 91 load32 PA(pg0),%r1
92 SHRREG %r1,PxD_VALUE_SHIFT,%r3 92 SHRREG %r1,PxD_VALUE_SHIFT,%r3
93 ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3 93 ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
94 94
95 ldi ASM_PT_INITIAL,%r1 95 ldi ASM_PT_INITIAL,%r1
96 96
97 1: 97 1:
98 stw %r3,0(%r4) 98 stw %r3,0(%r4)
99 ldo (PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3 99 ldo (PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
100 addib,> -1,%r1,1b 100 addib,> -1,%r1,1b
101 #if PT_NLEVELS == 3 101 #if PT_NLEVELS == 3
102 ldo ASM_PMD_ENTRY_SIZE(%r4),%r4 102 ldo ASM_PMD_ENTRY_SIZE(%r4),%r4
103 #else 103 #else
104 ldo ASM_PGD_ENTRY_SIZE(%r4),%r4 104 ldo ASM_PGD_ENTRY_SIZE(%r4),%r4
105 #endif 105 #endif
106 106
107 107
108 /* Now initialize the PTEs themselves */ 108 /* Now initialize the PTEs themselves */
109 ldo 0+_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */ 109 ldo 0+_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
110 ldi (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */ 110 ldi (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
111 load32 PA(pg0),%r1 111 load32 PA(pg0),%r1
112 112
113 $pgt_fill_loop: 113 $pgt_fill_loop:
114 STREGM %r3,ASM_PTE_ENTRY_SIZE(%r1) 114 STREGM %r3,ASM_PTE_ENTRY_SIZE(%r1)
115 ldo (1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */ 115 ldo (1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
116 addib,> -1,%r11,$pgt_fill_loop 116 addib,> -1,%r11,$pgt_fill_loop
117 nop 117 nop
118 118
119 /* Load the return address...er...crash 'n burn */ 119 /* Load the return address...er...crash 'n burn */
120 copy %r0,%r2 120 copy %r0,%r2
121 121
122 /* And the RFI Target address too */ 122 /* And the RFI Target address too */
123 load32 start_kernel,%r11 123 load32 start_kernel,%r11
124 124
125 /* And the initial task pointer */ 125 /* And the initial task pointer */
126 load32 init_thread_union,%r6 126 load32 init_thread_union,%r6
127 mtctl %r6,%cr30 127 mtctl %r6,%cr30
128 128
129 /* And the stack pointer too */ 129 /* And the stack pointer too */
130 ldo THREAD_SZ_ALGN(%r6),%sp 130 ldo THREAD_SZ_ALGN(%r6),%sp
131 131
132 #ifdef CONFIG_SMP 132 #ifdef CONFIG_SMP
133 /* Set the smp rendezvous address into page zero. 133 /* Set the smp rendezvous address into page zero.
134 ** It would be safer to do this in init_smp_config() but 134 ** It would be safer to do this in init_smp_config() but
135 ** it's just way easier to deal with here because 135 ** it's just way easier to deal with here because
136 ** of 64-bit function ptrs and the address is local to this file. 136 ** of 64-bit function ptrs and the address is local to this file.
137 */ 137 */
138 load32 PA(smp_slave_stext),%r10 138 load32 PA(smp_slave_stext),%r10
139 stw %r10,0x10(%r0) /* MEM_RENDEZ */ 139 stw %r10,0x10(%r0) /* MEM_RENDEZ */
140 stw %r0,0x28(%r0) /* MEM_RENDEZ_HI - assume addr < 4GB */ 140 stw %r0,0x28(%r0) /* MEM_RENDEZ_HI - assume addr < 4GB */
141 141
142 /* FALLTHROUGH */ 142 /* FALLTHROUGH */
143 .procend 143 .procend
144 144
145 /* 145 /*
146 ** Code Common to both Monarch and Slave processors. 146 ** Code Common to both Monarch and Slave processors.
147 ** Entry: 147 ** Entry:
148 ** 148 **
149 ** 1.1: 149 ** 1.1:
150 ** %r11 must contain RFI target address. 150 ** %r11 must contain RFI target address.
151 ** %r25/%r26 args to pass to target function 151 ** %r25/%r26 args to pass to target function
152 ** %r2 in case rfi target decides it didn't like something 152 ** %r2 in case rfi target decides it didn't like something
153 ** 153 **
154 ** 2.0w: 154 ** 2.0w:
155 ** %r3 PDCE_PROC address 155 ** %r3 PDCE_PROC address
156 ** %r11 RFI target address 156 ** %r11 RFI target address
157 ** 157 **
158 ** Caller must init: SR4-7, %sp, %r10, %cr24/25, 158 ** Caller must init: SR4-7, %sp, %r10, %cr24/25,
159 */ 159 */
160 common_stext: 160 common_stext:
161 .proc 161 .proc
162 .callinfo 162 .callinfo
163 #else 163 #else
164 /* Clear PDC entry point - we won't use it */ 164 /* Clear PDC entry point - we won't use it */
165 stw %r0,0x10(%r0) /* MEM_RENDEZ */ 165 stw %r0,0x10(%r0) /* MEM_RENDEZ */
166 stw %r0,0x28(%r0) /* MEM_RENDEZ_HI */ 166 stw %r0,0x28(%r0) /* MEM_RENDEZ_HI */
167 #endif /*CONFIG_SMP*/ 167 #endif /*CONFIG_SMP*/
168 168
169 #ifdef CONFIG_64BIT 169 #ifdef CONFIG_64BIT
170 tophys_r1 %sp 170 tophys_r1 %sp
171 171
172 /* Save the rfi target address */ 172 /* Save the rfi target address */
173 ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10 173 ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
174 tophys_r1 %r10 174 tophys_r1 %r10
175 std %r11, TASK_PT_GR11(%r10) 175 std %r11, TASK_PT_GR11(%r10)
176 /* Switch to wide mode; Superdome doesn't support narrow PDC 176 /* Switch to wide mode; Superdome doesn't support narrow PDC
177 ** calls. 177 ** calls.
178 */ 178 */
179 1: mfia %rp /* clear upper part of pcoq */ 179 1: mfia %rp /* clear upper part of pcoq */
180 ldo 2f-1b(%rp),%rp 180 ldo 2f-1b(%rp),%rp
181 depdi 0,31,32,%rp 181 depdi 0,31,32,%rp
182 bv (%rp) 182 bv (%rp)
183 ssm PSW_SM_W,%r0 183 ssm PSW_SM_W,%r0
184 184
185 /* Set Wide mode as the "Default" (eg for traps) 185 /* Set Wide mode as the "Default" (eg for traps)
186 ** First trap occurs *right* after (or part of) rfi for slave CPUs. 186 ** First trap occurs *right* after (or part of) rfi for slave CPUs.
187 ** Someday, palo might not do this for the Monarch either. 187 ** Someday, palo might not do this for the Monarch either.
188 */ 188 */
189 2: 189 2:
190 #define MEM_PDC_LO 0x388 190 #define MEM_PDC_LO 0x388
191 #define MEM_PDC_HI 0x35C 191 #define MEM_PDC_HI 0x35C
192 ldw MEM_PDC_LO(%r0),%r3 192 ldw MEM_PDC_LO(%r0),%r3
193 ldw MEM_PDC_HI(%r0),%r6 193 ldw MEM_PDC_HI(%r0),%r6
194 depd %r6, 31, 32, %r3 /* move to upper word */ 194 depd %r6, 31, 32, %r3 /* move to upper word */
195 195
196 ldo PDC_PSW(%r0),%arg0 /* 21 */ 196 ldo PDC_PSW(%r0),%arg0 /* 21 */
197 ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */ 197 ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
198 ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */ 198 ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
199 load32 PA(stext_pdc_ret), %rp 199 load32 PA(stext_pdc_ret), %rp
200 bv (%r3) 200 bv (%r3)
201 copy %r0,%arg3 201 copy %r0,%arg3
202 202
203 stext_pdc_ret: 203 stext_pdc_ret:
204 /* restore rfi target address*/ 204 /* restore rfi target address*/
205 ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10 205 ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
206 tophys_r1 %r10 206 tophys_r1 %r10
207 ldd TASK_PT_GR11(%r10), %r11 207 ldd TASK_PT_GR11(%r10), %r11
208 tovirt_r1 %sp 208 tovirt_r1 %sp
209 #endif 209 #endif
210 210
211 /* PARANOID: clear user scratch/user space SR's */ 211 /* PARANOID: clear user scratch/user space SR's */
212 mtsp %r0,%sr0 212 mtsp %r0,%sr0
213 mtsp %r0,%sr1 213 mtsp %r0,%sr1
214 mtsp %r0,%sr2 214 mtsp %r0,%sr2
215 mtsp %r0,%sr3 215 mtsp %r0,%sr3
216 216
217 /* Initialize Protection Registers */ 217 /* Initialize Protection Registers */
218 mtctl %r0,%cr8 218 mtctl %r0,%cr8
219 mtctl %r0,%cr9 219 mtctl %r0,%cr9
220 mtctl %r0,%cr12 220 mtctl %r0,%cr12
221 mtctl %r0,%cr13 221 mtctl %r0,%cr13
222 222
223 /* Initialize the global data pointer */ 223 /* Initialize the global data pointer */
224 loadgp 224 loadgp
225 225
226 /* Set up our interrupt table. HPMCs might not work after this! 226 /* Set up our interrupt table. HPMCs might not work after this!
227 * 227 *
228 * We need to install the correct iva for PA1.1 or PA2.0. The 228 * We need to install the correct iva for PA1.1 or PA2.0. The
229 * following short sequence of instructions can determine this 229 * following short sequence of instructions can determine this
230 * (without being illegal on a PA1.1 machine). 230 * (without being illegal on a PA1.1 machine).
231 */ 231 */
232 #ifndef CONFIG_64BIT 232 #ifndef CONFIG_64BIT
233 ldi 32,%r10 233 ldi 32,%r10
234 mtctl %r10,%cr11 234 mtctl %r10,%cr11
235 .level 2.0 235 .level 2.0
236 mfctl,w %cr11,%r10 236 mfctl,w %cr11,%r10
237 .level 1.1 237 .level 1.1
238 comib,<>,n 0,%r10,$is_pa20 238 comib,<>,n 0,%r10,$is_pa20
239 ldil L%PA(fault_vector_11),%r10 239 ldil L%PA(fault_vector_11),%r10
240 b $install_iva 240 b $install_iva
241 ldo R%PA(fault_vector_11)(%r10),%r10 241 ldo R%PA(fault_vector_11)(%r10),%r10
242 242
243 $is_pa20: 243 $is_pa20:
244 .level LEVEL /* restore 1.1 || 2.0w */ 244 .level LEVEL /* restore 1.1 || 2.0w */
245 #endif /*!CONFIG_64BIT*/ 245 #endif /*!CONFIG_64BIT*/
246 load32 PA(fault_vector_20),%r10 246 load32 PA(fault_vector_20),%r10
247 247
248 $install_iva: 248 $install_iva:
249 mtctl %r10,%cr14 249 mtctl %r10,%cr14
250 250
251 b aligned_rfi /* Prepare to RFI! Man all the cannons! */ 251 b aligned_rfi /* Prepare to RFI! Man all the cannons! */
252 nop 252 nop
253 253
254 .align 128 254 .align 128
255 aligned_rfi: 255 aligned_rfi:
256 pcxt_ssm_bug 256 pcxt_ssm_bug
257 257
258 rsm PSW_SM_QUIET,%r0 /* off troublesome PSW bits */ 258 rsm PSW_SM_QUIET,%r0 /* off troublesome PSW bits */
259 /* Don't need NOPs, have 8 compliant insn before rfi */ 259 /* Don't need NOPs, have 8 compliant insn before rfi */
260 260
261 mtctl %r0,%cr17 /* Clear IIASQ tail */ 261 mtctl %r0,%cr17 /* Clear IIASQ tail */
262 mtctl %r0,%cr17 /* Clear IIASQ head */ 262 mtctl %r0,%cr17 /* Clear IIASQ head */
263 263
264 /* Load RFI target into PC queue */ 264 /* Load RFI target into PC queue */
265 mtctl %r11,%cr18 /* IIAOQ head */ 265 mtctl %r11,%cr18 /* IIAOQ head */
266 ldo 4(%r11),%r11 266 ldo 4(%r11),%r11
267 mtctl %r11,%cr18 /* IIAOQ tail */ 267 mtctl %r11,%cr18 /* IIAOQ tail */
268 268
269 load32 KERNEL_PSW,%r10 269 load32 KERNEL_PSW,%r10
270 mtctl %r10,%ipsw 270 mtctl %r10,%ipsw
271 271
272 /* Jump through hyperspace to Virt Mode */ 272 /* Jump through hyperspace to Virt Mode */
273 rfi 273 rfi
274 nop 274 nop
275 275
276 .procend 276 .procend
277 277
278 #ifdef CONFIG_SMP 278 #ifdef CONFIG_SMP
279 279
280 .import smp_init_current_idle_task,data 280 .import smp_init_current_idle_task,data
281 .import smp_callin,code 281 .import smp_callin,code
282 282
283 #ifndef CONFIG_64BIT 283 #ifndef CONFIG_64BIT
284 smp_callin_rtn: 284 smp_callin_rtn:
285 .proc 285 .proc
286 .callinfo 286 .callinfo
287 break 1,1 /* Break if returned from start_secondary */ 287 break 1,1 /* Break if returned from start_secondary */
288 nop 288 nop
289 nop 289 nop
290 .procend 290 .procend
291 #endif /*!CONFIG_64BIT*/ 291 #endif /*!CONFIG_64BIT*/
292 292
293 /*************************************************************************** 293 /***************************************************************************
294 * smp_slave_stext is executed by all non-monarch Processors when the Monarch 294 * smp_slave_stext is executed by all non-monarch Processors when the Monarch
295 * pokes the slave CPUs in smp.c:smp_boot_cpus(). 295 * pokes the slave CPUs in smp.c:smp_boot_cpus().
296 * 296 *
297 * Once here, register values are initialized in order to branch to virtual 297 * Once here, register values are initialized in order to branch to virtual
298 * mode. Once all available/eligible CPUs are in virtual mode, all are 298 * mode. Once all available/eligible CPUs are in virtual mode, all are
299 * released and start out by executing their own idle task. 299 * released and start out by executing their own idle task.
300 *****************************************************************************/ 300 *****************************************************************************/
301 smp_slave_stext: 301 smp_slave_stext:
302 .proc 302 .proc
303 .callinfo 303 .callinfo
304 304
305 /* 305 /*
306 ** Initialize Space registers 306 ** Initialize Space registers
307 */ 307 */
308 mtsp %r0,%sr4 308 mtsp %r0,%sr4
309 mtsp %r0,%sr5 309 mtsp %r0,%sr5
310 mtsp %r0,%sr6 310 mtsp %r0,%sr6
311 mtsp %r0,%sr7 311 mtsp %r0,%sr7
312 312
313 /* Initialize the SP - monarch sets up smp_init_current_idle_task */ 313 /* Initialize the SP - monarch sets up smp_init_current_idle_task */
314 load32 PA(smp_init_current_idle_task),%sp 314 load32 PA(smp_init_current_idle_task),%sp
315 LDREG 0(%sp),%sp /* load task address */ 315 LDREG 0(%sp),%sp /* load task address */
316 tophys_r1 %sp 316 tophys_r1 %sp
317 LDREG TASK_THREAD_INFO(%sp),%sp 317 LDREG TASK_THREAD_INFO(%sp),%sp
318 mtctl %sp,%cr30 /* store in cr30 */ 318 mtctl %sp,%cr30 /* store in cr30 */
319 ldo THREAD_SZ_ALGN(%sp),%sp 319 ldo THREAD_SZ_ALGN(%sp),%sp
320 320
321 /* point CPU to kernel page tables */ 321 /* point CPU to kernel page tables */
322 load32 PA(swapper_pg_dir),%r4 322 load32 PA(swapper_pg_dir),%r4
323 mtctl %r4,%cr24 /* Initialize kernel root pointer */ 323 mtctl %r4,%cr24 /* Initialize kernel root pointer */
324 mtctl %r4,%cr25 /* Initialize user root pointer */ 324 mtctl %r4,%cr25 /* Initialize user root pointer */
325 325
326 #ifdef CONFIG_64BIT 326 #ifdef CONFIG_64BIT
327 /* Setup PDCE_PROC entry */ 327 /* Setup PDCE_PROC entry */
328 copy %arg0,%r3 328 copy %arg0,%r3
329 #else 329 #else
330 /* Load RFI *return* address in case smp_callin bails */ 330 /* Load RFI *return* address in case smp_callin bails */
331 load32 smp_callin_rtn,%r2 331 load32 smp_callin_rtn,%r2
332 #endif 332 #endif
333 333
334 /* Load RFI target address. */ 334 /* Load RFI target address. */
335 load32 smp_callin,%r11 335 load32 smp_callin,%r11
336 336
337 /* ok...common code can handle the rest */ 337 /* ok...common code can handle the rest */
338 b common_stext 338 b common_stext
339 nop 339 nop
340 340
341 .procend 341 .procend
342 #endif /* CONFIG_SMP */ 342 #endif /* CONFIG_SMP */
343 343
344 ENDPROC(stext) 344 ENDPROC(stext)
345 345
346 #ifndef CONFIG_64BIT 346 #ifndef CONFIG_64BIT
347 .section .data.read_mostly 347 .section .data.read_mostly
348 348
349 .align 4 349 .align 4
350 .export $global$,data 350 .export $global$,data
351 351
352 .type $global$,@object 352 .type $global$,@object
353 .size $global$,4 353 .size $global$,4
354 $global$: 354 $global$:
355 .word 0 355 .word 0
356 #endif /*!CONFIG_64BIT*/ 356 #endif /*!CONFIG_64BIT*/
357 357
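head.S's $pgt_fill_loop above seeds the initial mapping by storing one PTE per page frame, starting at physical address 0 with kernel protections and advancing the PFN field by one on each iteration. A minimal C sketch of that loop, assuming placeholder values for the _PAGE_KERNEL and PFN_PTE_SHIFT constants it uses:

	/* Sketch only: C rendering of $pgt_fill_loop in head.S. The two
	 * macros are placeholders; the real values are arch-specific. */
	#define _PAGE_KERNEL	0x1ffUL		/* placeholder protection bits */
	#define PFN_PTE_SHIFT	12		/* placeholder PFN field shift */

	static void fill_initial_ptes(unsigned long *pg0, unsigned long npages)
	{
		unsigned long pte = 0 + _PAGE_KERNEL;	/* phys addr 0 + kernel prot */

		while (npages--) {
			*pg0++ = pte;			/* STREGM: store, post-increment */
			pte += 1UL << PFN_PTE_SHIFT;	/* add one PFN */
		}
	}

Here npages corresponds to (1 << (KERNEL_INITIAL_ORDER - PAGE_SHIFT)), the 8/16 MB window mapped at boot.
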
arch/parisc/kernel/hpmc.S
1 /* 1 /*
2 * HPMC (High Priority Machine Check) handler. 2 * HPMC (High Priority Machine Check) handler.
3 * 3 *
4 * Copyright (C) 1999 Philipp Rumpf <prumpf@tux.org> 4 * Copyright (C) 1999 Philipp Rumpf <prumpf@tux.org>
5 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand) 5 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
6 * Copyright (C) 2000 Hewlett-Packard (John Marvin) 6 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option) 10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version. 11 * any later version.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, 13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */ 21 */
22 22
23 23
24 /* 24 /*
25 * This HPMC handler retrieves the HPMC pim data, resets IO and 25 * This HPMC handler retrieves the HPMC pim data, resets IO and
26 * returns to the default trap handler with code set to 1 (HPMC). 26 * returns to the default trap handler with code set to 1 (HPMC).
27 * The default trap handler calls handle interruption, which 27 * The default trap handler calls handle interruption, which
28 * does a stack and register dump. This at least allows kernel 28 * does a stack and register dump. This at least allows kernel
29 * developers to get back to C code in virtual mode, where they 29 * developers to get back to C code in virtual mode, where they
30 * have the option to examine and print values from memory that 30 * have the option to examine and print values from memory that
31 * would help in debugging an HPMC caused by a software bug. 31 * would help in debugging an HPMC caused by a software bug.
32 * 32 *
33 * There is more to do here: 33 * There is more to do here:
34 * 34 *
35 * 1) On MP systems we need to synchronize processors 35 * 1) On MP systems we need to synchronize processors
36 * before calling pdc/iodc. 36 * before calling pdc/iodc.
37 * 2) We should be checking the system state and not 37 * 2) We should be checking the system state and not
38 * returning to the fault handler if things are really 38 * returning to the fault handler if things are really
39 * bad. 39 * bad.
40 * 40 *
41 */ 41 */
42 42
43 .level 1.1 43 .level 1.1
44 .data 44 .data
45 45
46 #include <asm/assembly.h> 46 #include <asm/assembly.h>
47 #include <asm/pdc.h> 47 #include <asm/pdc.h>
48 48
49 #include <linux/linkage.h> 49 #include <linux/linkage.h>
50 #include <linux/init.h>
51 50
52 /* 51 /*
53 * stack for os_hpmc, the HPMC handler. 52 * stack for os_hpmc, the HPMC handler.
54 * buffer for IODC procedures (for the HPMC handler). 53 * buffer for IODC procedures (for the HPMC handler).
55 * 54 *
56 * IODC requires 7K byte stack. That leaves 1K byte for os_hpmc. 55 * IODC requires 7K byte stack. That leaves 1K byte for os_hpmc.
57 */ 56 */
58 57
59 .align PAGE_SIZE 58 .align PAGE_SIZE
60 hpmc_stack: 59 hpmc_stack:
61 .block 16384 60 .block 16384
62 61
63 #define HPMC_IODC_BUF_SIZE 0x8000 62 #define HPMC_IODC_BUF_SIZE 0x8000
64 63
65 .align PAGE_SIZE 64 .align PAGE_SIZE
66 hpmc_iodc_buf: 65 hpmc_iodc_buf:
67 .block HPMC_IODC_BUF_SIZE 66 .block HPMC_IODC_BUF_SIZE
68 67
69 .align 8 68 .align 8
70 hpmc_raddr: 69 hpmc_raddr:
71 .block 128 70 .block 128
72 71
73 #define HPMC_PIM_DATA_SIZE 896 /* Enough to hold all architected 2.0 state */ 72 #define HPMC_PIM_DATA_SIZE 896 /* Enough to hold all architected 2.0 state */
74 73
75 .align 8 74 .align 8
76 ENTRY(hpmc_pim_data) 75 ENTRY(hpmc_pim_data)
77 .block HPMC_PIM_DATA_SIZE 76 .block HPMC_PIM_DATA_SIZE
78 END(hpmc_pim_data) 77 END(hpmc_pim_data)
79 78
80 __HEAD 79 .text
81 80
82 .import intr_save, code 81 .import intr_save, code
83 ENTRY(os_hpmc) 82 ENTRY(os_hpmc)
84 83
85 /* 84 /*
86 * registers modified: 85 * registers modified:
87 * 86 *
88 * Using callee saves registers without saving them. The 87 * Using callee saves registers without saving them. The
89 * original values are in the pim dump if we need them. 88 * original values are in the pim dump if we need them.
90 * 89 *
91 * r2 (rp) return pointer 90 * r2 (rp) return pointer
92 * r3 address of PDCE_PROC 91 * r3 address of PDCE_PROC
93 * r4 scratch 92 * r4 scratch
94 * r5 scratch 93 * r5 scratch
95 * r23 (arg3) procedure arg 94 * r23 (arg3) procedure arg
96 * r24 (arg2) procedure arg 95 * r24 (arg2) procedure arg
97 * r25 (arg1) procedure arg 96 * r25 (arg1) procedure arg
98 * r26 (arg0) procedure arg 97 * r26 (arg0) procedure arg
99 * r30 (sp) stack pointer 98 * r30 (sp) stack pointer
100 * 99 *
101 * registers read: 100 * registers read:
102 * 101 *
103 * r26 contains address of PDCE_PROC on entry 102 * r26 contains address of PDCE_PROC on entry
104 * r28 (ret0) return value from procedure 103 * r28 (ret0) return value from procedure
105 */ 104 */
106 105
107 copy arg0, %r3 /* save address of PDCE_PROC */ 106 copy arg0, %r3 /* save address of PDCE_PROC */
108 107
109 /* 108 /*
110 * disable nested HPMCs 109 * disable nested HPMCs
111 * 110 *
112 * Increment os_hpmc checksum to invalidate it. 111 * Increment os_hpmc checksum to invalidate it.
113 * Do this before turning the PSW M bit off. 112 * Do this before turning the PSW M bit off.
114 */ 113 */
115 114
116 mfctl %cr14, %r4 115 mfctl %cr14, %r4
117 ldw 52(%r4),%r5 116 ldw 52(%r4),%r5
118 addi 1,%r5,%r5 117 addi 1,%r5,%r5
119 stw %r5,52(%r4) 118 stw %r5,52(%r4)
120 119
121 /* MP_FIXME: synchronize all processors. */ 120 /* MP_FIXME: synchronize all processors. */
122 121
123 /* Setup stack pointer. */ 122 /* Setup stack pointer. */
124 123
125 load32 PA(hpmc_stack),sp 124 load32 PA(hpmc_stack),sp
126 125
127 ldo 128(sp),sp /* leave room for arguments */ 126 ldo 128(sp),sp /* leave room for arguments */
128 127
129 /* 128 /*
130 * Most PDC routines require that the M bit be off. 129 * Most PDC routines require that the M bit be off.
131 * So turn on the Q bit and turn off the M bit. 130 * So turn on the Q bit and turn off the M bit.
132 */ 131 */
133 132
134 ldo 8(%r0),%r4 /* PSW Q on, PSW M off */ 133 ldo 8(%r0),%r4 /* PSW Q on, PSW M off */
135 mtctl %r4,ipsw 134 mtctl %r4,ipsw
136 mtctl %r0,pcsq 135 mtctl %r0,pcsq
137 mtctl %r0,pcsq 136 mtctl %r0,pcsq
138 load32 PA(os_hpmc_1),%r4 137 load32 PA(os_hpmc_1),%r4
139 mtctl %r4,pcoq 138 mtctl %r4,pcoq
140 ldo 4(%r4),%r4 139 ldo 4(%r4),%r4
141 mtctl %r4,pcoq 140 mtctl %r4,pcoq
142 rfi 141 rfi
143 nop 142 nop
144 143
145 os_hpmc_1: 144 os_hpmc_1:
146 145
147 /* Call PDC_PIM to get HPMC pim info */ 146 /* Call PDC_PIM to get HPMC pim info */
148 147
149 /* 148 /*
150 * Note that on some newer boxes, PDC_PIM must be called 149 * Note that on some newer boxes, PDC_PIM must be called
151 * before PDC_IO if you want IO to be reset. PDC_PIM sets 150 * before PDC_IO if you want IO to be reset. PDC_PIM sets
152 * a flag that PDC_IO examines. 151 * a flag that PDC_IO examines.
153 */ 152 */
154 153
155 ldo PDC_PIM(%r0), arg0 154 ldo PDC_PIM(%r0), arg0
156 ldo PDC_PIM_HPMC(%r0),arg1 /* Transfer HPMC data */ 155 ldo PDC_PIM_HPMC(%r0),arg1 /* Transfer HPMC data */
157 load32 PA(hpmc_raddr),arg2 156 load32 PA(hpmc_raddr),arg2
158 load32 PA(hpmc_pim_data),arg3 157 load32 PA(hpmc_pim_data),arg3
159 load32 HPMC_PIM_DATA_SIZE,%r4 158 load32 HPMC_PIM_DATA_SIZE,%r4
160 stw %r4,-52(sp) 159 stw %r4,-52(sp)
161 160
162 ldil L%PA(os_hpmc_2), rp 161 ldil L%PA(os_hpmc_2), rp
163 bv (r3) /* call pdce_proc */ 162 bv (r3) /* call pdce_proc */
164 ldo R%PA(os_hpmc_2)(rp), rp 163 ldo R%PA(os_hpmc_2)(rp), rp
165 164
166 os_hpmc_2: 165 os_hpmc_2:
167 comib,<> 0,ret0, os_hpmc_fail 166 comib,<> 0,ret0, os_hpmc_fail
168 167
169 /* Reset IO by calling the hversion dependent PDC_IO routine */ 168 /* Reset IO by calling the hversion dependent PDC_IO routine */
170 169
171 ldo PDC_IO(%r0),arg0 170 ldo PDC_IO(%r0),arg0
172 ldo 0(%r0),arg1 /* log IO errors */ 171 ldo 0(%r0),arg1 /* log IO errors */
173 ldo 0(%r0),arg2 /* reserved */ 172 ldo 0(%r0),arg2 /* reserved */
174 ldo 0(%r0),arg3 /* reserved */ 173 ldo 0(%r0),arg3 /* reserved */
175 stw %r0,-52(sp) /* reserved */ 174 stw %r0,-52(sp) /* reserved */
176 175
177 ldil L%PA(os_hpmc_3),rp 176 ldil L%PA(os_hpmc_3),rp
178 bv (%r3) /* call pdce_proc */ 177 bv (%r3) /* call pdce_proc */
179 ldo R%PA(os_hpmc_3)(rp),rp 178 ldo R%PA(os_hpmc_3)(rp),rp
180 179
181 os_hpmc_3: 180 os_hpmc_3:
182 181
183 /* FIXME? Check for errors from PDC_IO (-1 might be OK) */ 182 /* FIXME? Check for errors from PDC_IO (-1 might be OK) */
184 183
185 /* 184 /*
 186 * Initialize the IODC console device (HPA, SPA, path, etc. 185 * Initialize the IODC console device (HPA, SPA, path, etc.
 187 * are stored on page 0). 186 * are stored on page 0).
188 */ 187 */
189 188
190 /* 189 /*
191 * Load IODC into hpmc_iodc_buf by calling PDC_IODC. 190 * Load IODC into hpmc_iodc_buf by calling PDC_IODC.
192 * Note that PDC_IODC handles flushing the appropriate 191 * Note that PDC_IODC handles flushing the appropriate
193 * data and instruction cache lines. 192 * data and instruction cache lines.
194 */ 193 */
195 194
196 ldo PDC_IODC(%r0),arg0 195 ldo PDC_IODC(%r0),arg0
197 ldo PDC_IODC_READ(%r0),arg1 196 ldo PDC_IODC_READ(%r0),arg1
198 load32 PA(hpmc_raddr),arg2 197 load32 PA(hpmc_raddr),arg2
199 ldw BOOT_CONSOLE_HPA_OFFSET(%r0),arg3 /* console hpa */ 198 ldw BOOT_CONSOLE_HPA_OFFSET(%r0),arg3 /* console hpa */
200 ldo PDC_IODC_RI_INIT(%r0),%r4 199 ldo PDC_IODC_RI_INIT(%r0),%r4
201 stw %r4,-52(sp) 200 stw %r4,-52(sp)
202 load32 PA(hpmc_iodc_buf),%r4 201 load32 PA(hpmc_iodc_buf),%r4
203 stw %r4,-56(sp) 202 stw %r4,-56(sp)
204 load32 HPMC_IODC_BUF_SIZE,%r4 203 load32 HPMC_IODC_BUF_SIZE,%r4
205 stw %r4,-60(sp) 204 stw %r4,-60(sp)
206 205
207 ldil L%PA(os_hpmc_4),rp 206 ldil L%PA(os_hpmc_4),rp
208 bv (%r3) /* call pdce_proc */ 207 bv (%r3) /* call pdce_proc */
209 ldo R%PA(os_hpmc_4)(rp),rp 208 ldo R%PA(os_hpmc_4)(rp),rp
210 209
211 os_hpmc_4: 210 os_hpmc_4:
212 comib,<> 0,ret0,os_hpmc_fail 211 comib,<> 0,ret0,os_hpmc_fail
213 212
214 /* Call the entry init (just loaded by PDC_IODC) */ 213 /* Call the entry init (just loaded by PDC_IODC) */
215 214
216 ldw BOOT_CONSOLE_HPA_OFFSET(%r0),arg0 /* console hpa */ 215 ldw BOOT_CONSOLE_HPA_OFFSET(%r0),arg0 /* console hpa */
217 ldo ENTRY_INIT_MOD_DEV(%r0), arg1 216 ldo ENTRY_INIT_MOD_DEV(%r0), arg1
218 ldw BOOT_CONSOLE_SPA_OFFSET(%r0),arg2 /* console spa */ 217 ldw BOOT_CONSOLE_SPA_OFFSET(%r0),arg2 /* console spa */
219 depi 0,31,11,arg2 /* clear bits 21-31 */ 218 depi 0,31,11,arg2 /* clear bits 21-31 */
220 ldo BOOT_CONSOLE_PATH_OFFSET(%r0),arg3 /* console path */ 219 ldo BOOT_CONSOLE_PATH_OFFSET(%r0),arg3 /* console path */
221 load32 PA(hpmc_raddr),%r4 220 load32 PA(hpmc_raddr),%r4
222 stw %r4, -52(sp) 221 stw %r4, -52(sp)
223 stw %r0, -56(sp) /* HV */ 222 stw %r0, -56(sp) /* HV */
224 stw %r0, -60(sp) /* HV */ 223 stw %r0, -60(sp) /* HV */
225 stw %r0, -64(sp) /* HV */ 224 stw %r0, -64(sp) /* HV */
226 stw %r0, -68(sp) /* lang, must be zero */ 225 stw %r0, -68(sp) /* lang, must be zero */
227 226
228 load32 PA(hpmc_iodc_buf),%r5 227 load32 PA(hpmc_iodc_buf),%r5
229 ldil L%PA(os_hpmc_5),rp 228 ldil L%PA(os_hpmc_5),rp
230 bv (%r5) 229 bv (%r5)
231 ldo R%PA(os_hpmc_5)(rp),rp 230 ldo R%PA(os_hpmc_5)(rp),rp
232 231
233 os_hpmc_5: 232 os_hpmc_5:
234 comib,<> 0,ret0,os_hpmc_fail 233 comib,<> 0,ret0,os_hpmc_fail
235 234
236 /* Prepare to call intr_save */ 235 /* Prepare to call intr_save */
237 236
238 /* 237 /*
239 * Load kernel page directory (load into user also, since 238 * Load kernel page directory (load into user also, since
240 * we don't intend to ever return to user land anyway) 239 * we don't intend to ever return to user land anyway)
241 */ 240 */
242 241
243 load32 PA(swapper_pg_dir),%r4 242 load32 PA(swapper_pg_dir),%r4
244 mtctl %r4,%cr24 /* Initialize kernel root pointer */ 243 mtctl %r4,%cr24 /* Initialize kernel root pointer */
245 mtctl %r4,%cr25 /* Initialize user root pointer */ 244 mtctl %r4,%cr25 /* Initialize user root pointer */
246 245
247 /* Clear sr4-sr7 */ 246 /* Clear sr4-sr7 */
248 247
249 mtsp %r0, %sr4 248 mtsp %r0, %sr4
250 mtsp %r0, %sr5 249 mtsp %r0, %sr5
251 mtsp %r0, %sr6 250 mtsp %r0, %sr6
252 mtsp %r0, %sr7 251 mtsp %r0, %sr7
253 252
254 tovirt_r1 %r30 /* make sp virtual */ 253 tovirt_r1 %r30 /* make sp virtual */
255 254
256 rsm 8,%r0 /* Clear Q bit */ 255 rsm 8,%r0 /* Clear Q bit */
257 ldi 1,%r8 /* Set trap code to "1" for HPMC */ 256 ldi 1,%r8 /* Set trap code to "1" for HPMC */
258 load32 PA(intr_save),%r1 257 load32 PA(intr_save),%r1
259 be 0(%sr7,%r1) 258 be 0(%sr7,%r1)
260 nop 259 nop
261 260
262 os_hpmc_fail: 261 os_hpmc_fail:
263 262
264 /* 263 /*
265 * Reset the system 264 * Reset the system
266 * 265 *
267 * Some systems may lockup from a broadcast reset, so try the 266 * Some systems may lockup from a broadcast reset, so try the
268 * hversion PDC_BROADCAST_RESET() first. 267 * hversion PDC_BROADCAST_RESET() first.
269 * MP_FIXME: reset all processors if more than one central bus. 268 * MP_FIXME: reset all processors if more than one central bus.
270 */ 269 */
271 270
272 /* PDC_BROADCAST_RESET() */ 271 /* PDC_BROADCAST_RESET() */
273 272
274 ldo PDC_BROADCAST_RESET(%r0),arg0 273 ldo PDC_BROADCAST_RESET(%r0),arg0
275 ldo 0(%r0),arg1 /* do reset */ 274 ldo 0(%r0),arg1 /* do reset */
276 275
277 ldil L%PA(os_hpmc_6),rp 276 ldil L%PA(os_hpmc_6),rp
278 bv (%r3) /* call pdce_proc */ 277 bv (%r3) /* call pdce_proc */
279 ldo R%PA(os_hpmc_6)(rp),rp 278 ldo R%PA(os_hpmc_6)(rp),rp
280 279
281 os_hpmc_6: 280 os_hpmc_6:
282 281
283 /* 282 /*
284 * possible return values: 283 * possible return values:
285 * -1 non-existent procedure 284 * -1 non-existent procedure
286 * -2 non-existent option 285 * -2 non-existent option
287 * -16 unaligned stack 286 * -16 unaligned stack
288 * 287 *
289 * If call returned, do a broadcast reset. 288 * If call returned, do a broadcast reset.
290 */ 289 */
291 290
292 ldil L%0xfffc0000,%r4 /* IO_BROADCAST */ 291 ldil L%0xfffc0000,%r4 /* IO_BROADCAST */
293 ldo 5(%r0),%r5 292 ldo 5(%r0),%r5
294 stw %r5,48(%r4) /* CMD_RESET to IO_COMMAND offset */ 293 stw %r5,48(%r4) /* CMD_RESET to IO_COMMAND offset */
295 294
296 b . 295 b .
297 nop 296 nop
298 ENDPROC(os_hpmc) 297 ENDPROC(os_hpmc)
299 ENTRY(os_hpmc_end) /* this label used to compute os_hpmc checksum */ 298 ENTRY(os_hpmc_end) /* this label used to compute os_hpmc checksum */
300 nop 299 nop
301 300
arch/parisc/kernel/pacache.S
1 /* 1 /*
2 * PARISC TLB and cache flushing support 2 * PARISC TLB and cache flushing support
3 * Copyright (C) 2000-2001 Hewlett-Packard (John Marvin) 3 * Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
4 * Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org) 4 * Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
5 * Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org) 5 * Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option) 9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version. 10 * any later version.
11 * 11 *
12 * This program is distributed in the hope that it will be useful, 12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 * 16 *
17 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software 18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 20 */
21 21
22 /* 22 /*
 23 * NOTE: fdc, fic, and pdc instructions that use base register modification 23 * NOTE: fdc, fic, and pdc instructions that use base register modification
24 * should only use index and base registers that are not shadowed, 24 * should only use index and base registers that are not shadowed,
25 * so that the fast path emulation in the non access miss handler 25 * so that the fast path emulation in the non access miss handler
26 * can be used. 26 * can be used.
27 */ 27 */
28 28
29 #ifdef CONFIG_64BIT 29 #ifdef CONFIG_64BIT
30 .level 2.0w 30 .level 2.0w
31 #else 31 #else
32 .level 2.0 32 .level 2.0
33 #endif 33 #endif
34 34
35 #include <asm/psw.h> 35 #include <asm/psw.h>
36 #include <asm/assembly.h> 36 #include <asm/assembly.h>
37 #include <asm/pgtable.h> 37 #include <asm/pgtable.h>
38 #include <asm/cache.h> 38 #include <asm/cache.h>
39 #include <linux/linkage.h> 39 #include <linux/linkage.h>
40 #include <linux/init.h>
41 40
42 __HEAD 41 .text
43 .align 128 42 .align 128
44 43
45 ENTRY(flush_tlb_all_local) 44 ENTRY(flush_tlb_all_local)
46 .proc 45 .proc
47 .callinfo NO_CALLS 46 .callinfo NO_CALLS
48 .entry 47 .entry
49 48
50 /* 49 /*
51 * The pitlbe and pdtlbe instructions should only be used to 50 * The pitlbe and pdtlbe instructions should only be used to
 52 * flush the entire tlb. Also, there must be no intervening 51 * flush the entire tlb. Also, there must be no intervening
53 * tlb operations, e.g. tlb misses, so the operation needs 52 * tlb operations, e.g. tlb misses, so the operation needs
54 * to happen in real mode with all interruptions disabled. 53 * to happen in real mode with all interruptions disabled.
55 */ 54 */
56 55
57 /* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. F-4 and F-5 */ 56 /* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. F-4 and F-5 */
58 rsm PSW_SM_I, %r19 /* save I-bit state */ 57 rsm PSW_SM_I, %r19 /* save I-bit state */
59 load32 PA(1f), %r1 58 load32 PA(1f), %r1
60 nop 59 nop
61 nop 60 nop
62 nop 61 nop
63 nop 62 nop
64 nop 63 nop
65 64
66 rsm PSW_SM_Q, %r0 /* prep to load iia queue */ 65 rsm PSW_SM_Q, %r0 /* prep to load iia queue */
67 mtctl %r0, %cr17 /* Clear IIASQ tail */ 66 mtctl %r0, %cr17 /* Clear IIASQ tail */
68 mtctl %r0, %cr17 /* Clear IIASQ head */ 67 mtctl %r0, %cr17 /* Clear IIASQ head */
69 mtctl %r1, %cr18 /* IIAOQ head */ 68 mtctl %r1, %cr18 /* IIAOQ head */
70 ldo 4(%r1), %r1 69 ldo 4(%r1), %r1
71 mtctl %r1, %cr18 /* IIAOQ tail */ 70 mtctl %r1, %cr18 /* IIAOQ tail */
72 load32 REAL_MODE_PSW, %r1 71 load32 REAL_MODE_PSW, %r1
73 mtctl %r1, %ipsw 72 mtctl %r1, %ipsw
74 rfi 73 rfi
75 nop 74 nop
76 75
77 1: load32 PA(cache_info), %r1 76 1: load32 PA(cache_info), %r1
78 77
79 /* Flush Instruction Tlb */ 78 /* Flush Instruction Tlb */
80 79
81 LDREG ITLB_SID_BASE(%r1), %r20 80 LDREG ITLB_SID_BASE(%r1), %r20
82 LDREG ITLB_SID_STRIDE(%r1), %r21 81 LDREG ITLB_SID_STRIDE(%r1), %r21
83 LDREG ITLB_SID_COUNT(%r1), %r22 82 LDREG ITLB_SID_COUNT(%r1), %r22
84 LDREG ITLB_OFF_BASE(%r1), %arg0 83 LDREG ITLB_OFF_BASE(%r1), %arg0
85 LDREG ITLB_OFF_STRIDE(%r1), %arg1 84 LDREG ITLB_OFF_STRIDE(%r1), %arg1
86 LDREG ITLB_OFF_COUNT(%r1), %arg2 85 LDREG ITLB_OFF_COUNT(%r1), %arg2
87 LDREG ITLB_LOOP(%r1), %arg3 86 LDREG ITLB_LOOP(%r1), %arg3
88 87
89 addib,COND(=) -1, %arg3, fitoneloop /* Preadjust and test */ 88 addib,COND(=) -1, %arg3, fitoneloop /* Preadjust and test */
90 movb,<,n %arg3, %r31, fitdone /* If loop < 0, skip */ 89 movb,<,n %arg3, %r31, fitdone /* If loop < 0, skip */
91 copy %arg0, %r28 /* Init base addr */ 90 copy %arg0, %r28 /* Init base addr */
92 91
93 fitmanyloop: /* Loop if LOOP >= 2 */ 92 fitmanyloop: /* Loop if LOOP >= 2 */
94 mtsp %r20, %sr1 93 mtsp %r20, %sr1
95 add %r21, %r20, %r20 /* increment space */ 94 add %r21, %r20, %r20 /* increment space */
96 copy %arg2, %r29 /* Init middle loop count */ 95 copy %arg2, %r29 /* Init middle loop count */
97 96
98 fitmanymiddle: /* Loop if LOOP >= 2 */ 97 fitmanymiddle: /* Loop if LOOP >= 2 */
99 addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */ 98 addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */
100 pitlbe 0(%sr1, %r28) 99 pitlbe 0(%sr1, %r28)
101 pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */ 100 pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */
102 addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */ 101 addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */
103 copy %arg3, %r31 /* Re-init inner loop count */ 102 copy %arg3, %r31 /* Re-init inner loop count */
104 103
105 movb,tr %arg0, %r28, fitmanyloop /* Re-init base addr */ 104 movb,tr %arg0, %r28, fitmanyloop /* Re-init base addr */
106 addib,COND(<=),n -1, %r22, fitdone /* Outer loop count decr */ 105 addib,COND(<=),n -1, %r22, fitdone /* Outer loop count decr */
107 106
108 fitoneloop: /* Loop if LOOP = 1 */ 107 fitoneloop: /* Loop if LOOP = 1 */
109 mtsp %r20, %sr1 108 mtsp %r20, %sr1
110 copy %arg0, %r28 /* init base addr */ 109 copy %arg0, %r28 /* init base addr */
111 copy %arg2, %r29 /* init middle loop count */ 110 copy %arg2, %r29 /* init middle loop count */
112 111
113 fitonemiddle: /* Loop if LOOP = 1 */ 112 fitonemiddle: /* Loop if LOOP = 1 */
114 addib,COND(>) -1, %r29, fitonemiddle /* Middle loop count decr */ 113 addib,COND(>) -1, %r29, fitonemiddle /* Middle loop count decr */
115 pitlbe,m %arg1(%sr1, %r28) /* pitlbe for one loop */ 114 pitlbe,m %arg1(%sr1, %r28) /* pitlbe for one loop */
116 115
117 addib,COND(>) -1, %r22, fitoneloop /* Outer loop count decr */ 116 addib,COND(>) -1, %r22, fitoneloop /* Outer loop count decr */
118 add %r21, %r20, %r20 /* increment space */ 117 add %r21, %r20, %r20 /* increment space */
119 118
120 fitdone: 119 fitdone:
121 120
122 /* Flush Data Tlb */ 121 /* Flush Data Tlb */
123 122
124 LDREG DTLB_SID_BASE(%r1), %r20 123 LDREG DTLB_SID_BASE(%r1), %r20
125 LDREG DTLB_SID_STRIDE(%r1), %r21 124 LDREG DTLB_SID_STRIDE(%r1), %r21
126 LDREG DTLB_SID_COUNT(%r1), %r22 125 LDREG DTLB_SID_COUNT(%r1), %r22
127 LDREG DTLB_OFF_BASE(%r1), %arg0 126 LDREG DTLB_OFF_BASE(%r1), %arg0
128 LDREG DTLB_OFF_STRIDE(%r1), %arg1 127 LDREG DTLB_OFF_STRIDE(%r1), %arg1
129 LDREG DTLB_OFF_COUNT(%r1), %arg2 128 LDREG DTLB_OFF_COUNT(%r1), %arg2
130 LDREG DTLB_LOOP(%r1), %arg3 129 LDREG DTLB_LOOP(%r1), %arg3
131 130
132 addib,COND(=) -1, %arg3, fdtoneloop /* Preadjust and test */ 131 addib,COND(=) -1, %arg3, fdtoneloop /* Preadjust and test */
133 movb,<,n %arg3, %r31, fdtdone /* If loop < 0, skip */ 132 movb,<,n %arg3, %r31, fdtdone /* If loop < 0, skip */
134 copy %arg0, %r28 /* Init base addr */ 133 copy %arg0, %r28 /* Init base addr */
135 134
136 fdtmanyloop: /* Loop if LOOP >= 2 */ 135 fdtmanyloop: /* Loop if LOOP >= 2 */
137 mtsp %r20, %sr1 136 mtsp %r20, %sr1
138 add %r21, %r20, %r20 /* increment space */ 137 add %r21, %r20, %r20 /* increment space */
139 copy %arg2, %r29 /* Init middle loop count */ 138 copy %arg2, %r29 /* Init middle loop count */
140 139
141 fdtmanymiddle: /* Loop if LOOP >= 2 */ 140 fdtmanymiddle: /* Loop if LOOP >= 2 */
142 addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */ 141 addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */
143 pdtlbe 0(%sr1, %r28) 142 pdtlbe 0(%sr1, %r28)
144 pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */ 143 pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */
145 addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */ 144 addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */
146 copy %arg3, %r31 /* Re-init inner loop count */ 145 copy %arg3, %r31 /* Re-init inner loop count */
147 146
148 movb,tr %arg0, %r28, fdtmanyloop /* Re-init base addr */ 147 movb,tr %arg0, %r28, fdtmanyloop /* Re-init base addr */
149 addib,COND(<=),n -1, %r22,fdtdone /* Outer loop count decr */ 148 addib,COND(<=),n -1, %r22,fdtdone /* Outer loop count decr */
150 149
151 fdtoneloop: /* Loop if LOOP = 1 */ 150 fdtoneloop: /* Loop if LOOP = 1 */
152 mtsp %r20, %sr1 151 mtsp %r20, %sr1
153 copy %arg0, %r28 /* init base addr */ 152 copy %arg0, %r28 /* init base addr */
154 copy %arg2, %r29 /* init middle loop count */ 153 copy %arg2, %r29 /* init middle loop count */
155 154
156 fdtonemiddle: /* Loop if LOOP = 1 */ 155 fdtonemiddle: /* Loop if LOOP = 1 */
157 addib,COND(>) -1, %r29, fdtonemiddle /* Middle loop count decr */ 156 addib,COND(>) -1, %r29, fdtonemiddle /* Middle loop count decr */
158 pdtlbe,m %arg1(%sr1, %r28) /* pdtlbe for one loop */ 157 pdtlbe,m %arg1(%sr1, %r28) /* pdtlbe for one loop */
159 158
160 addib,COND(>) -1, %r22, fdtoneloop /* Outer loop count decr */ 159 addib,COND(>) -1, %r22, fdtoneloop /* Outer loop count decr */
161 add %r21, %r20, %r20 /* increment space */ 160 add %r21, %r20, %r20 /* increment space */
162 161
163 162
164 fdtdone: 163 fdtdone:
165 /* 164 /*
166 * Switch back to virtual mode 165 * Switch back to virtual mode
167 */ 166 */
168 /* pcxt_ssm_bug */ 167 /* pcxt_ssm_bug */
169 rsm PSW_SM_I, %r0 168 rsm PSW_SM_I, %r0
170 load32 2f, %r1 169 load32 2f, %r1
171 nop 170 nop
172 nop 171 nop
173 nop 172 nop
174 nop 173 nop
175 nop 174 nop
176 175
177 rsm PSW_SM_Q, %r0 /* prep to load iia queue */ 176 rsm PSW_SM_Q, %r0 /* prep to load iia queue */
178 mtctl %r0, %cr17 /* Clear IIASQ tail */ 177 mtctl %r0, %cr17 /* Clear IIASQ tail */
179 mtctl %r0, %cr17 /* Clear IIASQ head */ 178 mtctl %r0, %cr17 /* Clear IIASQ head */
180 mtctl %r1, %cr18 /* IIAOQ head */ 179 mtctl %r1, %cr18 /* IIAOQ head */
181 ldo 4(%r1), %r1 180 ldo 4(%r1), %r1
182 mtctl %r1, %cr18 /* IIAOQ tail */ 181 mtctl %r1, %cr18 /* IIAOQ tail */
183 load32 KERNEL_PSW, %r1 182 load32 KERNEL_PSW, %r1
184 or %r1, %r19, %r1 /* I-bit to state on entry */ 183 or %r1, %r19, %r1 /* I-bit to state on entry */
185 mtctl %r1, %ipsw /* restore I-bit (entire PSW) */ 184 mtctl %r1, %ipsw /* restore I-bit (entire PSW) */
186 rfi 185 rfi
187 nop 186 nop
188 187
189 2: bv %r0(%r2) 188 2: bv %r0(%r2)
190 nop 189 nop
191 190
192 .exit 191 .exit
193 .procend 192 .procend
194 ENDPROC(flush_tlb_all_local) 193 ENDPROC(flush_tlb_all_local)
195 194
196 .import cache_info,data 195 .import cache_info,data
197 196
198 ENTRY(flush_instruction_cache_local) 197 ENTRY(flush_instruction_cache_local)
199 .proc 198 .proc
200 .callinfo NO_CALLS 199 .callinfo NO_CALLS
201 .entry 200 .entry
202 201
203 mtsp %r0, %sr1 202 mtsp %r0, %sr1
204 load32 cache_info, %r1 203 load32 cache_info, %r1
205 204
206 /* Flush Instruction Cache */ 205 /* Flush Instruction Cache */
207 206
208 LDREG ICACHE_BASE(%r1), %arg0 207 LDREG ICACHE_BASE(%r1), %arg0
209 LDREG ICACHE_STRIDE(%r1), %arg1 208 LDREG ICACHE_STRIDE(%r1), %arg1
210 LDREG ICACHE_COUNT(%r1), %arg2 209 LDREG ICACHE_COUNT(%r1), %arg2
211 LDREG ICACHE_LOOP(%r1), %arg3 210 LDREG ICACHE_LOOP(%r1), %arg3
 212 rsm PSW_SM_I, %r22 /* No mmgt ops during loop */ 211 rsm PSW_SM_I, %r22 /* No mmgt ops during loop */
213 addib,COND(=) -1, %arg3, fioneloop /* Preadjust and test */ 212 addib,COND(=) -1, %arg3, fioneloop /* Preadjust and test */
214 movb,<,n %arg3, %r31, fisync /* If loop < 0, do sync */ 213 movb,<,n %arg3, %r31, fisync /* If loop < 0, do sync */
215 214
216 fimanyloop: /* Loop if LOOP >= 2 */ 215 fimanyloop: /* Loop if LOOP >= 2 */
217 addib,COND(>) -1, %r31, fimanyloop /* Adjusted inner loop decr */ 216 addib,COND(>) -1, %r31, fimanyloop /* Adjusted inner loop decr */
218 fice %r0(%sr1, %arg0) 217 fice %r0(%sr1, %arg0)
219 fice,m %arg1(%sr1, %arg0) /* Last fice and addr adjust */ 218 fice,m %arg1(%sr1, %arg0) /* Last fice and addr adjust */
220 movb,tr %arg3, %r31, fimanyloop /* Re-init inner loop count */ 219 movb,tr %arg3, %r31, fimanyloop /* Re-init inner loop count */
221 addib,COND(<=),n -1, %arg2, fisync /* Outer loop decr */ 220 addib,COND(<=),n -1, %arg2, fisync /* Outer loop decr */
222 221
223 fioneloop: /* Loop if LOOP = 1 */ 222 fioneloop: /* Loop if LOOP = 1 */
224 addib,COND(>) -1, %arg2, fioneloop /* Outer loop count decr */ 223 addib,COND(>) -1, %arg2, fioneloop /* Outer loop count decr */
225 fice,m %arg1(%sr1, %arg0) /* Fice for one loop */ 224 fice,m %arg1(%sr1, %arg0) /* Fice for one loop */
226 225
227 fisync: 226 fisync:
228 sync 227 sync
229 mtsm %r22 /* restore I-bit */ 228 mtsm %r22 /* restore I-bit */
230 bv %r0(%r2) 229 bv %r0(%r2)
231 nop 230 nop
232 .exit 231 .exit
233 232
234 .procend 233 .procend
235 ENDPROC(flush_instruction_cache_local) 234 ENDPROC(flush_instruction_cache_local)
236 235
237 236
238 .import cache_info, data 237 .import cache_info, data
239 ENTRY(flush_data_cache_local) 238 ENTRY(flush_data_cache_local)
240 .proc 239 .proc
241 .callinfo NO_CALLS 240 .callinfo NO_CALLS
242 .entry 241 .entry
243 242
244 mtsp %r0, %sr1 243 mtsp %r0, %sr1
245 load32 cache_info, %r1 244 load32 cache_info, %r1
246 245
247 /* Flush Data Cache */ 246 /* Flush Data Cache */
248 247
249 LDREG DCACHE_BASE(%r1), %arg0 248 LDREG DCACHE_BASE(%r1), %arg0
250 LDREG DCACHE_STRIDE(%r1), %arg1 249 LDREG DCACHE_STRIDE(%r1), %arg1
251 LDREG DCACHE_COUNT(%r1), %arg2 250 LDREG DCACHE_COUNT(%r1), %arg2
252 LDREG DCACHE_LOOP(%r1), %arg3 251 LDREG DCACHE_LOOP(%r1), %arg3
253 rsm PSW_SM_I, %r22 252 rsm PSW_SM_I, %r22
254 addib,COND(=) -1, %arg3, fdoneloop /* Preadjust and test */ 253 addib,COND(=) -1, %arg3, fdoneloop /* Preadjust and test */
255 movb,<,n %arg3, %r31, fdsync /* If loop < 0, do sync */ 254 movb,<,n %arg3, %r31, fdsync /* If loop < 0, do sync */
256 255
257 fdmanyloop: /* Loop if LOOP >= 2 */ 256 fdmanyloop: /* Loop if LOOP >= 2 */
258 addib,COND(>) -1, %r31, fdmanyloop /* Adjusted inner loop decr */ 257 addib,COND(>) -1, %r31, fdmanyloop /* Adjusted inner loop decr */
259 fdce %r0(%sr1, %arg0) 258 fdce %r0(%sr1, %arg0)
260 fdce,m %arg1(%sr1, %arg0) /* Last fdce and addr adjust */ 259 fdce,m %arg1(%sr1, %arg0) /* Last fdce and addr adjust */
261 movb,tr %arg3, %r31, fdmanyloop /* Re-init inner loop count */ 260 movb,tr %arg3, %r31, fdmanyloop /* Re-init inner loop count */
262 addib,COND(<=),n -1, %arg2, fdsync /* Outer loop decr */ 261 addib,COND(<=),n -1, %arg2, fdsync /* Outer loop decr */
263 262
264 fdoneloop: /* Loop if LOOP = 1 */ 263 fdoneloop: /* Loop if LOOP = 1 */
265 addib,COND(>) -1, %arg2, fdoneloop /* Outer loop count decr */ 264 addib,COND(>) -1, %arg2, fdoneloop /* Outer loop count decr */
266 fdce,m %arg1(%sr1, %arg0) /* Fdce for one loop */ 265 fdce,m %arg1(%sr1, %arg0) /* Fdce for one loop */
267 266
268 fdsync: 267 fdsync:
269 syncdma 268 syncdma
270 sync 269 sync
271 mtsm %r22 /* restore I-bit */ 270 mtsm %r22 /* restore I-bit */
272 bv %r0(%r2) 271 bv %r0(%r2)
273 nop 272 nop
274 .exit 273 .exit
275 274
276 .procend 275 .procend
277 ENDPROC(flush_data_cache_local) 276 ENDPROC(flush_data_cache_local)
278 277
279 .align 16 278 .align 16
280 279
281 ENTRY(copy_user_page_asm) 280 ENTRY(copy_user_page_asm)
282 .proc 281 .proc
283 .callinfo NO_CALLS 282 .callinfo NO_CALLS
284 .entry 283 .entry
285 284
286 #ifdef CONFIG_64BIT 285 #ifdef CONFIG_64BIT
287 /* PA8x00 CPUs can consume 2 loads or 1 store per cycle. 286 /* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
288 * Unroll the loop by hand and arrange insn appropriately. 287 * Unroll the loop by hand and arrange insn appropriately.
289 * GCC probably can do this just as well. 288 * GCC probably can do this just as well.
290 */ 289 */
291 290
292 ldd 0(%r25), %r19 291 ldd 0(%r25), %r19
293 ldi (PAGE_SIZE / 128), %r1 292 ldi (PAGE_SIZE / 128), %r1
294 293
295 ldw 64(%r25), %r0 /* prefetch 1 cacheline ahead */ 294 ldw 64(%r25), %r0 /* prefetch 1 cacheline ahead */
296 ldw 128(%r25), %r0 /* prefetch 2 */ 295 ldw 128(%r25), %r0 /* prefetch 2 */
297 296
298 1: ldd 8(%r25), %r20 297 1: ldd 8(%r25), %r20
299 ldw 192(%r25), %r0 /* prefetch 3 */ 298 ldw 192(%r25), %r0 /* prefetch 3 */
300 ldw 256(%r25), %r0 /* prefetch 4 */ 299 ldw 256(%r25), %r0 /* prefetch 4 */
301 300
302 ldd 16(%r25), %r21 301 ldd 16(%r25), %r21
303 ldd 24(%r25), %r22 302 ldd 24(%r25), %r22
304 std %r19, 0(%r26) 303 std %r19, 0(%r26)
305 std %r20, 8(%r26) 304 std %r20, 8(%r26)
306 305
307 ldd 32(%r25), %r19 306 ldd 32(%r25), %r19
308 ldd 40(%r25), %r20 307 ldd 40(%r25), %r20
309 std %r21, 16(%r26) 308 std %r21, 16(%r26)
310 std %r22, 24(%r26) 309 std %r22, 24(%r26)
311 310
312 ldd 48(%r25), %r21 311 ldd 48(%r25), %r21
313 ldd 56(%r25), %r22 312 ldd 56(%r25), %r22
314 std %r19, 32(%r26) 313 std %r19, 32(%r26)
315 std %r20, 40(%r26) 314 std %r20, 40(%r26)
316 315
317 ldd 64(%r25), %r19 316 ldd 64(%r25), %r19
318 ldd 72(%r25), %r20 317 ldd 72(%r25), %r20
319 std %r21, 48(%r26) 318 std %r21, 48(%r26)
320 std %r22, 56(%r26) 319 std %r22, 56(%r26)
321 320
322 ldd 80(%r25), %r21 321 ldd 80(%r25), %r21
323 ldd 88(%r25), %r22 322 ldd 88(%r25), %r22
324 std %r19, 64(%r26) 323 std %r19, 64(%r26)
325 std %r20, 72(%r26) 324 std %r20, 72(%r26)
326 325
327 ldd 96(%r25), %r19 326 ldd 96(%r25), %r19
328 ldd 104(%r25), %r20 327 ldd 104(%r25), %r20
329 std %r21, 80(%r26) 328 std %r21, 80(%r26)
330 std %r22, 88(%r26) 329 std %r22, 88(%r26)
331 330
332 ldd 112(%r25), %r21 331 ldd 112(%r25), %r21
333 ldd 120(%r25), %r22 332 ldd 120(%r25), %r22
334 std %r19, 96(%r26) 333 std %r19, 96(%r26)
335 std %r20, 104(%r26) 334 std %r20, 104(%r26)
336 335
337 ldo 128(%r25), %r25 336 ldo 128(%r25), %r25
338 std %r21, 112(%r26) 337 std %r21, 112(%r26)
339 std %r22, 120(%r26) 338 std %r22, 120(%r26)
340 ldo 128(%r26), %r26 339 ldo 128(%r26), %r26
341 340
342 /* conditional branches nullify on forward taken branch, and on 341 /* conditional branches nullify on forward taken branch, and on
343 * non-taken backward branch. Note that .+4 is a backwards branch. 342 * non-taken backward branch. Note that .+4 is a backwards branch.
344 * The ldd should only get executed if the branch is taken. 343 * The ldd should only get executed if the branch is taken.
345 */ 344 */
346 addib,COND(>),n -1, %r1, 1b /* bundle 10 */ 345 addib,COND(>),n -1, %r1, 1b /* bundle 10 */
347 ldd 0(%r25), %r19 /* start next loads */ 346 ldd 0(%r25), %r19 /* start next loads */
348 347
349 #else 348 #else
350 349
351 /* 350 /*
352 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw 351 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
353 * bundles (very restricted rules for bundling). 352 * bundles (very restricted rules for bundling).
354 * Note that until (if) we start saving 353 * Note that until (if) we start saving
355 * the full 64 bit register values on interrupt, we can't 354 * the full 64 bit register values on interrupt, we can't
356 * use ldd/std on a 32 bit kernel. 355 * use ldd/std on a 32 bit kernel.
357 */ 356 */
358 ldw 0(%r25), %r19 357 ldw 0(%r25), %r19
359 ldi (PAGE_SIZE / 64), %r1 358 ldi (PAGE_SIZE / 64), %r1
360 359
361 1: 360 1:
362 ldw 4(%r25), %r20 361 ldw 4(%r25), %r20
363 ldw 8(%r25), %r21 362 ldw 8(%r25), %r21
364 ldw 12(%r25), %r22 363 ldw 12(%r25), %r22
365 stw %r19, 0(%r26) 364 stw %r19, 0(%r26)
366 stw %r20, 4(%r26) 365 stw %r20, 4(%r26)
367 stw %r21, 8(%r26) 366 stw %r21, 8(%r26)
368 stw %r22, 12(%r26) 367 stw %r22, 12(%r26)
369 ldw 16(%r25), %r19 368 ldw 16(%r25), %r19
370 ldw 20(%r25), %r20 369 ldw 20(%r25), %r20
371 ldw 24(%r25), %r21 370 ldw 24(%r25), %r21
372 ldw 28(%r25), %r22 371 ldw 28(%r25), %r22
373 stw %r19, 16(%r26) 372 stw %r19, 16(%r26)
374 stw %r20, 20(%r26) 373 stw %r20, 20(%r26)
375 stw %r21, 24(%r26) 374 stw %r21, 24(%r26)
376 stw %r22, 28(%r26) 375 stw %r22, 28(%r26)
377 ldw 32(%r25), %r19 376 ldw 32(%r25), %r19
378 ldw 36(%r25), %r20 377 ldw 36(%r25), %r20
379 ldw 40(%r25), %r21 378 ldw 40(%r25), %r21
380 ldw 44(%r25), %r22 379 ldw 44(%r25), %r22
381 stw %r19, 32(%r26) 380 stw %r19, 32(%r26)
382 stw %r20, 36(%r26) 381 stw %r20, 36(%r26)
383 stw %r21, 40(%r26) 382 stw %r21, 40(%r26)
384 stw %r22, 44(%r26) 383 stw %r22, 44(%r26)
385 ldw 48(%r25), %r19 384 ldw 48(%r25), %r19
386 ldw 52(%r25), %r20 385 ldw 52(%r25), %r20
387 ldw 56(%r25), %r21 386 ldw 56(%r25), %r21
388 ldw 60(%r25), %r22 387 ldw 60(%r25), %r22
389 stw %r19, 48(%r26) 388 stw %r19, 48(%r26)
390 stw %r20, 52(%r26) 389 stw %r20, 52(%r26)
391 ldo 64(%r25), %r25 390 ldo 64(%r25), %r25
392 stw %r21, 56(%r26) 391 stw %r21, 56(%r26)
393 stw %r22, 60(%r26) 392 stw %r22, 60(%r26)
394 ldo 64(%r26), %r26 393 ldo 64(%r26), %r26
395 addib,COND(>),n -1, %r1, 1b 394 addib,COND(>),n -1, %r1, 1b
396 ldw 0(%r25), %r19 395 ldw 0(%r25), %r19
397 #endif 396 #endif
398 bv %r0(%r2) 397 bv %r0(%r2)
399 nop 398 nop
400 .exit 399 .exit
401 400
402 .procend 401 .procend
403 ENDPROC(copy_user_page_asm) 402 ENDPROC(copy_user_page_asm)
404 403
405 /* 404 /*
406 * NOTE: Code in clear_user_page has a hard coded dependency on the 405 * NOTE: Code in clear_user_page has a hard coded dependency on the
 407 * maximum alias boundary being 4 MB. We've been assured by the 406 * maximum alias boundary being 4 MB. We've been assured by the
408 * parisc chip designers that there will not ever be a parisc 407 * parisc chip designers that there will not ever be a parisc
409 * chip with a larger alias boundary (Never say never :-) ). 408 * chip with a larger alias boundary (Never say never :-) ).
410 * 409 *
411 * Subtle: the dtlb miss handlers support the temp alias region by 410 * Subtle: the dtlb miss handlers support the temp alias region by
412 * "knowing" that if a dtlb miss happens within the temp alias 411 * "knowing" that if a dtlb miss happens within the temp alias
413 * region it must have occurred while in clear_user_page. Since 412 * region it must have occurred while in clear_user_page. Since
414 * this routine makes use of processor local translations, we 413 * this routine makes use of processor local translations, we
415 * don't want to insert them into the kernel page table. Instead, 414 * don't want to insert them into the kernel page table. Instead,
416 * we load up some general registers (they need to be registers 415 * we load up some general registers (they need to be registers
417 * which aren't shadowed) with the physical page numbers (preshifted 416 * which aren't shadowed) with the physical page numbers (preshifted
418 * for tlb insertion) needed to insert the translations. When we 417 * for tlb insertion) needed to insert the translations. When we
419 * miss on the translation, the dtlb miss handler inserts the 418 * miss on the translation, the dtlb miss handler inserts the
420 * translation into the tlb using these values: 419 * translation into the tlb using these values:
421 * 420 *
422 * %r26 physical page (shifted for tlb insert) of "to" translation 421 * %r26 physical page (shifted for tlb insert) of "to" translation
423 * %r23 physical page (shifted for tlb insert) of "from" translation 422 * %r23 physical page (shifted for tlb insert) of "from" translation
424 */ 423 */
425 424
426 #if 0 425 #if 0
427 426
428 /* 427 /*
429 * We can't do this since copy_user_page is used to bring in 428 * We can't do this since copy_user_page is used to bring in
430 * file data that might have instructions. Since the data would 429 * file data that might have instructions. Since the data would
431 * then need to be flushed out so the i-fetch can see it, it 430 * then need to be flushed out so the i-fetch can see it, it
432 * makes more sense to just copy through the kernel translation 431 * makes more sense to just copy through the kernel translation
433 * and flush it. 432 * and flush it.
434 * 433 *
435 * I'm still keeping this around because it may be possible to 434 * I'm still keeping this around because it may be possible to
436 * use it if more information is passed into copy_user_page(). 435 * use it if more information is passed into copy_user_page().
437 * Have to do some measurements to see if it is worthwhile to 436 * Have to do some measurements to see if it is worthwhile to
438 * lobby for such a change. 437 * lobby for such a change.
439 */ 438 */
440 439
441 ENTRY(copy_user_page_asm) 440 ENTRY(copy_user_page_asm)
442 .proc 441 .proc
443 .callinfo NO_CALLS 442 .callinfo NO_CALLS
444 .entry 443 .entry
445 444
446 ldil L%(__PAGE_OFFSET), %r1 445 ldil L%(__PAGE_OFFSET), %r1
447 sub %r26, %r1, %r26 446 sub %r26, %r1, %r26
448 sub %r25, %r1, %r23 /* move physical addr into non shadowed reg */ 447 sub %r25, %r1, %r23 /* move physical addr into non shadowed reg */
449 448
450 ldil L%(TMPALIAS_MAP_START), %r28 449 ldil L%(TMPALIAS_MAP_START), %r28
451 /* FIXME for different page sizes != 4k */ 450 /* FIXME for different page sizes != 4k */
452 #ifdef CONFIG_64BIT 451 #ifdef CONFIG_64BIT
453 extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */ 452 extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */
454 extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */ 453 extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */
455 depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */ 454 depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */
456 depdi 0, 63,12, %r28 /* Clear any offset bits */ 455 depdi 0, 63,12, %r28 /* Clear any offset bits */
457 copy %r28, %r29 456 copy %r28, %r29
458 depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */ 457 depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */
459 #else 458 #else
460 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ 459 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
461 extrw,u %r23, 24,25, %r23 /* convert phys addr to tlb insert format */ 460 extrw,u %r23, 24,25, %r23 /* convert phys addr to tlb insert format */
462 depw %r24, 31,22, %r28 /* Form aliased virtual address 'to' */ 461 depw %r24, 31,22, %r28 /* Form aliased virtual address 'to' */
463 depwi 0, 31,12, %r28 /* Clear any offset bits */ 462 depwi 0, 31,12, %r28 /* Clear any offset bits */
464 copy %r28, %r29 463 copy %r28, %r29
465 depwi 1, 9,1, %r29 /* Form aliased virtual address 'from' */ 464 depwi 1, 9,1, %r29 /* Form aliased virtual address 'from' */
466 #endif 465 #endif
467 466
468 /* Purge any old translations */ 467 /* Purge any old translations */
469 468
470 pdtlb 0(%r28) 469 pdtlb 0(%r28)
471 pdtlb 0(%r29) 470 pdtlb 0(%r29)
472 471
473 ldi 64, %r1 472 ldi 64, %r1
474 473
475 /* 474 /*
476 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw 475 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
477 * bundles (very restricted rules for bundling). It probably 476 * bundles (very restricted rules for bundling). It probably
478 * does OK on PCXU and better, but we could do better with 477 * does OK on PCXU and better, but we could do better with
479 * ldd/std instructions. Note that until (if) we start saving 478 * ldd/std instructions. Note that until (if) we start saving
480 * the full 64 bit register values on interrupt, we can't 479 * the full 64 bit register values on interrupt, we can't
481 * use ldd/std on a 32 bit kernel. 480 * use ldd/std on a 32 bit kernel.
482 */ 481 */
483 482
484 483
485 1: 484 1:
486 ldw 0(%r29), %r19 485 ldw 0(%r29), %r19
487 ldw 4(%r29), %r20 486 ldw 4(%r29), %r20
488 ldw 8(%r29), %r21 487 ldw 8(%r29), %r21
489 ldw 12(%r29), %r22 488 ldw 12(%r29), %r22
490 stw %r19, 0(%r28) 489 stw %r19, 0(%r28)
491 stw %r20, 4(%r28) 490 stw %r20, 4(%r28)
492 stw %r21, 8(%r28) 491 stw %r21, 8(%r28)
493 stw %r22, 12(%r28) 492 stw %r22, 12(%r28)
494 ldw 16(%r29), %r19 493 ldw 16(%r29), %r19
495 ldw 20(%r29), %r20 494 ldw 20(%r29), %r20
496 ldw 24(%r29), %r21 495 ldw 24(%r29), %r21
497 ldw 28(%r29), %r22 496 ldw 28(%r29), %r22
498 stw %r19, 16(%r28) 497 stw %r19, 16(%r28)
499 stw %r20, 20(%r28) 498 stw %r20, 20(%r28)
500 stw %r21, 24(%r28) 499 stw %r21, 24(%r28)
501 stw %r22, 28(%r28) 500 stw %r22, 28(%r28)
502 ldw 32(%r29), %r19 501 ldw 32(%r29), %r19
503 ldw 36(%r29), %r20 502 ldw 36(%r29), %r20
504 ldw 40(%r29), %r21 503 ldw 40(%r29), %r21
505 ldw 44(%r29), %r22 504 ldw 44(%r29), %r22
506 stw %r19, 32(%r28) 505 stw %r19, 32(%r28)
507 stw %r20, 36(%r28) 506 stw %r20, 36(%r28)
508 stw %r21, 40(%r28) 507 stw %r21, 40(%r28)
509 stw %r22, 44(%r28) 508 stw %r22, 44(%r28)
510 ldw 48(%r29), %r19 509 ldw 48(%r29), %r19
511 ldw 52(%r29), %r20 510 ldw 52(%r29), %r20
512 ldw 56(%r29), %r21 511 ldw 56(%r29), %r21
513 ldw 60(%r29), %r22 512 ldw 60(%r29), %r22
514 stw %r19, 48(%r28) 513 stw %r19, 48(%r28)
515 stw %r20, 52(%r28) 514 stw %r20, 52(%r28)
516 stw %r21, 56(%r28) 515 stw %r21, 56(%r28)
517 stw %r22, 60(%r28) 516 stw %r22, 60(%r28)
518 ldo 64(%r28), %r28 517 ldo 64(%r28), %r28
519 addib,COND(>) -1, %r1,1b 518 addib,COND(>) -1, %r1,1b
520 ldo 64(%r29), %r29 519 ldo 64(%r29), %r29
521 520
522 bv %r0(%r2) 521 bv %r0(%r2)
523 nop 522 nop
524 .exit 523 .exit
525 524
526 .procend 525 .procend
527 ENDPROC(copy_user_page_asm) 526 ENDPROC(copy_user_page_asm)
528 #endif 527 #endif
529 528
530 ENTRY(__clear_user_page_asm) 529 ENTRY(__clear_user_page_asm)
531 .proc 530 .proc
532 .callinfo NO_CALLS 531 .callinfo NO_CALLS
533 .entry 532 .entry
534 533
535 tophys_r1 %r26 534 tophys_r1 %r26
536 535
537 ldil L%(TMPALIAS_MAP_START), %r28 536 ldil L%(TMPALIAS_MAP_START), %r28
538 #ifdef CONFIG_64BIT 537 #ifdef CONFIG_64BIT
539 #if (TMPALIAS_MAP_START >= 0x80000000) 538 #if (TMPALIAS_MAP_START >= 0x80000000)
540 depdi 0, 31,32, %r28 /* clear any sign extension */ 539 depdi 0, 31,32, %r28 /* clear any sign extension */
 541 /* FIXME: page size dependent */ 540 /* FIXME: page size dependent */
542 #endif 541 #endif
543 extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */ 542 extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
544 depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ 543 depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
545 depdi 0, 63,12, %r28 /* Clear any offset bits */ 544 depdi 0, 63,12, %r28 /* Clear any offset bits */
546 #else 545 #else
547 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ 546 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
548 depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ 547 depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
549 depwi 0, 31,12, %r28 /* Clear any offset bits */ 548 depwi 0, 31,12, %r28 /* Clear any offset bits */
550 #endif 549 #endif
551 550
552 /* Purge any old translation */ 551 /* Purge any old translation */
553 552
554 pdtlb 0(%r28) 553 pdtlb 0(%r28)
555 554
556 #ifdef CONFIG_64BIT 555 #ifdef CONFIG_64BIT
557 ldi (PAGE_SIZE / 128), %r1 556 ldi (PAGE_SIZE / 128), %r1
558 557
559 /* PREFETCH (Write) has not (yet) been proven to help here */ 558 /* PREFETCH (Write) has not (yet) been proven to help here */
560 /* #define PREFETCHW_OP ldd 256(%0), %r0 */ 559 /* #define PREFETCHW_OP ldd 256(%0), %r0 */
561 560
562 1: std %r0, 0(%r28) 561 1: std %r0, 0(%r28)
563 std %r0, 8(%r28) 562 std %r0, 8(%r28)
564 std %r0, 16(%r28) 563 std %r0, 16(%r28)
565 std %r0, 24(%r28) 564 std %r0, 24(%r28)
566 std %r0, 32(%r28) 565 std %r0, 32(%r28)
567 std %r0, 40(%r28) 566 std %r0, 40(%r28)
568 std %r0, 48(%r28) 567 std %r0, 48(%r28)
569 std %r0, 56(%r28) 568 std %r0, 56(%r28)
570 std %r0, 64(%r28) 569 std %r0, 64(%r28)
571 std %r0, 72(%r28) 570 std %r0, 72(%r28)
572 std %r0, 80(%r28) 571 std %r0, 80(%r28)
573 std %r0, 88(%r28) 572 std %r0, 88(%r28)
574 std %r0, 96(%r28) 573 std %r0, 96(%r28)
575 std %r0, 104(%r28) 574 std %r0, 104(%r28)
576 std %r0, 112(%r28) 575 std %r0, 112(%r28)
577 std %r0, 120(%r28) 576 std %r0, 120(%r28)
578 addib,COND(>) -1, %r1, 1b 577 addib,COND(>) -1, %r1, 1b
579 ldo 128(%r28), %r28 578 ldo 128(%r28), %r28
580 579
581 #else /* ! CONFIG_64BIT */ 580 #else /* ! CONFIG_64BIT */
582 ldi (PAGE_SIZE / 64), %r1 581 ldi (PAGE_SIZE / 64), %r1
583 582
584 1: 583 1:
585 stw %r0, 0(%r28) 584 stw %r0, 0(%r28)
586 stw %r0, 4(%r28) 585 stw %r0, 4(%r28)
587 stw %r0, 8(%r28) 586 stw %r0, 8(%r28)
588 stw %r0, 12(%r28) 587 stw %r0, 12(%r28)
589 stw %r0, 16(%r28) 588 stw %r0, 16(%r28)
590 stw %r0, 20(%r28) 589 stw %r0, 20(%r28)
591 stw %r0, 24(%r28) 590 stw %r0, 24(%r28)
592 stw %r0, 28(%r28) 591 stw %r0, 28(%r28)
593 stw %r0, 32(%r28) 592 stw %r0, 32(%r28)
594 stw %r0, 36(%r28) 593 stw %r0, 36(%r28)
595 stw %r0, 40(%r28) 594 stw %r0, 40(%r28)
596 stw %r0, 44(%r28) 595 stw %r0, 44(%r28)
597 stw %r0, 48(%r28) 596 stw %r0, 48(%r28)
598 stw %r0, 52(%r28) 597 stw %r0, 52(%r28)
599 stw %r0, 56(%r28) 598 stw %r0, 56(%r28)
600 stw %r0, 60(%r28) 599 stw %r0, 60(%r28)
601 addib,COND(>) -1, %r1, 1b 600 addib,COND(>) -1, %r1, 1b
602 ldo 64(%r28), %r28 601 ldo 64(%r28), %r28
603 #endif /* CONFIG_64BIT */ 602 #endif /* CONFIG_64BIT */
604 603
605 bv %r0(%r2) 604 bv %r0(%r2)
606 nop 605 nop
607 .exit 606 .exit
608 607
609 .procend 608 .procend
610 ENDPROC(__clear_user_page_asm) 609 ENDPROC(__clear_user_page_asm)
611 610
612 ENTRY(flush_kernel_dcache_page_asm) 611 ENTRY(flush_kernel_dcache_page_asm)
613 .proc 612 .proc
614 .callinfo NO_CALLS 613 .callinfo NO_CALLS
615 .entry 614 .entry
616 615
617 ldil L%dcache_stride, %r1 616 ldil L%dcache_stride, %r1
618 ldw R%dcache_stride(%r1), %r23 617 ldw R%dcache_stride(%r1), %r23
619 618
620 #ifdef CONFIG_64BIT 619 #ifdef CONFIG_64BIT
621 depdi,z 1, 63-PAGE_SHIFT,1, %r25 620 depdi,z 1, 63-PAGE_SHIFT,1, %r25
622 #else 621 #else
623 depwi,z 1, 31-PAGE_SHIFT,1, %r25 622 depwi,z 1, 31-PAGE_SHIFT,1, %r25
624 #endif 623 #endif
625 add %r26, %r25, %r25 624 add %r26, %r25, %r25
626 sub %r25, %r23, %r25 625 sub %r25, %r23, %r25
627 626
628 627
629 1: fdc,m %r23(%r26) 628 1: fdc,m %r23(%r26)
630 fdc,m %r23(%r26) 629 fdc,m %r23(%r26)
631 fdc,m %r23(%r26) 630 fdc,m %r23(%r26)
632 fdc,m %r23(%r26) 631 fdc,m %r23(%r26)
633 fdc,m %r23(%r26) 632 fdc,m %r23(%r26)
634 fdc,m %r23(%r26) 633 fdc,m %r23(%r26)
635 fdc,m %r23(%r26) 634 fdc,m %r23(%r26)
636 fdc,m %r23(%r26) 635 fdc,m %r23(%r26)
637 fdc,m %r23(%r26) 636 fdc,m %r23(%r26)
638 fdc,m %r23(%r26) 637 fdc,m %r23(%r26)
639 fdc,m %r23(%r26) 638 fdc,m %r23(%r26)
640 fdc,m %r23(%r26) 639 fdc,m %r23(%r26)
641 fdc,m %r23(%r26) 640 fdc,m %r23(%r26)
642 fdc,m %r23(%r26) 641 fdc,m %r23(%r26)
643 fdc,m %r23(%r26) 642 fdc,m %r23(%r26)
644 cmpb,COND(<<) %r26, %r25,1b 643 cmpb,COND(<<) %r26, %r25,1b
645 fdc,m %r23(%r26) 644 fdc,m %r23(%r26)
646 645
647 sync 646 sync
648 bv %r0(%r2) 647 bv %r0(%r2)
649 nop 648 nop
650 .exit 649 .exit
651 650
652 .procend 651 .procend
653 ENDPROC(flush_kernel_dcache_page_asm) 652 ENDPROC(flush_kernel_dcache_page_asm)
654 653
655 ENTRY(flush_user_dcache_page) 654 ENTRY(flush_user_dcache_page)
656 .proc 655 .proc
657 .callinfo NO_CALLS 656 .callinfo NO_CALLS
658 .entry 657 .entry
659 658
660 ldil L%dcache_stride, %r1 659 ldil L%dcache_stride, %r1
661 ldw R%dcache_stride(%r1), %r23 660 ldw R%dcache_stride(%r1), %r23
662 661
663 #ifdef CONFIG_64BIT 662 #ifdef CONFIG_64BIT
664 depdi,z 1,63-PAGE_SHIFT,1, %r25 663 depdi,z 1,63-PAGE_SHIFT,1, %r25
665 #else 664 #else
666 depwi,z 1,31-PAGE_SHIFT,1, %r25 665 depwi,z 1,31-PAGE_SHIFT,1, %r25
667 #endif 666 #endif
668 add %r26, %r25, %r25 667 add %r26, %r25, %r25
669 sub %r25, %r23, %r25 668 sub %r25, %r23, %r25
670 669
671 670
672 1: fdc,m %r23(%sr3, %r26) 671 1: fdc,m %r23(%sr3, %r26)
673 fdc,m %r23(%sr3, %r26) 672 fdc,m %r23(%sr3, %r26)
674 fdc,m %r23(%sr3, %r26) 673 fdc,m %r23(%sr3, %r26)
675 fdc,m %r23(%sr3, %r26) 674 fdc,m %r23(%sr3, %r26)
676 fdc,m %r23(%sr3, %r26) 675 fdc,m %r23(%sr3, %r26)
677 fdc,m %r23(%sr3, %r26) 676 fdc,m %r23(%sr3, %r26)
678 fdc,m %r23(%sr3, %r26) 677 fdc,m %r23(%sr3, %r26)
679 fdc,m %r23(%sr3, %r26) 678 fdc,m %r23(%sr3, %r26)
680 fdc,m %r23(%sr3, %r26) 679 fdc,m %r23(%sr3, %r26)
681 fdc,m %r23(%sr3, %r26) 680 fdc,m %r23(%sr3, %r26)
682 fdc,m %r23(%sr3, %r26) 681 fdc,m %r23(%sr3, %r26)
683 fdc,m %r23(%sr3, %r26) 682 fdc,m %r23(%sr3, %r26)
684 fdc,m %r23(%sr3, %r26) 683 fdc,m %r23(%sr3, %r26)
685 fdc,m %r23(%sr3, %r26) 684 fdc,m %r23(%sr3, %r26)
686 fdc,m %r23(%sr3, %r26) 685 fdc,m %r23(%sr3, %r26)
687 cmpb,COND(<<) %r26, %r25,1b 686 cmpb,COND(<<) %r26, %r25,1b
688 fdc,m %r23(%sr3, %r26) 687 fdc,m %r23(%sr3, %r26)
689 688
690 sync 689 sync
691 bv %r0(%r2) 690 bv %r0(%r2)
692 nop 691 nop
693 .exit 692 .exit
694 693
695 .procend 694 .procend
696 ENDPROC(flush_user_dcache_page) 695 ENDPROC(flush_user_dcache_page)
697 696
698 ENTRY(flush_user_icache_page) 697 ENTRY(flush_user_icache_page)
699 .proc 698 .proc
700 .callinfo NO_CALLS 699 .callinfo NO_CALLS
701 .entry 700 .entry
702 701
703 ldil L%dcache_stride, %r1 702 ldil L%dcache_stride, %r1
704 ldw R%dcache_stride(%r1), %r23 703 ldw R%dcache_stride(%r1), %r23
705 704
706 #ifdef CONFIG_64BIT 705 #ifdef CONFIG_64BIT
707 depdi,z 1, 63-PAGE_SHIFT,1, %r25 706 depdi,z 1, 63-PAGE_SHIFT,1, %r25
708 #else 707 #else
709 depwi,z 1, 31-PAGE_SHIFT,1, %r25 708 depwi,z 1, 31-PAGE_SHIFT,1, %r25
710 #endif 709 #endif
711 add %r26, %r25, %r25 710 add %r26, %r25, %r25
712 sub %r25, %r23, %r25 711 sub %r25, %r23, %r25
713 712
714 713
715 1: fic,m %r23(%sr3, %r26) 714 1: fic,m %r23(%sr3, %r26)
716 fic,m %r23(%sr3, %r26) 715 fic,m %r23(%sr3, %r26)
717 fic,m %r23(%sr3, %r26) 716 fic,m %r23(%sr3, %r26)
718 fic,m %r23(%sr3, %r26) 717 fic,m %r23(%sr3, %r26)
719 fic,m %r23(%sr3, %r26) 718 fic,m %r23(%sr3, %r26)
720 fic,m %r23(%sr3, %r26) 719 fic,m %r23(%sr3, %r26)
721 fic,m %r23(%sr3, %r26) 720 fic,m %r23(%sr3, %r26)
722 fic,m %r23(%sr3, %r26) 721 fic,m %r23(%sr3, %r26)
723 fic,m %r23(%sr3, %r26) 722 fic,m %r23(%sr3, %r26)
724 fic,m %r23(%sr3, %r26) 723 fic,m %r23(%sr3, %r26)
725 fic,m %r23(%sr3, %r26) 724 fic,m %r23(%sr3, %r26)
726 fic,m %r23(%sr3, %r26) 725 fic,m %r23(%sr3, %r26)
727 fic,m %r23(%sr3, %r26) 726 fic,m %r23(%sr3, %r26)
728 fic,m %r23(%sr3, %r26) 727 fic,m %r23(%sr3, %r26)
729 fic,m %r23(%sr3, %r26) 728 fic,m %r23(%sr3, %r26)
730 cmpb,COND(<<) %r26, %r25,1b 729 cmpb,COND(<<) %r26, %r25,1b
731 fic,m %r23(%sr3, %r26) 730 fic,m %r23(%sr3, %r26)
732 731
733 sync 732 sync
734 bv %r0(%r2) 733 bv %r0(%r2)
735 nop 734 nop
736 .exit 735 .exit
737 736
738 .procend 737 .procend
739 ENDPROC(flush_user_icache_page) 738 ENDPROC(flush_user_icache_page)
740 739
741 740
742 ENTRY(purge_kernel_dcache_page) 741 ENTRY(purge_kernel_dcache_page)
743 .proc 742 .proc
744 .callinfo NO_CALLS 743 .callinfo NO_CALLS
745 .entry 744 .entry
746 745
747 ldil L%dcache_stride, %r1 746 ldil L%dcache_stride, %r1
748 ldw R%dcache_stride(%r1), %r23 747 ldw R%dcache_stride(%r1), %r23
749 748
750 #ifdef CONFIG_64BIT 749 #ifdef CONFIG_64BIT
751 depdi,z 1, 63-PAGE_SHIFT,1, %r25 750 depdi,z 1, 63-PAGE_SHIFT,1, %r25
752 #else 751 #else
753 depwi,z 1, 31-PAGE_SHIFT,1, %r25 752 depwi,z 1, 31-PAGE_SHIFT,1, %r25
754 #endif 753 #endif
755 add %r26, %r25, %r25 754 add %r26, %r25, %r25
756 sub %r25, %r23, %r25 755 sub %r25, %r23, %r25
757 756
758 1: pdc,m %r23(%r26) 757 1: pdc,m %r23(%r26)
759 pdc,m %r23(%r26) 758 pdc,m %r23(%r26)
760 pdc,m %r23(%r26) 759 pdc,m %r23(%r26)
761 pdc,m %r23(%r26) 760 pdc,m %r23(%r26)
762 pdc,m %r23(%r26) 761 pdc,m %r23(%r26)
763 pdc,m %r23(%r26) 762 pdc,m %r23(%r26)
764 pdc,m %r23(%r26) 763 pdc,m %r23(%r26)
765 pdc,m %r23(%r26) 764 pdc,m %r23(%r26)
766 pdc,m %r23(%r26) 765 pdc,m %r23(%r26)
767 pdc,m %r23(%r26) 766 pdc,m %r23(%r26)
768 pdc,m %r23(%r26) 767 pdc,m %r23(%r26)
769 pdc,m %r23(%r26) 768 pdc,m %r23(%r26)
770 pdc,m %r23(%r26) 769 pdc,m %r23(%r26)
771 pdc,m %r23(%r26) 770 pdc,m %r23(%r26)
772 pdc,m %r23(%r26) 771 pdc,m %r23(%r26)
773 cmpb,COND(<<) %r26, %r25, 1b 772 cmpb,COND(<<) %r26, %r25, 1b
774 pdc,m %r23(%r26) 773 pdc,m %r23(%r26)
775 774
776 sync 775 sync
777 bv %r0(%r2) 776 bv %r0(%r2)
778 nop 777 nop
779 .exit 778 .exit
780 779
781 .procend 780 .procend
782 ENDPROC(purge_kernel_dcache_page) 781 ENDPROC(purge_kernel_dcache_page)
783 782
784 #if 0 783 #if 0
 785 /* Currently not used, but it is still a possible alternate 784 /* Currently not used, but it is still a possible alternate
786 * solution. 785 * solution.
787 */ 786 */
788 787
789 ENTRY(flush_alias_page) 788 ENTRY(flush_alias_page)
790 .proc 789 .proc
791 .callinfo NO_CALLS 790 .callinfo NO_CALLS
792 .entry 791 .entry
793 792
794 tophys_r1 %r26 793 tophys_r1 %r26
795 794
796 ldil L%(TMPALIAS_MAP_START), %r28 795 ldil L%(TMPALIAS_MAP_START), %r28
797 #ifdef CONFIG_64BIT 796 #ifdef CONFIG_64BIT
798 extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */ 797 extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
799 depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ 798 depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
800 depdi 0, 63,12, %r28 /* Clear any offset bits */ 799 depdi 0, 63,12, %r28 /* Clear any offset bits */
801 #else 800 #else
802 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ 801 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
803 depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ 802 depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
804 depwi 0, 31,12, %r28 /* Clear any offset bits */ 803 depwi 0, 31,12, %r28 /* Clear any offset bits */
805 #endif 804 #endif
806 805
807 /* Purge any old translation */ 806 /* Purge any old translation */
808 807
809 pdtlb 0(%r28) 808 pdtlb 0(%r28)
810 809
811 ldil L%dcache_stride, %r1 810 ldil L%dcache_stride, %r1
812 ldw R%dcache_stride(%r1), %r23 811 ldw R%dcache_stride(%r1), %r23
813 812
814 #ifdef CONFIG_64BIT 813 #ifdef CONFIG_64BIT
815 depdi,z 1, 63-PAGE_SHIFT,1, %r29 814 depdi,z 1, 63-PAGE_SHIFT,1, %r29
816 #else 815 #else
817 depwi,z 1, 31-PAGE_SHIFT,1, %r29 816 depwi,z 1, 31-PAGE_SHIFT,1, %r29
818 #endif 817 #endif
819 add %r28, %r29, %r29 818 add %r28, %r29, %r29
820 sub %r29, %r23, %r29 819 sub %r29, %r23, %r29
821 820
822 1: fdc,m %r23(%r28) 821 1: fdc,m %r23(%r28)
823 fdc,m %r23(%r28) 822 fdc,m %r23(%r28)
824 fdc,m %r23(%r28) 823 fdc,m %r23(%r28)
825 fdc,m %r23(%r28) 824 fdc,m %r23(%r28)
826 fdc,m %r23(%r28) 825 fdc,m %r23(%r28)
827 fdc,m %r23(%r28) 826 fdc,m %r23(%r28)
828 fdc,m %r23(%r28) 827 fdc,m %r23(%r28)
829 fdc,m %r23(%r28) 828 fdc,m %r23(%r28)
830 fdc,m %r23(%r28) 829 fdc,m %r23(%r28)
831 fdc,m %r23(%r28) 830 fdc,m %r23(%r28)
832 fdc,m %r23(%r28) 831 fdc,m %r23(%r28)
833 fdc,m %r23(%r28) 832 fdc,m %r23(%r28)
834 fdc,m %r23(%r28) 833 fdc,m %r23(%r28)
835 fdc,m %r23(%r28) 834 fdc,m %r23(%r28)
836 fdc,m %r23(%r28) 835 fdc,m %r23(%r28)
837 cmpb,COND(<<) %r28, %r29, 1b 836 cmpb,COND(<<) %r28, %r29, 1b
838 fdc,m %r23(%r28) 837 fdc,m %r23(%r28)
839 838
840 sync 839 sync
841 bv %r0(%r2) 840 bv %r0(%r2)
842 nop 841 nop
843 .exit 842 .exit
844 843
845 .procend 844 .procend
846 #endif 845 #endif
847 846
848 .export flush_user_dcache_range_asm 847 .export flush_user_dcache_range_asm
849 848
850 flush_user_dcache_range_asm: 849 flush_user_dcache_range_asm:
851 .proc 850 .proc
852 .callinfo NO_CALLS 851 .callinfo NO_CALLS
853 .entry 852 .entry
854 853
855 ldil L%dcache_stride, %r1 854 ldil L%dcache_stride, %r1
856 ldw R%dcache_stride(%r1), %r23 855 ldw R%dcache_stride(%r1), %r23
857 ldo -1(%r23), %r21 856 ldo -1(%r23), %r21
858 ANDCM %r26, %r21, %r26 857 ANDCM %r26, %r21, %r26
859 858
860 1: cmpb,COND(<<),n %r26, %r25, 1b 859 1: cmpb,COND(<<),n %r26, %r25, 1b
861 fdc,m %r23(%sr3, %r26) 860 fdc,m %r23(%sr3, %r26)
862 861
863 sync 862 sync
864 bv %r0(%r2) 863 bv %r0(%r2)
865 nop 864 nop
866 .exit 865 .exit
867 866
868 .procend 867 .procend
 869 ENDPROC(flush_user_dcache_range_asm) 868 ENDPROC(flush_user_dcache_range_asm)
870 869
871 ENTRY(flush_kernel_dcache_range_asm) 870 ENTRY(flush_kernel_dcache_range_asm)
872 .proc 871 .proc
873 .callinfo NO_CALLS 872 .callinfo NO_CALLS
874 .entry 873 .entry
875 874
876 ldil L%dcache_stride, %r1 875 ldil L%dcache_stride, %r1
877 ldw R%dcache_stride(%r1), %r23 876 ldw R%dcache_stride(%r1), %r23
878 ldo -1(%r23), %r21 877 ldo -1(%r23), %r21
879 ANDCM %r26, %r21, %r26 878 ANDCM %r26, %r21, %r26
880 879
881 1: cmpb,COND(<<),n %r26, %r25,1b 880 1: cmpb,COND(<<),n %r26, %r25,1b
882 fdc,m %r23(%r26) 881 fdc,m %r23(%r26)
883 882
884 sync 883 sync
885 syncdma 884 syncdma
886 bv %r0(%r2) 885 bv %r0(%r2)
887 nop 886 nop
888 .exit 887 .exit
889 888
890 .procend 889 .procend
891 ENDPROC(flush_kernel_dcache_range_asm) 890 ENDPROC(flush_kernel_dcache_range_asm)
892 891
893 ENTRY(flush_user_icache_range_asm) 892 ENTRY(flush_user_icache_range_asm)
894 .proc 893 .proc
895 .callinfo NO_CALLS 894 .callinfo NO_CALLS
896 .entry 895 .entry
897 896
898 ldil L%icache_stride, %r1 897 ldil L%icache_stride, %r1
899 ldw R%icache_stride(%r1), %r23 898 ldw R%icache_stride(%r1), %r23
900 ldo -1(%r23), %r21 899 ldo -1(%r23), %r21
901 ANDCM %r26, %r21, %r26 900 ANDCM %r26, %r21, %r26
902 901
903 1: cmpb,COND(<<),n %r26, %r25,1b 902 1: cmpb,COND(<<),n %r26, %r25,1b
904 fic,m %r23(%sr3, %r26) 903 fic,m %r23(%sr3, %r26)
905 904
906 sync 905 sync
907 bv %r0(%r2) 906 bv %r0(%r2)
908 nop 907 nop
909 .exit 908 .exit
910 909
911 .procend 910 .procend
912 ENDPROC(flush_user_icache_range_asm) 911 ENDPROC(flush_user_icache_range_asm)
913 912
914 ENTRY(flush_kernel_icache_page) 913 ENTRY(flush_kernel_icache_page)
915 .proc 914 .proc
916 .callinfo NO_CALLS 915 .callinfo NO_CALLS
917 .entry 916 .entry
918 917
919 ldil L%icache_stride, %r1 918 ldil L%icache_stride, %r1
920 ldw R%icache_stride(%r1), %r23 919 ldw R%icache_stride(%r1), %r23
921 920
922 #ifdef CONFIG_64BIT 921 #ifdef CONFIG_64BIT
923 depdi,z 1, 63-PAGE_SHIFT,1, %r25 922 depdi,z 1, 63-PAGE_SHIFT,1, %r25
924 #else 923 #else
925 depwi,z 1, 31-PAGE_SHIFT,1, %r25 924 depwi,z 1, 31-PAGE_SHIFT,1, %r25
926 #endif 925 #endif
927 add %r26, %r25, %r25 926 add %r26, %r25, %r25
928 sub %r25, %r23, %r25 927 sub %r25, %r23, %r25
929 928
930 929
931 1: fic,m %r23(%sr4, %r26) 930 1: fic,m %r23(%sr4, %r26)
932 fic,m %r23(%sr4, %r26) 931 fic,m %r23(%sr4, %r26)
933 fic,m %r23(%sr4, %r26) 932 fic,m %r23(%sr4, %r26)
934 fic,m %r23(%sr4, %r26) 933 fic,m %r23(%sr4, %r26)
935 fic,m %r23(%sr4, %r26) 934 fic,m %r23(%sr4, %r26)
936 fic,m %r23(%sr4, %r26) 935 fic,m %r23(%sr4, %r26)
937 fic,m %r23(%sr4, %r26) 936 fic,m %r23(%sr4, %r26)
938 fic,m %r23(%sr4, %r26) 937 fic,m %r23(%sr4, %r26)
939 fic,m %r23(%sr4, %r26) 938 fic,m %r23(%sr4, %r26)
940 fic,m %r23(%sr4, %r26) 939 fic,m %r23(%sr4, %r26)
941 fic,m %r23(%sr4, %r26) 940 fic,m %r23(%sr4, %r26)
942 fic,m %r23(%sr4, %r26) 941 fic,m %r23(%sr4, %r26)
943 fic,m %r23(%sr4, %r26) 942 fic,m %r23(%sr4, %r26)
944 fic,m %r23(%sr4, %r26) 943 fic,m %r23(%sr4, %r26)
945 fic,m %r23(%sr4, %r26) 944 fic,m %r23(%sr4, %r26)
946 cmpb,COND(<<) %r26, %r25, 1b 945 cmpb,COND(<<) %r26, %r25, 1b
947 fic,m %r23(%sr4, %r26) 946 fic,m %r23(%sr4, %r26)
948 947
949 sync 948 sync
950 bv %r0(%r2) 949 bv %r0(%r2)
951 nop 950 nop
952 .exit 951 .exit
953 952
954 .procend 953 .procend
955 ENDPROC(flush_kernel_icache_page) 954 ENDPROC(flush_kernel_icache_page)
956 955
957 ENTRY(flush_kernel_icache_range_asm) 956 ENTRY(flush_kernel_icache_range_asm)
958 .proc 957 .proc
959 .callinfo NO_CALLS 958 .callinfo NO_CALLS
960 .entry 959 .entry
961 960
962 ldil L%icache_stride, %r1 961 ldil L%icache_stride, %r1
963 ldw R%icache_stride(%r1), %r23 962 ldw R%icache_stride(%r1), %r23
964 ldo -1(%r23), %r21 963 ldo -1(%r23), %r21
965 ANDCM %r26, %r21, %r26 964 ANDCM %r26, %r21, %r26
966 965
967 1: cmpb,COND(<<),n %r26, %r25, 1b 966 1: cmpb,COND(<<),n %r26, %r25, 1b
968 fic,m %r23(%sr4, %r26) 967 fic,m %r23(%sr4, %r26)
969 968
970 sync 969 sync
971 bv %r0(%r2) 970 bv %r0(%r2)
972 nop 971 nop
973 .exit 972 .exit
974 .procend 973 .procend
975 ENDPROC(flush_kernel_icache_range_asm) 974 ENDPROC(flush_kernel_icache_range_asm)
976 975
977 /* align should cover use of rfi in disable_sr_hashing_asm and 976 /* align should cover use of rfi in disable_sr_hashing_asm and
978 * srdis_done. 977 * srdis_done.
979 */ 978 */
980 .align 256 979 .align 256
981 ENTRY(disable_sr_hashing_asm) 980 ENTRY(disable_sr_hashing_asm)
982 .proc 981 .proc
983 .callinfo NO_CALLS 982 .callinfo NO_CALLS
984 .entry 983 .entry
985 984
986 /* 985 /*
987 * Switch to real mode 986 * Switch to real mode
988 */ 987 */
989 /* pcxt_ssm_bug */ 988 /* pcxt_ssm_bug */
990 rsm PSW_SM_I, %r0 989 rsm PSW_SM_I, %r0
991 load32 PA(1f), %r1 990 load32 PA(1f), %r1
992 nop 991 nop
993 nop 992 nop
994 nop 993 nop
995 nop 994 nop
996 nop 995 nop
997 996
998 rsm PSW_SM_Q, %r0 /* prep to load iia queue */ 997 rsm PSW_SM_Q, %r0 /* prep to load iia queue */
999 mtctl %r0, %cr17 /* Clear IIASQ tail */ 998 mtctl %r0, %cr17 /* Clear IIASQ tail */
1000 mtctl %r0, %cr17 /* Clear IIASQ head */ 999 mtctl %r0, %cr17 /* Clear IIASQ head */
1001 mtctl %r1, %cr18 /* IIAOQ head */ 1000 mtctl %r1, %cr18 /* IIAOQ head */
1002 ldo 4(%r1), %r1 1001 ldo 4(%r1), %r1
1003 mtctl %r1, %cr18 /* IIAOQ tail */ 1002 mtctl %r1, %cr18 /* IIAOQ tail */
1004 load32 REAL_MODE_PSW, %r1 1003 load32 REAL_MODE_PSW, %r1
1005 mtctl %r1, %ipsw 1004 mtctl %r1, %ipsw
1006 rfi 1005 rfi
1007 nop 1006 nop
1008 1007
1009 1: cmpib,=,n SRHASH_PCXST, %r26,srdis_pcxs 1008 1: cmpib,=,n SRHASH_PCXST, %r26,srdis_pcxs
1010 cmpib,=,n SRHASH_PCXL, %r26,srdis_pcxl 1009 cmpib,=,n SRHASH_PCXL, %r26,srdis_pcxl
1011 cmpib,=,n SRHASH_PA20, %r26,srdis_pa20 1010 cmpib,=,n SRHASH_PA20, %r26,srdis_pa20
1012 b,n srdis_done 1011 b,n srdis_done
1013 1012
1014 srdis_pcxs: 1013 srdis_pcxs:
1015 1014
1016 /* Disable Space Register Hashing for PCXS,PCXT,PCXT' */ 1015 /* Disable Space Register Hashing for PCXS,PCXT,PCXT' */
1017 1016
1018 .word 0x141c1a00 /* mfdiag %dr0, %r28 */ 1017 .word 0x141c1a00 /* mfdiag %dr0, %r28 */
1019 .word 0x141c1a00 /* must issue twice */ 1018 .word 0x141c1a00 /* must issue twice */
1020 depwi 0,18,1, %r28 /* Clear DHE (dcache hash enable) */ 1019 depwi 0,18,1, %r28 /* Clear DHE (dcache hash enable) */
1021 depwi 0,20,1, %r28 /* Clear IHE (icache hash enable) */ 1020 depwi 0,20,1, %r28 /* Clear IHE (icache hash enable) */
1022 .word 0x141c1600 /* mtdiag %r28, %dr0 */ 1021 .word 0x141c1600 /* mtdiag %r28, %dr0 */
1023 .word 0x141c1600 /* must issue twice */ 1022 .word 0x141c1600 /* must issue twice */
1024 b,n srdis_done 1023 b,n srdis_done
1025 1024
1026 srdis_pcxl: 1025 srdis_pcxl:
1027 1026
1028 /* Disable Space Register Hashing for PCXL */ 1027 /* Disable Space Register Hashing for PCXL */
1029 1028
1030 .word 0x141c0600 /* mfdiag %dr0, %r28 */ 1029 .word 0x141c0600 /* mfdiag %dr0, %r28 */
1031 depwi 0,28,2, %r28 /* Clear DHASH_EN & IHASH_EN */ 1030 depwi 0,28,2, %r28 /* Clear DHASH_EN & IHASH_EN */
1032 .word 0x141c0240 /* mtdiag %r28, %dr0 */ 1031 .word 0x141c0240 /* mtdiag %r28, %dr0 */
1033 b,n srdis_done 1032 b,n srdis_done
1034 1033
1035 srdis_pa20: 1034 srdis_pa20:
1036 1035
1037 /* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */ 1036 /* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */
1038 1037
1039 .word 0x144008bc /* mfdiag %dr2, %r28 */ 1038 .word 0x144008bc /* mfdiag %dr2, %r28 */
1040 depdi 0, 54,1, %r28 /* clear DIAG_SPHASH_ENAB (bit 54) */ 1039 depdi 0, 54,1, %r28 /* clear DIAG_SPHASH_ENAB (bit 54) */
1041 .word 0x145c1840 /* mtdiag %r28, %dr2 */ 1040 .word 0x145c1840 /* mtdiag %r28, %dr2 */
1042 1041
1043 1042
1044 srdis_done: 1043 srdis_done:
1045 /* Switch back to virtual mode */ 1044 /* Switch back to virtual mode */
1046 rsm PSW_SM_I, %r0 /* prep to load iia queue */ 1045 rsm PSW_SM_I, %r0 /* prep to load iia queue */
1047 load32 2f, %r1 1046 load32 2f, %r1
1048 nop 1047 nop
1049 nop 1048 nop
1050 nop 1049 nop
1051 nop 1050 nop
1052 nop 1051 nop
1053 1052
1054 rsm PSW_SM_Q, %r0 /* prep to load iia queue */ 1053 rsm PSW_SM_Q, %r0 /* prep to load iia queue */
1055 mtctl %r0, %cr17 /* Clear IIASQ tail */ 1054 mtctl %r0, %cr17 /* Clear IIASQ tail */
1056 mtctl %r0, %cr17 /* Clear IIASQ head */ 1055 mtctl %r0, %cr17 /* Clear IIASQ head */
1057 mtctl %r1, %cr18 /* IIAOQ head */ 1056 mtctl %r1, %cr18 /* IIAOQ head */
1058 ldo 4(%r1), %r1 1057 ldo 4(%r1), %r1
1059 mtctl %r1, %cr18 /* IIAOQ tail */ 1058 mtctl %r1, %cr18 /* IIAOQ tail */
1060 load32 KERNEL_PSW, %r1 1059 load32 KERNEL_PSW, %r1
1061 mtctl %r1, %ipsw 1060 mtctl %r1, %ipsw
1062 rfi 1061 rfi
1063 nop 1062 nop
1064 1063
1065 2: bv %r0(%r2) 1064 2: bv %r0(%r2)
1066 nop 1065 nop
1067 .exit 1066 .exit
1068 1067
1069 .procend 1068 .procend
1070 ENDPROC(disable_sr_hashing_asm) 1069 ENDPROC(disable_sr_hashing_asm)
1071 1070
1072 .end 1071 .end
1073 1072
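A note on the depwi/depdi bit clears in disable_sr_hashing_asm above: PA-RISC numbers bits from the most significant end (bit 0 is the MSB), and dep{w,d}i 0,p,len zeroes a len-bit field whose rightmost bit sits at big-endian position p. A small C sketch of that mapping, using the DHE/IHE positions from the PCXS/PCXT sequence (the initial register value is a made-up example):

#include <stdint.h>
#include <stdio.h>

/* Zero a len-bit field whose rightmost bit is big-endian position pos,
 * i.e. the PA-RISC "depwi 0,pos,len" operation on a 32-bit register. */
static uint32_t depwi_zero(uint32_t val, int pos, int len)
{
	uint32_t mask = ((1u << len) - 1u) << (31 - pos);
	return val & ~mask;
}

int main(void)
{
	uint32_t dr0 = 0xffffffffu;       /* example diagnose value */
	dr0 = depwi_zero(dr0, 18, 1);     /* clear DHE, bit 18 from MSB */
	dr0 = depwi_zero(dr0, 20, 1);     /* clear IHE, bit 20 from MSB */
	printf("0x%08x\n", dr0);          /* prints 0xffffd7ff */
	return 0;
}

The 64-bit depdi 0,54,1 in the PA2.0 path works the same way, with 63 in place of 31.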
arch/parisc/kernel/perf_asm.S
1 1
2 /* low-level asm for "intrigue" (PA8500-8700 CPU perf counters) 2 /* low-level asm for "intrigue" (PA8500-8700 CPU perf counters)
3 * 3 *
4 * Copyright (C) 2001 Randolph Chung <tausq at parisc-linux.org> 4 * Copyright (C) 2001 Randolph Chung <tausq at parisc-linux.org>
5 * Copyright (C) 2001 Hewlett-Packard (Grant Grundler) 5 * Copyright (C) 2001 Hewlett-Packard (Grant Grundler)
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or 9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version. 10 * (at your option) any later version.
11 * 11 *
12 * This program is distributed in the hope that it will be useful, 12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 * 16 *
17 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software 18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 20 */
21 21
22 #include <asm/assembly.h> 22 #include <asm/assembly.h>
23 23
24 #include <linux/init.h> 24 #include <linux/init.h>
25 #include <linux/linkage.h> 25 #include <linux/linkage.h>
26 26
27 #ifdef CONFIG_64BIT 27 #ifdef CONFIG_64BIT
28 .level 2.0w 28 .level 2.0w
29 #endif /* CONFIG_64BIT */ 29 #endif /* CONFIG_64BIT */
30 30
31 #define MTDIAG_1(gr) .word 0x14201840 + gr*0x10000 31 #define MTDIAG_1(gr) .word 0x14201840 + gr*0x10000
32 #define MTDIAG_2(gr) .word 0x14401840 + gr*0x10000 32 #define MTDIAG_2(gr) .word 0x14401840 + gr*0x10000
33 #define MFDIAG_1(gr) .word 0x142008A0 + gr 33 #define MFDIAG_1(gr) .word 0x142008A0 + gr
34 #define MFDIAG_2(gr) .word 0x144008A0 + gr 34 #define MFDIAG_2(gr) .word 0x144008A0 + gr
35 #define STDIAG(dr) .word 0x14000AA0 + dr*0x200000 35 #define STDIAG(dr) .word 0x14000AA0 + dr*0x200000
36 #define SFDIAG(dr) .word 0x14000BA0 + dr*0x200000 36 #define SFDIAG(dr) .word 0x14000BA0 + dr*0x200000
37 #define DR2_SLOW_RET 53 37 #define DR2_SLOW_RET 53
38 38
39 39
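These macros hand-assemble diagnose instructions the assembler does not know, folding the general or diagnose register number into a fixed base word. A plain C sketch of the same arithmetic, whose output can be checked against the raw .word values used elsewhere (for instance, MFDIAG_2(28) reproduces the 0x144008bc word in the pacache.S sequences above):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the #defines above, as checkable C expressions. */
static uint32_t mtdiag_1(uint32_t gr) { return 0x14201840u + gr * 0x10000u; }
static uint32_t mfdiag_1(uint32_t gr) { return 0x142008A0u + gr; }
static uint32_t mfdiag_2(uint32_t gr) { return 0x144008A0u + gr; }
static uint32_t stdiag(uint32_t dr)   { return 0x14000AA0u + dr * 0x200000u; }
static uint32_t sfdiag(uint32_t dr)   { return 0x14000BA0u + dr * 0x200000u; }

int main(void)
{
	printf("MTDIAG_1(1)  = 0x%08x\n", mtdiag_1(1));   /* 0x14211840 */
	printf("MFDIAG_1(28) = 0x%08x\n", mfdiag_1(28));  /* 0x142008bc */
	printf("MFDIAG_2(28) = 0x%08x\n", mfdiag_2(28));  /* 0x144008bc */
	printf("STDIAG(0)    = 0x%08x\n", stdiag(0));     /* 0x14000aa0 */
	printf("SFDIAG(10)   = 0x%08x\n", sfdiag(10));    /* 0x15400ba0 */
	return 0;
}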
40 ; 40 ;
41 ; Enable the performance counters 41 ; Enable the performance counters
42 ; 42 ;
43 ; The coprocessor only needs to be enabled when 43 ; The coprocessor only needs to be enabled when
44 ; starting/stopping the performance monitor with pmenb/pmdis. 44 ; starting/stopping the performance monitor with pmenb/pmdis.
45 ; 45 ;
46 __HEAD 46 .text
47 47
48 ENTRY(perf_intrigue_enable_perf_counters) 48 ENTRY(perf_intrigue_enable_perf_counters)
49 .proc 49 .proc
50 .callinfo frame=0,NO_CALLS 50 .callinfo frame=0,NO_CALLS
51 .entry 51 .entry
52 52
53 ldi 0x20,%r25 ; load up perfmon bit 53 ldi 0x20,%r25 ; load up perfmon bit
54 mfctl ccr,%r26 ; get coprocessor register 54 mfctl ccr,%r26 ; get coprocessor register
55 or %r25,%r26,%r26 ; set bit 55 or %r25,%r26,%r26 ; set bit
56 mtctl %r26,ccr ; turn on performance coprocessor 56 mtctl %r26,ccr ; turn on performance coprocessor
57 pmenb ; enable performance monitor 57 pmenb ; enable performance monitor
58 ssm 0,0 ; dummy op to ensure completion 58 ssm 0,0 ; dummy op to ensure completion
59 sync ; follow ERS 59 sync ; follow ERS
60 andcm %r26,%r25,%r26 ; clear bit now 60 andcm %r26,%r25,%r26 ; clear bit now
61 mtctl %r26,ccr ; turn off performance coprocessor 61 mtctl %r26,ccr ; turn off performance coprocessor
62 nop ; NOPs as specified in ERS 62 nop ; NOPs as specified in ERS
63 nop 63 nop
64 nop 64 nop
65 nop 65 nop
66 nop 66 nop
67 nop 67 nop
68 nop 68 nop
69 bve (%r2) 69 bve (%r2)
70 nop 70 nop
71 .exit 71 .exit
72 .procend 72 .procend
73 ENDPROC(perf_intrigue_enable_perf_counters) 73 ENDPROC(perf_intrigue_enable_perf_counters)
74 74
75 ENTRY(perf_intrigue_disable_perf_counters) 75 ENTRY(perf_intrigue_disable_perf_counters)
76 .proc 76 .proc
77 .callinfo frame=0,NO_CALLS 77 .callinfo frame=0,NO_CALLS
78 .entry 78 .entry
79 ldi 0x20,%r25 ; load up perfmon bit 79 ldi 0x20,%r25 ; load up perfmon bit
80 mfctl ccr,%r26 ; get coprocessor register 80 mfctl ccr,%r26 ; get coprocessor register
81 or %r25,%r26,%r26 ; set bit 81 or %r25,%r26,%r26 ; set bit
82 mtctl %r26,ccr ; turn on performance coprocessor 82 mtctl %r26,ccr ; turn on performance coprocessor
83 pmdis ; disable performance monitor 83 pmdis ; disable performance monitor
84 ssm 0,0 ; dummy op to ensure completion 84 ssm 0,0 ; dummy op to ensure completion
85 andcm %r26,%r25,%r26 ; clear bit now 85 andcm %r26,%r25,%r26 ; clear bit now
86 bve (%r2) 86 bve (%r2)
87 mtctl %r26,ccr ; turn off performance coprocessor 87 mtctl %r26,ccr ; turn off performance coprocessor
88 .exit 88 .exit
89 .procend 89 .procend
90 ENDPROC(perf_intrigue_disable_perf_counters) 90 ENDPROC(perf_intrigue_disable_perf_counters)
91 91
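Both routines above share one pattern: set the performance-coprocessor enable bit in the CCR, issue pmenb or pmdis while it is set, then clear the bit again. A C sketch of just that bit dance, with mfctl/mtctl and pmenb reduced to stubs (the 0x20 value comes from the ldi above):

#include <stdint.h>
#include <stdio.h>

#define PERF_CCR_BIT 0x20u                  /* the bit loaded by "ldi 0x20" */

static uint32_t ccr;                        /* stand-in for the control reg */

static void pmenb(void) { puts("pmenb"); }  /* stub for the pmenb op */

static void enable_counters_sketch(void)
{
	uint32_t v = ccr | PERF_CCR_BIT;    /* or %r25,%r26,%r26 */
	ccr = v;                            /* mtctl %r26,ccr: coproc on */
	pmenb();                            /* start the counters */
	ccr = v & ~PERF_CCR_BIT;            /* andcm: coproc off again */
}

int main(void)
{
	enable_counters_sketch();
	printf("ccr = 0x%02x\n", ccr);      /* bit is clear afterwards */
	return 0;
}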
92 ;*********************************************************************** 92 ;***********************************************************************
93 ;* 93 ;*
94 ;* Name: perf_rdr_shift_in_W 94 ;* Name: perf_rdr_shift_in_W
95 ;* 95 ;*
96 ;* Description: 96 ;* Description:
97 ;* This routine shifts data in from the RDR in arg0 and returns 97 ;* This routine shifts data in from the RDR in arg0 and returns
98 ;* the result in ret0. If the RDR is <= 64 bits in length, it 98 ;* the result in ret0. If the RDR is <= 64 bits in length, it
99 ;* is shifted back up immediately. This is to compensate 99 ;* is shifted back up immediately. This is to compensate
100 ;* for RDR10, which has bits that preclude PDC stack operations 100 ;* for RDR10, which has bits that preclude PDC stack operations
101 ;* when they are in the wrong state. 101 ;* when they are in the wrong state.
102 ;* 102 ;*
103 ;* Arguments: 103 ;* Arguments:
104 ;* arg0 : rdr to be read 104 ;* arg0 : rdr to be read
105 ;* arg1 : bit length of rdr 105 ;* arg1 : bit length of rdr
106 ;* 106 ;*
107 ;* Returns: 107 ;* Returns:
108 ;* ret0 = next 64 bits of rdr data from staging register 108 ;* ret0 = next 64 bits of rdr data from staging register
109 ;* 109 ;*
110 ;* Register usage: 110 ;* Register usage:
111 ;* arg0 : rdr to be read 111 ;* arg0 : rdr to be read
112 ;* arg1 : bit length of rdr 112 ;* arg1 : bit length of rdr
113 ;* %r24 - original DR2 value 113 ;* %r24 - original DR2 value
114 ;* %r1 - scratch 114 ;* %r1 - scratch
115 ;* %r29 - scratch 115 ;* %r29 - scratch
116 ;* 116 ;*
117 ;* Returns: 117 ;* Returns:
118 ;* ret0 = RDR data (right justified) 118 ;* ret0 = RDR data (right justified)
119 ;* 119 ;*
120 ;*********************************************************************** 120 ;***********************************************************************
121 121
122 ENTRY(perf_rdr_shift_in_W) 122 ENTRY(perf_rdr_shift_in_W)
123 .proc 123 .proc
124 .callinfo frame=0,NO_CALLS 124 .callinfo frame=0,NO_CALLS
125 .entry 125 .entry
126 ; 126 ;
127 ; read (shift in) the RDR. 127 ; read (shift in) the RDR.
128 ; 128 ;
129 129
130 ; NOTE: The PCX-W ERS states that DR2_SLOW_RET must be set before any 130 ; NOTE: The PCX-W ERS states that DR2_SLOW_RET must be set before any
131 ; shifting is done, from or to, the remote diagnose registers. 131 ; shifting is done, from or to, the remote diagnose registers.
132 ; 132 ;
133 133
134 depdi,z 1,DR2_SLOW_RET,1,%r29 134 depdi,z 1,DR2_SLOW_RET,1,%r29
135 MFDIAG_2 (24) 135 MFDIAG_2 (24)
136 or %r24,%r29,%r29 136 or %r24,%r29,%r29
137 MTDIAG_2 (29) ; set DR2_SLOW_RET 137 MTDIAG_2 (29) ; set DR2_SLOW_RET
138 138
139 nop 139 nop
140 nop 140 nop
141 nop 141 nop
142 nop 142 nop
143 143
144 ; 144 ;
145 ; Cacheline start (32-byte cacheline) 145 ; Cacheline start (32-byte cacheline)
146 ; 146 ;
147 nop 147 nop
148 nop 148 nop
149 nop 149 nop
150 extrd,u arg1,63,6,%r1 ; setup shift amount by bits to move 150 extrd,u arg1,63,6,%r1 ; setup shift amount by bits to move
151 151
152 mtsar %r1 152 mtsar %r1
153 shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number 153 shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number
154 blr %r1,%r0 ; branch to 8-instruction sequence 154 blr %r1,%r0 ; branch to 8-instruction sequence
155 nop 155 nop
156 156
157 ; 157 ;
158 ; Cacheline start (32-byte cacheline) 158 ; Cacheline start (32-byte cacheline)
159 ; 159 ;
160 160
161 ; 161 ;
162 ; RDR 0 sequence 162 ; RDR 0 sequence
163 ; 163 ;
164 SFDIAG (0) 164 SFDIAG (0)
165 ssm 0,0 165 ssm 0,0
166 MFDIAG_1 (28) 166 MFDIAG_1 (28)
167 shrpd ret0,%r0,%sar,%r1 167 shrpd ret0,%r0,%sar,%r1
168 MTDIAG_1 (1) ; mtdiag %dr1, %r1 168 MTDIAG_1 (1) ; mtdiag %dr1, %r1
169 STDIAG (0) 169 STDIAG (0)
170 ssm 0,0 170 ssm 0,0
171 b,n perf_rdr_shift_in_W_leave 171 b,n perf_rdr_shift_in_W_leave
172 172
173 ; 173 ;
174 ; RDR 1 sequence 174 ; RDR 1 sequence
175 ; 175 ;
176 sync 176 sync
177 ssm 0,0 177 ssm 0,0
178 SFDIAG (1) 178 SFDIAG (1)
179 ssm 0,0 179 ssm 0,0
180 MFDIAG_1 (28) 180 MFDIAG_1 (28)
181 ssm 0,0 181 ssm 0,0
182 b,n perf_rdr_shift_in_W_leave 182 b,n perf_rdr_shift_in_W_leave
183 nop 183 nop
184 184
185 ; 185 ;
186 ; RDR 2 read sequence 186 ; RDR 2 read sequence
187 ; 187 ;
188 SFDIAG (2) 188 SFDIAG (2)
189 ssm 0,0 189 ssm 0,0
190 MFDIAG_1 (28) 190 MFDIAG_1 (28)
191 shrpd ret0,%r0,%sar,%r1 191 shrpd ret0,%r0,%sar,%r1
192 MTDIAG_1 (1) 192 MTDIAG_1 (1)
193 STDIAG (2) 193 STDIAG (2)
194 ssm 0,0 194 ssm 0,0
195 b,n perf_rdr_shift_in_W_leave 195 b,n perf_rdr_shift_in_W_leave
196 196
197 ; 197 ;
198 ; RDR 3 read sequence 198 ; RDR 3 read sequence
199 ; 199 ;
200 b,n perf_rdr_shift_in_W_leave 200 b,n perf_rdr_shift_in_W_leave
201 nop 201 nop
202 nop 202 nop
203 nop 203 nop
204 nop 204 nop
205 nop 205 nop
206 nop 206 nop
207 nop 207 nop
208 208
209 ; 209 ;
210 ; RDR 4 read sequence 210 ; RDR 4 read sequence
211 ; 211 ;
212 sync 212 sync
213 ssm 0,0 213 ssm 0,0
214 SFDIAG (4) 214 SFDIAG (4)
215 ssm 0,0 215 ssm 0,0
216 MFDIAG_1 (28) 216 MFDIAG_1 (28)
217 b,n perf_rdr_shift_in_W_leave 217 b,n perf_rdr_shift_in_W_leave
218 ssm 0,0 218 ssm 0,0
219 nop 219 nop
220 220
221 ; 221 ;
222 ; RDR 5 read sequence 222 ; RDR 5 read sequence
223 ; 223 ;
224 sync 224 sync
225 ssm 0,0 225 ssm 0,0
226 SFDIAG (5) 226 SFDIAG (5)
227 ssm 0,0 227 ssm 0,0
228 MFDIAG_1 (28) 228 MFDIAG_1 (28)
229 b,n perf_rdr_shift_in_W_leave 229 b,n perf_rdr_shift_in_W_leave
230 ssm 0,0 230 ssm 0,0
231 nop 231 nop
232 232
233 ; 233 ;
234 ; RDR 6 read sequence 234 ; RDR 6 read sequence
235 ; 235 ;
236 sync 236 sync
237 ssm 0,0 237 ssm 0,0
238 SFDIAG (6) 238 SFDIAG (6)
239 ssm 0,0 239 ssm 0,0
240 MFDIAG_1 (28) 240 MFDIAG_1 (28)
241 b,n perf_rdr_shift_in_W_leave 241 b,n perf_rdr_shift_in_W_leave
242 ssm 0,0 242 ssm 0,0
243 nop 243 nop
244 244
245 ; 245 ;
246 ; RDR 7 read sequence 246 ; RDR 7 read sequence
247 ; 247 ;
248 b,n perf_rdr_shift_in_W_leave 248 b,n perf_rdr_shift_in_W_leave
249 nop 249 nop
250 nop 250 nop
251 nop 251 nop
252 nop 252 nop
253 nop 253 nop
254 nop 254 nop
255 nop 255 nop
256 256
257 ; 257 ;
258 ; RDR 8 read sequence 258 ; RDR 8 read sequence
259 ; 259 ;
260 b,n perf_rdr_shift_in_W_leave 260 b,n perf_rdr_shift_in_W_leave
261 nop 261 nop
262 nop 262 nop
263 nop 263 nop
264 nop 264 nop
265 nop 265 nop
266 nop 266 nop
267 nop 267 nop
268 268
269 ; 269 ;
270 ; RDR 9 read sequence 270 ; RDR 9 read sequence
271 ; 271 ;
272 b,n perf_rdr_shift_in_W_leave 272 b,n perf_rdr_shift_in_W_leave
273 nop 273 nop
274 nop 274 nop
275 nop 275 nop
276 nop 276 nop
277 nop 277 nop
278 nop 278 nop
279 nop 279 nop
280 280
281 ; 281 ;
282 ; RDR 10 read sequence 282 ; RDR 10 read sequence
283 ; 283 ;
284 SFDIAG (10) 284 SFDIAG (10)
285 ssm 0,0 285 ssm 0,0
286 MFDIAG_1 (28) 286 MFDIAG_1 (28)
287 shrpd ret0,%r0,%sar,%r1 287 shrpd ret0,%r0,%sar,%r1
288 MTDIAG_1 (1) 288 MTDIAG_1 (1)
289 STDIAG (10) 289 STDIAG (10)
290 ssm 0,0 290 ssm 0,0
291 b,n perf_rdr_shift_in_W_leave 291 b,n perf_rdr_shift_in_W_leave
292 292
293 ; 293 ;
294 ; RDR 11 read sequence 294 ; RDR 11 read sequence
295 ; 295 ;
296 SFDIAG (11) 296 SFDIAG (11)
297 ssm 0,0 297 ssm 0,0
298 MFDIAG_1 (28) 298 MFDIAG_1 (28)
299 shrpd ret0,%r0,%sar,%r1 299 shrpd ret0,%r0,%sar,%r1
300 MTDIAG_1 (1) 300 MTDIAG_1 (1)
301 STDIAG (11) 301 STDIAG (11)
302 ssm 0,0 302 ssm 0,0
303 b,n perf_rdr_shift_in_W_leave 303 b,n perf_rdr_shift_in_W_leave
304 304
305 ; 305 ;
306 ; RDR 12 read sequence 306 ; RDR 12 read sequence
307 ; 307 ;
308 b,n perf_rdr_shift_in_W_leave 308 b,n perf_rdr_shift_in_W_leave
309 nop 309 nop
310 nop 310 nop
311 nop 311 nop
312 nop 312 nop
313 nop 313 nop
314 nop 314 nop
315 nop 315 nop
316 316
317 ; 317 ;
318 ; RDR 13 read sequence 318 ; RDR 13 read sequence
319 ; 319 ;
320 sync 320 sync
321 ssm 0,0 321 ssm 0,0
322 SFDIAG (13) 322 SFDIAG (13)
323 ssm 0,0 323 ssm 0,0
324 MFDIAG_1 (28) 324 MFDIAG_1 (28)
325 b,n perf_rdr_shift_in_W_leave 325 b,n perf_rdr_shift_in_W_leave
326 ssm 0,0 326 ssm 0,0
327 nop 327 nop
328 328
329 ; 329 ;
330 ; RDR 14 read sequence 330 ; RDR 14 read sequence
331 ; 331 ;
332 SFDIAG (14) 332 SFDIAG (14)
333 ssm 0,0 333 ssm 0,0
334 MFDIAG_1 (28) 334 MFDIAG_1 (28)
335 shrpd ret0,%r0,%sar,%r1 335 shrpd ret0,%r0,%sar,%r1
336 MTDIAG_1 (1) 336 MTDIAG_1 (1)
337 STDIAG (14) 337 STDIAG (14)
338 ssm 0,0 338 ssm 0,0
339 b,n perf_rdr_shift_in_W_leave 339 b,n perf_rdr_shift_in_W_leave
340 340
341 ; 341 ;
342 ; RDR 15 read sequence 342 ; RDR 15 read sequence
343 ; 343 ;
344 sync 344 sync
345 ssm 0,0 345 ssm 0,0
346 SFDIAG (15) 346 SFDIAG (15)
347 ssm 0,0 347 ssm 0,0
348 MFDIAG_1 (28) 348 MFDIAG_1 (28)
349 ssm 0,0 349 ssm 0,0
350 b,n perf_rdr_shift_in_W_leave 350 b,n perf_rdr_shift_in_W_leave
351 nop 351 nop
352 352
353 ; 353 ;
354 ; RDR 16 read sequence 354 ; RDR 16 read sequence
355 ; 355 ;
356 sync 356 sync
357 ssm 0,0 357 ssm 0,0
358 SFDIAG (16) 358 SFDIAG (16)
359 ssm 0,0 359 ssm 0,0
360 MFDIAG_1 (28) 360 MFDIAG_1 (28)
361 b,n perf_rdr_shift_in_W_leave 361 b,n perf_rdr_shift_in_W_leave
362 ssm 0,0 362 ssm 0,0
363 nop 363 nop
364 364
365 ; 365 ;
366 ; RDR 17 read sequence 366 ; RDR 17 read sequence
367 ; 367 ;
368 SFDIAG (17) 368 SFDIAG (17)
369 ssm 0,0 369 ssm 0,0
370 MFDIAG_1 (28) 370 MFDIAG_1 (28)
371 shrpd ret0,%r0,%sar,%r1 371 shrpd ret0,%r0,%sar,%r1
372 MTDIAG_1 (1) 372 MTDIAG_1 (1)
373 STDIAG (17) 373 STDIAG (17)
374 ssm 0,0 374 ssm 0,0
375 b,n perf_rdr_shift_in_W_leave 375 b,n perf_rdr_shift_in_W_leave
376 376
377 ; 377 ;
378 ; RDR 18 read sequence 378 ; RDR 18 read sequence
379 ; 379 ;
380 SFDIAG (18) 380 SFDIAG (18)
381 ssm 0,0 381 ssm 0,0
382 MFDIAG_1 (28) 382 MFDIAG_1 (28)
383 shrpd ret0,%r0,%sar,%r1 383 shrpd ret0,%r0,%sar,%r1
384 MTDIAG_1 (1) 384 MTDIAG_1 (1)
385 STDIAG (18) 385 STDIAG (18)
386 ssm 0,0 386 ssm 0,0
387 b,n perf_rdr_shift_in_W_leave 387 b,n perf_rdr_shift_in_W_leave
388 388
389 ; 389 ;
390 ; RDR 19 read sequence 390 ; RDR 19 read sequence
391 ; 391 ;
392 b,n perf_rdr_shift_in_W_leave 392 b,n perf_rdr_shift_in_W_leave
393 nop 393 nop
394 nop 394 nop
395 nop 395 nop
396 nop 396 nop
397 nop 397 nop
398 nop 398 nop
399 nop 399 nop
400 400
401 ; 401 ;
402 ; RDR 20 read sequence 402 ; RDR 20 read sequence
403 ; 403 ;
404 sync 404 sync
405 ssm 0,0 405 ssm 0,0
406 SFDIAG (20) 406 SFDIAG (20)
407 ssm 0,0 407 ssm 0,0
408 MFDIAG_1 (28) 408 MFDIAG_1 (28)
409 b,n perf_rdr_shift_in_W_leave 409 b,n perf_rdr_shift_in_W_leave
410 ssm 0,0 410 ssm 0,0
411 nop 411 nop
412 412
413 ; 413 ;
414 ; RDR 21 read sequence 414 ; RDR 21 read sequence
415 ; 415 ;
416 sync 416 sync
417 ssm 0,0 417 ssm 0,0
418 SFDIAG (21) 418 SFDIAG (21)
419 ssm 0,0 419 ssm 0,0
420 MFDIAG_1 (28) 420 MFDIAG_1 (28)
421 b,n perf_rdr_shift_in_W_leave 421 b,n perf_rdr_shift_in_W_leave
422 ssm 0,0 422 ssm 0,0
423 nop 423 nop
424 424
425 ; 425 ;
426 ; RDR 22 read sequence 426 ; RDR 22 read sequence
427 ; 427 ;
428 sync 428 sync
429 ssm 0,0 429 ssm 0,0
430 SFDIAG (22) 430 SFDIAG (22)
431 ssm 0,0 431 ssm 0,0
432 MFDIAG_1 (28) 432 MFDIAG_1 (28)
433 b,n perf_rdr_shift_in_W_leave 433 b,n perf_rdr_shift_in_W_leave
434 ssm 0,0 434 ssm 0,0
435 nop 435 nop
436 436
437 ; 437 ;
438 ; RDR 23 read sequence 438 ; RDR 23 read sequence
439 ; 439 ;
440 sync 440 sync
441 ssm 0,0 441 ssm 0,0
442 SFDIAG (23) 442 SFDIAG (23)
443 ssm 0,0 443 ssm 0,0
444 MFDIAG_1 (28) 444 MFDIAG_1 (28)
445 b,n perf_rdr_shift_in_W_leave 445 b,n perf_rdr_shift_in_W_leave
446 ssm 0,0 446 ssm 0,0
447 nop 447 nop
448 448
449 ; 449 ;
450 ; RDR 24 read sequence 450 ; RDR 24 read sequence
451 ; 451 ;
452 sync 452 sync
453 ssm 0,0 453 ssm 0,0
454 SFDIAG (24) 454 SFDIAG (24)
455 ssm 0,0 455 ssm 0,0
456 MFDIAG_1 (28) 456 MFDIAG_1 (28)
457 b,n perf_rdr_shift_in_W_leave 457 b,n perf_rdr_shift_in_W_leave
458 ssm 0,0 458 ssm 0,0
459 nop 459 nop
460 460
461 ; 461 ;
462 ; RDR 25 read sequence 462 ; RDR 25 read sequence
463 ; 463 ;
464 sync 464 sync
465 ssm 0,0 465 ssm 0,0
466 SFDIAG (25) 466 SFDIAG (25)
467 ssm 0,0 467 ssm 0,0
468 MFDIAG_1 (28) 468 MFDIAG_1 (28)
469 b,n perf_rdr_shift_in_W_leave 469 b,n perf_rdr_shift_in_W_leave
470 ssm 0,0 470 ssm 0,0
471 nop 471 nop
472 472
473 ; 473 ;
474 ; RDR 26 read sequence 474 ; RDR 26 read sequence
475 ; 475 ;
476 SFDIAG (26) 476 SFDIAG (26)
477 ssm 0,0 477 ssm 0,0
478 MFDIAG_1 (28) 478 MFDIAG_1 (28)
479 shrpd ret0,%r0,%sar,%r1 479 shrpd ret0,%r0,%sar,%r1
480 MTDIAG_1 (1) 480 MTDIAG_1 (1)
481 STDIAG (26) 481 STDIAG (26)
482 ssm 0,0 482 ssm 0,0
483 b,n perf_rdr_shift_in_W_leave 483 b,n perf_rdr_shift_in_W_leave
484 484
485 ; 485 ;
486 ; RDR 27 read sequence 486 ; RDR 27 read sequence
487 ; 487 ;
488 SFDIAG (27) 488 SFDIAG (27)
489 ssm 0,0 489 ssm 0,0
490 MFDIAG_1 (28) 490 MFDIAG_1 (28)
491 shrpd ret0,%r0,%sar,%r1 491 shrpd ret0,%r0,%sar,%r1
492 MTDIAG_1 (1) 492 MTDIAG_1 (1)
493 STDIAG (27) 493 STDIAG (27)
494 ssm 0,0 494 ssm 0,0
495 b,n perf_rdr_shift_in_W_leave 495 b,n perf_rdr_shift_in_W_leave
496 496
497 ; 497 ;
498 ; RDR 28 read sequence 498 ; RDR 28 read sequence
499 ; 499 ;
500 sync 500 sync
501 ssm 0,0 501 ssm 0,0
502 SFDIAG (28) 502 SFDIAG (28)
503 ssm 0,0 503 ssm 0,0
504 MFDIAG_1 (28) 504 MFDIAG_1 (28)
505 b,n perf_rdr_shift_in_W_leave 505 b,n perf_rdr_shift_in_W_leave
506 ssm 0,0 506 ssm 0,0
507 nop 507 nop
508 508
509 ; 509 ;
510 ; RDR 29 read sequence 510 ; RDR 29 read sequence
511 ; 511 ;
512 sync 512 sync
513 ssm 0,0 513 ssm 0,0
514 SFDIAG (29) 514 SFDIAG (29)
515 ssm 0,0 515 ssm 0,0
516 MFDIAG_1 (28) 516 MFDIAG_1 (28)
517 b,n perf_rdr_shift_in_W_leave 517 b,n perf_rdr_shift_in_W_leave
518 ssm 0,0 518 ssm 0,0
519 nop 519 nop
520 520
521 ; 521 ;
522 ; RDR 30 read sequence 522 ; RDR 30 read sequence
523 ; 523 ;
524 SFDIAG (30) 524 SFDIAG (30)
525 ssm 0,0 525 ssm 0,0
526 MFDIAG_1 (28) 526 MFDIAG_1 (28)
527 shrpd ret0,%r0,%sar,%r1 527 shrpd ret0,%r0,%sar,%r1
528 MTDIAG_1 (1) 528 MTDIAG_1 (1)
529 STDIAG (30) 529 STDIAG (30)
530 ssm 0,0 530 ssm 0,0
531 b,n perf_rdr_shift_in_W_leave 531 b,n perf_rdr_shift_in_W_leave
532 532
533 ; 533 ;
534 ; RDR 31 read sequence 534 ; RDR 31 read sequence
535 ; 535 ;
536 sync 536 sync
537 ssm 0,0 537 ssm 0,0
538 SFDIAG (31) 538 SFDIAG (31)
539 ssm 0,0 539 ssm 0,0
540 MFDIAG_1 (28) 540 MFDIAG_1 (28)
541 nop 541 nop
542 ssm 0,0 542 ssm 0,0
543 nop 543 nop
544 544
545 ; 545 ;
546 ; Fallthrough 546 ; Fallthrough
547 ; 547 ;
548 548
549 perf_rdr_shift_in_W_leave: 549 perf_rdr_shift_in_W_leave:
550 bve (%r2) 550 bve (%r2)
551 .exit 551 .exit
552 MTDIAG_2 (24) ; restore DR2 552 MTDIAG_2 (24) ; restore DR2
553 .procend 553 .procend
554 ENDPROC(perf_rdr_shift_in_W) 554 ENDPROC(perf_rdr_shift_in_W)
555 555
556 556
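The dispatch at the top of perf_rdr_shift_in_W is worth a gloss: shladd puts 4 * RDR number in %r1, and blr scales that index by 8, so control lands exactly 32 bytes (one 8-instruction sequence) per RDR into the table that follows; that is why every sequence is padded to eight instructions. A rough C analogue is an indexed table of handlers; the handlers here are hypothetical stand-ins, not the real diagnose sequences:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t (*rdr_read_fn)(void);

static uint64_t rdr_read_generic(void) { return 0; }  /* stub sequence */
static uint64_t rdr_read_rdr10(void)   { return 0; }  /* stub: shifts back up */

/* One fixed-size slot per RDR, like the 32-byte slots the blr indexes.
 * The [0 ... 31] range initializer is a GCC extension. */
static rdr_read_fn rdr_table[32] = {
	[0 ... 31] = rdr_read_generic,
	[10]       = rdr_read_rdr10,
};

static uint64_t perf_rdr_shift_in_sketch(unsigned int rdr_num)
{
	return rdr_table[rdr_num & 31]();  /* blr-style table dispatch */
}

int main(void)
{
	printf("%llu\n",
	       (unsigned long long)perf_rdr_shift_in_sketch(10));
	return 0;
}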
557 ;*********************************************************************** 557 ;***********************************************************************
558 ;* 558 ;*
559 ;* Name: perf_rdr_shift_out_W 559 ;* Name: perf_rdr_shift_out_W
560 ;* 560 ;*
561 ;* Description: 561 ;* Description:
562 ;* This routine moves data to the RDRs. The double-word that 562 ;* This routine moves data to the RDRs. The double-word that
563 ;* arg1 points to is loaded and moved into the staging register. 563 ;* arg1 points to is loaded and moved into the staging register.
564 ;* Then the STDIAG instruction for the RDR # in arg0 is called 564 ;* Then the STDIAG instruction for the RDR # in arg0 is called
565 ;* to move the data to the RDR. 565 ;* to move the data to the RDR.
566 ;* 566 ;*
567 ;* Arguments: 567 ;* Arguments:
568 ;* arg0 = rdr number 568 ;* arg0 = rdr number
569 ;* arg1 = 64-bit value to write 569 ;* arg1 = 64-bit value to write
570 ;* %r24 - DR2 | DR2_SLOW_RET 570 ;* %r24 - DR2 | DR2_SLOW_RET
571 ;* %r23 - original DR2 value 571 ;* %r23 - original DR2 value
572 ;* 572 ;*
573 ;* Returns: 573 ;* Returns:
574 ;* None 574 ;* None
575 ;* 575 ;*
576 ;* Register usage: 576 ;* Register usage:
577 ;* 577 ;*
578 ;*********************************************************************** 578 ;***********************************************************************
579 579
580 ENTRY(perf_rdr_shift_out_W) 580 ENTRY(perf_rdr_shift_out_W)
581 .proc 581 .proc
582 .callinfo frame=0,NO_CALLS 582 .callinfo frame=0,NO_CALLS
583 .entry 583 .entry
584 ; 584 ;
585 ; NOTE: The PCX-W ERS states that DR2_SLOW_RET must be set before any 585 ; NOTE: The PCX-W ERS states that DR2_SLOW_RET must be set before any
586 ; shifting is done, from or to, the remote diagnose registers. 586 ; shifting is done, from or to, the remote diagnose registers.
587 ; 587 ;
588 588
589 depdi,z 1,DR2_SLOW_RET,1,%r24 589 depdi,z 1,DR2_SLOW_RET,1,%r24
590 MFDIAG_2 (23) 590 MFDIAG_2 (23)
591 or %r24,%r23,%r24 591 or %r24,%r23,%r24
592 MTDIAG_2 (24) ; set DR2_SLOW_RET 592 MTDIAG_2 (24) ; set DR2_SLOW_RET
593 MTDIAG_1 (25) ; data to the staging register 593 MTDIAG_1 (25) ; data to the staging register
594 shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number 594 shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number
595 blr %r1,%r0 ; branch to 8-instruction sequence 595 blr %r1,%r0 ; branch to 8-instruction sequence
596 nop 596 nop
597 597
598 ; 598 ;
599 ; RDR 0 write sequence 599 ; RDR 0 write sequence
600 ; 600 ;
601 sync ; RDR 0 write sequence 601 sync ; RDR 0 write sequence
602 ssm 0,0 602 ssm 0,0
603 STDIAG (0) 603 STDIAG (0)
604 ssm 0,0 604 ssm 0,0
605 b,n perf_rdr_shift_out_W_leave 605 b,n perf_rdr_shift_out_W_leave
606 nop 606 nop
607 ssm 0,0 607 ssm 0,0
608 nop 608 nop
609 609
610 ; 610 ;
611 ; RDR 1 write sequence 611 ; RDR 1 write sequence
612 ; 612 ;
613 sync 613 sync
614 ssm 0,0 614 ssm 0,0
615 STDIAG (1) 615 STDIAG (1)
616 ssm 0,0 616 ssm 0,0
617 b,n perf_rdr_shift_out_W_leave 617 b,n perf_rdr_shift_out_W_leave
618 nop 618 nop
619 ssm 0,0 619 ssm 0,0
620 nop 620 nop
621 621
622 ; 622 ;
623 ; RDR 2 write sequence 623 ; RDR 2 write sequence
624 ; 624 ;
625 sync 625 sync
626 ssm 0,0 626 ssm 0,0
627 STDIAG (2) 627 STDIAG (2)
628 ssm 0,0 628 ssm 0,0
629 b,n perf_rdr_shift_out_W_leave 629 b,n perf_rdr_shift_out_W_leave
630 nop 630 nop
631 ssm 0,0 631 ssm 0,0
632 nop 632 nop
633 633
634 ; 634 ;
635 ; RDR 3 write sequence 635 ; RDR 3 write sequence
636 ; 636 ;
637 sync 637 sync
638 ssm 0,0 638 ssm 0,0
639 STDIAG (3) 639 STDIAG (3)
640 ssm 0,0 640 ssm 0,0
641 b,n perf_rdr_shift_out_W_leave 641 b,n perf_rdr_shift_out_W_leave
642 nop 642 nop
643 ssm 0,0 643 ssm 0,0
644 nop 644 nop
645 645
646 ; 646 ;
647 ; RDR 4 write sequence 647 ; RDR 4 write sequence
648 ; 648 ;
649 sync 649 sync
650 ssm 0,0 650 ssm 0,0
651 STDIAG (4) 651 STDIAG (4)
652 ssm 0,0 652 ssm 0,0
653 b,n perf_rdr_shift_out_W_leave 653 b,n perf_rdr_shift_out_W_leave
654 nop 654 nop
655 ssm 0,0 655 ssm 0,0
656 nop 656 nop
657 657
658 ; 658 ;
659 ; RDR 5 write sequence 659 ; RDR 5 write sequence
660 ; 660 ;
661 sync 661 sync
662 ssm 0,0 662 ssm 0,0
663 STDIAG (5) 663 STDIAG (5)
664 ssm 0,0 664 ssm 0,0
665 b,n perf_rdr_shift_out_W_leave 665 b,n perf_rdr_shift_out_W_leave
666 nop 666 nop
667 ssm 0,0 667 ssm 0,0
668 nop 668 nop
669 669
670 ; 670 ;
671 ; RDR 6 write sequence 671 ; RDR 6 write sequence
672 ; 672 ;
673 sync 673 sync
674 ssm 0,0 674 ssm 0,0
675 STDIAG (6) 675 STDIAG (6)
676 ssm 0,0 676 ssm 0,0
677 b,n perf_rdr_shift_out_W_leave 677 b,n perf_rdr_shift_out_W_leave
678 nop 678 nop
679 ssm 0,0 679 ssm 0,0
680 nop 680 nop
681 681
682 ; 682 ;
683 ; RDR 7 write sequence 683 ; RDR 7 write sequence
684 ; 684 ;
685 sync 685 sync
686 ssm 0,0 686 ssm 0,0
687 STDIAG (7) 687 STDIAG (7)
688 ssm 0,0 688 ssm 0,0
689 b,n perf_rdr_shift_out_W_leave 689 b,n perf_rdr_shift_out_W_leave
690 nop 690 nop
691 ssm 0,0 691 ssm 0,0
692 nop 692 nop
693 693
694 ; 694 ;
695 ; RDR 8 write sequence 695 ; RDR 8 write sequence
696 ; 696 ;
697 sync 697 sync
698 ssm 0,0 698 ssm 0,0
699 STDIAG (8) 699 STDIAG (8)
700 ssm 0,0 700 ssm 0,0
701 b,n perf_rdr_shift_out_W_leave 701 b,n perf_rdr_shift_out_W_leave
702 nop 702 nop
703 ssm 0,0 703 ssm 0,0
704 nop 704 nop
705 705
706 ; 706 ;
707 ; RDR 9 write sequence 707 ; RDR 9 write sequence
708 ; 708 ;
709 sync 709 sync
710 ssm 0,0 710 ssm 0,0
711 STDIAG (9) 711 STDIAG (9)
712 ssm 0,0 712 ssm 0,0
713 b,n perf_rdr_shift_out_W_leave 713 b,n perf_rdr_shift_out_W_leave
714 nop 714 nop
715 ssm 0,0 715 ssm 0,0
716 nop 716 nop
717 717
718 ; 718 ;
719 ; RDR 10 write sequence 719 ; RDR 10 write sequence
720 ; 720 ;
721 sync 721 sync
722 ssm 0,0 722 ssm 0,0
723 STDIAG (10) 723 STDIAG (10)
724 STDIAG (26) 724 STDIAG (26)
725 ssm 0,0 725 ssm 0,0
726 b,n perf_rdr_shift_out_W_leave 726 b,n perf_rdr_shift_out_W_leave
727 ssm 0,0 727 ssm 0,0
728 nop 728 nop
729 729
730 ; 730 ;
731 ; RDR 11 write sequence 731 ; RDR 11 write sequence
732 ; 732 ;
733 sync 733 sync
734 ssm 0,0 734 ssm 0,0
735 STDIAG (11) 735 STDIAG (11)
736 STDIAG (27) 736 STDIAG (27)
737 ssm 0,0 737 ssm 0,0
738 b,n perf_rdr_shift_out_W_leave 738 b,n perf_rdr_shift_out_W_leave
739 ssm 0,0 739 ssm 0,0
740 nop 740 nop
741 741
742 ; 742 ;
743 ; RDR 12 write sequence 743 ; RDR 12 write sequence
744 ; 744 ;
745 sync 745 sync
746 ssm 0,0 746 ssm 0,0
747 STDIAG (12) 747 STDIAG (12)
748 ssm 0,0 748 ssm 0,0
749 b,n perf_rdr_shift_out_W_leave 749 b,n perf_rdr_shift_out_W_leave
750 nop 750 nop
751 ssm 0,0 751 ssm 0,0
752 nop 752 nop
753 753
754 ; 754 ;
755 ; RDR 13 write sequence 755 ; RDR 13 write sequence
756 ; 756 ;
757 sync 757 sync
758 ssm 0,0 758 ssm 0,0
759 STDIAG (13) 759 STDIAG (13)
760 ssm 0,0 760 ssm 0,0
761 b,n perf_rdr_shift_out_W_leave 761 b,n perf_rdr_shift_out_W_leave
762 nop 762 nop
763 ssm 0,0 763 ssm 0,0
764 nop 764 nop
765 765
766 ; 766 ;
767 ; RDR 14 write sequence 767 ; RDR 14 write sequence
768 ; 768 ;
769 sync 769 sync
770 ssm 0,0 770 ssm 0,0
771 STDIAG (14) 771 STDIAG (14)
772 ssm 0,0 772 ssm 0,0
773 b,n perf_rdr_shift_out_W_leave 773 b,n perf_rdr_shift_out_W_leave
774 nop 774 nop
775 ssm 0,0 775 ssm 0,0
776 nop 776 nop
777 777
778 ; 778 ;
779 ; RDR 15 write sequence 779 ; RDR 15 write sequence
780 ; 780 ;
781 sync 781 sync
782 ssm 0,0 782 ssm 0,0
783 STDIAG (15) 783 STDIAG (15)
784 ssm 0,0 784 ssm 0,0
785 b,n perf_rdr_shift_out_W_leave 785 b,n perf_rdr_shift_out_W_leave
786 nop 786 nop
787 ssm 0,0 787 ssm 0,0
788 nop 788 nop
789 789
790 ; 790 ;
791 ; RDR 16 write sequence 791 ; RDR 16 write sequence
792 ; 792 ;
793 sync 793 sync
794 ssm 0,0 794 ssm 0,0
795 STDIAG (16) 795 STDIAG (16)
796 ssm 0,0 796 ssm 0,0
797 b,n perf_rdr_shift_out_W_leave 797 b,n perf_rdr_shift_out_W_leave
798 nop 798 nop
799 ssm 0,0 799 ssm 0,0
800 nop 800 nop
801 801
802 ; 802 ;
803 ; RDR 17 write sequence 803 ; RDR 17 write sequence
804 ; 804 ;
805 sync 805 sync
806 ssm 0,0 806 ssm 0,0
807 STDIAG (17) 807 STDIAG (17)
808 ssm 0,0 808 ssm 0,0
809 b,n perf_rdr_shift_out_W_leave 809 b,n perf_rdr_shift_out_W_leave
810 nop 810 nop
811 ssm 0,0 811 ssm 0,0
812 nop 812 nop
813 813
814 ; 814 ;
815 ; RDR 18 write sequence 815 ; RDR 18 write sequence
816 ; 816 ;
817 sync 817 sync
818 ssm 0,0 818 ssm 0,0
819 STDIAG (18) 819 STDIAG (18)
820 ssm 0,0 820 ssm 0,0
821 b,n perf_rdr_shift_out_W_leave 821 b,n perf_rdr_shift_out_W_leave
822 nop 822 nop
823 ssm 0,0 823 ssm 0,0
824 nop 824 nop
825 825
826 ; 826 ;
827 ; RDR 19 write sequence 827 ; RDR 19 write sequence
828 ; 828 ;
829 sync 829 sync
830 ssm 0,0 830 ssm 0,0
831 STDIAG (19) 831 STDIAG (19)
832 ssm 0,0 832 ssm 0,0
833 b,n perf_rdr_shift_out_W_leave 833 b,n perf_rdr_shift_out_W_leave
834 nop 834 nop
835 ssm 0,0 835 ssm 0,0
836 nop 836 nop
837 837
838 ; 838 ;
839 ; RDR 20 write sequence 839 ; RDR 20 write sequence
840 ; 840 ;
841 sync 841 sync
842 ssm 0,0 842 ssm 0,0
843 STDIAG (20) 843 STDIAG (20)
844 ssm 0,0 844 ssm 0,0
845 b,n perf_rdr_shift_out_W_leave 845 b,n perf_rdr_shift_out_W_leave
846 nop 846 nop
847 ssm 0,0 847 ssm 0,0
848 nop 848 nop
849 849
850 ; 850 ;
851 ; RDR 21 write sequence 851 ; RDR 21 write sequence
852 ; 852 ;
853 sync 853 sync
854 ssm 0,0 854 ssm 0,0
855 STDIAG (21) 855 STDIAG (21)
856 ssm 0,0 856 ssm 0,0
857 b,n perf_rdr_shift_out_W_leave 857 b,n perf_rdr_shift_out_W_leave
858 nop 858 nop
859 ssm 0,0 859 ssm 0,0
860 nop 860 nop
861 861
862 ; 862 ;
863 ; RDR 22 write sequence 863 ; RDR 22 write sequence
864 ; 864 ;
865 sync 865 sync
866 ssm 0,0 866 ssm 0,0
867 STDIAG (22) 867 STDIAG (22)
868 ssm 0,0 868 ssm 0,0
869 b,n perf_rdr_shift_out_W_leave 869 b,n perf_rdr_shift_out_W_leave
870 nop 870 nop
871 ssm 0,0 871 ssm 0,0
872 nop 872 nop
873 873
874 ; 874 ;
875 ; RDR 23 write sequence 875 ; RDR 23 write sequence
876 ; 876 ;
877 sync 877 sync
878 ssm 0,0 878 ssm 0,0
879 STDIAG (23) 879 STDIAG (23)
880 ssm 0,0 880 ssm 0,0
881 b,n perf_rdr_shift_out_W_leave 881 b,n perf_rdr_shift_out_W_leave
882 nop 882 nop
883 ssm 0,0 883 ssm 0,0
884 nop 884 nop
885 885
886 ; 886 ;
887 ; RDR 24 write sequence 887 ; RDR 24 write sequence
888 ; 888 ;
889 sync 889 sync
890 ssm 0,0 890 ssm 0,0
891 STDIAG (24) 891 STDIAG (24)
892 ssm 0,0 892 ssm 0,0
893 b,n perf_rdr_shift_out_W_leave 893 b,n perf_rdr_shift_out_W_leave
894 nop 894 nop
895 ssm 0,0 895 ssm 0,0
896 nop 896 nop
897 897
898 ; 898 ;
899 ; RDR 25 write sequence 899 ; RDR 25 write sequence
900 ; 900 ;
901 sync 901 sync
902 ssm 0,0 902 ssm 0,0
903 STDIAG (25) 903 STDIAG (25)
904 ssm 0,0 904 ssm 0,0
905 b,n perf_rdr_shift_out_W_leave 905 b,n perf_rdr_shift_out_W_leave
906 nop 906 nop
907 ssm 0,0 907 ssm 0,0
908 nop 908 nop
909 909
910 ; 910 ;
911 ; RDR 26 write sequence 911 ; RDR 26 write sequence
912 ; 912 ;
913 sync 913 sync
914 ssm 0,0 914 ssm 0,0
915 STDIAG (10) 915 STDIAG (10)
916 STDIAG (26) 916 STDIAG (26)
917 ssm 0,0 917 ssm 0,0
918 b,n perf_rdr_shift_out_W_leave 918 b,n perf_rdr_shift_out_W_leave
919 ssm 0,0 919 ssm 0,0
920 nop 920 nop
921 921
922 ; 922 ;
923 ; RDR 27 write sequence 923 ; RDR 27 write sequence
924 ; 924 ;
925 sync 925 sync
926 ssm 0,0 926 ssm 0,0
927 STDIAG (11) 927 STDIAG (11)
928 STDIAG (27) 928 STDIAG (27)
929 ssm 0,0 929 ssm 0,0
930 b,n perf_rdr_shift_out_W_leave 930 b,n perf_rdr_shift_out_W_leave
931 ssm 0,0 931 ssm 0,0
932 nop 932 nop
933 933
934 ; 934 ;
935 ; RDR 28 write sequence 935 ; RDR 28 write sequence
936 ; 936 ;
937 sync 937 sync
938 ssm 0,0 938 ssm 0,0
939 STDIAG (28) 939 STDIAG (28)
940 ssm 0,0 940 ssm 0,0
941 b,n perf_rdr_shift_out_W_leave 941 b,n perf_rdr_shift_out_W_leave
942 nop 942 nop
943 ssm 0,0 943 ssm 0,0
944 nop 944 nop
945 945
946 ; 946 ;
947 ; RDR 29 write sequence 947 ; RDR 29 write sequence
948 ; 948 ;
949 sync 949 sync
950 ssm 0,0 950 ssm 0,0
951 STDIAG (29) 951 STDIAG (29)
952 ssm 0,0 952 ssm 0,0
953 b,n perf_rdr_shift_out_W_leave 953 b,n perf_rdr_shift_out_W_leave
954 nop 954 nop
955 ssm 0,0 955 ssm 0,0
956 nop 956 nop
957 957
958 ; 958 ;
959 ; RDR 30 write sequence 959 ; RDR 30 write sequence
960 ; 960 ;
961 sync 961 sync
962 ssm 0,0 962 ssm 0,0
963 STDIAG (30) 963 STDIAG (30)
964 ssm 0,0 964 ssm 0,0
965 b,n perf_rdr_shift_out_W_leave 965 b,n perf_rdr_shift_out_W_leave
966 nop 966 nop
967 ssm 0,0 967 ssm 0,0
968 nop 968 nop
969 969
970 ; 970 ;
971 ; RDR 31 write sequence 971 ; RDR 31 write sequence
972 ; 972 ;
973 sync 973 sync
974 ssm 0,0 974 ssm 0,0
975 STDIAG (31) 975 STDIAG (31)
976 ssm 0,0 976 ssm 0,0
977 b,n perf_rdr_shift_out_W_leave 977 b,n perf_rdr_shift_out_W_leave
978 nop 978 nop
979 ssm 0,0 979 ssm 0,0
980 nop 980 nop
981 981
982 perf_rdr_shift_out_W_leave: 982 perf_rdr_shift_out_W_leave:
983 bve (%r2) 983 bve (%r2)
984 .exit 984 .exit
985 MTDIAG_2 (23) ; restore DR2 985 MTDIAG_2 (23) ; restore DR2
986 .procend 986 .procend
987 ENDPROC(perf_rdr_shift_out_W) 987 ENDPROC(perf_rdr_shift_out_W)
988 988
989 989
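How these entry points get used is easiest to see from the caller's side. A heavily hedged sketch: the prototypes and the 64-bits-per-call chunking below are assumptions for illustration (the real declarations live with the perf driver in arch/parisc/kernel/perf.c), and stub bodies stand in for the asm so the sketch compiles on its own:

#include <stdint.h>
#include <stdio.h>

/* Stubs standing in for the asm entry points above; signatures assumed. */
static uint64_t perf_rdr_shift_in_W(uint32_t rdr, uint16_t width)
{
	(void)rdr; (void)width;
	return 0;
}

static void perf_rdr_shift_out_W(uint32_t rdr, uint64_t chunk)
{
	(void)rdr; (void)chunk;
}

/* Hypothetical: copy a width-bit RDR image out and back, 64 bits at a
 * time, the way a driver would save and restore counter state. */
static void rdr_copy_sketch(uint32_t rdr, uint16_t width, uint64_t *img)
{
	int i, words = (width + 63) / 64;

	for (i = 0; i < words; i++)
		img[i] = perf_rdr_shift_in_W(rdr, width);
	for (i = 0; i < words; i++)
		perf_rdr_shift_out_W(rdr, img[i]);
}

int main(void)
{
	uint64_t img[2];

	rdr_copy_sketch(10, 91, img);  /* e.g. a 91-bit RDR */
	puts("done");
	return 0;
}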
990 ;*********************************************************************** 990 ;***********************************************************************
991 ;* 991 ;*
992 ;* Name: perf_rdr_shift_in_U 992 ;* Name: perf_rdr_shift_in_U
993 ;* 993 ;*
994 ;* Description: 994 ;* Description:
995 ;* This routine shifts data in from the RDR in arg0 and returns 995 ;* This routine shifts data in from the RDR in arg0 and returns
996 ;* the result in ret0. If the RDR is <= 64 bits in length, it 996 ;* the result in ret0. If the RDR is <= 64 bits in length, it
997 ;* is shifted back up immediately. This is to compensate 997 ;* is shifted back up immediately. This is to compensate
998 ;* for RDR10, which has bits that preclude PDC stack operations 998 ;* for RDR10, which has bits that preclude PDC stack operations
999 ;* when they are in the wrong state. 999 ;* when they are in the wrong state.
1000 ;* 1000 ;*
1001 ;* Arguments: 1001 ;* Arguments:
1002 ;* arg0 : rdr to be read 1002 ;* arg0 : rdr to be read
1003 ;* arg1 : bit length of rdr 1003 ;* arg1 : bit length of rdr
1004 ;* 1004 ;*
1005 ;* Returns: 1005 ;* Returns:
1006 ;* ret0 = next 64 bits of rdr data from staging register 1006 ;* ret0 = next 64 bits of rdr data from staging register
1007 ;* 1007 ;*
1008 ;* Register usage: 1008 ;* Register usage:
1009 ;* arg0 : rdr to be read 1009 ;* arg0 : rdr to be read
1010 ;* arg1 : bit length of rdr 1010 ;* arg1 : bit length of rdr
1011 ;* %r24 - original DR2 value 1011 ;* %r24 - original DR2 value
1012 ;* %r23 - DR2 | DR2_SLOW_RET 1012 ;* %r23 - DR2 | DR2_SLOW_RET
1013 ;* %r1 - scratch 1013 ;* %r1 - scratch
1014 ;* 1014 ;*
1015 ;*********************************************************************** 1015 ;***********************************************************************
1016 1016
1017 ENTRY(perf_rdr_shift_in_U) 1017 ENTRY(perf_rdr_shift_in_U)
1018 .proc 1018 .proc
1019 .callinfo frame=0,NO_CALLS 1019 .callinfo frame=0,NO_CALLS
1020 .entry 1020 .entry
1021 1021
1022 ; read (shift in) the RDR. 1022 ; read (shift in) the RDR.
1023 ; 1023 ;
1024 ; NOTE: The PCX-U ERS states that DR2_SLOW_RET must be set before any 1024 ; NOTE: The PCX-U ERS states that DR2_SLOW_RET must be set before any
1025 ; shifting is done, from or to, the remote diagnose registers. 1025 ; shifting is done, from or to, the remote diagnose registers.
1026 1026
1027 depdi,z 1,DR2_SLOW_RET,1,%r29 1027 depdi,z 1,DR2_SLOW_RET,1,%r29
1028 MFDIAG_2 (24) 1028 MFDIAG_2 (24)
1029 or %r24,%r29,%r29 1029 or %r24,%r29,%r29
1030 MTDIAG_2 (29) ; set DR2_SLOW_RET 1030 MTDIAG_2 (29) ; set DR2_SLOW_RET
1031 1031
1032 nop 1032 nop
1033 nop 1033 nop
1034 nop 1034 nop
1035 nop 1035 nop
1036 1036
1037 ; 1037 ;
1038 ; Start of next 32-byte cacheline 1038 ; Start of next 32-byte cacheline
1039 ; 1039 ;
1040 nop 1040 nop
1041 nop 1041 nop
1042 nop 1042 nop
1043 extrd,u arg1,63,6,%r1 1043 extrd,u arg1,63,6,%r1
1044 1044
1045 mtsar %r1 1045 mtsar %r1
1046 shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number 1046 shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number
1047 blr %r1,%r0 ; branch to 8-instruction sequence 1047 blr %r1,%r0 ; branch to 8-instruction sequence
1048 nop 1048 nop
1049 1049
1050 ; 1050 ;
1051 ; Start of next 32-byte cacheline 1051 ; Start of next 32-byte cacheline
1052 ; 1052 ;
1053 SFDIAG (0) ; RDR 0 read sequence 1053 SFDIAG (0) ; RDR 0 read sequence
1054 ssm 0,0 1054 ssm 0,0
1055 MFDIAG_1 (28) 1055 MFDIAG_1 (28)
1056 shrpd ret0,%r0,%sar,%r1 1056 shrpd ret0,%r0,%sar,%r1
1057 MTDIAG_1 (1) 1057 MTDIAG_1 (1)
1058 STDIAG (0) 1058 STDIAG (0)
1059 ssm 0,0 1059 ssm 0,0
1060 b,n perf_rdr_shift_in_U_leave 1060 b,n perf_rdr_shift_in_U_leave
1061 1061
1062 SFDIAG (1) ; RDR 1 read sequence 1062 SFDIAG (1) ; RDR 1 read sequence
1063 ssm 0,0 1063 ssm 0,0
1064 MFDIAG_1 (28) 1064 MFDIAG_1 (28)
1065 shrpd ret0,%r0,%sar,%r1 1065 shrpd ret0,%r0,%sar,%r1
1066 MTDIAG_1 (1) 1066 MTDIAG_1 (1)
1067 STDIAG (1) 1067 STDIAG (1)
1068 ssm 0,0 1068 ssm 0,0
1069 b,n perf_rdr_shift_in_U_leave 1069 b,n perf_rdr_shift_in_U_leave
1070 1070
1071 sync ; RDR 2 read sequence 1071 sync ; RDR 2 read sequence
1072 ssm 0,0 1072 ssm 0,0
1073 SFDIAG (4) 1073 SFDIAG (4)
1074 ssm 0,0 1074 ssm 0,0
1075 MFDIAG_1 (28) 1075 MFDIAG_1 (28)
1076 b,n perf_rdr_shift_in_U_leave 1076 b,n perf_rdr_shift_in_U_leave
1077 ssm 0,0 1077 ssm 0,0
1078 nop 1078 nop
1079 1079
1080 sync ; RDR 3 read sequence 1080 sync ; RDR 3 read sequence
1081 ssm 0,0 1081 ssm 0,0
1082 SFDIAG (3) 1082 SFDIAG (3)
1083 ssm 0,0 1083 ssm 0,0
1084 MFDIAG_1 (28) 1084 MFDIAG_1 (28)
1085 b,n perf_rdr_shift_in_U_leave 1085 b,n perf_rdr_shift_in_U_leave
1086 ssm 0,0 1086 ssm 0,0
1087 nop 1087 nop
1088 1088
1089 sync ; RDR 4 read sequence 1089 sync ; RDR 4 read sequence
1090 ssm 0,0 1090 ssm 0,0
1091 SFDIAG (4) 1091 SFDIAG (4)
1092 ssm 0,0 1092 ssm 0,0
1093 MFDIAG_1 (28) 1093 MFDIAG_1 (28)
1094 b,n perf_rdr_shift_in_U_leave 1094 b,n perf_rdr_shift_in_U_leave
1095 ssm 0,0 1095 ssm 0,0
1096 nop 1096 nop
1097 1097
1098 sync ; RDR 5 read sequence 1098 sync ; RDR 5 read sequence
1099 ssm 0,0 1099 ssm 0,0
1100 SFDIAG (5) 1100 SFDIAG (5)
1101 ssm 0,0 1101 ssm 0,0
1102 MFDIAG_1 (28) 1102 MFDIAG_1 (28)
1103 b,n perf_rdr_shift_in_U_leave 1103 b,n perf_rdr_shift_in_U_leave
1104 ssm 0,0 1104 ssm 0,0
1105 nop 1105 nop
1106 1106
1107 sync ; RDR 6 read sequence 1107 sync ; RDR 6 read sequence
1108 ssm 0,0 1108 ssm 0,0
1109 SFDIAG (6) 1109 SFDIAG (6)
1110 ssm 0,0 1110 ssm 0,0
1111 MFDIAG_1 (28) 1111 MFDIAG_1 (28)
1112 b,n perf_rdr_shift_in_U_leave 1112 b,n perf_rdr_shift_in_U_leave
1113 ssm 0,0 1113 ssm 0,0
1114 nop 1114 nop
1115 1115
1116 sync ; RDR 7 read sequence 1116 sync ; RDR 7 read sequence
1117 ssm 0,0 1117 ssm 0,0
1118 SFDIAG (7) 1118 SFDIAG (7)
1119 ssm 0,0 1119 ssm 0,0
1120 MFDIAG_1 (28) 1120 MFDIAG_1 (28)
1121 b,n perf_rdr_shift_in_U_leave 1121 b,n perf_rdr_shift_in_U_leave
1122 ssm 0,0 1122 ssm 0,0
1123 nop 1123 nop
1124 1124
1125 b,n perf_rdr_shift_in_U_leave 1125 b,n perf_rdr_shift_in_U_leave
1126 nop 1126 nop
1127 nop 1127 nop
1128 nop 1128 nop
1129 nop 1129 nop
1130 nop 1130 nop
1131 nop 1131 nop
1132 nop 1132 nop
1133 1133
1134 SFDIAG (9) ; RDR 9 read sequence 1134 SFDIAG (9) ; RDR 9 read sequence
1135 ssm 0,0 1135 ssm 0,0
1136 MFDIAG_1 (28) 1136 MFDIAG_1 (28)
1137 shrpd ret0,%r0,%sar,%r1 1137 shrpd ret0,%r0,%sar,%r1
1138 MTDIAG_1 (1) 1138 MTDIAG_1 (1)
1139 STDIAG (9) 1139 STDIAG (9)
1140 ssm 0,0 1140 ssm 0,0
1141 b,n perf_rdr_shift_in_U_leave 1141 b,n perf_rdr_shift_in_U_leave
1142 1142
1143 SFDIAG (10) ; RDR 10 read sequence 1143 SFDIAG (10) ; RDR 10 read sequence
1144 ssm 0,0 1144 ssm 0,0
1145 MFDIAG_1 (28) 1145 MFDIAG_1 (28)
1146 shrpd ret0,%r0,%sar,%r1 1146 shrpd ret0,%r0,%sar,%r1
1147 MTDIAG_1 (1) 1147 MTDIAG_1 (1)
1148 STDIAG (10) 1148 STDIAG (10)
1149 ssm 0,0 1149 ssm 0,0
1150 b,n perf_rdr_shift_in_U_leave 1150 b,n perf_rdr_shift_in_U_leave
1151 1151
1152 SFDIAG (11) ; RDR 11 read sequence 1152 SFDIAG (11) ; RDR 11 read sequence
1153 ssm 0,0 1153 ssm 0,0
1154 MFDIAG_1 (28) 1154 MFDIAG_1 (28)
1155 shrpd ret0,%r0,%sar,%r1 1155 shrpd ret0,%r0,%sar,%r1
1156 MTDIAG_1 (1) 1156 MTDIAG_1 (1)
1157 STDIAG (11) 1157 STDIAG (11)
1158 ssm 0,0 1158 ssm 0,0
1159 b,n perf_rdr_shift_in_U_leave 1159 b,n perf_rdr_shift_in_U_leave
1160 1160
1161 SFDIAG (12) ; RDR 12 read sequence 1161 SFDIAG (12) ; RDR 12 read sequence
1162 ssm 0,0 1162 ssm 0,0
1163 MFDIAG_1 (28) 1163 MFDIAG_1 (28)
1164 shrpd ret0,%r0,%sar,%r1 1164 shrpd ret0,%r0,%sar,%r1
1165 MTDIAG_1 (1) 1165 MTDIAG_1 (1)
1166 STDIAG (12) 1166 STDIAG (12)
1167 ssm 0,0 1167 ssm 0,0
1168 b,n perf_rdr_shift_in_U_leave 1168 b,n perf_rdr_shift_in_U_leave
1169 1169
1170 SFDIAG (13) ; RDR 13 read sequence 1170 SFDIAG (13) ; RDR 13 read sequence
1171 ssm 0,0 1171 ssm 0,0
1172 MFDIAG_1 (28) 1172 MFDIAG_1 (28)
1173 shrpd ret0,%r0,%sar,%r1 1173 shrpd ret0,%r0,%sar,%r1
1174 MTDIAG_1 (1) 1174 MTDIAG_1 (1)
1175 STDIAG (13) 1175 STDIAG (13)
1176 ssm 0,0 1176 ssm 0,0
1177 b,n perf_rdr_shift_in_U_leave 1177 b,n perf_rdr_shift_in_U_leave
1178 1178
1179 SFDIAG (14) ; RDR 14 read sequence 1179 SFDIAG (14) ; RDR 14 read sequence
1180 ssm 0,0 1180 ssm 0,0
1181 MFDIAG_1 (28) 1181 MFDIAG_1 (28)
1182 shrpd ret0,%r0,%sar,%r1 1182 shrpd ret0,%r0,%sar,%r1
1183 MTDIAG_1 (1) 1183 MTDIAG_1 (1)
1184 STDIAG (14) 1184 STDIAG (14)
1185 ssm 0,0 1185 ssm 0,0
1186 b,n perf_rdr_shift_in_U_leave 1186 b,n perf_rdr_shift_in_U_leave
1187 1187
1188 SFDIAG (15) ; RDR 15 read sequence 1188 SFDIAG (15) ; RDR 15 read sequence
1189 ssm 0,0 1189 ssm 0,0
1190 MFDIAG_1 (28) 1190 MFDIAG_1 (28)
1191 shrpd ret0,%r0,%sar,%r1 1191 shrpd ret0,%r0,%sar,%r1
1192 MTDIAG_1 (1) 1192 MTDIAG_1 (1)
1193 STDIAG (15) 1193 STDIAG (15)
1194 ssm 0,0 1194 ssm 0,0
1195 b,n perf_rdr_shift_in_U_leave 1195 b,n perf_rdr_shift_in_U_leave
1196 1196
1197 sync ; RDR 16 read sequence 1197 sync ; RDR 16 read sequence
1198 ssm 0,0 1198 ssm 0,0
1199 SFDIAG (16) 1199 SFDIAG (16)
1200 ssm 0,0 1200 ssm 0,0
1201 MFDIAG_1 (28) 1201 MFDIAG_1 (28)
1202 b,n perf_rdr_shift_in_U_leave 1202 b,n perf_rdr_shift_in_U_leave
1203 ssm 0,0 1203 ssm 0,0
1204 nop 1204 nop
1205 1205
1206 SFDIAG (17) ; RDR 17 read sequence 1206 SFDIAG (17) ; RDR 17 read sequence
1207 ssm 0,0 1207 ssm 0,0
1208 MFDIAG_1 (28) 1208 MFDIAG_1 (28)
1209 shrpd ret0,%r0,%sar,%r1 1209 shrpd ret0,%r0,%sar,%r1
1210 MTDIAG_1 (1) 1210 MTDIAG_1 (1)
1211 STDIAG (17) 1211 STDIAG (17)
1212 ssm 0,0 1212 ssm 0,0
1213 b,n perf_rdr_shift_in_U_leave 1213 b,n perf_rdr_shift_in_U_leave
1214 1214
1215 SFDIAG (18) ; RDR 18 read sequence 1215 SFDIAG (18) ; RDR 18 read sequence
1216 ssm 0,0 1216 ssm 0,0
1217 MFDIAG_1 (28) 1217 MFDIAG_1 (28)
1218 shrpd ret0,%r0,%sar,%r1 1218 shrpd ret0,%r0,%sar,%r1
1219 MTDIAG_1 (1) 1219 MTDIAG_1 (1)
1220 STDIAG (18) 1220 STDIAG (18)
1221 ssm 0,0 1221 ssm 0,0
1222 b,n perf_rdr_shift_in_U_leave 1222 b,n perf_rdr_shift_in_U_leave
1223 1223
1224 b,n perf_rdr_shift_in_U_leave 1224 b,n perf_rdr_shift_in_U_leave
1225 nop 1225 nop
1226 nop 1226 nop
1227 nop 1227 nop
1228 nop 1228 nop
1229 nop 1229 nop
1230 nop 1230 nop
1231 nop 1231 nop
1232 1232
1233 sync ; RDR 20 read sequence 1233 sync ; RDR 20 read sequence
1234 ssm 0,0 1234 ssm 0,0
1235 SFDIAG (20) 1235 SFDIAG (20)
1236 ssm 0,0 1236 ssm 0,0
1237 MFDIAG_1 (28) 1237 MFDIAG_1 (28)
1238 b,n perf_rdr_shift_in_U_leave 1238 b,n perf_rdr_shift_in_U_leave
1239 ssm 0,0 1239 ssm 0,0
1240 nop 1240 nop
1241 1241
1242 sync ; RDR 21 read sequence 1242 sync ; RDR 21 read sequence
1243 ssm 0,0 1243 ssm 0,0
1244 SFDIAG (21) 1244 SFDIAG (21)
1245 ssm 0,0 1245 ssm 0,0
1246 MFDIAG_1 (28) 1246 MFDIAG_1 (28)
1247 b,n perf_rdr_shift_in_U_leave 1247 b,n perf_rdr_shift_in_U_leave
1248 ssm 0,0 1248 ssm 0,0
1249 nop 1249 nop
1250 1250
1251 sync ; RDR 22 read sequence 1251 sync ; RDR 22 read sequence
1252 ssm 0,0 1252 ssm 0,0
1253 SFDIAG (22) 1253 SFDIAG (22)
1254 ssm 0,0 1254 ssm 0,0
1255 MFDIAG_1 (28) 1255 MFDIAG_1 (28)
1256 b,n perf_rdr_shift_in_U_leave 1256 b,n perf_rdr_shift_in_U_leave
1257 ssm 0,0 1257 ssm 0,0
1258 nop 1258 nop
1259 1259
1260 sync ; RDR 23 read sequence 1260 sync ; RDR 23 read sequence
1261 ssm 0,0 1261 ssm 0,0
1262 SFDIAG (23) 1262 SFDIAG (23)
1263 ssm 0,0 1263 ssm 0,0
1264 MFDIAG_1 (28) 1264 MFDIAG_1 (28)
1265 b,n perf_rdr_shift_in_U_leave 1265 b,n perf_rdr_shift_in_U_leave
1266 ssm 0,0 1266 ssm 0,0
1267 nop 1267 nop
1268 1268
1269 sync ; RDR 24 read sequence 1269 sync ; RDR 24 read sequence
1270 ssm 0,0 1270 ssm 0,0
1271 SFDIAG (24) 1271 SFDIAG (24)
1272 ssm 0,0 1272 ssm 0,0
1273 MFDIAG_1 (28) 1273 MFDIAG_1 (28)
1274 b,n perf_rdr_shift_in_U_leave 1274 b,n perf_rdr_shift_in_U_leave
1275 ssm 0,0 1275 ssm 0,0
1276 nop 1276 nop
1277 1277
1278 sync ; RDR 25 read sequence 1278 sync ; RDR 25 read sequence
1279 ssm 0,0 1279 ssm 0,0
1280 SFDIAG (25) 1280 SFDIAG (25)
1281 ssm 0,0 1281 ssm 0,0
1282 MFDIAG_1 (28) 1282 MFDIAG_1 (28)
1283 b,n perf_rdr_shift_in_U_leave 1283 b,n perf_rdr_shift_in_U_leave
1284 ssm 0,0 1284 ssm 0,0
1285 nop 1285 nop
1286 1286
1287 SFDIAG (26) ; RDR 26 read sequence 1287 SFDIAG (26) ; RDR 26 read sequence
1288 ssm 0,0 1288 ssm 0,0
1289 MFDIAG_1 (28) 1289 MFDIAG_1 (28)
1290 shrpd ret0,%r0,%sar,%r1 1290 shrpd ret0,%r0,%sar,%r1
1291 MTDIAG_1 (1) 1291 MTDIAG_1 (1)
1292 STDIAG (26) 1292 STDIAG (26)
1293 ssm 0,0 1293 ssm 0,0
1294 b,n perf_rdr_shift_in_U_leave 1294 b,n perf_rdr_shift_in_U_leave
1295 1295
1296 SFDIAG (27) ; RDR 27 read sequence 1296 SFDIAG (27) ; RDR 27 read sequence
1297 ssm 0,0 1297 ssm 0,0
1298 MFDIAG_1 (28) 1298 MFDIAG_1 (28)
1299 shrpd ret0,%r0,%sar,%r1 1299 shrpd ret0,%r0,%sar,%r1
1300 MTDIAG_1 (1) 1300 MTDIAG_1 (1)
1301 STDIAG (27) 1301 STDIAG (27)
1302 ssm 0,0 1302 ssm 0,0
1303 b,n perf_rdr_shift_in_U_leave 1303 b,n perf_rdr_shift_in_U_leave
1304 1304
1305 sync ; RDR 28 read sequence 1305 sync ; RDR 28 read sequence
1306 ssm 0,0 1306 ssm 0,0
1307 SFDIAG (28) 1307 SFDIAG (28)
1308 ssm 0,0 1308 ssm 0,0
1309 MFDIAG_1 (28) 1309 MFDIAG_1 (28)
1310 b,n perf_rdr_shift_in_U_leave 1310 b,n perf_rdr_shift_in_U_leave
1311 ssm 0,0 1311 ssm 0,0
1312 nop 1312 nop
1313 1313
1314 b,n perf_rdr_shift_in_U_leave 1314 b,n perf_rdr_shift_in_U_leave
1315 nop 1315 nop
1316 nop 1316 nop
1317 nop 1317 nop
1318 nop 1318 nop
1319 nop 1319 nop
1320 nop 1320 nop
1321 nop 1321 nop
1322 1322
1323 SFDIAG (30) ; RDR 30 read sequence 1323 SFDIAG (30) ; RDR 30 read sequence
1324 ssm 0,0 1324 ssm 0,0
1325 MFDIAG_1 (28) 1325 MFDIAG_1 (28)
1326 shrpd ret0,%r0,%sar,%r1 1326 shrpd ret0,%r0,%sar,%r1
1327 MTDIAG_1 (1) 1327 MTDIAG_1 (1)
1328 STDIAG (30) 1328 STDIAG (30)
1329 ssm 0,0 1329 ssm 0,0
1330 b,n perf_rdr_shift_in_U_leave 1330 b,n perf_rdr_shift_in_U_leave
1331 1331
1332 SFDIAG (31) ; RDR 31 read sequence 1332 SFDIAG (31) ; RDR 31 read sequence
1333 ssm 0,0 1333 ssm 0,0
1334 MFDIAG_1 (28) 1334 MFDIAG_1 (28)
1335 shrpd ret0,%r0,%sar,%r1 1335 shrpd ret0,%r0,%sar,%r1
1336 MTDIAG_1 (1) 1336 MTDIAG_1 (1)
1337 STDIAG (31) 1337 STDIAG (31)
1338 ssm 0,0 1338 ssm 0,0
1339 b,n perf_rdr_shift_in_U_leave 1339 b,n perf_rdr_shift_in_U_leave
1340 nop 1340 nop
1341 1341
1342 perf_rdr_shift_in_U_leave: 1342 perf_rdr_shift_in_U_leave:
1343 bve (%r2) 1343 bve (%r2)
1344 .exit 1344 .exit
1345 MTDIAG_2 (24) ; restore DR2 1345 MTDIAG_2 (24) ; restore DR2
1346 .procend 1346 .procend
1347 ENDPROC(perf_rdr_shift_in_U) 1347 ENDPROC(perf_rdr_shift_in_U)
1348 1348
1349 ;*********************************************************************** 1349 ;***********************************************************************
1350 ;* 1350 ;*
1351 ;* Name: perf_rdr_shift_out_U 1351 ;* Name: perf_rdr_shift_out_U
1352 ;* 1352 ;*
1353 ;* Description: 1353 ;* Description:
1354 ;* This routine moves data to the RDRs. The double-word that 1354 ;* This routine moves data to the RDRs. The double-word that
1355 ;* arg1 points to is loaded and moved into the staging register. 1355 ;* arg1 points to is loaded and moved into the staging register.
1356 ;* Then the STDIAG instruction for the RDR # in arg0 is called 1356 ;* Then the STDIAG instruction for the RDR # in arg0 is called
1357 ;* to move the data to the RDR. 1357 ;* to move the data to the RDR.
1358 ;* 1358 ;*
1359 ;* Arguments: 1359 ;* Arguments:
1360 ;* arg0 = rdr target 1360 ;* arg0 = rdr target
1361 ;* arg1 = buffer pointer 1361 ;* arg1 = buffer pointer
1362 ;* 1362 ;*
1363 ;* Returns: 1363 ;* Returns:
1364 ;* None 1364 ;* None
1365 ;* 1365 ;*
1366 ;* Register usage: 1366 ;* Register usage:
1367 ;* arg0 = rdr target 1367 ;* arg0 = rdr target
1368 ;* arg1 = buffer pointer 1368 ;* arg1 = buffer pointer
1369 ;* %r24 - DR2 | DR2_SLOW_RET 1369 ;* %r24 - DR2 | DR2_SLOW_RET
1370 ;* %r23 - original DR2 value 1370 ;* %r23 - original DR2 value
1371 ;* 1371 ;*
1372 ;*********************************************************************** 1372 ;***********************************************************************
1373 1373
1374 ENTRY(perf_rdr_shift_out_U) 1374 ENTRY(perf_rdr_shift_out_U)
1375 .proc 1375 .proc
1376 .callinfo frame=0,NO_CALLS 1376 .callinfo frame=0,NO_CALLS
1377 .entry 1377 .entry
1378 1378
1379 ; 1379 ;
1380 ; NOTE: The PCX-U ERS states that DR2_SLOW_RET must be set before any 1380 ; NOTE: The PCX-U ERS states that DR2_SLOW_RET must be set before any
1381 ; shifting is done, from or to, the remote diagnose registers. 1381 ; shifting is done, from or to, the remote diagnose registers.
1382 ; 1382 ;
1383 1383
1384 depdi,z 1,DR2_SLOW_RET,1,%r24 1384 depdi,z 1,DR2_SLOW_RET,1,%r24
1385 MFDIAG_2 (23) 1385 MFDIAG_2 (23)
1386 or %r24,%r23,%r24 1386 or %r24,%r23,%r24
1387 MTDIAG_2 (24) ; set DR2_SLOW_RET 1387 MTDIAG_2 (24) ; set DR2_SLOW_RET
1388 1388
1389 MTDIAG_1 (25) ; data to the staging register 1389 MTDIAG_1 (25) ; data to the staging register
1390 shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number 1390 shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number
1391 blr %r1,%r0 ; branch to 8-instruction sequence 1391 blr %r1,%r0 ; branch to 8-instruction sequence
1392 nop 1392 nop
1393 1393
1394 ; 1394 ;
1395 ; 32-byte cacheline aligned 1395 ; 32-byte cacheline aligned
1396 ; 1396 ;
1397 1397
1398 sync ; RDR 0 write sequence 1398 sync ; RDR 0 write sequence
1399 ssm 0,0 1399 ssm 0,0
1400 STDIAG (0) 1400 STDIAG (0)
1401 ssm 0,0 1401 ssm 0,0
1402 b,n perf_rdr_shift_out_U_leave 1402 b,n perf_rdr_shift_out_U_leave
1403 nop 1403 nop
1404 ssm 0,0 1404 ssm 0,0
1405 nop 1405 nop
1406 1406
1407 sync ; RDR 1 write sequence 1407 sync ; RDR 1 write sequence
1408 ssm 0,0 1408 ssm 0,0
1409 STDIAG (1) 1409 STDIAG (1)
1410 ssm 0,0 1410 ssm 0,0
1411 b,n perf_rdr_shift_out_U_leave 1411 b,n perf_rdr_shift_out_U_leave
1412 nop 1412 nop
1413 ssm 0,0 1413 ssm 0,0
1414 nop 1414 nop
1415 1415
1416 sync ; RDR 2 write sequence 1416 sync ; RDR 2 write sequence
1417 ssm 0,0 1417 ssm 0,0
1418 STDIAG (2) 1418 STDIAG (2)
1419 ssm 0,0 1419 ssm 0,0
1420 b,n perf_rdr_shift_out_U_leave 1420 b,n perf_rdr_shift_out_U_leave
1421 nop 1421 nop
1422 ssm 0,0 1422 ssm 0,0
1423 nop 1423 nop
1424 1424
1425 sync ; RDR 3 write sequence 1425 sync ; RDR 3 write sequence
1426 ssm 0,0 1426 ssm 0,0
1427 STDIAG (3) 1427 STDIAG (3)
1428 ssm 0,0 1428 ssm 0,0
1429 b,n perf_rdr_shift_out_U_leave 1429 b,n perf_rdr_shift_out_U_leave
1430 nop 1430 nop
1431 ssm 0,0 1431 ssm 0,0
1432 nop 1432 nop
1433 1433
1434 sync ; RDR 4 write sequence 1434 sync ; RDR 4 write sequence
1435 ssm 0,0 1435 ssm 0,0
1436 STDIAG (4) 1436 STDIAG (4)
1437 ssm 0,0 1437 ssm 0,0
1438 b,n perf_rdr_shift_out_U_leave 1438 b,n perf_rdr_shift_out_U_leave
1439 nop 1439 nop
1440 ssm 0,0 1440 ssm 0,0
1441 nop 1441 nop
1442 1442
1443 sync ; RDR 5 write sequence 1443 sync ; RDR 5 write sequence
1444 ssm 0,0 1444 ssm 0,0
1445 STDIAG (5) 1445 STDIAG (5)
1446 ssm 0,0 1446 ssm 0,0
1447 b,n perf_rdr_shift_out_U_leave 1447 b,n perf_rdr_shift_out_U_leave
1448 nop 1448 nop
1449 ssm 0,0 1449 ssm 0,0
1450 nop 1450 nop
1451 1451
1452 sync ; RDR 6 write sequence 1452 sync ; RDR 6 write sequence
1453 ssm 0,0 1453 ssm 0,0
1454 STDIAG (6) 1454 STDIAG (6)
1455 ssm 0,0 1455 ssm 0,0
1456 b,n perf_rdr_shift_out_U_leave 1456 b,n perf_rdr_shift_out_U_leave
1457 nop 1457 nop
1458 ssm 0,0 1458 ssm 0,0
1459 nop 1459 nop
1460 1460
1461 sync ; RDR 7 write sequence 1461 sync ; RDR 7 write sequence
1462 ssm 0,0 1462 ssm 0,0
1463 STDIAG (7) 1463 STDIAG (7)
1464 ssm 0,0 1464 ssm 0,0
1465 b,n perf_rdr_shift_out_U_leave 1465 b,n perf_rdr_shift_out_U_leave
1466 nop 1466 nop
1467 ssm 0,0 1467 ssm 0,0
1468 nop 1468 nop
1469 1469
1470 sync ; RDR 8 write sequence 1470 sync ; RDR 8 write sequence
1471 ssm 0,0 1471 ssm 0,0
1472 STDIAG (8) 1472 STDIAG (8)
1473 ssm 0,0 1473 ssm 0,0
1474 b,n perf_rdr_shift_out_U_leave 1474 b,n perf_rdr_shift_out_U_leave
1475 nop 1475 nop
1476 ssm 0,0 1476 ssm 0,0
1477 nop 1477 nop
1478 1478
1479 sync ; RDR 9 write sequence 1479 sync ; RDR 9 write sequence
1480 ssm 0,0 1480 ssm 0,0
1481 STDIAG (9) 1481 STDIAG (9)
1482 ssm 0,0 1482 ssm 0,0
1483 b,n perf_rdr_shift_out_U_leave 1483 b,n perf_rdr_shift_out_U_leave
1484 nop 1484 nop
1485 ssm 0,0 1485 ssm 0,0
1486 nop 1486 nop
1487 1487
1488 sync ; RDR 10 write sequence 1488 sync ; RDR 10 write sequence
1489 ssm 0,0 1489 ssm 0,0
1490 STDIAG (10) 1490 STDIAG (10)
1491 ssm 0,0 1491 ssm 0,0
1492 b,n perf_rdr_shift_out_U_leave 1492 b,n perf_rdr_shift_out_U_leave
1493 nop 1493 nop
1494 ssm 0,0 1494 ssm 0,0
1495 nop 1495 nop
1496 1496
1497 sync ; RDR 11 write sequence 1497 sync ; RDR 11 write sequence
1498 ssm 0,0 1498 ssm 0,0
1499 STDIAG (11) 1499 STDIAG (11)
1500 ssm 0,0 1500 ssm 0,0
1501 b,n perf_rdr_shift_out_U_leave 1501 b,n perf_rdr_shift_out_U_leave
1502 nop 1502 nop
1503 ssm 0,0 1503 ssm 0,0
1504 nop 1504 nop
1505 1505
1506 sync ; RDR 12 write sequence 1506 sync ; RDR 12 write sequence
1507 ssm 0,0 1507 ssm 0,0
1508 STDIAG (12) 1508 STDIAG (12)
1509 ssm 0,0 1509 ssm 0,0
1510 b,n perf_rdr_shift_out_U_leave 1510 b,n perf_rdr_shift_out_U_leave
1511 nop 1511 nop
1512 ssm 0,0 1512 ssm 0,0
1513 nop 1513 nop
1514 1514
1515 sync ; RDR 13 write sequence 1515 sync ; RDR 13 write sequence
1516 ssm 0,0 1516 ssm 0,0
1517 STDIAG (13) 1517 STDIAG (13)
1518 ssm 0,0 1518 ssm 0,0
1519 b,n perf_rdr_shift_out_U_leave 1519 b,n perf_rdr_shift_out_U_leave
1520 nop 1520 nop
1521 ssm 0,0 1521 ssm 0,0
1522 nop 1522 nop
1523 1523
1524 sync ; RDR 14 write sequence 1524 sync ; RDR 14 write sequence
1525 ssm 0,0 1525 ssm 0,0
1526 STDIAG (14) 1526 STDIAG (14)
1527 ssm 0,0 1527 ssm 0,0
1528 b,n perf_rdr_shift_out_U_leave 1528 b,n perf_rdr_shift_out_U_leave
1529 nop 1529 nop
1530 ssm 0,0 1530 ssm 0,0
1531 nop 1531 nop
1532 1532
1533 sync ; RDR 15 write sequence 1533 sync ; RDR 15 write sequence
1534 ssm 0,0 1534 ssm 0,0
1535 STDIAG (15) 1535 STDIAG (15)
1536 ssm 0,0 1536 ssm 0,0
1537 b,n perf_rdr_shift_out_U_leave 1537 b,n perf_rdr_shift_out_U_leave
1538 nop 1538 nop
1539 ssm 0,0 1539 ssm 0,0
1540 nop 1540 nop
1541 1541
1542 sync ; RDR 16 write sequence 1542 sync ; RDR 16 write sequence
1543 ssm 0,0 1543 ssm 0,0
1544 STDIAG (16) 1544 STDIAG (16)
1545 ssm 0,0 1545 ssm 0,0
1546 b,n perf_rdr_shift_out_U_leave 1546 b,n perf_rdr_shift_out_U_leave
1547 nop 1547 nop
1548 ssm 0,0 1548 ssm 0,0
1549 nop 1549 nop
1550 1550
1551 sync ; RDR 17 write sequence 1551 sync ; RDR 17 write sequence
1552 ssm 0,0 1552 ssm 0,0
1553 STDIAG (17) 1553 STDIAG (17)
1554 ssm 0,0 1554 ssm 0,0
1555 b,n perf_rdr_shift_out_U_leave 1555 b,n perf_rdr_shift_out_U_leave
1556 nop 1556 nop
1557 ssm 0,0 1557 ssm 0,0
1558 nop 1558 nop
1559 1559
1560 sync ; RDR 18 write sequence 1560 sync ; RDR 18 write sequence
1561 ssm 0,0 1561 ssm 0,0
1562 STDIAG (18) 1562 STDIAG (18)
1563 ssm 0,0 1563 ssm 0,0
1564 b,n perf_rdr_shift_out_U_leave 1564 b,n perf_rdr_shift_out_U_leave
1565 nop 1565 nop
1566 ssm 0,0 1566 ssm 0,0
1567 nop 1567 nop
1568 1568
1569 sync ; RDR 19 write sequence 1569 sync ; RDR 19 write sequence
1570 ssm 0,0 1570 ssm 0,0
1571 STDIAG (19) 1571 STDIAG (19)
1572 ssm 0,0 1572 ssm 0,0
1573 b,n perf_rdr_shift_out_U_leave 1573 b,n perf_rdr_shift_out_U_leave
1574 nop 1574 nop
1575 ssm 0,0 1575 ssm 0,0
1576 nop 1576 nop
1577 1577
1578 sync ; RDR 20 write sequence 1578 sync ; RDR 20 write sequence
1579 ssm 0,0 1579 ssm 0,0
1580 STDIAG (20) 1580 STDIAG (20)
1581 ssm 0,0 1581 ssm 0,0
1582 b,n perf_rdr_shift_out_U_leave 1582 b,n perf_rdr_shift_out_U_leave
1583 nop 1583 nop
1584 ssm 0,0 1584 ssm 0,0
1585 nop 1585 nop
1586 1586
1587 sync ; RDR 21 write sequence 1587 sync ; RDR 21 write sequence
1588 ssm 0,0 1588 ssm 0,0
1589 STDIAG (21) 1589 STDIAG (21)
1590 ssm 0,0 1590 ssm 0,0
1591 b,n perf_rdr_shift_out_U_leave 1591 b,n perf_rdr_shift_out_U_leave
1592 nop 1592 nop
1593 ssm 0,0 1593 ssm 0,0
1594 nop 1594 nop
1595 1595
1596 sync ; RDR 22 write sequence 1596 sync ; RDR 22 write sequence
1597 ssm 0,0 1597 ssm 0,0
1598 STDIAG (22) 1598 STDIAG (22)
1599 ssm 0,0 1599 ssm 0,0
1600 b,n perf_rdr_shift_out_U_leave 1600 b,n perf_rdr_shift_out_U_leave
1601 nop 1601 nop
1602 ssm 0,0 1602 ssm 0,0
1603 nop 1603 nop
1604 1604
1605 sync ; RDR 23 write sequence 1605 sync ; RDR 23 write sequence
1606 ssm 0,0 1606 ssm 0,0
1607 STDIAG (23) 1607 STDIAG (23)
1608 ssm 0,0 1608 ssm 0,0
1609 b,n perf_rdr_shift_out_U_leave 1609 b,n perf_rdr_shift_out_U_leave
1610 nop 1610 nop
1611 ssm 0,0 1611 ssm 0,0
1612 nop 1612 nop
1613 1613
1614 sync ; RDR 24 write sequence 1614 sync ; RDR 24 write sequence
1615 ssm 0,0 1615 ssm 0,0
1616 STDIAG (24) 1616 STDIAG (24)
1617 ssm 0,0 1617 ssm 0,0
1618 b,n perf_rdr_shift_out_U_leave 1618 b,n perf_rdr_shift_out_U_leave
1619 nop 1619 nop
1620 ssm 0,0 1620 ssm 0,0
1621 nop 1621 nop
1622 1622
1623 sync ; RDR 25 write sequence 1623 sync ; RDR 25 write sequence
1624 ssm 0,0 1624 ssm 0,0
1625 STDIAG (25) 1625 STDIAG (25)
1626 ssm 0,0 1626 ssm 0,0
1627 b,n perf_rdr_shift_out_U_leave 1627 b,n perf_rdr_shift_out_U_leave
1628 nop 1628 nop
1629 ssm 0,0 1629 ssm 0,0
1630 nop 1630 nop
1631 1631
1632 sync ; RDR 26 write sequence 1632 sync ; RDR 26 write sequence
1633 ssm 0,0 1633 ssm 0,0
1634 STDIAG (26) 1634 STDIAG (26)
1635 ssm 0,0 1635 ssm 0,0
1636 b,n perf_rdr_shift_out_U_leave 1636 b,n perf_rdr_shift_out_U_leave
1637 nop 1637 nop
1638 ssm 0,0 1638 ssm 0,0
1639 nop 1639 nop
1640 1640
1641 sync ; RDR 27 write sequence 1641 sync ; RDR 27 write sequence
1642 ssm 0,0 1642 ssm 0,0
1643 STDIAG (27) 1643 STDIAG (27)
1644 ssm 0,0 1644 ssm 0,0
1645 b,n perf_rdr_shift_out_U_leave 1645 b,n perf_rdr_shift_out_U_leave
1646 nop 1646 nop
1647 ssm 0,0 1647 ssm 0,0
1648 nop 1648 nop
1649 1649
1650 sync ; RDR 28 write sequence 1650 sync ; RDR 28 write sequence
1651 ssm 0,0 1651 ssm 0,0
1652 STDIAG (28) 1652 STDIAG (28)
1653 ssm 0,0 1653 ssm 0,0
1654 b,n perf_rdr_shift_out_U_leave 1654 b,n perf_rdr_shift_out_U_leave
1655 nop 1655 nop
1656 ssm 0,0 1656 ssm 0,0
1657 nop 1657 nop
1658 1658
1659 sync ; RDR 29 write sequence 1659 sync ; RDR 29 write sequence
1660 ssm 0,0 1660 ssm 0,0
1661 STDIAG (29) 1661 STDIAG (29)
1662 ssm 0,0 1662 ssm 0,0
1663 b,n perf_rdr_shift_out_U_leave 1663 b,n perf_rdr_shift_out_U_leave
1664 nop 1664 nop
1665 ssm 0,0 1665 ssm 0,0
1666 nop 1666 nop
1667 1667
1668 sync ; RDR 30 write sequence 1668 sync ; RDR 30 write sequence
1669 ssm 0,0 1669 ssm 0,0
1670 STDIAG (30) 1670 STDIAG (30)
1671 ssm 0,0 1671 ssm 0,0
1672 b,n perf_rdr_shift_out_U_leave 1672 b,n perf_rdr_shift_out_U_leave
1673 nop 1673 nop
1674 ssm 0,0 1674 ssm 0,0
1675 nop 1675 nop
1676 1676
1677 sync ; RDR 31 write sequence 1677 sync ; RDR 31 write sequence
1678 ssm 0,0 1678 ssm 0,0
1679 STDIAG (31) 1679 STDIAG (31)
1680 ssm 0,0 1680 ssm 0,0
1681 b,n perf_rdr_shift_out_U_leave 1681 b,n perf_rdr_shift_out_U_leave
1682 nop 1682 nop
1683 ssm 0,0 1683 ssm 0,0
1684 nop 1684 nop
1685 1685
1686 perf_rdr_shift_out_U_leave: 1686 perf_rdr_shift_out_U_leave:
1687 bve (%r2) 1687 bve (%r2)
1688 .exit 1688 .exit
1689 MTDIAG_2 (23) ; restore DR2 1689 MTDIAG_2 (23) ; restore DR2
1690 .procend 1690 .procend
1691 ENDPROC(perf_rdr_shift_out_U) 1691 ENDPROC(perf_rdr_shift_out_U)
1692 1692
1693 1693
arch/parisc/kernel/real2.S
1 /* 1 /*
2 * 2 *
3 * This file is subject to the terms and conditions of the GNU General Public 3 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive 4 * License. See the file "COPYING" in the main directory of this archive
5 * for more details. 5 * for more details.
6 * 6 *
7 * Copyright (C) 2000 Hewlett Packard (Paul Bame bame@puffin.external.hp.com) 7 * Copyright (C) 2000 Hewlett Packard (Paul Bame bame@puffin.external.hp.com)
8 * 8 *
9 */ 9 */
10 10
11 #include <asm/psw.h> 11 #include <asm/psw.h>
12 #include <asm/assembly.h> 12 #include <asm/assembly.h>
13 13
14 #include <linux/linkage.h> 14 #include <linux/linkage.h>
15 #include <linux/init.h>
16 15
17 .section .bss 16 .section .bss
18 .export real_stack 17 .export real_stack
19 .export real32_stack 18 .export real32_stack
20 .export real64_stack 19 .export real64_stack
21 .align 64 20 .align 64
22 real_stack: 21 real_stack:
23 real32_stack: 22 real32_stack:
24 real64_stack: 23 real64_stack:
25 .block 8192 24 .block 8192
26 25
27 #ifdef CONFIG_64BIT 26 #ifdef CONFIG_64BIT
28 # define REG_SZ 8 27 # define REG_SZ 8
29 #else 28 #else
30 # define REG_SZ 4 29 # define REG_SZ 4
31 #endif 30 #endif
32 31
33 #define N_SAVED_REGS 9 32 #define N_SAVED_REGS 9
34 33
35 save_cr_space: 34 save_cr_space:
36 .block REG_SZ * N_SAVED_REGS 35 .block REG_SZ * N_SAVED_REGS
37 save_cr_end: 36 save_cr_end:
38 37
39 38
40 /************************ 32-bit real-mode calls ***********************/ 39 /************************ 32-bit real-mode calls ***********************/
41 /* This can be called in both narrow and wide kernels */ 40 /* This can be called in both narrow and wide kernels */
42 41
43 __HEAD 42 .text
44 43
45 /* unsigned long real32_call_asm(unsigned int *sp, 44 /* unsigned long real32_call_asm(unsigned int *sp,
46 * unsigned int *arg0p, 45 * unsigned int *arg0p,
47 * unsigned int iodc_fn) 46 * unsigned int iodc_fn)
48 * sp is value of stack pointer to adopt before calling PDC (virt) 47 * sp is value of stack pointer to adopt before calling PDC (virt)
49 * arg0p points to where saved arg values may be found 48 * arg0p points to where saved arg values may be found
50 * iodc_fn is the IODC function to call 49 * iodc_fn is the IODC function to call
51 */ 50 */
52 51
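/* Hypothetical usage sketch (the caller and argument names are
 * assumptions, not taken from this file). The PA-RISC stack grows
 * upward, so sp would be the base of a caller-provided real-mode
 * stack area such as real_stack above:
 *
 *	unsigned long ret;
 *	ret = real32_call_asm((unsigned int *) real_stack,
 *			      arg_area, iodc_entry);
 */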
53 ENTRY(real32_call_asm) 52 ENTRY(real32_call_asm)
54 STREG %rp, -RP_OFFSET(%sp) /* save RP */ 53 STREG %rp, -RP_OFFSET(%sp) /* save RP */
55 #ifdef CONFIG_64BIT 54 #ifdef CONFIG_64BIT
56 callee_save 55 callee_save
57 ldo 2*REG_SZ(%sp), %sp /* room for a couple more saves */ 56 ldo 2*REG_SZ(%sp), %sp /* room for a couple more saves */
58 STREG %r27, -1*REG_SZ(%sp) 57 STREG %r27, -1*REG_SZ(%sp)
59 STREG %r29, -2*REG_SZ(%sp) 58 STREG %r29, -2*REG_SZ(%sp)
60 #endif 59 #endif
61 STREG %sp, -REG_SZ(%arg0) /* save SP on real-mode stack */ 60 STREG %sp, -REG_SZ(%arg0) /* save SP on real-mode stack */
62 copy %arg0, %sp /* adopt the real-mode SP */ 61 copy %arg0, %sp /* adopt the real-mode SP */
63 62
64 /* save iodc_fn */ 63 /* save iodc_fn */
65 copy %arg2, %r31 64 copy %arg2, %r31
66 65
67 /* load up the arg registers from the saved arg area */ 66 /* load up the arg registers from the saved arg area */
68 /* 32-bit calling convention passes first 4 args in registers */ 67 /* 32-bit calling convention passes first 4 args in registers */
69 ldw 0(%arg1), %arg0 /* note overwriting arg0 */ 68 ldw 0(%arg1), %arg0 /* note overwriting arg0 */
70 ldw -8(%arg1), %arg2 69 ldw -8(%arg1), %arg2
71 ldw -12(%arg1), %arg3 70 ldw -12(%arg1), %arg3
72 ldw -4(%arg1), %arg1 /* obviously must do this one last! */ 71 ldw -4(%arg1), %arg1 /* obviously must do this one last! */
73 72
74 tophys_r1 %sp 73 tophys_r1 %sp
75 74
76 b,l rfi_virt2real,%r2 75 b,l rfi_virt2real,%r2
77 nop 76 nop
78 77
79 b,l save_control_regs,%r2 /* modifies r1, r2, r28 */ 78 b,l save_control_regs,%r2 /* modifies r1, r2, r28 */
80 nop 79 nop
81 80
82 #ifdef CONFIG_64BIT 81 #ifdef CONFIG_64BIT
83 rsm PSW_SM_W, %r0 /* go narrow */ 82 rsm PSW_SM_W, %r0 /* go narrow */
84 #endif 83 #endif
85 84
86 load32 PA(ric_ret), %r2 85 load32 PA(ric_ret), %r2
87 bv 0(%r31) 86 bv 0(%r31)
88 nop 87 nop
89 ric_ret: 88 ric_ret:
90 #ifdef CONFIG_64BIT 89 #ifdef CONFIG_64BIT
91 ssm PSW_SM_W, %r0 /* go wide */ 90 ssm PSW_SM_W, %r0 /* go wide */
92 #endif 91 #endif
93 /* restore CRs before going virtual in case we page fault */ 92 /* restore CRs before going virtual in case we page fault */
94 b,l restore_control_regs, %r2 /* modifies r1, r2, r26 */ 93 b,l restore_control_regs, %r2 /* modifies r1, r2, r26 */
95 nop 94 nop
96 95
97 b,l rfi_real2virt,%r2 96 b,l rfi_real2virt,%r2
98 nop 97 nop
99 98
100 tovirt_r1 %sp 99 tovirt_r1 %sp
101 LDREG -REG_SZ(%sp), %sp /* restore SP */ 100 LDREG -REG_SZ(%sp), %sp /* restore SP */
102 #ifdef CONFIG_64BIT 101 #ifdef CONFIG_64BIT
103 LDREG -1*REG_SZ(%sp), %r27 102 LDREG -1*REG_SZ(%sp), %r27
104 LDREG -2*REG_SZ(%sp), %r29 103 LDREG -2*REG_SZ(%sp), %r29
105 ldo -2*REG_SZ(%sp), %sp 104 ldo -2*REG_SZ(%sp), %sp
106 callee_rest 105 callee_rest
107 #endif 106 #endif
108 LDREG -RP_OFFSET(%sp), %rp /* restore RP */ 107 LDREG -RP_OFFSET(%sp), %rp /* restore RP */
109 bv 0(%rp) 108 bv 0(%rp)
110 nop 109 nop
111 ENDPROC(real32_call_asm) 110 ENDPROC(real32_call_asm)
112 111
113 112
114 # define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where) 113 # define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where)
115 # define POP_CR(r, where) LDREG,mb -REG_SZ(where), %r1 ! mtctl %r1, r 114 # define POP_CR(r, where) LDREG,mb -REG_SZ(where), %r1 ! mtctl %r1, r
116 115
117 __HEAD 116 .text
118 save_control_regs: 117 save_control_regs:
119 load32 PA(save_cr_space), %r28 118 load32 PA(save_cr_space), %r28
120 PUSH_CR(%cr24, %r28) 119 PUSH_CR(%cr24, %r28)
121 PUSH_CR(%cr25, %r28) 120 PUSH_CR(%cr25, %r28)
122 PUSH_CR(%cr26, %r28) 121 PUSH_CR(%cr26, %r28)
123 PUSH_CR(%cr27, %r28) 122 PUSH_CR(%cr27, %r28)
124 PUSH_CR(%cr28, %r28) 123 PUSH_CR(%cr28, %r28)
125 PUSH_CR(%cr29, %r28) 124 PUSH_CR(%cr29, %r28)
126 PUSH_CR(%cr30, %r28) 125 PUSH_CR(%cr30, %r28)
127 PUSH_CR(%cr31, %r28) 126 PUSH_CR(%cr31, %r28)
128 PUSH_CR(%cr15, %r28) 127 PUSH_CR(%cr15, %r28)
129 bv 0(%r2) 128 bv 0(%r2)
130 nop 129 nop
131 130
132 restore_control_regs: 131 restore_control_regs:
133 load32 PA(save_cr_end), %r26 132 load32 PA(save_cr_end), %r26
134 POP_CR(%cr15, %r26) 133 POP_CR(%cr15, %r26)
135 POP_CR(%cr31, %r26) 134 POP_CR(%cr31, %r26)
136 POP_CR(%cr30, %r26) 135 POP_CR(%cr30, %r26)
137 POP_CR(%cr29, %r26) 136 POP_CR(%cr29, %r26)
138 POP_CR(%cr28, %r26) 137 POP_CR(%cr28, %r26)
139 POP_CR(%cr27, %r26) 138 POP_CR(%cr27, %r26)
140 POP_CR(%cr26, %r26) 139 POP_CR(%cr26, %r26)
141 POP_CR(%cr25, %r26) 140 POP_CR(%cr25, %r26)
142 POP_CR(%cr24, %r26) 141 POP_CR(%cr24, %r26)
143 bv 0(%r2) 142 bv 0(%r2)
144 nop 143 nop
145 144
146 /* rfi_virt2real() and rfi_real2virt() could perhaps be adapted for 145 /* rfi_virt2real() and rfi_real2virt() could perhaps be adapted for
147 * more general-purpose use by the several places which need RFIs 146 * more general-purpose use by the several places which need RFIs
148 */ 147 */
149 __HEAD 148 .text
150 .align 128 149 .align 128
151 rfi_virt2real: 150 rfi_virt2real:
152 /* switch to real mode... */ 151 /* switch to real mode... */
153 rsm PSW_SM_I,%r0 152 rsm PSW_SM_I,%r0
154 load32 PA(rfi_v2r_1), %r1 153 load32 PA(rfi_v2r_1), %r1
155 nop 154 nop
156 nop 155 nop
157 nop 156 nop
158 nop 157 nop
159 nop 158 nop
160 159
161 rsm PSW_SM_Q,%r0 /* disable Q & I bits to load iia queue */ 160 rsm PSW_SM_Q,%r0 /* disable Q & I bits to load iia queue */
162 mtctl %r0, %cr17 /* Clear IIASQ tail */ 161 mtctl %r0, %cr17 /* Clear IIASQ tail */
163 mtctl %r0, %cr17 /* Clear IIASQ head */ 162 mtctl %r0, %cr17 /* Clear IIASQ head */
164 mtctl %r1, %cr18 /* IIAOQ head */ 163 mtctl %r1, %cr18 /* IIAOQ head */
165 ldo 4(%r1), %r1 164 ldo 4(%r1), %r1
166 mtctl %r1, %cr18 /* IIAOQ tail */ 165 mtctl %r1, %cr18 /* IIAOQ tail */
167 load32 REAL_MODE_PSW, %r1 166 load32 REAL_MODE_PSW, %r1
168 mtctl %r1, %cr22 167 mtctl %r1, %cr22
169 rfi 168 rfi
170 169
171 nop 170 nop
172 nop 171 nop
173 nop 172 nop
174 nop 173 nop
175 nop 174 nop
176 nop 175 nop
177 nop 176 nop
178 nop 177 nop
179 rfi_v2r_1: 178 rfi_v2r_1:
180 tophys_r1 %r2 179 tophys_r1 %r2
181 bv 0(%r2) 180 bv 0(%r2)
182 nop 181 nop
183 182
184 __HEAD 183 .text
185 .align 128 184 .align 128
186 rfi_real2virt: 185 rfi_real2virt:
187 rsm PSW_SM_I,%r0 186 rsm PSW_SM_I,%r0
188 load32 (rfi_r2v_1), %r1 187 load32 (rfi_r2v_1), %r1
189 nop 188 nop
190 nop 189 nop
191 nop 190 nop
192 nop 191 nop
193 nop 192 nop
194 193
195 rsm PSW_SM_Q,%r0 /* disable Q bit to load iia queue */ 194 rsm PSW_SM_Q,%r0 /* disable Q bit to load iia queue */
196 mtctl %r0, %cr17 /* Clear IIASQ tail */ 195 mtctl %r0, %cr17 /* Clear IIASQ tail */
197 mtctl %r0, %cr17 /* Clear IIASQ head */ 196 mtctl %r0, %cr17 /* Clear IIASQ head */
198 mtctl %r1, %cr18 /* IIAOQ head */ 197 mtctl %r1, %cr18 /* IIAOQ head */
199 ldo 4(%r1), %r1 198 ldo 4(%r1), %r1
200 mtctl %r1, %cr18 /* IIAOQ tail */ 199 mtctl %r1, %cr18 /* IIAOQ tail */
201 load32 KERNEL_PSW, %r1 200 load32 KERNEL_PSW, %r1
202 mtctl %r1, %cr22 201 mtctl %r1, %cr22
203 rfi 202 rfi
204 203
205 nop 204 nop
206 nop 205 nop
207 nop 206 nop
208 nop 207 nop
209 nop 208 nop
210 nop 209 nop
211 nop 210 nop
212 nop 211 nop
213 rfi_r2v_1: 212 rfi_r2v_1:
214 tovirt_r1 %r2 213 tovirt_r1 %r2
215 bv 0(%r2) 214 bv 0(%r2)
216 nop 215 nop
217 216
218 #ifdef CONFIG_64BIT 217 #ifdef CONFIG_64BIT
219 218
220 /************************ 64-bit real-mode calls ***********************/ 219 /************************ 64-bit real-mode calls ***********************/
221 /* This is only usable in wide kernels right now and will probably stay so */ 220 /* This is only usable in wide kernels right now and will probably stay so */
222 __HEAD 221 .text
223 /* unsigned long real64_call_asm(unsigned long *sp, 222 /* unsigned long real64_call_asm(unsigned long *sp,
224 * unsigned long *arg0p, 223 * unsigned long *arg0p,
225 * unsigned long fn) 224 * unsigned long fn)
226 * sp is value of stack pointer to adopt before calling PDC (virt) 225 * sp is value of stack pointer to adopt before calling PDC (virt)
227 * arg0p points to where saved arg values may be found 226 * arg0p points to where saved arg values may be found
228 * fn is the IODC function to call 227 * fn is the IODC function to call
229 */ 228 */
230 ENTRY(real64_call_asm) 229 ENTRY(real64_call_asm)
231 std %rp, -0x10(%sp) /* save RP */ 230 std %rp, -0x10(%sp) /* save RP */
232 std %sp, -8(%arg0) /* save SP on real-mode stack */ 231 std %sp, -8(%arg0) /* save SP on real-mode stack */
233 copy %arg0, %sp /* adopt the real-mode SP */ 232 copy %arg0, %sp /* adopt the real-mode SP */
234 233
235 /* save fn */ 234 /* save fn */
236 copy %arg2, %r31 235 copy %arg2, %r31
237 236
238 /* set up the new ap */ 237 /* set up the new ap */
239 ldo 64(%arg1), %r29 238 ldo 64(%arg1), %r29
240 239
241 /* load up the arg registers from the saved arg area */ 240 /* load up the arg registers from the saved arg area */
242 /* 64-bit calling convention passes first 8 args in registers */ 241 /* 64-bit calling convention passes first 8 args in registers */
243 ldd 0*REG_SZ(%arg1), %arg0 /* note overwriting arg0 */ 242 ldd 0*REG_SZ(%arg1), %arg0 /* note overwriting arg0 */
244 ldd 2*REG_SZ(%arg1), %arg2 243 ldd 2*REG_SZ(%arg1), %arg2
245 ldd 3*REG_SZ(%arg1), %arg3 244 ldd 3*REG_SZ(%arg1), %arg3
246 ldd 4*REG_SZ(%arg1), %r22 245 ldd 4*REG_SZ(%arg1), %r22
247 ldd 5*REG_SZ(%arg1), %r21 246 ldd 5*REG_SZ(%arg1), %r21
248 ldd 6*REG_SZ(%arg1), %r20 247 ldd 6*REG_SZ(%arg1), %r20
249 ldd 7*REG_SZ(%arg1), %r19 248 ldd 7*REG_SZ(%arg1), %r19
250 ldd 1*REG_SZ(%arg1), %arg1 /* do this one last! */ 249 ldd 1*REG_SZ(%arg1), %arg1 /* do this one last! */
251 250
252 tophys_r1 %sp 251 tophys_r1 %sp
253 252
254 b,l rfi_virt2real,%r2 253 b,l rfi_virt2real,%r2
255 nop 254 nop
256 255
257 b,l save_control_regs,%r2 /* modifies r1, r2, r28 */ 256 b,l save_control_regs,%r2 /* modifies r1, r2, r28 */
258 nop 257 nop
259 258
260 load32 PA(r64_ret), %r2 259 load32 PA(r64_ret), %r2
261 bv 0(%r31) 260 bv 0(%r31)
262 nop 261 nop
263 r64_ret: 262 r64_ret:
264 /* restore CRs before going virtual in case we page fault */ 263 /* restore CRs before going virtual in case we page fault */
265 b,l restore_control_regs, %r2 /* modifies r1, r2, r26 */ 264 b,l restore_control_regs, %r2 /* modifies r1, r2, r26 */
266 nop 265 nop
267 266
268 b,l rfi_real2virt,%r2 267 b,l rfi_real2virt,%r2
269 nop 268 nop
270 269
271 tovirt_r1 %sp 270 tovirt_r1 %sp
272 ldd -8(%sp), %sp /* restore SP */ 271 ldd -8(%sp), %sp /* restore SP */
273 ldd -0x10(%sp), %rp /* restore RP */ 272 ldd -0x10(%sp), %rp /* restore RP */
274 bv 0(%rp) 273 bv 0(%rp)
275 nop 274 nop
276 ENDPROC(real64_call_asm) 275 ENDPROC(real64_call_asm)
277 276
278 #endif 277 #endif
279 278
280 __HEAD 279 .text
281 /* http://lists.parisc-linux.org/hypermail/parisc-linux/10916.html 280 /* http://lists.parisc-linux.org/hypermail/parisc-linux/10916.html
282 ** GCC 3.3 and later has a new function in libgcc.a for 281 ** GCC 3.3 and later has a new function in libgcc.a for
283 ** comparing function pointers. 282 ** comparing function pointers.
284 */ 283 */
285 ENTRY(__canonicalize_funcptr_for_compare) 284 ENTRY(__canonicalize_funcptr_for_compare)
286 #ifdef CONFIG_64BIT 285 #ifdef CONFIG_64BIT
287 bve (%r2) 286 bve (%r2)
288 #else 287 #else
289 bv %r0(%r2) 288 bv %r0(%r2)
290 #endif 289 #endif
291 copy %r26,%r28 290 copy %r26,%r28
292 ENDPROC(__canonicalize_funcptr_for_compare) 291 ENDPROC(__canonicalize_funcptr_for_compare)
293 292
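/* Why this helper exists (sketch): on PA-RISC a function pointer may be
 * a plabel rather than a plain code address, so GCC 3.3+ routes
 * function-pointer equality tests through this helper, e.g.:
 *
 *	int same_handler(void (*a)(void), void (*b)(void))
 *	{
 *		return a == b;	// may be compiled via the helper above
 *	}
 *
 * In the kernel the pointer is already canonical, hence the identity
 * copy of %r26 into the return register %r28. */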
294 293
arch/parisc/kernel/syscall.S
1 /* 1 /*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/) 2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 * 3 *
4 * System call entry code Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai> 4 * System call entry code Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
5 * Licensed under the GNU GPL. 5 * Licensed under the GNU GPL.
6 * thanks to Philipp Rumpf, Mike Shaver and various others 6 * thanks to Philipp Rumpf, Mike Shaver and various others
7 * sorry about the wall, puffin.. 7 * sorry about the wall, puffin..
8 */ 8 */
9 9
10 #include <asm/asm-offsets.h> 10 #include <asm/asm-offsets.h>
11 #include <asm/unistd.h> 11 #include <asm/unistd.h>
12 #include <asm/errno.h> 12 #include <asm/errno.h>
13 #include <asm/page.h> 13 #include <asm/page.h>
14 #include <asm/psw.h> 14 #include <asm/psw.h>
15 #include <asm/thread_info.h> 15 #include <asm/thread_info.h>
16 #include <asm/assembly.h> 16 #include <asm/assembly.h>
17 #include <asm/processor.h> 17 #include <asm/processor.h>
18 18
19 #include <linux/linkage.h> 19 #include <linux/linkage.h>
20 #include <linux/init.h>
21 20
22 /* We fill the empty parts of the gateway page with 21 /* We fill the empty parts of the gateway page with
23 * something that will kill the kernel or a 22 * something that will kill the kernel or a
24 * userspace application. 23 * userspace application.
25 */ 24 */
26 #define KILL_INSN break 0,0 25 #define KILL_INSN break 0,0
27 26
28 .level LEVEL 27 .level LEVEL
29 28
30 __HEAD 29 .text
31 30
32 .import syscall_exit,code 31 .import syscall_exit,code
33 .import syscall_exit_rfi,code 32 .import syscall_exit_rfi,code
34 33
35 /* Linux gateway page is aliased to virtual page 0 in the kernel 34 /* Linux gateway page is aliased to virtual page 0 in the kernel
36 * address space. Since it is a gateway page it cannot be 35 * address space. Since it is a gateway page it cannot be
37 * dereferenced, so null pointers will still fault. We start 36 * dereferenced, so null pointers will still fault. We start
38 * the actual entry point at 0x100. We put break instructions 37 * the actual entry point at 0x100. We put break instructions
39 * at the beginning of the page to trap null indirect function 38 * at the beginning of the page to trap null indirect function
40 * pointers. 39 * pointers.
41 */ 40 */
42 41
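/* Resulting page layout, collated from the comments below:
 *
 *	0x000 - 0x0b0	KILL_INSN fill (traps null indirect calls)
 *	0x0b0		lws_entry (light-weight syscalls)
 *	0x0e0		set_thread_pointer (glibc threading)
 *	0x100		linux_gateway_entry (syscall entry)
 */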
43 .align PAGE_SIZE 42 .align PAGE_SIZE
44 ENTRY(linux_gateway_page) 43 ENTRY(linux_gateway_page)
45 44
46 /* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */ 45 /* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
47 .rept 44 46 .rept 44
48 KILL_INSN 47 KILL_INSN
49 .endr 48 .endr
50 49
51 /* ADDRESS 0xb0 to 0xb4, lws uses 1 insn for entry */ 50 /* ADDRESS 0xb0 to 0xb4, lws uses 1 insn for entry */
52 /* Light-weight-syscall entry must always be located at 0xb0 */ 51 /* Light-weight-syscall entry must always be located at 0xb0 */
53 /* WARNING: Keep this number updated with table size changes */ 52 /* WARNING: Keep this number updated with table size changes */
54 #define __NR_lws_entries (2) 53 #define __NR_lws_entries (2)
55 54
56 lws_entry: 55 lws_entry:
57 /* Unconditional branch to lws_start, located on the 56 /* Unconditional branch to lws_start, located on the
58 same gateway page */ 57 same gateway page */
59 b,n lws_start 58 b,n lws_start
60 59
61 /* Fill from 0xb4 to 0xe0 */ 60 /* Fill from 0xb4 to 0xe0 */
62 .rept 11 61 .rept 11
63 KILL_INSN 62 KILL_INSN
64 .endr 63 .endr
65 64
66 /* This function MUST be located at 0xe0 for glibc's threading 65 /* This function MUST be located at 0xe0 for glibc's threading
67 mechanism to work. DO NOT MOVE THIS CODE EVER! */ 66 mechanism to work. DO NOT MOVE THIS CODE EVER! */
68 set_thread_pointer: 67 set_thread_pointer:
69 gate .+8, %r0 /* increase privilege */ 68 gate .+8, %r0 /* increase privilege */
70 depi 3, 31, 2, %r31 /* Ensure we return into user mode. */ 69 depi 3, 31, 2, %r31 /* Ensure we return into user mode. */
71 be 0(%sr7,%r31) /* return to user space */ 70 be 0(%sr7,%r31) /* return to user space */
72 mtctl %r26, %cr27 /* move arg0 to the control register */ 71 mtctl %r26, %cr27 /* move arg0 to the control register */
73 72
74 /* Increase the chance of trapping if random jumps occur to this 73 /* Increase the chance of trapping if random jumps occur to this
75 address, fill from 0xf0 to 0x100 */ 74 address, fill from 0xf0 to 0x100 */
76 .rept 4 75 .rept 4
77 KILL_INSN 76 KILL_INSN
78 .endr 77 .endr
79 78
80 /* This address must remain fixed at 0x100 for glibc's syscalls to work */ 79 /* This address must remain fixed at 0x100 for glibc's syscalls to work */
81 .align 256 80 .align 256
82 linux_gateway_entry: 81 linux_gateway_entry:
83 gate .+8, %r0 /* become privileged */ 82 gate .+8, %r0 /* become privileged */
84 mtsp %r0,%sr4 /* get kernel space into sr4 */ 83 mtsp %r0,%sr4 /* get kernel space into sr4 */
85 mtsp %r0,%sr5 /* get kernel space into sr5 */ 84 mtsp %r0,%sr5 /* get kernel space into sr5 */
86 mtsp %r0,%sr6 /* get kernel space into sr6 */ 85 mtsp %r0,%sr6 /* get kernel space into sr6 */
87 mfsp %sr7,%r1 /* save user sr7 */ 86 mfsp %sr7,%r1 /* save user sr7 */
88 mtsp %r1,%sr3 /* and store it in sr3 */ 87 mtsp %r1,%sr3 /* and store it in sr3 */
89 88
90 #ifdef CONFIG_64BIT 89 #ifdef CONFIG_64BIT
91 /* for now we can *always* set the W bit on entry to the syscall 90 /* for now we can *always* set the W bit on entry to the syscall
92 * since we don't support wide userland processes. We could 91 * since we don't support wide userland processes. We could
93 * also save the current SM other than in r0 and restore it on 92 * also save the current SM other than in r0 and restore it on
94 * exit from the syscall, and also use that value to know 93 * exit from the syscall, and also use that value to know
95 * whether to do narrow or wide syscalls. -PB 94 * whether to do narrow or wide syscalls. -PB
96 */ 95 */
97 ssm PSW_SM_W, %r1 96 ssm PSW_SM_W, %r1
98 extrd,u %r1,PSW_W_BIT,1,%r1 97 extrd,u %r1,PSW_W_BIT,1,%r1
99 /* sp must be aligned on 4, so deposit the W bit setting into 98 /* sp must be aligned on 4, so deposit the W bit setting into
100 * the bottom of sp temporarily */ 99 * the bottom of sp temporarily */
101 or,ev %r1,%r30,%r30 100 or,ev %r1,%r30,%r30
102 b,n 1f 101 b,n 1f
103 /* The top halves of argument registers must be cleared on syscall 102 /* The top halves of argument registers must be cleared on syscall
104 * entry from narrow executable. 103 * entry from narrow executable.
105 */ 104 */
106 depdi 0, 31, 32, %r26 105 depdi 0, 31, 32, %r26
107 depdi 0, 31, 32, %r25 106 depdi 0, 31, 32, %r25
108 depdi 0, 31, 32, %r24 107 depdi 0, 31, 32, %r24
109 depdi 0, 31, 32, %r23 108 depdi 0, 31, 32, %r23
110 depdi 0, 31, 32, %r22 109 depdi 0, 31, 32, %r22
111 depdi 0, 31, 32, %r21 110 depdi 0, 31, 32, %r21
112 1: 111 1:
113 #endif 112 #endif
114 mfctl %cr30,%r1 113 mfctl %cr30,%r1
115 xor %r1,%r30,%r30 /* ye olde xor trick */ 114 xor %r1,%r30,%r30 /* ye olde xor trick */
116 xor %r1,%r30,%r1 115 xor %r1,%r30,%r1
117 xor %r1,%r30,%r30 116 xor %r1,%r30,%r30
118 117
119 ldo THREAD_SZ_ALGN+FRAME_SIZE(%r30),%r30 /* set up kernel stack */ 118 ldo THREAD_SZ_ALGN+FRAME_SIZE(%r30),%r30 /* set up kernel stack */
120 119
121 /* N.B.: It is critical that we don't set sr7 to 0 until r30 120 /* N.B.: It is critical that we don't set sr7 to 0 until r30
122 * contains a valid kernel stack pointer. It is also 121 * contains a valid kernel stack pointer. It is also
123 * critical that we don't start using the kernel stack 122 * critical that we don't start using the kernel stack
124 * until after sr7 has been set to 0. 123 * until after sr7 has been set to 0.
125 */ 124 */
126 125
127 mtsp %r0,%sr7 /* get kernel space into sr7 */ 126 mtsp %r0,%sr7 /* get kernel space into sr7 */
128 STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */ 127 STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
129 mfctl %cr30,%r1 /* get task ptr in %r1 */ 128 mfctl %cr30,%r1 /* get task ptr in %r1 */
130 LDREG TI_TASK(%r1),%r1 129 LDREG TI_TASK(%r1),%r1
131 130
132 /* Save some registers for sigcontext and potential task 131 /* Save some registers for sigcontext and potential task
133 switch (see entry.S for the details of which ones are 132 switch (see entry.S for the details of which ones are
134 saved/restored). TASK_PT_PSW is zeroed so we can see whether 133 saved/restored). TASK_PT_PSW is zeroed so we can see whether
135 a process is on a syscall or not. For an interrupt the real 134 a process is on a syscall or not. For an interrupt the real
136 PSW value is stored. This is needed for gdb and sys_ptrace. */ 135 PSW value is stored. This is needed for gdb and sys_ptrace. */
137 STREG %r0, TASK_PT_PSW(%r1) 136 STREG %r0, TASK_PT_PSW(%r1)
138 STREG %r2, TASK_PT_GR2(%r1) /* preserve rp */ 137 STREG %r2, TASK_PT_GR2(%r1) /* preserve rp */
139 STREG %r19, TASK_PT_GR19(%r1) 138 STREG %r19, TASK_PT_GR19(%r1)
140 139
141 LDREGM -FRAME_SIZE(%r30), %r2 /* get users sp back */ 140 LDREGM -FRAME_SIZE(%r30), %r2 /* get users sp back */
142 #ifdef CONFIG_64BIT 141 #ifdef CONFIG_64BIT
143 extrd,u %r2,63,1,%r19 /* W hidden in bottom bit */ 142 extrd,u %r2,63,1,%r19 /* W hidden in bottom bit */
144 #if 0 143 #if 0
145 xor %r19,%r2,%r2 /* clear bottom bit */ 144 xor %r19,%r2,%r2 /* clear bottom bit */
146 depd,z %r19,1,1,%r19 145 depd,z %r19,1,1,%r19
147 std %r19,TASK_PT_PSW(%r1) 146 std %r19,TASK_PT_PSW(%r1)
148 #endif 147 #endif
149 #endif 148 #endif
150 STREG %r2, TASK_PT_GR30(%r1) /* ... and save it */ 149 STREG %r2, TASK_PT_GR30(%r1) /* ... and save it */
151 150
152 STREG %r20, TASK_PT_GR20(%r1) /* Syscall number */ 151 STREG %r20, TASK_PT_GR20(%r1) /* Syscall number */
153 STREG %r21, TASK_PT_GR21(%r1) 152 STREG %r21, TASK_PT_GR21(%r1)
154 STREG %r22, TASK_PT_GR22(%r1) 153 STREG %r22, TASK_PT_GR22(%r1)
155 STREG %r23, TASK_PT_GR23(%r1) /* 4th argument */ 154 STREG %r23, TASK_PT_GR23(%r1) /* 4th argument */
156 STREG %r24, TASK_PT_GR24(%r1) /* 3rd argument */ 155 STREG %r24, TASK_PT_GR24(%r1) /* 3rd argument */
157 STREG %r25, TASK_PT_GR25(%r1) /* 2nd argument */ 156 STREG %r25, TASK_PT_GR25(%r1) /* 2nd argument */
158 STREG %r26, TASK_PT_GR26(%r1) /* 1st argument */ 157 STREG %r26, TASK_PT_GR26(%r1) /* 1st argument */
159 STREG %r27, TASK_PT_GR27(%r1) /* user dp */ 158 STREG %r27, TASK_PT_GR27(%r1) /* user dp */
160 STREG %r28, TASK_PT_GR28(%r1) /* return value 0 */ 159 STREG %r28, TASK_PT_GR28(%r1) /* return value 0 */
161 STREG %r28, TASK_PT_ORIG_R28(%r1) /* return value 0 (saved for signals) */ 160 STREG %r28, TASK_PT_ORIG_R28(%r1) /* return value 0 (saved for signals) */
162 STREG %r29, TASK_PT_GR29(%r1) /* return value 1 */ 161 STREG %r29, TASK_PT_GR29(%r1) /* return value 1 */
163 STREG %r31, TASK_PT_GR31(%r1) /* preserve syscall return ptr */ 162 STREG %r31, TASK_PT_GR31(%r1) /* preserve syscall return ptr */
164 163
165 ldo TASK_PT_FR0(%r1), %r27 /* save fpregs from the kernel */ 164 ldo TASK_PT_FR0(%r1), %r27 /* save fpregs from the kernel */
166 save_fp %r27 /* or potential task switch */ 165 save_fp %r27 /* or potential task switch */
167 166
168 mfctl %cr11, %r27 /* i.e. SAR */ 167 mfctl %cr11, %r27 /* i.e. SAR */
169 STREG %r27, TASK_PT_SAR(%r1) 168 STREG %r27, TASK_PT_SAR(%r1)
170 169
171 loadgp 170 loadgp
172 171
173 #ifdef CONFIG_64BIT 172 #ifdef CONFIG_64BIT
174 ldo -16(%r30),%r29 /* Reference param save area */ 173 ldo -16(%r30),%r29 /* Reference param save area */
175 copy %r19,%r2 /* W bit back to r2 */ 174 copy %r19,%r2 /* W bit back to r2 */
176 #else 175 #else
177 /* no need to save these on the stack in wide mode because the first 8 176 /* no need to save these on the stack in wide mode because the first 8
178 * args are passed in registers */ 177 * args are passed in registers */
179 stw %r22, -52(%r30) /* 5th argument */ 178 stw %r22, -52(%r30) /* 5th argument */
180 stw %r21, -56(%r30) /* 6th argument */ 179 stw %r21, -56(%r30) /* 6th argument */
181 #endif 180 #endif
182 181
183 /* Are we being ptraced? */ 182 /* Are we being ptraced? */
184 mfctl %cr30, %r1 183 mfctl %cr30, %r1
185 LDREG TI_TASK(%r1),%r1 184 LDREG TI_TASK(%r1),%r1
186 ldw TASK_PTRACE(%r1), %r1 185 ldw TASK_PTRACE(%r1), %r1
187 bb,<,n %r1,31,.Ltracesys 186 bb,<,n %r1,31,.Ltracesys
188 187
189 /* Note! We cannot use the syscall table that is mapped 188 /* Note! We cannot use the syscall table that is mapped
190 nearby since the gateway page is mapped execute-only. */ 189 nearby since the gateway page is mapped execute-only. */
191 190
192 #ifdef CONFIG_64BIT 191 #ifdef CONFIG_64BIT
193 ldil L%sys_call_table, %r1 192 ldil L%sys_call_table, %r1
194 or,= %r2,%r2,%r2 193 or,= %r2,%r2,%r2
195 addil L%(sys_call_table64-sys_call_table), %r1 194 addil L%(sys_call_table64-sys_call_table), %r1
196 ldo R%sys_call_table(%r1), %r19 195 ldo R%sys_call_table(%r1), %r19
197 or,= %r2,%r2,%r2 196 or,= %r2,%r2,%r2
198 ldo R%sys_call_table64(%r1), %r19 197 ldo R%sys_call_table64(%r1), %r19
199 #else 198 #else
200 ldil L%sys_call_table, %r1 199 ldil L%sys_call_table, %r1
201 ldo R%sys_call_table(%r1), %r19 200 ldo R%sys_call_table(%r1), %r19
202 #endif 201 #endif
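/* The or,= pairs above are a branchless select: each or,= nullifies
 * the following instruction when %r2 (the W bit, copied into %r2
 * earlier) is zero, so in C terms (sketch):
 *
 *	table = wide ? sys_call_table64 : sys_call_table;
 */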
203 comiclr,>> __NR_Linux_syscalls, %r20, %r0 202 comiclr,>> __NR_Linux_syscalls, %r20, %r0
204 b,n .Lsyscall_nosys 203 b,n .Lsyscall_nosys
205 204
206 LDREGX %r20(%r19), %r19 205 LDREGX %r20(%r19), %r19
207 206
208 /* If this is a sys_rt_sigreturn call, and the signal was received 207 /* If this is a sys_rt_sigreturn call, and the signal was received
209 * when not in_syscall, then we want to return via syscall_exit_rfi, 208 * when not in_syscall, then we want to return via syscall_exit_rfi,
210 * not syscall_exit. Signal no. in r20, in_syscall in r25 (see 209 * not syscall_exit. Signal no. in r20, in_syscall in r25 (see
211 * trampoline code in signal.c). 210 * trampoline code in signal.c).
212 */ 211 */
213 ldi __NR_rt_sigreturn,%r2 212 ldi __NR_rt_sigreturn,%r2
214 comb,= %r2,%r20,.Lrt_sigreturn 213 comb,= %r2,%r20,.Lrt_sigreturn
215 .Lin_syscall: 214 .Lin_syscall:
216 ldil L%syscall_exit,%r2 215 ldil L%syscall_exit,%r2
217 be 0(%sr7,%r19) 216 be 0(%sr7,%r19)
218 ldo R%syscall_exit(%r2),%r2 217 ldo R%syscall_exit(%r2),%r2
219 .Lrt_sigreturn: 218 .Lrt_sigreturn:
220 comib,<> 0,%r25,.Lin_syscall 219 comib,<> 0,%r25,.Lin_syscall
221 ldil L%syscall_exit_rfi,%r2 220 ldil L%syscall_exit_rfi,%r2
222 be 0(%sr7,%r19) 221 be 0(%sr7,%r19)
223 ldo R%syscall_exit_rfi(%r2),%r2 222 ldo R%syscall_exit_rfi(%r2),%r2
224 223
225 /* Note! Because we are not running where we were linked, any 224 /* Note! Because we are not running where we were linked, any
226 calls to functions external to this file must be indirect. To 225 calls to functions external to this file must be indirect. To
227 be safe, we apply the opposite rule to functions within this 226 be safe, we apply the opposite rule to functions within this
228 file, with local labels given to them to ensure correctness. */ 227 file, with local labels given to them to ensure correctness. */
229 228
230 .Lsyscall_nosys: 229 .Lsyscall_nosys:
231 syscall_nosys: 230 syscall_nosys:
232 ldil L%syscall_exit,%r1 231 ldil L%syscall_exit,%r1
233 be R%syscall_exit(%sr7,%r1) 232 be R%syscall_exit(%sr7,%r1)
234 ldo -ENOSYS(%r0),%r28 /* set errno */ 233 ldo -ENOSYS(%r0),%r28 /* set errno */
235 234
236 235
237 /* Warning! This trace code is a virtual duplicate of the code above so be 236 /* Warning! This trace code is a virtual duplicate of the code above so be
238 * sure to maintain both! */ 237 * sure to maintain both! */
239 .Ltracesys: 238 .Ltracesys:
240 tracesys: 239 tracesys:
241 /* Need to save more registers so the debugger can see where we 240 /* Need to save more registers so the debugger can see where we
242 * are. This saves only the lower 8 bits of PSW, so that the C 241 * are. This saves only the lower 8 bits of PSW, so that the C
243 * bit is still clear on syscalls, and the D bit is set if this 242 * bit is still clear on syscalls, and the D bit is set if this
244 * full register save path has been executed. We check the D 243 * full register save path has been executed. We check the D
245 * bit on syscall_return_rfi to determine which registers to 244 * bit on syscall_return_rfi to determine which registers to
246 * restore. An interrupt results in a full PSW saved with the 245 * restore. An interrupt results in a full PSW saved with the
247 * C bit set, a non-traced syscall entry results in C and D clear 246 * C bit set, a non-traced syscall entry results in C and D clear
248 * in the saved PSW. 247 * in the saved PSW.
249 */ 248 */
250 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 249 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
251 LDREG TI_TASK(%r1), %r1 250 LDREG TI_TASK(%r1), %r1
252 ssm 0,%r2 251 ssm 0,%r2
253 STREG %r2,TASK_PT_PSW(%r1) /* Lower 8 bits only!! */ 252 STREG %r2,TASK_PT_PSW(%r1) /* Lower 8 bits only!! */
254 mfsp %sr0,%r2 253 mfsp %sr0,%r2
255 STREG %r2,TASK_PT_SR0(%r1) 254 STREG %r2,TASK_PT_SR0(%r1)
256 mfsp %sr1,%r2 255 mfsp %sr1,%r2
257 STREG %r2,TASK_PT_SR1(%r1) 256 STREG %r2,TASK_PT_SR1(%r1)
258 mfsp %sr2,%r2 257 mfsp %sr2,%r2
259 STREG %r2,TASK_PT_SR2(%r1) 258 STREG %r2,TASK_PT_SR2(%r1)
260 mfsp %sr3,%r2 259 mfsp %sr3,%r2
261 STREG %r2,TASK_PT_SR3(%r1) 260 STREG %r2,TASK_PT_SR3(%r1)
262 STREG %r2,TASK_PT_SR4(%r1) 261 STREG %r2,TASK_PT_SR4(%r1)
263 STREG %r2,TASK_PT_SR5(%r1) 262 STREG %r2,TASK_PT_SR5(%r1)
264 STREG %r2,TASK_PT_SR6(%r1) 263 STREG %r2,TASK_PT_SR6(%r1)
265 STREG %r2,TASK_PT_SR7(%r1) 264 STREG %r2,TASK_PT_SR7(%r1)
266 STREG %r2,TASK_PT_IASQ0(%r1) 265 STREG %r2,TASK_PT_IASQ0(%r1)
267 STREG %r2,TASK_PT_IASQ1(%r1) 266 STREG %r2,TASK_PT_IASQ1(%r1)
268 LDREG TASK_PT_GR31(%r1),%r2 267 LDREG TASK_PT_GR31(%r1),%r2
269 STREG %r2,TASK_PT_IAOQ0(%r1) 268 STREG %r2,TASK_PT_IAOQ0(%r1)
270 ldo 4(%r2),%r2 269 ldo 4(%r2),%r2
271 STREG %r2,TASK_PT_IAOQ1(%r1) 270 STREG %r2,TASK_PT_IAOQ1(%r1)
272 ldo TASK_REGS(%r1),%r2 271 ldo TASK_REGS(%r1),%r2
273 /* reg_save %r2 */ 272 /* reg_save %r2 */
274 STREG %r3,PT_GR3(%r2) 273 STREG %r3,PT_GR3(%r2)
275 STREG %r4,PT_GR4(%r2) 274 STREG %r4,PT_GR4(%r2)
276 STREG %r5,PT_GR5(%r2) 275 STREG %r5,PT_GR5(%r2)
277 STREG %r6,PT_GR6(%r2) 276 STREG %r6,PT_GR6(%r2)
278 STREG %r7,PT_GR7(%r2) 277 STREG %r7,PT_GR7(%r2)
279 STREG %r8,PT_GR8(%r2) 278 STREG %r8,PT_GR8(%r2)
280 STREG %r9,PT_GR9(%r2) 279 STREG %r9,PT_GR9(%r2)
281 STREG %r10,PT_GR10(%r2) 280 STREG %r10,PT_GR10(%r2)
282 STREG %r11,PT_GR11(%r2) 281 STREG %r11,PT_GR11(%r2)
283 STREG %r12,PT_GR12(%r2) 282 STREG %r12,PT_GR12(%r2)
284 STREG %r13,PT_GR13(%r2) 283 STREG %r13,PT_GR13(%r2)
285 STREG %r14,PT_GR14(%r2) 284 STREG %r14,PT_GR14(%r2)
286 STREG %r15,PT_GR15(%r2) 285 STREG %r15,PT_GR15(%r2)
287 STREG %r16,PT_GR16(%r2) 286 STREG %r16,PT_GR16(%r2)
288 STREG %r17,PT_GR17(%r2) 287 STREG %r17,PT_GR17(%r2)
289 STREG %r18,PT_GR18(%r2) 288 STREG %r18,PT_GR18(%r2)
290 /* Finished saving things for the debugger */ 289 /* Finished saving things for the debugger */
291 290
292 ldil L%syscall_trace,%r1 291 ldil L%syscall_trace,%r1
293 ldil L%tracesys_next,%r2 292 ldil L%tracesys_next,%r2
294 be R%syscall_trace(%sr7,%r1) 293 be R%syscall_trace(%sr7,%r1)
295 ldo R%tracesys_next(%r2),%r2 294 ldo R%tracesys_next(%r2),%r2
296 295
297 tracesys_next: 296 tracesys_next:
298 ldil L%sys_call_table,%r1 297 ldil L%sys_call_table,%r1
299 ldo R%sys_call_table(%r1), %r19 298 ldo R%sys_call_table(%r1), %r19
300 299
301 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 300 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
302 LDREG TI_TASK(%r1), %r1 301 LDREG TI_TASK(%r1), %r1
303 LDREG TASK_PT_GR20(%r1), %r20 302 LDREG TASK_PT_GR20(%r1), %r20
304 LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */ 303 LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */
305 LDREG TASK_PT_GR25(%r1), %r25 304 LDREG TASK_PT_GR25(%r1), %r25
306 LDREG TASK_PT_GR24(%r1), %r24 305 LDREG TASK_PT_GR24(%r1), %r24
307 LDREG TASK_PT_GR23(%r1), %r23 306 LDREG TASK_PT_GR23(%r1), %r23
308 #ifdef CONFIG_64BIT 307 #ifdef CONFIG_64BIT
309 LDREG TASK_PT_GR22(%r1), %r22 308 LDREG TASK_PT_GR22(%r1), %r22
310 LDREG TASK_PT_GR21(%r1), %r21 309 LDREG TASK_PT_GR21(%r1), %r21
311 ldo -16(%r30),%r29 /* Reference param save area */ 310 ldo -16(%r30),%r29 /* Reference param save area */
312 #endif 311 #endif
313 312
314 comiclr,>>= __NR_Linux_syscalls, %r20, %r0 313 comiclr,>>= __NR_Linux_syscalls, %r20, %r0
315 b,n .Lsyscall_nosys 314 b,n .Lsyscall_nosys
316 315
317 LDREGX %r20(%r19), %r19 316 LDREGX %r20(%r19), %r19
318 317
319 /* If this is a sys_rt_sigreturn call, and the signal was received 318 /* If this is a sys_rt_sigreturn call, and the signal was received
320 * when not in_syscall, then we want to return via syscall_exit_rfi, 319 * when not in_syscall, then we want to return via syscall_exit_rfi,
321 * not syscall_exit. Signal no. in r20, in_syscall in r25 (see 320 * not syscall_exit. Signal no. in r20, in_syscall in r25 (see
322 * trampoline code in signal.c). 321 * trampoline code in signal.c).
323 */ 322 */
324 ldi __NR_rt_sigreturn,%r2 323 ldi __NR_rt_sigreturn,%r2
325 comb,= %r2,%r20,.Ltrace_rt_sigreturn 324 comb,= %r2,%r20,.Ltrace_rt_sigreturn
326 .Ltrace_in_syscall: 325 .Ltrace_in_syscall:
327 ldil L%tracesys_exit,%r2 326 ldil L%tracesys_exit,%r2
328 be 0(%sr7,%r19) 327 be 0(%sr7,%r19)
329 ldo R%tracesys_exit(%r2),%r2 328 ldo R%tracesys_exit(%r2),%r2
330 329
331 /* Do *not* call this function on the gateway page, because it 330 /* Do *not* call this function on the gateway page, because it
332 makes a direct call to syscall_trace. */ 331 makes a direct call to syscall_trace. */
333 332
334 tracesys_exit: 333 tracesys_exit:
335 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 334 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
336 LDREG TI_TASK(%r1), %r1 335 LDREG TI_TASK(%r1), %r1
337 #ifdef CONFIG_64BIT 336 #ifdef CONFIG_64BIT
338 ldo -16(%r30),%r29 /* Reference param save area */ 337 ldo -16(%r30),%r29 /* Reference param save area */
339 #endif 338 #endif
340 bl syscall_trace, %r2 339 bl syscall_trace, %r2
341 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ 340 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
342 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 341 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
343 LDREG TI_TASK(%r1), %r1 342 LDREG TI_TASK(%r1), %r1
344 LDREG TASK_PT_GR28(%r1), %r28 /* Restore return val. */ 343 LDREG TASK_PT_GR28(%r1), %r28 /* Restore return val. */
345 344
346 ldil L%syscall_exit,%r1 345 ldil L%syscall_exit,%r1
347 be,n R%syscall_exit(%sr7,%r1) 346 be,n R%syscall_exit(%sr7,%r1)
348 347
349 .Ltrace_rt_sigreturn: 348 .Ltrace_rt_sigreturn:
350 comib,<> 0,%r25,.Ltrace_in_syscall 349 comib,<> 0,%r25,.Ltrace_in_syscall
351 ldil L%tracesys_sigexit,%r2 350 ldil L%tracesys_sigexit,%r2
352 be 0(%sr7,%r19) 351 be 0(%sr7,%r19)
353 ldo R%tracesys_sigexit(%r2),%r2 352 ldo R%tracesys_sigexit(%r2),%r2
354 353
355 tracesys_sigexit: 354 tracesys_sigexit:
356 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 355 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
357 LDREG 0(%r1), %r1 356 LDREG 0(%r1), %r1
358 #ifdef CONFIG_64BIT 357 #ifdef CONFIG_64BIT
359 ldo -16(%r30),%r29 /* Reference param save area */ 358 ldo -16(%r30),%r29 /* Reference param save area */
360 #endif 359 #endif
361 bl syscall_trace, %r2 360 bl syscall_trace, %r2
362 nop 361 nop
363 362
364 ldil L%syscall_exit_rfi,%r1 363 ldil L%syscall_exit_rfi,%r1
365 be,n R%syscall_exit_rfi(%sr7,%r1) 364 be,n R%syscall_exit_rfi(%sr7,%r1)
366 365
367 366
368 /********************************************************* 367 /*********************************************************
369 Light-weight-syscall code 368 Light-weight-syscall code
370 369
371 r20 - lws number 370 r20 - lws number
372 r26,r25,r24,r23,r22 - Input registers 371 r26,r25,r24,r23,r22 - Input registers
373 r28 - Function return register 372 r28 - Function return register
374 r21 - Error code. 373 r21 - Error code.
375 374
376 Scratch: Any of the above that aren't 375 Scratch: Any of the above that aren't
377 currently being used, including r1. 376 currently being used, including r1.
378 377
379 Return pointer: r31 (Not usable) 378 Return pointer: r31 (Not usable)
380 379
381 Error codes returned by entry path: 380 Error codes returned by entry path:
382 381
383 ENOSYS - r20 was an invalid LWS number. 382 ENOSYS - r20 was an invalid LWS number.
384 383
385 *********************************************************/ 384 *********************************************************/
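/* How userspace is expected to reach lws_start (sketch, modeled on the
 * glibc calling convention; the exact sequence is an assumption):
 *
 *	ldi	0, %r20			; LWS number (0 = compare_and_swap32)
 *	ble	0xb0(%sr2, %r0)		; branch to the fixed lws_entry offset
 *	nop				; delay slot
 *	; on return: %r28 = result, %r21 = error code, link in %r31
 */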
386 lws_start: 385 lws_start:
387 /* Gate and ensure we return to userspace */ 386 /* Gate and ensure we return to userspace */
388 gate .+8, %r0 387 gate .+8, %r0
389 depi 3, 31, 2, %r31 /* Ensure we return to userspace */ 388 depi 3, 31, 2, %r31 /* Ensure we return to userspace */
390 389
391 #ifdef CONFIG_64BIT 390 #ifdef CONFIG_64BIT
392 /* FIXME: If we are a 64-bit kernel just 391 /* FIXME: If we are a 64-bit kernel just
393 * turn this on unconditionally. 392 * turn this on unconditionally.
394 */ 393 */
395 ssm PSW_SM_W, %r1 394 ssm PSW_SM_W, %r1
396 extrd,u %r1,PSW_W_BIT,1,%r1 395 extrd,u %r1,PSW_W_BIT,1,%r1
397 /* sp must be aligned on 4, so deposit the W bit setting into 396 /* sp must be aligned on 4, so deposit the W bit setting into
398 * the bottom of sp temporarily */ 397 * the bottom of sp temporarily */
399 or,ev %r1,%r30,%r30 398 or,ev %r1,%r30,%r30
400 399
401 /* Clip LWS number to a 32-bit value always */ 400 /* Clip LWS number to a 32-bit value always */
402 depdi 0, 31, 32, %r20 401 depdi 0, 31, 32, %r20
403 #endif 402 #endif
404 403
405 /* Is the lws entry number valid? */ 404 /* Is the lws entry number valid? */
406 comiclr,>>= __NR_lws_entries, %r20, %r0 405 comiclr,>>= __NR_lws_entries, %r20, %r0
407 b,n lws_exit_nosys 406 b,n lws_exit_nosys
408 407
409 /* WARNING: Trashing sr2 and sr3 */ 408 /* WARNING: Trashing sr2 and sr3 */
410 mfsp %sr7,%r1 /* get userspace into sr3 */ 409 mfsp %sr7,%r1 /* get userspace into sr3 */
411 mtsp %r1,%sr3 410 mtsp %r1,%sr3
412 mtsp %r0,%sr2 /* get kernel space into sr2 */ 411 mtsp %r0,%sr2 /* get kernel space into sr2 */
413 412
414 /* Load table start */ 413 /* Load table start */
415 ldil L%lws_table, %r1 414 ldil L%lws_table, %r1
416 ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */ 415 ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */
417 LDREGX %r20(%sr2,r28), %r21 /* Scratch use of r21 */ 416 LDREGX %r20(%sr2,r28), %r21 /* Scratch use of r21 */
418 417
419 /* Jump to lws, lws table pointers already relocated */ 418 /* Jump to lws, lws table pointers already relocated */
420 be,n 0(%sr2,%r21) 419 be,n 0(%sr2,%r21)
421 420
422 lws_exit_nosys: 421 lws_exit_nosys:
423 ldo -ENOSYS(%r0),%r21 /* set errno */ 422 ldo -ENOSYS(%r0),%r21 /* set errno */
424 /* Fall through: Return to userspace */ 423 /* Fall through: Return to userspace */
425 424
426 lws_exit: 425 lws_exit:
427 #ifdef CONFIG_64BIT 426 #ifdef CONFIG_64BIT
428 /* decide whether to reset the wide mode bit 427 /* decide whether to reset the wide mode bit
429 * 428 *
430 * For a syscall, the W bit is stored in the lowest bit 429 * For a syscall, the W bit is stored in the lowest bit
431 * of sp. Extract it and reset W if it is zero */ 430 * of sp. Extract it and reset W if it is zero */
432 extrd,u,*<> %r30,63,1,%r1 431 extrd,u,*<> %r30,63,1,%r1
433 rsm PSW_SM_W, %r0 432 rsm PSW_SM_W, %r0
434 /* now reset the lowest bit of sp if it was set */ 433 /* now reset the lowest bit of sp if it was set */
435 xor %r30,%r1,%r30 434 xor %r30,%r1,%r30
436 #endif 435 #endif
437 be,n 0(%sr3, %r31) 436 be,n 0(%sr3, %r31)
438 437
439 438
440 439
441 /*************************************************** 440 /***************************************************
442 Implementing CAS as an atomic operation: 441 Implementing CAS as an atomic operation:
443 442
444 %r26 - Address to examine 443 %r26 - Address to examine
445 %r25 - Old value to check (old) 444 %r25 - Old value to check (old)
446 %r24 - New value to set (new) 445 %r24 - New value to set (new)
447 %r28 - Return prev through this register. 446 %r28 - Return prev through this register.
448 %r21 - Kernel error code 447 %r21 - Kernel error code
449 448
450 If debugging is DISabled: 449 If debugging is DISabled:
451 450
452 %r21 has the following meanings: 451 %r21 has the following meanings:
453 452
454 EAGAIN - CAS is busy, ldcw failed, try again. 453 EAGAIN - CAS is busy, ldcw failed, try again.
455 EFAULT - Read or write failed. 454 EFAULT - Read or write failed.
456 455
457 If debugging is enabled: 456 If debugging is enabled:
458 457
459 EDEADLOCK - CAS called recursively. 458 EDEADLOCK - CAS called recursively.
460 EAGAIN && r28 == 1 - CAS is busy. Lock contended. 459 EAGAIN && r28 == 1 - CAS is busy. Lock contended.
461 EAGAIN && r28 == 2 - CAS is busy. ldcw failed. 460 EAGAIN && r28 == 2 - CAS is busy. ldcw failed.
462 EFAULT - Read or write failed. 461 EFAULT - Read or write failed.
463 462
464 Scratch: r20, r28, r1 463 Scratch: r20, r28, r1
465 464
466 ****************************************************/ 465 ****************************************************/
467 466
468 /* Do not enable LWS debugging */ 467 /* Do not enable LWS debugging */
469 #define ENABLE_LWS_DEBUG 0 468 #define ENABLE_LWS_DEBUG 0
470 469
471 /* ELF64 Process entry path */ 470 /* ELF64 Process entry path */
472 lws_compare_and_swap64: 471 lws_compare_and_swap64:
473 #ifdef CONFIG_64BIT 472 #ifdef CONFIG_64BIT
474 b,n lws_compare_and_swap 473 b,n lws_compare_and_swap
475 #else 474 #else
476 /* If we are not a 64-bit kernel, then we don't 475 /* If we are not a 64-bit kernel, then we don't
477 * implement having 64-bit input registers 476 * implement having 64-bit input registers
478 */ 477 */
479 b,n lws_exit_nosys 478 b,n lws_exit_nosys
480 #endif 479 #endif
481 480
482 /* ELF32 Process entry path */ 481 /* ELF32 Process entry path */
483 lws_compare_and_swap32: 482 lws_compare_and_swap32:
484 #ifdef CONFIG_64BIT 483 #ifdef CONFIG_64BIT
485 /* Clip all the input registers */ 484 /* Clip all the input registers */
486 depdi 0, 31, 32, %r26 485 depdi 0, 31, 32, %r26
487 depdi 0, 31, 32, %r25 486 depdi 0, 31, 32, %r25
488 depdi 0, 31, 32, %r24 487 depdi 0, 31, 32, %r24
489 #endif 488 #endif
490 489
491 lws_compare_and_swap: 490 lws_compare_and_swap:
492 #ifdef CONFIG_SMP 491 #ifdef CONFIG_SMP
493 /* Load start of lock table */ 492 /* Load start of lock table */
494 ldil L%lws_lock_start, %r20 493 ldil L%lws_lock_start, %r20
495 ldo R%lws_lock_start(%r20), %r28 494 ldo R%lws_lock_start(%r20), %r28
496 495
497 /* Extract four bits from r26 and hash lock (Bits 4-7) */ 496 /* Extract four bits from r26 and hash lock (Bits 4-7) */
498 extru %r26, 27, 4, %r20 497 extru %r26, 27, 4, %r20
499 498
500 /* Find the lock to use: the hash is one of 0 to 499 /* Find the lock to use: the hash is one of 0 to
501 15, multiplied by 16 (to keep it 16-byte aligned) 500 15, multiplied by 16 (to keep it 16-byte aligned)
502 and added to the lock table base. */ 501 and added to the lock table base. */
503 shlw %r20, 4, %r20 502 shlw %r20, 4, %r20
504 add %r20, %r28, %r20 503 add %r20, %r28, %r20
505 504
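/* C equivalent of the hash above (sketch):
 *
 *	lock = (char *) lws_lock_start
 *	       + (((unsigned long) addr >> 4) & 0x0f) * 16;
 */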
506 # if ENABLE_LWS_DEBUG 505 # if ENABLE_LWS_DEBUG
507 /* 506 /*
508 DEBUG, check for deadlock! 507 DEBUG, check for deadlock!
509 If the thread register values are the same 508 If the thread register values are the same
510 then we were the one that locked it last and 509 then we were the one that locked it last and
511 this is a recursive call that will deadlock. 510 this is a recursive call that will deadlock.
512 We *must* give up this call and fail. 511 We *must* give up this call and fail.
513 */ 512 */
514 ldw 4(%sr2,%r20), %r28 /* Load thread register */ 513 ldw 4(%sr2,%r20), %r28 /* Load thread register */
515 /* WARNING: If cr27 cycles to the same value we have problems */ 514 /* WARNING: If cr27 cycles to the same value we have problems */
516 mfctl %cr27, %r21 /* Get current thread register */ 515 mfctl %cr27, %r21 /* Get current thread register */
517 cmpb,<>,n %r21, %r28, cas_lock /* Called recursively? */ 516 cmpb,<>,n %r21, %r28, cas_lock /* Called recursively? */
518 b lws_exit /* Return error! */ 517 b lws_exit /* Return error! */
519 ldo -EDEADLOCK(%r0), %r21 518 ldo -EDEADLOCK(%r0), %r21
520 cas_lock: 519 cas_lock:
521 cmpb,=,n %r0, %r28, cas_nocontend /* Is nobody using it? */ 520 cmpb,=,n %r0, %r28, cas_nocontend /* Is nobody using it? */
522 ldo 1(%r0), %r28 /* 1st case */ 521 ldo 1(%r0), %r28 /* 1st case */
523 b lws_exit /* Contended... */ 522 b lws_exit /* Contended... */
524 ldo -EAGAIN(%r0), %r21 /* Spin in userspace */ 523 ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
525 cas_nocontend: 524 cas_nocontend:
526 # endif 525 # endif
527 /* ENABLE_LWS_DEBUG */ 526 /* ENABLE_LWS_DEBUG */
528 527
529 LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */ 528 LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
530 cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */ 529 cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */
531 cas_wouldblock: 530 cas_wouldblock:
532 ldo 2(%r0), %r28 /* 2nd case */ 531 ldo 2(%r0), %r28 /* 2nd case */
533 b lws_exit /* Contended... */ 532 b lws_exit /* Contended... */
534 ldo -EAGAIN(%r0), %r21 /* Spin in userspace */ 533 ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
535 #endif 534 #endif
536 /* CONFIG_SMP */ 535 /* CONFIG_SMP */
537 536
538 /* 537 /*
539 prev = *addr; 538 prev = *addr;
540 if ( prev == old ) 539 if ( prev == old )
541 *addr = new; 540 *addr = new;
542 return prev; 541 return prev;
543 */ 542 */
544 543
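/* Implied caller-side contract (sketch; the wrapper and error-variable
 * names are hypothetical): userspace retries while the kernel reports
 * the lock busy:
 *
 *	do {
 *		prev = __lws_cas32(addr, old, new);
 *	} while (__lws_err == -EAGAIN);
 */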
545 /* NOTES: 544 /* NOTES:
546 This all works because intr_do_signal 545 This all works because intr_do_signal
547 and schedule both check the return iasq 546 and schedule both check the return iasq
548 and see that we are on the kernel page, 547 and see that we are on the kernel page,
549 so this process is never scheduled off 548 so this process is never scheduled off
550 nor ever sent any signal of any sort; 549 nor ever sent any signal of any sort;
551 thus it is wholly atomic from userspace's 550 thus it is wholly atomic from userspace's
552 perspective. 551 perspective.
553 */ 552 */
554 cas_action: 553 cas_action:
555 #if defined CONFIG_SMP && ENABLE_LWS_DEBUG 554 #if defined CONFIG_SMP && ENABLE_LWS_DEBUG
556 /* DEBUG */ 555 /* DEBUG */
557 mfctl %cr27, %r1 556 mfctl %cr27, %r1
558 stw %r1, 4(%sr2,%r20) 557 stw %r1, 4(%sr2,%r20)
559 #endif 558 #endif
560 /* The load and store could fail */ 559 /* The load and store could fail */
561 1: ldw 0(%sr3,%r26), %r28 560 1: ldw 0(%sr3,%r26), %r28
562 sub,<> %r28, %r25, %r0 561 sub,<> %r28, %r25, %r0
563 2: stw %r24, 0(%sr3,%r26) 562 2: stw %r24, 0(%sr3,%r26)
564 #ifdef CONFIG_SMP 563 #ifdef CONFIG_SMP
565 /* Free lock */ 564 /* Free lock */
566 stw %r20, 0(%sr2,%r20) 565 stw %r20, 0(%sr2,%r20)
567 # if ENABLE_LWS_DEBUG 566 # if ENABLE_LWS_DEBUG
568 /* Clear thread register indicator */ 567 /* Clear thread register indicator */
569 stw %r0, 4(%sr2,%r20) 568 stw %r0, 4(%sr2,%r20)
570 # endif 569 # endif
571 #endif 570 #endif
572 /* Return to userspace, set no error */ 571 /* Return to userspace, set no error */
573 b lws_exit 572 b lws_exit
574 copy %r0, %r21 573 copy %r0, %r21
575 574
576 3: 575 3:
577 /* Error occurred on load or store */ 576 /* Error occurred on load or store */
578 #ifdef CONFIG_SMP 577 #ifdef CONFIG_SMP
579 /* Free lock */ 578 /* Free lock */
580 stw %r20, 0(%sr2,%r20) 579 stw %r20, 0(%sr2,%r20)
581 # if ENABLE_LWS_DEBUG 580 # if ENABLE_LWS_DEBUG
582 stw %r0, 4(%sr2,%r20) 581 stw %r0, 4(%sr2,%r20)
583 # endif 582 # endif
584 #endif 583 #endif
585 b lws_exit 584 b lws_exit
586 ldo -EFAULT(%r0),%r21 /* set errno */ 585 ldo -EFAULT(%r0),%r21 /* set errno */
587 nop 586 nop
588 nop 587 nop
589 nop 588 nop
590 nop 589 nop
591 590
592 /* Two exception table entries, one for the load, 591 /* Two exception table entries, one for the load,
593 the other for the store. Either returns -EFAULT. 592 the other for the store. Either returns -EFAULT.
594 Each of the entries must be relocated. */ 593 Each of the entries must be relocated. */
595 .section __ex_table,"aw" 594 .section __ex_table,"aw"
596 ASM_ULONG_INSN (1b - linux_gateway_page), (3b - linux_gateway_page) 595 ASM_ULONG_INSN (1b - linux_gateway_page), (3b - linux_gateway_page)
597 ASM_ULONG_INSN (2b - linux_gateway_page), (3b - linux_gateway_page) 596 ASM_ULONG_INSN (2b - linux_gateway_page), (3b - linux_gateway_page)
598 .previous 597 .previous
599 598
600 599
601 /* Make sure nothing else is placed on this page */ 600 /* Make sure nothing else is placed on this page */
602 .align PAGE_SIZE 601 .align PAGE_SIZE
603 END(linux_gateway_page) 602 END(linux_gateway_page)
604 ENTRY(end_linux_gateway_page) 603 ENTRY(end_linux_gateway_page)
605 604
606 /* Relocate symbols assuming linux_gateway_page is mapped 605 /* Relocate symbols assuming linux_gateway_page is mapped
607 to virtual address 0x0 */ 606 to virtual address 0x0 */
608 607
609 #define LWS_ENTRY(_name_) ASM_ULONG_INSN (lws_##_name_ - linux_gateway_page) 608 #define LWS_ENTRY(_name_) ASM_ULONG_INSN (lws_##_name_ - linux_gateway_page)
610 609
611 .section .rodata,"a" 610 .section .rodata,"a"
612 611
613 .align PAGE_SIZE 612 .align PAGE_SIZE
614 /* Light-weight-syscall table */ 613 /* Light-weight-syscall table */
615 /* Start of lws table. */ 614 /* Start of lws table. */
616 ENTRY(lws_table) 615 ENTRY(lws_table)
617 LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic compare and swap */ 616 LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic compare and swap */
618 LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */ 617 LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */
619 END(lws_table) 618 END(lws_table)
620 /* End of lws table */ 619 /* End of lws table */
621 620
622 .align PAGE_SIZE 621 .align PAGE_SIZE
623 ENTRY(sys_call_table) 622 ENTRY(sys_call_table)
624 #include "syscall_table.S" 623 #include "syscall_table.S"
625 END(sys_call_table) 624 END(sys_call_table)
626 625
627 #ifdef CONFIG_64BIT 626 #ifdef CONFIG_64BIT
628 .align PAGE_SIZE 627 .align PAGE_SIZE
629 ENTRY(sys_call_table64) 628 ENTRY(sys_call_table64)
630 #define SYSCALL_TABLE_64BIT 629 #define SYSCALL_TABLE_64BIT
631 #include "syscall_table.S" 630 #include "syscall_table.S"
632 END(sys_call_table64) 631 END(sys_call_table64)
633 #endif 632 #endif
634 633
635 #ifdef CONFIG_SMP 634 #ifdef CONFIG_SMP
636 /* 635 /*
637 All light-weight-syscall atomic operations 636 All light-weight-syscall atomic operations
638 will use this set of locks 637 will use this set of locks
639 */ 638 */
640 .section .data, "aw" 639 .section .data
641 .align PAGE_SIZE 640 .align PAGE_SIZE
642 ENTRY(lws_lock_start) 641 ENTRY(lws_lock_start)
643 /* lws locks */ 642 /* lws locks */
644 .align 16 643 .align 16
645 .rept 16 644 .rept 16
646 /* Keep locks aligned at 16 bytes */ 645 /* Keep locks aligned at 16 bytes */
647 .word 1 646 .word 1
648 .word 0 647 .word 0
649 .word 0 648 .word 0
650 .word 0 649 .word 0
651 .endr 650 .endr
652 END(lws_lock_start) 651 END(lws_lock_start)
653 .previous 652 .previous
654 #endif 653 #endif
655 /* CONFIG_SMP for lws_lock_start */ 654 /* CONFIG_SMP for lws_lock_start */
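
Laid out in C, lws_lock_start above is sixteen 16-byte-aligned slots, each beginning with a word initialized to 1 (nonzero meaning free under the PA-RISC ldcw convention); the second word carries the cr27 copy when ENABLE_LWS_DEBUG is set. A sketch under those assumptions, with hypothetical names:

#include <stdint.h>

struct lws_lock_model {
	uint32_t lock;		/* .word 1: nonzero means free     */
	uint32_t debug;		/* cr27 copy when ENABLE_LWS_DEBUG */
	uint32_t pad[2];	/* round each slot up to 16 bytes  */
} __attribute__((aligned(16)));

static struct lws_lock_model lws_locks_model[16] = {
	[0 ... 15] = { .lock = 1 },	/* GNU range initializer */
};
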
656 655
657 .end 656 .end
658 657
659 658
660 659
arch/parisc/lib/fixup.S
1 /* 1 /*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/) 2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 * 3 *
4 * Copyright (C) 2004 Randolph Chung <tausq@debian.org> 4 * Copyright (C) 2004 Randolph Chung <tausq@debian.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option) 8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version. 9 * any later version.
10 * 10 *
11 * This program is distributed in the hope that it will be useful, 11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 * 15 *
16 * You should have received a copy of the GNU General Public License 16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 * 19 *
20 * Fixup routines for kernel exception handling. 20 * Fixup routines for kernel exception handling.
21 */ 21 */
22 #include <asm/asm-offsets.h> 22 #include <asm/asm-offsets.h>
23 #include <asm/assembly.h> 23 #include <asm/assembly.h>
24 #include <asm/errno.h> 24 #include <asm/errno.h>
25 #include <linux/linkage.h> 25 #include <linux/linkage.h>
26 #include <linux/init.h>
27 26
28 #ifdef CONFIG_SMP 27 #ifdef CONFIG_SMP
29 .macro get_fault_ip t1 t2 28 .macro get_fault_ip t1 t2
30 addil LT%__per_cpu_offset,%r27 29 addil LT%__per_cpu_offset,%r27
31 LDREG RT%__per_cpu_offset(%r1),\t1 30 LDREG RT%__per_cpu_offset(%r1),\t1
32 /* t2 = smp_processor_id() */ 31 /* t2 = smp_processor_id() */
33 mfctl 30,\t2 32 mfctl 30,\t2
34 ldw TI_CPU(\t2),\t2 33 ldw TI_CPU(\t2),\t2
35 #ifdef CONFIG_64BIT 34 #ifdef CONFIG_64BIT
36 extrd,u \t2,63,32,\t2 35 extrd,u \t2,63,32,\t2
37 #endif 36 #endif
38 /* t2 = &__per_cpu_offset[smp_processor_id()]; */ 37 /* t2 = &__per_cpu_offset[smp_processor_id()]; */
39 LDREGX \t2(\t1),\t2 38 LDREGX \t2(\t1),\t2
40 addil LT%per_cpu__exception_data,%r27 39 addil LT%per_cpu__exception_data,%r27
41 LDREG RT%per_cpu__exception_data(%r1),\t1 40 LDREG RT%per_cpu__exception_data(%r1),\t1
42 /* t1 = &__get_cpu_var(exception_data) */ 41 /* t1 = &__get_cpu_var(exception_data) */
43 add,l \t1,\t2,\t1 42 add,l \t1,\t2,\t1
44 /* t1 = t1->fault_ip */ 43 /* t1 = t1->fault_ip */
45 LDREG EXCDATA_IP(\t1), \t1 44 LDREG EXCDATA_IP(\t1), \t1
46 .endm 45 .endm
47 #else 46 #else
48 .macro get_fault_ip t1 t2 47 .macro get_fault_ip t1 t2
49 /* t1 = &__get_cpu_var(exception_data) */ 48 /* t1 = &__get_cpu_var(exception_data) */
50 addil LT%per_cpu__exception_data,%r27 49 addil LT%per_cpu__exception_data,%r27
51 LDREG RT%per_cpu__exception_data(%r1),\t2 50 LDREG RT%per_cpu__exception_data(%r1),\t2
52 /* t1 = t2->fault_ip */ 51 /* t1 = t2->fault_ip */
53 LDREG EXCDATA_IP(\t2), \t1 52 LDREG EXCDATA_IP(\t2), \t1
54 .endm 53 .endm
55 #endif 54 #endif
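
In C terms, both variants of get_fault_ip resolve the per-CPU exception_data record and read its fault_ip field; the SMP version simply adds the __per_cpu_offset indexing by smp_processor_id(). A self-contained model, where exception_data_model and NR_CPUS_MODEL are stand-ins for the real per-CPU machinery:

#define NR_CPUS_MODEL 4

struct exception_data_model {
	unsigned long fault_ip;		/* EXCDATA_IP in the assembly */
};

/* One record per CPU, mirroring per_cpu__exception_data. */
static struct exception_data_model exception_data_model[NR_CPUS_MODEL];

static unsigned long get_fault_ip_model(int cpu)
{
	/* UP builds have a single copy, i.e. cpu is always 0. */
	return exception_data_model[cpu].fault_ip;
}
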
56 55
57 .level LEVEL 56 .level LEVEL
58 57
59 __HEAD 58 .text
60 .section .fixup, "ax" 59 .section .fixup, "ax"
61 60
62 /* get_user() fixups, store -EFAULT in r8, and 0 in r9 */ 61 /* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
63 ENTRY(fixup_get_user_skip_1) 62 ENTRY(fixup_get_user_skip_1)
64 get_fault_ip %r1,%r8 63 get_fault_ip %r1,%r8
65 ldo 4(%r1), %r1 64 ldo 4(%r1), %r1
66 ldi -EFAULT, %r8 65 ldi -EFAULT, %r8
67 bv %r0(%r1) 66 bv %r0(%r1)
68 copy %r0, %r9 67 copy %r0, %r9
69 ENDPROC(fixup_get_user_skip_1) 68 ENDPROC(fixup_get_user_skip_1)
70 69
71 ENTRY(fixup_get_user_skip_2) 70 ENTRY(fixup_get_user_skip_2)
72 get_fault_ip %r1,%r8 71 get_fault_ip %r1,%r8
73 ldo 8(%r1), %r1 72 ldo 8(%r1), %r1
74 ldi -EFAULT, %r8 73 ldi -EFAULT, %r8
75 bv %r0(%r1) 74 bv %r0(%r1)
76 copy %r0, %r9 75 copy %r0, %r9
77 ENDPROC(fixup_get_user_skip_2) 76 ENDPROC(fixup_get_user_skip_2)
78 77
79 /* put_user() fixups, store -EFAULT in r8 */ 78 /* put_user() fixups, store -EFAULT in r8 */
80 ENTRY(fixup_put_user_skip_1) 79 ENTRY(fixup_put_user_skip_1)
81 get_fault_ip %r1,%r8 80 get_fault_ip %r1,%r8
82 ldo 4(%r1), %r1 81 ldo 4(%r1), %r1
83 bv %r0(%r1) 82 bv %r0(%r1)
84 ldi -EFAULT, %r8 83 ldi -EFAULT, %r8
85 ENDPROC(fixup_put_user_skip_1) 84 ENDPROC(fixup_put_user_skip_1)
86 85
87 ENTRY(fixup_put_user_skip_2) 86 ENTRY(fixup_put_user_skip_2)
88 get_fault_ip %r1,%r8 87 get_fault_ip %r1,%r8
89 ldo 8(%r1), %r1 88 ldo 8(%r1), %r1
90 bv %r0(%r1) 89 bv %r0(%r1)
91 ldi -EFAULT, %r8 90 ldi -EFAULT, %r8
92 ENDPROC(fixup_put_user_skip_2) 91 ENDPROC(fixup_put_user_skip_2)
93 92
94 93
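Each fixup above follows the same pattern: recover the faulting instruction address, step past the skipped user-access instructions (4 bytes per instruction, so skip_1 adds 4 and skip_2 adds 8), and resume with -EFAULT in %r8 and, for the get_user variants, 0 in %r9. A hedged C rendering of that state transition; the struct and function names are illustrative, not kernel symbols:

#include <errno.h>

struct fixup_result_model {
	unsigned long resume_ip;	/* continuation address (%r1)   */
	long err;			/* error code (%r8)             */
	unsigned long value;		/* zeroed get_user result (%r9) */
};

static struct fixup_result_model
fixup_skip_model(unsigned long fault_ip, int skip_insns)
{
	struct fixup_result_model r;

	r.resume_ip = fault_ip + 4ul * skip_insns; /* ldo 4/8(%r1), %r1 */
	r.err = -EFAULT;			   /* ldi -EFAULT, %r8  */
	r.value = 0;	/* get_user fixups also do copy %r0, %r9 */
	return r;
}
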
arch/parisc/lib/lusercopy.S
1 /* 1 /*
2 * User Space Access Routines 2 * User Space Access Routines
3 * 3 *
4 * Copyright (C) 2000-2002 Hewlett-Packard (John Marvin) 4 * Copyright (C) 2000-2002 Hewlett-Packard (John Marvin)
5 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org> 5 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
6 * Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr> 6 * Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr>
7 * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org> 7 * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
8 * 8 *
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option) 12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version. 13 * any later version.
14 * 14 *
15 * This program is distributed in the hope that it will be useful, 15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 * 19 *
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */ 23 */
24 24
25 /* 25 /*
26 * These routines still have plenty of room for optimization 26 * These routines still have plenty of room for optimization
27 * (word & doubleword load/store, dual issue, store hints, etc.). 27 * (word & doubleword load/store, dual issue, store hints, etc.).
28 */ 28 */
29 29
30 /* 30 /*
31 * The following routines assume that space register 3 (sr3) contains 31 * The following routines assume that space register 3 (sr3) contains
32 * the space id associated with the current user's address space. 32 * the space id associated with the current user's address space.
33 */ 33 */
34 34
35 35
36 .text
37
36 #include <asm/assembly.h> 38 #include <asm/assembly.h>
37 #include <asm/errno.h> 39 #include <asm/errno.h>
38 #include <linux/linkage.h> 40 #include <linux/linkage.h>
39 #include <linux/init.h>
40
41 __HEAD
42 41
43 /* 42 /*
44 * get_sr gets the appropriate space value into 43 * get_sr gets the appropriate space value into
45 * sr1 for kernel/user space access, depending 44 * sr1 for kernel/user space access, depending
46 * on the flag stored in the task structure. 45 * on the flag stored in the task structure.
47 */ 46 */
48 47
49 .macro get_sr 48 .macro get_sr
50 mfctl %cr30,%r1 49 mfctl %cr30,%r1
51 ldw TI_SEGMENT(%r1),%r22 50 ldw TI_SEGMENT(%r1),%r22
52 mfsp %sr3,%r1 51 mfsp %sr3,%r1
53 or,<> %r22,%r0,%r0 52 or,<> %r22,%r0,%r0
54 copy %r0,%r1 53 copy %r0,%r1
55 mtsp %r1,%sr1 54 mtsp %r1,%sr1
56 .endm 55 .endm
57 56
58 .macro fixup_branch lbl 57 .macro fixup_branch lbl
59 ldil L%\lbl, %r1 58 ldil L%\lbl, %r1
60 ldo R%\lbl(%r1), %r1 59 ldo R%\lbl(%r1), %r1
61 bv %r0(%r1) 60 bv %r0(%r1)
62 .endm 61 .endm
63 62
64 /* 63 /*
65 * long lstrncpy_from_user(char *dst, const char *src, long n) 64 * long lstrncpy_from_user(char *dst, const char *src, long n)
66 * 65 *
67 * Returns -EFAULT if exception before terminator, 66 * Returns -EFAULT if exception before terminator,
68 * N if the entire buffer filled, 67 * N if the entire buffer filled,
69 * otherwise strlen (i.e. excludes zero byte) 68 * otherwise strlen (i.e. excludes zero byte)
70 */ 69 */
71 70
72 ENTRY(lstrncpy_from_user) 71 ENTRY(lstrncpy_from_user)
73 .proc 72 .proc
74 .callinfo NO_CALLS 73 .callinfo NO_CALLS
75 .entry 74 .entry
76 comib,= 0,%r24,$lsfu_done 75 comib,= 0,%r24,$lsfu_done
77 copy %r24,%r23 76 copy %r24,%r23
78 get_sr 77 get_sr
79 1: ldbs,ma 1(%sr1,%r25),%r1 78 1: ldbs,ma 1(%sr1,%r25),%r1
80 $lsfu_loop: 79 $lsfu_loop:
81 stbs,ma %r1,1(%r26) 80 stbs,ma %r1,1(%r26)
82 comib,=,n 0,%r1,$lsfu_done 81 comib,=,n 0,%r1,$lsfu_done
83 addib,<>,n -1,%r24,$lsfu_loop 82 addib,<>,n -1,%r24,$lsfu_loop
84 2: ldbs,ma 1(%sr1,%r25),%r1 83 2: ldbs,ma 1(%sr1,%r25),%r1
85 $lsfu_done: 84 $lsfu_done:
86 sub %r23,%r24,%r28 85 sub %r23,%r24,%r28
87 $lsfu_exit: 86 $lsfu_exit:
88 bv %r0(%r2) 87 bv %r0(%r2)
89 nop 88 nop
90 .exit 89 .exit
91 ENDPROC(lstrncpy_from_user) 90 ENDPROC(lstrncpy_from_user)
92 91
93 .section .fixup,"ax" 92 .section .fixup,"ax"
94 3: fixup_branch $lsfu_exit 93 3: fixup_branch $lsfu_exit
95 ldi -EFAULT,%r28 94 ldi -EFAULT,%r28
96 .previous 95 .previous
97 96
98 .section __ex_table,"aw" 97 .section __ex_table,"aw"
99 ASM_ULONG_INSN 1b,3b 98 ASM_ULONG_INSN 1b,3b
100 ASM_ULONG_INSN 2b,3b 99 ASM_ULONG_INSN 2b,3b
101 .previous 100 .previous
102 101
103 .procend 102 .procend
104 103
105 /* 104 /*
106 * unsigned long lclear_user(void *to, unsigned long n) 105 * unsigned long lclear_user(void *to, unsigned long n)
107 * 106 *
108 * Returns 0 for success. 107 * Returns 0 for success.
109 * otherwise, returns number of bytes not transferred. 108 * otherwise, returns number of bytes not transferred.
110 */ 109 */
111 110
112 ENTRY(lclear_user) 111 ENTRY(lclear_user)
113 .proc 112 .proc
114 .callinfo NO_CALLS 113 .callinfo NO_CALLS
115 .entry 114 .entry
116 comib,=,n 0,%r25,$lclu_done 115 comib,=,n 0,%r25,$lclu_done
117 get_sr 116 get_sr
118 $lclu_loop: 117 $lclu_loop:
119 addib,<> -1,%r25,$lclu_loop 118 addib,<> -1,%r25,$lclu_loop
120 1: stbs,ma %r0,1(%sr1,%r26) 119 1: stbs,ma %r0,1(%sr1,%r26)
121 120
122 $lclu_done: 121 $lclu_done:
123 bv %r0(%r2) 122 bv %r0(%r2)
124 copy %r25,%r28 123 copy %r25,%r28
125 .exit 124 .exit
126 ENDPROC(lclear_user) 125 ENDPROC(lclear_user)
127 126
128 .section .fixup,"ax" 127 .section .fixup,"ax"
129 2: fixup_branch $lclu_done 128 2: fixup_branch $lclu_done
130 ldo 1(%r25),%r25 129 ldo 1(%r25),%r25
131 .previous 130 .previous
132 131
133 .section __ex_table,"aw" 132 .section __ex_table,"aw"
134 ASM_ULONG_INSN 1b,2b 133 ASM_ULONG_INSN 1b,2b
135 .previous 134 .previous
136 135
137 .procend 136 .procend
138 137
139 /* 138 /*
140 * long lstrnlen_user(char *s, long n) 139 * long lstrnlen_user(char *s, long n)
141 * 140 *
142 * Returns 0 if exception before zero byte or reaching N, 141 * Returns 0 if exception before zero byte or reaching N,
143 * N+1 if N would be exceeded, 142 * N+1 if N would be exceeded,
144 * else strlen + 1 (i.e. includes zero byte). 143 * else strlen + 1 (i.e. includes zero byte).
145 */ 144 */
146 145
147 ENTRY(lstrnlen_user) 146 ENTRY(lstrnlen_user)
148 .proc 147 .proc
149 .callinfo NO_CALLS 148 .callinfo NO_CALLS
150 .entry 149 .entry
151 comib,= 0,%r25,$lslen_nzero 150 comib,= 0,%r25,$lslen_nzero
152 copy %r26,%r24 151 copy %r26,%r24
153 get_sr 152 get_sr
154 1: ldbs,ma 1(%sr1,%r26),%r1 153 1: ldbs,ma 1(%sr1,%r26),%r1
155 $lslen_loop: 154 $lslen_loop:
156 comib,=,n 0,%r1,$lslen_done 155 comib,=,n 0,%r1,$lslen_done
157 addib,<> -1,%r25,$lslen_loop 156 addib,<> -1,%r25,$lslen_loop
158 2: ldbs,ma 1(%sr1,%r26),%r1 157 2: ldbs,ma 1(%sr1,%r26),%r1
159 $lslen_done: 158 $lslen_done:
160 bv %r0(%r2) 159 bv %r0(%r2)
161 sub %r26,%r24,%r28 160 sub %r26,%r24,%r28
162 .exit 161 .exit
163 162
164 $lslen_nzero: 163 $lslen_nzero:
165 b $lslen_done 164 b $lslen_done
166 ldo 1(%r26),%r26 /* special case for N == 0 */ 165 ldo 1(%r26),%r26 /* special case for N == 0 */
167 ENDPROC(lstrnlen_user) 166 ENDPROC(lstrnlen_user)
168 167
169 .section .fixup,"ax" 168 .section .fixup,"ax"
170 3: fixup_branch $lslen_done 169 3: fixup_branch $lslen_done
171 copy %r24,%r26 /* reset r26 so 0 is returned on fault */ 170 copy %r24,%r26 /* reset r26 so 0 is returned on fault */
172 .previous 171 .previous
173 172
174 .section __ex_table,"aw" 173 .section __ex_table,"aw"
175 ASM_ULONG_INSN 1b,3b 174 ASM_ULONG_INSN 1b,3b
176 ASM_ULONG_INSN 2b,3b 175 ASM_ULONG_INSN 2b,3b
177 .previous 176 .previous
178 177
179 .procend 178 .procend
180 179
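
The return convention in lstrnlen_user's comment block (0 if a fault occurs first, N+1 if no terminator appears within N bytes, otherwise strlen + 1 so the zero byte is counted) reduces to the following plain C reference model; the fault case cannot arise here because this sketch assumes s is fully readable, and it exists only to document the contract:

static long lstrnlen_user_model(const char *s, long n)
{
	long i;

	for (i = 0; i < n; i++)
		if (s[i] == '\0')
			return i + 1;	/* strlen + 1, terminator counted */
	return n + 1;			/* N would be exceeded */
}

Note that n == 0 yields 1, matching the $lslen_nzero special case above. The other two routines can be checked the same way against their comments: lclear_user returns 0 on success or the number of bytes not cleared, and lstrncpy_from_user returns -EFAULT, N, or the string length excluding the terminator.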