Commit 3b66a1edb01b82269a668a478625765b1fa4936f

Authored by Roman Zippel
Committed by Linus Torvalds
1 parent abd03753bd

[PATCH] m68k: convert thread flags to use bit fields

Remove the task_work structure, use the standard thread flag functions, and use
shifts in entry.S to test the thread flags.  Add a few local labels to entry.S
so that gas can generate short jumps.
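
The shift tests work because the new TIF_* bit numbers (see the thread_info.h
hunk below) put the flags entry.S checks into fixed bytes of the big-endian
thread_info.flags word: bits 0-7 are tested at every exception exit, bits 8-15
additionally at syscall exit.  A minimal C sketch, not part of the patch, of how
those byte offsets and sign-bit tests line up:

#include <stdio.h>

#define TIF_SIGPENDING     6	/* signal pending */
#define TIF_NEED_RESCHED   7	/* rescheduling necessary */
#define TIF_DELAYED_TRACE 14	/* single step a syscall */
#define TIF_SYSCALL_TRACE 15	/* syscall trace active */

int main(void)
{
	/* thread_info.flags is a 32-bit unsigned long; on big-endian m68k,
	 * byte +3 holds bits 0-7 and byte +2 holds bits 8-15. */
	unsigned long flags = (1UL << TIF_SYSCALL_TRACE) | (1UL << TIF_SIGPENDING);

	unsigned char byte2 = (flags >> 8) & 0xff;	/* bits 8-15 */
	unsigned char byte3 = flags & 0xff;		/* bits 0-7  */

	/* "tstb %curptr@(TASK_INFO+TINFO_FLAGS+2); jmi do_trace_entry":
	 * TIF_SYSCALL_TRACE (bit 15) is the sign bit of byte +2. */
	printf("syscall trace: %d\n", (signed char)byte2 < 0);

	/* "lslb #1,%d0; jmi do_signal_return": one left shift moves
	 * TIF_SIGPENDING (bit 6) into the sign bit of byte +3. */
	printf("signal pending: %d\n", (signed char)(byte3 << 1) < 0);

	return 0;
}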

Finally, it changes a number of inline functions in thread_info.h into macros to
delay the current_thread_info() usage, which on m68k requires a structure
(task_struct) that is not yet defined at that point.
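
A condensed before/after of one of those helpers (the actual change is in the
include/linux/thread_info.h hunk below); the macro form only expands
current_thread_info() at the call site, where task_struct is already defined:

/* before: an inline function; its body uses current_thread_info(), which on
 * m68k dereferences current->thread.info and therefore needs the full
 * definition of struct task_struct where the header is parsed */
static inline void set_thread_flag(int flag)
{
	set_bit(flag, &current_thread_info()->flags);
}

/* after: a macro around the thread_info-based helper, so the
 * current_thread_info() expansion is deferred to the call site */
#define set_thread_flag(flag) \
	set_ti_thread_flag(current_thread_info(), flag)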

Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Cc: Al Viro <viro@parcelfarce.linux.theplanet.co.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 8 changed files with 72 additions and 178 deletions

arch/m68k/fpsp040/skeleton.S
... ... @@ -381,10 +381,8 @@
381 381 .Lnotkern:
382 382 SAVE_ALL_INT
383 383 GET_CURRENT(%d0)
384   - tstb %curptr@(TASK_NEEDRESCHED)
385   - jne ret_from_exception | deliver signals,
386   - | reschedule etc..
387   - RESTORE_ALL
  384 + | deliver signals, reschedule etc..
  385 + jra ret_from_exception
388 386  
389 387 |
390 388 | mem_write --- write to user or supervisor address space
arch/m68k/ifpsp060/iskeleton.S
... ... @@ -75,10 +75,8 @@
75 75 .Lnotkern:
76 76 SAVE_ALL_INT
77 77 GET_CURRENT(%d0)
78   - tstb %curptr@(TASK_NEEDRESCHED)
79   - jne ret_from_exception | deliver signals,
80   - | reschedule etc..
81   - RESTORE_ALL
  78 + | deliver signals, reschedule etc..
  79 + jra ret_from_exception
82 80  
83 81 |
84 82 | _060_real_chk():
arch/m68k/kernel/asm-offsets.c
... ... @@ -25,11 +25,6 @@
25 25 DEFINE(TASK_STATE, offsetof(struct task_struct, state));
26 26 DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
27 27 DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
28   - DEFINE(TASK_WORK, offsetof(struct task_struct, thread.work));
29   - DEFINE(TASK_NEEDRESCHED, offsetof(struct task_struct, thread.work.need_resched));
30   - DEFINE(TASK_SYSCALL_TRACE, offsetof(struct task_struct, thread.work.syscall_trace));
31   - DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, thread.work.sigpending));
32   - DEFINE(TASK_NOTIFY_RESUME, offsetof(struct task_struct, thread.work.notify_resume));
33 28 DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
34 29 DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
35 30 DEFINE(TASK_MM, offsetof(struct task_struct, mm));
arch/m68k/kernel/entry.S
... ... @@ -44,9 +44,7 @@
44 44  
45 45 #include <asm/asm-offsets.h>
46 46  
47   -.globl system_call, buserr, trap
48   -.globl resume, ret_from_exception
49   -.globl ret_from_signal
  47 +.globl system_call, buserr, trap, resume
50 48 .globl inthandler, sys_call_table
51 49 .globl sys_fork, sys_clone, sys_vfork
52 50 .globl ret_from_interrupt, bad_interrupt
... ... @@ -58,7 +56,7 @@
58 56 movel %sp,%sp@- | stack frame pointer argument
59 57 bsrl buserr_c
60 58 addql #4,%sp
61   - jra ret_from_exception
  59 + jra .Lret_from_exception
62 60  
63 61 ENTRY(trap)
64 62 SAVE_ALL_INT
... ... @@ -66,7 +64,7 @@
66 64 movel %sp,%sp@- | stack frame pointer argument
67 65 bsrl trap_c
68 66 addql #4,%sp
69   - jra ret_from_exception
  67 + jra .Lret_from_exception
70 68  
71 69 | After a fork we jump here directly from resume,
72 70 | so that %d1 contains the previous task
... ... @@ -75,30 +73,31 @@
75 73 movel %d1,%sp@-
76 74 jsr schedule_tail
77 75 addql #4,%sp
78   - jra ret_from_exception
  76 + jra .Lret_from_exception
79 77  
80   -badsys:
81   - movel #-ENOSYS,%sp@(PT_D0)
82   - jra ret_from_exception
83   -
84   -do_trace:
  78 +do_trace_entry:
85 79 movel #-ENOSYS,%sp@(PT_D0) | needed for strace
86 80 subql #4,%sp
87 81 SAVE_SWITCH_STACK
88 82 jbsr syscall_trace
89 83 RESTORE_SWITCH_STACK
90 84 addql #4,%sp
91   - movel %sp@(PT_ORIG_D0),%d1
92   - movel #-ENOSYS,%d0
93   - cmpl #NR_syscalls,%d1
94   - jcc 1f
95   - jbsr @(sys_call_table,%d1:l:4)@(0)
96   -1: movel %d0,%sp@(PT_D0) | save the return value
97   - subql #4,%sp | dummy return address
  85 + movel %sp@(PT_ORIG_D0),%d0
  86 + cmpl #NR_syscalls,%d0
  87 + jcs syscall
  88 +badsys:
  89 + movel #-ENOSYS,%sp@(PT_D0)
  90 + jra ret_from_syscall
  91 +
  92 +do_trace_exit:
  93 + subql #4,%sp
98 94 SAVE_SWITCH_STACK
99 95 jbsr syscall_trace
  96 + RESTORE_SWITCH_STACK
  97 + addql #4,%sp
  98 + jra .Lret_from_exception
100 99  
101   -ret_from_signal:
  100 +ENTRY(ret_from_signal)
102 101 RESTORE_SWITCH_STACK
103 102 addql #4,%sp
104 103 /* on 68040 complete pending writebacks if any */
... ... @@ -111,7 +110,7 @@
111 110 addql #4,%sp
112 111 1:
113 112 #endif
114   - jra ret_from_exception
  113 + jra .Lret_from_exception
115 114  
116 115 ENTRY(system_call)
117 116 SAVE_ALL_SYS
... ... @@ -120,30 +119,34 @@
120 119 | save top of frame
121 120 movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
122 121  
123   - tstb %curptr@(TASK_SYSCALL_TRACE)
124   - jne do_trace
  122 + | syscall trace?
  123 + tstb %curptr@(TASK_INFO+TINFO_FLAGS+2)
  124 + jmi do_trace_entry
125 125 cmpl #NR_syscalls,%d0
126 126 jcc badsys
  127 +syscall:
127 128 jbsr @(sys_call_table,%d0:l:4)@(0)
128 129 movel %d0,%sp@(PT_D0) | save the return value
129   -
  130 +ret_from_syscall:
130 131 |oriw #0x0700,%sr
131   - movel %curptr@(TASK_WORK),%d0
  132 + movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0
132 133 jne syscall_exit_work
133 134 1: RESTORE_ALL
134 135  
135 136 syscall_exit_work:
136 137 btst #5,%sp@(PT_SR) | check if returning to kernel
137 138 bnes 1b | if so, skip resched, signals
138   - tstw %d0
139   - jeq do_signal_return
140   - tstb %d0
141   - jne do_delayed_trace
142   -
  139 + lslw #1,%d0
  140 + jcs do_trace_exit
  141 + jmi do_delayed_trace
  142 + lslw #8,%d0
  143 + jmi do_signal_return
143 144 pea resume_userspace
144   - jmp schedule
  145 + jra schedule
145 146  
146   -ret_from_exception:
  147 +
  148 +ENTRY(ret_from_exception)
  149 +.Lret_from_exception:
147 150 btst #5,%sp@(PT_SR) | check if returning to kernel
148 151 bnes 1f | if so, skip resched, signals
149 152 | only allow interrupts when we are really the last one on the
... ... @@ -152,20 +155,19 @@
152 155 andw #ALLOWINT,%sr
153 156  
154 157 resume_userspace:
155   - movel %curptr@(TASK_WORK),%d0
156   - lsrl #8,%d0
  158 + moveb %curptr@(TASK_INFO+TINFO_FLAGS+3),%d0
157 159 jne exit_work
158 160 1: RESTORE_ALL
159 161  
160 162 exit_work:
161 163 | save top of frame
162 164 movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
163   - tstb %d0
164   - jeq do_signal_return
165   -
  165 + lslb #1,%d0
  166 + jmi do_signal_return
166 167 pea resume_userspace
167   - jmp schedule
  168 + jra schedule
168 169  
  170 +
169 171 do_signal_return:
170 172 |andw #ALLOWINT,%sr
171 173 subql #4,%sp | dummy return address
... ... @@ -254,7 +256,7 @@
254 256  
255 257 /* check if we need to do software interrupts */
256 258 tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING
257   - jeq ret_from_exception
  259 + jeq .Lret_from_exception
258 260 pea ret_from_exception
259 261 jra do_softirq
260 262  
arch/m68k/kernel/ptrace.c
... ... @@ -109,7 +109,7 @@
109 109 {
110 110 unsigned long tmp = get_reg(child, PT_SR) & ~(TRACE_BITS << 16);
111 111 put_reg(child, PT_SR, tmp);
112   - child->thread.work.delayed_trace = 0;
  112 + clear_tsk_thread_flag(child, TIF_DELAYED_TRACE);
113 113 }
114 114  
115 115 /*
... ... @@ -118,7 +118,7 @@
118 118 void ptrace_disable(struct task_struct *child)
119 119 {
120 120 singlestep_disable(child);
121   - child->thread.work.syscall_trace = 0;
  121 + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
122 122 }
123 123  
124 124 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
... ... @@ -198,9 +198,9 @@
198 198 goto out_eio;
199 199  
200 200 if (request == PTRACE_SYSCALL)
201   - child->thread.work.syscall_trace = ~0;
  201 + set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
202 202 else
203   - child->thread.work.syscall_trace = 0;
  203 + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
204 204 child->exit_code = data;
205 205 singlestep_disable(child);
206 206 wake_up_process(child);
... ... @@ -223,10 +223,10 @@
223 223 if (!valid_signal(data))
224 224 goto out_eio;
225 225  
226   - child->thread.work.syscall_trace = 0;
  226 + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
227 227 tmp = get_reg(child, PT_SR) | (TRACE_BITS << 16);
228 228 put_reg(child, PT_SR, tmp);
229   - child->thread.work.delayed_trace = 1;
  229 + set_tsk_thread_flag(child, TIF_DELAYED_TRACE);
230 230  
231 231 child->exit_code = data;
232 232 /* give it a chance to run. */
... ... @@ -288,9 +288,6 @@
288 288  
289 289 asmlinkage void syscall_trace(void)
290 290 {
291   - if (!current->thread.work.delayed_trace &&
292   - !current->thread.work.syscall_trace)
293   - return;
294 291 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
295 292 ? 0x80 : 0));
296 293 /*
include/asm-m68k/processor.h
... ... @@ -56,17 +56,6 @@
56 56 #endif
57 57 #define TASK_UNMAPPED_ALIGN(addr, off) PAGE_ALIGN(addr)
58 58  
59   -struct task_work {
60   - unsigned char sigpending;
61   - unsigned char notify_resume; /* request for notification on
62   - userspace execution resumption */
63   - char need_resched;
64   - unsigned char delayed_trace; /* single step a syscall */
65   - unsigned char syscall_trace; /* count of syscall interceptors */
66   - unsigned char memdie; /* task was selected to be killed */
67   - unsigned char pad[2];
68   -};
69   -
70 59 struct thread_struct {
71 60 unsigned long ksp; /* kernel stack pointer */
72 61 unsigned long usp; /* user stack pointer */
... ... @@ -79,7 +68,6 @@
79 68 unsigned long fp[8*3];
80 69 unsigned long fpcntl[3]; /* fp control regs */
81 70 unsigned char fpstate[FPSTATESIZE]; /* floating point state */
82   - struct task_work work;
83 71 struct thread_info info;
84 72 };
85 73  
include/asm-m68k/thread_info.h
... ... @@ -6,12 +6,11 @@
6 6  
7 7 struct thread_info {
8 8 struct task_struct *task; /* main task structure */
  9 + unsigned long flags;
9 10 struct exec_domain *exec_domain; /* execution domain */
10 11 int preempt_count; /* 0 => preemptable, <0 => BUG */
11 12 __u32 cpu; /* should always be 0 on m68k */
12 13 struct restart_block restart_block;
13   -
14   - __u8 supervisor_stack[0];
15 14 };
16 15  
17 16 #define PREEMPT_ACTIVE 0x4000000
... ... @@ -49,77 +48,15 @@
49 48  
50 49 #define end_of_stack(p) ((unsigned long *)(p)->thread_info + 1)
51 50  
52   -#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
53   -#define TIF_DELAYED_TRACE 1 /* single step a syscall */
54   -#define TIF_NOTIFY_RESUME 2 /* resumption notification requested */
55   -#define TIF_SIGPENDING 3 /* signal pending */
56   -#define TIF_NEED_RESCHED 4 /* rescheduling necessary */
57   -#define TIF_MEMDIE 5
58   -
59   -extern int thread_flag_fixme(void);
60   -
61   -/*
62   - * flag set/clear/test wrappers
63   - * - pass TIF_xxxx constants to these functions
  51 +/* entry.S relies on these definitions!
  52 + * bits 0-7 are tested at every exception exit
  53 + * bits 8-15 are also tested at syscall exit
64 54 */
65   -
66   -#define __set_tsk_thread_flag(tsk, flag, val) ({ \
67   - switch (flag) { \
68   - case TIF_SIGPENDING: \
69   - tsk->thread.work.sigpending = val; \
70   - break; \
71   - case TIF_NEED_RESCHED: \
72   - tsk->thread.work.need_resched = val; \
73   - break; \
74   - case TIF_SYSCALL_TRACE: \
75   - tsk->thread.work.syscall_trace = val; \
76   - break; \
77   - case TIF_MEMDIE: \
78   - tsk->thread.work.memdie = val; \
79   - break; \
80   - default: \
81   - thread_flag_fixme(); \
82   - } \
83   -})
84   -
85   -#define __get_tsk_thread_flag(tsk, flag) ({ \
86   - int ___res; \
87   - switch (flag) { \
88   - case TIF_SIGPENDING: \
89   - ___res = tsk->thread.work.sigpending; \
90   - break; \
91   - case TIF_NEED_RESCHED: \
92   - ___res = tsk->thread.work.need_resched; \
93   - break; \
94   - case TIF_SYSCALL_TRACE: \
95   - ___res = tsk->thread.work.syscall_trace;\
96   - break; \
97   - case TIF_MEMDIE: \
98   - ___res = tsk->thread.work.memdie;\
99   - break; \
100   - default: \
101   - ___res = thread_flag_fixme(); \
102   - } \
103   - ___res; \
104   -})
105   -
106   -#define __get_set_tsk_thread_flag(tsk, flag, val) ({ \
107   - int __res = __get_tsk_thread_flag(tsk, flag); \
108   - __set_tsk_thread_flag(tsk, flag, val); \
109   - __res; \
110   -})
111   -
112   -#define set_tsk_thread_flag(tsk, flag) __set_tsk_thread_flag(tsk, flag, ~0)
113   -#define clear_tsk_thread_flag(tsk, flag) __set_tsk_thread_flag(tsk, flag, 0)
114   -#define test_and_set_tsk_thread_flag(tsk, flag) __get_set_tsk_thread_flag(tsk, flag, ~0)
115   -#define test_tsk_thread_flag(tsk, flag) __get_tsk_thread_flag(tsk, flag)
116   -
117   -#define set_thread_flag(flag) set_tsk_thread_flag(current, flag)
118   -#define clear_thread_flag(flag) clear_tsk_thread_flag(current, flag)
119   -#define test_thread_flag(flag) test_tsk_thread_flag(current, flag)
120   -
121   -#define set_need_resched() set_thread_flag(TIF_NEED_RESCHED)
122   -#define clear_need_resched() clear_thread_flag(TIF_NEED_RESCHED)
  55 +#define TIF_SIGPENDING 6 /* signal pending */
  56 +#define TIF_NEED_RESCHED 7 /* rescheduling necessary */
  57 +#define TIF_DELAYED_TRACE 14 /* single step a syscall */
  58 +#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
  59 +#define TIF_MEMDIE 16
123 60  
124 61 #endif /* _ASM_M68K_THREAD_INFO_H */
include/linux/thread_info.h
... ... @@ -27,31 +27,6 @@
27 27 * - pass TIF_xxxx constants to these functions
28 28 */
29 29  
30   -static inline void set_thread_flag(int flag)
31   -{
32   - set_bit(flag,&current_thread_info()->flags);
33   -}
34   -
35   -static inline void clear_thread_flag(int flag)
36   -{
37   - clear_bit(flag,&current_thread_info()->flags);
38   -}
39   -
40   -static inline int test_and_set_thread_flag(int flag)
41   -{
42   - return test_and_set_bit(flag,&current_thread_info()->flags);
43   -}
44   -
45   -static inline int test_and_clear_thread_flag(int flag)
46   -{
47   - return test_and_clear_bit(flag,&current_thread_info()->flags);
48   -}
49   -
50   -static inline int test_thread_flag(int flag)
51   -{
52   - return test_bit(flag,&current_thread_info()->flags);
53   -}
54   -
55 30 static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
56 31 {
57 32 set_bit(flag,&ti->flags);
... ... @@ -77,15 +52,19 @@
77 52 return test_bit(flag,&ti->flags);
78 53 }
79 54  
80   -static inline void set_need_resched(void)
81   -{
82   - set_thread_flag(TIF_NEED_RESCHED);
83   -}
  55 +#define set_thread_flag(flag) \
  56 + set_ti_thread_flag(current_thread_info(), flag)
  57 +#define clear_thread_flag(flag) \
  58 + clear_ti_thread_flag(current_thread_info(), flag)
  59 +#define test_and_set_thread_flag(flag) \
  60 + test_and_set_ti_thread_flag(current_thread_info(), flag)
  61 +#define test_and_clear_thread_flag(flag) \
  62 + test_and_clear_ti_thread_flag(current_thread_info(), flag)
  63 +#define test_thread_flag(flag) \
  64 + test_ti_thread_flag(current_thread_info(), flag)
84 65  
85   -static inline void clear_need_resched(void)
86   -{
87   - clear_thread_flag(TIF_NEED_RESCHED);
88   -}
  66 +#define set_need_resched() set_thread_flag(TIF_NEED_RESCHED)
  67 +#define clear_need_resched() clear_thread_flag(TIF_NEED_RESCHED)
89 68  
90 69 #endif
91 70