Commit 8d20a541b089ecb67a88a673548161b686ed7b85

Authored by Mikael Starvik
Committed by Linus Torvalds
1 parent 21783c9746

[PATCH] CRIS update: SMP

Patches to support SMP.

* Each CPU has its own current_pgd.
* flush_tlb_range is implemented as flush_tlb_mm.
* Atomic operations implemented with spinlocks.
* Semaphores implemented with spinlocks.

Signed-off-by: Mikael Starvik <starvik@axis.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 10 changed files with 69 additions and 131 deletions (side-by-side diff view)

arch/cris/arch-v10/mm/fault.c
... ... @@ -14,6 +14,7 @@
14 14 #include <asm/uaccess.h>
15 15 #include <asm/pgtable.h>
16 16 #include <asm/arch/svinto.h>
  17 +#include <asm/mmu_context.h>
17 18  
18 19 /* debug of low-level TLB reload */
19 20 #undef DEBUG
... ... @@ -24,8 +25,6 @@
24 25 #define D(x)
25 26 #endif
26 27  
27   -extern volatile pgd_t *current_pgd;
28   -
29 28 extern const struct exception_table_entry
30 29 *search_exception_tables(unsigned long addr);
31 30  
... ... @@ -46,7 +45,7 @@
46 45 int page_id;
47 46 int acc, inv;
48 47 #endif
49   - pgd_t* pgd = (pgd_t*)current_pgd;
  48 + pgd_t* pgd = (pgd_t*)per_cpu(current_pgd, smp_processor_id());
50 49 pmd_t *pmd;
51 50 pte_t pte;
52 51 int miss, we, writeac;
... ... @@ -93,26 +92,5 @@
93 92 *R_TLB_HI = cause;
94 93 *R_TLB_LO = pte_val(pte);
95 94 local_irq_restore(flags);
96   -}
97   -
98   -/* Called from arch/cris/mm/fault.c to find fixup code. */
99   -int
100   -find_fixup_code(struct pt_regs *regs)
101   -{
102   - const struct exception_table_entry *fixup;
103   -
104   - if ((fixup = search_exception_tables(regs->irp)) != 0) {
105   - /* Adjust the instruction pointer in the stackframe. */
106   - regs->irp = fixup->fixup;
107   -
108   - /*
109   - * Don't return by restoring the CPU state, so switch
110   - * frame-type.
111   - */
112   - regs->frametype = CRIS_FRAME_NORMAL;
113   - return 1;
114   - }
115   -
116   - return 0;
117 95 }
arch/cris/arch-v10/mm/init.c
... ... @@ -42,7 +42,7 @@
42 42 * switch_mm)
43 43 */
44 44  
45   - current_pgd = init_mm.pgd;
  45 + per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;
46 46  
47 47 /* initialise the TLB (tlb.c) */
48 48  
arch/cris/arch-v10/mm/tlb.c
... ... @@ -139,53 +139,6 @@
139 139 local_irq_restore(flags);
140 140 }
141 141  
142   -/* invalidate a page range */
143   -
144   -void
145   -flush_tlb_range(struct vm_area_struct *vma,
146   - unsigned long start,
147   - unsigned long end)
148   -{
149   - struct mm_struct *mm = vma->vm_mm;
150   - int page_id = mm->context.page_id;
151   - int i;
152   - unsigned long flags;
153   -
154   - D(printk("tlb: flush range %p<->%p in context %d (%p)\n",
155   - start, end, page_id, mm));
156   -
157   - if(page_id == NO_CONTEXT)
158   - return;
159   -
160   - start &= PAGE_MASK; /* probably not necessary */
161   - end &= PAGE_MASK; /* dito */
162   -
163   - /* invalidate those TLB entries that match both the mm context
164   - * and the virtual address range
165   - */
166   -
167   - local_save_flags(flags);
168   - local_irq_disable();
169   - for(i = 0; i < NUM_TLB_ENTRIES; i++) {
170   - unsigned long tlb_hi, vpn;
171   - *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
172   - tlb_hi = *R_TLB_HI;
173   - vpn = tlb_hi & PAGE_MASK;
174   - if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
175   - vpn >= start && vpn < end) {
176   - *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
177   - IO_FIELD(R_TLB_HI, vpn, i & 0xf ) );
178   -
179   - *R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) |
180   - IO_STATE(R_TLB_LO, valid, no ) |
181   - IO_STATE(R_TLB_LO, kernel,no ) |
182   - IO_STATE(R_TLB_LO, we, no ) |
183   - IO_FIELD(R_TLB_LO, pfn, 0 ) );
184   - }
185   - }
186   - local_irq_restore(flags);
187   -}
188   -
189 142 /* dump the entire TLB for debug purposes */
190 143  
191 144 #if 0
... ... @@ -237,7 +190,7 @@
237 190 * the pgd.
238 191 */
239 192  
240   - current_pgd = next->pgd;
  193 + per_cpu(current_pgd, smp_processor_id()) = next->pgd;
241 194  
242 195 /* switch context in the MMU */
243 196  
include/asm-cris/arch-v10/atomic.h
  1 +#ifndef __ASM_CRIS_ARCH_ATOMIC__
  2 +#define __ASM_CRIS_ARCH_ATOMIC__
  3 +
  4 +#define cris_atomic_save(addr, flags) local_irq_save(flags);
  5 +#define cris_atomic_restore(addr, flags) local_irq_restore(flags);
  6 +
  7 +#endif
include/asm-cris/atomic.h
... ... @@ -4,22 +4,15 @@
4 4 #define __ASM_CRIS_ATOMIC__
5 5  
6 6 #include <asm/system.h>
  7 +#include <asm/arch/atomic.h>
7 8  
8 9 /*
9 10 * Atomic operations that C can't guarantee us. Useful for
10 11 * resource counting etc..
11 12 */
12 13  
13   -/*
14   - * Make sure gcc doesn't try to be clever and move things around
15   - * on us. We need to use _exactly_ the address the user gave us,
16   - * not some alias that contains the same information.
17   - */
  14 +typedef struct { volatile int counter; } atomic_t;
18 15  
19   -#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)
20   -
21   -typedef struct { int counter; } atomic_t;
22   -
23 16 #define ATOMIC_INIT(i) { (i) }
24 17  
25 18 #define atomic_read(v) ((v)->counter)
26 19  
27 20  
28 21  
29 22  
30 23  
... ... @@ -30,29 +23,26 @@
30 23 extern __inline__ void atomic_add(int i, volatile atomic_t *v)
31 24 {
32 25 unsigned long flags;
33   - local_save_flags(flags);
34   - local_irq_disable();
  26 + cris_atomic_save(v, flags);
35 27 v->counter += i;
36   - local_irq_restore(flags);
  28 + cris_atomic_restore(v, flags);
37 29 }
38 30  
39 31 extern __inline__ void atomic_sub(int i, volatile atomic_t *v)
40 32 {
41 33 unsigned long flags;
42   - local_save_flags(flags);
43   - local_irq_disable();
  34 + cris_atomic_save(v, flags);
44 35 v->counter -= i;
45   - local_irq_restore(flags);
  36 + cris_atomic_restore(v, flags);
46 37 }
47 38  
48 39 extern __inline__ int atomic_add_return(int i, volatile atomic_t *v)
49 40 {
50 41 unsigned long flags;
51 42 int retval;
52   - local_save_flags(flags);
53   - local_irq_disable();
  43 + cris_atomic_save(v, flags);
54 44 retval = (v->counter += i);
55   - local_irq_restore(flags);
  45 + cris_atomic_restore(v, flags);
56 46 return retval;
57 47 }
58 48  
59 49  
... ... @@ -62,10 +52,9 @@
62 52 {
63 53 unsigned long flags;
64 54 int retval;
65   - local_save_flags(flags);
66   - local_irq_disable();
  55 + cris_atomic_save(v, flags);
67 56 retval = (v->counter -= i);
68   - local_irq_restore(flags);
  57 + cris_atomic_restore(v, flags);
69 58 return retval;
70 59 }
71 60  
72 61  
73 62  
74 63  
75 64  
76 65  
77 66  
78 67  
... ... @@ -73,39 +62,35 @@
73 62 {
74 63 int retval;
75 64 unsigned long flags;
76   - local_save_flags(flags);
77   - local_irq_disable();
  65 + cris_atomic_save(v, flags);
78 66 retval = (v->counter -= i) == 0;
79   - local_irq_restore(flags);
  67 + cris_atomic_restore(v, flags);
80 68 return retval;
81 69 }
82 70  
83 71 extern __inline__ void atomic_inc(volatile atomic_t *v)
84 72 {
85 73 unsigned long flags;
86   - local_save_flags(flags);
87   - local_irq_disable();
  74 + cris_atomic_save(v, flags);
88 75 (v->counter)++;
89   - local_irq_restore(flags);
  76 + cris_atomic_restore(v, flags);
90 77 }
91 78  
92 79 extern __inline__ void atomic_dec(volatile atomic_t *v)
93 80 {
94 81 unsigned long flags;
95   - local_save_flags(flags);
96   - local_irq_disable();
  82 + cris_atomic_save(v, flags);
97 83 (v->counter)--;
98   - local_irq_restore(flags);
  84 + cris_atomic_restore(v, flags);
99 85 }
100 86  
101 87 extern __inline__ int atomic_inc_return(volatile atomic_t *v)
102 88 {
103 89 unsigned long flags;
104 90 int retval;
105   - local_save_flags(flags);
106   - local_irq_disable();
  91 + cris_atomic_save(v, flags);
107 92 retval = (v->counter)++;
108   - local_irq_restore(flags);
  93 + cris_atomic_restore(v, flags);
109 94 return retval;
110 95 }
111 96  
112 97  
113 98  
114 99  
... ... @@ -113,20 +98,18 @@
113 98 {
114 99 unsigned long flags;
115 100 int retval;
116   - local_save_flags(flags);
117   - local_irq_disable();
  101 + cris_atomic_save(v, flags);
118 102 retval = (v->counter)--;
119   - local_irq_restore(flags);
  103 + cris_atomic_restore(v, flags);
120 104 return retval;
121 105 }
122 106 extern __inline__ int atomic_dec_and_test(volatile atomic_t *v)
123 107 {
124 108 int retval;
125 109 unsigned long flags;
126   - local_save_flags(flags);
127   - local_irq_disable();
  110 + cris_atomic_save(v, flags);
128 111 retval = --(v->counter) == 0;
129   - local_irq_restore(flags);
  112 + cris_atomic_restore(v, flags);
130 113 return retval;
131 114 }
132 115  
133 116  
... ... @@ -134,10 +117,9 @@
134 117 {
135 118 int retval;
136 119 unsigned long flags;
137   - local_save_flags(flags);
138   - local_irq_disable();
  120 + cris_atomic_save(v, flags);
139 121 retval = ++(v->counter) == 0;
140   - local_irq_restore(flags);
  122 + cris_atomic_restore(v, flags);
141 123 return retval;
142 124 }
143 125  
include/asm-cris/mmu_context.h
... ... @@ -15,7 +15,7 @@
15 15 * registers like cr3 on the i386
16 16 */
17 17  
18   -extern volatile pgd_t *current_pgd; /* defined in arch/cris/mm/fault.c */
  18 +extern volatile DEFINE_PER_CPU(pgd_t *,current_pgd); /* defined in arch/cris/mm/fault.c */
19 19  
20 20 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
21 21 {
include/asm-cris/semaphore.h
... ... @@ -72,10 +72,9 @@
72 72 might_sleep();
73 73  
74 74 /* atomically decrement the semaphores count, and if its negative, we wait */
75   - local_save_flags(flags);
76   - local_irq_disable();
  75 + cris_atomic_save(sem, flags);
77 76 failed = --(sem->count.counter) < 0;
78   - local_irq_restore(flags);
  77 + cris_atomic_restore(sem, flags);
79 78 if(failed) {
80 79 __down(sem);
81 80 }
82 81  
... ... @@ -95,10 +94,9 @@
95 94 might_sleep();
96 95  
97 96 /* atomically decrement the semaphores count, and if its negative, we wait */
98   - local_save_flags(flags);
99   - local_irq_disable();
  97 + cris_atomic_save(sem, flags);
100 98 failed = --(sem->count.counter) < 0;
101   - local_irq_restore(flags);
  99 + cris_atomic_restore(sem, flags);
102 100 if(failed)
103 101 failed = __down_interruptible(sem);
104 102 return(failed);
105 103  
106 104  
... ... @@ -109,13 +107,13 @@
109 107 unsigned long flags;
110 108 int failed;
111 109  
112   - local_save_flags(flags);
113   - local_irq_disable();
  110 + cris_atomic_save(sem, flags);
114 111 failed = --(sem->count.counter) < 0;
115   - local_irq_restore(flags);
  112 + cris_atomic_restore(sem, flags);
116 113 if(failed)
117 114 failed = __down_trylock(sem);
118 115 return(failed);
  116 +
119 117 }
120 118  
121 119 /*
122 120  
... ... @@ -130,10 +128,9 @@
130 128 int wakeup;
131 129  
132 130 /* atomically increment the semaphores count, and if it was negative, we wake people */
133   - local_save_flags(flags);
134   - local_irq_disable();
  131 + cris_atomic_save(sem, flags);
135 132 wakeup = ++(sem->count.counter) <= 0;
136   - local_irq_restore(flags);
  133 + cris_atomic_restore(sem, flags);
137 134 if(wakeup) {
138 135 __up(sem);
139 136 }
include/asm-cris/smp.h
1 1 #ifndef __ASM_SMP_H
2 2 #define __ASM_SMP_H
3 3  
  4 +#include <linux/cpumask.h>
  5 +
  6 +extern cpumask_t phys_cpu_present_map;
  7 +#define cpu_possible_map phys_cpu_present_map
  8 +
  9 +#define __smp_processor_id() (current_thread_info()->cpu)
  10 +
4 11 #endif
include/asm-cris/spinlock.h
  1 +#include <asm/arch/spinlock.h>
include/asm-cris/tlbflush.h
... ... @@ -18,13 +18,26 @@
18 18 *
19 19 */
20 20  
  21 +extern void __flush_tlb_all(void);
  22 +extern void __flush_tlb_mm(struct mm_struct *mm);
  23 +extern void __flush_tlb_page(struct vm_area_struct *vma,
  24 + unsigned long addr);
  25 +
  26 +#ifdef CONFIG_SMP
21 27 extern void flush_tlb_all(void);
22 28 extern void flush_tlb_mm(struct mm_struct *mm);
23 29 extern void flush_tlb_page(struct vm_area_struct *vma,
24 30 unsigned long addr);
25   -extern void flush_tlb_range(struct vm_area_struct *vma,
26   - unsigned long start,
27   - unsigned long end);
  31 +#else
  32 +#define flush_tlb_all __flush_tlb_all
  33 +#define flush_tlb_mm __flush_tlb_mm
  34 +#define flush_tlb_page __flush_tlb_page
  35 +#endif
  36 +
  37 +static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
  38 +{
  39 + flush_tlb_mm(vma->vm_mm);
  40 +}
28 41  
29 42 extern inline void flush_tlb_pgtables(struct mm_struct *mm,
30 43 unsigned long start, unsigned long end)