Commit 2990821d0e38d2bfc556ad39d709b5f8a83c2ebd

Authored by Linus Torvalds

Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://git.linux-mips.org/pub/scm/upstream-linus: (26 commits)
  MIPS: Alchemy: Fix reset for MTX-1 and XXS1500
  MIPS: MTX-1: Make au1000_eth probe all PHY addresses
  MIPS: Jz4740: Add HAVE_CLK
  MIPS: Move idle task creation to work queue
  MIPS, Perf-events: Use unsigned delta for right shift in event update
  MIPS, Perf-events: Work with the new callchain interface
  MIPS, Perf-events: Fix event check in validate_event()
  MIPS, Perf-events: Work with the new PMU interface
  MIPS, Perf-events: Work with irq_work
  MIPS: Fix always CONFIG_LOONGSON_UART_BASE=y
  MIPS: Loongson: Fix potentially wrong string handling
  MIPS: Fix GCC-4.6 'set but not used' warning in arch/mips/mm/init.c
  MIPS: Fix GCC-4.6 'set but not used' warning in ieee754int.h
  MIPS: Remove unused code from arch/mips/kernel/syscall.c
  MIPS: Fix GCC-4.6 'set but not used' warning in signal*.c
  MIPS: MSP: Fix MSP71xx bpci interrupt handler return value
  MIPS: Select R4K timer lib for all MSP platforms
  MIPS: Loongson: Remove ad-hoc cmdline default
  MIPS: Clear the correct flag in sysmips(MIPS_FIXADE, ...).
  MIPS: Add an unreachable return statement to satisfy buggy GCCs.
  ...

22 changed files

arch/mips/Kconfig
... ... @@ -4,6 +4,7 @@
4 4 select HAVE_GENERIC_DMA_COHERENT
5 5 select HAVE_IDE
6 6 select HAVE_OPROFILE
  7 + select HAVE_IRQ_WORK
7 8 select HAVE_PERF_EVENTS
8 9 select PERF_USE_VMALLOC
9 10 select HAVE_ARCH_KGDB
... ... @@ -208,6 +209,7 @@
208 209 select ARCH_REQUIRE_GPIOLIB
209 210 select SYS_HAS_EARLY_PRINTK
210 211 select HAVE_PWM
  212 + select HAVE_CLK
211 213  
212 214 config LASAT
213 215 bool "LASAT Networks platforms"
... ... @@ -333,6 +335,8 @@
333 335 config PMC_MSP
334 336 bool "PMC-Sierra MSP chipsets"
335 337 depends on EXPERIMENTAL
  338 + select CEVT_R4K
  339 + select CSRC_R4K
336 340 select DMA_NONCOHERENT
337 341 select SWAP_IO_SPACE
338 342 select NO_EXCEPT_FILL
arch/mips/alchemy/mtx-1/board_setup.c
... ... @@ -54,8 +54,8 @@
54 54  
55 55 static void mtx1_reset(char *c)
56 56 {
57   - /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
58   - au_writel(0x00000000, 0xAE00001C);
  57 + /* Jump to the reset vector */
  58 + __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
59 59 }
60 60  
61 61 static void mtx1_power_off(void)
arch/mips/alchemy/mtx-1/platform.c
... ... @@ -28,6 +28,8 @@
28 28 #include <linux/mtd/physmap.h>
29 29 #include <mtd/mtd-abi.h>
30 30  
  31 +#include <asm/mach-au1x00/au1xxx_eth.h>
  32 +
31 33 static struct gpio_keys_button mtx1_gpio_button[] = {
32 34 {
33 35 .gpio = 207,
34 36  
... ... @@ -140,9 +142,16 @@
140 142 &mtx1_mtd,
141 143 };
142 144  
  145 +static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = {
  146 + .phy_search_highest_addr = 1,
  147 + .phy1_search_mac0 = 1,
  148 +};
  149 +
143 150 static int __init mtx1_register_devices(void)
144 151 {
145 152 int rc;
  153 +
  154 + au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata);
146 155  
147 156 rc = gpio_request(mtx1_gpio_button[0].gpio,
148 157 mtx1_gpio_button[0].desc);
arch/mips/alchemy/xxs1500/board_setup.c
... ... @@ -36,8 +36,8 @@
36 36  
37 37 static void xxs1500_reset(char *c)
38 38 {
39   - /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
40   - au_writel(0x00000000, 0xAE00001C);
  39 + /* Jump to the reset vector */
  40 + __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
41 41 }
42 42  
43 43 static void xxs1500_power_off(void)
arch/mips/include/asm/perf_event.h
... ... @@ -11,16 +11,6 @@
11 11  
12 12 #ifndef __MIPS_PERF_EVENT_H__
13 13 #define __MIPS_PERF_EVENT_H__
14   -
15   -/*
16   - * MIPS performance counters do not raise NMI upon overflow, a regular
17   - * interrupt will be signaled. Hence we can do the pending perf event
18   - * work at the tail of the irq handler.
19   - */
20   -static inline void
21   -set_perf_event_pending(void)
22   -{
23   -}
24   -
  14 +/* Leave it empty here. The file is required by linux/perf_event.h */
25 15 #endif /* __MIPS_PERF_EVENT_H__ */
arch/mips/kernel/ftrace.c
... ... @@ -17,29 +17,13 @@
17 17 #include <asm/cacheflush.h>
18 18 #include <asm/uasm.h>
19 19  
20   -/*
21   - * If the Instruction Pointer is in module space (0xc0000000), return true;
22   - * otherwise, it is in kernel space (0x80000000), return false.
23   - *
24   - * FIXME: This will not work when the kernel space and module space are the
25   - * same. If they are the same, we need to modify scripts/recordmcount.pl,
26   - * ftrace_make_nop/call() and the other related parts to ensure the
27   - * enabling/disabling of the calling site to _mcount is right for both kernel
28   - * and module.
29   - */
  20 +#include <asm-generic/sections.h>
30 21  
31   -static inline int in_module(unsigned long ip)
32   -{
33   - return ip & 0x40000000;
34   -}
35   -
36 22 #ifdef CONFIG_DYNAMIC_FTRACE
37 23  
38 24 #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
39 25 #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
40 26  
41   -#define INSN_B_1F_4 0x10000004 /* b 1f; offset = 4 */
42   -#define INSN_B_1F_5 0x10000005 /* b 1f; offset = 5 */
43 27 #define INSN_NOP 0x00000000 /* nop */
44 28 #define INSN_JAL(addr) \
45 29 ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
... ... @@ -69,6 +53,20 @@
69 53 #endif
70 54 }
71 55  
  56 +/*
  57 + * Check if the address is in kernel space
  58 + *
  59 + * Clone core_kernel_text() from kernel/extable.c, but doesn't call
  60 + * init_kernel_text() for Ftrace doesn't trace functions in init sections.
  61 + */
  62 +static inline int in_kernel_space(unsigned long ip)
  63 +{
  64 + if (ip >= (unsigned long)_stext &&
  65 + ip <= (unsigned long)_etext)
  66 + return 1;
  67 + return 0;
  68 +}
  69 +
72 70 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
73 71 {
74 72 int faulted;
... ... @@ -84,6 +82,42 @@
84 82 return 0;
85 83 }
86 84  
  85 +/*
  86 + * The details about the calling site of mcount on MIPS
  87 + *
  88 + * 1. For kernel:
  89 + *
  90 + * move at, ra
  91 + * jal _mcount --> nop
  92 + *
  93 + * 2. For modules:
  94 + *
  95 + * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
  96 + *
  97 + * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
  98 + * addiu v1, v1, low_16bit_of_mcount
  99 + * move at, ra
  100 + * move $12, ra_address
  101 + * jalr v1
  102 + * sub sp, sp, 8
  103 + * 1: offset = 5 instructions
  104 + * 2.2 For the Other situations
  105 + *
  106 + * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
  107 + * addiu v1, v1, low_16bit_of_mcount
  108 + * move at, ra
  109 + * jalr v1
  110 + * nop | move $12, ra_address | sub sp, sp, 8
  111 + * 1: offset = 4 instructions
  112 + */
  113 +
  114 +#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
  115 +#define MCOUNT_OFFSET_INSNS 5
  116 +#else
  117 +#define MCOUNT_OFFSET_INSNS 4
  118 +#endif
  119 +#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
  120 +
87 121 int ftrace_make_nop(struct module *mod,
88 122 struct dyn_ftrace *rec, unsigned long addr)
89 123 {
90 124  
... ... @@ -91,39 +125,11 @@
91 125 unsigned long ip = rec->ip;
92 126  
93 127 /*
94   - * We have compiled module with -mlong-calls, but compiled the kernel
95   - * without it, we need to cope with them respectively.
  128 + * If ip is in kernel space, no long call, otherwise, long call is
  129 + * needed.
96 130 */
97   - if (in_module(ip)) {
98   -#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
99   - /*
100   - * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
101   - * addiu v1, v1, low_16bit_of_mcount
102   - * move at, ra
103   - * move $12, ra_address
104   - * jalr v1
105   - * sub sp, sp, 8
106   - * 1: offset = 5 instructions
107   - */
108   - new = INSN_B_1F_5;
109   -#else
110   - /*
111   - * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
112   - * addiu v1, v1, low_16bit_of_mcount
113   - * move at, ra
114   - * jalr v1
115   - * nop | move $12, ra_address | sub sp, sp, 8
116   - * 1: offset = 4 instructions
117   - */
118   - new = INSN_B_1F_4;
119   -#endif
120   - } else {
121   - /*
122   - * move at, ra
123   - * jal _mcount --> nop
124   - */
125   - new = INSN_NOP;
126   - }
  131 + new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
  132 +
127 133 return ftrace_modify_code(ip, new);
128 134 }
129 135  
... ... @@ -132,8 +138,8 @@
132 138 unsigned int new;
133 139 unsigned long ip = rec->ip;
134 140  
135   - /* ip, module: 0xc0000000, kernel: 0x80000000 */
136   - new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
  141 + new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
  142 + insn_lui_v1_hi16_mcount;
137 143  
138 144 return ftrace_modify_code(ip, new);
139 145 }
140 146  
141 147  
142 148  
143 149  
... ... @@ -190,29 +196,25 @@
190 196 #define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
191 197 #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
192 198  
193   -unsigned long ftrace_get_parent_addr(unsigned long self_addr,
194   - unsigned long parent,
195   - unsigned long parent_addr,
196   - unsigned long fp)
  199 +unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
  200 + old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
197 201 {
198   - unsigned long sp, ip, ra;
  202 + unsigned long sp, ip, tmp;
199 203 unsigned int code;
200 204 int faulted;
201 205  
202 206 /*
203   - * For module, move the ip from calling site of mcount to the
204   - * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for
205   - * kernel, move to the instruction "move ra, at"(offset is 12)
  207 + * For module, move the ip from the return address after the
  208 + * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
  209 + * kernel, move after the instruction "move ra, at"(offset is 16)
206 210 */
207   - ip = self_addr - (in_module(self_addr) ? 20 : 12);
  211 + ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
208 212  
209 213 /*
210 214 * search the text until finding the non-store instruction or "s{d,w}
211 215 * ra, offset(sp)" instruction
212 216 */
213 217 do {
214   - ip -= 4;
215   -
216 218 /* get the code at "ip": code = *(unsigned int *)ip; */
217 219 safe_load_code(code, ip, faulted);
218 220  
219 221  
220 222  
221 223  
... ... @@ -224,18 +226,20 @@
224 226 * store the ra on the stack
225 227 */
226 228 if ((code & S_R_SP) != S_R_SP)
227   - return parent_addr;
  229 + return parent_ra_addr;
228 230  
229   - } while (((code & S_RA_SP) != S_RA_SP));
  231 + /* Move to the next instruction */
  232 + ip -= 4;
  233 + } while ((code & S_RA_SP) != S_RA_SP);
230 234  
231 235 sp = fp + (code & OFFSET_MASK);
232 236  
233   - /* ra = *(unsigned long *)sp; */
234   - safe_load_stack(ra, sp, faulted);
  237 + /* tmp = *(unsigned long *)sp; */
  238 + safe_load_stack(tmp, sp, faulted);
235 239 if (unlikely(faulted))
236 240 return 0;
237 241  
238   - if (ra == parent)
  242 + if (tmp == old_parent_ra)
239 243 return sp;
240 244 return 0;
241 245 }
242 246  
243 247  
244 248  
... ... @@ -246,21 +250,21 @@
246 250 * Hook the return address and push it in the stack of return addrs
247 251 * in current thread info.
248 252 */
249   -void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
  253 +void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
250 254 unsigned long fp)
251 255 {
252   - unsigned long old;
  256 + unsigned long old_parent_ra;
253 257 struct ftrace_graph_ent trace;
254 258 unsigned long return_hooker = (unsigned long)
255 259 &return_to_handler;
256   - int faulted;
  260 + int faulted, insns;
257 261  
258 262 if (unlikely(atomic_read(&current->tracing_graph_pause)))
259 263 return;
260 264  
261 265 /*
262   - * "parent" is the stack address saved the return address of the caller
263   - * of _mcount.
  266 + * "parent_ra_addr" is the stack address saved the return address of
  267 + * the caller of _mcount.
264 268 *
265 269 * if the gcc < 4.5, a leaf function does not save the return address
266 270 * in the stack address, so, we "emulate" one in _mcount's stack space,
267 271  
268 272  
269 273  
270 274  
271 275  
272 276  
273 277  
... ... @@ -275,37 +279,44 @@
275 279 * do it in ftrace_graph_caller of mcount.S.
276 280 */
277 281  
278   - /* old = *parent; */
279   - safe_load_stack(old, parent, faulted);
  282 + /* old_parent_ra = *parent_ra_addr; */
  283 + safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
280 284 if (unlikely(faulted))
281 285 goto out;
282 286 #ifndef KBUILD_MCOUNT_RA_ADDRESS
283   - parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
284   - (unsigned long)parent, fp);
  287 + parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
  288 + old_parent_ra, (unsigned long)parent_ra_addr, fp);
285 289 /*
286 290 * If fails when getting the stack address of the non-leaf function's
287 291 * ra, stop function graph tracer and return
288 292 */
289   - if (parent == 0)
  293 + if (parent_ra_addr == 0)
290 294 goto out;
291 295 #endif
292   - /* *parent = return_hooker; */
293   - safe_store_stack(return_hooker, parent, faulted);
  296 + /* *parent_ra_addr = return_hooker; */
  297 + safe_store_stack(return_hooker, parent_ra_addr, faulted);
294 298 if (unlikely(faulted))
295 299 goto out;
296 300  
297   - if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
298   - -EBUSY) {
299   - *parent = old;
  301 + if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
  302 + == -EBUSY) {
  303 + *parent_ra_addr = old_parent_ra;
300 304 return;
301 305 }
302 306  
303   - trace.func = self_addr;
  307 + /*
  308 + * Get the recorded ip of the current mcount calling site in the
  309 + * __mcount_loc section, which will be used to filter the function
  310 + * entries configured through the tracing/set_graph_function interface.
  311 + */
304 312  
  313 + insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
  314 + trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
  315 +
305 316 /* Only trace if the calling function expects to */
306 317 if (!ftrace_graph_entry(&trace)) {
307 318 current->curr_ret_stack--;
308   - *parent = old;
  319 + *parent_ra_addr = old_parent_ra;
309 320 }
310 321 return;
311 322 out:
arch/mips/kernel/perf_event.c
... ... @@ -161,41 +161,6 @@
161 161 return ret;
162 162 }
163 163  
164   -static int mipspmu_enable(struct perf_event *event)
165   -{
166   - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
167   - struct hw_perf_event *hwc = &event->hw;
168   - int idx;
169   - int err = 0;
170   -
171   - /* To look for a free counter for this event. */
172   - idx = mipspmu->alloc_counter(cpuc, hwc);
173   - if (idx < 0) {
174   - err = idx;
175   - goto out;
176   - }
177   -
178   - /*
179   - * If there is an event in the counter we are going to use then
180   - * make sure it is disabled.
181   - */
182   - event->hw.idx = idx;
183   - mipspmu->disable_event(idx);
184   - cpuc->events[idx] = event;
185   -
186   - /* Set the period for the event. */
187   - mipspmu_event_set_period(event, hwc, idx);
188   -
189   - /* Enable the event. */
190   - mipspmu->enable_event(hwc, idx);
191   -
192   - /* Propagate our changes to the userspace mapping. */
193   - perf_event_update_userpage(event);
194   -
195   -out:
196   - return err;
197   -}
198   -
199 164 static void mipspmu_event_update(struct perf_event *event,
200 165 struct hw_perf_event *hwc,
201 166 int idx)
... ... @@ -204,7 +169,7 @@
204 169 unsigned long flags;
205 170 int shift = 64 - TOTAL_BITS;
206 171 s64 prev_raw_count, new_raw_count;
207   - s64 delta;
  172 + u64 delta;
208 173  
209 174 again:
210 175 prev_raw_count = local64_read(&hwc->prev_count);
211 176  
212 177  
213 178  
214 179  
215 180  
216 181  
217 182  
218 183  
219 184  
220 185  
221 186  
222 187  
223 188  
... ... @@ -231,32 +196,90 @@
231 196 return;
232 197 }
233 198  
234   -static void mipspmu_disable(struct perf_event *event)
  199 +static void mipspmu_start(struct perf_event *event, int flags)
235 200 {
  201 + struct hw_perf_event *hwc = &event->hw;
  202 +
  203 + if (!mipspmu)
  204 + return;
  205 +
  206 + if (flags & PERF_EF_RELOAD)
  207 + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
  208 +
  209 + hwc->state = 0;
  210 +
  211 + /* Set the period for the event. */
  212 + mipspmu_event_set_period(event, hwc, hwc->idx);
  213 +
  214 + /* Enable the event. */
  215 + mipspmu->enable_event(hwc, hwc->idx);
  216 +}
  217 +
  218 +static void mipspmu_stop(struct perf_event *event, int flags)
  219 +{
  220 + struct hw_perf_event *hwc = &event->hw;
  221 +
  222 + if (!mipspmu)
  223 + return;
  224 +
  225 + if (!(hwc->state & PERF_HES_STOPPED)) {
  226 + /* We are working on a local event. */
  227 + mipspmu->disable_event(hwc->idx);
  228 + barrier();
  229 + mipspmu_event_update(event, hwc, hwc->idx);
  230 + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
  231 + }
  232 +}
  233 +
  234 +static int mipspmu_add(struct perf_event *event, int flags)
  235 +{
236 236 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
237 237 struct hw_perf_event *hwc = &event->hw;
238   - int idx = hwc->idx;
  238 + int idx;
  239 + int err = 0;
239 240  
  241 + perf_pmu_disable(event->pmu);
240 242  
241   - WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
  243 + /* To look for a free counter for this event. */
  244 + idx = mipspmu->alloc_counter(cpuc, hwc);
  245 + if (idx < 0) {
  246 + err = idx;
  247 + goto out;
  248 + }
242 249  
243   - /* We are working on a local event. */
  250 + /*
  251 + * If there is an event in the counter we are going to use then
  252 + * make sure it is disabled.
  253 + */
  254 + event->hw.idx = idx;
244 255 mipspmu->disable_event(idx);
  256 + cpuc->events[idx] = event;
245 257  
246   - barrier();
  258 + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
  259 + if (flags & PERF_EF_START)
  260 + mipspmu_start(event, PERF_EF_RELOAD);
247 261  
248   - mipspmu_event_update(event, hwc, idx);
249   - cpuc->events[idx] = NULL;
250   - clear_bit(idx, cpuc->used_mask);
251   -
  262 + /* Propagate our changes to the userspace mapping. */
252 263 perf_event_update_userpage(event);
  264 +
  265 +out:
  266 + perf_pmu_enable(event->pmu);
  267 + return err;
253 268 }
254 269  
255   -static void mipspmu_unthrottle(struct perf_event *event)
  270 +static void mipspmu_del(struct perf_event *event, int flags)
256 271 {
  272 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
257 273 struct hw_perf_event *hwc = &event->hw;
  274 + int idx = hwc->idx;
258 275  
259   - mipspmu->enable_event(hwc, hwc->idx);
  276 + WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
  277 +
  278 + mipspmu_stop(event, PERF_EF_UPDATE);
  279 + cpuc->events[idx] = NULL;
  280 + clear_bit(idx, cpuc->used_mask);
  281 +
  282 + perf_event_update_userpage(event);
260 283 }
261 284  
262 285 static void mipspmu_read(struct perf_event *event)
263 286  
... ... @@ -270,13 +293,18 @@
270 293 mipspmu_event_update(event, hwc, hwc->idx);
271 294 }
272 295  
273   -static struct pmu pmu = {
274   - .enable = mipspmu_enable,
275   - .disable = mipspmu_disable,
276   - .unthrottle = mipspmu_unthrottle,
277   - .read = mipspmu_read,
278   -};
  296 +static void mipspmu_enable(struct pmu *pmu)
  297 +{
  298 + if (mipspmu)
  299 + mipspmu->start();
  300 +}
279 301  
  302 +static void mipspmu_disable(struct pmu *pmu)
  303 +{
  304 + if (mipspmu)
  305 + mipspmu->stop();
  306 +}
  307 +
280 308 static atomic_t active_events = ATOMIC_INIT(0);
281 309 static DEFINE_MUTEX(pmu_reserve_mutex);
282 310 static int (*save_perf_irq)(void);
... ... @@ -318,6 +346,82 @@
318 346 perf_irq = save_perf_irq;
319 347 }
320 348  
  349 +/*
  350 + * mipsxx/rm9000/loongson2 have different performance counters, they have
  351 + * specific low-level init routines.
  352 + */
  353 +static void reset_counters(void *arg);
  354 +static int __hw_perf_event_init(struct perf_event *event);
  355 +
  356 +static void hw_perf_event_destroy(struct perf_event *event)
  357 +{
  358 + if (atomic_dec_and_mutex_lock(&active_events,
  359 + &pmu_reserve_mutex)) {
  360 + /*
  361 + * We must not call the destroy function with interrupts
  362 + * disabled.
  363 + */
  364 + on_each_cpu(reset_counters,
  365 + (void *)(long)mipspmu->num_counters, 1);
  366 + mipspmu_free_irq();
  367 + mutex_unlock(&pmu_reserve_mutex);
  368 + }
  369 +}
  370 +
  371 +static int mipspmu_event_init(struct perf_event *event)
  372 +{
  373 + int err = 0;
  374 +
  375 + switch (event->attr.type) {
  376 + case PERF_TYPE_RAW:
  377 + case PERF_TYPE_HARDWARE:
  378 + case PERF_TYPE_HW_CACHE:
  379 + break;
  380 +
  381 + default:
  382 + return -ENOENT;
  383 + }
  384 +
  385 + if (!mipspmu || event->cpu >= nr_cpumask_bits ||
  386 + (event->cpu >= 0 && !cpu_online(event->cpu)))
  387 + return -ENODEV;
  388 +
  389 + if (!atomic_inc_not_zero(&active_events)) {
  390 + if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
  391 + atomic_dec(&active_events);
  392 + return -ENOSPC;
  393 + }
  394 +
  395 + mutex_lock(&pmu_reserve_mutex);
  396 + if (atomic_read(&active_events) == 0)
  397 + err = mipspmu_get_irq();
  398 +
  399 + if (!err)
  400 + atomic_inc(&active_events);
  401 + mutex_unlock(&pmu_reserve_mutex);
  402 + }
  403 +
  404 + if (err)
  405 + return err;
  406 +
  407 + err = __hw_perf_event_init(event);
  408 + if (err)
  409 + hw_perf_event_destroy(event);
  410 +
  411 + return err;
  412 +}
  413 +
  414 +static struct pmu pmu = {
  415 + .pmu_enable = mipspmu_enable,
  416 + .pmu_disable = mipspmu_disable,
  417 + .event_init = mipspmu_event_init,
  418 + .add = mipspmu_add,
  419 + .del = mipspmu_del,
  420 + .start = mipspmu_start,
  421 + .stop = mipspmu_stop,
  422 + .read = mipspmu_read,
  423 +};
  424 +
321 425 static inline unsigned int
322 426 mipspmu_perf_event_encode(const struct mips_perf_event *pev)
323 427 {
... ... @@ -382,8 +486,9 @@
382 486 {
383 487 struct hw_perf_event fake_hwc = event->hw;
384 488  
385   - if (event->pmu && event->pmu != &pmu)
386   - return 0;
  489 + /* Allow mixed event group. So return 1 to pass validation. */
  490 + if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
  491 + return 1;
387 492  
388 493 return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
389 494 }
... ... @@ -409,73 +514,6 @@
409 514 return 0;
410 515 }
411 516  
412   -/*
413   - * mipsxx/rm9000/loongson2 have different performance counters, they have
414   - * specific low-level init routines.
415   - */
416   -static void reset_counters(void *arg);
417   -static int __hw_perf_event_init(struct perf_event *event);
418   -
419   -static void hw_perf_event_destroy(struct perf_event *event)
420   -{
421   - if (atomic_dec_and_mutex_lock(&active_events,
422   - &pmu_reserve_mutex)) {
423   - /*
424   - * We must not call the destroy function with interrupts
425   - * disabled.
426   - */
427   - on_each_cpu(reset_counters,
428   - (void *)(long)mipspmu->num_counters, 1);
429   - mipspmu_free_irq();
430   - mutex_unlock(&pmu_reserve_mutex);
431   - }
432   -}
433   -
434   -const struct pmu *hw_perf_event_init(struct perf_event *event)
435   -{
436   - int err = 0;
437   -
438   - if (!mipspmu || event->cpu >= nr_cpumask_bits ||
439   - (event->cpu >= 0 && !cpu_online(event->cpu)))
440   - return ERR_PTR(-ENODEV);
441   -
442   - if (!atomic_inc_not_zero(&active_events)) {
443   - if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
444   - atomic_dec(&active_events);
445   - return ERR_PTR(-ENOSPC);
446   - }
447   -
448   - mutex_lock(&pmu_reserve_mutex);
449   - if (atomic_read(&active_events) == 0)
450   - err = mipspmu_get_irq();
451   -
452   - if (!err)
453   - atomic_inc(&active_events);
454   - mutex_unlock(&pmu_reserve_mutex);
455   - }
456   -
457   - if (err)
458   - return ERR_PTR(err);
459   -
460   - err = __hw_perf_event_init(event);
461   - if (err)
462   - hw_perf_event_destroy(event);
463   -
464   - return err ? ERR_PTR(err) : &pmu;
465   -}
466   -
467   -void hw_perf_enable(void)
468   -{
469   - if (mipspmu)
470   - mipspmu->start();
471   -}
472   -
473   -void hw_perf_disable(void)
474   -{
475   - if (mipspmu)
476   - mipspmu->stop();
477   -}
478   -
479 517 /* This is needed by specific irq handlers in perf_event_*.c */
480 518 static void
481 519 handle_associated_event(struct cpu_hw_events *cpuc,
482 520  
... ... @@ -496,21 +534,13 @@
496 534 #include "perf_event_mipsxx.c"
497 535  
498 536 /* Callchain handling code. */
499   -static inline void
500   -callchain_store(struct perf_callchain_entry *entry,
501   - u64 ip)
502   -{
503   - if (entry->nr < PERF_MAX_STACK_DEPTH)
504   - entry->ip[entry->nr++] = ip;
505   -}
506 537  
507 538 /*
508 539 * Leave userspace callchain empty for now. When we find a way to trace
509 540 * the user stack callchains, we add here.
510 541 */
511   -static void
512   -perf_callchain_user(struct pt_regs *regs,
513   - struct perf_callchain_entry *entry)
  542 +void perf_callchain_user(struct perf_callchain_entry *entry,
  543 + struct pt_regs *regs)
514 544 {
515 545 }
516 546  
517 547  
518 548  
... ... @@ -523,23 +553,21 @@
523 553 while (!kstack_end(sp)) {
524 554 addr = *sp++;
525 555 if (__kernel_text_address(addr)) {
526   - callchain_store(entry, addr);
  556 + perf_callchain_store(entry, addr);
527 557 if (entry->nr >= PERF_MAX_STACK_DEPTH)
528 558 break;
529 559 }
530 560 }
531 561 }
532 562  
533   -static void
534   -perf_callchain_kernel(struct pt_regs *regs,
535   - struct perf_callchain_entry *entry)
  563 +void perf_callchain_kernel(struct perf_callchain_entry *entry,
  564 + struct pt_regs *regs)
536 565 {
537 566 unsigned long sp = regs->regs[29];
538 567 #ifdef CONFIG_KALLSYMS
539 568 unsigned long ra = regs->regs[31];
540 569 unsigned long pc = regs->cp0_epc;
541 570  
542   - callchain_store(entry, PERF_CONTEXT_KERNEL);
543 571 if (raw_show_trace || !__kernel_text_address(pc)) {
544 572 unsigned long stack_page =
545 573 (unsigned long)task_stack_page(current);
546 574  
547 575  
... ... @@ -549,54 +577,13 @@
549 577 return;
550 578 }
551 579 do {
552   - callchain_store(entry, pc);
  580 + perf_callchain_store(entry, pc);
553 581 if (entry->nr >= PERF_MAX_STACK_DEPTH)
554 582 break;
555 583 pc = unwind_stack(current, &sp, pc, &ra);
556 584 } while (pc);
557 585 #else
558   - callchain_store(entry, PERF_CONTEXT_KERNEL);
559 586 save_raw_perf_callchain(entry, sp);
560 587 #endif
561   -}
562   -
563   -static void
564   -perf_do_callchain(struct pt_regs *regs,
565   - struct perf_callchain_entry *entry)
566   -{
567   - int is_user;
568   -
569   - if (!regs)
570   - return;
571   -
572   - is_user = user_mode(regs);
573   -
574   - if (!current || !current->pid)
575   - return;
576   -
577   - if (is_user && current->state != TASK_RUNNING)
578   - return;
579   -
580   - if (!is_user) {
581   - perf_callchain_kernel(regs, entry);
582   - if (current->mm)
583   - regs = task_pt_regs(current);
584   - else
585   - regs = NULL;
586   - }
587   - if (regs)
588   - perf_callchain_user(regs, entry);
589   -}
590   -
591   -static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
592   -
593   -struct perf_callchain_entry *
594   -perf_callchain(struct pt_regs *regs)
595   -{
596   - struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
597   -
598   - entry->nr = 0;
599   - perf_do_callchain(regs, entry);
600   - return entry;
601 588 }
arch/mips/kernel/perf_event_mipsxx.c
... ... @@ -696,7 +696,7 @@
696 696 * interrupt, not NMI.
697 697 */
698 698 if (handled == IRQ_HANDLED)
699   - perf_event_do_pending();
  699 + irq_work_run();
700 700  
701 701 #ifdef CONFIG_MIPS_MT_SMP
702 702 read_unlock(&pmuint_rwlock);
... ... @@ -1044,6 +1044,8 @@
1044 1044 pr_cont("%s PMU enabled, %d counters available to each "
1045 1045 "CPU, irq %d%s\n", mipspmu->name, counters, irq,
1046 1046 irq < 0 ? " (share with timer interrupt)" : "");
  1047 +
  1048 + perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1047 1049  
1048 1050 return 0;
1049 1051 }
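
The perf_event_mipsxx.c hunk above replaces perf_event_do_pending() with irq_work_run(): MIPS counter overflow raises a regular interrupt rather than an NMI, so pending perf work is flushed from the tail of the irq handler through the generic irq_work layer. As a rough sketch of that layer (the driver and callback names below are hypothetical and not part of this commit):

#include <linux/irq_work.h>

/* Hypothetical callback: runs once the queued work is flushed,
 * e.g. by irq_work_run() as in the handler above. */
static void mydrv_irq_work_func(struct irq_work *work)
{
        /* deferred processing kept out of the hot overflow path */
}

static struct irq_work mydrv_work;

static void mydrv_setup(void)
{
        init_irq_work(&mydrv_work, mydrv_irq_work_func);
}

static void mydrv_overflow(void)
{
        /* safe from hard-irq context; the callback runs later */
        irq_work_queue(&mydrv_work);
}
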
arch/mips/kernel/signal.c
... ... @@ -84,7 +84,7 @@
84 84  
85 85 static int protected_restore_fp_context(struct sigcontext __user *sc)
86 86 {
87   - int err, tmp;
  87 + int err, tmp __maybe_unused;
88 88 while (1) {
89 89 lock_fpu_owner();
90 90 own_fpu_inatomic(0);
arch/mips/kernel/signal32.c
... ... @@ -115,7 +115,7 @@
115 115  
116 116 static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
117 117 {
118   - int err, tmp;
  118 + int err, tmp __maybe_unused;
119 119 while (1) {
120 120 lock_fpu_owner();
121 121 own_fpu_inatomic(0);
arch/mips/kernel/smp.c
... ... @@ -193,6 +193,22 @@
193 193 */
194 194 static struct task_struct *cpu_idle_thread[NR_CPUS];
195 195  
  196 +struct create_idle {
  197 + struct work_struct work;
  198 + struct task_struct *idle;
  199 + struct completion done;
  200 + int cpu;
  201 +};
  202 +
  203 +static void __cpuinit do_fork_idle(struct work_struct *work)
  204 +{
  205 + struct create_idle *c_idle =
  206 + container_of(work, struct create_idle, work);
  207 +
  208 + c_idle->idle = fork_idle(c_idle->cpu);
  209 + complete(&c_idle->done);
  210 +}
  211 +
196 212 int __cpuinit __cpu_up(unsigned int cpu)
197 213 {
198 214 struct task_struct *idle;
... ... @@ -203,8 +219,19 @@
203 219 * Linux can schedule processes on this slave.
204 220 */
205 221 if (!cpu_idle_thread[cpu]) {
206   - idle = fork_idle(cpu);
207   - cpu_idle_thread[cpu] = idle;
  222 + /*
  223 + * Schedule work item to avoid forking user task
  224 + * Ported from arch/x86/kernel/smpboot.c
  225 + */
  226 + struct create_idle c_idle = {
  227 + .cpu = cpu,
  228 + .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
  229 + };
  230 +
  231 + INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
  232 + schedule_work(&c_idle.work);
  233 + wait_for_completion(&c_idle.done);
  234 + idle = cpu_idle_thread[cpu] = c_idle.idle;
208 235  
209 236 if (IS_ERR(idle))
210 237 panic(KERN_ERR "Fork failed for CPU %d", cpu);
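
The smp.c change above forks each CPU's idle task from a workqueue so the new thread is cloned from a kernel worker instead of whatever user task happened to trigger __cpu_up(), mirroring arch/x86/kernel/smpboot.c. The on-stack work-plus-completion idiom it relies on is generic; a minimal sketch assuming ordinary kernel (process) context, with hypothetical names:

#include <linux/workqueue.h>
#include <linux/completion.h>

struct my_deferred {
        struct work_struct work;
        struct completion done;
        int result;
};

/* Executed by a kernel worker thread. */
static void my_deferred_fn(struct work_struct *work)
{
        struct my_deferred *d = container_of(work, struct my_deferred, work);

        d->result = 42;                 /* stand-in for the real work */
        complete(&d->done);
}

static int run_deferred_and_wait(void)
{
        struct my_deferred d = {
                .done = COMPLETION_INITIALIZER_ONSTACK(d.done),
        };

        INIT_WORK_ONSTACK(&d.work, my_deferred_fn);
        schedule_work(&d.work);
        wait_for_completion(&d.done);   /* block until the worker finishes */
        return d.result;
}
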
arch/mips/kernel/syscall.c
... ... @@ -383,12 +383,11 @@
383 383 static int __used noinline
384 384 _sys_sysmips(nabi_no_regargs struct pt_regs regs)
385 385 {
386   - long cmd, arg1, arg2, arg3;
  386 + long cmd, arg1, arg2;
387 387  
388 388 cmd = regs.regs[4];
389 389 arg1 = regs.regs[5];
390 390 arg2 = regs.regs[6];
391   - arg3 = regs.regs[7];
392 391  
393 392 switch (cmd) {
394 393 case MIPS_ATOMIC_SET:
... ... @@ -405,7 +404,7 @@
405 404 if (arg1 & 2)
406 405 set_thread_flag(TIF_LOGADE);
407 406 else
408   - clear_thread_flag(TIF_FIXADE);
  407 + clear_thread_flag(TIF_LOGADE);
409 408  
410 409 return 0;
411 410  
arch/mips/kernel/vpe.c
... ... @@ -148,9 +148,9 @@
148 148 spinlock_t tc_list_lock;
149 149 struct list_head tc_list; /* Thread contexts */
150 150 } vpecontrol = {
151   - .vpe_list_lock = SPIN_LOCK_UNLOCKED,
  151 + .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
152 152 .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
153   - .tc_list_lock = SPIN_LOCK_UNLOCKED,
  153 + .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock),
154 154 .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
155 155 };
156 156  
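
The vpe.c hunk above drops the long-deprecated SPIN_LOCK_UNLOCKED initializer, which gives every lock the same lockdep class, in favour of the per-lock __SPIN_LOCK_UNLOCKED() form. A brief sketch of the supported initialization styles (structure and lock names are hypothetical):

#include <linux/spinlock.h>

/* Stand-alone static lock. */
static DEFINE_SPINLOCK(my_lock);

/* Lock embedded in a statically initialized object, as in vpecontrol above. */
static struct my_ctrl {
        spinlock_t list_lock;
} my_ctrl = {
        .list_lock = __SPIN_LOCK_UNLOCKED(my_ctrl.list_lock),
};

/* Runtime initialization for dynamically allocated objects. */
static void my_ctrl_setup(struct my_ctrl *c)
{
        spin_lock_init(&c->list_lock);
}
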
arch/mips/loongson/Kconfig
  1 +if MACH_LOONGSON
  2 +
1 3 choice
2 4 prompt "Machine Type"
3   - depends on MACH_LOONGSON
4 5  
5 6 config LEMOTE_FULOONG2E
6 7 bool "Lemote Fuloong(2e) mini-PC"
... ... @@ -87,4 +88,6 @@
87 88 config LOONGSON_MC146818
88 89 bool
89 90 default n
  91 +
  92 +endif # MACH_LOONGSON
arch/mips/loongson/common/cmdline.c
... ... @@ -44,11 +44,6 @@
44 44 strcat(arcs_cmdline, " ");
45 45 }
46 46  
47   - if ((strstr(arcs_cmdline, "console=")) == NULL)
48   - strcat(arcs_cmdline, " console=ttyS0,115200");
49   - if ((strstr(arcs_cmdline, "root=")) == NULL)
50   - strcat(arcs_cmdline, " root=/dev/hda1");
51   -
52 47 prom_init_machtype();
53 48 }
arch/mips/loongson/common/machtype.c
... ... @@ -41,7 +41,7 @@
41 41  
42 42 void __init prom_init_machtype(void)
43 43 {
44   - char *p, str[MACHTYPE_LEN];
  44 + char *p, str[MACHTYPE_LEN + 1];
45 45 int machtype = MACH_LEMOTE_FL2E;
46 46  
47 47 mips_machtype = LOONGSON_MACHTYPE;
... ... @@ -53,6 +53,7 @@
53 53 }
54 54 p += strlen("machtype=");
55 55 strncpy(str, p, MACHTYPE_LEN);
  56 + str[MACHTYPE_LEN] = '\0';
56 57 p = strstr(str, " ");
57 58 if (p)
58 59 *p = '\0';
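
The machtype.c fix above addresses the classic strncpy() pitfall: when the source is at least as long as the size limit, strncpy() writes no terminating NUL, so the destination needs one extra byte and an explicit terminator. A small self-contained illustration in plain C (the string and sizes are made up):

#include <stdio.h>
#include <string.h>

#define MACHTYPE_LEN 50

int main(void)
{
        const char *p = "machtype=longname-board-revision-something-quite-long more-args";
        char str[MACHTYPE_LEN + 1];     /* one extra byte for the terminator */

        strncpy(str, p, MACHTYPE_LEN);  /* copies at most MACHTYPE_LEN bytes;
                                           no '\0' is written if p is longer */
        str[MACHTYPE_LEN] = '\0';       /* always terminate explicitly */

        printf("%s\n", str);
        return 0;
}
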
arch/mips/math-emu/ieee754int.h
... ... @@ -70,7 +70,7 @@
70 70  
71 71  
72 72 #define COMPXSP \
73   - unsigned xm; int xe; int xs; int xc
  73 + unsigned xm; int xe; int xs __maybe_unused; int xc
74 74  
75 75 #define COMPYSP \
76 76 unsigned ym; int ye; int ys; int yc
... ... @@ -104,7 +104,7 @@
104 104  
105 105  
106 106 #define COMPXDP \
107   -u64 xm; int xe; int xs; int xc
  107 +u64 xm; int xe; int xs __maybe_unused; int xc
108 108  
109 109 #define COMPYDP \
110 110 u64 ym; int ye; int ys; int yc
arch/mips/mm/init.c
... ... @@ -324,7 +324,7 @@
324 324 void __init paging_init(void)
325 325 {
326 326 unsigned long max_zone_pfns[MAX_NR_ZONES];
327   - unsigned long lastpfn;
  327 + unsigned long lastpfn __maybe_unused;
328 328  
329 329 pagetable_init();
330 330  
arch/mips/mm/tlbex.c
... ... @@ -109,6 +109,8 @@
109 109 static int scratchpad_offset(int i)
110 110 {
111 111 BUG();
  112 + /* Really unreachable, but evidently some GCC want this. */
  113 + return 0;
112 114 }
113 115 #endif
114 116 /*
arch/mips/pci/ops-pmcmsp.c
... ... @@ -308,7 +308,7 @@
308 308 * RETURNS: PCIBIOS_SUCCESSFUL - success
309 309 *
310 310 ****************************************************************************/
311   -static int bpci_interrupt(int irq, void *dev_id)
  311 +static irqreturn_t bpci_interrupt(int irq, void *dev_id)
312 312 {
313 313 struct msp_pci_regs *preg = (void *)PCI_BASE_REG;
314 314 unsigned int stat = preg->if_status;
... ... @@ -326,7 +326,7 @@
326 326 /* write to clear all asserted interrupts */
327 327 preg->if_status = stat;
328 328  
329   - return PCIBIOS_SUCCESSFUL;
  329 + return IRQ_HANDLED;
330 330 }
331 331  
332 332 /*****************************************************************************
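
The ops-pmcmsp.c fix above gives bpci_interrupt() the irqreturn_t type the IRQ core expects; PCIBIOS_SUCCESSFUL is 0, which the core reads as IRQ_NONE, so every interrupt would have been counted as unhandled and a shared line could eventually be disabled as spurious. A minimal sketch of the expected handler shape (the device, register layout, and names below are hypothetical):

#include <linux/interrupt.h>
#include <linux/io.h>

struct mydev {
        void __iomem *regs;             /* hypothetical register base */
};

#define MYDEV_STAT      0x10            /* hypothetical status register */

static irqreturn_t mydev_interrupt(int irq, void *dev_id)
{
        struct mydev *dev = dev_id;
        u32 stat = readl(dev->regs + MYDEV_STAT);

        if (!stat)
                return IRQ_NONE;        /* not ours: let other sharers run */

        writel(stat, dev->regs + MYDEV_STAT);   /* write-1-to-clear ack */
        return IRQ_HANDLED;
}

/* Typical registration from probe():
 *      err = request_irq(irq, mydev_interrupt, IRQF_SHARED, "mydev", dev);
 */
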
arch/mips/pmc-sierra/Kconfig
... ... @@ -4,15 +4,11 @@
4 4  
5 5 config PMC_MSP4200_EVAL
6 6 bool "PMC-Sierra MSP4200 Eval Board"
7   - select CEVT_R4K
8   - select CSRC_R4K
9 7 select IRQ_MSP_SLP
10 8 select HW_HAS_PCI
11 9  
12 10 config PMC_MSP4200_GW
13 11 bool "PMC-Sierra MSP4200 VoIP Gateway"
14   - select CEVT_R4K
15   - select CSRC_R4K
16 12 select IRQ_MSP_SLP
17 13 select HW_HAS_PCI
18 14  
arch/mips/pmc-sierra/msp71xx/msp_time.c
... ... @@ -81,7 +81,7 @@
81 81 mips_hpt_frequency = cpu_rate/2;
82 82 }
83 83  
84   -unsigned int __init get_c0_compare_int(void)
  84 +unsigned int __cpuinit get_c0_compare_int(void)
85 85 {
86 86 return MSP_INT_VPE0_TIMER;
87 87 }