Commit c3567f8a359b7917dcffa442301f88ed0a75211f

Authored by Noam Camus
Committed by Linus Torvalds
1 parent d5d04bb48f

ARC: SMP failed to boot due to missing IVT setup

Commit 05b016ecf5e7a "ARC: Setup Vector Table Base in early boot" moved
the Interrupt vector Table setup out of arc_init_IRQ() which is called
for all CPUs, to entry point of boot cpu only, breaking booting of others.

Fix by adding the same to entry point of non-boot CPUs too.

read_arc_build_cfg_regs() printing IVT Base Register didn't help the
cause since it prints a synthetic value if zero which is totally bogus,
so fix that to print the exact Register.

[vgupta: Remove the now stale comment from header of arc_init_IRQ and
also added the commentary for halt-on-reset]

Cc: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: <stable@vger.kernel.org> #3.11
Signed-off-by: Noam Camus <noamc@ezchip.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 4 changed files with 5 additions and 5 deletions Inline Diff

arch/arc/include/asm/sections.h
1 /* 1 /*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) 2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8 8
9 #ifndef _ASM_ARC_SECTIONS_H 9 #ifndef _ASM_ARC_SECTIONS_H
10 #define _ASM_ARC_SECTIONS_H 10 #define _ASM_ARC_SECTIONS_H
11 11
12 #include <asm-generic/sections.h> 12 #include <asm-generic/sections.h>
13 13
14 extern char _int_vec_base_lds[];
15 extern char __arc_dccm_base[]; 14 extern char __arc_dccm_base[];
16 extern char __dtb_start[]; 15 extern char __dtb_start[];
17 16
18 #endif 17 #endif
19 18
arch/arc/kernel/head.S
1 /* 1 /*
2 * ARC CPU startup Code 2 * ARC CPU startup Code
3 * 3 *
4 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) 4 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 * 9 *
10 * Vineetg: Dec 2007 10 * Vineetg: Dec 2007
11 * -Check if we are running on Simulator or on real hardware 11 * -Check if we are running on Simulator or on real hardware
12 * to skip certain things during boot on simulator 12 * to skip certain things during boot on simulator
13 */ 13 */
14 14
15 #include <asm/asm-offsets.h> 15 #include <asm/asm-offsets.h>
16 #include <asm/entry.h> 16 #include <asm/entry.h>
17 #include <linux/linkage.h> 17 #include <linux/linkage.h>
18 #include <asm/arcregs.h> 18 #include <asm/arcregs.h>
19 19
20 .cpu A7 20 .cpu A7
21 21
22 .section .init.text, "ax",@progbits 22 .section .init.text, "ax",@progbits
23 .type stext, @function 23 .type stext, @function
24 .globl stext 24 .globl stext
25 stext: 25 stext:
26 ;------------------------------------------------------------------- 26 ;-------------------------------------------------------------------
27 ; Don't clobber r0-r4 yet. It might have bootloader provided info 27 ; Don't clobber r0-r4 yet. It might have bootloader provided info
28 ;------------------------------------------------------------------- 28 ;-------------------------------------------------------------------
29 29
30 sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE] 30 sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
31 31
32 #ifdef CONFIG_SMP 32 #ifdef CONFIG_SMP
33 ; Only Boot (Master) proceeds. Others wait in platform dependent way 33 ; Only Boot (Master) proceeds. Others wait in platform dependent way
34 ; IDENTITY Reg [ 3 2 1 0 ] 34 ; IDENTITY Reg [ 3 2 1 0 ]
35 ; (cpu-id) ^^^ => Zero for UP ARC700 35 ; (cpu-id) ^^^ => Zero for UP ARC700
36 ; => #Core-ID if SMP (Master 0) 36 ; => #Core-ID if SMP (Master 0)
37 ; Note that non-boot CPUs might not land here if halt-on-reset and
38 ; instead breath life from @first_lines_of_secondary, but we still
39 ; need to make sure only boot cpu takes this path.
37 GET_CPU_ID r5 40 GET_CPU_ID r5
38 cmp r5, 0 41 cmp r5, 0
39 jnz arc_platform_smp_wait_to_boot 42 jnz arc_platform_smp_wait_to_boot
40 #endif 43 #endif
41 ; Clear BSS before updating any globals 44 ; Clear BSS before updating any globals
42 ; XXX: use ZOL here 45 ; XXX: use ZOL here
43 mov r5, __bss_start 46 mov r5, __bss_start
44 mov r6, __bss_stop 47 mov r6, __bss_stop
45 1: 48 1:
46 st.ab 0, [r5,4] 49 st.ab 0, [r5,4]
47 brlt r5, r6, 1b 50 brlt r5, r6, 1b
48 51
49 #ifdef CONFIG_CMDLINE_UBOOT 52 #ifdef CONFIG_CMDLINE_UBOOT
50 ; support for bootloader provided cmdline 53 ; support for bootloader provided cmdline
51 ; If cmdline passed by u-boot, then 54 ; If cmdline passed by u-boot, then
52 ; r0 = 1 (because ATAGS parsing, now retired, used to use 0) 55 ; r0 = 1 (because ATAGS parsing, now retired, used to use 0)
53 ; r1 = magic number (board identity) 56 ; r1 = magic number (board identity)
54 ; r2 = addr of cmdline string (somewhere in memory/flash) 57 ; r2 = addr of cmdline string (somewhere in memory/flash)
55 58
56 brne r0, 1, .Lother_bootup_chores ; u-boot didn't pass cmdline 59 brne r0, 1, .Lother_bootup_chores ; u-boot didn't pass cmdline
57 breq r2, 0, .Lother_bootup_chores ; or cmdline is NULL 60 breq r2, 0, .Lother_bootup_chores ; or cmdline is NULL
58 61
59 mov r5, @command_line 62 mov r5, @command_line
60 1: 63 1:
61 ldb.ab r6, [r2, 1] 64 ldb.ab r6, [r2, 1]
62 breq r6, 0, .Lother_bootup_chores 65 breq r6, 0, .Lother_bootup_chores
63 b.d 1b 66 b.d 1b
64 stb.ab r6, [r5, 1] 67 stb.ab r6, [r5, 1]
65 #endif 68 #endif
66 69
67 .Lother_bootup_chores: 70 .Lother_bootup_chores:
68 71
69 ; Identify if running on ISS vs Silicon 72 ; Identify if running on ISS vs Silicon
70 ; IDENTITY Reg [ 3 2 1 0 ] 73 ; IDENTITY Reg [ 3 2 1 0 ]
71 ; (chip-id) ^^^^^ ==> 0xffff for ISS 74 ; (chip-id) ^^^^^ ==> 0xffff for ISS
72 lr r0, [identity] 75 lr r0, [identity]
73 lsr r3, r0, 16 76 lsr r3, r0, 16
74 cmp r3, 0xffff 77 cmp r3, 0xffff
75 mov.z r4, 0 78 mov.z r4, 0
76 mov.nz r4, 1 79 mov.nz r4, 1
77 st r4, [@running_on_hw] 80 st r4, [@running_on_hw]
78 81
79 ; setup "current" tsk and optionally cache it in dedicated r25 82 ; setup "current" tsk and optionally cache it in dedicated r25
80 mov r9, @init_task 83 mov r9, @init_task
81 SET_CURR_TASK_ON_CPU r9, r0 ; r9 = tsk, r0 = scratch 84 SET_CURR_TASK_ON_CPU r9, r0 ; r9 = tsk, r0 = scratch
82 85
83 ; setup stack (fp, sp) 86 ; setup stack (fp, sp)
84 mov fp, 0 87 mov fp, 0
85 88
86 ; tsk->thread_info is really a PAGE, whose bottom hoists stack 89 ; tsk->thread_info is really a PAGE, whose bottom hoists stack
87 GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output) 90 GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output)
88 91
89 j start_kernel ; "C" entry point 92 j start_kernel ; "C" entry point
90 93
91 #ifdef CONFIG_SMP 94 #ifdef CONFIG_SMP
92 ;---------------------------------------------------------------- 95 ;----------------------------------------------------------------
93 ; First lines of code run by secondary before jumping to 'C' 96 ; First lines of code run by secondary before jumping to 'C'
94 ;---------------------------------------------------------------- 97 ;----------------------------------------------------------------
95 .section .init.text, "ax",@progbits 98 .section .init.text, "ax",@progbits
96 .type first_lines_of_secondary, @function 99 .type first_lines_of_secondary, @function
97 .globl first_lines_of_secondary 100 .globl first_lines_of_secondary
98 101
99 first_lines_of_secondary: 102 first_lines_of_secondary:
103
104 sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
100 105
101 ; setup per-cpu idle task as "current" on this CPU 106 ; setup per-cpu idle task as "current" on this CPU
102 ld r0, [@secondary_idle_tsk] 107 ld r0, [@secondary_idle_tsk]
103 SET_CURR_TASK_ON_CPU r0, r1 108 SET_CURR_TASK_ON_CPU r0, r1
104 109
105 ; setup stack (fp, sp) 110 ; setup stack (fp, sp)
106 mov fp, 0 111 mov fp, 0
107 112
108 ; set it's stack base to tsk->thread_info bottom 113 ; set it's stack base to tsk->thread_info bottom
109 GET_TSK_STACK_BASE r0, sp 114 GET_TSK_STACK_BASE r0, sp
110 115
111 j start_kernel_secondary 116 j start_kernel_secondary
112 117
113 #endif 118 #endif
114 119
arch/arc/kernel/irq.c
1 /* 1 /*
2 * Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com) 2 * Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 * 7 *
8 */ 8 */
9 9
10 #include <linux/interrupt.h> 10 #include <linux/interrupt.h>
11 #include <linux/module.h> 11 #include <linux/module.h>
12 #include <linux/of.h> 12 #include <linux/of.h>
13 #include <linux/irqdomain.h> 13 #include <linux/irqdomain.h>
14 #include <linux/irqchip.h> 14 #include <linux/irqchip.h>
15 #include "../../drivers/irqchip/irqchip.h" 15 #include "../../drivers/irqchip/irqchip.h"
16 #include <asm/sections.h> 16 #include <asm/sections.h>
17 #include <asm/irq.h> 17 #include <asm/irq.h>
18 #include <asm/mach_desc.h> 18 #include <asm/mach_desc.h>
19 19
20 /* 20 /*
21 * Early Hardware specific Interrupt setup 21 * Early Hardware specific Interrupt setup
22 * -Called very early (start_kernel -> setup_arch -> setup_processor) 22 * -Called very early (start_kernel -> setup_arch -> setup_processor)
23 * -Platform Independent (must for any ARC700) 23 * -Platform Independent (must for any ARC700)
24 * -Needed for each CPU (hence not foldable into init_IRQ) 24 * -Needed for each CPU (hence not foldable into init_IRQ)
25 * 25 *
26 * what it does ? 26 * what it does ?
27 * -setup Vector Table Base Reg - in case Linux not linked at 0x8000_0000
28 * -Disable all IRQs (on CPU side) 27 * -Disable all IRQs (on CPU side)
29 * -Optionally, setup the High priority Interrupts as Level 2 IRQs 28 * -Optionally, setup the High priority Interrupts as Level 2 IRQs
30 */ 29 */
31 void arc_init_IRQ(void) 30 void arc_init_IRQ(void)
32 { 31 {
33 int level_mask = 0; 32 int level_mask = 0;
34 33
35 /* Disable all IRQs: enable them as devices request */ 34 /* Disable all IRQs: enable them as devices request */
36 write_aux_reg(AUX_IENABLE, 0); 35 write_aux_reg(AUX_IENABLE, 0);
37 36
38 /* setup any high priority Interrupts (Level2 in ARCompact jargon) */ 37 /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
39 level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3; 38 level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
40 level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5; 39 level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
41 level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6; 40 level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
42 41
43 if (level_mask) { 42 if (level_mask) {
44 pr_info("Level-2 interrupts bitset %x\n", level_mask); 43 pr_info("Level-2 interrupts bitset %x\n", level_mask);
45 write_aux_reg(AUX_IRQ_LEV, level_mask); 44 write_aux_reg(AUX_IRQ_LEV, level_mask);
46 } 45 }
47 } 46 }
48 47
49 /* 48 /*
50 * ARC700 core includes a simple on-chip intc supporting 49 * ARC700 core includes a simple on-chip intc supporting
51 * -per IRQ enable/disable 50 * -per IRQ enable/disable
52 * -2 levels of interrupts (high/low) 51 * -2 levels of interrupts (high/low)
53 * -all interrupts being level triggered 52 * -all interrupts being level triggered
54 * 53 *
55 * To reduce platform code, we assume all IRQs directly hooked-up into intc. 54 * To reduce platform code, we assume all IRQs directly hooked-up into intc.
56 * Platforms with external intc, hence cascaded IRQs, are free to over-ride 55 * Platforms with external intc, hence cascaded IRQs, are free to over-ride
57 * below, per IRQ. 56 * below, per IRQ.
58 */ 57 */
59 58
60 static void arc_mask_irq(struct irq_data *data) 59 static void arc_mask_irq(struct irq_data *data)
61 { 60 {
62 arch_mask_irq(data->irq); 61 arch_mask_irq(data->irq);
63 } 62 }
64 63
65 static void arc_unmask_irq(struct irq_data *data) 64 static void arc_unmask_irq(struct irq_data *data)
66 { 65 {
67 arch_unmask_irq(data->irq); 66 arch_unmask_irq(data->irq);
68 } 67 }
69 68
70 static struct irq_chip onchip_intc = { 69 static struct irq_chip onchip_intc = {
71 .name = "ARC In-core Intc", 70 .name = "ARC In-core Intc",
72 .irq_mask = arc_mask_irq, 71 .irq_mask = arc_mask_irq,
73 .irq_unmask = arc_unmask_irq, 72 .irq_unmask = arc_unmask_irq,
74 }; 73 };
75 74
76 static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq, 75 static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
77 irq_hw_number_t hw) 76 irq_hw_number_t hw)
78 { 77 {
79 if (irq == TIMER0_IRQ) 78 if (irq == TIMER0_IRQ)
80 irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq); 79 irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
81 else 80 else
82 irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq); 81 irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
83 82
84 return 0; 83 return 0;
85 } 84 }
86 85
87 static const struct irq_domain_ops arc_intc_domain_ops = { 86 static const struct irq_domain_ops arc_intc_domain_ops = {
88 .xlate = irq_domain_xlate_onecell, 87 .xlate = irq_domain_xlate_onecell,
89 .map = arc_intc_domain_map, 88 .map = arc_intc_domain_map,
90 }; 89 };
91 90
92 static struct irq_domain *root_domain; 91 static struct irq_domain *root_domain;
93 92
94 static int __init 93 static int __init
95 init_onchip_IRQ(struct device_node *intc, struct device_node *parent) 94 init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
96 { 95 {
97 if (parent) 96 if (parent)
98 panic("DeviceTree incore intc not a root irq controller\n"); 97 panic("DeviceTree incore intc not a root irq controller\n");
99 98
100 root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0, 99 root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0,
101 &arc_intc_domain_ops, NULL); 100 &arc_intc_domain_ops, NULL);
102 101
103 if (!root_domain) 102 if (!root_domain)
104 panic("root irq domain not avail\n"); 103 panic("root irq domain not avail\n");
105 104
106 /* with this we don't need to export root_domain */ 105 /* with this we don't need to export root_domain */
107 irq_set_default_host(root_domain); 106 irq_set_default_host(root_domain);
108 107
109 return 0; 108 return 0;
110 } 109 }
111 110
112 IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ); 111 IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ);
113 112
114 /* 113 /*
115 * Late Interrupt system init called from start_kernel for Boot CPU only 114 * Late Interrupt system init called from start_kernel for Boot CPU only
116 * 115 *
117 * Since slab must already be initialized, platforms can start doing any 116 * Since slab must already be initialized, platforms can start doing any
118 * needed request_irq( )s 117 * needed request_irq( )s
119 */ 118 */
120 void __init init_IRQ(void) 119 void __init init_IRQ(void)
121 { 120 {
122 /* Any external intc can be setup here */ 121 /* Any external intc can be setup here */
123 if (machine_desc->init_irq) 122 if (machine_desc->init_irq)
124 machine_desc->init_irq(); 123 machine_desc->init_irq();
125 124
126 /* process the entire interrupt tree in one go */ 125 /* process the entire interrupt tree in one go */
127 irqchip_init(); 126 irqchip_init();
128 127
129 #ifdef CONFIG_SMP 128 #ifdef CONFIG_SMP
130 /* Master CPU can initialize it's side of IPI */ 129 /* Master CPU can initialize it's side of IPI */
131 if (machine_desc->init_smp) 130 if (machine_desc->init_smp)
132 machine_desc->init_smp(smp_processor_id()); 131 machine_desc->init_smp(smp_processor_id());
133 #endif 132 #endif
134 } 133 }
135 134
136 /* 135 /*
137 * "C" Entry point for any ARC ISR, called from low level vector handler 136 * "C" Entry point for any ARC ISR, called from low level vector handler
138 * @irq is the vector number read from ICAUSE reg of on-chip intc 137 * @irq is the vector number read from ICAUSE reg of on-chip intc
139 */ 138 */
140 void arch_do_IRQ(unsigned int irq, struct pt_regs *regs) 139 void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
141 { 140 {
142 struct pt_regs *old_regs = set_irq_regs(regs); 141 struct pt_regs *old_regs = set_irq_regs(regs);
143 142
144 irq_enter(); 143 irq_enter();
145 generic_handle_irq(irq); 144 generic_handle_irq(irq);
146 irq_exit(); 145 irq_exit();
147 set_irq_regs(old_regs); 146 set_irq_regs(old_regs);
148 } 147 }
149 148
150 int __init get_hw_config_num_irq(void) 149 int __init get_hw_config_num_irq(void)
151 { 150 {
152 uint32_t val = read_aux_reg(ARC_REG_VECBASE_BCR); 151 uint32_t val = read_aux_reg(ARC_REG_VECBASE_BCR);
153 152
154 switch (val & 0x03) { 153 switch (val & 0x03) {
155 case 0: 154 case 0:
156 return 16; 155 return 16;
157 case 1: 156 case 1:
158 return 32; 157 return 32;
159 case 2: 158 case 2:
160 return 8; 159 return 8;
161 default: 160 default:
162 return 0; 161 return 0;
163 } 162 }
164 163
165 return 0; 164 return 0;
166 } 165 }
167 166
168 /* 167 /*
169 * arch_local_irq_enable - Enable interrupts. 168 * arch_local_irq_enable - Enable interrupts.
170 * 169 *
171 * 1. Explicitly called to re-enable interrupts 170 * 1. Explicitly called to re-enable interrupts
172 * 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc 171 * 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc
173 * which maybe in hard ISR itself 172 * which maybe in hard ISR itself
174 * 173 *
175 * Semantics of this function change depending on where it is called from: 174 * Semantics of this function change depending on where it is called from:
176 * 175 *
177 * -If called from hard-ISR, it must not invert interrupt priorities 176 * -If called from hard-ISR, it must not invert interrupt priorities
178 * e.g. suppose TIMER is high priority (Level 2) IRQ 177 * e.g. suppose TIMER is high priority (Level 2) IRQ
179 * Time hard-ISR, timer_interrupt( ) calls spin_unlock_irq several times. 178 * Time hard-ISR, timer_interrupt( ) calls spin_unlock_irq several times.
180 * Here local_irq_enable( ) shd not re-enable lower priority interrupts 179 * Here local_irq_enable( ) shd not re-enable lower priority interrupts
181 * -If called from soft-ISR, it must re-enable all interrupts 180 * -If called from soft-ISR, it must re-enable all interrupts
182 * soft ISR are low prioity jobs which can be very slow, thus all IRQs 181 * soft ISR are low prioity jobs which can be very slow, thus all IRQs
183 * must be enabled while they run. 182 * must be enabled while they run.
184 * Now hardware context wise we may still be in L2 ISR (not done rtie) 183 * Now hardware context wise we may still be in L2 ISR (not done rtie)
185 * still we must re-enable both L1 and L2 IRQs 184 * still we must re-enable both L1 and L2 IRQs
186 * Another twist is prev scenario with flow being 185 * Another twist is prev scenario with flow being
187 * L1 ISR ==> interrupted by L2 ISR ==> L2 soft ISR 186 * L1 ISR ==> interrupted by L2 ISR ==> L2 soft ISR
188 * here we must not re-enable Ll as prev Ll Interrupt's h/w context will get 187 * here we must not re-enable Ll as prev Ll Interrupt's h/w context will get
189 * over-written (this is deficiency in ARC700 Interrupt mechanism) 188 * over-written (this is deficiency in ARC700 Interrupt mechanism)
190 */ 189 */
191 190
192 #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS /* Complex version for 2 IRQ levels */ 191 #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS /* Complex version for 2 IRQ levels */
193 192
194 void arch_local_irq_enable(void) 193 void arch_local_irq_enable(void)
195 { 194 {
196 195
197 unsigned long flags; 196 unsigned long flags;
198 flags = arch_local_save_flags(); 197 flags = arch_local_save_flags();
199 198
200 /* Allow both L1 and L2 at the onset */ 199 /* Allow both L1 and L2 at the onset */
201 flags |= (STATUS_E1_MASK | STATUS_E2_MASK); 200 flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
202 201
203 /* Called from hard ISR (between irq_enter and irq_exit) */ 202 /* Called from hard ISR (between irq_enter and irq_exit) */
204 if (in_irq()) { 203 if (in_irq()) {
205 204
206 /* If in L2 ISR, don't re-enable any further IRQs as this can 205 /* If in L2 ISR, don't re-enable any further IRQs as this can
207 * cause IRQ priorities to get upside down. e.g. it could allow 206 * cause IRQ priorities to get upside down. e.g. it could allow
208 * L1 be taken while in L2 hard ISR which is wrong not only in 207 * L1 be taken while in L2 hard ISR which is wrong not only in
209 * theory, it can also cause the dreaded L1-L2-L1 scenario 208 * theory, it can also cause the dreaded L1-L2-L1 scenario
210 */ 209 */
211 if (flags & STATUS_A2_MASK) 210 if (flags & STATUS_A2_MASK)
212 flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); 211 flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
213 212
214 /* Even if in L1 ISR, allowe Higher prio L2 IRQs */ 213 /* Even if in L1 ISR, allowe Higher prio L2 IRQs */
215 else if (flags & STATUS_A1_MASK) 214 else if (flags & STATUS_A1_MASK)
216 flags &= ~(STATUS_E1_MASK); 215 flags &= ~(STATUS_E1_MASK);
217 } 216 }
218 217
219 /* called from soft IRQ, ideally we want to re-enable all levels */ 218 /* called from soft IRQ, ideally we want to re-enable all levels */
220 219
221 else if (in_softirq()) { 220 else if (in_softirq()) {
222 221
223 /* However if this is case of L1 interrupted by L2, 222 /* However if this is case of L1 interrupted by L2,
224 * re-enabling both may cause whaco L1-L2-L1 scenario 223 * re-enabling both may cause whaco L1-L2-L1 scenario
225 * because ARC700 allows level 1 to interrupt an active L2 ISR 224 * because ARC700 allows level 1 to interrupt an active L2 ISR
226 * Thus we disable both 225 * Thus we disable both
227 * However some code, executing in soft ISR wants some IRQs 226 * However some code, executing in soft ISR wants some IRQs
228 * to be enabled so we re-enable L2 only 227 * to be enabled so we re-enable L2 only
229 * 228 *
230 * How do we determine L1 intr by L2 229 * How do we determine L1 intr by L2
231 * -A2 is set (means in L2 ISR) 230 * -A2 is set (means in L2 ISR)
232 * -E1 is set in this ISR's pt_regs->status32 which is 231 * -E1 is set in this ISR's pt_regs->status32 which is
233 * saved copy of status32_l2 when l2 ISR happened 232 * saved copy of status32_l2 when l2 ISR happened
234 */ 233 */
235 struct pt_regs *pt = get_irq_regs(); 234 struct pt_regs *pt = get_irq_regs();
236 if ((flags & STATUS_A2_MASK) && pt && 235 if ((flags & STATUS_A2_MASK) && pt &&
237 (pt->status32 & STATUS_A1_MASK)) { 236 (pt->status32 & STATUS_A1_MASK)) {
238 /*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */ 237 /*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
239 flags &= ~(STATUS_E1_MASK); 238 flags &= ~(STATUS_E1_MASK);
240 } 239 }
241 } 240 }
242 241
243 arch_local_irq_restore(flags); 242 arch_local_irq_restore(flags);
244 } 243 }
245 244
246 #else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */ 245 #else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */
247 246
248 /* 247 /*
249 * Simpler version for only 1 level of interrupt 248 * Simpler version for only 1 level of interrupt
250 * Here we only Worry about Level 1 Bits 249 * Here we only Worry about Level 1 Bits
251 */ 250 */
252 void arch_local_irq_enable(void) 251 void arch_local_irq_enable(void)
253 { 252 {
254 unsigned long flags; 253 unsigned long flags;
255 254
256 /* 255 /*
257 * ARC IDE Drivers tries to re-enable interrupts from hard-isr 256 * ARC IDE Drivers tries to re-enable interrupts from hard-isr
258 * context which is simply wrong 257 * context which is simply wrong
259 */ 258 */
260 if (in_irq()) { 259 if (in_irq()) {
261 WARN_ONCE(1, "IRQ enabled from hard-isr"); 260 WARN_ONCE(1, "IRQ enabled from hard-isr");
262 return; 261 return;
263 } 262 }
264 263
265 flags = arch_local_save_flags(); 264 flags = arch_local_save_flags();
266 flags |= (STATUS_E1_MASK | STATUS_E2_MASK); 265 flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
267 arch_local_irq_restore(flags); 266 arch_local_irq_restore(flags);
268 } 267 }
269 #endif 268 #endif
270 EXPORT_SYMBOL(arch_local_irq_enable); 269 EXPORT_SYMBOL(arch_local_irq_enable);
271 270
arch/arc/kernel/setup.c
1 /* 1 /*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) 2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8 8
9 #include <linux/seq_file.h> 9 #include <linux/seq_file.h>
10 #include <linux/fs.h> 10 #include <linux/fs.h>
11 #include <linux/delay.h> 11 #include <linux/delay.h>
12 #include <linux/root_dev.h> 12 #include <linux/root_dev.h>
13 #include <linux/console.h> 13 #include <linux/console.h>
14 #include <linux/module.h> 14 #include <linux/module.h>
15 #include <linux/cpu.h> 15 #include <linux/cpu.h>
16 #include <linux/of_fdt.h> 16 #include <linux/of_fdt.h>
17 #include <linux/cache.h> 17 #include <linux/cache.h>
18 #include <asm/sections.h> 18 #include <asm/sections.h>
19 #include <asm/arcregs.h> 19 #include <asm/arcregs.h>
20 #include <asm/tlb.h> 20 #include <asm/tlb.h>
21 #include <asm/setup.h> 21 #include <asm/setup.h>
22 #include <asm/page.h> 22 #include <asm/page.h>
23 #include <asm/irq.h> 23 #include <asm/irq.h>
24 #include <asm/prom.h> 24 #include <asm/prom.h>
25 #include <asm/unwind.h> 25 #include <asm/unwind.h>
26 #include <asm/clk.h> 26 #include <asm/clk.h>
27 #include <asm/mach_desc.h> 27 #include <asm/mach_desc.h>
28 28
29 #define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x)) 29 #define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x))
30 30
31 int running_on_hw = 1; /* vs. on ISS */ 31 int running_on_hw = 1; /* vs. on ISS */
32 32
33 char __initdata command_line[COMMAND_LINE_SIZE]; 33 char __initdata command_line[COMMAND_LINE_SIZE];
34 struct machine_desc *machine_desc; 34 struct machine_desc *machine_desc;
35 35
36 struct task_struct *_current_task[NR_CPUS]; /* For stack switching */ 36 struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
37 37
38 struct cpuinfo_arc cpuinfo_arc700[NR_CPUS]; 38 struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
39 39
40 40
41 void read_arc_build_cfg_regs(void) 41 void read_arc_build_cfg_regs(void)
42 { 42 {
43 struct bcr_perip uncached_space; 43 struct bcr_perip uncached_space;
44 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; 44 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
45 FIX_PTR(cpu); 45 FIX_PTR(cpu);
46 46
47 READ_BCR(AUX_IDENTITY, cpu->core); 47 READ_BCR(AUX_IDENTITY, cpu->core);
48 48
49 cpu->timers = read_aux_reg(ARC_REG_TIMERS_BCR); 49 cpu->timers = read_aux_reg(ARC_REG_TIMERS_BCR);
50
51 cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE); 50 cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
52 if (cpu->vec_base == 0)
53 cpu->vec_base = (unsigned int)_int_vec_base_lds;
54 51
55 READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space); 52 READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
56 cpu->uncached_base = uncached_space.start << 24; 53 cpu->uncached_base = uncached_space.start << 24;
57 54
58 cpu->extn.mul = read_aux_reg(ARC_REG_MUL_BCR); 55 cpu->extn.mul = read_aux_reg(ARC_REG_MUL_BCR);
59 cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR); 56 cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR);
60 cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR); 57 cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR);
61 cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR); 58 cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR);
62 cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR); 59 cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR);
63 READ_BCR(ARC_REG_MAC_BCR, cpu->extn_mac_mul); 60 READ_BCR(ARC_REG_MAC_BCR, cpu->extn_mac_mul);
64 61
65 cpu->extn.ext_arith = read_aux_reg(ARC_REG_EXTARITH_BCR); 62 cpu->extn.ext_arith = read_aux_reg(ARC_REG_EXTARITH_BCR);
66 cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR); 63 cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR);
67 64
68 /* Note that we read the CCM BCRs independent of kernel config 65 /* Note that we read the CCM BCRs independent of kernel config
69 * This is to catch the cases where user doesn't know that 66 * This is to catch the cases where user doesn't know that
70 * CCMs are present in hardware build 67 * CCMs are present in hardware build
71 */ 68 */
72 { 69 {
73 struct bcr_iccm iccm; 70 struct bcr_iccm iccm;
74 struct bcr_dccm dccm; 71 struct bcr_dccm dccm;
75 struct bcr_dccm_base dccm_base; 72 struct bcr_dccm_base dccm_base;
76 unsigned int bcr_32bit_val; 73 unsigned int bcr_32bit_val;
77 74
78 bcr_32bit_val = read_aux_reg(ARC_REG_ICCM_BCR); 75 bcr_32bit_val = read_aux_reg(ARC_REG_ICCM_BCR);
79 if (bcr_32bit_val) { 76 if (bcr_32bit_val) {
80 iccm = *((struct bcr_iccm *)&bcr_32bit_val); 77 iccm = *((struct bcr_iccm *)&bcr_32bit_val);
81 cpu->iccm.base_addr = iccm.base << 16; 78 cpu->iccm.base_addr = iccm.base << 16;
82 cpu->iccm.sz = 0x2000 << (iccm.sz - 1); 79 cpu->iccm.sz = 0x2000 << (iccm.sz - 1);
83 } 80 }
84 81
85 bcr_32bit_val = read_aux_reg(ARC_REG_DCCM_BCR); 82 bcr_32bit_val = read_aux_reg(ARC_REG_DCCM_BCR);
86 if (bcr_32bit_val) { 83 if (bcr_32bit_val) {
87 dccm = *((struct bcr_dccm *)&bcr_32bit_val); 84 dccm = *((struct bcr_dccm *)&bcr_32bit_val);
88 cpu->dccm.sz = 0x800 << (dccm.sz); 85 cpu->dccm.sz = 0x800 << (dccm.sz);
89 86
90 READ_BCR(ARC_REG_DCCMBASE_BCR, dccm_base); 87 READ_BCR(ARC_REG_DCCMBASE_BCR, dccm_base);
91 cpu->dccm.base_addr = dccm_base.addr << 8; 88 cpu->dccm.base_addr = dccm_base.addr << 8;
92 } 89 }
93 } 90 }
94 91
95 READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem); 92 READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
96 93
97 read_decode_mmu_bcr(); 94 read_decode_mmu_bcr();
98 read_decode_cache_bcr(); 95 read_decode_cache_bcr();
99 96
100 READ_BCR(ARC_REG_FP_BCR, cpu->fp); 97 READ_BCR(ARC_REG_FP_BCR, cpu->fp);
101 READ_BCR(ARC_REG_DPFP_BCR, cpu->dpfp); 98 READ_BCR(ARC_REG_DPFP_BCR, cpu->dpfp);
102 } 99 }
103 100
104 static const struct cpuinfo_data arc_cpu_tbl[] = { 101 static const struct cpuinfo_data arc_cpu_tbl[] = {
105 { {0x10, "ARCTangent A5"}, 0x1F}, 102 { {0x10, "ARCTangent A5"}, 0x1F},
106 { {0x20, "ARC 600" }, 0x2F}, 103 { {0x20, "ARC 600" }, 0x2F},
107 { {0x30, "ARC 700" }, 0x33}, 104 { {0x30, "ARC 700" }, 0x33},
108 { {0x34, "ARC 700 R4.10"}, 0x34}, 105 { {0x34, "ARC 700 R4.10"}, 0x34},
109 { {0x00, NULL } } 106 { {0x00, NULL } }
110 }; 107 };
111 108
/*
 * Render the core identity of CPU @cpu_id (family/cpu-id/chip-id, name,
 * clock speed, available timers, vector table base, uncached window base)
 * into @buf, at most @len bytes.
 *
 * Returns @buf so callers can use it directly as a printk/seq_printf
 * argument.
 */
char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
	struct bcr_identity *core = &cpu->core;
	const struct cpuinfo_data *tbl;
	int be = 0;
#ifdef CONFIG_CPU_BIG_ENDIAN
	be = 1;
#endif
	/* NOTE(review): FIX_PTR presumably keeps the compiler from caching
	 * @cpu across the accesses below — confirm against its definition.
	 */
	FIX_PTR(cpu);

	n += scnprintf(buf + n, len - n,
		       "\nARC IDENTITY\t: Family [%#02x]"
		       " Cpu-id [%#02x] Chip-id [%#4x]\n",
		       core->family, core->cpu_id,
		       core->chip_id);

	/* Find a printable name: family must fall within the entry's range */
	for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
		if ((core->family >= tbl->info.id) &&
		    (core->family <= tbl->up_range)) {
			n += scnprintf(buf + n, len - n,
				       "processor\t: %s %s\n",
				       tbl->info.str,
				       be ? "[Big Endian]" : "");
			break;
		}
	}

	/* Loop ran off the sentinel: family not present in arc_cpu_tbl */
	if (tbl->info.id == 0)
		n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");

	n += scnprintf(buf + n, len - n, "CPU speed\t: %u.%02u Mhz\n",
		       (unsigned int)(arc_get_core_freq() / 1000000),
		       (unsigned int)(arc_get_core_freq() / 10000) % 100);

	n += scnprintf(buf + n, len - n, "Timers\t\t: %s %s\n",
		       (cpu->timers & 0x200) ? "TIMER1" : "",
		       (cpu->timers & 0x100) ? "TIMER0" : "");

	n += scnprintf(buf + n, len - n, "Vect Tbl Base\t: %#x\n",
		       cpu->vec_base);

	n += scnprintf(buf + n, len - n, "UNCACHED Base\t: %#x\n",
		       cpu->uncached_base);

	return buf;
}
160 157
/* Printable names for the MPY build-config field, indexed by its value */
static const struct id_to_str mul_type_nm[] = {
	{ 0x0, "N/A"},
	{ 0x1, "32x32 (spl Result Reg)" },
	{ 0x2, "32x32 (ANY Result Reg)" }
};
166 163
/* Printable names for the MAC-MPY build-config type, indexed by its value */
static const struct id_to_str mac_mul_nm[] = {
	{0x0, "N/A"},
	{0x1, "N/A"},
	{0x2, "Dual 16 x 16"},
	{0x3, "N/A"},
	{0x4, "32x16"},
	{0x5, "N/A"},
	{0x6, "Dual 16x16 and 32x16"}
};
176 173
/*
 * Render the optional-extension info of CPU @cpu_id (base ISA extensions,
 * multiplier variants, CCMs, FPU, OS ABI) into @buf, at most @len bytes.
 * Returns @buf for direct use as a printk/seq_printf argument.
 */
char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];

	FIX_PTR(cpu);
/* Extension present if BCR version field is non-zero ... */
#define IS_AVAIL1(var, str)	((var) ? str : "")
/* ... or, for some extensions, only if it reads exactly 2 */
#define IS_AVAIL2(var, str)	((var == 0x2) ? str : "")
/* Whether the kernel was configured to use the feature */
#define IS_USED(cfg)		(IS_ENABLED(cfg) ? "(in-use)" : "(not used)")

	n += scnprintf(buf + n, len - n,
		       "Extn [700-Base]\t: %s %s %s %s %s %s\n",
		       IS_AVAIL2(cpu->extn.norm, "norm,"),
		       IS_AVAIL2(cpu->extn.barrel, "barrel-shift,"),
		       IS_AVAIL1(cpu->extn.swap, "swap,"),
		       IS_AVAIL2(cpu->extn.minmax, "minmax,"),
		       IS_AVAIL1(cpu->extn.crc, "crc,"),
		       IS_AVAIL2(cpu->extn.ext_arith, "ext-arith"));

	/* NOTE(review): indexes assume hardware reports mul <= 0x2 and
	 * mac_mul type <= 0x6 — out-of-range values would read past the
	 * tables; confirm the BCR fields can't exceed these.
	 */
	n += scnprintf(buf + n, len - n, "Extn [700-MPY]\t: %s",
		       mul_type_nm[cpu->extn.mul].str);

	n += scnprintf(buf + n, len - n, "   MAC MPY: %s\n",
		       mac_mul_nm[cpu->extn_mac_mul.type].str);

	/* Extensions introduced with the ARC700 4.10 release */
	if (cpu->core.family == 0x34) {
		n += scnprintf(buf + n, len - n,
		"Extn [700-4.10]\t: LLOCK/SCOND %s, SWAPE %s, RTSC %s\n",
			       IS_USED(CONFIG_ARC_HAS_LLSC),
			       IS_USED(CONFIG_ARC_HAS_SWAPE),
			       IS_USED(CONFIG_ARC_HAS_RTSC));
	}

	n += scnprintf(buf + n, len - n, "Extn [CCM]\t: %s",
		       !(cpu->dccm.sz || cpu->iccm.sz) ? "N/A" : "");

	if (cpu->dccm.sz)
		n += scnprintf(buf + n, len - n, "DCCM: @ %x, %d KB ",
			       cpu->dccm.base_addr, TO_KB(cpu->dccm.sz));

	if (cpu->iccm.sz)
		n += scnprintf(buf + n, len - n, "ICCM: @ %x, %d KB",
			       cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));

	n += scnprintf(buf + n, len - n, "\nExtn [FPU]\t: %s",
		       !(cpu->fp.ver || cpu->dpfp.ver) ? "N/A" : "");

	if (cpu->fp.ver)
		n += scnprintf(buf + n, len - n, "SP [v%d] %s",
			       cpu->fp.ver, cpu->fp.fast ? "(fast)" : "");

	if (cpu->dpfp.ver)
		n += scnprintf(buf + n, len - n, "DP [v%d] %s",
			       cpu->dpfp.ver, cpu->dpfp.fast ? "(fast)" : "");

	n += scnprintf(buf + n, len - n, "\n");

	n += scnprintf(buf + n, len - n,
		       "OS ABI [v3]\t: no-legacy-syscalls\n");

	return buf;
}
239 236
/*
 * Sanity-check that the Closely Coupled Memories (DCCM/ICCM) the kernel
 * was built for match what the hardware actually provides; panic on any
 * mismatch since code/data placed in a CCM would otherwise be unreachable.
 * No-op unless the kernel is configured with DCCM and/or ICCM support.
 */
void arc_chk_ccms(void)
{
#if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM)
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];

#ifdef CONFIG_ARC_HAS_DCCM
	/*
	 * DCCM can be arbit placed in hardware.
	 * Make sure it's placement/sz matches what Linux is built with
	 */
	if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
		panic("Linux built with incorrect DCCM Base address\n");

	if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz)
		panic("Linux built with incorrect DCCM Size\n");
#endif

#ifdef CONFIG_ARC_HAS_ICCM
	/* ICCM base is fixed in hardware, only the size can mismatch */
	if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz)
		panic("Linux built with incorrect ICCM Size\n");
#endif
#endif
}
263 260
/*
 * Ensure that FP hardware and kernel config match
 * -If hardware contains DPFP, kernel needs to save/restore FPU state
 *  across context switches
 * -If hardware lacks DPFP, but kernel configured to save FPU state then
 *  kernel trying to access non-existent DPFP regs will crash
 *
 * We only check for Dbl precision Floating Point, because only DPFP
 * hardware has dedicated regs which need to be saved/restored on ctx-sw
 * (Single Precision uses core regs), thus kernel is kind of oblivious to it
 */
void arc_chk_fpu(void)
{
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];

	if (cpu->dpfp.ver) {
#ifndef CONFIG_ARC_FPU_SAVE_RESTORE
		/* Hardware has DPFP but kernel won't preserve its state */
		pr_warn("DPFP support broken in this kernel...\n");
#endif
	} else {
#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
		/* Kernel would touch DPFP regs the hardware doesn't have */
		panic("H/w lacks DPFP support, apps won't work\n");
#endif
	}
}
289 286
/*
 * Initialize and setup the processor core
 * This is called by all the CPUs thus should not do special case stuff
 *    such as only for boot CPU etc
 */

void setup_processor(void)
{
	char str[512];
	int cpu_id = smp_processor_id();

	read_arc_build_cfg_regs();
	arc_init_IRQ();

	/*
	 * The mumbojumbo buffers contain arbitrary text (CPU names etc.);
	 * never pass them as the printk format string (a stray '%' would
	 * be interpreted as a conversion) — use an explicit "%s".
	 */
	printk("%s", arc_cpu_mumbojumbo(cpu_id, str, sizeof(str)));

	arc_mmu_init();
	arc_cache_init();
	arc_chk_ccms();

	printk("%s", arc_extn_mumbojumbo(cpu_id, str, sizeof(str)));

#ifdef CONFIG_SMP
	printk("%s", arc_platform_smp_cpuinfo());
#endif

	arc_chk_fpu();
}
318 315
/*
 * Arch-specific boot-time setup, called once on the boot CPU from
 * start_kernel(): parses the device tree and command line, probes and
 * configures this processor, discovers the other CPUs, and sets up
 * memory and the unwinder.
 */
void __init setup_arch(char **cmdline_p)
{
	/* This also populates @boot_command_line from /bootargs */
	machine_desc = setup_machine_fdt(__dtb_start);
	if (!machine_desc)
		panic("Embedded DT invalid\n");

	/* Append any u-boot provided cmdline */
#ifdef CONFIG_CMDLINE_UBOOT
	/* Add a whitespace seperator between the 2 cmdlines */
	strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
	strlcat(boot_command_line, command_line, COMMAND_LINE_SIZE);
#endif

	/* Save unparsed command line copy for /proc/cmdline */
	*cmdline_p = boot_command_line;

	/* To force early parsing of things like mem=xxx */
	parse_early_param();

	/* Platform/board specific: e.g. early console registration */
	if (machine_desc->init_early)
		machine_desc->init_early();

	/* Probe/configure this (boot) CPU; APs run this from their path */
	setup_processor();

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	setup_arch_memory();

	/* copy flat DT out of .init and then unflatten it */
	copy_devtree();
	unflatten_device_tree();

	/* Can be issue if someone passes cmd line arg "ro"
	 * But that is unlikely so keeping it as it is
	 */
	root_mountflags &= ~MS_RDONLY;

#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif

	arc_unwind_init();
	arc_unwind_setup();
}
367 364
/*
 * Run the board's device-registration hook, if it provides one.
 * Registered as an arch_initcall so platform devices exist before
 * their drivers' (typically later) initcalls run.
 */
static int __init customize_machine(void)
{
	/* Add platform devices */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);
377 374
/* Run the board's late-init hook (if any) at late_initcall time */
static int __init init_late_machine(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();

	return 0;
}
late_initcall(init_late_machine);
/*
 * Get CPU information for use by the procfs.
 *
 * The seq_file iterator protocol treats a NULL cookie as "stop", which
 * would clash with cpu-id 0; encode the id into a non-NULL pointer by
 * OR-ing in 0xFFFF0000 and mask it back out in the show routine.
 */

#define cpu_to_ptr(c)	((void *)(0xFFFF0000 | (unsigned int)(c)))
#define ptr_to_cpu(p)	(~0xFFFF0000UL & (unsigned int)(p))
392 389
393 static int show_cpuinfo(struct seq_file *m, void *v) 390 static int show_cpuinfo(struct seq_file *m, void *v)
394 { 391 {
395 char *str; 392 char *str;
396 int cpu_id = ptr_to_cpu(v); 393 int cpu_id = ptr_to_cpu(v);
397 394
398 str = (char *)__get_free_page(GFP_TEMPORARY); 395 str = (char *)__get_free_page(GFP_TEMPORARY);
399 if (!str) 396 if (!str)
400 goto done; 397 goto done;
401 398
402 seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE)); 399 seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
403 400
404 seq_printf(m, "Bogo MIPS : \t%lu.%02lu\n", 401 seq_printf(m, "Bogo MIPS : \t%lu.%02lu\n",
405 loops_per_jiffy / (500000 / HZ), 402 loops_per_jiffy / (500000 / HZ),
406 (loops_per_jiffy / (5000 / HZ)) % 100); 403 (loops_per_jiffy / (5000 / HZ)) % 100);
407 404
408 seq_printf(m, arc_mmu_mumbojumbo(cpu_id, str, PAGE_SIZE)); 405 seq_printf(m, arc_mmu_mumbojumbo(cpu_id, str, PAGE_SIZE));
409 406
410 seq_printf(m, arc_cache_mumbojumbo(cpu_id, str, PAGE_SIZE)); 407 seq_printf(m, arc_cache_mumbojumbo(cpu_id, str, PAGE_SIZE));
411 408
412 seq_printf(m, arc_extn_mumbojumbo(cpu_id, str, PAGE_SIZE)); 409 seq_printf(m, arc_extn_mumbojumbo(cpu_id, str, PAGE_SIZE));
413 410
414 #ifdef CONFIG_SMP 411 #ifdef CONFIG_SMP
415 seq_printf(m, arc_platform_smp_cpuinfo()); 412 seq_printf(m, arc_platform_smp_cpuinfo());
416 #endif 413 #endif
417 414
418 free_page((unsigned long)str); 415 free_page((unsigned long)str);
419 done: 416 done:
420 seq_printf(m, "\n\n"); 417 seq_printf(m, "\n\n");
421 418
422 return 0; 419 return 0;
423 } 420 }
424 421
425 static void *c_start(struct seq_file *m, loff_t *pos) 422 static void *c_start(struct seq_file *m, loff_t *pos)
426 { 423 {
427 /* 424 /*
428 * Callback returns cpu-id to iterator for show routine, NULL to stop. 425 * Callback returns cpu-id to iterator for show routine, NULL to stop.
429 * However since NULL is also a valid cpu-id (0), we use a round-about 426 * However since NULL is also a valid cpu-id (0), we use a round-about
430 * way to pass it w/o having to kmalloc/free a 2 byte string. 427 * way to pass it w/o having to kmalloc/free a 2 byte string.
431 * Encode cpu-id as 0xFFcccc, which is decoded by show routine. 428 * Encode cpu-id as 0xFFcccc, which is decoded by show routine.
432 */ 429 */
433 return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL; 430 return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL;
434 } 431 }
435 432
436 static void *c_next(struct seq_file *m, void *v, loff_t *pos) 433 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
437 { 434 {
438 ++*pos; 435 ++*pos;
439 return c_start(m, pos); 436 return c_start(m, pos);
440 } 437 }
441 438
/* Nothing to tear down: c_start() allocates no resources */
static void c_stop(struct seq_file *m, void *v)
{
}
445 442
/* seq_file iterator ops backing /proc/cpuinfo */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo
};
452 449
/* One device node per CPU, backing /sys/devices/system/cpu/cpuN */
static DEFINE_PER_CPU(struct cpu, cpu_topology);

/* Register every present CPU with the sysfs CPU subsystem */
static int __init topology_init(void)
{
	int cpu;

	for_each_present_cpu(cpu)
		register_cpu(&per_cpu(cpu_topology, cpu), cpu);

	return 0;
}

subsys_initcall(topology_init);
466 463