Commit d7b250e2a2d7f3cd23cf8d8d6689285e6f51a98d
1 parent: df7997ab1c
Exists in master and in 39 other branches
[S390] irq: merge irq.c and s390_ext.c
Merge irq.c and s390_ext.c into irq.c. That way all external interrupt
related functions are together.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Showing 17 changed files with 147 additions and 174 deletions
- arch/s390/include/asm/irq.h
- arch/s390/include/asm/s390_ext.h
- arch/s390/kernel/Makefile
- arch/s390/kernel/dis.c
- arch/s390/kernel/irq.c
- arch/s390/kernel/s390_ext.c
- arch/s390/kernel/smp.c
- arch/s390/kernel/time.c
- arch/s390/kernel/topology.c
- arch/s390/kernel/traps.c
- arch/s390/kernel/vtime.c
- arch/s390/mm/fault.c
- arch/s390/oprofile/hwsampler.c
- drivers/s390/block/dasd_diag.c
- drivers/s390/char/sclp.c
- drivers/s390/kvm/kvm_virtio.c
- net/iucv/iucv.c
arch/s390/include/asm/irq.h
1 | #ifndef _ASM_IRQ_H | 1 | #ifndef _ASM_IRQ_H |
2 | #define _ASM_IRQ_H | 2 | #define _ASM_IRQ_H |
3 | 3 | ||
4 | #include <linux/hardirq.h> | 4 | #include <linux/hardirq.h> |
5 | #include <linux/types.h> | ||
5 | 6 | ||
6 | enum interruption_class { | 7 | enum interruption_class { |
7 | EXTERNAL_INTERRUPT, | 8 | EXTERNAL_INTERRUPT, |
8 | IO_INTERRUPT, | 9 | IO_INTERRUPT, |
9 | EXTINT_CLK, | 10 | EXTINT_CLK, |
10 | EXTINT_IPI, | 11 | EXTINT_IPI, |
11 | EXTINT_TMR, | 12 | EXTINT_TMR, |
12 | EXTINT_TLA, | 13 | EXTINT_TLA, |
13 | EXTINT_PFL, | 14 | EXTINT_PFL, |
14 | EXTINT_DSD, | 15 | EXTINT_DSD, |
15 | EXTINT_VRT, | 16 | EXTINT_VRT, |
16 | EXTINT_SCP, | 17 | EXTINT_SCP, |
17 | EXTINT_IUC, | 18 | EXTINT_IUC, |
18 | EXTINT_CPM, | 19 | EXTINT_CPM, |
19 | IOINT_QAI, | 20 | IOINT_QAI, |
20 | IOINT_QDI, | 21 | IOINT_QDI, |
21 | IOINT_DAS, | 22 | IOINT_DAS, |
22 | IOINT_C15, | 23 | IOINT_C15, |
23 | IOINT_C70, | 24 | IOINT_C70, |
24 | IOINT_TAP, | 25 | IOINT_TAP, |
25 | IOINT_VMR, | 26 | IOINT_VMR, |
26 | IOINT_LCS, | 27 | IOINT_LCS, |
27 | IOINT_CLW, | 28 | IOINT_CLW, |
28 | IOINT_CTC, | 29 | IOINT_CTC, |
29 | IOINT_APB, | 30 | IOINT_APB, |
30 | NMI_NMI, | 31 | NMI_NMI, |
31 | NR_IRQS, | 32 | NR_IRQS, |
32 | }; | 33 | }; |
34 | |||
35 | typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long); | ||
36 | |||
37 | int register_external_interrupt(u16 code, ext_int_handler_t handler); | ||
38 | int unregister_external_interrupt(u16 code, ext_int_handler_t handler); | ||
39 | void service_subclass_irq_register(void); | ||
40 | void service_subclass_irq_unregister(void); | ||
33 | 41 | ||
34 | #endif /* _ASM_IRQ_H */ | 42 | #endif /* _ASM_IRQ_H */ |
35 | 43 |
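The calling interface moves from asm/s390_ext.h into asm/irq.h with __u16 becoming u16 (hence the new linux/types.h include), so consumers only need to switch headers. A minimal usage sketch against the declarations above; the interrupt code 0x1234 and all identifiers beginning with example_ are illustrative assumptions, not taken from this commit:

	#include <linux/init.h>
	#include <asm/irq.h>

	/* Hypothetical handler; invoked in interrupt context. */
	static void example_ext_handler(unsigned int ext_int_code,
					unsigned int param32, unsigned long param64)
	{
		/* keep the work short; defer anything heavy */
	}

	static int __init example_init(void)
	{
		/* 0x1234 is a made-up external interrupt code */
		return register_external_interrupt(0x1234, example_ext_handler);
	}

	static void __exit example_exit(void)
	{
		unregister_external_interrupt(0x1234, example_ext_handler);
	}

Sources driven by the service signal would additionally bracket the handler's lifetime with service_subclass_irq_register() and service_subclass_irq_unregister(), also declared above.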
arch/s390/include/asm/s390_ext.h
1 | /* | File was deleted | |
2 | * Copyright IBM Corp. 1999,2010 | ||
3 | * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>, | ||
4 | * Martin Schwidefsky <schwidefsky@de.ibm.com>, | ||
5 | */ | ||
6 | |||
7 | #ifndef _S390_EXTINT_H | ||
8 | #define _S390_EXTINT_H | ||
9 | |||
10 | #include <linux/types.h> | ||
11 | |||
12 | typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long); | ||
13 | |||
14 | int register_external_interrupt(__u16 code, ext_int_handler_t handler); | ||
15 | int unregister_external_interrupt(__u16 code, ext_int_handler_t handler); | ||
16 | void service_subclass_irq_register(void); | ||
17 | void service_subclass_irq_unregister(void); | ||
18 | |||
19 | #endif /* _S390_EXTINT_H */ | ||
20 |
arch/s390/kernel/Makefile
1 | # | 1 | # |
2 | # Makefile for the linux kernel. | 2 | # Makefile for the linux kernel. |
3 | # | 3 | # |
4 | 4 | ||
5 | ifdef CONFIG_FUNCTION_TRACER | 5 | ifdef CONFIG_FUNCTION_TRACER |
6 | # Don't trace early setup code and tracing code | 6 | # Don't trace early setup code and tracing code |
7 | CFLAGS_REMOVE_early.o = -pg | 7 | CFLAGS_REMOVE_early.o = -pg |
8 | CFLAGS_REMOVE_ftrace.o = -pg | 8 | CFLAGS_REMOVE_ftrace.o = -pg |
9 | endif | 9 | endif |
10 | 10 | ||
11 | # | 11 | # |
12 | # Passing null pointers is ok for smp code, since we access the lowcore here. | 12 | # Passing null pointers is ok for smp code, since we access the lowcore here. |
13 | # | 13 | # |
14 | CFLAGS_smp.o := -Wno-nonnull | 14 | CFLAGS_smp.o := -Wno-nonnull |
15 | 15 | ||
16 | # | 16 | # |
17 | # Pass UTS_MACHINE for user_regset definition | 17 | # Pass UTS_MACHINE for user_regset definition |
18 | # | 18 | # |
19 | CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' | 19 | CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' |
20 | 20 | ||
21 | CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w | 21 | CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w |
22 | 22 | ||
23 | obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ | 23 | obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \ |
24 | processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ | 24 | processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \ |
25 | s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ | 25 | debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \ |
26 | vdso.o vtime.o sysinfo.o nmi.o sclp.o jump_label.o | 26 | sysinfo.o jump_label.o |
27 | 27 | ||
28 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) | 28 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) |
29 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) | 29 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) |
30 | 30 | ||
31 | extra-y += head.o init_task.o vmlinux.lds | 31 | extra-y += head.o init_task.o vmlinux.lds |
32 | extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o) | 32 | extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o) |
33 | 33 | ||
34 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o | 34 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o |
35 | obj-$(CONFIG_SMP) += smp.o topology.o | 35 | obj-$(CONFIG_SMP) += smp.o topology.o |
36 | obj-$(CONFIG_SMP) += $(if $(CONFIG_64BIT),switch_cpu64.o, \ | 36 | obj-$(CONFIG_SMP) += $(if $(CONFIG_64BIT),switch_cpu64.o, \ |
37 | switch_cpu.o) | 37 | switch_cpu.o) |
38 | obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o | 38 | obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o |
39 | obj-$(CONFIG_AUDIT) += audit.o | 39 | obj-$(CONFIG_AUDIT) += audit.o |
40 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o | 40 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o |
41 | obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ | 41 | obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ |
42 | compat_wrapper.o compat_exec_domain.o \ | 42 | compat_wrapper.o compat_exec_domain.o \ |
43 | $(compat-obj-y) | 43 | $(compat-obj-y) |
44 | 44 | ||
45 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 45 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
46 | obj-$(CONFIG_KPROBES) += kprobes.o | 46 | obj-$(CONFIG_KPROBES) += kprobes.o |
47 | obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o) | 47 | obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o) |
48 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 48 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
49 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o | 49 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o |
50 | obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o | 50 | obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o |
51 | 51 | ||
52 | # Kexec part | 52 | # Kexec part |
53 | S390_KEXEC_OBJS := machine_kexec.o crash.o | 53 | S390_KEXEC_OBJS := machine_kexec.o crash.o |
54 | S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o) | 54 | S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o) |
55 | obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS) | 55 | obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS) |
56 | 56 | ||
57 | # vdso | 57 | # vdso |
58 | obj-$(CONFIG_64BIT) += vdso64/ | 58 | obj-$(CONFIG_64BIT) += vdso64/ |
59 | obj-$(CONFIG_32BIT) += vdso32/ | 59 | obj-$(CONFIG_32BIT) += vdso32/ |
60 | obj-$(CONFIG_COMPAT) += vdso32/ | 60 | obj-$(CONFIG_COMPAT) += vdso32/ |
61 | 61 |
arch/s390/kernel/dis.c
1 | /* | 1 | /* |
2 | * arch/s390/kernel/dis.c | 2 | * arch/s390/kernel/dis.c |
3 | * | 3 | * |
4 | * Disassemble s390 instructions. | 4 | * Disassemble s390 instructions. |
5 | * | 5 | * |
6 | * Copyright IBM Corp. 2007 | 6 | * Copyright IBM Corp. 2007 |
7 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | 7 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/string.h> | 12 | #include <linux/string.h> |
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <linux/ptrace.h> | 14 | #include <linux/ptrace.h> |
15 | #include <linux/timer.h> | 15 | #include <linux/timer.h> |
16 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
17 | #include <linux/smp.h> | 17 | #include <linux/smp.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/kallsyms.h> | 22 | #include <linux/kallsyms.h> |
23 | #include <linux/reboot.h> | 23 | #include <linux/reboot.h> |
24 | #include <linux/kprobes.h> | 24 | #include <linux/kprobes.h> |
25 | #include <linux/kdebug.h> | 25 | #include <linux/kdebug.h> |
26 | 26 | ||
27 | #include <asm/system.h> | 27 | #include <asm/system.h> |
28 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
29 | #include <asm/io.h> | 29 | #include <asm/io.h> |
30 | #include <asm/atomic.h> | 30 | #include <asm/atomic.h> |
31 | #include <asm/mathemu.h> | 31 | #include <asm/mathemu.h> |
32 | #include <asm/cpcmd.h> | 32 | #include <asm/cpcmd.h> |
33 | #include <asm/s390_ext.h> | ||
34 | #include <asm/lowcore.h> | 33 | #include <asm/lowcore.h> |
35 | #include <asm/debug.h> | 34 | #include <asm/debug.h> |
35 | #include <asm/irq.h> | ||
36 | 36 | ||
37 | #ifndef CONFIG_64BIT | 37 | #ifndef CONFIG_64BIT |
38 | #define ONELONG "%08lx: " | 38 | #define ONELONG "%08lx: " |
39 | #else /* CONFIG_64BIT */ | 39 | #else /* CONFIG_64BIT */ |
40 | #define ONELONG "%016lx: " | 40 | #define ONELONG "%016lx: " |
41 | #endif /* CONFIG_64BIT */ | 41 | #endif /* CONFIG_64BIT */ |
42 | 42 | ||
43 | #define OPERAND_GPR 0x1 /* Operand printed as %rx */ | 43 | #define OPERAND_GPR 0x1 /* Operand printed as %rx */ |
44 | #define OPERAND_FPR 0x2 /* Operand printed as %fx */ | 44 | #define OPERAND_FPR 0x2 /* Operand printed as %fx */ |
45 | #define OPERAND_AR 0x4 /* Operand printed as %ax */ | 45 | #define OPERAND_AR 0x4 /* Operand printed as %ax */ |
46 | #define OPERAND_CR 0x8 /* Operand printed as %cx */ | 46 | #define OPERAND_CR 0x8 /* Operand printed as %cx */ |
47 | #define OPERAND_DISP 0x10 /* Operand printed as displacement */ | 47 | #define OPERAND_DISP 0x10 /* Operand printed as displacement */ |
48 | #define OPERAND_BASE 0x20 /* Operand printed as base register */ | 48 | #define OPERAND_BASE 0x20 /* Operand printed as base register */ |
49 | #define OPERAND_INDEX 0x40 /* Operand printed as index register */ | 49 | #define OPERAND_INDEX 0x40 /* Operand printed as index register */ |
50 | #define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */ | 50 | #define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */ |
51 | #define OPERAND_SIGNED 0x100 /* Operand printed as signed value */ | 51 | #define OPERAND_SIGNED 0x100 /* Operand printed as signed value */ |
52 | #define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */ | 52 | #define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */ |
53 | 53 | ||
54 | enum { | 54 | enum { |
55 | UNUSED, /* Indicates the end of the operand list */ | 55 | UNUSED, /* Indicates the end of the operand list */ |
56 | R_8, /* GPR starting at position 8 */ | 56 | R_8, /* GPR starting at position 8 */ |
57 | R_12, /* GPR starting at position 12 */ | 57 | R_12, /* GPR starting at position 12 */ |
58 | R_16, /* GPR starting at position 16 */ | 58 | R_16, /* GPR starting at position 16 */ |
59 | R_20, /* GPR starting at position 20 */ | 59 | R_20, /* GPR starting at position 20 */ |
60 | R_24, /* GPR starting at position 24 */ | 60 | R_24, /* GPR starting at position 24 */ |
61 | R_28, /* GPR starting at position 28 */ | 61 | R_28, /* GPR starting at position 28 */ |
62 | R_32, /* GPR starting at position 32 */ | 62 | R_32, /* GPR starting at position 32 */ |
63 | F_8, /* FPR starting at position 8 */ | 63 | F_8, /* FPR starting at position 8 */ |
64 | F_12, /* FPR starting at position 12 */ | 64 | F_12, /* FPR starting at position 12 */ |
65 | F_16, /* FPR starting at position 16 */ | 65 | F_16, /* FPR starting at position 16 */ |
66 | F_20, /* FPR starting at position 16 */ | 66 | F_20, /* FPR starting at position 16 */ |
67 | F_24, /* FPR starting at position 24 */ | 67 | F_24, /* FPR starting at position 24 */ |
68 | F_28, /* FPR starting at position 28 */ | 68 | F_28, /* FPR starting at position 28 */ |
69 | F_32, /* FPR starting at position 32 */ | 69 | F_32, /* FPR starting at position 32 */ |
70 | A_8, /* Access reg. starting at position 8 */ | 70 | A_8, /* Access reg. starting at position 8 */ |
71 | A_12, /* Access reg. starting at position 12 */ | 71 | A_12, /* Access reg. starting at position 12 */ |
72 | A_24, /* Access reg. starting at position 24 */ | 72 | A_24, /* Access reg. starting at position 24 */ |
73 | A_28, /* Access reg. starting at position 28 */ | 73 | A_28, /* Access reg. starting at position 28 */ |
74 | C_8, /* Control reg. starting at position 8 */ | 74 | C_8, /* Control reg. starting at position 8 */ |
75 | C_12, /* Control reg. starting at position 12 */ | 75 | C_12, /* Control reg. starting at position 12 */ |
76 | B_16, /* Base register starting at position 16 */ | 76 | B_16, /* Base register starting at position 16 */ |
77 | B_32, /* Base register starting at position 32 */ | 77 | B_32, /* Base register starting at position 32 */ |
78 | X_12, /* Index register starting at position 12 */ | 78 | X_12, /* Index register starting at position 12 */ |
79 | D_20, /* Displacement starting at position 20 */ | 79 | D_20, /* Displacement starting at position 20 */ |
80 | D_36, /* Displacement starting at position 36 */ | 80 | D_36, /* Displacement starting at position 36 */ |
81 | D20_20, /* 20 bit displacement starting at 20 */ | 81 | D20_20, /* 20 bit displacement starting at 20 */ |
82 | L4_8, /* 4 bit length starting at position 8 */ | 82 | L4_8, /* 4 bit length starting at position 8 */ |
83 | L4_12, /* 4 bit length starting at position 12 */ | 83 | L4_12, /* 4 bit length starting at position 12 */ |
84 | L8_8, /* 8 bit length starting at position 8 */ | 84 | L8_8, /* 8 bit length starting at position 8 */ |
85 | U4_8, /* 4 bit unsigned value starting at 8 */ | 85 | U4_8, /* 4 bit unsigned value starting at 8 */ |
86 | U4_12, /* 4 bit unsigned value starting at 12 */ | 86 | U4_12, /* 4 bit unsigned value starting at 12 */ |
87 | U4_16, /* 4 bit unsigned value starting at 16 */ | 87 | U4_16, /* 4 bit unsigned value starting at 16 */ |
88 | U4_20, /* 4 bit unsigned value starting at 20 */ | 88 | U4_20, /* 4 bit unsigned value starting at 20 */ |
89 | U4_32, /* 4 bit unsigned value starting at 32 */ | 89 | U4_32, /* 4 bit unsigned value starting at 32 */ |
90 | U8_8, /* 8 bit unsigned value starting at 8 */ | 90 | U8_8, /* 8 bit unsigned value starting at 8 */ |
91 | U8_16, /* 8 bit unsigned value starting at 16 */ | 91 | U8_16, /* 8 bit unsigned value starting at 16 */ |
92 | U8_24, /* 8 bit unsigned value starting at 24 */ | 92 | U8_24, /* 8 bit unsigned value starting at 24 */ |
93 | U8_32, /* 8 bit unsigned value starting at 32 */ | 93 | U8_32, /* 8 bit unsigned value starting at 32 */ |
94 | I8_8, /* 8 bit signed value starting at 8 */ | 94 | I8_8, /* 8 bit signed value starting at 8 */ |
95 | I8_32, /* 8 bit signed value starting at 32 */ | 95 | I8_32, /* 8 bit signed value starting at 32 */ |
96 | I16_16, /* 16 bit signed value starting at 16 */ | 96 | I16_16, /* 16 bit signed value starting at 16 */ |
97 | I16_32, /* 32 bit signed value starting at 16 */ | 97 | I16_32, /* 32 bit signed value starting at 16 */ |
98 | U16_16, /* 16 bit unsigned value starting at 16 */ | 98 | U16_16, /* 16 bit unsigned value starting at 16 */ |
99 | U16_32, /* 32 bit unsigned value starting at 16 */ | 99 | U16_32, /* 32 bit unsigned value starting at 16 */ |
100 | J16_16, /* PC relative jump offset at 16 */ | 100 | J16_16, /* PC relative jump offset at 16 */ |
101 | J32_16, /* PC relative long offset at 16 */ | 101 | J32_16, /* PC relative long offset at 16 */ |
102 | I32_16, /* 32 bit signed value starting at 16 */ | 102 | I32_16, /* 32 bit signed value starting at 16 */ |
103 | U32_16, /* 32 bit unsigned value starting at 16 */ | 103 | U32_16, /* 32 bit unsigned value starting at 16 */ |
104 | M_16, /* 4 bit optional mask starting at 16 */ | 104 | M_16, /* 4 bit optional mask starting at 16 */ |
105 | RO_28, /* optional GPR starting at position 28 */ | 105 | RO_28, /* optional GPR starting at position 28 */ |
106 | }; | 106 | }; |
107 | 107 | ||
108 | /* | 108 | /* |
109 | * Enumeration of the different instruction formats. | 109 | * Enumeration of the different instruction formats. |
110 | * For details consult the principles of operation. | 110 | * For details consult the principles of operation. |
111 | */ | 111 | */ |
112 | enum { | 112 | enum { |
113 | INSTR_INVALID, | 113 | INSTR_INVALID, |
114 | INSTR_E, | 114 | INSTR_E, |
115 | INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU, | 115 | INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU, |
116 | INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU, INSTR_RIE_RRI0, | 116 | INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU, INSTR_RIE_RRI0, |
117 | INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP, | 117 | INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP, |
118 | INSTR_RIS_R0RDU, INSTR_RIS_R0UU, INSTR_RIS_RURDI, INSTR_RIS_RURDU, | 118 | INSTR_RIS_R0RDU, INSTR_RIS_R0UU, INSTR_RIS_RURDI, INSTR_RIS_RURDU, |
119 | INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP, | 119 | INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP, |
120 | INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0, | 120 | INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0, |
121 | INSTR_RRE_FF, INSTR_RRE_FR, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, | 121 | INSTR_RRE_FF, INSTR_RRE_FR, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, |
122 | INSTR_RRE_RR, INSTR_RRE_RR_OPT, | 122 | INSTR_RRE_RR, INSTR_RRE_RR_OPT, |
123 | INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR, | 123 | INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR, |
124 | INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_M0RR, INSTR_RRF_R0RR, | 124 | INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_M0RR, INSTR_RRF_R0RR, |
125 | INSTR_RRF_R0RR2, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, | 125 | INSTR_RRF_R0RR2, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, |
126 | INSTR_RRF_U0RR, INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU, | 126 | INSTR_RRF_U0RR, INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU, |
127 | INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR, | 127 | INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR, |
128 | INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, | 128 | INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, |
129 | INSTR_RSI_RRP, | 129 | INSTR_RSI_RRP, |
130 | INSTR_RSL_R0RD, | 130 | INSTR_RSL_R0RD, |
131 | INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD, | 131 | INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD, |
132 | INSTR_RSY_RDRM, | 132 | INSTR_RSY_RDRM, |
133 | INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD, | 133 | INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD, |
134 | INSTR_RS_RURD, | 134 | INSTR_RS_RURD, |
135 | INSTR_RXE_FRRD, INSTR_RXE_RRRD, | 135 | INSTR_RXE_FRRD, INSTR_RXE_RRRD, |
136 | INSTR_RXF_FRRDF, | 136 | INSTR_RXF_FRRDF, |
137 | INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RXY_URRD, | 137 | INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RXY_URRD, |
138 | INSTR_RX_FRRD, INSTR_RX_RRRD, INSTR_RX_URRD, | 138 | INSTR_RX_FRRD, INSTR_RX_RRRD, INSTR_RX_URRD, |
139 | INSTR_SIL_RDI, INSTR_SIL_RDU, | 139 | INSTR_SIL_RDI, INSTR_SIL_RDU, |
140 | INSTR_SIY_IRD, INSTR_SIY_URD, | 140 | INSTR_SIY_IRD, INSTR_SIY_URD, |
141 | INSTR_SI_URD, | 141 | INSTR_SI_URD, |
142 | INSTR_SSE_RDRD, | 142 | INSTR_SSE_RDRD, |
143 | INSTR_SSF_RRDRD, INSTR_SSF_RRDRD2, | 143 | INSTR_SSF_RRDRD, INSTR_SSF_RRDRD2, |
144 | INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, | 144 | INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, |
145 | INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3, | 145 | INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3, |
146 | INSTR_S_00, INSTR_S_RD, | 146 | INSTR_S_00, INSTR_S_RD, |
147 | }; | 147 | }; |
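Besides selecting a format, the first opcode byte fixes the total instruction length via its two leftmost bits: 00 means 2 bytes, 01 and 10 mean 4 bytes, 11 means 6 bytes. dis.c encodes that rule as a small arithmetic shortcut, roughly:

	/*
	 * Length from the two leftmost opcode bits:
	 * 0x00-0x3f -> 2, 0x40-0xbf -> 4, 0xc0-0xff -> 6.
	 */
	static inline int insn_length(unsigned char code)
	{
		return ((((int) code + 64) >> 7) + 1) << 1;
	}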
148 | 148 | ||
149 | struct operand { | 149 | struct operand { |
150 | int bits; /* The number of bits in the operand. */ | 150 | int bits; /* The number of bits in the operand. */ |
151 | int shift; /* The number of bits to shift. */ | 151 | int shift; /* The number of bits to shift. */ |
152 | int flags; /* One bit syntax flags. */ | 152 | int flags; /* One bit syntax flags. */ |
153 | }; | 153 | }; |
154 | 154 | ||
155 | struct insn { | 155 | struct insn { |
156 | const char name[5]; | 156 | const char name[5]; |
157 | unsigned char opfrag; | 157 | unsigned char opfrag; |
158 | unsigned char format; | 158 | unsigned char format; |
159 | }; | 159 | }; |
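struct operand describes each field as a width ('bits') plus a bit offset from the start of the instruction ('shift'). A sketch of the extraction step; extract_field() is a hypothetical name, and the sign extension and pc-relative/length adjustments the real decoder applies afterwards are omitted:

	static unsigned int extract_field(const unsigned char *code,
					  const struct operand *operand)
	{
		unsigned int val = 0;
		int bits = (operand->shift & 7) + operand->bits;

		code += operand->shift / 8;	/* first byte holding the field */
		do {
			val = (val << 8) | *code++;
			bits -= 8;
		} while (bits > 0);
		val >>= -bits;			/* drop bits trailing the field */
		return val & (((1U << (operand->bits - 1)) << 1) - 1);
	}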
160 | 160 | ||
161 | static const struct operand operands[] = | 161 | static const struct operand operands[] = |
162 | { | 162 | { |
163 | [UNUSED] = { 0, 0, 0 }, | 163 | [UNUSED] = { 0, 0, 0 }, |
164 | [R_8] = { 4, 8, OPERAND_GPR }, | 164 | [R_8] = { 4, 8, OPERAND_GPR }, |
165 | [R_12] = { 4, 12, OPERAND_GPR }, | 165 | [R_12] = { 4, 12, OPERAND_GPR }, |
166 | [R_16] = { 4, 16, OPERAND_GPR }, | 166 | [R_16] = { 4, 16, OPERAND_GPR }, |
167 | [R_20] = { 4, 20, OPERAND_GPR }, | 167 | [R_20] = { 4, 20, OPERAND_GPR }, |
168 | [R_24] = { 4, 24, OPERAND_GPR }, | 168 | [R_24] = { 4, 24, OPERAND_GPR }, |
169 | [R_28] = { 4, 28, OPERAND_GPR }, | 169 | [R_28] = { 4, 28, OPERAND_GPR }, |
170 | [R_32] = { 4, 32, OPERAND_GPR }, | 170 | [R_32] = { 4, 32, OPERAND_GPR }, |
171 | [F_8] = { 4, 8, OPERAND_FPR }, | 171 | [F_8] = { 4, 8, OPERAND_FPR }, |
172 | [F_12] = { 4, 12, OPERAND_FPR }, | 172 | [F_12] = { 4, 12, OPERAND_FPR }, |
173 | [F_16] = { 4, 16, OPERAND_FPR }, | 173 | [F_16] = { 4, 16, OPERAND_FPR }, |
174 | [F_20] = { 4, 16, OPERAND_FPR }, | 174 | [F_20] = { 4, 16, OPERAND_FPR }, |
175 | [F_24] = { 4, 24, OPERAND_FPR }, | 175 | [F_24] = { 4, 24, OPERAND_FPR }, |
176 | [F_28] = { 4, 28, OPERAND_FPR }, | 176 | [F_28] = { 4, 28, OPERAND_FPR }, |
177 | [F_32] = { 4, 32, OPERAND_FPR }, | 177 | [F_32] = { 4, 32, OPERAND_FPR }, |
178 | [A_8] = { 4, 8, OPERAND_AR }, | 178 | [A_8] = { 4, 8, OPERAND_AR }, |
179 | [A_12] = { 4, 12, OPERAND_AR }, | 179 | [A_12] = { 4, 12, OPERAND_AR }, |
180 | [A_24] = { 4, 24, OPERAND_AR }, | 180 | [A_24] = { 4, 24, OPERAND_AR }, |
181 | [A_28] = { 4, 28, OPERAND_AR }, | 181 | [A_28] = { 4, 28, OPERAND_AR }, |
182 | [C_8] = { 4, 8, OPERAND_CR }, | 182 | [C_8] = { 4, 8, OPERAND_CR }, |
183 | [C_12] = { 4, 12, OPERAND_CR }, | 183 | [C_12] = { 4, 12, OPERAND_CR }, |
184 | [B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR }, | 184 | [B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR }, |
185 | [B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR }, | 185 | [B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR }, |
186 | [X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR }, | 186 | [X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR }, |
187 | [D_20] = { 12, 20, OPERAND_DISP }, | 187 | [D_20] = { 12, 20, OPERAND_DISP }, |
188 | [D_36] = { 12, 36, OPERAND_DISP }, | 188 | [D_36] = { 12, 36, OPERAND_DISP }, |
189 | [D20_20] = { 20, 20, OPERAND_DISP | OPERAND_SIGNED }, | 189 | [D20_20] = { 20, 20, OPERAND_DISP | OPERAND_SIGNED }, |
190 | [L4_8] = { 4, 8, OPERAND_LENGTH }, | 190 | [L4_8] = { 4, 8, OPERAND_LENGTH }, |
191 | [L4_12] = { 4, 12, OPERAND_LENGTH }, | 191 | [L4_12] = { 4, 12, OPERAND_LENGTH }, |
192 | [L8_8] = { 8, 8, OPERAND_LENGTH }, | 192 | [L8_8] = { 8, 8, OPERAND_LENGTH }, |
193 | [U4_8] = { 4, 8, 0 }, | 193 | [U4_8] = { 4, 8, 0 }, |
194 | [U4_12] = { 4, 12, 0 }, | 194 | [U4_12] = { 4, 12, 0 }, |
195 | [U4_16] = { 4, 16, 0 }, | 195 | [U4_16] = { 4, 16, 0 }, |
196 | [U4_20] = { 4, 20, 0 }, | 196 | [U4_20] = { 4, 20, 0 }, |
197 | [U4_32] = { 4, 32, 0 }, | 197 | [U4_32] = { 4, 32, 0 }, |
198 | [U8_8] = { 8, 8, 0 }, | 198 | [U8_8] = { 8, 8, 0 }, |
199 | [U8_16] = { 8, 16, 0 }, | 199 | [U8_16] = { 8, 16, 0 }, |
200 | [U8_24] = { 8, 24, 0 }, | 200 | [U8_24] = { 8, 24, 0 }, |
201 | [U8_32] = { 8, 32, 0 }, | 201 | [U8_32] = { 8, 32, 0 }, |
202 | [I16_16] = { 16, 16, OPERAND_SIGNED }, | 202 | [I16_16] = { 16, 16, OPERAND_SIGNED }, |
203 | [U16_16] = { 16, 16, 0 }, | 203 | [U16_16] = { 16, 16, 0 }, |
204 | [U16_32] = { 16, 32, 0 }, | 204 | [U16_32] = { 16, 32, 0 }, |
205 | [J16_16] = { 16, 16, OPERAND_PCREL }, | 205 | [J16_16] = { 16, 16, OPERAND_PCREL }, |
206 | [I16_32] = { 16, 32, OPERAND_SIGNED }, | 206 | [I16_32] = { 16, 32, OPERAND_SIGNED }, |
207 | [J32_16] = { 32, 16, OPERAND_PCREL }, | 207 | [J32_16] = { 32, 16, OPERAND_PCREL }, |
208 | [I32_16] = { 32, 16, OPERAND_SIGNED }, | 208 | [I32_16] = { 32, 16, OPERAND_SIGNED }, |
209 | [U32_16] = { 32, 16, 0 }, | 209 | [U32_16] = { 32, 16, 0 }, |
210 | [M_16] = { 4, 16, 0 }, | 210 | [M_16] = { 4, 16, 0 }, |
211 | [RO_28] = { 4, 28, OPERAND_GPR } | 211 | [RO_28] = { 4, 28, OPERAND_GPR } |
212 | }; | 212 | }; |
213 | 213 | ||
214 | static const unsigned char formats[][7] = { | 214 | static const unsigned char formats[][7] = { |
215 | [INSTR_E] = { 0xff, 0,0,0,0,0,0 }, | 215 | [INSTR_E] = { 0xff, 0,0,0,0,0,0 }, |
216 | [INSTR_RIE_R0UU] = { 0xff, R_8,U16_16,U4_32,0,0,0 }, | 216 | [INSTR_RIE_R0UU] = { 0xff, R_8,U16_16,U4_32,0,0,0 }, |
217 | [INSTR_RIE_RRPU] = { 0xff, R_8,R_12,U4_32,J16_16,0,0 }, | 217 | [INSTR_RIE_RRPU] = { 0xff, R_8,R_12,U4_32,J16_16,0,0 }, |
218 | [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, | 218 | [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, |
219 | [INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 }, | 219 | [INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 }, |
220 | [INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 }, | 220 | [INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 }, |
221 | [INSTR_RIE_RRI0] = { 0xff, R_8,R_12,I16_16,0,0,0 }, | 221 | [INSTR_RIE_RRI0] = { 0xff, R_8,R_12,I16_16,0,0,0 }, |
222 | [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, | 222 | [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, |
223 | [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, | 223 | [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, |
224 | [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, | 224 | [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, |
225 | [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 }, | 225 | [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 }, |
226 | [INSTR_RIS_R0RDU] = { 0xff, R_8,U8_32,D_20,B_16,0,0 }, | 226 | [INSTR_RIS_R0RDU] = { 0xff, R_8,U8_32,D_20,B_16,0,0 }, |
227 | [INSTR_RIS_RURDI] = { 0xff, R_8,I8_32,U4_12,D_20,B_16,0 }, | 227 | [INSTR_RIS_RURDI] = { 0xff, R_8,I8_32,U4_12,D_20,B_16,0 }, |
228 | [INSTR_RIS_RURDU] = { 0xff, R_8,U8_32,U4_12,D_20,B_16,0 }, | 228 | [INSTR_RIS_RURDU] = { 0xff, R_8,U8_32,U4_12,D_20,B_16,0 }, |
229 | [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 }, | 229 | [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 }, |
230 | [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 }, | 230 | [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 }, |
231 | [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 }, | 231 | [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 }, |
232 | [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 }, | 232 | [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 }, |
233 | [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 }, | 233 | [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 }, |
234 | [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 }, | 234 | [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 }, |
235 | [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 }, | 235 | [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 }, |
236 | [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 }, | 236 | [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 }, |
237 | [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 }, | 237 | [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 }, |
238 | [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 }, | 238 | [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 }, |
239 | [INSTR_RRE_FR] = { 0xff, F_24,R_28,0,0,0,0 }, | 239 | [INSTR_RRE_FR] = { 0xff, F_24,R_28,0,0,0,0 }, |
240 | [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 }, | 240 | [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 }, |
241 | [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 }, | 241 | [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 }, |
242 | [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 }, | 242 | [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 }, |
243 | [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 }, | 243 | [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 }, |
244 | [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 }, | 244 | [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 }, |
245 | [INSTR_RRF_0UFF] = { 0xff, F_24,F_28,U4_20,0,0,0 }, | 245 | [INSTR_RRF_0UFF] = { 0xff, F_24,F_28,U4_20,0,0,0 }, |
246 | [INSTR_RRF_F0FF2] = { 0xff, F_24,F_16,F_28,0,0,0 }, | 246 | [INSTR_RRF_F0FF2] = { 0xff, F_24,F_16,F_28,0,0,0 }, |
247 | [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, | 247 | [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, |
248 | [INSTR_RRF_F0FR] = { 0xff, F_24,F_16,R_28,0,0,0 }, | 248 | [INSTR_RRF_F0FR] = { 0xff, F_24,F_16,R_28,0,0,0 }, |
249 | [INSTR_RRF_FFRU] = { 0xff, F_24,F_16,R_28,U4_20,0,0 }, | 249 | [INSTR_RRF_FFRU] = { 0xff, F_24,F_16,R_28,U4_20,0,0 }, |
250 | [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 }, | 250 | [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 }, |
251 | [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, | 251 | [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, |
252 | [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 }, | 252 | [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 }, |
253 | [INSTR_RRF_R0RR2] = { 0xff, R_24,R_28,R_16,0,0,0 }, | 253 | [INSTR_RRF_R0RR2] = { 0xff, R_24,R_28,R_16,0,0,0 }, |
254 | [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 }, | 254 | [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 }, |
255 | [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, | 255 | [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, |
256 | [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, | 256 | [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, |
257 | [INSTR_RRF_U0RR] = { 0xff, R_24,R_28,U4_16,0,0,0 }, | 257 | [INSTR_RRF_U0RR] = { 0xff, R_24,R_28,U4_16,0,0,0 }, |
258 | [INSTR_RRF_UUFF] = { 0xff, F_24,U4_16,F_28,U4_20,0,0 }, | 258 | [INSTR_RRF_UUFF] = { 0xff, F_24,U4_16,F_28,U4_20,0,0 }, |
259 | [INSTR_RRR_F0FF] = { 0xff, F_24,F_28,F_16,0,0,0 }, | 259 | [INSTR_RRR_F0FF] = { 0xff, F_24,F_28,F_16,0,0,0 }, |
260 | [INSTR_RRS_RRRDU] = { 0xff, R_8,R_12,U4_32,D_20,B_16,0 }, | 260 | [INSTR_RRS_RRRDU] = { 0xff, R_8,R_12,U4_32,D_20,B_16,0 }, |
261 | [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, | 261 | [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, |
262 | [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 }, | 262 | [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 }, |
263 | [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 }, | 263 | [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 }, |
264 | [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 }, | 264 | [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 }, |
265 | [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 }, | 265 | [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 }, |
266 | [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, | 266 | [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, |
267 | [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, | 267 | [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, |
268 | [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, | 268 | [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, |
269 | [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, | 269 | [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, |
270 | [INSTR_RSL_R0RD] = { 0xff, D_20,L4_8,B_16,0,0,0 }, | 270 | [INSTR_RSL_R0RD] = { 0xff, D_20,L4_8,B_16,0,0,0 }, |
271 | [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 }, | 271 | [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 }, |
272 | [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 }, | 272 | [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 }, |
273 | [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 }, | 273 | [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 }, |
274 | [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, | 274 | [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, |
275 | [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 }, | 275 | [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 }, |
276 | [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, | 276 | [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, |
277 | [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, | 277 | [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, |
278 | [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, | 278 | [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, |
279 | [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, | 279 | [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, |
280 | [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, | 280 | [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, |
281 | [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, | 281 | [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, |
282 | [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, | 282 | [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, |
283 | [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 }, | 283 | [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 }, |
284 | [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 }, | 284 | [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 }, |
285 | [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 }, | 285 | [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 }, |
286 | [INSTR_RXY_URRD] = { 0xff, U4_8,D20_20,X_12,B_16,0,0 }, | 286 | [INSTR_RXY_URRD] = { 0xff, U4_8,D20_20,X_12,B_16,0,0 }, |
287 | [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, | 287 | [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, |
288 | [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, | 288 | [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, |
289 | [INSTR_RX_URRD] = { 0xff, U4_8,D_20,X_12,B_16,0,0 }, | 289 | [INSTR_RX_URRD] = { 0xff, U4_8,D_20,X_12,B_16,0,0 }, |
290 | [INSTR_SIL_RDI] = { 0xff, D_20,B_16,I16_32,0,0,0 }, | 290 | [INSTR_SIL_RDI] = { 0xff, D_20,B_16,I16_32,0,0,0 }, |
291 | [INSTR_SIL_RDU] = { 0xff, D_20,B_16,U16_32,0,0,0 }, | 291 | [INSTR_SIL_RDU] = { 0xff, D_20,B_16,U16_32,0,0,0 }, |
292 | [INSTR_SIY_IRD] = { 0xff, D20_20,B_16,I8_8,0,0,0 }, | 292 | [INSTR_SIY_IRD] = { 0xff, D20_20,B_16,I8_8,0,0,0 }, |
293 | [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, | 293 | [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, |
294 | [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 }, | 294 | [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 }, |
295 | [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, | 295 | [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, |
296 | [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 }, | 296 | [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 }, |
297 | [INSTR_SSF_RRDRD2]= { 0x00, R_8,D_20,B_16,D_36,B_32,0 }, | 297 | [INSTR_SSF_RRDRD2]= { 0x00, R_8,D_20,B_16,D_36,B_32,0 }, |
298 | [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 }, | 298 | [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 }, |
299 | [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 }, | 299 | [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 }, |
300 | [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 }, | 300 | [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 }, |
301 | [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 }, | 301 | [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 }, |
302 | [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 }, | 302 | [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 }, |
303 | [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 }, | 303 | [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 }, |
304 | [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, | 304 | [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, |
305 | [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, | 305 | [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, |
306 | }; | 306 | }; |
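Each formats[] row starts with the opcode-fragment mask, followed by up to six operands[] indices terminated by UNUSED (0); rows using all six slots have no terminator, so the walk must also be bounded. A hedged sketch of how a decoder might walk one row, reusing the hypothetical extract_field() from above:

	static void print_operands(const struct insn *insn, unsigned char *code)
	{
		const unsigned char *ops = formats[insn->format] + 1;
		int i;

		for (i = 0; i < 6 && ops[i] != UNUSED; i++) {
			const struct operand *op = &operands[ops[i]];
			unsigned int val = extract_field(code, op);

			/* the real code switches on op->flags to print
			   %r/%f/%a/%c prefixes, signed values, lengths
			   and pc-relative targets */
			printk(" %u", val);
		}
	}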
307 | 307 | ||
308 | enum { | 308 | enum { |
309 | LONG_INSN_ALGHSIK, | 309 | LONG_INSN_ALGHSIK, |
310 | LONG_INSN_ALHSIK, | 310 | LONG_INSN_ALHSIK, |
311 | LONG_INSN_CLFHSI, | 311 | LONG_INSN_CLFHSI, |
312 | LONG_INSN_CLGFRL, | 312 | LONG_INSN_CLGFRL, |
313 | LONG_INSN_CLGHRL, | 313 | LONG_INSN_CLGHRL, |
314 | LONG_INSN_CLGHSI, | 314 | LONG_INSN_CLGHSI, |
315 | LONG_INSN_CLHHSI, | 315 | LONG_INSN_CLHHSI, |
316 | LONG_INSN_LLGFRL, | 316 | LONG_INSN_LLGFRL, |
317 | LONG_INSN_LLGHRL, | 317 | LONG_INSN_LLGHRL, |
318 | LONG_INSN_POPCNT, | 318 | LONG_INSN_POPCNT, |
319 | LONG_INSN_RISBHG, | 319 | LONG_INSN_RISBHG, |
320 | LONG_INSN_RISBLG, | 320 | LONG_INSN_RISBLG, |
321 | }; | 321 | }; |
322 | 322 | ||
323 | static char *long_insn_name[] = { | 323 | static char *long_insn_name[] = { |
324 | [LONG_INSN_ALGHSIK] = "alghsik", | 324 | [LONG_INSN_ALGHSIK] = "alghsik", |
325 | [LONG_INSN_ALHSIK] = "alhsik", | 325 | [LONG_INSN_ALHSIK] = "alhsik", |
326 | [LONG_INSN_CLFHSI] = "clfhsi", | 326 | [LONG_INSN_CLFHSI] = "clfhsi", |
327 | [LONG_INSN_CLGFRL] = "clgfrl", | 327 | [LONG_INSN_CLGFRL] = "clgfrl", |
328 | [LONG_INSN_CLGHRL] = "clghrl", | 328 | [LONG_INSN_CLGHRL] = "clghrl", |
329 | [LONG_INSN_CLGHSI] = "clghsi", | 329 | [LONG_INSN_CLGHSI] = "clghsi", |
330 | [LONG_INSN_CLHHSI] = "clhhsi", | 330 | [LONG_INSN_CLHHSI] = "clhhsi", |
331 | [LONG_INSN_LLGFRL] = "llgfrl", | 331 | [LONG_INSN_LLGFRL] = "llgfrl", |
332 | [LONG_INSN_LLGHRL] = "llghrl", | 332 | [LONG_INSN_LLGHRL] = "llghrl", |
333 | [LONG_INSN_POPCNT] = "popcnt", | 333 | [LONG_INSN_POPCNT] = "popcnt", |
334 | [LONG_INSN_RISBHG] = "risbhg", | 334 | [LONG_INSN_RISBHG] = "risbhg", |
335 | [LONG_INSN_RISBLG] = "risblk", | 335 | [LONG_INSN_RISBLG] = "risblk", |
336 | }; | 336 | }; |
337 | 337 | ||
338 | static struct insn opcode[] = { | 338 | static struct insn opcode[] = { |
339 | #ifdef CONFIG_64BIT | 339 | #ifdef CONFIG_64BIT |
340 | { "lmd", 0xef, INSTR_SS_RRRDRD3 }, | 340 | { "lmd", 0xef, INSTR_SS_RRRDRD3 }, |
341 | #endif | 341 | #endif |
342 | { "spm", 0x04, INSTR_RR_R0 }, | 342 | { "spm", 0x04, INSTR_RR_R0 }, |
343 | { "balr", 0x05, INSTR_RR_RR }, | 343 | { "balr", 0x05, INSTR_RR_RR }, |
344 | { "bctr", 0x06, INSTR_RR_RR }, | 344 | { "bctr", 0x06, INSTR_RR_RR }, |
345 | { "bcr", 0x07, INSTR_RR_UR }, | 345 | { "bcr", 0x07, INSTR_RR_UR }, |
346 | { "svc", 0x0a, INSTR_RR_U0 }, | 346 | { "svc", 0x0a, INSTR_RR_U0 }, |
347 | { "bsm", 0x0b, INSTR_RR_RR }, | 347 | { "bsm", 0x0b, INSTR_RR_RR }, |
348 | { "bassm", 0x0c, INSTR_RR_RR }, | 348 | { "bassm", 0x0c, INSTR_RR_RR }, |
349 | { "basr", 0x0d, INSTR_RR_RR }, | 349 | { "basr", 0x0d, INSTR_RR_RR }, |
350 | { "mvcl", 0x0e, INSTR_RR_RR }, | 350 | { "mvcl", 0x0e, INSTR_RR_RR }, |
351 | { "clcl", 0x0f, INSTR_RR_RR }, | 351 | { "clcl", 0x0f, INSTR_RR_RR }, |
352 | { "lpr", 0x10, INSTR_RR_RR }, | 352 | { "lpr", 0x10, INSTR_RR_RR }, |
353 | { "lnr", 0x11, INSTR_RR_RR }, | 353 | { "lnr", 0x11, INSTR_RR_RR }, |
354 | { "ltr", 0x12, INSTR_RR_RR }, | 354 | { "ltr", 0x12, INSTR_RR_RR }, |
355 | { "lcr", 0x13, INSTR_RR_RR }, | 355 | { "lcr", 0x13, INSTR_RR_RR }, |
356 | { "nr", 0x14, INSTR_RR_RR }, | 356 | { "nr", 0x14, INSTR_RR_RR }, |
357 | { "clr", 0x15, INSTR_RR_RR }, | 357 | { "clr", 0x15, INSTR_RR_RR }, |
358 | { "or", 0x16, INSTR_RR_RR }, | 358 | { "or", 0x16, INSTR_RR_RR }, |
359 | { "xr", 0x17, INSTR_RR_RR }, | 359 | { "xr", 0x17, INSTR_RR_RR }, |
360 | { "lr", 0x18, INSTR_RR_RR }, | 360 | { "lr", 0x18, INSTR_RR_RR }, |
361 | { "cr", 0x19, INSTR_RR_RR }, | 361 | { "cr", 0x19, INSTR_RR_RR }, |
362 | { "ar", 0x1a, INSTR_RR_RR }, | 362 | { "ar", 0x1a, INSTR_RR_RR }, |
363 | { "sr", 0x1b, INSTR_RR_RR }, | 363 | { "sr", 0x1b, INSTR_RR_RR }, |
364 | { "mr", 0x1c, INSTR_RR_RR }, | 364 | { "mr", 0x1c, INSTR_RR_RR }, |
365 | { "dr", 0x1d, INSTR_RR_RR }, | 365 | { "dr", 0x1d, INSTR_RR_RR }, |
366 | { "alr", 0x1e, INSTR_RR_RR }, | 366 | { "alr", 0x1e, INSTR_RR_RR }, |
367 | { "slr", 0x1f, INSTR_RR_RR }, | 367 | { "slr", 0x1f, INSTR_RR_RR }, |
368 | { "lpdr", 0x20, INSTR_RR_FF }, | 368 | { "lpdr", 0x20, INSTR_RR_FF }, |
369 | { "lndr", 0x21, INSTR_RR_FF }, | 369 | { "lndr", 0x21, INSTR_RR_FF }, |
370 | { "ltdr", 0x22, INSTR_RR_FF }, | 370 | { "ltdr", 0x22, INSTR_RR_FF }, |
371 | { "lcdr", 0x23, INSTR_RR_FF }, | 371 | { "lcdr", 0x23, INSTR_RR_FF }, |
372 | { "hdr", 0x24, INSTR_RR_FF }, | 372 | { "hdr", 0x24, INSTR_RR_FF }, |
373 | { "ldxr", 0x25, INSTR_RR_FF }, | 373 | { "ldxr", 0x25, INSTR_RR_FF }, |
374 | { "lrdr", 0x25, INSTR_RR_FF }, | 374 | { "lrdr", 0x25, INSTR_RR_FF }, |
375 | { "mxr", 0x26, INSTR_RR_FF }, | 375 | { "mxr", 0x26, INSTR_RR_FF }, |
376 | { "mxdr", 0x27, INSTR_RR_FF }, | 376 | { "mxdr", 0x27, INSTR_RR_FF }, |
377 | { "ldr", 0x28, INSTR_RR_FF }, | 377 | { "ldr", 0x28, INSTR_RR_FF }, |
378 | { "cdr", 0x29, INSTR_RR_FF }, | 378 | { "cdr", 0x29, INSTR_RR_FF }, |
379 | { "adr", 0x2a, INSTR_RR_FF }, | 379 | { "adr", 0x2a, INSTR_RR_FF }, |
380 | { "sdr", 0x2b, INSTR_RR_FF }, | 380 | { "sdr", 0x2b, INSTR_RR_FF }, |
381 | { "mdr", 0x2c, INSTR_RR_FF }, | 381 | { "mdr", 0x2c, INSTR_RR_FF }, |
382 | { "ddr", 0x2d, INSTR_RR_FF }, | 382 | { "ddr", 0x2d, INSTR_RR_FF }, |
383 | { "awr", 0x2e, INSTR_RR_FF }, | 383 | { "awr", 0x2e, INSTR_RR_FF }, |
384 | { "swr", 0x2f, INSTR_RR_FF }, | 384 | { "swr", 0x2f, INSTR_RR_FF }, |
385 | { "lper", 0x30, INSTR_RR_FF }, | 385 | { "lper", 0x30, INSTR_RR_FF }, |
386 | { "lner", 0x31, INSTR_RR_FF }, | 386 | { "lner", 0x31, INSTR_RR_FF }, |
387 | { "lter", 0x32, INSTR_RR_FF }, | 387 | { "lter", 0x32, INSTR_RR_FF }, |
388 | { "lcer", 0x33, INSTR_RR_FF }, | 388 | { "lcer", 0x33, INSTR_RR_FF }, |
389 | { "her", 0x34, INSTR_RR_FF }, | 389 | { "her", 0x34, INSTR_RR_FF }, |
390 | { "ledr", 0x35, INSTR_RR_FF }, | 390 | { "ledr", 0x35, INSTR_RR_FF }, |
391 | { "lrer", 0x35, INSTR_RR_FF }, | 391 | { "lrer", 0x35, INSTR_RR_FF }, |
392 | { "axr", 0x36, INSTR_RR_FF }, | 392 | { "axr", 0x36, INSTR_RR_FF }, |
393 | { "sxr", 0x37, INSTR_RR_FF }, | 393 | { "sxr", 0x37, INSTR_RR_FF }, |
394 | { "ler", 0x38, INSTR_RR_FF }, | 394 | { "ler", 0x38, INSTR_RR_FF }, |
395 | { "cer", 0x39, INSTR_RR_FF }, | 395 | { "cer", 0x39, INSTR_RR_FF }, |
396 | { "aer", 0x3a, INSTR_RR_FF }, | 396 | { "aer", 0x3a, INSTR_RR_FF }, |
397 | { "ser", 0x3b, INSTR_RR_FF }, | 397 | { "ser", 0x3b, INSTR_RR_FF }, |
398 | { "mder", 0x3c, INSTR_RR_FF }, | 398 | { "mder", 0x3c, INSTR_RR_FF }, |
399 | { "mer", 0x3c, INSTR_RR_FF }, | 399 | { "mer", 0x3c, INSTR_RR_FF }, |
400 | { "der", 0x3d, INSTR_RR_FF }, | 400 | { "der", 0x3d, INSTR_RR_FF }, |
401 | { "aur", 0x3e, INSTR_RR_FF }, | 401 | { "aur", 0x3e, INSTR_RR_FF }, |
402 | { "sur", 0x3f, INSTR_RR_FF }, | 402 | { "sur", 0x3f, INSTR_RR_FF }, |
403 | { "sth", 0x40, INSTR_RX_RRRD }, | 403 | { "sth", 0x40, INSTR_RX_RRRD }, |
404 | { "la", 0x41, INSTR_RX_RRRD }, | 404 | { "la", 0x41, INSTR_RX_RRRD }, |
405 | { "stc", 0x42, INSTR_RX_RRRD }, | 405 | { "stc", 0x42, INSTR_RX_RRRD }, |
406 | { "ic", 0x43, INSTR_RX_RRRD }, | 406 | { "ic", 0x43, INSTR_RX_RRRD }, |
407 | { "ex", 0x44, INSTR_RX_RRRD }, | 407 | { "ex", 0x44, INSTR_RX_RRRD }, |
408 | { "bal", 0x45, INSTR_RX_RRRD }, | 408 | { "bal", 0x45, INSTR_RX_RRRD }, |
409 | { "bct", 0x46, INSTR_RX_RRRD }, | 409 | { "bct", 0x46, INSTR_RX_RRRD }, |
410 | { "bc", 0x47, INSTR_RX_URRD }, | 410 | { "bc", 0x47, INSTR_RX_URRD }, |
411 | { "lh", 0x48, INSTR_RX_RRRD }, | 411 | { "lh", 0x48, INSTR_RX_RRRD }, |
412 | { "ch", 0x49, INSTR_RX_RRRD }, | 412 | { "ch", 0x49, INSTR_RX_RRRD }, |
413 | { "ah", 0x4a, INSTR_RX_RRRD }, | 413 | { "ah", 0x4a, INSTR_RX_RRRD }, |
414 | { "sh", 0x4b, INSTR_RX_RRRD }, | 414 | { "sh", 0x4b, INSTR_RX_RRRD }, |
415 | { "mh", 0x4c, INSTR_RX_RRRD }, | 415 | { "mh", 0x4c, INSTR_RX_RRRD }, |
416 | { "bas", 0x4d, INSTR_RX_RRRD }, | 416 | { "bas", 0x4d, INSTR_RX_RRRD }, |
417 | { "cvd", 0x4e, INSTR_RX_RRRD }, | 417 | { "cvd", 0x4e, INSTR_RX_RRRD }, |
418 | { "cvb", 0x4f, INSTR_RX_RRRD }, | 418 | { "cvb", 0x4f, INSTR_RX_RRRD }, |
419 | { "st", 0x50, INSTR_RX_RRRD }, | 419 | { "st", 0x50, INSTR_RX_RRRD }, |
420 | { "lae", 0x51, INSTR_RX_RRRD }, | 420 | { "lae", 0x51, INSTR_RX_RRRD }, |
421 | { "n", 0x54, INSTR_RX_RRRD }, | 421 | { "n", 0x54, INSTR_RX_RRRD }, |
422 | { "cl", 0x55, INSTR_RX_RRRD }, | 422 | { "cl", 0x55, INSTR_RX_RRRD }, |
423 | { "o", 0x56, INSTR_RX_RRRD }, | 423 | { "o", 0x56, INSTR_RX_RRRD }, |
424 | { "x", 0x57, INSTR_RX_RRRD }, | 424 | { "x", 0x57, INSTR_RX_RRRD }, |
425 | { "l", 0x58, INSTR_RX_RRRD }, | 425 | { "l", 0x58, INSTR_RX_RRRD }, |
426 | { "c", 0x59, INSTR_RX_RRRD }, | 426 | { "c", 0x59, INSTR_RX_RRRD }, |
427 | { "a", 0x5a, INSTR_RX_RRRD }, | 427 | { "a", 0x5a, INSTR_RX_RRRD }, |
428 | { "s", 0x5b, INSTR_RX_RRRD }, | 428 | { "s", 0x5b, INSTR_RX_RRRD }, |
429 | { "m", 0x5c, INSTR_RX_RRRD }, | 429 | { "m", 0x5c, INSTR_RX_RRRD }, |
430 | { "d", 0x5d, INSTR_RX_RRRD }, | 430 | { "d", 0x5d, INSTR_RX_RRRD }, |
431 | { "al", 0x5e, INSTR_RX_RRRD }, | 431 | { "al", 0x5e, INSTR_RX_RRRD }, |
432 | { "sl", 0x5f, INSTR_RX_RRRD }, | 432 | { "sl", 0x5f, INSTR_RX_RRRD }, |
433 | { "std", 0x60, INSTR_RX_FRRD }, | 433 | { "std", 0x60, INSTR_RX_FRRD }, |
434 | { "mxd", 0x67, INSTR_RX_FRRD }, | 434 | { "mxd", 0x67, INSTR_RX_FRRD }, |
435 | { "ld", 0x68, INSTR_RX_FRRD }, | 435 | { "ld", 0x68, INSTR_RX_FRRD }, |
436 | { "cd", 0x69, INSTR_RX_FRRD }, | 436 | { "cd", 0x69, INSTR_RX_FRRD }, |
437 | { "ad", 0x6a, INSTR_RX_FRRD }, | 437 | { "ad", 0x6a, INSTR_RX_FRRD }, |
438 | { "sd", 0x6b, INSTR_RX_FRRD }, | 438 | { "sd", 0x6b, INSTR_RX_FRRD }, |
439 | { "md", 0x6c, INSTR_RX_FRRD }, | 439 | { "md", 0x6c, INSTR_RX_FRRD }, |
440 | { "dd", 0x6d, INSTR_RX_FRRD }, | 440 | { "dd", 0x6d, INSTR_RX_FRRD }, |
441 | { "aw", 0x6e, INSTR_RX_FRRD }, | 441 | { "aw", 0x6e, INSTR_RX_FRRD }, |
442 | { "sw", 0x6f, INSTR_RX_FRRD }, | 442 | { "sw", 0x6f, INSTR_RX_FRRD }, |
443 | { "ste", 0x70, INSTR_RX_FRRD }, | 443 | { "ste", 0x70, INSTR_RX_FRRD }, |
444 | { "ms", 0x71, INSTR_RX_RRRD }, | 444 | { "ms", 0x71, INSTR_RX_RRRD }, |
445 | { "le", 0x78, INSTR_RX_FRRD }, | 445 | { "le", 0x78, INSTR_RX_FRRD }, |
446 | { "ce", 0x79, INSTR_RX_FRRD }, | 446 | { "ce", 0x79, INSTR_RX_FRRD }, |
447 | { "ae", 0x7a, INSTR_RX_FRRD }, | 447 | { "ae", 0x7a, INSTR_RX_FRRD }, |
448 | { "se", 0x7b, INSTR_RX_FRRD }, | 448 | { "se", 0x7b, INSTR_RX_FRRD }, |
449 | { "mde", 0x7c, INSTR_RX_FRRD }, | 449 | { "mde", 0x7c, INSTR_RX_FRRD }, |
450 | { "me", 0x7c, INSTR_RX_FRRD }, | 450 | { "me", 0x7c, INSTR_RX_FRRD }, |
451 | { "de", 0x7d, INSTR_RX_FRRD }, | 451 | { "de", 0x7d, INSTR_RX_FRRD }, |
452 | { "au", 0x7e, INSTR_RX_FRRD }, | 452 | { "au", 0x7e, INSTR_RX_FRRD }, |
453 | { "su", 0x7f, INSTR_RX_FRRD }, | 453 | { "su", 0x7f, INSTR_RX_FRRD }, |
454 | { "ssm", 0x80, INSTR_S_RD }, | 454 | { "ssm", 0x80, INSTR_S_RD }, |
455 | { "lpsw", 0x82, INSTR_S_RD }, | 455 | { "lpsw", 0x82, INSTR_S_RD }, |
456 | { "diag", 0x83, INSTR_RS_RRRD }, | 456 | { "diag", 0x83, INSTR_RS_RRRD }, |
457 | { "brxh", 0x84, INSTR_RSI_RRP }, | 457 | { "brxh", 0x84, INSTR_RSI_RRP }, |
458 | { "brxle", 0x85, INSTR_RSI_RRP }, | 458 | { "brxle", 0x85, INSTR_RSI_RRP }, |
459 | { "bxh", 0x86, INSTR_RS_RRRD }, | 459 | { "bxh", 0x86, INSTR_RS_RRRD }, |
460 | { "bxle", 0x87, INSTR_RS_RRRD }, | 460 | { "bxle", 0x87, INSTR_RS_RRRD }, |
461 | { "srl", 0x88, INSTR_RS_R0RD }, | 461 | { "srl", 0x88, INSTR_RS_R0RD }, |
462 | { "sll", 0x89, INSTR_RS_R0RD }, | 462 | { "sll", 0x89, INSTR_RS_R0RD }, |
463 | { "sra", 0x8a, INSTR_RS_R0RD }, | 463 | { "sra", 0x8a, INSTR_RS_R0RD }, |
464 | { "sla", 0x8b, INSTR_RS_R0RD }, | 464 | { "sla", 0x8b, INSTR_RS_R0RD }, |
465 | { "srdl", 0x8c, INSTR_RS_R0RD }, | 465 | { "srdl", 0x8c, INSTR_RS_R0RD }, |
466 | { "sldl", 0x8d, INSTR_RS_R0RD }, | 466 | { "sldl", 0x8d, INSTR_RS_R0RD }, |
467 | { "srda", 0x8e, INSTR_RS_R0RD }, | 467 | { "srda", 0x8e, INSTR_RS_R0RD }, |
468 | { "slda", 0x8f, INSTR_RS_R0RD }, | 468 | { "slda", 0x8f, INSTR_RS_R0RD }, |
469 | { "stm", 0x90, INSTR_RS_RRRD }, | 469 | { "stm", 0x90, INSTR_RS_RRRD }, |
470 | { "tm", 0x91, INSTR_SI_URD }, | 470 | { "tm", 0x91, INSTR_SI_URD }, |
471 | { "mvi", 0x92, INSTR_SI_URD }, | 471 | { "mvi", 0x92, INSTR_SI_URD }, |
472 | { "ts", 0x93, INSTR_S_RD }, | 472 | { "ts", 0x93, INSTR_S_RD }, |
473 | { "ni", 0x94, INSTR_SI_URD }, | 473 | { "ni", 0x94, INSTR_SI_URD }, |
474 | { "cli", 0x95, INSTR_SI_URD }, | 474 | { "cli", 0x95, INSTR_SI_URD }, |
475 | { "oi", 0x96, INSTR_SI_URD }, | 475 | { "oi", 0x96, INSTR_SI_URD }, |
476 | { "xi", 0x97, INSTR_SI_URD }, | 476 | { "xi", 0x97, INSTR_SI_URD }, |
477 | { "lm", 0x98, INSTR_RS_RRRD }, | 477 | { "lm", 0x98, INSTR_RS_RRRD }, |
478 | { "trace", 0x99, INSTR_RS_RRRD }, | 478 | { "trace", 0x99, INSTR_RS_RRRD }, |
479 | { "lam", 0x9a, INSTR_RS_AARD }, | 479 | { "lam", 0x9a, INSTR_RS_AARD }, |
480 | { "stam", 0x9b, INSTR_RS_AARD }, | 480 | { "stam", 0x9b, INSTR_RS_AARD }, |
481 | { "mvcle", 0xa8, INSTR_RS_RRRD }, | 481 | { "mvcle", 0xa8, INSTR_RS_RRRD }, |
482 | { "clcle", 0xa9, INSTR_RS_RRRD }, | 482 | { "clcle", 0xa9, INSTR_RS_RRRD }, |
483 | { "stnsm", 0xac, INSTR_SI_URD }, | 483 | { "stnsm", 0xac, INSTR_SI_URD }, |
484 | { "stosm", 0xad, INSTR_SI_URD }, | 484 | { "stosm", 0xad, INSTR_SI_URD }, |
485 | { "sigp", 0xae, INSTR_RS_RRRD }, | 485 | { "sigp", 0xae, INSTR_RS_RRRD }, |
486 | { "mc", 0xaf, INSTR_SI_URD }, | 486 | { "mc", 0xaf, INSTR_SI_URD }, |
487 | { "lra", 0xb1, INSTR_RX_RRRD }, | 487 | { "lra", 0xb1, INSTR_RX_RRRD }, |
488 | { "stctl", 0xb6, INSTR_RS_CCRD }, | 488 | { "stctl", 0xb6, INSTR_RS_CCRD }, |
489 | { "lctl", 0xb7, INSTR_RS_CCRD }, | 489 | { "lctl", 0xb7, INSTR_RS_CCRD }, |
490 | { "cs", 0xba, INSTR_RS_RRRD }, | 490 | { "cs", 0xba, INSTR_RS_RRRD }, |
491 | { "cds", 0xbb, INSTR_RS_RRRD }, | 491 | { "cds", 0xbb, INSTR_RS_RRRD }, |
492 | { "clm", 0xbd, INSTR_RS_RURD }, | 492 | { "clm", 0xbd, INSTR_RS_RURD }, |
493 | { "stcm", 0xbe, INSTR_RS_RURD }, | 493 | { "stcm", 0xbe, INSTR_RS_RURD }, |
494 | { "icm", 0xbf, INSTR_RS_RURD }, | 494 | { "icm", 0xbf, INSTR_RS_RURD }, |
495 | { "mvn", 0xd1, INSTR_SS_L0RDRD }, | 495 | { "mvn", 0xd1, INSTR_SS_L0RDRD }, |
496 | { "mvc", 0xd2, INSTR_SS_L0RDRD }, | 496 | { "mvc", 0xd2, INSTR_SS_L0RDRD }, |
497 | { "mvz", 0xd3, INSTR_SS_L0RDRD }, | 497 | { "mvz", 0xd3, INSTR_SS_L0RDRD }, |
498 | { "nc", 0xd4, INSTR_SS_L0RDRD }, | 498 | { "nc", 0xd4, INSTR_SS_L0RDRD }, |
499 | { "clc", 0xd5, INSTR_SS_L0RDRD }, | 499 | { "clc", 0xd5, INSTR_SS_L0RDRD }, |
500 | { "oc", 0xd6, INSTR_SS_L0RDRD }, | 500 | { "oc", 0xd6, INSTR_SS_L0RDRD }, |
501 | { "xc", 0xd7, INSTR_SS_L0RDRD }, | 501 | { "xc", 0xd7, INSTR_SS_L0RDRD }, |
502 | { "mvck", 0xd9, INSTR_SS_RRRDRD }, | 502 | { "mvck", 0xd9, INSTR_SS_RRRDRD }, |
503 | { "mvcp", 0xda, INSTR_SS_RRRDRD }, | 503 | { "mvcp", 0xda, INSTR_SS_RRRDRD }, |
504 | { "mvcs", 0xdb, INSTR_SS_RRRDRD }, | 504 | { "mvcs", 0xdb, INSTR_SS_RRRDRD }, |
505 | { "tr", 0xdc, INSTR_SS_L0RDRD }, | 505 | { "tr", 0xdc, INSTR_SS_L0RDRD }, |
506 | { "trt", 0xdd, INSTR_SS_L0RDRD }, | 506 | { "trt", 0xdd, INSTR_SS_L0RDRD }, |
507 | { "ed", 0xde, INSTR_SS_L0RDRD }, | 507 | { "ed", 0xde, INSTR_SS_L0RDRD }, |
508 | { "edmk", 0xdf, INSTR_SS_L0RDRD }, | 508 | { "edmk", 0xdf, INSTR_SS_L0RDRD }, |
509 | { "pku", 0xe1, INSTR_SS_L0RDRD }, | 509 | { "pku", 0xe1, INSTR_SS_L0RDRD }, |
510 | { "unpku", 0xe2, INSTR_SS_L0RDRD }, | 510 | { "unpku", 0xe2, INSTR_SS_L0RDRD }, |
511 | { "mvcin", 0xe8, INSTR_SS_L0RDRD }, | 511 | { "mvcin", 0xe8, INSTR_SS_L0RDRD }, |
512 | { "pka", 0xe9, INSTR_SS_L0RDRD }, | 512 | { "pka", 0xe9, INSTR_SS_L0RDRD }, |
513 | { "unpka", 0xea, INSTR_SS_L0RDRD }, | 513 | { "unpka", 0xea, INSTR_SS_L0RDRD }, |
514 | { "plo", 0xee, INSTR_SS_RRRDRD2 }, | 514 | { "plo", 0xee, INSTR_SS_RRRDRD2 }, |
515 | { "srp", 0xf0, INSTR_SS_LIRDRD }, | 515 | { "srp", 0xf0, INSTR_SS_LIRDRD }, |
516 | { "mvo", 0xf1, INSTR_SS_LLRDRD }, | 516 | { "mvo", 0xf1, INSTR_SS_LLRDRD }, |
517 | { "pack", 0xf2, INSTR_SS_LLRDRD }, | 517 | { "pack", 0xf2, INSTR_SS_LLRDRD }, |
518 | { "unpk", 0xf3, INSTR_SS_LLRDRD }, | 518 | { "unpk", 0xf3, INSTR_SS_LLRDRD }, |
519 | { "zap", 0xf8, INSTR_SS_LLRDRD }, | 519 | { "zap", 0xf8, INSTR_SS_LLRDRD }, |
520 | { "cp", 0xf9, INSTR_SS_LLRDRD }, | 520 | { "cp", 0xf9, INSTR_SS_LLRDRD }, |
521 | { "ap", 0xfa, INSTR_SS_LLRDRD }, | 521 | { "ap", 0xfa, INSTR_SS_LLRDRD }, |
522 | { "sp", 0xfb, INSTR_SS_LLRDRD }, | 522 | { "sp", 0xfb, INSTR_SS_LLRDRD }, |
523 | { "mp", 0xfc, INSTR_SS_LLRDRD }, | 523 | { "mp", 0xfc, INSTR_SS_LLRDRD }, |
524 | { "dp", 0xfd, INSTR_SS_LLRDRD }, | 524 | { "dp", 0xfd, INSTR_SS_LLRDRD }, |
525 | { "", 0, INSTR_INVALID } | 525 | { "", 0, INSTR_INVALID } |
526 | }; | 526 | }; |
527 | 527 | ||
528 | static struct insn opcode_01[] = { | 528 | static struct insn opcode_01[] = { |
529 | #ifdef CONFIG_64BIT | 529 | #ifdef CONFIG_64BIT |
530 | { "sam64", 0x0e, INSTR_E }, | 530 | { "sam64", 0x0e, INSTR_E }, |
531 | { "pfpo", 0x0a, INSTR_E }, | 531 | { "pfpo", 0x0a, INSTR_E }, |
532 | { "ptff", 0x04, INSTR_E }, | 532 | { "ptff", 0x04, INSTR_E }, |
533 | #endif | 533 | #endif |
534 | { "pr", 0x01, INSTR_E }, | 534 | { "pr", 0x01, INSTR_E }, |
535 | { "upt", 0x02, INSTR_E }, | 535 | { "upt", 0x02, INSTR_E }, |
536 | { "sckpf", 0x07, INSTR_E }, | 536 | { "sckpf", 0x07, INSTR_E }, |
537 | { "tam", 0x0b, INSTR_E }, | 537 | { "tam", 0x0b, INSTR_E }, |
538 | { "sam24", 0x0c, INSTR_E }, | 538 | { "sam24", 0x0c, INSTR_E }, |
539 | { "sam31", 0x0d, INSTR_E }, | 539 | { "sam31", 0x0d, INSTR_E }, |
540 | { "trap2", 0xff, INSTR_E }, | 540 | { "trap2", 0xff, INSTR_E }, |
541 | { "", 0, INSTR_INVALID } | 541 | { "", 0, INSTR_INVALID } |
542 | }; | 542 | }; |
543 | 543 | ||
544 | static struct insn opcode_a5[] = { | 544 | static struct insn opcode_a5[] = { |
545 | #ifdef CONFIG_64BIT | 545 | #ifdef CONFIG_64BIT |
546 | { "iihh", 0x00, INSTR_RI_RU }, | 546 | { "iihh", 0x00, INSTR_RI_RU }, |
547 | { "iihl", 0x01, INSTR_RI_RU }, | 547 | { "iihl", 0x01, INSTR_RI_RU }, |
548 | { "iilh", 0x02, INSTR_RI_RU }, | 548 | { "iilh", 0x02, INSTR_RI_RU }, |
549 | { "iill", 0x03, INSTR_RI_RU }, | 549 | { "iill", 0x03, INSTR_RI_RU }, |
550 | { "nihh", 0x04, INSTR_RI_RU }, | 550 | { "nihh", 0x04, INSTR_RI_RU }, |
551 | { "nihl", 0x05, INSTR_RI_RU }, | 551 | { "nihl", 0x05, INSTR_RI_RU }, |
552 | { "nilh", 0x06, INSTR_RI_RU }, | 552 | { "nilh", 0x06, INSTR_RI_RU }, |
553 | { "nill", 0x07, INSTR_RI_RU }, | 553 | { "nill", 0x07, INSTR_RI_RU }, |
554 | { "oihh", 0x08, INSTR_RI_RU }, | 554 | { "oihh", 0x08, INSTR_RI_RU }, |
555 | { "oihl", 0x09, INSTR_RI_RU }, | 555 | { "oihl", 0x09, INSTR_RI_RU }, |
556 | { "oilh", 0x0a, INSTR_RI_RU }, | 556 | { "oilh", 0x0a, INSTR_RI_RU }, |
557 | { "oill", 0x0b, INSTR_RI_RU }, | 557 | { "oill", 0x0b, INSTR_RI_RU }, |
558 | { "llihh", 0x0c, INSTR_RI_RU }, | 558 | { "llihh", 0x0c, INSTR_RI_RU }, |
559 | { "llihl", 0x0d, INSTR_RI_RU }, | 559 | { "llihl", 0x0d, INSTR_RI_RU }, |
560 | { "llilh", 0x0e, INSTR_RI_RU }, | 560 | { "llilh", 0x0e, INSTR_RI_RU }, |
561 | { "llill", 0x0f, INSTR_RI_RU }, | 561 | { "llill", 0x0f, INSTR_RI_RU }, |
562 | #endif | 562 | #endif |
563 | { "", 0, INSTR_INVALID } | 563 | { "", 0, INSTR_INVALID } |
564 | }; | 564 | }; |
565 | 565 | ||
566 | static struct insn opcode_a7[] = { | 566 | static struct insn opcode_a7[] = { |
567 | #ifdef CONFIG_64BIT | 567 | #ifdef CONFIG_64BIT |
568 | { "tmhh", 0x02, INSTR_RI_RU }, | 568 | { "tmhh", 0x02, INSTR_RI_RU }, |
569 | { "tmhl", 0x03, INSTR_RI_RU }, | 569 | { "tmhl", 0x03, INSTR_RI_RU }, |
570 | { "brctg", 0x07, INSTR_RI_RP }, | 570 | { "brctg", 0x07, INSTR_RI_RP }, |
571 | { "lghi", 0x09, INSTR_RI_RI }, | 571 | { "lghi", 0x09, INSTR_RI_RI }, |
572 | { "aghi", 0x0b, INSTR_RI_RI }, | 572 | { "aghi", 0x0b, INSTR_RI_RI }, |
573 | { "mghi", 0x0d, INSTR_RI_RI }, | 573 | { "mghi", 0x0d, INSTR_RI_RI }, |
574 | { "cghi", 0x0f, INSTR_RI_RI }, | 574 | { "cghi", 0x0f, INSTR_RI_RI }, |
575 | #endif | 575 | #endif |
576 | { "tmlh", 0x00, INSTR_RI_RU }, | 576 | { "tmlh", 0x00, INSTR_RI_RU }, |
577 | { "tmll", 0x01, INSTR_RI_RU }, | 577 | { "tmll", 0x01, INSTR_RI_RU }, |
578 | { "brc", 0x04, INSTR_RI_UP }, | 578 | { "brc", 0x04, INSTR_RI_UP }, |
579 | { "bras", 0x05, INSTR_RI_RP }, | 579 | { "bras", 0x05, INSTR_RI_RP }, |
580 | { "brct", 0x06, INSTR_RI_RP }, | 580 | { "brct", 0x06, INSTR_RI_RP }, |
581 | { "lhi", 0x08, INSTR_RI_RI }, | 581 | { "lhi", 0x08, INSTR_RI_RI }, |
582 | { "ahi", 0x0a, INSTR_RI_RI }, | 582 | { "ahi", 0x0a, INSTR_RI_RI }, |
583 | { "mhi", 0x0c, INSTR_RI_RI }, | 583 | { "mhi", 0x0c, INSTR_RI_RI }, |
584 | { "chi", 0x0e, INSTR_RI_RI }, | 584 | { "chi", 0x0e, INSTR_RI_RI }, |
585 | { "", 0, INSTR_INVALID } | 585 | { "", 0, INSTR_INVALID } |
586 | }; | 586 | }; |
587 | 587 | ||
588 | static struct insn opcode_b2[] = { | 588 | static struct insn opcode_b2[] = { |
589 | #ifdef CONFIG_64BIT | 589 | #ifdef CONFIG_64BIT |
590 | { "sske", 0x2b, INSTR_RRF_M0RR }, | 590 | { "sske", 0x2b, INSTR_RRF_M0RR }, |
591 | { "stckf", 0x7c, INSTR_S_RD }, | 591 | { "stckf", 0x7c, INSTR_S_RD }, |
592 | { "cu21", 0xa6, INSTR_RRF_M0RR }, | 592 | { "cu21", 0xa6, INSTR_RRF_M0RR }, |
593 | { "cuutf", 0xa6, INSTR_RRF_M0RR }, | 593 | { "cuutf", 0xa6, INSTR_RRF_M0RR }, |
594 | { "cu12", 0xa7, INSTR_RRF_M0RR }, | 594 | { "cu12", 0xa7, INSTR_RRF_M0RR }, |
595 | { "cutfu", 0xa7, INSTR_RRF_M0RR }, | 595 | { "cutfu", 0xa7, INSTR_RRF_M0RR }, |
596 | { "stfle", 0xb0, INSTR_S_RD }, | 596 | { "stfle", 0xb0, INSTR_S_RD }, |
597 | { "lpswe", 0xb2, INSTR_S_RD }, | 597 | { "lpswe", 0xb2, INSTR_S_RD }, |
598 | { "srnmt", 0xb9, INSTR_S_RD }, | 598 | { "srnmt", 0xb9, INSTR_S_RD }, |
599 | { "lfas", 0xbd, INSTR_S_RD }, | 599 | { "lfas", 0xbd, INSTR_S_RD }, |
600 | #endif | 600 | #endif |
601 | { "stidp", 0x02, INSTR_S_RD }, | 601 | { "stidp", 0x02, INSTR_S_RD }, |
602 | { "sck", 0x04, INSTR_S_RD }, | 602 | { "sck", 0x04, INSTR_S_RD }, |
603 | { "stck", 0x05, INSTR_S_RD }, | 603 | { "stck", 0x05, INSTR_S_RD }, |
604 | { "sckc", 0x06, INSTR_S_RD }, | 604 | { "sckc", 0x06, INSTR_S_RD }, |
605 | { "stckc", 0x07, INSTR_S_RD }, | 605 | { "stckc", 0x07, INSTR_S_RD }, |
606 | { "spt", 0x08, INSTR_S_RD }, | 606 | { "spt", 0x08, INSTR_S_RD }, |
607 | { "stpt", 0x09, INSTR_S_RD }, | 607 | { "stpt", 0x09, INSTR_S_RD }, |
608 | { "spka", 0x0a, INSTR_S_RD }, | 608 | { "spka", 0x0a, INSTR_S_RD }, |
609 | { "ipk", 0x0b, INSTR_S_00 }, | 609 | { "ipk", 0x0b, INSTR_S_00 }, |
610 | { "ptlb", 0x0d, INSTR_S_00 }, | 610 | { "ptlb", 0x0d, INSTR_S_00 }, |
611 | { "spx", 0x10, INSTR_S_RD }, | 611 | { "spx", 0x10, INSTR_S_RD }, |
612 | { "stpx", 0x11, INSTR_S_RD }, | 612 | { "stpx", 0x11, INSTR_S_RD }, |
613 | { "stap", 0x12, INSTR_S_RD }, | 613 | { "stap", 0x12, INSTR_S_RD }, |
614 | { "sie", 0x14, INSTR_S_RD }, | 614 | { "sie", 0x14, INSTR_S_RD }, |
615 | { "pc", 0x18, INSTR_S_RD }, | 615 | { "pc", 0x18, INSTR_S_RD }, |
616 | { "sac", 0x19, INSTR_S_RD }, | 616 | { "sac", 0x19, INSTR_S_RD }, |
617 | { "cfc", 0x1a, INSTR_S_RD }, | 617 | { "cfc", 0x1a, INSTR_S_RD }, |
618 | { "ipte", 0x21, INSTR_RRE_RR }, | 618 | { "ipte", 0x21, INSTR_RRE_RR }, |
619 | { "ipm", 0x22, INSTR_RRE_R0 }, | 619 | { "ipm", 0x22, INSTR_RRE_R0 }, |
620 | { "ivsk", 0x23, INSTR_RRE_RR }, | 620 | { "ivsk", 0x23, INSTR_RRE_RR }, |
621 | { "iac", 0x24, INSTR_RRE_R0 }, | 621 | { "iac", 0x24, INSTR_RRE_R0 }, |
622 | { "ssar", 0x25, INSTR_RRE_R0 }, | 622 | { "ssar", 0x25, INSTR_RRE_R0 }, |
623 | { "epar", 0x26, INSTR_RRE_R0 }, | 623 | { "epar", 0x26, INSTR_RRE_R0 }, |
624 | { "esar", 0x27, INSTR_RRE_R0 }, | 624 | { "esar", 0x27, INSTR_RRE_R0 }, |
625 | { "pt", 0x28, INSTR_RRE_RR }, | 625 | { "pt", 0x28, INSTR_RRE_RR }, |
626 | { "iske", 0x29, INSTR_RRE_RR }, | 626 | { "iske", 0x29, INSTR_RRE_RR }, |
627 | { "rrbe", 0x2a, INSTR_RRE_RR }, | 627 | { "rrbe", 0x2a, INSTR_RRE_RR }, |
628 | { "sske", 0x2b, INSTR_RRE_RR }, | 628 | { "sske", 0x2b, INSTR_RRE_RR }, |
629 | { "tb", 0x2c, INSTR_RRE_0R }, | 629 | { "tb", 0x2c, INSTR_RRE_0R }, |
630 | { "dxr", 0x2d, INSTR_RRE_F0 }, | 630 | { "dxr", 0x2d, INSTR_RRE_F0 }, |
631 | { "pgin", 0x2e, INSTR_RRE_RR }, | 631 | { "pgin", 0x2e, INSTR_RRE_RR }, |
632 | { "pgout", 0x2f, INSTR_RRE_RR }, | 632 | { "pgout", 0x2f, INSTR_RRE_RR }, |
633 | { "csch", 0x30, INSTR_S_00 }, | 633 | { "csch", 0x30, INSTR_S_00 }, |
634 | { "hsch", 0x31, INSTR_S_00 }, | 634 | { "hsch", 0x31, INSTR_S_00 }, |
635 | { "msch", 0x32, INSTR_S_RD }, | 635 | { "msch", 0x32, INSTR_S_RD }, |
636 | { "ssch", 0x33, INSTR_S_RD }, | 636 | { "ssch", 0x33, INSTR_S_RD }, |
637 | { "stsch", 0x34, INSTR_S_RD }, | 637 | { "stsch", 0x34, INSTR_S_RD }, |
638 | { "tsch", 0x35, INSTR_S_RD }, | 638 | { "tsch", 0x35, INSTR_S_RD }, |
639 | { "tpi", 0x36, INSTR_S_RD }, | 639 | { "tpi", 0x36, INSTR_S_RD }, |
640 | { "sal", 0x37, INSTR_S_00 }, | 640 | { "sal", 0x37, INSTR_S_00 }, |
641 | { "rsch", 0x38, INSTR_S_00 }, | 641 | { "rsch", 0x38, INSTR_S_00 }, |
642 | { "stcrw", 0x39, INSTR_S_RD }, | 642 | { "stcrw", 0x39, INSTR_S_RD }, |
643 | { "stcps", 0x3a, INSTR_S_RD }, | 643 | { "stcps", 0x3a, INSTR_S_RD }, |
644 | { "rchp", 0x3b, INSTR_S_00 }, | 644 | { "rchp", 0x3b, INSTR_S_00 }, |
645 | { "schm", 0x3c, INSTR_S_00 }, | 645 | { "schm", 0x3c, INSTR_S_00 }, |
646 | { "bakr", 0x40, INSTR_RRE_RR }, | 646 | { "bakr", 0x40, INSTR_RRE_RR }, |
647 | { "cksm", 0x41, INSTR_RRE_RR }, | 647 | { "cksm", 0x41, INSTR_RRE_RR }, |
648 | { "sqdr", 0x44, INSTR_RRE_F0 }, | 648 | { "sqdr", 0x44, INSTR_RRE_F0 }, |
649 | { "sqer", 0x45, INSTR_RRE_F0 }, | 649 | { "sqer", 0x45, INSTR_RRE_F0 }, |
650 | { "stura", 0x46, INSTR_RRE_RR }, | 650 | { "stura", 0x46, INSTR_RRE_RR }, |
651 | { "msta", 0x47, INSTR_RRE_R0 }, | 651 | { "msta", 0x47, INSTR_RRE_R0 }, |
652 | { "palb", 0x48, INSTR_RRE_00 }, | 652 | { "palb", 0x48, INSTR_RRE_00 }, |
653 | { "ereg", 0x49, INSTR_RRE_RR }, | 653 | { "ereg", 0x49, INSTR_RRE_RR }, |
654 | { "esta", 0x4a, INSTR_RRE_RR }, | 654 | { "esta", 0x4a, INSTR_RRE_RR }, |
655 | { "lura", 0x4b, INSTR_RRE_RR }, | 655 | { "lura", 0x4b, INSTR_RRE_RR }, |
656 | { "tar", 0x4c, INSTR_RRE_AR }, | 656 | { "tar", 0x4c, INSTR_RRE_AR }, |
657 | { "cpya", 0x4d, INSTR_RRE_AA }, | 657 | { "cpya", 0x4d, INSTR_RRE_AA }, |
658 | { "sar", 0x4e, INSTR_RRE_AR }, | 658 | { "sar", 0x4e, INSTR_RRE_AR }, |
659 | { "ear", 0x4f, INSTR_RRE_RA }, | 659 | { "ear", 0x4f, INSTR_RRE_RA }, |
660 | { "csp", 0x50, INSTR_RRE_RR }, | 660 | { "csp", 0x50, INSTR_RRE_RR }, |
661 | { "msr", 0x52, INSTR_RRE_RR }, | 661 | { "msr", 0x52, INSTR_RRE_RR }, |
662 | { "mvpg", 0x54, INSTR_RRE_RR }, | 662 | { "mvpg", 0x54, INSTR_RRE_RR }, |
663 | { "mvst", 0x55, INSTR_RRE_RR }, | 663 | { "mvst", 0x55, INSTR_RRE_RR }, |
664 | { "cuse", 0x57, INSTR_RRE_RR }, | 664 | { "cuse", 0x57, INSTR_RRE_RR }, |
665 | { "bsg", 0x58, INSTR_RRE_RR }, | 665 | { "bsg", 0x58, INSTR_RRE_RR }, |
666 | { "bsa", 0x5a, INSTR_RRE_RR }, | 666 | { "bsa", 0x5a, INSTR_RRE_RR }, |
667 | { "clst", 0x5d, INSTR_RRE_RR }, | 667 | { "clst", 0x5d, INSTR_RRE_RR }, |
668 | { "srst", 0x5e, INSTR_RRE_RR }, | 668 | { "srst", 0x5e, INSTR_RRE_RR }, |
669 | { "cmpsc", 0x63, INSTR_RRE_RR }, | 669 | { "cmpsc", 0x63, INSTR_RRE_RR }, |
670 | { "siga", 0x74, INSTR_S_RD }, | 670 | { "siga", 0x74, INSTR_S_RD }, |
671 | { "xsch", 0x76, INSTR_S_00 }, | 671 | { "xsch", 0x76, INSTR_S_00 }, |
672 | { "rp", 0x77, INSTR_S_RD }, | 672 | { "rp", 0x77, INSTR_S_RD }, |
673 | { "stcke", 0x78, INSTR_S_RD }, | 673 | { "stcke", 0x78, INSTR_S_RD }, |
674 | { "sacf", 0x79, INSTR_S_RD }, | 674 | { "sacf", 0x79, INSTR_S_RD }, |
675 | { "spp", 0x80, INSTR_S_RD }, | 675 | { "spp", 0x80, INSTR_S_RD }, |
676 | { "stsi", 0x7d, INSTR_S_RD }, | 676 | { "stsi", 0x7d, INSTR_S_RD }, |
677 | { "srnm", 0x99, INSTR_S_RD }, | 677 | { "srnm", 0x99, INSTR_S_RD }, |
678 | { "stfpc", 0x9c, INSTR_S_RD }, | 678 | { "stfpc", 0x9c, INSTR_S_RD }, |
679 | { "lfpc", 0x9d, INSTR_S_RD }, | 679 | { "lfpc", 0x9d, INSTR_S_RD }, |
680 | { "tre", 0xa5, INSTR_RRE_RR }, | 680 | { "tre", 0xa5, INSTR_RRE_RR }, |
681 | { "cuutf", 0xa6, INSTR_RRE_RR }, | 681 | { "cuutf", 0xa6, INSTR_RRE_RR }, |
682 | { "cutfu", 0xa7, INSTR_RRE_RR }, | 682 | { "cutfu", 0xa7, INSTR_RRE_RR }, |
683 | { "stfl", 0xb1, INSTR_S_RD }, | 683 | { "stfl", 0xb1, INSTR_S_RD }, |
684 | { "trap4", 0xff, INSTR_S_RD }, | 684 | { "trap4", 0xff, INSTR_S_RD }, |
685 | { "", 0, INSTR_INVALID } | 685 | { "", 0, INSTR_INVALID } |
686 | }; | 686 | }; |
687 | 687 | ||
688 | static struct insn opcode_b3[] = { | 688 | static struct insn opcode_b3[] = { |
689 | #ifdef CONFIG_64BIT | 689 | #ifdef CONFIG_64BIT |
690 | { "maylr", 0x38, INSTR_RRF_F0FF }, | 690 | { "maylr", 0x38, INSTR_RRF_F0FF }, |
691 | { "mylr", 0x39, INSTR_RRF_F0FF }, | 691 | { "mylr", 0x39, INSTR_RRF_F0FF }, |
692 | { "mayr", 0x3a, INSTR_RRF_F0FF }, | 692 | { "mayr", 0x3a, INSTR_RRF_F0FF }, |
693 | { "myr", 0x3b, INSTR_RRF_F0FF }, | 693 | { "myr", 0x3b, INSTR_RRF_F0FF }, |
694 | { "mayhr", 0x3c, INSTR_RRF_F0FF }, | 694 | { "mayhr", 0x3c, INSTR_RRF_F0FF }, |
695 | { "myhr", 0x3d, INSTR_RRF_F0FF }, | 695 | { "myhr", 0x3d, INSTR_RRF_F0FF }, |
696 | { "cegbr", 0xa4, INSTR_RRE_RR }, | 696 | { "cegbr", 0xa4, INSTR_RRE_RR }, |
697 | { "cdgbr", 0xa5, INSTR_RRE_RR }, | 697 | { "cdgbr", 0xa5, INSTR_RRE_RR }, |
698 | { "cxgbr", 0xa6, INSTR_RRE_RR }, | 698 | { "cxgbr", 0xa6, INSTR_RRE_RR }, |
699 | { "cgebr", 0xa8, INSTR_RRF_U0RF }, | 699 | { "cgebr", 0xa8, INSTR_RRF_U0RF }, |
700 | { "cgdbr", 0xa9, INSTR_RRF_U0RF }, | 700 | { "cgdbr", 0xa9, INSTR_RRF_U0RF }, |
701 | { "cgxbr", 0xaa, INSTR_RRF_U0RF }, | 701 | { "cgxbr", 0xaa, INSTR_RRF_U0RF }, |
702 | { "cfer", 0xb8, INSTR_RRF_U0RF }, | 702 | { "cfer", 0xb8, INSTR_RRF_U0RF }, |
703 | { "cfdr", 0xb9, INSTR_RRF_U0RF }, | 703 | { "cfdr", 0xb9, INSTR_RRF_U0RF }, |
704 | { "cfxr", 0xba, INSTR_RRF_U0RF }, | 704 | { "cfxr", 0xba, INSTR_RRF_U0RF }, |
705 | { "cegr", 0xc4, INSTR_RRE_RR }, | 705 | { "cegr", 0xc4, INSTR_RRE_RR }, |
706 | { "cdgr", 0xc5, INSTR_RRE_RR }, | 706 | { "cdgr", 0xc5, INSTR_RRE_RR }, |
707 | { "cxgr", 0xc6, INSTR_RRE_RR }, | 707 | { "cxgr", 0xc6, INSTR_RRE_RR }, |
708 | { "cger", 0xc8, INSTR_RRF_U0RF }, | 708 | { "cger", 0xc8, INSTR_RRF_U0RF }, |
709 | { "cgdr", 0xc9, INSTR_RRF_U0RF }, | 709 | { "cgdr", 0xc9, INSTR_RRF_U0RF }, |
710 | { "cgxr", 0xca, INSTR_RRF_U0RF }, | 710 | { "cgxr", 0xca, INSTR_RRF_U0RF }, |
711 | { "lpdfr", 0x70, INSTR_RRE_FF }, | 711 | { "lpdfr", 0x70, INSTR_RRE_FF }, |
712 | { "lndfr", 0x71, INSTR_RRE_FF }, | 712 | { "lndfr", 0x71, INSTR_RRE_FF }, |
713 | { "cpsdr", 0x72, INSTR_RRF_F0FF2 }, | 713 | { "cpsdr", 0x72, INSTR_RRF_F0FF2 }, |
714 | { "lcdfr", 0x73, INSTR_RRE_FF }, | 714 | { "lcdfr", 0x73, INSTR_RRE_FF }, |
715 | { "ldgr", 0xc1, INSTR_RRE_FR }, | 715 | { "ldgr", 0xc1, INSTR_RRE_FR }, |
716 | { "lgdr", 0xcd, INSTR_RRE_RF }, | 716 | { "lgdr", 0xcd, INSTR_RRE_RF }, |
717 | { "adtr", 0xd2, INSTR_RRR_F0FF }, | 717 | { "adtr", 0xd2, INSTR_RRR_F0FF }, |
718 | { "axtr", 0xda, INSTR_RRR_F0FF }, | 718 | { "axtr", 0xda, INSTR_RRR_F0FF }, |
719 | { "cdtr", 0xe4, INSTR_RRE_FF }, | 719 | { "cdtr", 0xe4, INSTR_RRE_FF }, |
720 | { "cxtr", 0xec, INSTR_RRE_FF }, | 720 | { "cxtr", 0xec, INSTR_RRE_FF }, |
721 | { "kdtr", 0xe0, INSTR_RRE_FF }, | 721 | { "kdtr", 0xe0, INSTR_RRE_FF }, |
722 | { "kxtr", 0xe8, INSTR_RRE_FF }, | 722 | { "kxtr", 0xe8, INSTR_RRE_FF }, |
723 | { "cedtr", 0xf4, INSTR_RRE_FF }, | 723 | { "cedtr", 0xf4, INSTR_RRE_FF }, |
724 | { "cextr", 0xfc, INSTR_RRE_FF }, | 724 | { "cextr", 0xfc, INSTR_RRE_FF }, |
725 | { "cdgtr", 0xf1, INSTR_RRE_FR }, | 725 | { "cdgtr", 0xf1, INSTR_RRE_FR }, |
726 | { "cxgtr", 0xf9, INSTR_RRE_FR }, | 726 | { "cxgtr", 0xf9, INSTR_RRE_FR }, |
727 | { "cdstr", 0xf3, INSTR_RRE_FR }, | 727 | { "cdstr", 0xf3, INSTR_RRE_FR }, |
728 | { "cxstr", 0xfb, INSTR_RRE_FR }, | 728 | { "cxstr", 0xfb, INSTR_RRE_FR }, |
729 | { "cdutr", 0xf2, INSTR_RRE_FR }, | 729 | { "cdutr", 0xf2, INSTR_RRE_FR }, |
730 | { "cxutr", 0xfa, INSTR_RRE_FR }, | 730 | { "cxutr", 0xfa, INSTR_RRE_FR }, |
731 | { "cgdtr", 0xe1, INSTR_RRF_U0RF }, | 731 | { "cgdtr", 0xe1, INSTR_RRF_U0RF }, |
732 | { "cgxtr", 0xe9, INSTR_RRF_U0RF }, | 732 | { "cgxtr", 0xe9, INSTR_RRF_U0RF }, |
733 | { "csdtr", 0xe3, INSTR_RRE_RF }, | 733 | { "csdtr", 0xe3, INSTR_RRE_RF }, |
734 | { "csxtr", 0xeb, INSTR_RRE_RF }, | 734 | { "csxtr", 0xeb, INSTR_RRE_RF }, |
735 | { "cudtr", 0xe2, INSTR_RRE_RF }, | 735 | { "cudtr", 0xe2, INSTR_RRE_RF }, |
736 | { "cuxtr", 0xea, INSTR_RRE_RF }, | 736 | { "cuxtr", 0xea, INSTR_RRE_RF }, |
737 | { "ddtr", 0xd1, INSTR_RRR_F0FF }, | 737 | { "ddtr", 0xd1, INSTR_RRR_F0FF }, |
738 | { "dxtr", 0xd9, INSTR_RRR_F0FF }, | 738 | { "dxtr", 0xd9, INSTR_RRR_F0FF }, |
739 | { "eedtr", 0xe5, INSTR_RRE_RF }, | 739 | { "eedtr", 0xe5, INSTR_RRE_RF }, |
740 | { "eextr", 0xed, INSTR_RRE_RF }, | 740 | { "eextr", 0xed, INSTR_RRE_RF }, |
741 | { "esdtr", 0xe7, INSTR_RRE_RF }, | 741 | { "esdtr", 0xe7, INSTR_RRE_RF }, |
742 | { "esxtr", 0xef, INSTR_RRE_RF }, | 742 | { "esxtr", 0xef, INSTR_RRE_RF }, |
743 | { "iedtr", 0xf6, INSTR_RRF_F0FR }, | 743 | { "iedtr", 0xf6, INSTR_RRF_F0FR }, |
744 | { "iextr", 0xfe, INSTR_RRF_F0FR }, | 744 | { "iextr", 0xfe, INSTR_RRF_F0FR }, |
745 | { "ltdtr", 0xd6, INSTR_RRE_FF }, | 745 | { "ltdtr", 0xd6, INSTR_RRE_FF }, |
746 | { "ltxtr", 0xde, INSTR_RRE_FF }, | 746 | { "ltxtr", 0xde, INSTR_RRE_FF }, |
747 | { "fidtr", 0xd7, INSTR_RRF_UUFF }, | 747 | { "fidtr", 0xd7, INSTR_RRF_UUFF }, |
748 | { "fixtr", 0xdf, INSTR_RRF_UUFF }, | 748 | { "fixtr", 0xdf, INSTR_RRF_UUFF }, |
749 | { "ldetr", 0xd4, INSTR_RRF_0UFF }, | 749 | { "ldetr", 0xd4, INSTR_RRF_0UFF }, |
750 | { "lxdtr", 0xdc, INSTR_RRF_0UFF }, | 750 | { "lxdtr", 0xdc, INSTR_RRF_0UFF }, |
751 | { "ledtr", 0xd5, INSTR_RRF_UUFF }, | 751 | { "ledtr", 0xd5, INSTR_RRF_UUFF }, |
752 | { "ldxtr", 0xdd, INSTR_RRF_UUFF }, | 752 | { "ldxtr", 0xdd, INSTR_RRF_UUFF }, |
753 | { "mdtr", 0xd0, INSTR_RRR_F0FF }, | 753 | { "mdtr", 0xd0, INSTR_RRR_F0FF }, |
754 | { "mxtr", 0xd8, INSTR_RRR_F0FF }, | 754 | { "mxtr", 0xd8, INSTR_RRR_F0FF }, |
755 | { "qadtr", 0xf5, INSTR_RRF_FUFF }, | 755 | { "qadtr", 0xf5, INSTR_RRF_FUFF }, |
756 | { "qaxtr", 0xfd, INSTR_RRF_FUFF }, | 756 | { "qaxtr", 0xfd, INSTR_RRF_FUFF }, |
757 | { "rrdtr", 0xf7, INSTR_RRF_FFRU }, | 757 | { "rrdtr", 0xf7, INSTR_RRF_FFRU }, |
758 | { "rrxtr", 0xff, INSTR_RRF_FFRU }, | 758 | { "rrxtr", 0xff, INSTR_RRF_FFRU }, |
759 | { "sfasr", 0x85, INSTR_RRE_R0 }, | 759 | { "sfasr", 0x85, INSTR_RRE_R0 }, |
760 | { "sdtr", 0xd3, INSTR_RRR_F0FF }, | 760 | { "sdtr", 0xd3, INSTR_RRR_F0FF }, |
761 | { "sxtr", 0xdb, INSTR_RRR_F0FF }, | 761 | { "sxtr", 0xdb, INSTR_RRR_F0FF }, |
762 | #endif | 762 | #endif |
763 | { "lpebr", 0x00, INSTR_RRE_FF }, | 763 | { "lpebr", 0x00, INSTR_RRE_FF }, |
764 | { "lnebr", 0x01, INSTR_RRE_FF }, | 764 | { "lnebr", 0x01, INSTR_RRE_FF }, |
765 | { "ltebr", 0x02, INSTR_RRE_FF }, | 765 | { "ltebr", 0x02, INSTR_RRE_FF }, |
766 | { "lcebr", 0x03, INSTR_RRE_FF }, | 766 | { "lcebr", 0x03, INSTR_RRE_FF }, |
767 | { "ldebr", 0x04, INSTR_RRE_FF }, | 767 | { "ldebr", 0x04, INSTR_RRE_FF }, |
768 | { "lxdbr", 0x05, INSTR_RRE_FF }, | 768 | { "lxdbr", 0x05, INSTR_RRE_FF }, |
769 | { "lxebr", 0x06, INSTR_RRE_FF }, | 769 | { "lxebr", 0x06, INSTR_RRE_FF }, |
770 | { "mxdbr", 0x07, INSTR_RRE_FF }, | 770 | { "mxdbr", 0x07, INSTR_RRE_FF }, |
771 | { "kebr", 0x08, INSTR_RRE_FF }, | 771 | { "kebr", 0x08, INSTR_RRE_FF }, |
772 | { "cebr", 0x09, INSTR_RRE_FF }, | 772 | { "cebr", 0x09, INSTR_RRE_FF }, |
773 | { "aebr", 0x0a, INSTR_RRE_FF }, | 773 | { "aebr", 0x0a, INSTR_RRE_FF }, |
774 | { "sebr", 0x0b, INSTR_RRE_FF }, | 774 | { "sebr", 0x0b, INSTR_RRE_FF }, |
775 | { "mdebr", 0x0c, INSTR_RRE_FF }, | 775 | { "mdebr", 0x0c, INSTR_RRE_FF }, |
776 | { "debr", 0x0d, INSTR_RRE_FF }, | 776 | { "debr", 0x0d, INSTR_RRE_FF }, |
777 | { "maebr", 0x0e, INSTR_RRF_F0FF }, | 777 | { "maebr", 0x0e, INSTR_RRF_F0FF }, |
778 | { "msebr", 0x0f, INSTR_RRF_F0FF }, | 778 | { "msebr", 0x0f, INSTR_RRF_F0FF }, |
779 | { "lpdbr", 0x10, INSTR_RRE_FF }, | 779 | { "lpdbr", 0x10, INSTR_RRE_FF }, |
780 | { "lndbr", 0x11, INSTR_RRE_FF }, | 780 | { "lndbr", 0x11, INSTR_RRE_FF }, |
781 | { "ltdbr", 0x12, INSTR_RRE_FF }, | 781 | { "ltdbr", 0x12, INSTR_RRE_FF }, |
782 | { "lcdbr", 0x13, INSTR_RRE_FF }, | 782 | { "lcdbr", 0x13, INSTR_RRE_FF }, |
783 | { "sqebr", 0x14, INSTR_RRE_FF }, | 783 | { "sqebr", 0x14, INSTR_RRE_FF }, |
784 | { "sqdbr", 0x15, INSTR_RRE_FF }, | 784 | { "sqdbr", 0x15, INSTR_RRE_FF }, |
785 | { "sqxbr", 0x16, INSTR_RRE_FF }, | 785 | { "sqxbr", 0x16, INSTR_RRE_FF }, |
786 | { "meebr", 0x17, INSTR_RRE_FF }, | 786 | { "meebr", 0x17, INSTR_RRE_FF }, |
787 | { "kdbr", 0x18, INSTR_RRE_FF }, | 787 | { "kdbr", 0x18, INSTR_RRE_FF }, |
788 | { "cdbr", 0x19, INSTR_RRE_FF }, | 788 | { "cdbr", 0x19, INSTR_RRE_FF }, |
789 | { "adbr", 0x1a, INSTR_RRE_FF }, | 789 | { "adbr", 0x1a, INSTR_RRE_FF }, |
790 | { "sdbr", 0x1b, INSTR_RRE_FF }, | 790 | { "sdbr", 0x1b, INSTR_RRE_FF }, |
791 | { "mdbr", 0x1c, INSTR_RRE_FF }, | 791 | { "mdbr", 0x1c, INSTR_RRE_FF }, |
792 | { "ddbr", 0x1d, INSTR_RRE_FF }, | 792 | { "ddbr", 0x1d, INSTR_RRE_FF }, |
793 | { "madbr", 0x1e, INSTR_RRF_F0FF }, | 793 | { "madbr", 0x1e, INSTR_RRF_F0FF }, |
794 | { "msdbr", 0x1f, INSTR_RRF_F0FF }, | 794 | { "msdbr", 0x1f, INSTR_RRF_F0FF }, |
795 | { "lder", 0x24, INSTR_RRE_FF }, | 795 | { "lder", 0x24, INSTR_RRE_FF }, |
796 | { "lxdr", 0x25, INSTR_RRE_FF }, | 796 | { "lxdr", 0x25, INSTR_RRE_FF }, |
797 | { "lxer", 0x26, INSTR_RRE_FF }, | 797 | { "lxer", 0x26, INSTR_RRE_FF }, |
798 | { "maer", 0x2e, INSTR_RRF_F0FF }, | 798 | { "maer", 0x2e, INSTR_RRF_F0FF }, |
799 | { "mser", 0x2f, INSTR_RRF_F0FF }, | 799 | { "mser", 0x2f, INSTR_RRF_F0FF }, |
800 | { "sqxr", 0x36, INSTR_RRE_FF }, | 800 | { "sqxr", 0x36, INSTR_RRE_FF }, |
801 | { "meer", 0x37, INSTR_RRE_FF }, | 801 | { "meer", 0x37, INSTR_RRE_FF }, |
802 | { "madr", 0x3e, INSTR_RRF_F0FF }, | 802 | { "madr", 0x3e, INSTR_RRF_F0FF }, |
803 | { "msdr", 0x3f, INSTR_RRF_F0FF }, | 803 | { "msdr", 0x3f, INSTR_RRF_F0FF }, |
804 | { "lpxbr", 0x40, INSTR_RRE_FF }, | 804 | { "lpxbr", 0x40, INSTR_RRE_FF }, |
805 | { "lnxbr", 0x41, INSTR_RRE_FF }, | 805 | { "lnxbr", 0x41, INSTR_RRE_FF }, |
806 | { "ltxbr", 0x42, INSTR_RRE_FF }, | 806 | { "ltxbr", 0x42, INSTR_RRE_FF }, |
807 | { "lcxbr", 0x43, INSTR_RRE_FF }, | 807 | { "lcxbr", 0x43, INSTR_RRE_FF }, |
808 | { "ledbr", 0x44, INSTR_RRE_FF }, | 808 | { "ledbr", 0x44, INSTR_RRE_FF }, |
809 | { "ldxbr", 0x45, INSTR_RRE_FF }, | 809 | { "ldxbr", 0x45, INSTR_RRE_FF }, |
810 | { "lexbr", 0x46, INSTR_RRE_FF }, | 810 | { "lexbr", 0x46, INSTR_RRE_FF }, |
811 | { "fixbr", 0x47, INSTR_RRF_U0FF }, | 811 | { "fixbr", 0x47, INSTR_RRF_U0FF }, |
812 | { "kxbr", 0x48, INSTR_RRE_FF }, | 812 | { "kxbr", 0x48, INSTR_RRE_FF }, |
813 | { "cxbr", 0x49, INSTR_RRE_FF }, | 813 | { "cxbr", 0x49, INSTR_RRE_FF }, |
814 | { "axbr", 0x4a, INSTR_RRE_FF }, | 814 | { "axbr", 0x4a, INSTR_RRE_FF }, |
815 | { "sxbr", 0x4b, INSTR_RRE_FF }, | 815 | { "sxbr", 0x4b, INSTR_RRE_FF }, |
816 | { "mxbr", 0x4c, INSTR_RRE_FF }, | 816 | { "mxbr", 0x4c, INSTR_RRE_FF }, |
817 | { "dxbr", 0x4d, INSTR_RRE_FF }, | 817 | { "dxbr", 0x4d, INSTR_RRE_FF }, |
818 | { "tbedr", 0x50, INSTR_RRF_U0FF }, | 818 | { "tbedr", 0x50, INSTR_RRF_U0FF }, |
819 | { "tbdr", 0x51, INSTR_RRF_U0FF }, | 819 | { "tbdr", 0x51, INSTR_RRF_U0FF }, |
820 | { "diebr", 0x53, INSTR_RRF_FUFF }, | 820 | { "diebr", 0x53, INSTR_RRF_FUFF }, |
821 | { "fiebr", 0x57, INSTR_RRF_U0FF }, | 821 | { "fiebr", 0x57, INSTR_RRF_U0FF }, |
822 | { "thder", 0x58, INSTR_RRE_RR }, | 822 | { "thder", 0x58, INSTR_RRE_RR }, |
823 | { "thdr", 0x59, INSTR_RRE_RR }, | 823 | { "thdr", 0x59, INSTR_RRE_RR }, |
824 | { "didbr", 0x5b, INSTR_RRF_FUFF }, | 824 | { "didbr", 0x5b, INSTR_RRF_FUFF }, |
825 | { "fidbr", 0x5f, INSTR_RRF_U0FF }, | 825 | { "fidbr", 0x5f, INSTR_RRF_U0FF }, |
826 | { "lpxr", 0x60, INSTR_RRE_FF }, | 826 | { "lpxr", 0x60, INSTR_RRE_FF }, |
827 | { "lnxr", 0x61, INSTR_RRE_FF }, | 827 | { "lnxr", 0x61, INSTR_RRE_FF }, |
828 | { "ltxr", 0x62, INSTR_RRE_FF }, | 828 | { "ltxr", 0x62, INSTR_RRE_FF }, |
829 | { "lcxr", 0x63, INSTR_RRE_FF }, | 829 | { "lcxr", 0x63, INSTR_RRE_FF }, |
830 | { "lxr", 0x65, INSTR_RRE_RR }, | 830 | { "lxr", 0x65, INSTR_RRE_RR }, |
831 | { "lexr", 0x66, INSTR_RRE_FF }, | 831 | { "lexr", 0x66, INSTR_RRE_FF }, |
832 | { "fixr", 0x67, INSTR_RRF_U0FF }, | 832 | { "fixr", 0x67, INSTR_RRF_U0FF }, |
833 | { "cxr", 0x69, INSTR_RRE_FF }, | 833 | { "cxr", 0x69, INSTR_RRE_FF }, |
834 | { "lzer", 0x74, INSTR_RRE_R0 }, | 834 | { "lzer", 0x74, INSTR_RRE_R0 }, |
835 | { "lzdr", 0x75, INSTR_RRE_R0 }, | 835 | { "lzdr", 0x75, INSTR_RRE_R0 }, |
836 | { "lzxr", 0x76, INSTR_RRE_R0 }, | 836 | { "lzxr", 0x76, INSTR_RRE_R0 }, |
837 | { "fier", 0x77, INSTR_RRF_U0FF }, | 837 | { "fier", 0x77, INSTR_RRF_U0FF }, |
838 | { "fidr", 0x7f, INSTR_RRF_U0FF }, | 838 | { "fidr", 0x7f, INSTR_RRF_U0FF }, |
839 | { "sfpc", 0x84, INSTR_RRE_RR_OPT }, | 839 | { "sfpc", 0x84, INSTR_RRE_RR_OPT }, |
840 | { "efpc", 0x8c, INSTR_RRE_RR_OPT }, | 840 | { "efpc", 0x8c, INSTR_RRE_RR_OPT }, |
841 | { "cefbr", 0x94, INSTR_RRE_RF }, | 841 | { "cefbr", 0x94, INSTR_RRE_RF }, |
842 | { "cdfbr", 0x95, INSTR_RRE_RF }, | 842 | { "cdfbr", 0x95, INSTR_RRE_RF }, |
843 | { "cxfbr", 0x96, INSTR_RRE_RF }, | 843 | { "cxfbr", 0x96, INSTR_RRE_RF }, |
844 | { "cfebr", 0x98, INSTR_RRF_U0RF }, | 844 | { "cfebr", 0x98, INSTR_RRF_U0RF }, |
845 | { "cfdbr", 0x99, INSTR_RRF_U0RF }, | 845 | { "cfdbr", 0x99, INSTR_RRF_U0RF }, |
846 | { "cfxbr", 0x9a, INSTR_RRF_U0RF }, | 846 | { "cfxbr", 0x9a, INSTR_RRF_U0RF }, |
847 | { "cefr", 0xb4, INSTR_RRE_RF }, | 847 | { "cefr", 0xb4, INSTR_RRE_RF }, |
848 | { "cdfr", 0xb5, INSTR_RRE_RF }, | 848 | { "cdfr", 0xb5, INSTR_RRE_RF }, |
849 | { "cxfr", 0xb6, INSTR_RRE_RF }, | 849 | { "cxfr", 0xb6, INSTR_RRE_RF }, |
850 | { "", 0, INSTR_INVALID } | 850 | { "", 0, INSTR_INVALID } |
851 | }; | 851 | }; |
852 | 852 | ||
853 | static struct insn opcode_b9[] = { | 853 | static struct insn opcode_b9[] = { |
854 | #ifdef CONFIG_64BIT | 854 | #ifdef CONFIG_64BIT |
855 | { "lpgr", 0x00, INSTR_RRE_RR }, | 855 | { "lpgr", 0x00, INSTR_RRE_RR }, |
856 | { "lngr", 0x01, INSTR_RRE_RR }, | 856 | { "lngr", 0x01, INSTR_RRE_RR }, |
857 | { "ltgr", 0x02, INSTR_RRE_RR }, | 857 | { "ltgr", 0x02, INSTR_RRE_RR }, |
858 | { "lcgr", 0x03, INSTR_RRE_RR }, | 858 | { "lcgr", 0x03, INSTR_RRE_RR }, |
859 | { "lgr", 0x04, INSTR_RRE_RR }, | 859 | { "lgr", 0x04, INSTR_RRE_RR }, |
860 | { "lurag", 0x05, INSTR_RRE_RR }, | 860 | { "lurag", 0x05, INSTR_RRE_RR }, |
861 | { "lgbr", 0x06, INSTR_RRE_RR }, | 861 | { "lgbr", 0x06, INSTR_RRE_RR }, |
862 | { "lghr", 0x07, INSTR_RRE_RR }, | 862 | { "lghr", 0x07, INSTR_RRE_RR }, |
863 | { "agr", 0x08, INSTR_RRE_RR }, | 863 | { "agr", 0x08, INSTR_RRE_RR }, |
864 | { "sgr", 0x09, INSTR_RRE_RR }, | 864 | { "sgr", 0x09, INSTR_RRE_RR }, |
865 | { "algr", 0x0a, INSTR_RRE_RR }, | 865 | { "algr", 0x0a, INSTR_RRE_RR }, |
866 | { "slgr", 0x0b, INSTR_RRE_RR }, | 866 | { "slgr", 0x0b, INSTR_RRE_RR }, |
867 | { "msgr", 0x0c, INSTR_RRE_RR }, | 867 | { "msgr", 0x0c, INSTR_RRE_RR }, |
868 | { "dsgr", 0x0d, INSTR_RRE_RR }, | 868 | { "dsgr", 0x0d, INSTR_RRE_RR }, |
869 | { "eregg", 0x0e, INSTR_RRE_RR }, | 869 | { "eregg", 0x0e, INSTR_RRE_RR }, |
870 | { "lrvgr", 0x0f, INSTR_RRE_RR }, | 870 | { "lrvgr", 0x0f, INSTR_RRE_RR }, |
871 | { "lpgfr", 0x10, INSTR_RRE_RR }, | 871 | { "lpgfr", 0x10, INSTR_RRE_RR }, |
872 | { "lngfr", 0x11, INSTR_RRE_RR }, | 872 | { "lngfr", 0x11, INSTR_RRE_RR }, |
873 | { "ltgfr", 0x12, INSTR_RRE_RR }, | 873 | { "ltgfr", 0x12, INSTR_RRE_RR }, |
874 | { "lcgfr", 0x13, INSTR_RRE_RR }, | 874 | { "lcgfr", 0x13, INSTR_RRE_RR }, |
875 | { "lgfr", 0x14, INSTR_RRE_RR }, | 875 | { "lgfr", 0x14, INSTR_RRE_RR }, |
876 | { "llgfr", 0x16, INSTR_RRE_RR }, | 876 | { "llgfr", 0x16, INSTR_RRE_RR }, |
877 | { "llgtr", 0x17, INSTR_RRE_RR }, | 877 | { "llgtr", 0x17, INSTR_RRE_RR }, |
878 | { "agfr", 0x18, INSTR_RRE_RR }, | 878 | { "agfr", 0x18, INSTR_RRE_RR }, |
879 | { "sgfr", 0x19, INSTR_RRE_RR }, | 879 | { "sgfr", 0x19, INSTR_RRE_RR }, |
880 | { "algfr", 0x1a, INSTR_RRE_RR }, | 880 | { "algfr", 0x1a, INSTR_RRE_RR }, |
881 | { "slgfr", 0x1b, INSTR_RRE_RR }, | 881 | { "slgfr", 0x1b, INSTR_RRE_RR }, |
882 | { "msgfr", 0x1c, INSTR_RRE_RR }, | 882 | { "msgfr", 0x1c, INSTR_RRE_RR }, |
883 | { "dsgfr", 0x1d, INSTR_RRE_RR }, | 883 | { "dsgfr", 0x1d, INSTR_RRE_RR }, |
884 | { "cgr", 0x20, INSTR_RRE_RR }, | 884 | { "cgr", 0x20, INSTR_RRE_RR }, |
885 | { "clgr", 0x21, INSTR_RRE_RR }, | 885 | { "clgr", 0x21, INSTR_RRE_RR }, |
886 | { "sturg", 0x25, INSTR_RRE_RR }, | 886 | { "sturg", 0x25, INSTR_RRE_RR }, |
887 | { "lbr", 0x26, INSTR_RRE_RR }, | 887 | { "lbr", 0x26, INSTR_RRE_RR }, |
888 | { "lhr", 0x27, INSTR_RRE_RR }, | 888 | { "lhr", 0x27, INSTR_RRE_RR }, |
889 | { "cgfr", 0x30, INSTR_RRE_RR }, | 889 | { "cgfr", 0x30, INSTR_RRE_RR }, |
890 | { "clgfr", 0x31, INSTR_RRE_RR }, | 890 | { "clgfr", 0x31, INSTR_RRE_RR }, |
891 | { "bctgr", 0x46, INSTR_RRE_RR }, | 891 | { "bctgr", 0x46, INSTR_RRE_RR }, |
892 | { "ngr", 0x80, INSTR_RRE_RR }, | 892 | { "ngr", 0x80, INSTR_RRE_RR }, |
893 | { "ogr", 0x81, INSTR_RRE_RR }, | 893 | { "ogr", 0x81, INSTR_RRE_RR }, |
894 | { "xgr", 0x82, INSTR_RRE_RR }, | 894 | { "xgr", 0x82, INSTR_RRE_RR }, |
895 | { "flogr", 0x83, INSTR_RRE_RR }, | 895 | { "flogr", 0x83, INSTR_RRE_RR }, |
896 | { "llgcr", 0x84, INSTR_RRE_RR }, | 896 | { "llgcr", 0x84, INSTR_RRE_RR }, |
897 | { "llghr", 0x85, INSTR_RRE_RR }, | 897 | { "llghr", 0x85, INSTR_RRE_RR }, |
898 | { "mlgr", 0x86, INSTR_RRE_RR }, | 898 | { "mlgr", 0x86, INSTR_RRE_RR }, |
899 | { "dlgr", 0x87, INSTR_RRE_RR }, | 899 | { "dlgr", 0x87, INSTR_RRE_RR }, |
900 | { "alcgr", 0x88, INSTR_RRE_RR }, | 900 | { "alcgr", 0x88, INSTR_RRE_RR }, |
901 | { "slbgr", 0x89, INSTR_RRE_RR }, | 901 | { "slbgr", 0x89, INSTR_RRE_RR }, |
902 | { "cspg", 0x8a, INSTR_RRE_RR }, | 902 | { "cspg", 0x8a, INSTR_RRE_RR }, |
903 | { "idte", 0x8e, INSTR_RRF_R0RR }, | 903 | { "idte", 0x8e, INSTR_RRF_R0RR }, |
904 | { "llcr", 0x94, INSTR_RRE_RR }, | 904 | { "llcr", 0x94, INSTR_RRE_RR }, |
905 | { "llhr", 0x95, INSTR_RRE_RR }, | 905 | { "llhr", 0x95, INSTR_RRE_RR }, |
906 | { "esea", 0x9d, INSTR_RRE_R0 }, | 906 | { "esea", 0x9d, INSTR_RRE_R0 }, |
907 | { "lptea", 0xaa, INSTR_RRF_RURR }, | 907 | { "lptea", 0xaa, INSTR_RRF_RURR }, |
908 | { "cu14", 0xb0, INSTR_RRF_M0RR }, | 908 | { "cu14", 0xb0, INSTR_RRF_M0RR }, |
909 | { "cu24", 0xb1, INSTR_RRF_M0RR }, | 909 | { "cu24", 0xb1, INSTR_RRF_M0RR }, |
910 | { "cu41", 0xb2, INSTR_RRF_M0RR }, | 910 | { "cu41", 0xb2, INSTR_RRF_M0RR }, |
911 | { "cu42", 0xb3, INSTR_RRF_M0RR }, | 911 | { "cu42", 0xb3, INSTR_RRF_M0RR }, |
912 | { "crt", 0x72, INSTR_RRF_U0RR }, | 912 | { "crt", 0x72, INSTR_RRF_U0RR }, |
913 | { "cgrt", 0x60, INSTR_RRF_U0RR }, | 913 | { "cgrt", 0x60, INSTR_RRF_U0RR }, |
914 | { "clrt", 0x73, INSTR_RRF_U0RR }, | 914 | { "clrt", 0x73, INSTR_RRF_U0RR }, |
915 | { "clgrt", 0x61, INSTR_RRF_U0RR }, | 915 | { "clgrt", 0x61, INSTR_RRF_U0RR }, |
916 | { "ptf", 0xa2, INSTR_RRE_R0 }, | 916 | { "ptf", 0xa2, INSTR_RRE_R0 }, |
917 | { "pfmf", 0xaf, INSTR_RRE_RR }, | 917 | { "pfmf", 0xaf, INSTR_RRE_RR }, |
918 | { "trte", 0xbf, INSTR_RRF_M0RR }, | 918 | { "trte", 0xbf, INSTR_RRF_M0RR }, |
919 | { "trtre", 0xbd, INSTR_RRF_M0RR }, | 919 | { "trtre", 0xbd, INSTR_RRF_M0RR }, |
920 | { "ahhhr", 0xc8, INSTR_RRF_R0RR2 }, | 920 | { "ahhhr", 0xc8, INSTR_RRF_R0RR2 }, |
921 | { "shhhr", 0xc9, INSTR_RRF_R0RR2 }, | 921 | { "shhhr", 0xc9, INSTR_RRF_R0RR2 }, |
922 | { "alhhh", 0xca, INSTR_RRF_R0RR2 }, | 922 | { "alhhh", 0xca, INSTR_RRF_R0RR2 }, |
923 | { "alhhl", 0xca, INSTR_RRF_R0RR2 }, | 923 | { "alhhl", 0xca, INSTR_RRF_R0RR2 }, |
924 | { "slhhh", 0xcb, INSTR_RRF_R0RR2 }, | 924 | { "slhhh", 0xcb, INSTR_RRF_R0RR2 }, |
925 | { "chhr ", 0xcd, INSTR_RRE_RR }, | 925 | { "chhr ", 0xcd, INSTR_RRE_RR }, |
926 | { "clhhr", 0xcf, INSTR_RRE_RR }, | 926 | { "clhhr", 0xcf, INSTR_RRE_RR }, |
927 | { "ahhlr", 0xd8, INSTR_RRF_R0RR2 }, | 927 | { "ahhlr", 0xd8, INSTR_RRF_R0RR2 }, |
928 | { "shhlr", 0xd9, INSTR_RRF_R0RR2 }, | 928 | { "shhlr", 0xd9, INSTR_RRF_R0RR2 }, |
929 | { "slhhl", 0xdb, INSTR_RRF_R0RR2 }, | 929 | { "slhhl", 0xdb, INSTR_RRF_R0RR2 }, |
930 | { "chlr", 0xdd, INSTR_RRE_RR }, | 930 | { "chlr", 0xdd, INSTR_RRE_RR }, |
931 | { "clhlr", 0xdf, INSTR_RRE_RR }, | 931 | { "clhlr", 0xdf, INSTR_RRE_RR }, |
932 | { { 0, LONG_INSN_POPCNT }, 0xe1, INSTR_RRE_RR }, | 932 | { { 0, LONG_INSN_POPCNT }, 0xe1, INSTR_RRE_RR }, |
933 | { "locgr", 0xe2, INSTR_RRF_M0RR }, | 933 | { "locgr", 0xe2, INSTR_RRF_M0RR }, |
934 | { "ngrk", 0xe4, INSTR_RRF_R0RR2 }, | 934 | { "ngrk", 0xe4, INSTR_RRF_R0RR2 }, |
935 | { "ogrk", 0xe6, INSTR_RRF_R0RR2 }, | 935 | { "ogrk", 0xe6, INSTR_RRF_R0RR2 }, |
936 | { "xgrk", 0xe7, INSTR_RRF_R0RR2 }, | 936 | { "xgrk", 0xe7, INSTR_RRF_R0RR2 }, |
937 | { "agrk", 0xe8, INSTR_RRF_R0RR2 }, | 937 | { "agrk", 0xe8, INSTR_RRF_R0RR2 }, |
938 | { "sgrk", 0xe9, INSTR_RRF_R0RR2 }, | 938 | { "sgrk", 0xe9, INSTR_RRF_R0RR2 }, |
939 | { "algrk", 0xea, INSTR_RRF_R0RR2 }, | 939 | { "algrk", 0xea, INSTR_RRF_R0RR2 }, |
940 | { "slgrk", 0xeb, INSTR_RRF_R0RR2 }, | 940 | { "slgrk", 0xeb, INSTR_RRF_R0RR2 }, |
941 | { "locr", 0xf2, INSTR_RRF_M0RR }, | 941 | { "locr", 0xf2, INSTR_RRF_M0RR }, |
942 | { "nrk", 0xf4, INSTR_RRF_R0RR2 }, | 942 | { "nrk", 0xf4, INSTR_RRF_R0RR2 }, |
943 | { "ork", 0xf6, INSTR_RRF_R0RR2 }, | 943 | { "ork", 0xf6, INSTR_RRF_R0RR2 }, |
944 | { "xrk", 0xf7, INSTR_RRF_R0RR2 }, | 944 | { "xrk", 0xf7, INSTR_RRF_R0RR2 }, |
945 | { "ark", 0xf8, INSTR_RRF_R0RR2 }, | 945 | { "ark", 0xf8, INSTR_RRF_R0RR2 }, |
946 | { "srk", 0xf9, INSTR_RRF_R0RR2 }, | 946 | { "srk", 0xf9, INSTR_RRF_R0RR2 }, |
947 | { "alrk", 0xfa, INSTR_RRF_R0RR2 }, | 947 | { "alrk", 0xfa, INSTR_RRF_R0RR2 }, |
948 | { "slrk", 0xfb, INSTR_RRF_R0RR2 }, | 948 | { "slrk", 0xfb, INSTR_RRF_R0RR2 }, |
949 | #endif | 949 | #endif |
950 | { "kmac", 0x1e, INSTR_RRE_RR }, | 950 | { "kmac", 0x1e, INSTR_RRE_RR }, |
951 | { "lrvr", 0x1f, INSTR_RRE_RR }, | 951 | { "lrvr", 0x1f, INSTR_RRE_RR }, |
952 | { "km", 0x2e, INSTR_RRE_RR }, | 952 | { "km", 0x2e, INSTR_RRE_RR }, |
953 | { "kmc", 0x2f, INSTR_RRE_RR }, | 953 | { "kmc", 0x2f, INSTR_RRE_RR }, |
954 | { "kimd", 0x3e, INSTR_RRE_RR }, | 954 | { "kimd", 0x3e, INSTR_RRE_RR }, |
955 | { "klmd", 0x3f, INSTR_RRE_RR }, | 955 | { "klmd", 0x3f, INSTR_RRE_RR }, |
956 | { "epsw", 0x8d, INSTR_RRE_RR }, | 956 | { "epsw", 0x8d, INSTR_RRE_RR }, |
957 | { "trtt", 0x90, INSTR_RRE_RR }, | 957 | { "trtt", 0x90, INSTR_RRE_RR }, |
958 | { "trtt", 0x90, INSTR_RRF_M0RR }, | 958 | { "trtt", 0x90, INSTR_RRF_M0RR }, |
959 | { "trto", 0x91, INSTR_RRE_RR }, | 959 | { "trto", 0x91, INSTR_RRE_RR }, |
960 | { "trto", 0x91, INSTR_RRF_M0RR }, | 960 | { "trto", 0x91, INSTR_RRF_M0RR }, |
961 | { "trot", 0x92, INSTR_RRE_RR }, | 961 | { "trot", 0x92, INSTR_RRE_RR }, |
962 | { "trot", 0x92, INSTR_RRF_M0RR }, | 962 | { "trot", 0x92, INSTR_RRF_M0RR }, |
963 | { "troo", 0x93, INSTR_RRE_RR }, | 963 | { "troo", 0x93, INSTR_RRE_RR }, |
964 | { "troo", 0x93, INSTR_RRF_M0RR }, | 964 | { "troo", 0x93, INSTR_RRF_M0RR }, |
965 | { "mlr", 0x96, INSTR_RRE_RR }, | 965 | { "mlr", 0x96, INSTR_RRE_RR }, |
966 | { "dlr", 0x97, INSTR_RRE_RR }, | 966 | { "dlr", 0x97, INSTR_RRE_RR }, |
967 | { "alcr", 0x98, INSTR_RRE_RR }, | 967 | { "alcr", 0x98, INSTR_RRE_RR }, |
968 | { "slbr", 0x99, INSTR_RRE_RR }, | 968 | { "slbr", 0x99, INSTR_RRE_RR }, |
969 | { "", 0, INSTR_INVALID } | 969 | { "", 0, INSTR_INVALID } |
970 | }; | 970 | }; |
971 | 971 | ||
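A note on the { { 0, LONG_INSN_POPCNT }, 0xe1, INSTR_RRE_RR } style entries in the table above: mnemonics that do not fit the fixed-width name field of struct insn are stored indirectly, with name[0] left at 0 as a marker and name[1] carrying an index into a separate table of long names. The following is a minimal sketch of that encoding, not the exact dis.c code; the enum values, the long_insn_name[] contents, and the helper print_insn_name() are illustrative assumptions.

	/*
	 * Sketch under stated assumptions: a 5-byte name field, with longer
	 * mnemonics spilled into a separate string table. The real dis.c
	 * defines its own enum of LONG_INSN_* values and long-name list.
	 */
	#include <stdio.h>

	struct insn {
		const char name[5];	/* short mnemonic, or { 0, index } */
		unsigned char opfrag;	/* secondary opcode within the insn */
		unsigned char format;	/* operand format, e.g. INSTR_RRE_RR */
	};

	enum { LONG_INSN_POPCNT, LONG_INSN_LLGFRL };	/* illustrative */

	static const char * const long_insn_name[] = {
		[LONG_INSN_POPCNT] = "popcnt",
		[LONG_INSN_LLGFRL] = "llgfrl",
	};

	static void print_insn_name(const struct insn *insn)
	{
		if (insn->name[0] == '\0')
			/* long mnemonic: name[1] holds the table index */
			printf("%s", long_insn_name[(unsigned char) insn->name[1]]);
		else
			/* short mnemonic; %.5s because a 5-char name has no NUL */
			printf("%.5s", insn->name);
	}

This keeps every table entry the same small size while still allowing mnemonics longer than the name array; an initializer such as { 0, LONG_INSN_POPCNT } simply fills name[0] and name[1].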
972 | static struct insn opcode_c0[] = { | 972 | static struct insn opcode_c0[] = { |
973 | #ifdef CONFIG_64BIT | 973 | #ifdef CONFIG_64BIT |
974 | { "lgfi", 0x01, INSTR_RIL_RI }, | 974 | { "lgfi", 0x01, INSTR_RIL_RI }, |
975 | { "xihf", 0x06, INSTR_RIL_RU }, | 975 | { "xihf", 0x06, INSTR_RIL_RU }, |
976 | { "xilf", 0x07, INSTR_RIL_RU }, | 976 | { "xilf", 0x07, INSTR_RIL_RU }, |
977 | { "iihf", 0x08, INSTR_RIL_RU }, | 977 | { "iihf", 0x08, INSTR_RIL_RU }, |
978 | { "iilf", 0x09, INSTR_RIL_RU }, | 978 | { "iilf", 0x09, INSTR_RIL_RU }, |
979 | { "nihf", 0x0a, INSTR_RIL_RU }, | 979 | { "nihf", 0x0a, INSTR_RIL_RU }, |
980 | { "nilf", 0x0b, INSTR_RIL_RU }, | 980 | { "nilf", 0x0b, INSTR_RIL_RU }, |
981 | { "oihf", 0x0c, INSTR_RIL_RU }, | 981 | { "oihf", 0x0c, INSTR_RIL_RU }, |
982 | { "oilf", 0x0d, INSTR_RIL_RU }, | 982 | { "oilf", 0x0d, INSTR_RIL_RU }, |
983 | { "llihf", 0x0e, INSTR_RIL_RU }, | 983 | { "llihf", 0x0e, INSTR_RIL_RU }, |
984 | { "llilf", 0x0f, INSTR_RIL_RU }, | 984 | { "llilf", 0x0f, INSTR_RIL_RU }, |
985 | #endif | 985 | #endif |
986 | { "larl", 0x00, INSTR_RIL_RP }, | 986 | { "larl", 0x00, INSTR_RIL_RP }, |
987 | { "brcl", 0x04, INSTR_RIL_UP }, | 987 | { "brcl", 0x04, INSTR_RIL_UP }, |
988 | { "brasl", 0x05, INSTR_RIL_RP }, | 988 | { "brasl", 0x05, INSTR_RIL_RP }, |
989 | { "", 0, INSTR_INVALID } | 989 | { "", 0, INSTR_INVALID } |
990 | }; | 990 | }; |
991 | 991 | ||
992 | static struct insn opcode_c2[] = { | 992 | static struct insn opcode_c2[] = { |
993 | #ifdef CONFIG_64BIT | 993 | #ifdef CONFIG_64BIT |
994 | { "slgfi", 0x04, INSTR_RIL_RU }, | 994 | { "slgfi", 0x04, INSTR_RIL_RU }, |
995 | { "slfi", 0x05, INSTR_RIL_RU }, | 995 | { "slfi", 0x05, INSTR_RIL_RU }, |
996 | { "agfi", 0x08, INSTR_RIL_RI }, | 996 | { "agfi", 0x08, INSTR_RIL_RI }, |
997 | { "afi", 0x09, INSTR_RIL_RI }, | 997 | { "afi", 0x09, INSTR_RIL_RI }, |
998 | { "algfi", 0x0a, INSTR_RIL_RU }, | 998 | { "algfi", 0x0a, INSTR_RIL_RU }, |
999 | { "alfi", 0x0b, INSTR_RIL_RU }, | 999 | { "alfi", 0x0b, INSTR_RIL_RU }, |
1000 | { "cgfi", 0x0c, INSTR_RIL_RI }, | 1000 | { "cgfi", 0x0c, INSTR_RIL_RI }, |
1001 | { "cfi", 0x0d, INSTR_RIL_RI }, | 1001 | { "cfi", 0x0d, INSTR_RIL_RI }, |
1002 | { "clgfi", 0x0e, INSTR_RIL_RU }, | 1002 | { "clgfi", 0x0e, INSTR_RIL_RU }, |
1003 | { "clfi", 0x0f, INSTR_RIL_RU }, | 1003 | { "clfi", 0x0f, INSTR_RIL_RU }, |
1004 | { "msfi", 0x01, INSTR_RIL_RI }, | 1004 | { "msfi", 0x01, INSTR_RIL_RI }, |
1005 | { "msgfi", 0x00, INSTR_RIL_RI }, | 1005 | { "msgfi", 0x00, INSTR_RIL_RI }, |
1006 | #endif | 1006 | #endif |
1007 | { "", 0, INSTR_INVALID } | 1007 | { "", 0, INSTR_INVALID } |
1008 | }; | 1008 | }; |
1009 | 1009 | ||
1010 | static struct insn opcode_c4[] = { | 1010 | static struct insn opcode_c4[] = { |
1011 | #ifdef CONFIG_64BIT | 1011 | #ifdef CONFIG_64BIT |
1012 | { "lrl", 0x0d, INSTR_RIL_RP }, | 1012 | { "lrl", 0x0d, INSTR_RIL_RP }, |
1013 | { "lgrl", 0x08, INSTR_RIL_RP }, | 1013 | { "lgrl", 0x08, INSTR_RIL_RP }, |
1014 | { "lgfrl", 0x0c, INSTR_RIL_RP }, | 1014 | { "lgfrl", 0x0c, INSTR_RIL_RP }, |
1015 | { "lhrl", 0x05, INSTR_RIL_RP }, | 1015 | { "lhrl", 0x05, INSTR_RIL_RP }, |
1016 | { "lghrl", 0x04, INSTR_RIL_RP }, | 1016 | { "lghrl", 0x04, INSTR_RIL_RP }, |
1017 | { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP }, | 1017 | { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP }, |
1018 | { "llhrl", 0x02, INSTR_RIL_RP }, | 1018 | { "llhrl", 0x02, INSTR_RIL_RP }, |
1019 | { { 0, LONG_INSN_LLGHRL }, 0x06, INSTR_RIL_RP }, | 1019 | { { 0, LONG_INSN_LLGHRL }, 0x06, INSTR_RIL_RP }, |
1020 | { "strl", 0x0f, INSTR_RIL_RP }, | 1020 | { "strl", 0x0f, INSTR_RIL_RP }, |
1021 | { "stgrl", 0x0b, INSTR_RIL_RP }, | 1021 | { "stgrl", 0x0b, INSTR_RIL_RP }, |
1022 | { "sthrl", 0x07, INSTR_RIL_RP }, | 1022 | { "sthrl", 0x07, INSTR_RIL_RP }, |
1023 | #endif | 1023 | #endif |
1024 | { "", 0, INSTR_INVALID } | 1024 | { "", 0, INSTR_INVALID } |
1025 | }; | 1025 | }; |
1026 | 1026 | ||
1027 | static struct insn opcode_c6[] = { | 1027 | static struct insn opcode_c6[] = { |
1028 | #ifdef CONFIG_64BIT | 1028 | #ifdef CONFIG_64BIT |
1029 | { "crl", 0x0d, INSTR_RIL_RP }, | 1029 | { "crl", 0x0d, INSTR_RIL_RP }, |
1030 | { "cgrl", 0x08, INSTR_RIL_RP }, | 1030 | { "cgrl", 0x08, INSTR_RIL_RP }, |
1031 | { "cgfrl", 0x0c, INSTR_RIL_RP }, | 1031 | { "cgfrl", 0x0c, INSTR_RIL_RP }, |
1032 | { "chrl", 0x05, INSTR_RIL_RP }, | 1032 | { "chrl", 0x05, INSTR_RIL_RP }, |
1033 | { "cghrl", 0x04, INSTR_RIL_RP }, | 1033 | { "cghrl", 0x04, INSTR_RIL_RP }, |
1034 | { "clrl", 0x0f, INSTR_RIL_RP }, | 1034 | { "clrl", 0x0f, INSTR_RIL_RP }, |
1035 | { "clgrl", 0x0a, INSTR_RIL_RP }, | 1035 | { "clgrl", 0x0a, INSTR_RIL_RP }, |
1036 | { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP }, | 1036 | { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP }, |
1037 | { "clhrl", 0x07, INSTR_RIL_RP }, | 1037 | { "clhrl", 0x07, INSTR_RIL_RP }, |
1038 | { { 0, LONG_INSN_CLGHRL }, 0x06, INSTR_RIL_RP }, | 1038 | { { 0, LONG_INSN_CLGHRL }, 0x06, INSTR_RIL_RP }, |
1039 | { "pfdrl", 0x02, INSTR_RIL_UP }, | 1039 | { "pfdrl", 0x02, INSTR_RIL_UP }, |
1040 | { "exrl", 0x00, INSTR_RIL_RP }, | 1040 | { "exrl", 0x00, INSTR_RIL_RP }, |
1041 | #endif | 1041 | #endif |
1042 | { "", 0, INSTR_INVALID } | 1042 | { "", 0, INSTR_INVALID } |
1043 | }; | 1043 | }; |
1044 | 1044 | ||
1045 | static struct insn opcode_c8[] = { | 1045 | static struct insn opcode_c8[] = { |
1046 | #ifdef CONFIG_64BIT | 1046 | #ifdef CONFIG_64BIT |
1047 | { "mvcos", 0x00, INSTR_SSF_RRDRD }, | 1047 | { "mvcos", 0x00, INSTR_SSF_RRDRD }, |
1048 | { "ectg", 0x01, INSTR_SSF_RRDRD }, | 1048 | { "ectg", 0x01, INSTR_SSF_RRDRD }, |
1049 | { "csst", 0x02, INSTR_SSF_RRDRD }, | 1049 | { "csst", 0x02, INSTR_SSF_RRDRD }, |
1050 | { "lpd", 0x04, INSTR_SSF_RRDRD2 }, | 1050 | { "lpd", 0x04, INSTR_SSF_RRDRD2 }, |
1051 | { "lpdg ", 0x05, INSTR_SSF_RRDRD2 }, | 1051 | { "lpdg ", 0x05, INSTR_SSF_RRDRD2 }, |
1052 | #endif | 1052 | #endif |
1053 | { "", 0, INSTR_INVALID } | 1053 | { "", 0, INSTR_INVALID } |
1054 | }; | 1054 | }; |
1055 | 1055 | ||
1056 | static struct insn opcode_cc[] = { | 1056 | static struct insn opcode_cc[] = { |
1057 | #ifdef CONFIG_64BIT | 1057 | #ifdef CONFIG_64BIT |
1058 | { "brcth", 0x06, INSTR_RIL_RP }, | 1058 | { "brcth", 0x06, INSTR_RIL_RP }, |
1059 | { "aih", 0x08, INSTR_RIL_RI }, | 1059 | { "aih", 0x08, INSTR_RIL_RI }, |
1060 | { "alsih", 0x0a, INSTR_RIL_RI }, | 1060 | { "alsih", 0x0a, INSTR_RIL_RI }, |
1061 | { "alsih", 0x0b, INSTR_RIL_RI }, | 1061 | { "alsih", 0x0b, INSTR_RIL_RI }, |
1062 | { "cih", 0x0d, INSTR_RIL_RI }, | 1062 | { "cih", 0x0d, INSTR_RIL_RI }, |
1063 | { "clih ", 0x0f, INSTR_RIL_RI }, | 1063 | { "clih ", 0x0f, INSTR_RIL_RI }, |
1064 | #endif | 1064 | #endif |
1065 | { "", 0, INSTR_INVALID } | 1065 | { "", 0, INSTR_INVALID } |
1066 | }; | 1066 | }; |
1067 | 1067 | ||
1068 | static struct insn opcode_e3[] = { | 1068 | static struct insn opcode_e3[] = { |
1069 | #ifdef CONFIG_64BIT | 1069 | #ifdef CONFIG_64BIT |
1070 | { "ltg", 0x02, INSTR_RXY_RRRD }, | 1070 | { "ltg", 0x02, INSTR_RXY_RRRD }, |
1071 | { "lrag", 0x03, INSTR_RXY_RRRD }, | 1071 | { "lrag", 0x03, INSTR_RXY_RRRD }, |
1072 | { "lg", 0x04, INSTR_RXY_RRRD }, | 1072 | { "lg", 0x04, INSTR_RXY_RRRD }, |
1073 | { "cvby", 0x06, INSTR_RXY_RRRD }, | 1073 | { "cvby", 0x06, INSTR_RXY_RRRD }, |
1074 | { "ag", 0x08, INSTR_RXY_RRRD }, | 1074 | { "ag", 0x08, INSTR_RXY_RRRD }, |
1075 | { "sg", 0x09, INSTR_RXY_RRRD }, | 1075 | { "sg", 0x09, INSTR_RXY_RRRD }, |
1076 | { "alg", 0x0a, INSTR_RXY_RRRD }, | 1076 | { "alg", 0x0a, INSTR_RXY_RRRD }, |
1077 | { "slg", 0x0b, INSTR_RXY_RRRD }, | 1077 | { "slg", 0x0b, INSTR_RXY_RRRD }, |
1078 | { "msg", 0x0c, INSTR_RXY_RRRD }, | 1078 | { "msg", 0x0c, INSTR_RXY_RRRD }, |
1079 | { "dsg", 0x0d, INSTR_RXY_RRRD }, | 1079 | { "dsg", 0x0d, INSTR_RXY_RRRD }, |
1080 | { "cvbg", 0x0e, INSTR_RXY_RRRD }, | 1080 | { "cvbg", 0x0e, INSTR_RXY_RRRD }, |
1081 | { "lrvg", 0x0f, INSTR_RXY_RRRD }, | 1081 | { "lrvg", 0x0f, INSTR_RXY_RRRD }, |
1082 | { "lt", 0x12, INSTR_RXY_RRRD }, | 1082 | { "lt", 0x12, INSTR_RXY_RRRD }, |
1083 | { "lray", 0x13, INSTR_RXY_RRRD }, | 1083 | { "lray", 0x13, INSTR_RXY_RRRD }, |
1084 | { "lgf", 0x14, INSTR_RXY_RRRD }, | 1084 | { "lgf", 0x14, INSTR_RXY_RRRD }, |
1085 | { "lgh", 0x15, INSTR_RXY_RRRD }, | 1085 | { "lgh", 0x15, INSTR_RXY_RRRD }, |
1086 | { "llgf", 0x16, INSTR_RXY_RRRD }, | 1086 | { "llgf", 0x16, INSTR_RXY_RRRD }, |
1087 | { "llgt", 0x17, INSTR_RXY_RRRD }, | 1087 | { "llgt", 0x17, INSTR_RXY_RRRD }, |
1088 | { "agf", 0x18, INSTR_RXY_RRRD }, | 1088 | { "agf", 0x18, INSTR_RXY_RRRD }, |
1089 | { "sgf", 0x19, INSTR_RXY_RRRD }, | 1089 | { "sgf", 0x19, INSTR_RXY_RRRD }, |
1090 | { "algf", 0x1a, INSTR_RXY_RRRD }, | 1090 | { "algf", 0x1a, INSTR_RXY_RRRD }, |
1091 | { "slgf", 0x1b, INSTR_RXY_RRRD }, | 1091 | { "slgf", 0x1b, INSTR_RXY_RRRD }, |
1092 | { "msgf", 0x1c, INSTR_RXY_RRRD }, | 1092 | { "msgf", 0x1c, INSTR_RXY_RRRD }, |
1093 | { "dsgf", 0x1d, INSTR_RXY_RRRD }, | 1093 | { "dsgf", 0x1d, INSTR_RXY_RRRD }, |
1094 | { "cg", 0x20, INSTR_RXY_RRRD }, | 1094 | { "cg", 0x20, INSTR_RXY_RRRD }, |
1095 | { "clg", 0x21, INSTR_RXY_RRRD }, | 1095 | { "clg", 0x21, INSTR_RXY_RRRD }, |
1096 | { "stg", 0x24, INSTR_RXY_RRRD }, | 1096 | { "stg", 0x24, INSTR_RXY_RRRD }, |
1097 | { "cvdy", 0x26, INSTR_RXY_RRRD }, | 1097 | { "cvdy", 0x26, INSTR_RXY_RRRD }, |
1098 | { "cvdg", 0x2e, INSTR_RXY_RRRD }, | 1098 | { "cvdg", 0x2e, INSTR_RXY_RRRD }, |
1099 | { "strvg", 0x2f, INSTR_RXY_RRRD }, | 1099 | { "strvg", 0x2f, INSTR_RXY_RRRD }, |
1100 | { "cgf", 0x30, INSTR_RXY_RRRD }, | 1100 | { "cgf", 0x30, INSTR_RXY_RRRD }, |
1101 | { "clgf", 0x31, INSTR_RXY_RRRD }, | 1101 | { "clgf", 0x31, INSTR_RXY_RRRD }, |
1102 | { "strvh", 0x3f, INSTR_RXY_RRRD }, | 1102 | { "strvh", 0x3f, INSTR_RXY_RRRD }, |
1103 | { "bctg", 0x46, INSTR_RXY_RRRD }, | 1103 | { "bctg", 0x46, INSTR_RXY_RRRD }, |
1104 | { "sty", 0x50, INSTR_RXY_RRRD }, | 1104 | { "sty", 0x50, INSTR_RXY_RRRD }, |
1105 | { "msy", 0x51, INSTR_RXY_RRRD }, | 1105 | { "msy", 0x51, INSTR_RXY_RRRD }, |
1106 | { "ny", 0x54, INSTR_RXY_RRRD }, | 1106 | { "ny", 0x54, INSTR_RXY_RRRD }, |
1107 | { "cly", 0x55, INSTR_RXY_RRRD }, | 1107 | { "cly", 0x55, INSTR_RXY_RRRD }, |
1108 | { "oy", 0x56, INSTR_RXY_RRRD }, | 1108 | { "oy", 0x56, INSTR_RXY_RRRD }, |
1109 | { "xy", 0x57, INSTR_RXY_RRRD }, | 1109 | { "xy", 0x57, INSTR_RXY_RRRD }, |
1110 | { "ly", 0x58, INSTR_RXY_RRRD }, | 1110 | { "ly", 0x58, INSTR_RXY_RRRD }, |
1111 | { "cy", 0x59, INSTR_RXY_RRRD }, | 1111 | { "cy", 0x59, INSTR_RXY_RRRD }, |
1112 | { "ay", 0x5a, INSTR_RXY_RRRD }, | 1112 | { "ay", 0x5a, INSTR_RXY_RRRD }, |
1113 | { "sy", 0x5b, INSTR_RXY_RRRD }, | 1113 | { "sy", 0x5b, INSTR_RXY_RRRD }, |
1114 | { "aly", 0x5e, INSTR_RXY_RRRD }, | 1114 | { "aly", 0x5e, INSTR_RXY_RRRD }, |
1115 | { "sly", 0x5f, INSTR_RXY_RRRD }, | 1115 | { "sly", 0x5f, INSTR_RXY_RRRD }, |
1116 | { "sthy", 0x70, INSTR_RXY_RRRD }, | 1116 | { "sthy", 0x70, INSTR_RXY_RRRD }, |
1117 | { "lay", 0x71, INSTR_RXY_RRRD }, | 1117 | { "lay", 0x71, INSTR_RXY_RRRD }, |
1118 | { "stcy", 0x72, INSTR_RXY_RRRD }, | 1118 | { "stcy", 0x72, INSTR_RXY_RRRD }, |
1119 | { "icy", 0x73, INSTR_RXY_RRRD }, | 1119 | { "icy", 0x73, INSTR_RXY_RRRD }, |
1120 | { "lb", 0x76, INSTR_RXY_RRRD }, | 1120 | { "lb", 0x76, INSTR_RXY_RRRD }, |
1121 | { "lgb", 0x77, INSTR_RXY_RRRD }, | 1121 | { "lgb", 0x77, INSTR_RXY_RRRD }, |
1122 | { "lhy", 0x78, INSTR_RXY_RRRD }, | 1122 | { "lhy", 0x78, INSTR_RXY_RRRD }, |
1123 | { "chy", 0x79, INSTR_RXY_RRRD }, | 1123 | { "chy", 0x79, INSTR_RXY_RRRD }, |
1124 | { "ahy", 0x7a, INSTR_RXY_RRRD }, | 1124 | { "ahy", 0x7a, INSTR_RXY_RRRD }, |
1125 | { "shy", 0x7b, INSTR_RXY_RRRD }, | 1125 | { "shy", 0x7b, INSTR_RXY_RRRD }, |
1126 | { "ng", 0x80, INSTR_RXY_RRRD }, | 1126 | { "ng", 0x80, INSTR_RXY_RRRD }, |
1127 | { "og", 0x81, INSTR_RXY_RRRD }, | 1127 | { "og", 0x81, INSTR_RXY_RRRD }, |
1128 | { "xg", 0x82, INSTR_RXY_RRRD }, | 1128 | { "xg", 0x82, INSTR_RXY_RRRD }, |
1129 | { "mlg", 0x86, INSTR_RXY_RRRD }, | 1129 | { "mlg", 0x86, INSTR_RXY_RRRD }, |
1130 | { "dlg", 0x87, INSTR_RXY_RRRD }, | 1130 | { "dlg", 0x87, INSTR_RXY_RRRD }, |
1131 | { "alcg", 0x88, INSTR_RXY_RRRD }, | 1131 | { "alcg", 0x88, INSTR_RXY_RRRD }, |
1132 | { "slbg", 0x89, INSTR_RXY_RRRD }, | 1132 | { "slbg", 0x89, INSTR_RXY_RRRD }, |
1133 | { "stpq", 0x8e, INSTR_RXY_RRRD }, | 1133 | { "stpq", 0x8e, INSTR_RXY_RRRD }, |
1134 | { "lpq", 0x8f, INSTR_RXY_RRRD }, | 1134 | { "lpq", 0x8f, INSTR_RXY_RRRD }, |
1135 | { "llgc", 0x90, INSTR_RXY_RRRD }, | 1135 | { "llgc", 0x90, INSTR_RXY_RRRD }, |
1136 | { "llgh", 0x91, INSTR_RXY_RRRD }, | 1136 | { "llgh", 0x91, INSTR_RXY_RRRD }, |
1137 | { "llc", 0x94, INSTR_RXY_RRRD }, | 1137 | { "llc", 0x94, INSTR_RXY_RRRD }, |
1138 | { "llh", 0x95, INSTR_RXY_RRRD }, | 1138 | { "llh", 0x95, INSTR_RXY_RRRD }, |
1139 | { "cgh", 0x34, INSTR_RXY_RRRD }, | 1139 | { "cgh", 0x34, INSTR_RXY_RRRD }, |
1140 | { "laey", 0x75, INSTR_RXY_RRRD }, | 1140 | { "laey", 0x75, INSTR_RXY_RRRD }, |
1141 | { "ltgf", 0x32, INSTR_RXY_RRRD }, | 1141 | { "ltgf", 0x32, INSTR_RXY_RRRD }, |
1142 | { "mfy", 0x5c, INSTR_RXY_RRRD }, | 1142 | { "mfy", 0x5c, INSTR_RXY_RRRD }, |
1143 | { "mhy", 0x7c, INSTR_RXY_RRRD }, | 1143 | { "mhy", 0x7c, INSTR_RXY_RRRD }, |
1144 | { "pfd", 0x36, INSTR_RXY_URRD }, | 1144 | { "pfd", 0x36, INSTR_RXY_URRD }, |
1145 | { "lbh", 0xc0, INSTR_RXY_RRRD }, | 1145 | { "lbh", 0xc0, INSTR_RXY_RRRD }, |
1146 | { "llch", 0xc2, INSTR_RXY_RRRD }, | 1146 | { "llch", 0xc2, INSTR_RXY_RRRD }, |
1147 | { "stch", 0xc3, INSTR_RXY_RRRD }, | 1147 | { "stch", 0xc3, INSTR_RXY_RRRD }, |
1148 | { "lhh", 0xc4, INSTR_RXY_RRRD }, | 1148 | { "lhh", 0xc4, INSTR_RXY_RRRD }, |
1149 | { "llhh", 0xc6, INSTR_RXY_RRRD }, | 1149 | { "llhh", 0xc6, INSTR_RXY_RRRD }, |
1150 | { "sthh", 0xc7, INSTR_RXY_RRRD }, | 1150 | { "sthh", 0xc7, INSTR_RXY_RRRD }, |
1151 | { "lfh", 0xca, INSTR_RXY_RRRD }, | 1151 | { "lfh", 0xca, INSTR_RXY_RRRD }, |
1152 | { "stfh", 0xcb, INSTR_RXY_RRRD }, | 1152 | { "stfh", 0xcb, INSTR_RXY_RRRD }, |
1153 | { "chf", 0xcd, INSTR_RXY_RRRD }, | 1153 | { "chf", 0xcd, INSTR_RXY_RRRD }, |
1154 | { "clhf", 0xcf, INSTR_RXY_RRRD }, | 1154 | { "clhf", 0xcf, INSTR_RXY_RRRD }, |
1155 | #endif | 1155 | #endif |
1156 | { "lrv", 0x1e, INSTR_RXY_RRRD }, | 1156 | { "lrv", 0x1e, INSTR_RXY_RRRD }, |
1157 | { "lrvh", 0x1f, INSTR_RXY_RRRD }, | 1157 | { "lrvh", 0x1f, INSTR_RXY_RRRD }, |
1158 | { "strv", 0x3e, INSTR_RXY_RRRD }, | 1158 | { "strv", 0x3e, INSTR_RXY_RRRD }, |
1159 | { "ml", 0x96, INSTR_RXY_RRRD }, | 1159 | { "ml", 0x96, INSTR_RXY_RRRD }, |
1160 | { "dl", 0x97, INSTR_RXY_RRRD }, | 1160 | { "dl", 0x97, INSTR_RXY_RRRD }, |
1161 | { "alc", 0x98, INSTR_RXY_RRRD }, | 1161 | { "alc", 0x98, INSTR_RXY_RRRD }, |
1162 | { "slb", 0x99, INSTR_RXY_RRRD }, | 1162 | { "slb", 0x99, INSTR_RXY_RRRD }, |
1163 | { "", 0, INSTR_INVALID } | 1163 | { "", 0, INSTR_INVALID } |
1164 | }; | 1164 | }; |
1165 | 1165 | ||
1166 | static struct insn opcode_e5[] = { | 1166 | static struct insn opcode_e5[] = { |
1167 | #ifdef CONFIG_64BIT | 1167 | #ifdef CONFIG_64BIT |
1168 | { "strag", 0x02, INSTR_SSE_RDRD }, | 1168 | { "strag", 0x02, INSTR_SSE_RDRD }, |
1169 | { "chhsi", 0x54, INSTR_SIL_RDI }, | 1169 | { "chhsi", 0x54, INSTR_SIL_RDI }, |
1170 | { "chsi", 0x5c, INSTR_SIL_RDI }, | 1170 | { "chsi", 0x5c, INSTR_SIL_RDI }, |
1171 | { "cghsi", 0x58, INSTR_SIL_RDI }, | 1171 | { "cghsi", 0x58, INSTR_SIL_RDI }, |
1172 | { { 0, LONG_INSN_CLHHSI }, 0x55, INSTR_SIL_RDU }, | 1172 | { { 0, LONG_INSN_CLHHSI }, 0x55, INSTR_SIL_RDU }, |
1173 | { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU }, | 1173 | { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU }, |
1174 | { { 0, LONG_INSN_CLGHSI }, 0x59, INSTR_SIL_RDU }, | 1174 | { { 0, LONG_INSN_CLGHSI }, 0x59, INSTR_SIL_RDU }, |
1175 | { "mvhhi", 0x44, INSTR_SIL_RDI }, | 1175 | { "mvhhi", 0x44, INSTR_SIL_RDI }, |
1176 | { "mvhi", 0x4c, INSTR_SIL_RDI }, | 1176 | { "mvhi", 0x4c, INSTR_SIL_RDI }, |
1177 | { "mvghi", 0x48, INSTR_SIL_RDI }, | 1177 | { "mvghi", 0x48, INSTR_SIL_RDI }, |
1178 | #endif | 1178 | #endif |
1179 | { "lasp", 0x00, INSTR_SSE_RDRD }, | 1179 | { "lasp", 0x00, INSTR_SSE_RDRD }, |
1180 | { "tprot", 0x01, INSTR_SSE_RDRD }, | 1180 | { "tprot", 0x01, INSTR_SSE_RDRD }, |
1181 | { "mvcsk", 0x0e, INSTR_SSE_RDRD }, | 1181 | { "mvcsk", 0x0e, INSTR_SSE_RDRD }, |
1182 | { "mvcdk", 0x0f, INSTR_SSE_RDRD }, | 1182 | { "mvcdk", 0x0f, INSTR_SSE_RDRD }, |
1183 | { "", 0, INSTR_INVALID } | 1183 | { "", 0, INSTR_INVALID } |
1184 | }; | 1184 | }; |
1185 | 1185 | ||
1186 | static struct insn opcode_eb[] = { | 1186 | static struct insn opcode_eb[] = { |
1187 | #ifdef CONFIG_64BIT | 1187 | #ifdef CONFIG_64BIT |
1188 | { "lmg", 0x04, INSTR_RSY_RRRD }, | 1188 | { "lmg", 0x04, INSTR_RSY_RRRD }, |
1189 | { "srag", 0x0a, INSTR_RSY_RRRD }, | 1189 | { "srag", 0x0a, INSTR_RSY_RRRD }, |
1190 | { "slag", 0x0b, INSTR_RSY_RRRD }, | 1190 | { "slag", 0x0b, INSTR_RSY_RRRD }, |
1191 | { "srlg", 0x0c, INSTR_RSY_RRRD }, | 1191 | { "srlg", 0x0c, INSTR_RSY_RRRD }, |
1192 | { "sllg", 0x0d, INSTR_RSY_RRRD }, | 1192 | { "sllg", 0x0d, INSTR_RSY_RRRD }, |
1193 | { "tracg", 0x0f, INSTR_RSY_RRRD }, | 1193 | { "tracg", 0x0f, INSTR_RSY_RRRD }, |
1194 | { "csy", 0x14, INSTR_RSY_RRRD }, | 1194 | { "csy", 0x14, INSTR_RSY_RRRD }, |
1195 | { "rllg", 0x1c, INSTR_RSY_RRRD }, | 1195 | { "rllg", 0x1c, INSTR_RSY_RRRD }, |
1196 | { "clmh", 0x20, INSTR_RSY_RURD }, | 1196 | { "clmh", 0x20, INSTR_RSY_RURD }, |
1197 | { "clmy", 0x21, INSTR_RSY_RURD }, | 1197 | { "clmy", 0x21, INSTR_RSY_RURD }, |
1198 | { "stmg", 0x24, INSTR_RSY_RRRD }, | 1198 | { "stmg", 0x24, INSTR_RSY_RRRD }, |
1199 | { "stctg", 0x25, INSTR_RSY_CCRD }, | 1199 | { "stctg", 0x25, INSTR_RSY_CCRD }, |
1200 | { "stmh", 0x26, INSTR_RSY_RRRD }, | 1200 | { "stmh", 0x26, INSTR_RSY_RRRD }, |
1201 | { "stcmh", 0x2c, INSTR_RSY_RURD }, | 1201 | { "stcmh", 0x2c, INSTR_RSY_RURD }, |
1202 | { "stcmy", 0x2d, INSTR_RSY_RURD }, | 1202 | { "stcmy", 0x2d, INSTR_RSY_RURD }, |
1203 | { "lctlg", 0x2f, INSTR_RSY_CCRD }, | 1203 | { "lctlg", 0x2f, INSTR_RSY_CCRD }, |
1204 | { "csg", 0x30, INSTR_RSY_RRRD }, | 1204 | { "csg", 0x30, INSTR_RSY_RRRD }, |
1205 | { "cdsy", 0x31, INSTR_RSY_RRRD }, | 1205 | { "cdsy", 0x31, INSTR_RSY_RRRD }, |
1206 | { "cdsg", 0x3e, INSTR_RSY_RRRD }, | 1206 | { "cdsg", 0x3e, INSTR_RSY_RRRD }, |
1207 | { "bxhg", 0x44, INSTR_RSY_RRRD }, | 1207 | { "bxhg", 0x44, INSTR_RSY_RRRD }, |
1208 | { "bxleg", 0x45, INSTR_RSY_RRRD }, | 1208 | { "bxleg", 0x45, INSTR_RSY_RRRD }, |
1209 | { "tmy", 0x51, INSTR_SIY_URD }, | 1209 | { "tmy", 0x51, INSTR_SIY_URD }, |
1210 | { "mviy", 0x52, INSTR_SIY_URD }, | 1210 | { "mviy", 0x52, INSTR_SIY_URD }, |
1211 | { "niy", 0x54, INSTR_SIY_URD }, | 1211 | { "niy", 0x54, INSTR_SIY_URD }, |
1212 | { "cliy", 0x55, INSTR_SIY_URD }, | 1212 | { "cliy", 0x55, INSTR_SIY_URD }, |
1213 | { "oiy", 0x56, INSTR_SIY_URD }, | 1213 | { "oiy", 0x56, INSTR_SIY_URD }, |
1214 | { "xiy", 0x57, INSTR_SIY_URD }, | 1214 | { "xiy", 0x57, INSTR_SIY_URD }, |
1215 | { "icmh", 0x80, INSTR_RSE_RURD }, | 1215 | { "icmh", 0x80, INSTR_RSE_RURD }, |
1216 | { "icmh", 0x80, INSTR_RSY_RURD }, | 1216 | { "icmh", 0x80, INSTR_RSY_RURD }, |
1217 | { "icmy", 0x81, INSTR_RSY_RURD }, | 1217 | { "icmy", 0x81, INSTR_RSY_RURD }, |
1218 | { "clclu", 0x8f, INSTR_RSY_RRRD }, | 1218 | { "clclu", 0x8f, INSTR_RSY_RRRD }, |
1219 | { "stmy", 0x90, INSTR_RSY_RRRD }, | 1219 | { "stmy", 0x90, INSTR_RSY_RRRD }, |
1220 | { "lmh", 0x96, INSTR_RSY_RRRD }, | 1220 | { "lmh", 0x96, INSTR_RSY_RRRD }, |
1221 | { "lmy", 0x98, INSTR_RSY_RRRD }, | 1221 | { "lmy", 0x98, INSTR_RSY_RRRD }, |
1222 | { "lamy", 0x9a, INSTR_RSY_AARD }, | 1222 | { "lamy", 0x9a, INSTR_RSY_AARD }, |
1223 | { "stamy", 0x9b, INSTR_RSY_AARD }, | 1223 | { "stamy", 0x9b, INSTR_RSY_AARD }, |
1224 | { "asi", 0x6a, INSTR_SIY_IRD }, | 1224 | { "asi", 0x6a, INSTR_SIY_IRD }, |
1225 | { "agsi", 0x7a, INSTR_SIY_IRD }, | 1225 | { "agsi", 0x7a, INSTR_SIY_IRD }, |
1226 | { "alsi", 0x6e, INSTR_SIY_IRD }, | 1226 | { "alsi", 0x6e, INSTR_SIY_IRD }, |
1227 | { "algsi", 0x7e, INSTR_SIY_IRD }, | 1227 | { "algsi", 0x7e, INSTR_SIY_IRD }, |
1228 | { "ecag", 0x4c, INSTR_RSY_RRRD }, | 1228 | { "ecag", 0x4c, INSTR_RSY_RRRD }, |
1229 | { "srak", 0xdc, INSTR_RSY_RRRD }, | 1229 | { "srak", 0xdc, INSTR_RSY_RRRD }, |
1230 | { "slak", 0xdd, INSTR_RSY_RRRD }, | 1230 | { "slak", 0xdd, INSTR_RSY_RRRD }, |
1231 | { "srlk", 0xde, INSTR_RSY_RRRD }, | 1231 | { "srlk", 0xde, INSTR_RSY_RRRD }, |
1232 | { "sllk", 0xdf, INSTR_RSY_RRRD }, | 1232 | { "sllk", 0xdf, INSTR_RSY_RRRD }, |
1233 | { "locg", 0xe2, INSTR_RSY_RDRM }, | 1233 | { "locg", 0xe2, INSTR_RSY_RDRM }, |
1234 | { "stocg", 0xe3, INSTR_RSY_RDRM }, | 1234 | { "stocg", 0xe3, INSTR_RSY_RDRM }, |
1235 | { "lang", 0xe4, INSTR_RSY_RRRD }, | 1235 | { "lang", 0xe4, INSTR_RSY_RRRD }, |
1236 | { "laog", 0xe6, INSTR_RSY_RRRD }, | 1236 | { "laog", 0xe6, INSTR_RSY_RRRD }, |
1237 | { "laxg", 0xe7, INSTR_RSY_RRRD }, | 1237 | { "laxg", 0xe7, INSTR_RSY_RRRD }, |
1238 | { "laag", 0xe8, INSTR_RSY_RRRD }, | 1238 | { "laag", 0xe8, INSTR_RSY_RRRD }, |
1239 | { "laalg", 0xea, INSTR_RSY_RRRD }, | 1239 | { "laalg", 0xea, INSTR_RSY_RRRD }, |
1240 | { "loc", 0xf2, INSTR_RSY_RDRM }, | 1240 | { "loc", 0xf2, INSTR_RSY_RDRM }, |
1241 | { "stoc", 0xf3, INSTR_RSY_RDRM }, | 1241 | { "stoc", 0xf3, INSTR_RSY_RDRM }, |
1242 | { "lan", 0xf4, INSTR_RSY_RRRD }, | 1242 | { "lan", 0xf4, INSTR_RSY_RRRD }, |
1243 | { "lao", 0xf6, INSTR_RSY_RRRD }, | 1243 | { "lao", 0xf6, INSTR_RSY_RRRD }, |
1244 | { "lax", 0xf7, INSTR_RSY_RRRD }, | 1244 | { "lax", 0xf7, INSTR_RSY_RRRD }, |
1245 | { "laa", 0xf8, INSTR_RSY_RRRD }, | 1245 | { "laa", 0xf8, INSTR_RSY_RRRD }, |
1246 | { "laal", 0xfa, INSTR_RSY_RRRD }, | 1246 | { "laal", 0xfa, INSTR_RSY_RRRD }, |
1247 | #endif | 1247 | #endif |
1248 | { "rll", 0x1d, INSTR_RSY_RRRD }, | 1248 | { "rll", 0x1d, INSTR_RSY_RRRD }, |
1249 | { "mvclu", 0x8e, INSTR_RSY_RRRD }, | 1249 | { "mvclu", 0x8e, INSTR_RSY_RRRD }, |
1250 | { "tp", 0xc0, INSTR_RSL_R0RD }, | 1250 | { "tp", 0xc0, INSTR_RSL_R0RD }, |
1251 | { "", 0, INSTR_INVALID } | 1251 | { "", 0, INSTR_INVALID } |
1252 | }; | 1252 | }; |
1253 | 1253 | ||
1254 | static struct insn opcode_ec[] = { | 1254 | static struct insn opcode_ec[] = { |
1255 | #ifdef CONFIG_64BIT | 1255 | #ifdef CONFIG_64BIT |
1256 | { "brxhg", 0x44, INSTR_RIE_RRP }, | 1256 | { "brxhg", 0x44, INSTR_RIE_RRP }, |
1257 | { "brxlg", 0x45, INSTR_RIE_RRP }, | 1257 | { "brxlg", 0x45, INSTR_RIE_RRP }, |
1258 | { "crb", 0xf6, INSTR_RRS_RRRDU }, | 1258 | { "crb", 0xf6, INSTR_RRS_RRRDU }, |
1259 | { "cgrb", 0xe4, INSTR_RRS_RRRDU }, | 1259 | { "cgrb", 0xe4, INSTR_RRS_RRRDU }, |
1260 | { "crj", 0x76, INSTR_RIE_RRPU }, | 1260 | { "crj", 0x76, INSTR_RIE_RRPU }, |
1261 | { "cgrj", 0x64, INSTR_RIE_RRPU }, | 1261 | { "cgrj", 0x64, INSTR_RIE_RRPU }, |
1262 | { "cib", 0xfe, INSTR_RIS_RURDI }, | 1262 | { "cib", 0xfe, INSTR_RIS_RURDI }, |
1263 | { "cgib", 0xfc, INSTR_RIS_RURDI }, | 1263 | { "cgib", 0xfc, INSTR_RIS_RURDI }, |
1264 | { "cij", 0x7e, INSTR_RIE_RUPI }, | 1264 | { "cij", 0x7e, INSTR_RIE_RUPI }, |
1265 | { "cgij", 0x7c, INSTR_RIE_RUPI }, | 1265 | { "cgij", 0x7c, INSTR_RIE_RUPI }, |
1266 | { "cit", 0x72, INSTR_RIE_R0IU }, | 1266 | { "cit", 0x72, INSTR_RIE_R0IU }, |
1267 | { "cgit", 0x70, INSTR_RIE_R0IU }, | 1267 | { "cgit", 0x70, INSTR_RIE_R0IU }, |
1268 | { "clrb", 0xf7, INSTR_RRS_RRRDU }, | 1268 | { "clrb", 0xf7, INSTR_RRS_RRRDU }, |
1269 | { "clgrb", 0xe5, INSTR_RRS_RRRDU }, | 1269 | { "clgrb", 0xe5, INSTR_RRS_RRRDU }, |
1270 | { "clrj", 0x77, INSTR_RIE_RRPU }, | 1270 | { "clrj", 0x77, INSTR_RIE_RRPU }, |
1271 | { "clgrj", 0x65, INSTR_RIE_RRPU }, | 1271 | { "clgrj", 0x65, INSTR_RIE_RRPU }, |
1272 | { "clib", 0xff, INSTR_RIS_RURDU }, | 1272 | { "clib", 0xff, INSTR_RIS_RURDU }, |
1273 | { "clgib", 0xfd, INSTR_RIS_RURDU }, | 1273 | { "clgib", 0xfd, INSTR_RIS_RURDU }, |
1274 | { "clij", 0x7f, INSTR_RIE_RUPU }, | 1274 | { "clij", 0x7f, INSTR_RIE_RUPU }, |
1275 | { "clgij", 0x7d, INSTR_RIE_RUPU }, | 1275 | { "clgij", 0x7d, INSTR_RIE_RUPU }, |
1276 | { "clfit", 0x73, INSTR_RIE_R0UU }, | 1276 | { "clfit", 0x73, INSTR_RIE_R0UU }, |
1277 | { "clgit", 0x71, INSTR_RIE_R0UU }, | 1277 | { "clgit", 0x71, INSTR_RIE_R0UU }, |
1278 | { "rnsbg", 0x54, INSTR_RIE_RRUUU }, | 1278 | { "rnsbg", 0x54, INSTR_RIE_RRUUU }, |
1279 | { "rxsbg", 0x57, INSTR_RIE_RRUUU }, | 1279 | { "rxsbg", 0x57, INSTR_RIE_RRUUU }, |
1280 | { "rosbg", 0x56, INSTR_RIE_RRUUU }, | 1280 | { "rosbg", 0x56, INSTR_RIE_RRUUU }, |
1281 | { "risbg", 0x55, INSTR_RIE_RRUUU }, | 1281 | { "risbg", 0x55, INSTR_RIE_RRUUU }, |
1282 | { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU }, | 1282 | { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU }, |
1283 | { { 0, LONG_INSN_RISBHG }, 0x5D, INSTR_RIE_RRUUU }, | 1283 | { { 0, LONG_INSN_RISBHG }, 0x5D, INSTR_RIE_RRUUU }, |
1284 | { "ahik", 0xd8, INSTR_RIE_RRI0 }, | 1284 | { "ahik", 0xd8, INSTR_RIE_RRI0 }, |
1285 | { "aghik", 0xd9, INSTR_RIE_RRI0 }, | 1285 | { "aghik", 0xd9, INSTR_RIE_RRI0 }, |
1286 | { { 0, LONG_INSN_ALHSIK }, 0xda, INSTR_RIE_RRI0 }, | 1286 | { { 0, LONG_INSN_ALHSIK }, 0xda, INSTR_RIE_RRI0 }, |
1287 | { { 0, LONG_INSN_ALGHSIK }, 0xdb, INSTR_RIE_RRI0 }, | 1287 | { { 0, LONG_INSN_ALGHSIK }, 0xdb, INSTR_RIE_RRI0 }, |
1288 | #endif | 1288 | #endif |
1289 | { "", 0, INSTR_INVALID } | 1289 | { "", 0, INSTR_INVALID } |
1290 | }; | 1290 | }; |
1291 | 1291 | ||
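Each of these tables ends with a { "", 0, INSTR_INVALID } sentinel rather than carrying an explicit length, so a lookup is a linear scan that stops at the sentinel; that is also what lets entries be added or removed inside the #ifdef CONFIG_64BIT blocks without a count to maintain. A minimal sketch of such a scan follows; the helper name find_opfrag() is an assumption, and how the secondary-opcode fragment is extracted from the instruction bytes (which depends on the major opcode) is not shown here.

	/*
	 * Sketch only: scan one opcode table for a secondary-opcode match,
	 * relying on the INSTR_INVALID sentinel as the terminator.
	 * struct insn and INSTR_INVALID are as in the listing above.
	 */
	static const struct insn *find_opfrag(const struct insn *table,
					      unsigned char opfrag)
	{
		const struct insn *insn;

		for (insn = table; insn->format != INSTR_INVALID; insn++)
			if (insn->opfrag == opfrag)
				return insn;
		return NULL;	/* unknown extension; caller dumps raw bytes */
	}

Note that the scan must test the format field, not the opcode value: 0x00 is a valid secondary opcode (see "lpebr" in opcode_b3), so only INSTR_INVALID reliably marks the end of a table.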
1292 | static struct insn opcode_ed[] = { | 1292 | static struct insn opcode_ed[] = { |
1293 | #ifdef CONFIG_64BIT | 1293 | #ifdef CONFIG_64BIT |
1294 | { "mayl", 0x38, INSTR_RXF_FRRDF }, | 1294 | { "mayl", 0x38, INSTR_RXF_FRRDF }, |
1295 | { "myl", 0x39, INSTR_RXF_FRRDF }, | 1295 | { "myl", 0x39, INSTR_RXF_FRRDF }, |
1296 | { "may", 0x3a, INSTR_RXF_FRRDF }, | 1296 | { "may", 0x3a, INSTR_RXF_FRRDF }, |
1297 | { "my", 0x3b, INSTR_RXF_FRRDF }, | 1297 | { "my", 0x3b, INSTR_RXF_FRRDF }, |
1298 | { "mayh", 0x3c, INSTR_RXF_FRRDF }, | 1298 | { "mayh", 0x3c, INSTR_RXF_FRRDF }, |
1299 | { "myh", 0x3d, INSTR_RXF_FRRDF }, | 1299 | { "myh", 0x3d, INSTR_RXF_FRRDF }, |
1300 | { "ley", 0x64, INSTR_RXY_FRRD }, | 1300 | { "ley", 0x64, INSTR_RXY_FRRD }, |
1301 | { "ldy", 0x65, INSTR_RXY_FRRD }, | 1301 | { "ldy", 0x65, INSTR_RXY_FRRD }, |
1302 | { "stey", 0x66, INSTR_RXY_FRRD }, | 1302 | { "stey", 0x66, INSTR_RXY_FRRD }, |
1303 | { "stdy", 0x67, INSTR_RXY_FRRD }, | 1303 | { "stdy", 0x67, INSTR_RXY_FRRD }, |
1304 | { "sldt", 0x40, INSTR_RXF_FRRDF }, | 1304 | { "sldt", 0x40, INSTR_RXF_FRRDF }, |
1305 | { "slxt", 0x48, INSTR_RXF_FRRDF }, | 1305 | { "slxt", 0x48, INSTR_RXF_FRRDF }, |
1306 | { "srdt", 0x41, INSTR_RXF_FRRDF }, | 1306 | { "srdt", 0x41, INSTR_RXF_FRRDF }, |
1307 | { "srxt", 0x49, INSTR_RXF_FRRDF }, | 1307 | { "srxt", 0x49, INSTR_RXF_FRRDF }, |
1308 | { "tdcet", 0x50, INSTR_RXE_FRRD }, | 1308 | { "tdcet", 0x50, INSTR_RXE_FRRD }, |
1309 | { "tdcdt", 0x54, INSTR_RXE_FRRD }, | 1309 | { "tdcdt", 0x54, INSTR_RXE_FRRD }, |
1310 | { "tdcxt", 0x58, INSTR_RXE_FRRD }, | 1310 | { "tdcxt", 0x58, INSTR_RXE_FRRD }, |
1311 | { "tdget", 0x51, INSTR_RXE_FRRD }, | 1311 | { "tdget", 0x51, INSTR_RXE_FRRD }, |
1312 | { "tdgdt", 0x55, INSTR_RXE_FRRD }, | 1312 | { "tdgdt", 0x55, INSTR_RXE_FRRD }, |
1313 | { "tdgxt", 0x59, INSTR_RXE_FRRD }, | 1313 | { "tdgxt", 0x59, INSTR_RXE_FRRD }, |
1314 | #endif | 1314 | #endif |
1315 | { "ldeb", 0x04, INSTR_RXE_FRRD }, | 1315 | { "ldeb", 0x04, INSTR_RXE_FRRD }, |
1316 | { "lxdb", 0x05, INSTR_RXE_FRRD }, | 1316 | { "lxdb", 0x05, INSTR_RXE_FRRD }, |
1317 | { "lxeb", 0x06, INSTR_RXE_FRRD }, | 1317 | { "lxeb", 0x06, INSTR_RXE_FRRD }, |
1318 | { "mxdb", 0x07, INSTR_RXE_FRRD }, | 1318 | { "mxdb", 0x07, INSTR_RXE_FRRD }, |
1319 | { "keb", 0x08, INSTR_RXE_FRRD }, | 1319 | { "keb", 0x08, INSTR_RXE_FRRD }, |
1320 | { "ceb", 0x09, INSTR_RXE_FRRD }, | 1320 | { "ceb", 0x09, INSTR_RXE_FRRD }, |
1321 | { "aeb", 0x0a, INSTR_RXE_FRRD }, | 1321 | { "aeb", 0x0a, INSTR_RXE_FRRD }, |
1322 | { "seb", 0x0b, INSTR_RXE_FRRD }, | 1322 | { "seb", 0x0b, INSTR_RXE_FRRD }, |
1323 | { "mdeb", 0x0c, INSTR_RXE_FRRD }, | 1323 | { "mdeb", 0x0c, INSTR_RXE_FRRD }, |
1324 | { "deb", 0x0d, INSTR_RXE_FRRD }, | 1324 | { "deb", 0x0d, INSTR_RXE_FRRD }, |
1325 | { "maeb", 0x0e, INSTR_RXF_FRRDF }, | 1325 | { "maeb", 0x0e, INSTR_RXF_FRRDF }, |
1326 | { "mseb", 0x0f, INSTR_RXF_FRRDF }, | 1326 | { "mseb", 0x0f, INSTR_RXF_FRRDF }, |
1327 | { "tceb", 0x10, INSTR_RXE_FRRD }, | 1327 | { "tceb", 0x10, INSTR_RXE_FRRD }, |
1328 | { "tcdb", 0x11, INSTR_RXE_FRRD }, | 1328 | { "tcdb", 0x11, INSTR_RXE_FRRD }, |
1329 | { "tcxb", 0x12, INSTR_RXE_FRRD }, | 1329 | { "tcxb", 0x12, INSTR_RXE_FRRD }, |
1330 | { "sqeb", 0x14, INSTR_RXE_FRRD }, | 1330 | { "sqeb", 0x14, INSTR_RXE_FRRD }, |
1331 | { "sqdb", 0x15, INSTR_RXE_FRRD }, | 1331 | { "sqdb", 0x15, INSTR_RXE_FRRD }, |
1332 | { "meeb", 0x17, INSTR_RXE_FRRD }, | 1332 | { "meeb", 0x17, INSTR_RXE_FRRD }, |
1333 | { "kdb", 0x18, INSTR_RXE_FRRD }, | 1333 | { "kdb", 0x18, INSTR_RXE_FRRD }, |
1334 | { "cdb", 0x19, INSTR_RXE_FRRD }, | 1334 | { "cdb", 0x19, INSTR_RXE_FRRD }, |
1335 | { "adb", 0x1a, INSTR_RXE_FRRD }, | 1335 | { "adb", 0x1a, INSTR_RXE_FRRD }, |
1336 | { "sdb", 0x1b, INSTR_RXE_FRRD }, | 1336 | { "sdb", 0x1b, INSTR_RXE_FRRD }, |
1337 | { "mdb", 0x1c, INSTR_RXE_FRRD }, | 1337 | { "mdb", 0x1c, INSTR_RXE_FRRD }, |
1338 | { "ddb", 0x1d, INSTR_RXE_FRRD }, | 1338 | { "ddb", 0x1d, INSTR_RXE_FRRD }, |
1339 | { "madb", 0x1e, INSTR_RXF_FRRDF }, | 1339 | { "madb", 0x1e, INSTR_RXF_FRRDF }, |
1340 | { "msdb", 0x1f, INSTR_RXF_FRRDF }, | 1340 | { "msdb", 0x1f, INSTR_RXF_FRRDF }, |
1341 | { "lde", 0x24, INSTR_RXE_FRRD }, | 1341 | { "lde", 0x24, INSTR_RXE_FRRD }, |
1342 | { "lxd", 0x25, INSTR_RXE_FRRD }, | 1342 | { "lxd", 0x25, INSTR_RXE_FRRD }, |
1343 | { "lxe", 0x26, INSTR_RXE_FRRD }, | 1343 | { "lxe", 0x26, INSTR_RXE_FRRD }, |
1344 | { "mae", 0x2e, INSTR_RXF_FRRDF }, | 1344 | { "mae", 0x2e, INSTR_RXF_FRRDF }, |
1345 | { "mse", 0x2f, INSTR_RXF_FRRDF }, | 1345 | { "mse", 0x2f, INSTR_RXF_FRRDF }, |
1346 | { "sqe", 0x34, INSTR_RXE_FRRD }, | 1346 | { "sqe", 0x34, INSTR_RXE_FRRD }, |
1347 | { "sqd", 0x35, INSTR_RXE_FRRD }, | 1347 | { "sqd", 0x35, INSTR_RXE_FRRD }, |
1348 | { "mee", 0x37, INSTR_RXE_FRRD }, | 1348 | { "mee", 0x37, INSTR_RXE_FRRD }, |
1349 | { "mad", 0x3e, INSTR_RXF_FRRDF }, | 1349 | { "mad", 0x3e, INSTR_RXF_FRRDF }, |
1350 | { "msd", 0x3f, INSTR_RXF_FRRDF }, | 1350 | { "msd", 0x3f, INSTR_RXF_FRRDF }, |
1351 | { "", 0, INSTR_INVALID } | 1351 | { "", 0, INSTR_INVALID } |
1352 | }; | 1352 | }; |
1353 | 1353 | ||
1354 | /* Extracts an operand value from an instruction. */ | 1354 | /* Extracts an operand value from an instruction. */ |
1355 | static unsigned int extract_operand(unsigned char *code, | 1355 | static unsigned int extract_operand(unsigned char *code, |
1356 | const struct operand *operand) | 1356 | const struct operand *operand) |
1357 | { | 1357 | { |
1358 | unsigned int val; | 1358 | unsigned int val; |
1359 | int bits; | 1359 | int bits; |
1360 | 1360 | ||
1361 | /* Extract fragments of the operand byte by byte. */ | 1361 | /* Extract fragments of the operand byte by byte. */ |
1362 | code += operand->shift / 8; | 1362 | code += operand->shift / 8; |
1363 | bits = (operand->shift & 7) + operand->bits; | 1363 | bits = (operand->shift & 7) + operand->bits; |
1364 | val = 0; | 1364 | val = 0; |
1365 | do { | 1365 | do { |
1366 | val <<= 8; | 1366 | val <<= 8; |
1367 | val |= (unsigned int) *code++; | 1367 | val |= (unsigned int) *code++; |
1368 | bits -= 8; | 1368 | bits -= 8; |
1369 | } while (bits > 0); | 1369 | } while (bits > 0); |
1370 | val >>= -bits; | 1370 | val >>= -bits; |
1371 | val &= ((1U << (operand->bits - 1)) << 1) - 1; | 1371 | val &= ((1U << (operand->bits - 1)) << 1) - 1; |
1372 | 1372 | ||
1373 | /* Check for special long displacement case. */ | 1373 | /* Check for special long displacement case. */ |
1374 | if (operand->bits == 20 && operand->shift == 20) | 1374 | if (operand->bits == 20 && operand->shift == 20) |
1375 | val = (val & 0xff) << 12 | (val & 0xfff00) >> 8; | 1375 | val = (val & 0xff) << 12 | (val & 0xfff00) >> 8; |
1376 | 1376 | ||
1377 | /* Sign extend value if the operand is signed or pc relative. */ | 1377 | /* Sign extend value if the operand is signed or pc relative. */ |
1378 | if ((operand->flags & (OPERAND_SIGNED | OPERAND_PCREL)) && | 1378 | if ((operand->flags & (OPERAND_SIGNED | OPERAND_PCREL)) && |
1379 | (val & (1U << (operand->bits - 1)))) | 1379 | (val & (1U << (operand->bits - 1)))) |
1380 | val |= (-1U << (operand->bits - 1)) << 1; | 1380 | val |= (-1U << (operand->bits - 1)) << 1; |
1381 | 1381 | ||
1382 | /* Double value if the operand is pc relative. */ | 1382 | /* Double value if the operand is pc relative. */ |
1383 | if (operand->flags & OPERAND_PCREL) | 1383 | if (operand->flags & OPERAND_PCREL) |
1384 | val <<= 1; | 1384 | val <<= 1; |
1385 | 1385 | ||
1386 | /* Length x in an instruction has real length x + 1. */ | 1386 | /* Length x in an instruction has real length x + 1. */ |
1387 | if (operand->flags & OPERAND_LENGTH) | 1387 | if (operand->flags & OPERAND_LENGTH) |
1388 | val++; | 1388 | val++; |
1389 | return val; | 1389 | return val; |
1390 | } | 1390 | } |
1391 | 1391 | ||
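To see the extraction steps above in isolation, here is a standalone sketch that walks a 20-bit long displacement out of an RXY-format instruction, including the DL/DH byte swap and the sign extension. The struct, flag value, and sample instruction bytes are simplified stand-ins for illustration, not the kernel's definitions:

#include <stdio.h>

struct operand {
	int bits;	/* width of the field in bits */
	int shift;	/* offset of the field from the instruction start */
	int flags;
};

#define OPERAND_SIGNED 1

static unsigned int extract(unsigned char *code, const struct operand *operand)
{
	unsigned int val;
	int bits;

	/* Same byte-by-byte accumulation as extract_operand() above. */
	code += operand->shift / 8;
	bits = (operand->shift & 7) + operand->bits;
	val = 0;
	do {
		val <<= 8;
		val |= (unsigned int) *code++;
		bits -= 8;
	} while (bits > 0);
	val >>= -bits;
	val &= ((1U << (operand->bits - 1)) << 1) - 1;
	/* Long displacement: DL (12 bits) is stored before DH (8 bits),
	 * so reassemble the value in DH:DL order. */
	if (operand->bits == 20 && operand->shift == 20)
		val = (val & 0xff) << 12 | (val & 0xfff00) >> 8;
	/* Sign extend from bit 19. */
	if ((operand->flags & OPERAND_SIGNED) &&
	    (val & (1U << (operand->bits - 1))))
		val |= (-1U << (operand->bits - 1)) << 1;
	return val;
}

int main(void)
{
	/* An RXY-format instruction with D2 = -4 (0xffffc as 20 bits). */
	unsigned char insn[6] = { 0xe3, 0x10, 0xff, 0xfc, 0xff, 0x71 };
	struct operand d20 = { .bits = 20, .shift = 20, .flags = OPERAND_SIGNED };

	printf("displacement = %d\n", (int) extract(insn, &d20));	/* -4 */
	return 0;
}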
1392 | static inline int insn_length(unsigned char code) | 1392 | static inline int insn_length(unsigned char code) |
1393 | { | 1393 | { |
1394 | return ((((int) code + 64) >> 7) + 1) << 1; | 1394 | return ((((int) code + 64) >> 7) + 1) << 1; |
1395 | } | 1395 | } |
1396 | 1396 | ||
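The arithmetic in insn_length() encodes the architected rule that the two most significant bits of the first opcode byte determine the instruction length (00 -> 2 bytes, 01 and 10 -> 4 bytes, 11 -> 6 bytes). A small self-contained check of that equivalence:

#include <assert.h>

static int insn_length(unsigned char code)
{
	return ((((int) code + 64) >> 7) + 1) << 1;
}

int main(void)
{
	int b, expected;

	for (b = 0; b < 256; b++) {
		expected = (b >> 6) == 0 ? 2 : ((b >> 6) == 3 ? 6 : 4);
		assert(insn_length((unsigned char) b) == expected);
	}
	return 0;
}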
1397 | static struct insn *find_insn(unsigned char *code) | 1397 | static struct insn *find_insn(unsigned char *code) |
1398 | { | 1398 | { |
1399 | unsigned char opfrag = code[1]; | 1399 | unsigned char opfrag = code[1]; |
1400 | unsigned char opmask; | 1400 | unsigned char opmask; |
1401 | struct insn *table; | 1401 | struct insn *table; |
1402 | 1402 | ||
1403 | switch (code[0]) { | 1403 | switch (code[0]) { |
1404 | case 0x01: | 1404 | case 0x01: |
1405 | table = opcode_01; | 1405 | table = opcode_01; |
1406 | break; | 1406 | break; |
1407 | case 0xa5: | 1407 | case 0xa5: |
1408 | table = opcode_a5; | 1408 | table = opcode_a5; |
1409 | break; | 1409 | break; |
1410 | case 0xa7: | 1410 | case 0xa7: |
1411 | table = opcode_a7; | 1411 | table = opcode_a7; |
1412 | break; | 1412 | break; |
1413 | case 0xb2: | 1413 | case 0xb2: |
1414 | table = opcode_b2; | 1414 | table = opcode_b2; |
1415 | break; | 1415 | break; |
1416 | case 0xb3: | 1416 | case 0xb3: |
1417 | table = opcode_b3; | 1417 | table = opcode_b3; |
1418 | break; | 1418 | break; |
1419 | case 0xb9: | 1419 | case 0xb9: |
1420 | table = opcode_b9; | 1420 | table = opcode_b9; |
1421 | break; | 1421 | break; |
1422 | case 0xc0: | 1422 | case 0xc0: |
1423 | table = opcode_c0; | 1423 | table = opcode_c0; |
1424 | break; | 1424 | break; |
1425 | case 0xc2: | 1425 | case 0xc2: |
1426 | table = opcode_c2; | 1426 | table = opcode_c2; |
1427 | break; | 1427 | break; |
1428 | case 0xc4: | 1428 | case 0xc4: |
1429 | table = opcode_c4; | 1429 | table = opcode_c4; |
1430 | break; | 1430 | break; |
1431 | case 0xc6: | 1431 | case 0xc6: |
1432 | table = opcode_c6; | 1432 | table = opcode_c6; |
1433 | break; | 1433 | break; |
1434 | case 0xc8: | 1434 | case 0xc8: |
1435 | table = opcode_c8; | 1435 | table = opcode_c8; |
1436 | break; | 1436 | break; |
1437 | case 0xcc: | 1437 | case 0xcc: |
1438 | table = opcode_cc; | 1438 | table = opcode_cc; |
1439 | break; | 1439 | break; |
1440 | case 0xe3: | 1440 | case 0xe3: |
1441 | table = opcode_e3; | 1441 | table = opcode_e3; |
1442 | opfrag = code[5]; | 1442 | opfrag = code[5]; |
1443 | break; | 1443 | break; |
1444 | case 0xe5: | 1444 | case 0xe5: |
1445 | table = opcode_e5; | 1445 | table = opcode_e5; |
1446 | break; | 1446 | break; |
1447 | case 0xeb: | 1447 | case 0xeb: |
1448 | table = opcode_eb; | 1448 | table = opcode_eb; |
1449 | opfrag = code[5]; | 1449 | opfrag = code[5]; |
1450 | break; | 1450 | break; |
1451 | case 0xec: | 1451 | case 0xec: |
1452 | table = opcode_ec; | 1452 | table = opcode_ec; |
1453 | opfrag = code[5]; | 1453 | opfrag = code[5]; |
1454 | break; | 1454 | break; |
1455 | case 0xed: | 1455 | case 0xed: |
1456 | table = opcode_ed; | 1456 | table = opcode_ed; |
1457 | opfrag = code[5]; | 1457 | opfrag = code[5]; |
1458 | break; | 1458 | break; |
1459 | default: | 1459 | default: |
1460 | table = opcode; | 1460 | table = opcode; |
1461 | opfrag = code[0]; | 1461 | opfrag = code[0]; |
1462 | break; | 1462 | break; |
1463 | } | 1463 | } |
1464 | while (table->format != INSTR_INVALID) { | 1464 | while (table->format != INSTR_INVALID) { |
1465 | opmask = formats[table->format][0]; | 1465 | opmask = formats[table->format][0]; |
1466 | if (table->opfrag == (opfrag & opmask)) | 1466 | if (table->opfrag == (opfrag & opmask)) |
1467 | return table; | 1467 | return table; |
1468 | table++; | 1468 | table++; |
1469 | } | 1469 | } |
1470 | return NULL; | 1470 | return NULL; |
1471 | } | 1471 | } |
1472 | 1472 | ||
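Two details of the lookup above are easy to miss: the six-byte formats (opcodes 0xe3, 0xeb, 0xec, 0xed) carry their opcode extension in byte 5, and each table entry matches through a per-format mask, so an entry can match a whole byte or only part of it. A toy version of the masked table walk, with made-up mask values for illustration:

#include <stdio.h>

struct insn { const char *name; unsigned char opfrag; int format; };

/* Stand-in for formats[f][0], the opcode-fragment mask of format f. */
static const unsigned char masks[] = { 0xff, 0x0f };

static const struct insn table[] = {
	{ "full",   0x09, 0 },	/* must match the whole fragment */
	{ "nibble", 0x05, 1 },	/* only the low four bits matter */
	{ NULL,     0,    0 },
};

static const struct insn *find(unsigned char opfrag)
{
	const struct insn *t;

	for (t = table; t->name; t++)
		if (t->opfrag == (opfrag & masks[t->format]))
			return t;
	return NULL;
}

int main(void)
{
	printf("%s\n", find(0x09)->name);	/* "full" */
	printf("%s\n", find(0xf5)->name);	/* "nibble": 0xf5 & 0x0f == 0x05 */
	return 0;
}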
1473 | static int print_insn(char *buffer, unsigned char *code, unsigned long addr) | 1473 | static int print_insn(char *buffer, unsigned char *code, unsigned long addr) |
1474 | { | 1474 | { |
1475 | struct insn *insn; | 1475 | struct insn *insn; |
1476 | const unsigned char *ops; | 1476 | const unsigned char *ops; |
1477 | const struct operand *operand; | 1477 | const struct operand *operand; |
1478 | unsigned int value; | 1478 | unsigned int value; |
1479 | char separator; | 1479 | char separator; |
1480 | char *ptr; | 1480 | char *ptr; |
1481 | int i; | 1481 | int i; |
1482 | 1482 | ||
1483 | ptr = buffer; | 1483 | ptr = buffer; |
1484 | insn = find_insn(code); | 1484 | insn = find_insn(code); |
1485 | if (insn) { | 1485 | if (insn) { |
1486 | if (insn->name[0] == '\0') | 1486 | if (insn->name[0] == '\0') |
1487 | ptr += sprintf(ptr, "%s\t", | 1487 | ptr += sprintf(ptr, "%s\t", |
1488 | long_insn_name[(int) insn->name[1]]); | 1488 | long_insn_name[(int) insn->name[1]]); |
1489 | else | 1489 | else |
1490 | ptr += sprintf(ptr, "%.5s\t", insn->name); | 1490 | ptr += sprintf(ptr, "%.5s\t", insn->name); |
1491 | /* Extract the operands. */ | 1491 | /* Extract the operands. */ |
1492 | separator = 0; | 1492 | separator = 0; |
1493 | for (ops = formats[insn->format] + 1, i = 0; | 1493 | for (ops = formats[insn->format] + 1, i = 0; |
1494 | *ops != 0 && i < 6; ops++, i++) { | 1494 | *ops != 0 && i < 6; ops++, i++) { |
1495 | operand = operands + *ops; | 1495 | operand = operands + *ops; |
1496 | value = extract_operand(code, operand); | 1496 | value = extract_operand(code, operand); |
1497 | if ((operand->flags & OPERAND_INDEX) && value == 0) | 1497 | if ((operand->flags & OPERAND_INDEX) && value == 0) |
1498 | continue; | 1498 | continue; |
1499 | if ((operand->flags & OPERAND_BASE) && | 1499 | if ((operand->flags & OPERAND_BASE) && |
1500 | value == 0 && separator == '(') { | 1500 | value == 0 && separator == '(') { |
1501 | separator = ','; | 1501 | separator = ','; |
1502 | continue; | 1502 | continue; |
1503 | } | 1503 | } |
1504 | if (separator) | 1504 | if (separator) |
1505 | ptr += sprintf(ptr, "%c", separator); | 1505 | ptr += sprintf(ptr, "%c", separator); |
1506 | if (operand->flags & OPERAND_GPR) | 1506 | if (operand->flags & OPERAND_GPR) |
1507 | ptr += sprintf(ptr, "%%r%i", value); | 1507 | ptr += sprintf(ptr, "%%r%i", value); |
1508 | else if (operand->flags & OPERAND_FPR) | 1508 | else if (operand->flags & OPERAND_FPR) |
1509 | ptr += sprintf(ptr, "%%f%i", value); | 1509 | ptr += sprintf(ptr, "%%f%i", value); |
1510 | else if (operand->flags & OPERAND_AR) | 1510 | else if (operand->flags & OPERAND_AR) |
1511 | ptr += sprintf(ptr, "%%a%i", value); | 1511 | ptr += sprintf(ptr, "%%a%i", value); |
1512 | else if (operand->flags & OPERAND_CR) | 1512 | else if (operand->flags & OPERAND_CR) |
1513 | ptr += sprintf(ptr, "%%c%i", value); | 1513 | ptr += sprintf(ptr, "%%c%i", value); |
1514 | else if (operand->flags & OPERAND_PCREL) | 1514 | else if (operand->flags & OPERAND_PCREL) |
1515 | ptr += sprintf(ptr, "%lx", (signed int) value | 1515 | ptr += sprintf(ptr, "%lx", (signed int) value |
1516 | + addr); | 1516 | + addr); |
1517 | else if (operand->flags & OPERAND_SIGNED) | 1517 | else if (operand->flags & OPERAND_SIGNED) |
1518 | ptr += sprintf(ptr, "%i", value); | 1518 | ptr += sprintf(ptr, "%i", value); |
1519 | else | 1519 | else |
1520 | ptr += sprintf(ptr, "%u", value); | 1520 | ptr += sprintf(ptr, "%u", value); |
1521 | if (operand->flags & OPERAND_DISP) | 1521 | if (operand->flags & OPERAND_DISP) |
1522 | separator = '('; | 1522 | separator = '('; |
1523 | else if (operand->flags & OPERAND_BASE) { | 1523 | else if (operand->flags & OPERAND_BASE) { |
1524 | ptr += sprintf(ptr, ")"); | 1524 | ptr += sprintf(ptr, ")"); |
1525 | separator = ','; | 1525 | separator = ','; |
1526 | } else | 1526 | } else |
1527 | separator = ','; | 1527 | separator = ','; |
1528 | } | 1528 | } |
1529 | } else | 1529 | } else |
1530 | ptr += sprintf(ptr, "unknown"); | 1530 | ptr += sprintf(ptr, "unknown"); |
1531 | return (int) (ptr - buffer); | 1531 | return (int) (ptr - buffer); |
1532 | } | 1532 | } |
1533 | 1533 | ||
1534 | void show_code(struct pt_regs *regs) | 1534 | void show_code(struct pt_regs *regs) |
1535 | { | 1535 | { |
1536 | char *mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; | 1536 | char *mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; |
1537 | unsigned char code[64]; | 1537 | unsigned char code[64]; |
1538 | char buffer[64], *ptr; | 1538 | char buffer[64], *ptr; |
1539 | mm_segment_t old_fs; | 1539 | mm_segment_t old_fs; |
1540 | unsigned long addr; | 1540 | unsigned long addr; |
1541 | int start, end, opsize, hops, i; | 1541 | int start, end, opsize, hops, i; |
1542 | 1542 | ||
1543 | /* Get a snapshot of the 64 bytes surrounding the fault address. */ | 1543 | /* Get a snapshot of the 64 bytes surrounding the fault address. */ |
1544 | old_fs = get_fs(); | 1544 | old_fs = get_fs(); |
1545 | set_fs((regs->psw.mask & PSW_MASK_PSTATE) ? USER_DS : KERNEL_DS); | 1545 | set_fs((regs->psw.mask & PSW_MASK_PSTATE) ? USER_DS : KERNEL_DS); |
1546 | for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) { | 1546 | for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) { |
1547 | addr = regs->psw.addr - 34 + start; | 1547 | addr = regs->psw.addr - 34 + start; |
1548 | if (__copy_from_user(code + start - 2, | 1548 | if (__copy_from_user(code + start - 2, |
1549 | (char __user *) addr, 2)) | 1549 | (char __user *) addr, 2)) |
1550 | break; | 1550 | break; |
1551 | } | 1551 | } |
1552 | for (end = 32; end < 64; end += 2) { | 1552 | for (end = 32; end < 64; end += 2) { |
1553 | addr = regs->psw.addr + end - 32; | 1553 | addr = regs->psw.addr + end - 32; |
1554 | if (__copy_from_user(code + end, | 1554 | if (__copy_from_user(code + end, |
1555 | (char __user *) addr, 2)) | 1555 | (char __user *) addr, 2)) |
1556 | break; | 1556 | break; |
1557 | } | 1557 | } |
1558 | set_fs(old_fs); | 1558 | set_fs(old_fs); |
1559 | /* Code snapshot usable? */ | 1559 | /* Code snapshot usable? */ |
1560 | if ((regs->psw.addr & 1) || start >= end) { | 1560 | if ((regs->psw.addr & 1) || start >= end) { |
1561 | printk("%s Code: Bad PSW.\n", mode); | 1561 | printk("%s Code: Bad PSW.\n", mode); |
1562 | return; | 1562 | return; |
1563 | } | 1563 | } |
1564 | /* Find a starting point for the disassembly. */ | 1564 | /* Find a starting point for the disassembly. */ |
1565 | while (start < 32) { | 1565 | while (start < 32) { |
1566 | for (i = 0, hops = 0; start + i < 32 && hops < 3; hops++) { | 1566 | for (i = 0, hops = 0; start + i < 32 && hops < 3; hops++) { |
1567 | if (!find_insn(code + start + i)) | 1567 | if (!find_insn(code + start + i)) |
1568 | break; | 1568 | break; |
1569 | i += insn_length(code[start + i]); | 1569 | i += insn_length(code[start + i]); |
1570 | } | 1570 | } |
1571 | if (start + i == 32) | 1571 | if (start + i == 32) |
1572 | /* Looks good, sequence ends at PSW. */ | 1572 | /* Looks good, sequence ends at PSW. */ |
1573 | break; | 1573 | break; |
1574 | start += 2; | 1574 | start += 2; |
1575 | } | 1575 | } |
1576 | /* Decode the instructions. */ | 1576 | /* Decode the instructions. */ |
1577 | ptr = buffer; | 1577 | ptr = buffer; |
1578 | ptr += sprintf(ptr, "%s Code:", mode); | 1578 | ptr += sprintf(ptr, "%s Code:", mode); |
1579 | hops = 0; | 1579 | hops = 0; |
1580 | while (start < end && hops < 8) { | 1580 | while (start < end && hops < 8) { |
1581 | *ptr++ = (start == 32) ? '>' : ' '; | 1581 | *ptr++ = (start == 32) ? '>' : ' '; |
1582 | addr = regs->psw.addr + start - 32; | 1582 | addr = regs->psw.addr + start - 32; |
1583 | ptr += sprintf(ptr, ONELONG, addr); | 1583 | ptr += sprintf(ptr, ONELONG, addr); |
1584 | opsize = insn_length(code[start]); | 1584 | opsize = insn_length(code[start]); |
1585 | if (start + opsize >= end) | 1585 | if (start + opsize >= end) |
1586 | break; | 1586 | break; |
1587 | for (i = 0; i < opsize; i++) | 1587 | for (i = 0; i < opsize; i++) |
1588 | ptr += sprintf(ptr, "%02x", code[start + i]); | 1588 | ptr += sprintf(ptr, "%02x", code[start + i]); |
1589 | *ptr++ = '\t'; | 1589 | *ptr++ = '\t'; |
1590 | if (i < 6) | 1590 | if (i < 6) |
1591 | *ptr++ = '\t'; | 1591 | *ptr++ = '\t'; |
1592 | ptr += print_insn(ptr, code + start, addr); | 1592 | ptr += print_insn(ptr, code + start, addr); |
1593 | start += opsize; | 1593 | start += opsize; |
1594 | printk(buffer); | 1594 | printk(buffer); |
1595 | ptr = buffer; | 1595 | ptr = buffer; |
1596 | ptr += sprintf(ptr, "\n "); | 1596 | ptr += sprintf(ptr, "\n "); |
1597 | hops++; | 1597 | hops++; |
1598 | } | 1598 | } |
1599 | printk("\n"); | 1599 | printk("\n"); |
1600 | } | 1600 | } |
arch/s390/kernel/irq.c
1 | /* | 1 | /* |
2 | * Copyright IBM Corp. 2004,2010 | 2 | * Copyright IBM Corp. 2004,2011 |
3 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | 3 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, |
4 | * Thomas Spatzier (tspat@de.ibm.com) | 4 | * Holger Smolinski <Holger.Smolinski@de.ibm.com>, |
5 | * Thomas Spatzier <tspat@de.ibm.com>, | ||
5 | * | 6 | * |
6 | * This file contains interrupt related functions. | 7 | * This file contains interrupt related functions. |
7 | */ | 8 | */ |
8 | 9 | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/kernel_stat.h> | 10 | #include <linux/kernel_stat.h> |
12 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
13 | #include <linux/seq_file.h> | 12 | #include <linux/seq_file.h> |
14 | #include <linux/cpu.h> | ||
15 | #include <linux/proc_fs.h> | 13 | #include <linux/proc_fs.h> |
16 | #include <linux/profile.h> | 14 | #include <linux/profile.h> |
15 | #include <linux/module.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/ftrace.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/cpu.h> | ||
21 | #include <asm/irq_regs.h> | ||
22 | #include <asm/cputime.h> | ||
23 | #include <asm/lowcore.h> | ||
24 | #include <asm/irq.h> | ||
25 | #include "entry.h" | ||
17 | 26 | ||
18 | struct irq_class { | 27 | struct irq_class { |
19 | char *name; | 28 | char *name; |
20 | char *desc; | 29 | char *desc; |
21 | }; | 30 | }; |
22 | 31 | ||
23 | static const struct irq_class intrclass_names[] = { | 32 | static const struct irq_class intrclass_names[] = { |
24 | {.name = "EXT" }, | 33 | {.name = "EXT" }, |
25 | {.name = "I/O" }, | 34 | {.name = "I/O" }, |
26 | {.name = "CLK", .desc = "[EXT] Clock Comparator" }, | 35 | {.name = "CLK", .desc = "[EXT] Clock Comparator" }, |
27 | {.name = "IPI", .desc = "[EXT] Signal Processor" }, | 36 | {.name = "IPI", .desc = "[EXT] Signal Processor" }, |
28 | {.name = "TMR", .desc = "[EXT] CPU Timer" }, | 37 | {.name = "TMR", .desc = "[EXT] CPU Timer" }, |
29 | {.name = "TAL", .desc = "[EXT] Timing Alert" }, | 38 | {.name = "TAL", .desc = "[EXT] Timing Alert" }, |
30 | {.name = "PFL", .desc = "[EXT] Pseudo Page Fault" }, | 39 | {.name = "PFL", .desc = "[EXT] Pseudo Page Fault" }, |
31 | {.name = "DSD", .desc = "[EXT] DASD Diag" }, | 40 | {.name = "DSD", .desc = "[EXT] DASD Diag" }, |
32 | {.name = "VRT", .desc = "[EXT] Virtio" }, | 41 | {.name = "VRT", .desc = "[EXT] Virtio" }, |
33 | {.name = "SCP", .desc = "[EXT] Service Call" }, | 42 | {.name = "SCP", .desc = "[EXT] Service Call" }, |
34 | {.name = "IUC", .desc = "[EXT] IUCV" }, | 43 | {.name = "IUC", .desc = "[EXT] IUCV" }, |
35 | {.name = "CPM", .desc = "[EXT] CPU Measurement" }, | 44 | {.name = "CPM", .desc = "[EXT] CPU Measurement" }, |
36 | {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" }, | 45 | {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" }, |
37 | {.name = "QDI", .desc = "[I/O] QDIO Interrupt" }, | 46 | {.name = "QDI", .desc = "[I/O] QDIO Interrupt" }, |
38 | {.name = "DAS", .desc = "[I/O] DASD" }, | 47 | {.name = "DAS", .desc = "[I/O] DASD" }, |
39 | {.name = "C15", .desc = "[I/O] 3215" }, | 48 | {.name = "C15", .desc = "[I/O] 3215" }, |
40 | {.name = "C70", .desc = "[I/O] 3270" }, | 49 | {.name = "C70", .desc = "[I/O] 3270" }, |
41 | {.name = "TAP", .desc = "[I/O] Tape" }, | 50 | {.name = "TAP", .desc = "[I/O] Tape" }, |
42 | {.name = "VMR", .desc = "[I/O] Unit Record Devices" }, | 51 | {.name = "VMR", .desc = "[I/O] Unit Record Devices" }, |
43 | {.name = "LCS", .desc = "[I/O] LCS" }, | 52 | {.name = "LCS", .desc = "[I/O] LCS" }, |
44 | {.name = "CLW", .desc = "[I/O] CLAW" }, | 53 | {.name = "CLW", .desc = "[I/O] CLAW" }, |
45 | {.name = "CTC", .desc = "[I/O] CTC" }, | 54 | {.name = "CTC", .desc = "[I/O] CTC" }, |
46 | {.name = "APB", .desc = "[I/O] AP Bus" }, | 55 | {.name = "APB", .desc = "[I/O] AP Bus" }, |
47 | {.name = "NMI", .desc = "[NMI] Machine Check" }, | 56 | {.name = "NMI", .desc = "[NMI] Machine Check" }, |
48 | }; | 57 | }; |
49 | 58 | ||
50 | /* | 59 | /* |
51 | * show_interrupts is needed by /proc/interrupts. | 60 | * show_interrupts is needed by /proc/interrupts. |
52 | */ | 61 | */ |
53 | int show_interrupts(struct seq_file *p, void *v) | 62 | int show_interrupts(struct seq_file *p, void *v) |
54 | { | 63 | { |
55 | int i = *(loff_t *) v, j; | 64 | int i = *(loff_t *) v, j; |
56 | 65 | ||
57 | get_online_cpus(); | 66 | get_online_cpus(); |
58 | if (i == 0) { | 67 | if (i == 0) { |
59 | seq_puts(p, " "); | 68 | seq_puts(p, " "); |
60 | for_each_online_cpu(j) | 69 | for_each_online_cpu(j) |
61 | seq_printf(p, "CPU%d ",j); | 70 | seq_printf(p, "CPU%d ",j); |
62 | seq_putc(p, '\n'); | 71 | seq_putc(p, '\n'); |
63 | } | 72 | } |
64 | 73 | ||
65 | if (i < NR_IRQS) { | 74 | if (i < NR_IRQS) { |
66 | seq_printf(p, "%s: ", intrclass_names[i].name); | 75 | seq_printf(p, "%s: ", intrclass_names[i].name); |
67 | #ifndef CONFIG_SMP | 76 | #ifndef CONFIG_SMP |
68 | seq_printf(p, "%10u ", kstat_irqs(i)); | 77 | seq_printf(p, "%10u ", kstat_irqs(i)); |
69 | #else | 78 | #else |
70 | for_each_online_cpu(j) | 79 | for_each_online_cpu(j) |
71 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); | 80 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); |
72 | #endif | 81 | #endif |
73 | if (intrclass_names[i].desc) | 82 | if (intrclass_names[i].desc) |
74 | seq_printf(p, " %s", intrclass_names[i].desc); | 83 | seq_printf(p, " %s", intrclass_names[i].desc); |
75 | seq_putc(p, '\n'); | 84 | seq_putc(p, '\n'); |
76 | } | 85 | } |
77 | put_online_cpus(); | 86 | put_online_cpus(); |
78 | return 0; | 87 | return 0; |
79 | } | 88 | } |
80 | 89 | ||
81 | /* | 90 | /* |
82 | * For compatibility only. S/390 specific setup of interrupts et al. is done | 91 | * For compatibility only. S/390 specific setup of interrupts et al. is done |
83 | * much later in init_channel_subsystem(). | 92 | * much later in init_channel_subsystem(). |
84 | */ | 93 | */ |
85 | void __init | 94 | void __init init_IRQ(void) |
86 | init_IRQ(void) | ||
87 | { | 95 | { |
88 | /* nothing... */ | 96 | /* nothing... */ |
89 | } | 97 | } |
90 | 98 | ||
91 | /* | 99 | /* |
92 | * Switch to the asynchronous interrupt stack for softirq execution. | 100 | * Switch to the asynchronous interrupt stack for softirq execution. |
93 | */ | 101 | */ |
94 | asmlinkage void do_softirq(void) | 102 | asmlinkage void do_softirq(void) |
95 | { | 103 | { |
96 | unsigned long flags, old, new; | 104 | unsigned long flags, old, new; |
97 | 105 | ||
98 | if (in_interrupt()) | 106 | if (in_interrupt()) |
99 | return; | 107 | return; |
100 | 108 | ||
101 | local_irq_save(flags); | 109 | local_irq_save(flags); |
102 | 110 | ||
103 | if (local_softirq_pending()) { | 111 | if (local_softirq_pending()) { |
104 | /* Get current stack pointer. */ | 112 | /* Get current stack pointer. */ |
105 | asm volatile("la %0,0(15)" : "=a" (old)); | 113 | asm volatile("la %0,0(15)" : "=a" (old)); |
106 | /* Check against async. stack address range. */ | 114 | /* Check against async. stack address range. */ |
107 | new = S390_lowcore.async_stack; | 115 | new = S390_lowcore.async_stack; |
108 | if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) { | 116 | if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) { |
109 | /* Need to switch to the async. stack. */ | 117 | /* Need to switch to the async. stack. */ |
110 | new -= STACK_FRAME_OVERHEAD; | 118 | new -= STACK_FRAME_OVERHEAD; |
111 | ((struct stack_frame *) new)->back_chain = old; | 119 | ((struct stack_frame *) new)->back_chain = old; |
112 | 120 | ||
113 | asm volatile(" la 15,0(%0)\n" | 121 | asm volatile(" la 15,0(%0)\n" |
114 | " basr 14,%2\n" | 122 | " basr 14,%2\n" |
115 | " la 15,0(%1)\n" | 123 | " la 15,0(%1)\n" |
116 | : : "a" (new), "a" (old), | 124 | : : "a" (new), "a" (old), |
117 | "a" (__do_softirq) | 125 | "a" (__do_softirq) |
118 | : "0", "1", "2", "3", "4", "5", "14", | 126 | : "0", "1", "2", "3", "4", "5", "14", |
119 | "cc", "memory" ); | 127 | "cc", "memory" ); |
120 | } else | 128 | } else |
121 | /* We are already on the async stack. */ | 129 | /* We are already on the async stack. */ |
122 | __do_softirq(); | 130 | __do_softirq(); |
123 | } | 131 | } |
124 | 132 | ||
125 | local_irq_restore(flags); | 133 | local_irq_restore(flags); |
126 | } | 134 | } |
127 | 135 | ||
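The range test in do_softirq() exploits the fact that the async stack is one THREAD_ORDER-aligned block: if the current stack pointer and the async stack top differ only in the low PAGE_SHIFT + THREAD_ORDER bits, the CPU is already running on that stack and no switch is needed. A sketch with illustrative values (the constants below are assumptions, not the kernel's configuration):

#include <stdio.h>

#define PAGE_SHIFT	12
#define THREAD_ORDER	2	/* assumed 16KB async stack */

static int needs_switch(unsigned long old_sp, unsigned long async_top)
{
	return ((async_top - old_sp) >> (PAGE_SHIFT + THREAD_ORDER)) != 0;
}

int main(void)
{
	unsigned long async_top = 0x10000000;

	/* sp within 16KB below the top: already on the async stack */
	printf("%d\n", needs_switch(0x0fffff00, async_top));	/* 0 */
	/* sp on some other stack: switch required */
	printf("%d\n", needs_switch(0x0f000000, async_top));	/* 1 */
	return 0;
}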
128 | #ifdef CONFIG_PROC_FS | 136 | #ifdef CONFIG_PROC_FS |
129 | void init_irq_proc(void) | 137 | void init_irq_proc(void) |
130 | { | 138 | { |
131 | struct proc_dir_entry *root_irq_dir; | 139 | struct proc_dir_entry *root_irq_dir; |
132 | 140 | ||
133 | root_irq_dir = proc_mkdir("irq", NULL); | 141 | root_irq_dir = proc_mkdir("irq", NULL); |
134 | create_prof_cpu_mask(root_irq_dir); | 142 | create_prof_cpu_mask(root_irq_dir); |
135 | } | 143 | } |
136 | #endif | 144 | #endif |
145 | |||
146 | /* | ||
147 | * ext_int_hash[index] is the start of the list for all external interrupts | ||
148 | * that hash to this index. With the current set of external interrupts | ||
149 | * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000 | ||
150 | * iucv and 0x2603 pfault) this is always the first element. | ||
151 | */ | ||
152 | |||
153 | struct ext_int_info { | ||
154 | struct ext_int_info *next; | ||
155 | ext_int_handler_t handler; | ||
156 | u16 code; | ||
157 | }; | ||
158 | |||
159 | static struct ext_int_info *ext_int_hash[256]; | ||
160 | |||
161 | static inline int ext_hash(u16 code) | ||
162 | { | ||
163 | return (code + (code >> 9)) & 0xff; | ||
164 | } | ||
165 | |||
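As the comment above notes, each hash list currently holds a single element. A quick user-space sketch confirms that the five documented codes land in five distinct buckets:

#include <stdio.h>

static int ext_hash(unsigned short code)
{
	return (code + (code >> 9)) & 0xff;
}

int main(void)
{
	unsigned short codes[] = { 0x1202, 0x1004, 0x2401, 0x4000, 0x2603 };
	int i;

	for (i = 0; i < 5; i++)
		printf("0x%04x -> bucket %d\n", codes[i], ext_hash(codes[i]));
	return 0;
}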
166 | int register_external_interrupt(u16 code, ext_int_handler_t handler) | ||
167 | { | ||
168 | struct ext_int_info *p; | ||
169 | int index; | ||
170 | |||
171 | p = kmalloc(sizeof(*p), GFP_ATOMIC); | ||
172 | if (!p) | ||
173 | return -ENOMEM; | ||
174 | p->code = code; | ||
175 | p->handler = handler; | ||
176 | index = ext_hash(code); | ||
177 | p->next = ext_int_hash[index]; | ||
178 | ext_int_hash[index] = p; | ||
179 | return 0; | ||
180 | } | ||
181 | EXPORT_SYMBOL(register_external_interrupt); | ||
182 | |||
183 | int unregister_external_interrupt(u16 code, ext_int_handler_t handler) | ||
184 | { | ||
185 | struct ext_int_info *p, *q; | ||
186 | int index; | ||
187 | |||
188 | index = ext_hash(code); | ||
189 | q = NULL; | ||
190 | p = ext_int_hash[index]; | ||
191 | while (p) { | ||
192 | if (p->code == code && p->handler == handler) | ||
193 | break; | ||
194 | q = p; | ||
195 | p = p->next; | ||
196 | } | ||
197 | if (!p) | ||
198 | return -ENOENT; | ||
199 | if (q) | ||
200 | q->next = p->next; | ||
201 | else | ||
202 | ext_int_hash[index] = p->next; | ||
203 | kfree(p); | ||
204 | return 0; | ||
205 | } | ||
206 | EXPORT_SYMBOL(unregister_external_interrupt); | ||
207 | |||
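A hypothetical usage sketch of this register/unregister pair from a driver's point of view; the code value 0x2603 is the pfault code mentioned in the hash comment above, while the handler body and function names are made up for illustration:

static void my_ext_handler(unsigned int ext_int_code,
			   unsigned int param32, unsigned long param64)
{
	/* called from do_extint() in interrupt context */
}

static int __init my_driver_init(void)
{
	/* may fail with -ENOMEM if the hash node cannot be allocated */
	return register_external_interrupt(0x2603, my_ext_handler);
}

static void __exit my_driver_exit(void)
{
	unregister_external_interrupt(0x2603, my_ext_handler);
}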
208 | void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code, | ||
209 | unsigned int param32, unsigned long param64) | ||
210 | { | ||
211 | struct pt_regs *old_regs; | ||
212 | unsigned short code; | ||
213 | struct ext_int_info *p; | ||
214 | int index; | ||
215 | |||
216 | code = (unsigned short) ext_int_code; | ||
217 | old_regs = set_irq_regs(regs); | ||
218 | s390_idle_check(regs, S390_lowcore.int_clock, | ||
219 | S390_lowcore.async_enter_timer); | ||
220 | irq_enter(); | ||
221 | if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) | ||
222 | /* Serve timer interrupts first. */ | ||
223 | clock_comparator_work(); | ||
224 | kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; | ||
225 | if (code != 0x1004) | ||
226 | __get_cpu_var(s390_idle).nohz_delay = 1; | ||
227 | index = ext_hash(code); | ||
228 | for (p = ext_int_hash[index]; p; p = p->next) { | ||
229 | if (likely(p->code == code)) | ||
230 | p->handler(ext_int_code, param32, param64); | ||
231 | } | ||
232 | irq_exit(); | ||
233 | set_irq_regs(old_regs); | ||
234 | } | ||
235 | |||
236 | static DEFINE_SPINLOCK(sc_irq_lock); | ||
237 | static int sc_irq_refcount; | ||
238 | |||
239 | void service_subclass_irq_register(void) | ||
240 | { | ||
241 | spin_lock(&sc_irq_lock); | ||
242 | if (!sc_irq_refcount) | ||
243 | ctl_set_bit(0, 9); | ||
244 | sc_irq_refcount++; | ||
245 | spin_unlock(&sc_irq_lock); | ||
246 | } | ||
247 | EXPORT_SYMBOL(service_subclass_irq_register); | ||
248 | |||
249 | void service_subclass_irq_unregister(void) | ||
250 | { | ||
251 | spin_lock(&sc_irq_lock); | ||
252 | sc_irq_refcount--; | ||
253 | if (!sc_irq_refcount) | ||
254 | ctl_clear_bit(0, 9); |
arch/s390/kernel/s390_ext.c
1 | /* | File was deleted | |
2 | * Copyright IBM Corp. 1999,2010 | ||
3 | * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>, | ||
4 | * Martin Schwidefsky <schwidefsky@de.ibm.com>, | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel_stat.h> | ||
8 | #include <linux/interrupt.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/ftrace.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <asm/s390_ext.h> | ||
15 | #include <asm/irq_regs.h> | ||
16 | #include <asm/cputime.h> | ||
17 | #include <asm/lowcore.h> | ||
18 | #include <asm/irq.h> | ||
19 | #include "entry.h" | ||
20 | |||
21 | struct ext_int_info { | ||
22 | struct ext_int_info *next; | ||
23 | ext_int_handler_t handler; | ||
24 | __u16 code; | ||
25 | }; | ||
26 | |||
27 | /* | ||
28 | * ext_int_hash[index] is the start of the list for all external interrupts | ||
29 | * that hash to this index. With the current set of external interrupts | ||
30 | * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000 | ||
31 | * iucv and 0x2603 pfault) this is always the first element. | ||
32 | */ | ||
33 | static struct ext_int_info *ext_int_hash[256]; | ||
34 | |||
35 | static inline int ext_hash(__u16 code) | ||
36 | { | ||
37 | return (code + (code >> 9)) & 0xff; | ||
38 | } | ||
39 | |||
40 | int register_external_interrupt(__u16 code, ext_int_handler_t handler) | ||
41 | { | ||
42 | struct ext_int_info *p; | ||
43 | int index; | ||
44 | |||
45 | p = kmalloc(sizeof(*p), GFP_ATOMIC); | ||
46 | if (!p) | ||
47 | return -ENOMEM; | ||
48 | p->code = code; | ||
49 | p->handler = handler; | ||
50 | index = ext_hash(code); | ||
51 | p->next = ext_int_hash[index]; | ||
52 | ext_int_hash[index] = p; | ||
53 | return 0; | ||
54 | } | ||
55 | EXPORT_SYMBOL(register_external_interrupt); | ||
56 | |||
57 | int unregister_external_interrupt(__u16 code, ext_int_handler_t handler) | ||
58 | { | ||
59 | struct ext_int_info *p, *q; | ||
60 | int index; | ||
61 | |||
62 | index = ext_hash(code); | ||
63 | q = NULL; | ||
64 | p = ext_int_hash[index]; | ||
65 | while (p) { | ||
66 | if (p->code == code && p->handler == handler) | ||
67 | break; | ||
68 | q = p; | ||
69 | p = p->next; | ||
70 | } | ||
71 | if (!p) | ||
72 | return -ENOENT; | ||
73 | if (q) | ||
74 | q->next = p->next; | ||
75 | else | ||
76 | ext_int_hash[index] = p->next; | ||
77 | kfree(p); | ||
78 | return 0; | ||
79 | } | ||
80 | EXPORT_SYMBOL(unregister_external_interrupt); | ||
81 | |||
82 | void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code, | ||
83 | unsigned int param32, unsigned long param64) | ||
84 | { | ||
85 | struct pt_regs *old_regs; | ||
86 | unsigned short code; | ||
87 | struct ext_int_info *p; | ||
88 | int index; | ||
89 | |||
90 | code = (unsigned short) ext_int_code; | ||
91 | old_regs = set_irq_regs(regs); | ||
92 | s390_idle_check(regs, S390_lowcore.int_clock, | ||
93 | S390_lowcore.async_enter_timer); | ||
94 | irq_enter(); | ||
95 | if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) | ||
96 | /* Serve timer interrupts first. */ | ||
97 | clock_comparator_work(); | ||
98 | kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; | ||
99 | if (code != 0x1004) | ||
100 | __get_cpu_var(s390_idle).nohz_delay = 1; | ||
101 | index = ext_hash(code); | ||
102 | for (p = ext_int_hash[index]; p; p = p->next) { | ||
103 | if (likely(p->code == code)) | ||
104 | p->handler(ext_int_code, param32, param64); | ||
105 | } | ||
106 | irq_exit(); | ||
107 | set_irq_regs(old_regs); | ||
108 | } | ||
109 | |||
110 | static DEFINE_SPINLOCK(sc_irq_lock); | ||
111 | static int sc_irq_refcount; | ||
112 | |||
113 | void service_subclass_irq_register(void) | ||
114 | { | ||
115 | spin_lock(&sc_irq_lock); | ||
116 | if (!sc_irq_refcount) | ||
117 | ctl_set_bit(0, 9); | ||
118 | sc_irq_refcount++; | ||
119 | spin_unlock(&sc_irq_lock); | ||
120 | } | ||
121 | EXPORT_SYMBOL(service_subclass_irq_register); | ||
122 | |||
123 | void service_subclass_irq_unregister(void) | ||
124 | { | ||
125 | spin_lock(&sc_irq_lock); | ||
126 | sc_irq_refcount--; | ||
127 | if (!sc_irq_refcount) | ||
128 | ctl_clear_bit(0, 9); | ||
129 | spin_unlock(&sc_irq_lock); | ||
130 | } | ||
131 | EXPORT_SYMBOL(service_subclass_irq_unregister); | ||
132 | 1 | /* |
arch/s390/kernel/smp.c
1 | /* | 1 | /* |
2 | * arch/s390/kernel/smp.c | 2 | * arch/s390/kernel/smp.c |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 1999, 2009 | 4 | * Copyright IBM Corp. 1999, 2009 |
5 | * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | 5 | * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), |
6 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 6 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
7 | * Heiko Carstens (heiko.carstens@de.ibm.com) | 7 | * Heiko Carstens (heiko.carstens@de.ibm.com) |
8 | * | 8 | * |
9 | * based on other smp stuff by | 9 | * based on other smp stuff by |
10 | * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> | 10 | * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> |
11 | * (c) 1998 Ingo Molnar | 11 | * (c) 1998 Ingo Molnar |
12 | * | 12 | * |
13 | * We work with logical cpu numbering everywhere we can. The only | 13 | * We work with logical cpu numbering everywhere we can. The only |
14 | * functions using the real cpu address (got from STAP) are the sigp | 14 | * functions using the real cpu address (got from STAP) are the sigp |
15 | * functions. For all other functions we use the identity mapping. | 15 | * functions. For all other functions we use the identity mapping. |
16 | * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is | 16 | * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is |
17 | * used e.g. to find the idle task belonging to a logical cpu. Every array | 17 | * used e.g. to find the idle task belonging to a logical cpu. Every array |
18 | * in the kernel is sorted by the logical cpu number and not by the physical | 18 | * in the kernel is sorted by the logical cpu number and not by the physical |
19 | * one which is causing all the confusion with __cpu_logical_map and | 19 | * one which is causing all the confusion with __cpu_logical_map and |
20 | * cpu_number_map in other architectures. | 20 | * cpu_number_map in other architectures. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #define KMSG_COMPONENT "cpu" | 23 | #define KMSG_COMPONENT "cpu" |
24 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 24 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
25 | 25 | ||
26 | #include <linux/workqueue.h> | 26 | #include <linux/workqueue.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
30 | #include <linux/err.h> | 30 | #include <linux/err.h> |
31 | #include <linux/spinlock.h> | 31 | #include <linux/spinlock.h> |
32 | #include <linux/kernel_stat.h> | 32 | #include <linux/kernel_stat.h> |
33 | #include <linux/delay.h> | 33 | #include <linux/delay.h> |
34 | #include <linux/cache.h> | 34 | #include <linux/cache.h> |
35 | #include <linux/interrupt.h> | 35 | #include <linux/interrupt.h> |
36 | #include <linux/irqflags.h> | 36 | #include <linux/irqflags.h> |
37 | #include <linux/cpu.h> | 37 | #include <linux/cpu.h> |
38 | #include <linux/timex.h> | 38 | #include <linux/timex.h> |
39 | #include <linux/bootmem.h> | 39 | #include <linux/bootmem.h> |
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <asm/asm-offsets.h> | 41 | #include <asm/asm-offsets.h> |
42 | #include <asm/ipl.h> | 42 | #include <asm/ipl.h> |
43 | #include <asm/setup.h> | 43 | #include <asm/setup.h> |
44 | #include <asm/sigp.h> | 44 | #include <asm/sigp.h> |
45 | #include <asm/pgalloc.h> | 45 | #include <asm/pgalloc.h> |
46 | #include <asm/irq.h> | 46 | #include <asm/irq.h> |
47 | #include <asm/s390_ext.h> | ||
48 | #include <asm/cpcmd.h> | 47 | #include <asm/cpcmd.h> |
49 | #include <asm/tlbflush.h> | 48 | #include <asm/tlbflush.h> |
50 | #include <asm/timer.h> | 49 | #include <asm/timer.h> |
51 | #include <asm/lowcore.h> | 50 | #include <asm/lowcore.h> |
52 | #include <asm/sclp.h> | 51 | #include <asm/sclp.h> |
53 | #include <asm/cputime.h> | 52 | #include <asm/cputime.h> |
54 | #include <asm/vdso.h> | 53 | #include <asm/vdso.h> |
55 | #include <asm/cpu.h> | 54 | #include <asm/cpu.h> |
56 | #include "entry.h" | 55 | #include "entry.h" |
57 | 56 | ||
58 | /* logical cpu to cpu address */ | 57 | /* logical cpu to cpu address */ |
59 | unsigned short __cpu_logical_map[NR_CPUS]; | 58 | unsigned short __cpu_logical_map[NR_CPUS]; |
60 | 59 | ||
61 | static struct task_struct *current_set[NR_CPUS]; | 60 | static struct task_struct *current_set[NR_CPUS]; |
62 | 61 | ||
63 | static u8 smp_cpu_type; | 62 | static u8 smp_cpu_type; |
64 | static int smp_use_sigp_detection; | 63 | static int smp_use_sigp_detection; |
65 | 64 | ||
66 | enum s390_cpu_state { | 65 | enum s390_cpu_state { |
67 | CPU_STATE_STANDBY, | 66 | CPU_STATE_STANDBY, |
68 | CPU_STATE_CONFIGURED, | 67 | CPU_STATE_CONFIGURED, |
69 | }; | 68 | }; |
70 | 69 | ||
71 | DEFINE_MUTEX(smp_cpu_state_mutex); | 70 | DEFINE_MUTEX(smp_cpu_state_mutex); |
72 | int smp_cpu_polarization[NR_CPUS]; | 71 | int smp_cpu_polarization[NR_CPUS]; |
73 | static int smp_cpu_state[NR_CPUS]; | 72 | static int smp_cpu_state[NR_CPUS]; |
74 | static int cpu_management; | 73 | static int cpu_management; |
75 | 74 | ||
76 | static DEFINE_PER_CPU(struct cpu, cpu_devices); | 75 | static DEFINE_PER_CPU(struct cpu, cpu_devices); |
77 | 76 | ||
78 | static void smp_ext_bitcall(int, int); | 77 | static void smp_ext_bitcall(int, int); |
79 | 78 | ||
80 | static int raw_cpu_stopped(int cpu) | 79 | static int raw_cpu_stopped(int cpu) |
81 | { | 80 | { |
82 | u32 status; | 81 | u32 status; |
83 | 82 | ||
84 | switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) { | 83 | switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) { |
85 | case sigp_status_stored: | 84 | case sigp_status_stored: |
86 | /* Check for stopped and check stop state */ | 85 | /* Check for stopped and check stop state */ |
87 | if (status & 0x50) | 86 | if (status & 0x50) |
88 | return 1; | 87 | return 1; |
89 | break; | 88 | break; |
90 | default: | 89 | default: |
91 | break; | 90 | break; |
92 | } | 91 | } |
93 | return 0; | 92 | return 0; |
94 | } | 93 | } |
95 | 94 | ||
96 | static inline int cpu_stopped(int cpu) | 95 | static inline int cpu_stopped(int cpu) |
97 | { | 96 | { |
98 | return raw_cpu_stopped(cpu_logical_map(cpu)); | 97 | return raw_cpu_stopped(cpu_logical_map(cpu)); |
99 | } | 98 | } |
100 | 99 | ||
101 | void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) | 100 | void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) |
102 | { | 101 | { |
103 | struct _lowcore *lc, *current_lc; | 102 | struct _lowcore *lc, *current_lc; |
104 | struct stack_frame *sf; | 103 | struct stack_frame *sf; |
105 | struct pt_regs *regs; | 104 | struct pt_regs *regs; |
106 | unsigned long sp; | 105 | unsigned long sp; |
107 | 106 | ||
108 | if (smp_processor_id() == 0) | 107 | if (smp_processor_id() == 0) |
109 | func(data); | 108 | func(data); |
110 | __load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY); | 109 | __load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY); |
111 | /* Disable lowcore protection */ | 110 | /* Disable lowcore protection */ |
112 | __ctl_clear_bit(0, 28); | 111 | __ctl_clear_bit(0, 28); |
113 | current_lc = lowcore_ptr[smp_processor_id()]; | 112 | current_lc = lowcore_ptr[smp_processor_id()]; |
114 | lc = lowcore_ptr[0]; | 113 | lc = lowcore_ptr[0]; |
115 | if (!lc) | 114 | if (!lc) |
116 | lc = current_lc; | 115 | lc = current_lc; |
117 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 116 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; |
118 | lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu; | 117 | lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu; |
119 | if (!cpu_online(0)) | 118 | if (!cpu_online(0)) |
120 | smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]); | 119 | smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]); |
121 | while (sigp(0, sigp_stop_and_store_status) == sigp_busy) | 120 | while (sigp(0, sigp_stop_and_store_status) == sigp_busy) |
122 | cpu_relax(); | 121 | cpu_relax(); |
123 | sp = lc->panic_stack; | 122 | sp = lc->panic_stack; |
124 | sp -= sizeof(struct pt_regs); | 123 | sp -= sizeof(struct pt_regs); |
125 | regs = (struct pt_regs *) sp; | 124 | regs = (struct pt_regs *) sp; |
126 | memcpy(®s->gprs, ¤t_lc->gpregs_save_area, sizeof(regs->gprs)); | 125 | memcpy(®s->gprs, ¤t_lc->gpregs_save_area, sizeof(regs->gprs)); |
127 | regs->psw = lc->psw_save_area; | 126 | regs->psw = lc->psw_save_area; |
128 | sp -= STACK_FRAME_OVERHEAD; | 127 | sp -= STACK_FRAME_OVERHEAD; |
129 | sf = (struct stack_frame *) sp; | 128 | sf = (struct stack_frame *) sp; |
130 | sf->back_chain = regs->gprs[15]; | 129 | sf->back_chain = regs->gprs[15]; |
131 | smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]); | 130 | smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]); |
132 | } | 131 | } |
133 | 132 | ||
134 | void smp_send_stop(void) | 133 | void smp_send_stop(void) |
135 | { | 134 | { |
136 | int cpu, rc; | 135 | int cpu, rc; |
137 | 136 | ||
138 | /* Disable all interrupts/machine checks */ | 137 | /* Disable all interrupts/machine checks */ |
139 | __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); | 138 | __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); |
140 | trace_hardirqs_off(); | 139 | trace_hardirqs_off(); |
141 | 140 | ||
142 | /* stop all processors */ | 141 | /* stop all processors */ |
143 | for_each_online_cpu(cpu) { | 142 | for_each_online_cpu(cpu) { |
144 | if (cpu == smp_processor_id()) | 143 | if (cpu == smp_processor_id()) |
145 | continue; | 144 | continue; |
146 | do { | 145 | do { |
147 | rc = sigp(cpu, sigp_stop); | 146 | rc = sigp(cpu, sigp_stop); |
148 | } while (rc == sigp_busy); | 147 | } while (rc == sigp_busy); |
149 | 148 | ||
150 | while (!cpu_stopped(cpu)) | 149 | while (!cpu_stopped(cpu)) |
151 | cpu_relax(); | 150 | cpu_relax(); |
152 | } | 151 | } |
153 | } | 152 | } |
154 | 153 | ||
155 | /* | 154 | /* |
156 | * This is the main routine where commands issued by other | 155 | * This is the main routine where commands issued by other |
157 | * cpus are handled. | 156 | * cpus are handled. |
158 | */ | 157 | */ |
159 | 158 | ||
160 | static void do_ext_call_interrupt(unsigned int ext_int_code, | 159 | static void do_ext_call_interrupt(unsigned int ext_int_code, |
161 | unsigned int param32, unsigned long param64) | 160 | unsigned int param32, unsigned long param64) |
162 | { | 161 | { |
163 | unsigned long bits; | 162 | unsigned long bits; |
164 | 163 | ||
165 | kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++; | 164 | kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++; |
166 | /* | 165 | /* |
167 | * handle bit signal external calls | 166 | * handle bit signal external calls |
168 | */ | 167 | */ |
169 | bits = xchg(&S390_lowcore.ext_call_fast, 0); | 168 | bits = xchg(&S390_lowcore.ext_call_fast, 0); |
170 | 169 | ||
171 | if (test_bit(ec_schedule, &bits)) | 170 | if (test_bit(ec_schedule, &bits)) |
172 | scheduler_ipi(); | 171 | scheduler_ipi(); |
173 | 172 | ||
174 | if (test_bit(ec_call_function, &bits)) | 173 | if (test_bit(ec_call_function, &bits)) |
175 | generic_smp_call_function_interrupt(); | 174 | generic_smp_call_function_interrupt(); |
176 | 175 | ||
177 | if (test_bit(ec_call_function_single, &bits)) | 176 | if (test_bit(ec_call_function_single, &bits)) |
178 | generic_smp_call_function_single_interrupt(); | 177 | generic_smp_call_function_single_interrupt(); |
179 | } | 178 | } |
180 | 179 | ||
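The handshake between smp_ext_bitcall() below and this handler is a lock-free mailbox: the sender sets a bit in the target's lowcore word and kicks the CPU with a sigp, and the receiver atomically swaps the word to zero and services every bit it found. A user-space sketch with C11 atomics standing in for the kernel's primitives:

#include <stdatomic.h>
#include <stdio.h>

enum { ec_schedule, ec_call_function, ec_call_function_single };

static _Atomic unsigned long ext_call_fast;

static void ext_bitcall(int sig)
{
	/* sender: set the bit, then (in the kernel) sigp the target cpu */
	atomic_fetch_or(&ext_call_fast, 1UL << sig);
}

static void ext_call_interrupt(void)
{
	/* receiver: fetch and clear all pending bits in one step */
	unsigned long bits = atomic_exchange(&ext_call_fast, 0);

	if (bits & (1UL << ec_schedule))
		printf("reschedule\n");
	if (bits & (1UL << ec_call_function))
		printf("call function\n");
	if (bits & (1UL << ec_call_function_single))
		printf("call function single\n");
}

int main(void)
{
	ext_bitcall(ec_schedule);
	ext_bitcall(ec_call_function);
	ext_call_interrupt();
	return 0;
}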
181 | /* | 180 | /* |
182 | * Send an external call sigp to another cpu and return without waiting | 181 | * Send an external call sigp to another cpu and return without waiting |
183 | * for its completion. | 182 | * for its completion. |
184 | */ | 183 | */ |
185 | static void smp_ext_bitcall(int cpu, int sig) | 184 | static void smp_ext_bitcall(int cpu, int sig) |
186 | { | 185 | { |
187 | /* | 186 | /* |
188 | * Set signaling bit in lowcore of target cpu and kick it | 187 | * Set signaling bit in lowcore of target cpu and kick it |
189 | */ | 188 | */ |
190 | set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); | 189 | set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); |
191 | while (sigp(cpu, sigp_emergency_signal) == sigp_busy) | 190 | while (sigp(cpu, sigp_emergency_signal) == sigp_busy) |
192 | udelay(10); | 191 | udelay(10); |
193 | } | 192 | } |
194 | 193 | ||
195 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | 194 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) |
196 | { | 195 | { |
197 | int cpu; | 196 | int cpu; |
198 | 197 | ||
199 | for_each_cpu(cpu, mask) | 198 | for_each_cpu(cpu, mask) |
200 | smp_ext_bitcall(cpu, ec_call_function); | 199 | smp_ext_bitcall(cpu, ec_call_function); |
201 | } | 200 | } |
202 | 201 | ||
203 | void arch_send_call_function_single_ipi(int cpu) | 202 | void arch_send_call_function_single_ipi(int cpu) |
204 | { | 203 | { |
205 | smp_ext_bitcall(cpu, ec_call_function_single); | 204 | smp_ext_bitcall(cpu, ec_call_function_single); |
206 | } | 205 | } |
207 | 206 | ||
208 | #ifndef CONFIG_64BIT | 207 | #ifndef CONFIG_64BIT |
209 | /* | 208 | /* |
210 | * this function sends a 'purge tlb' signal to another CPU. | 209 | * this function sends a 'purge tlb' signal to another CPU. |
211 | */ | 210 | */ |
212 | static void smp_ptlb_callback(void *info) | 211 | static void smp_ptlb_callback(void *info) |
213 | { | 212 | { |
214 | __tlb_flush_local(); | 213 | __tlb_flush_local(); |
215 | } | 214 | } |
216 | 215 | ||
217 | void smp_ptlb_all(void) | 216 | void smp_ptlb_all(void) |
218 | { | 217 | { |
219 | on_each_cpu(smp_ptlb_callback, NULL, 1); | 218 | on_each_cpu(smp_ptlb_callback, NULL, 1); |
220 | } | 219 | } |
221 | EXPORT_SYMBOL(smp_ptlb_all); | 220 | EXPORT_SYMBOL(smp_ptlb_all); |
222 | #endif /* ! CONFIG_64BIT */ | 221 | #endif /* ! CONFIG_64BIT */ |
223 | 222 | ||
224 | /* | 223 | /* |
225 | * this function sends a 'reschedule' IPI to another CPU. | 224 | * this function sends a 'reschedule' IPI to another CPU. |
226 | * it goes straight through and wastes no time serializing | 225 | * it goes straight through and wastes no time serializing |
227 | * anything. Worst case is that we lose a reschedule ... | 226 | * anything. Worst case is that we lose a reschedule ... |
228 | */ | 227 | */ |
229 | void smp_send_reschedule(int cpu) | 228 | void smp_send_reschedule(int cpu) |
230 | { | 229 | { |
231 | smp_ext_bitcall(cpu, ec_schedule); | 230 | smp_ext_bitcall(cpu, ec_schedule); |
232 | } | 231 | } |
233 | 232 | ||
234 | /* | 233 | /* |
235 | * parameter area for the set/clear control bit callbacks | 234 | * parameter area for the set/clear control bit callbacks |
236 | */ | 235 | */ |
237 | struct ec_creg_mask_parms { | 236 | struct ec_creg_mask_parms { |
238 | unsigned long orvals[16]; | 237 | unsigned long orvals[16]; |
239 | unsigned long andvals[16]; | 238 | unsigned long andvals[16]; |
240 | }; | 239 | }; |
241 | 240 | ||
242 | /* | 241 | /* |
243 | * callback for setting/clearing control bits | 242 | * callback for setting/clearing control bits |
244 | */ | 243 | */ |
245 | static void smp_ctl_bit_callback(void *info) | 244 | static void smp_ctl_bit_callback(void *info) |
246 | { | 245 | { |
247 | struct ec_creg_mask_parms *pp = info; | 246 | struct ec_creg_mask_parms *pp = info; |
248 | unsigned long cregs[16]; | 247 | unsigned long cregs[16]; |
249 | int i; | 248 | int i; |
250 | 249 | ||
251 | __ctl_store(cregs, 0, 15); | 250 | __ctl_store(cregs, 0, 15); |
252 | for (i = 0; i <= 15; i++) | 251 | for (i = 0; i <= 15; i++) |
253 | cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; | 252 | cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; |
254 | __ctl_load(cregs, 0, 15); | 253 | __ctl_load(cregs, 0, 15); |
255 | } | 254 | } |
256 | 255 | ||
257 | /* | 256 | /* |
258 | * Set a bit in a control register of all cpus | 257 | * Set a bit in a control register of all cpus |
259 | */ | 258 | */ |
260 | void smp_ctl_set_bit(int cr, int bit) | 259 | void smp_ctl_set_bit(int cr, int bit) |
261 | { | 260 | { |
262 | struct ec_creg_mask_parms parms; | 261 | struct ec_creg_mask_parms parms; |
263 | 262 | ||
264 | memset(&parms.orvals, 0, sizeof(parms.orvals)); | 263 | memset(&parms.orvals, 0, sizeof(parms.orvals)); |
265 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); | 264 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); |
266 | parms.orvals[cr] = 1 << bit; | 265 | parms.orvals[cr] = 1 << bit; |
267 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); | 266 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); |
268 | } | 267 | } |
269 | EXPORT_SYMBOL(smp_ctl_set_bit); | 268 | EXPORT_SYMBOL(smp_ctl_set_bit); |
270 | 269 | ||
271 | /* | 270 | /* |
272 | * Clear a bit in a control register of all cpus | 271 | * Clear a bit in a control register of all cpus |
273 | */ | 272 | */ |
274 | void smp_ctl_clear_bit(int cr, int bit) | 273 | void smp_ctl_clear_bit(int cr, int bit) |
275 | { | 274 | { |
276 | struct ec_creg_mask_parms parms; | 275 | struct ec_creg_mask_parms parms; |
277 | 276 | ||
278 | memset(&parms.orvals, 0, sizeof(parms.orvals)); | 277 | memset(&parms.orvals, 0, sizeof(parms.orvals)); |
279 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); | 278 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); |
280 | parms.andvals[cr] = ~(1L << bit); | 279 | parms.andvals[cr] = ~(1L << bit); |
281 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); | 280 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); |
282 | } | 281 | } |
283 | EXPORT_SYMBOL(smp_ctl_clear_bit); | 282 | EXPORT_SYMBOL(smp_ctl_clear_bit); |
284 | 283 | ||
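The orvals/andvals pair lets one callback implement both operations: each control register is first ANDed (clearing at most one bit) and then ORed (setting at most one bit). A minimal sketch of the mask arithmetic on a single register value:

#include <stdio.h>

int main(void)
{
	unsigned long creg = 0x42, orval, andval;
	int bit = 9;

	/* smp_ctl_set_bit(): andval keeps everything, orval adds the bit */
	orval = 1UL << bit;
	andval = ~0UL;
	creg = (creg & andval) | orval;
	printf("after set:   %#lx\n", creg);	/* 0x242 */

	/* smp_ctl_clear_bit(): orval adds nothing, andval drops the bit */
	orval = 0;
	andval = ~(1UL << bit);
	creg = (creg & andval) | orval;
	printf("after clear: %#lx\n", creg);	/* 0x42 */
	return 0;
}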
285 | #ifdef CONFIG_ZFCPDUMP | 284 | #ifdef CONFIG_ZFCPDUMP |
286 | 285 | ||
287 | static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) | 286 | static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) |
288 | { | 287 | { |
289 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) | 288 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) |
290 | return; | 289 | return; |
291 | if (cpu >= NR_CPUS) { | 290 | if (cpu >= NR_CPUS) { |
292 | pr_warning("CPU %i exceeds the maximum %i and is excluded from " | 291 | pr_warning("CPU %i exceeds the maximum %i and is excluded from " |
293 | "the dump\n", cpu, NR_CPUS - 1); | 292 | "the dump\n", cpu, NR_CPUS - 1); |
294 | return; | 293 | return; |
295 | } | 294 | } |
296 | zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL); | 295 | zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL); |
297 | while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy) | 296 | while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy) |
298 | cpu_relax(); | 297 | cpu_relax(); |
299 | memcpy_real(zfcpdump_save_areas[cpu], | 298 | memcpy_real(zfcpdump_save_areas[cpu], |
300 | (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, | 299 | (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, |
301 | sizeof(struct save_area)); | 300 | sizeof(struct save_area)); |
302 | } | 301 | } |
303 | 302 | ||
304 | struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; | 303 | struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; |
305 | EXPORT_SYMBOL_GPL(zfcpdump_save_areas); | 304 | EXPORT_SYMBOL_GPL(zfcpdump_save_areas); |
306 | 305 | ||
307 | #else | 306 | #else |
308 | 307 | ||
309 | static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { } | 308 | static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { } |
310 | 309 | ||
311 | #endif /* CONFIG_ZFCPDUMP */ | 310 | #endif /* CONFIG_ZFCPDUMP */ |
312 | 311 | ||
313 | static int cpu_known(int cpu_id) | 312 | static int cpu_known(int cpu_id) |
314 | { | 313 | { |
315 | int cpu; | 314 | int cpu; |
316 | 315 | ||
317 | for_each_present_cpu(cpu) { | 316 | for_each_present_cpu(cpu) { |
318 | if (__cpu_logical_map[cpu] == cpu_id) | 317 | if (__cpu_logical_map[cpu] == cpu_id) |
319 | return 1; | 318 | return 1; |
320 | } | 319 | } |
321 | return 0; | 320 | return 0; |
322 | } | 321 | } |
323 | 322 | ||
324 | static int smp_rescan_cpus_sigp(cpumask_t avail) | 323 | static int smp_rescan_cpus_sigp(cpumask_t avail) |
325 | { | 324 | { |
326 | int cpu_id, logical_cpu; | 325 | int cpu_id, logical_cpu; |
327 | 326 | ||
328 | logical_cpu = cpumask_first(&avail); | 327 | logical_cpu = cpumask_first(&avail); |
329 | if (logical_cpu >= nr_cpu_ids) | 328 | if (logical_cpu >= nr_cpu_ids) |
330 | return 0; | 329 | return 0; |
331 | for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) { | 330 | for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) { |
332 | if (cpu_known(cpu_id)) | 331 | if (cpu_known(cpu_id)) |
333 | continue; | 332 | continue; |
334 | __cpu_logical_map[logical_cpu] = cpu_id; | 333 | __cpu_logical_map[logical_cpu] = cpu_id; |
335 | smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; | 334 | smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; |
336 | if (!cpu_stopped(logical_cpu)) | 335 | if (!cpu_stopped(logical_cpu)) |
337 | continue; | 336 | continue; |
338 | set_cpu_present(logical_cpu, true); | 337 | set_cpu_present(logical_cpu, true); |
339 | smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; | 338 | smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; |
340 | logical_cpu = cpumask_next(logical_cpu, &avail); | 339 | logical_cpu = cpumask_next(logical_cpu, &avail); |
341 | if (logical_cpu >= nr_cpu_ids) | 340 | if (logical_cpu >= nr_cpu_ids) |
342 | break; | 341 | break; |
343 | } | 342 | } |
344 | return 0; | 343 | return 0; |
345 | } | 344 | } |
346 | 345 | ||
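Note: smp_rescan_cpus_sigp walks two cursors in step, physical cpu addresses 0..MAX_CPU_ADDRESS on one side and free logical slots on the other. A slot is bound to an address before the cpu_stopped() probe, apparently because the probe is addressed through the logical map; if nothing answers, the slot is simply reused for the next address. A user-space sketch of that scan, where known(), stopped() and the array sizes are invented stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define MAX_CPU_ADDRESS 7
#define NR_SLOTS 4

static int logical_map[NR_SLOTS];	/* logical slot -> physical address */
static bool present[NR_SLOTS];

/* Stubs standing in for cpu_known() and cpu_stopped(). */
static bool known(int phys)   { return phys == 0; }	/* boot cpu is mapped */
static bool stopped(int phys) { return phys % 2 == 1; }	/* odd cpus answer */

int main(void)
{
	int slot = 1;	/* slot 0 belongs to the boot cpu */

	for (int phys = 0; phys <= MAX_CPU_ADDRESS && slot < NR_SLOTS; phys++) {
		if (known(phys))
			continue;
		logical_map[slot] = phys;	/* bind tentatively */
		if (!stopped(phys))
			continue;		/* nobody home: reuse the slot */
		present[slot] = true;		/* found one: advance the cursor */
		slot++;
	}
	for (int i = 1; i < NR_SLOTS; i++)
		if (present[i])
			printf("logical %d -> physical %d\n", i, logical_map[i]);
	return 0;
}
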
347 | static int smp_rescan_cpus_sclp(cpumask_t avail) | 346 | static int smp_rescan_cpus_sclp(cpumask_t avail) |
348 | { | 347 | { |
349 | struct sclp_cpu_info *info; | 348 | struct sclp_cpu_info *info; |
350 | int cpu_id, logical_cpu, cpu; | 349 | int cpu_id, logical_cpu, cpu; |
351 | int rc; | 350 | int rc; |
352 | 351 | ||
353 | logical_cpu = cpumask_first(&avail); | 352 | logical_cpu = cpumask_first(&avail); |
354 | if (logical_cpu >= nr_cpu_ids) | 353 | if (logical_cpu >= nr_cpu_ids) |
355 | return 0; | 354 | return 0; |
356 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 355 | info = kmalloc(sizeof(*info), GFP_KERNEL); |
357 | if (!info) | 356 | if (!info) |
358 | return -ENOMEM; | 357 | return -ENOMEM; |
359 | rc = sclp_get_cpu_info(info); | 358 | rc = sclp_get_cpu_info(info); |
360 | if (rc) | 359 | if (rc) |
361 | goto out; | 360 | goto out; |
362 | for (cpu = 0; cpu < info->combined; cpu++) { | 361 | for (cpu = 0; cpu < info->combined; cpu++) { |
363 | if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) | 362 | if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) |
364 | continue; | 363 | continue; |
365 | cpu_id = info->cpu[cpu].address; | 364 | cpu_id = info->cpu[cpu].address; |
366 | if (cpu_known(cpu_id)) | 365 | if (cpu_known(cpu_id)) |
367 | continue; | 366 | continue; |
368 | __cpu_logical_map[logical_cpu] = cpu_id; | 367 | __cpu_logical_map[logical_cpu] = cpu_id; |
369 | smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; | 368 | smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; |
370 | set_cpu_present(logical_cpu, true); | 369 | set_cpu_present(logical_cpu, true); |
371 | if (cpu >= info->configured) | 370 | if (cpu >= info->configured) |
372 | smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; | 371 | smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; |
373 | else | 372 | else |
374 | smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; | 373 | smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; |
375 | logical_cpu = cpumask_next(logical_cpu, &avail); | 374 | logical_cpu = cpumask_next(logical_cpu, &avail); |
376 | if (logical_cpu >= nr_cpu_ids) | 375 | if (logical_cpu >= nr_cpu_ids) |
377 | break; | 376 | break; |
378 | } | 377 | } |
379 | out: | 378 | out: |
380 | kfree(info); | 379 | kfree(info); |
381 | return rc; | 380 | return rc; |
382 | } | 381 | } |
383 | 382 | ||
384 | static int __smp_rescan_cpus(void) | 383 | static int __smp_rescan_cpus(void) |
385 | { | 384 | { |
386 | cpumask_t avail; | 385 | cpumask_t avail; |
387 | 386 | ||
388 | cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); | 387 | cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); |
389 | if (smp_use_sigp_detection) | 388 | if (smp_use_sigp_detection) |
390 | return smp_rescan_cpus_sigp(avail); | 389 | return smp_rescan_cpus_sigp(avail); |
391 | else | 390 | else |
392 | return smp_rescan_cpus_sclp(avail); | 391 | return smp_rescan_cpus_sclp(avail); |
393 | } | 392 | } |
394 | 393 | ||
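Note: __smp_rescan_cpus derives the candidate set as possible XOR present. Since every present cpu is also possible, XOR and AND-NOT give the same mask here; a two-line check with plain bitmasks:

#include <stdio.h>

int main(void)
{
	unsigned int possible = 0x0f;	/* cpus 0..3 can exist */
	unsigned int present  = 0x03;	/* cpus 0..1 already there */

	/* present is a subset of possible, so both forms agree */
	printf("%#x %#x\n", possible ^ present, possible & ~present); /* 0xc 0xc */
	return 0;
}
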
395 | static void __init smp_detect_cpus(void) | 394 | static void __init smp_detect_cpus(void) |
396 | { | 395 | { |
397 | unsigned int cpu, c_cpus, s_cpus; | 396 | unsigned int cpu, c_cpus, s_cpus; |
398 | struct sclp_cpu_info *info; | 397 | struct sclp_cpu_info *info; |
399 | u16 boot_cpu_addr, cpu_addr; | 398 | u16 boot_cpu_addr, cpu_addr; |
400 | 399 | ||
401 | c_cpus = 1; | 400 | c_cpus = 1; |
402 | s_cpus = 0; | 401 | s_cpus = 0; |
403 | boot_cpu_addr = __cpu_logical_map[0]; | 402 | boot_cpu_addr = __cpu_logical_map[0]; |
404 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 403 | info = kmalloc(sizeof(*info), GFP_KERNEL); |
405 | if (!info) | 404 | if (!info) |
406 | panic("smp_detect_cpus failed to allocate memory\n"); | 405 | panic("smp_detect_cpus failed to allocate memory\n"); |
407 | /* Use sigp detection algorithm if sclp doesn't work. */ | 406 | /* Use sigp detection algorithm if sclp doesn't work. */ |
408 | if (sclp_get_cpu_info(info)) { | 407 | if (sclp_get_cpu_info(info)) { |
409 | smp_use_sigp_detection = 1; | 408 | smp_use_sigp_detection = 1; |
410 | for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { | 409 | for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { |
411 | if (cpu == boot_cpu_addr) | 410 | if (cpu == boot_cpu_addr) |
412 | continue; | 411 | continue; |
413 | if (!raw_cpu_stopped(cpu)) | 412 | if (!raw_cpu_stopped(cpu)) |
414 | continue; | 413 | continue; |
415 | smp_get_save_area(c_cpus, cpu); | 414 | smp_get_save_area(c_cpus, cpu); |
416 | c_cpus++; | 415 | c_cpus++; |
417 | } | 416 | } |
418 | goto out; | 417 | goto out; |
419 | } | 418 | } |
420 | 419 | ||
421 | if (info->has_cpu_type) { | 420 | if (info->has_cpu_type) { |
422 | for (cpu = 0; cpu < info->combined; cpu++) { | 421 | for (cpu = 0; cpu < info->combined; cpu++) { |
423 | if (info->cpu[cpu].address == boot_cpu_addr) { | 422 | if (info->cpu[cpu].address == boot_cpu_addr) { |
424 | smp_cpu_type = info->cpu[cpu].type; | 423 | smp_cpu_type = info->cpu[cpu].type; |
425 | break; | 424 | break; |
426 | } | 425 | } |
427 | } | 426 | } |
428 | } | 427 | } |
429 | 428 | ||
430 | for (cpu = 0; cpu < info->combined; cpu++) { | 429 | for (cpu = 0; cpu < info->combined; cpu++) { |
431 | if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) | 430 | if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) |
432 | continue; | 431 | continue; |
433 | cpu_addr = info->cpu[cpu].address; | 432 | cpu_addr = info->cpu[cpu].address; |
434 | if (cpu_addr == boot_cpu_addr) | 433 | if (cpu_addr == boot_cpu_addr) |
435 | continue; | 434 | continue; |
436 | if (!raw_cpu_stopped(cpu_addr)) { | 435 | if (!raw_cpu_stopped(cpu_addr)) { |
437 | s_cpus++; | 436 | s_cpus++; |
438 | continue; | 437 | continue; |
439 | } | 438 | } |
440 | smp_get_save_area(c_cpus, cpu_addr); | 439 | smp_get_save_area(c_cpus, cpu_addr); |
441 | c_cpus++; | 440 | c_cpus++; |
442 | } | 441 | } |
443 | out: | 442 | out: |
444 | kfree(info); | 443 | kfree(info); |
445 | pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); | 444 | pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); |
446 | get_online_cpus(); | 445 | get_online_cpus(); |
447 | __smp_rescan_cpus(); | 446 | __smp_rescan_cpus(); |
448 | put_online_cpus(); | 447 | put_online_cpus(); |
449 | } | 448 | } |
450 | 449 | ||
451 | /* | 450 | /* |
452 | * Activate a secondary processor. | 451 | * Activate a secondary processor. |
453 | */ | 452 | */ |
454 | int __cpuinit start_secondary(void *cpuvoid) | 453 | int __cpuinit start_secondary(void *cpuvoid) |
455 | { | 454 | { |
456 | /* Setup the cpu */ | 455 | /* Setup the cpu */ |
457 | cpu_init(); | 456 | cpu_init(); |
458 | preempt_disable(); | 457 | preempt_disable(); |
459 | /* Enable TOD clock interrupts on the secondary cpu. */ | 458 | /* Enable TOD clock interrupts on the secondary cpu. */ |
460 | init_cpu_timer(); | 459 | init_cpu_timer(); |
461 | /* Enable cpu timer interrupts on the secondary cpu. */ | 460 | /* Enable cpu timer interrupts on the secondary cpu. */ |
462 | init_cpu_vtimer(); | 461 | init_cpu_vtimer(); |
463 | /* Enable pfault pseudo page faults on this cpu. */ | 462 | /* Enable pfault pseudo page faults on this cpu. */ |
464 | pfault_init(); | 463 | pfault_init(); |
465 | 464 | ||
466 | /* call cpu notifiers */ | 465 | /* call cpu notifiers */ |
467 | notify_cpu_starting(smp_processor_id()); | 466 | notify_cpu_starting(smp_processor_id()); |
468 | /* Mark this cpu as online */ | 467 | /* Mark this cpu as online */ |
469 | ipi_call_lock(); | 468 | ipi_call_lock(); |
470 | set_cpu_online(smp_processor_id(), true); | 469 | set_cpu_online(smp_processor_id(), true); |
471 | ipi_call_unlock(); | 470 | ipi_call_unlock(); |
472 | /* Switch on interrupts */ | 471 | /* Switch on interrupts */ |
473 | local_irq_enable(); | 472 | local_irq_enable(); |
474 | /* cpu_idle will call schedule for us */ | 473 | /* cpu_idle will call schedule for us */ |
475 | cpu_idle(); | 474 | cpu_idle(); |
476 | return 0; | 475 | return 0; |
477 | } | 476 | } |
478 | 477 | ||
479 | struct create_idle { | 478 | struct create_idle { |
480 | struct work_struct work; | 479 | struct work_struct work; |
481 | struct task_struct *idle; | 480 | struct task_struct *idle; |
482 | struct completion done; | 481 | struct completion done; |
483 | int cpu; | 482 | int cpu; |
484 | }; | 483 | }; |
485 | 484 | ||
486 | static void __cpuinit smp_fork_idle(struct work_struct *work) | 485 | static void __cpuinit smp_fork_idle(struct work_struct *work) |
487 | { | 486 | { |
488 | struct create_idle *c_idle; | 487 | struct create_idle *c_idle; |
489 | 488 | ||
490 | c_idle = container_of(work, struct create_idle, work); | 489 | c_idle = container_of(work, struct create_idle, work); |
491 | c_idle->idle = fork_idle(c_idle->cpu); | 490 | c_idle->idle = fork_idle(c_idle->cpu); |
492 | complete(&c_idle->done); | 491 | complete(&c_idle->done); |
493 | } | 492 | } |
494 | 493 | ||
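Note: __cpu_up hands the fork_idle() call to a worker via schedule_work() and blocks on an on-stack completion until the idle task exists, presumably so the fork happens from a clean worker context rather than whatever task triggered the hotplug. The same hand-off shape in portable C, where a pthread mutex/condvar pair models struct completion and the payload is a dummy; compile with -pthread:

#include <pthread.h>
#include <stdio.h>

/* Rough analogue of struct create_idle: input, output, completion. */
struct create_req {
	pthread_mutex_t lock;
	pthread_cond_t done_cv;
	int done;
	int cpu;	/* input */
	long result;	/* output; stands in for the idle task pointer */
};

/* Worker side: produce the result, then signal completion. */
static void *worker(void *arg)
{
	struct create_req *req = arg;

	req->result = 1000 + req->cpu;		/* pretend fork_idle(cpu) */
	pthread_mutex_lock(&req->lock);
	req->done = 1;
	pthread_cond_signal(&req->done_cv);	/* complete(&c_idle.done) */
	pthread_mutex_unlock(&req->lock);
	return NULL;
}

int main(void)
{
	struct create_req req = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done_cv = PTHREAD_COND_INITIALIZER,
		.cpu = 2,
	};
	pthread_t tid;

	pthread_create(&tid, NULL, worker, &req);	/* schedule_work() */
	pthread_mutex_lock(&req.lock);
	while (!req.done)				/* wait_for_completion() */
		pthread_cond_wait(&req.done_cv, &req.lock);
	pthread_mutex_unlock(&req.lock);
	pthread_join(tid, NULL);
	printf("idle task for cpu %d -> %ld\n", req.cpu, req.result);
	return 0;
}
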
495 | static int __cpuinit smp_alloc_lowcore(int cpu) | 494 | static int __cpuinit smp_alloc_lowcore(int cpu) |
496 | { | 495 | { |
497 | unsigned long async_stack, panic_stack; | 496 | unsigned long async_stack, panic_stack; |
498 | struct _lowcore *lowcore; | 497 | struct _lowcore *lowcore; |
499 | 498 | ||
500 | lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); | 499 | lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); |
501 | if (!lowcore) | 500 | if (!lowcore) |
502 | return -ENOMEM; | 501 | return -ENOMEM; |
503 | async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); | 502 | async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); |
504 | panic_stack = __get_free_page(GFP_KERNEL); | 503 | panic_stack = __get_free_page(GFP_KERNEL); |
505 | if (!panic_stack || !async_stack) | 504 | if (!panic_stack || !async_stack) |
506 | goto out; | 505 | goto out; |
507 | memcpy(lowcore, &S390_lowcore, 512); | 506 | memcpy(lowcore, &S390_lowcore, 512); |
508 | memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512); | 507 | memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512); |
509 | lowcore->async_stack = async_stack + ASYNC_SIZE; | 508 | lowcore->async_stack = async_stack + ASYNC_SIZE; |
510 | lowcore->panic_stack = panic_stack + PAGE_SIZE; | 509 | lowcore->panic_stack = panic_stack + PAGE_SIZE; |
511 | 510 | ||
512 | #ifndef CONFIG_64BIT | 511 | #ifndef CONFIG_64BIT |
513 | if (MACHINE_HAS_IEEE) { | 512 | if (MACHINE_HAS_IEEE) { |
514 | unsigned long save_area; | 513 | unsigned long save_area; |
515 | 514 | ||
516 | save_area = get_zeroed_page(GFP_KERNEL); | 515 | save_area = get_zeroed_page(GFP_KERNEL); |
517 | if (!save_area) | 516 | if (!save_area) |
518 | goto out; | 517 | goto out; |
519 | lowcore->extended_save_area_addr = (u32) save_area; | 518 | lowcore->extended_save_area_addr = (u32) save_area; |
520 | } | 519 | } |
521 | #else | 520 | #else |
522 | if (vdso_alloc_per_cpu(cpu, lowcore)) | 521 | if (vdso_alloc_per_cpu(cpu, lowcore)) |
523 | goto out; | 522 | goto out; |
524 | #endif | 523 | #endif |
525 | lowcore_ptr[cpu] = lowcore; | 524 | lowcore_ptr[cpu] = lowcore; |
526 | return 0; | 525 | return 0; |
527 | 526 | ||
528 | out: | 527 | out: |
529 | free_page(panic_stack); | 528 | free_page(panic_stack); |
530 | free_pages(async_stack, ASYNC_ORDER); | 529 | free_pages(async_stack, ASYNC_ORDER); |
531 | free_pages((unsigned long) lowcore, LC_ORDER); | 530 | free_pages((unsigned long) lowcore, LC_ORDER); |
532 | return -ENOMEM; | 531 | return -ENOMEM; |
533 | } | 532 | } |
534 | 533 | ||
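Note: smp_alloc_lowcore unwinds every failure through one out: label. That works because free_page(0) and free_pages(0, order) tolerate a zero address, so no per-step labels are needed. A user-space sketch of the same single-exit pattern, leaning on free(NULL) being a no-op in the same way:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Acquire three buffers; on any failure release whatever was obtained. */
static int alloc_trio(void **a, void **b, void **c)
{
	*a = malloc(4096);
	*b = malloc(8192);
	*c = malloc(4096);
	if (!*a || !*b || !*c)
		goto out;
	return 0;
out:
	free(*c);	/* free(NULL) is a no-op, like free_page(0) */
	free(*b);
	free(*a);
	*a = *b = *c = NULL;
	return -ENOMEM;
}

int main(void)
{
	void *a, *b, *c;

	if (alloc_trio(&a, &b, &c) == 0) {
		puts("all three allocations succeeded");
		free(c);
		free(b);
		free(a);
	}
	return 0;
}
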
535 | static void smp_free_lowcore(int cpu) | 534 | static void smp_free_lowcore(int cpu) |
536 | { | 535 | { |
537 | struct _lowcore *lowcore; | 536 | struct _lowcore *lowcore; |
538 | 537 | ||
539 | lowcore = lowcore_ptr[cpu]; | 538 | lowcore = lowcore_ptr[cpu]; |
540 | #ifndef CONFIG_64BIT | 539 | #ifndef CONFIG_64BIT |
541 | if (MACHINE_HAS_IEEE) | 540 | if (MACHINE_HAS_IEEE) |
542 | free_page((unsigned long) lowcore->extended_save_area_addr); | 541 | free_page((unsigned long) lowcore->extended_save_area_addr); |
543 | #else | 542 | #else |
544 | vdso_free_per_cpu(cpu, lowcore); | 543 | vdso_free_per_cpu(cpu, lowcore); |
545 | #endif | 544 | #endif |
546 | free_page(lowcore->panic_stack - PAGE_SIZE); | 545 | free_page(lowcore->panic_stack - PAGE_SIZE); |
547 | free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER); | 546 | free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER); |
548 | free_pages((unsigned long) lowcore, LC_ORDER); | 547 | free_pages((unsigned long) lowcore, LC_ORDER); |
549 | lowcore_ptr[cpu] = NULL; | 548 | lowcore_ptr[cpu] = NULL; |
550 | } | 549 | } |
551 | 550 | ||
552 | /* Upping and downing of CPUs */ | 551 | /* Upping and downing of CPUs */ |
553 | int __cpuinit __cpu_up(unsigned int cpu) | 552 | int __cpuinit __cpu_up(unsigned int cpu) |
554 | { | 553 | { |
555 | struct _lowcore *cpu_lowcore; | 554 | struct _lowcore *cpu_lowcore; |
556 | struct create_idle c_idle; | 555 | struct create_idle c_idle; |
557 | struct task_struct *idle; | 556 | struct task_struct *idle; |
558 | struct stack_frame *sf; | 557 | struct stack_frame *sf; |
559 | u32 lowcore; | 558 | u32 lowcore; |
560 | int ccode; | 559 | int ccode; |
561 | 560 | ||
562 | if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) | 561 | if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) |
563 | return -EIO; | 562 | return -EIO; |
564 | idle = current_set[cpu]; | 563 | idle = current_set[cpu]; |
565 | if (!idle) { | 564 | if (!idle) { |
566 | c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done); | 565 | c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done); |
567 | INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle); | 566 | INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle); |
568 | c_idle.cpu = cpu; | 567 | c_idle.cpu = cpu; |
569 | schedule_work(&c_idle.work); | 568 | schedule_work(&c_idle.work); |
570 | wait_for_completion(&c_idle.done); | 569 | wait_for_completion(&c_idle.done); |
571 | if (IS_ERR(c_idle.idle)) | 570 | if (IS_ERR(c_idle.idle)) |
572 | return PTR_ERR(c_idle.idle); | 571 | return PTR_ERR(c_idle.idle); |
573 | idle = c_idle.idle; | 572 | idle = c_idle.idle; |
574 | current_set[cpu] = c_idle.idle; | 573 | current_set[cpu] = c_idle.idle; |
575 | } | 574 | } |
576 | init_idle(idle, cpu); | 575 | init_idle(idle, cpu); |
577 | if (smp_alloc_lowcore(cpu)) | 576 | if (smp_alloc_lowcore(cpu)) |
578 | return -ENOMEM; | 577 | return -ENOMEM; |
579 | do { | 578 | do { |
580 | ccode = sigp(cpu, sigp_initial_cpu_reset); | 579 | ccode = sigp(cpu, sigp_initial_cpu_reset); |
581 | if (ccode == sigp_busy) | 580 | if (ccode == sigp_busy) |
582 | udelay(10); | 581 | udelay(10); |
583 | if (ccode == sigp_not_operational) | 582 | if (ccode == sigp_not_operational) |
584 | goto err_out; | 583 | goto err_out; |
585 | } while (ccode == sigp_busy); | 584 | } while (ccode == sigp_busy); |
586 | 585 | ||
587 | lowcore = (u32)(unsigned long)lowcore_ptr[cpu]; | 586 | lowcore = (u32)(unsigned long)lowcore_ptr[cpu]; |
588 | while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) | 587 | while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) |
589 | udelay(10); | 588 | udelay(10); |
590 | 589 | ||
591 | cpu_lowcore = lowcore_ptr[cpu]; | 590 | cpu_lowcore = lowcore_ptr[cpu]; |
592 | cpu_lowcore->kernel_stack = (unsigned long) | 591 | cpu_lowcore->kernel_stack = (unsigned long) |
593 | task_stack_page(idle) + THREAD_SIZE; | 592 | task_stack_page(idle) + THREAD_SIZE; |
594 | cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle); | 593 | cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle); |
595 | sf = (struct stack_frame *) (cpu_lowcore->kernel_stack | 594 | sf = (struct stack_frame *) (cpu_lowcore->kernel_stack |
596 | - sizeof(struct pt_regs) | 595 | - sizeof(struct pt_regs) |
597 | - sizeof(struct stack_frame)); | 596 | - sizeof(struct stack_frame)); |
598 | memset(sf, 0, sizeof(struct stack_frame)); | 597 | memset(sf, 0, sizeof(struct stack_frame)); |
599 | sf->gprs[9] = (unsigned long) sf; | 598 | sf->gprs[9] = (unsigned long) sf; |
600 | cpu_lowcore->save_area[15] = (unsigned long) sf; | 599 | cpu_lowcore->save_area[15] = (unsigned long) sf; |
601 | __ctl_store(cpu_lowcore->cregs_save_area, 0, 15); | 600 | __ctl_store(cpu_lowcore->cregs_save_area, 0, 15); |
602 | atomic_inc(&init_mm.context.attach_count); | 601 | atomic_inc(&init_mm.context.attach_count); |
603 | asm volatile( | 602 | asm volatile( |
604 | " stam 0,15,0(%0)" | 603 | " stam 0,15,0(%0)" |
605 | : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); | 604 | : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); |
606 | cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; | 605 | cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; |
607 | cpu_lowcore->current_task = (unsigned long) idle; | 606 | cpu_lowcore->current_task = (unsigned long) idle; |
608 | cpu_lowcore->cpu_nr = cpu; | 607 | cpu_lowcore->cpu_nr = cpu; |
609 | cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; | 608 | cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; |
610 | cpu_lowcore->machine_flags = S390_lowcore.machine_flags; | 609 | cpu_lowcore->machine_flags = S390_lowcore.machine_flags; |
611 | cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; | 610 | cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; |
612 | memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list, | 611 | memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list, |
613 | MAX_FACILITY_BIT/8); | 612 | MAX_FACILITY_BIT/8); |
614 | eieio(); | 613 | eieio(); |
615 | 614 | ||
616 | while (sigp(cpu, sigp_restart) == sigp_busy) | 615 | while (sigp(cpu, sigp_restart) == sigp_busy) |
617 | udelay(10); | 616 | udelay(10); |
618 | 617 | ||
619 | while (!cpu_online(cpu)) | 618 | while (!cpu_online(cpu)) |
620 | cpu_relax(); | 619 | cpu_relax(); |
621 | return 0; | 620 | return 0; |
622 | 621 | ||
623 | err_out: | 622 | err_out: |
624 | smp_free_lowcore(cpu); | 623 | smp_free_lowcore(cpu); |
625 | return -EIO; | 624 | return -EIO; |
626 | } | 625 | } |
627 | 626 | ||
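Note: the bring-up loops above retry while sigp reports a busy condition code, waiting 10 microseconds between polls, and __cpu_up bails out only on not-operational. A condensed sketch of that retry contract; sigp_stub and its condition codes are invented stand-ins, and usleep() models udelay():

#include <stdio.h>
#include <unistd.h>

enum sigp_cc { cc_order_accepted, cc_busy, cc_not_operational };

/* Stub: pretend the target cpu is busy for the first two polls. */
static enum sigp_cc sigp_stub(int cpu)
{
	static int polls;

	(void)cpu;
	return ++polls < 3 ? cc_busy : cc_order_accepted;
}

int main(void)
{
	enum sigp_cc cc;

	do {
		cc = sigp_stub(1);	/* sigp(cpu, sigp_initial_cpu_reset) */
		if (cc == cc_busy)
			usleep(10);	/* udelay(10) in the original */
		if (cc == cc_not_operational)
			return 1;	/* the err_out path */
	} while (cc == cc_busy);
	puts("order accepted");
	return 0;
}
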
628 | static int __init setup_possible_cpus(char *s) | 627 | static int __init setup_possible_cpus(char *s) |
629 | { | 628 | { |
630 | int pcpus, cpu; | 629 | int pcpus, cpu; |
631 | 630 | ||
632 | pcpus = simple_strtoul(s, NULL, 0); | 631 | pcpus = simple_strtoul(s, NULL, 0); |
633 | init_cpu_possible(cpumask_of(0)); | 632 | init_cpu_possible(cpumask_of(0)); |
634 | for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++) | 633 | for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++) |
635 | set_cpu_possible(cpu, true); | 634 | set_cpu_possible(cpu, true); |
636 | return 0; | 635 | return 0; |
637 | } | 636 | } |
638 | early_param("possible_cpus", setup_possible_cpus); | 637 | early_param("possible_cpus", setup_possible_cpus); |
639 | 638 | ||
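Note: setup_possible_cpus resets the possible mask to cpu 0 alone and then marks cpus 1..N-1 possible, capped by nr_cpu_ids. The same parse-and-cap over a plain bitmask; NR_CPU_IDS and the literal argument string are stand-ins:

#include <stdio.h>
#include <stdlib.h>

#define NR_CPU_IDS 8	/* stand-in for nr_cpu_ids */

int main(void)
{
	const char *arg = "6";			/* value of possible_cpus= */
	unsigned long pcpus = strtoul(arg, NULL, 0);
	unsigned int possible = 1u << 0;	/* init_cpu_possible(cpumask_of(0)) */

	for (unsigned long cpu = 1; cpu < pcpus && cpu < NR_CPU_IDS; cpu++)
		possible |= 1u << cpu;		/* set_cpu_possible(cpu, true) */

	printf("possible mask = %#x\n", possible);	/* 0x3f for "6" */
	return 0;
}
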
640 | #ifdef CONFIG_HOTPLUG_CPU | 639 | #ifdef CONFIG_HOTPLUG_CPU |
641 | 640 | ||
642 | int __cpu_disable(void) | 641 | int __cpu_disable(void) |
643 | { | 642 | { |
644 | struct ec_creg_mask_parms cr_parms; | 643 | struct ec_creg_mask_parms cr_parms; |
645 | int cpu = smp_processor_id(); | 644 | int cpu = smp_processor_id(); |
646 | 645 | ||
647 | set_cpu_online(cpu, false); | 646 | set_cpu_online(cpu, false); |
648 | 647 | ||
649 | /* Disable pfault pseudo page faults on this cpu. */ | 648 | /* Disable pfault pseudo page faults on this cpu. */ |
650 | pfault_fini(); | 649 | pfault_fini(); |
651 | 650 | ||
652 | memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals)); | 651 | memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals)); |
653 | memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals)); | 652 | memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals)); |
654 | 653 | ||
655 | /* disable all external interrupts */ | 654 | /* disable all external interrupts */ |
656 | cr_parms.orvals[0] = 0; | 655 | cr_parms.orvals[0] = 0; |
657 | cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 | | 656 | cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 | |
658 | 1 << 10 | 1 << 9 | 1 << 6 | 1 << 4); | 657 | 1 << 10 | 1 << 9 | 1 << 6 | 1 << 4); |
659 | /* disable all I/O interrupts */ | 658 | /* disable all I/O interrupts */ |
660 | cr_parms.orvals[6] = 0; | 659 | cr_parms.orvals[6] = 0; |
661 | cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | | 660 | cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | |
662 | 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24); | 661 | 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24); |
663 | /* disable most machine checks */ | 662 | /* disable most machine checks */ |
664 | cr_parms.orvals[14] = 0; | 663 | cr_parms.orvals[14] = 0; |
665 | cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 | | 664 | cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 | |
666 | 1 << 25 | 1 << 24); | 665 | 1 << 25 | 1 << 24); |
667 | 666 | ||
668 | smp_ctl_bit_callback(&cr_parms); | 667 | smp_ctl_bit_callback(&cr_parms); |
669 | 668 | ||
670 | return 0; | 669 | return 0; |
671 | } | 670 | } |
672 | 671 | ||
673 | void __cpu_die(unsigned int cpu) | 672 | void __cpu_die(unsigned int cpu) |
674 | { | 673 | { |
675 | /* Wait until target cpu is down */ | 674 | /* Wait until target cpu is down */ |
676 | while (!cpu_stopped(cpu)) | 675 | while (!cpu_stopped(cpu)) |
677 | cpu_relax(); | 676 | cpu_relax(); |
678 | while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy) | 677 | while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy) |
679 | udelay(10); | 678 | udelay(10); |
680 | smp_free_lowcore(cpu); | 679 | smp_free_lowcore(cpu); |
681 | atomic_dec(&init_mm.context.attach_count); | 680 | atomic_dec(&init_mm.context.attach_count); |
682 | } | 681 | } |
683 | 682 | ||
684 | void __noreturn cpu_die(void) | 683 | void __noreturn cpu_die(void) |
685 | { | 684 | { |
686 | idle_task_exit(); | 685 | idle_task_exit(); |
687 | while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) | 686 | while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) |
688 | cpu_relax(); | 687 | cpu_relax(); |
689 | for (;;); | 688 | for (;;); |
690 | } | 689 | } |
691 | 690 | ||
692 | #endif /* CONFIG_HOTPLUG_CPU */ | 691 | #endif /* CONFIG_HOTPLUG_CPU */ |
693 | 692 | ||
694 | void __init smp_prepare_cpus(unsigned int max_cpus) | 693 | void __init smp_prepare_cpus(unsigned int max_cpus) |
695 | { | 694 | { |
696 | #ifndef CONFIG_64BIT | 695 | #ifndef CONFIG_64BIT |
697 | unsigned long save_area = 0; | 696 | unsigned long save_area = 0; |
698 | #endif | 697 | #endif |
699 | unsigned long async_stack, panic_stack; | 698 | unsigned long async_stack, panic_stack; |
700 | struct _lowcore *lowcore; | 699 | struct _lowcore *lowcore; |
701 | 700 | ||
702 | smp_detect_cpus(); | 701 | smp_detect_cpus(); |
703 | 702 | ||
704 | /* request the 0x1201 emergency signal external interrupt */ | 703 | /* request the 0x1201 emergency signal external interrupt */ |
705 | if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) | 704 | if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) |
706 | panic("Couldn't request external interrupt 0x1201"); | 705 | panic("Couldn't request external interrupt 0x1201"); |
707 | 706 | ||
708 | /* Reallocate current lowcore, but keep its contents. */ | 707 | /* Reallocate current lowcore, but keep its contents. */ |
709 | lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); | 708 | lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); |
710 | panic_stack = __get_free_page(GFP_KERNEL); | 709 | panic_stack = __get_free_page(GFP_KERNEL); |
711 | async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); | 710 | async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); |
712 | BUG_ON(!lowcore || !panic_stack || !async_stack); | 711 | BUG_ON(!lowcore || !panic_stack || !async_stack); |
713 | #ifndef CONFIG_64BIT | 712 | #ifndef CONFIG_64BIT |
714 | if (MACHINE_HAS_IEEE) | 713 | if (MACHINE_HAS_IEEE) |
715 | save_area = get_zeroed_page(GFP_KERNEL); | 714 | save_area = get_zeroed_page(GFP_KERNEL); |
716 | #endif | 715 | #endif |
717 | local_irq_disable(); | 716 | local_irq_disable(); |
718 | local_mcck_disable(); | 717 | local_mcck_disable(); |
719 | lowcore_ptr[smp_processor_id()] = lowcore; | 718 | lowcore_ptr[smp_processor_id()] = lowcore; |
720 | *lowcore = S390_lowcore; | 719 | *lowcore = S390_lowcore; |
721 | lowcore->panic_stack = panic_stack + PAGE_SIZE; | 720 | lowcore->panic_stack = panic_stack + PAGE_SIZE; |
722 | lowcore->async_stack = async_stack + ASYNC_SIZE; | 721 | lowcore->async_stack = async_stack + ASYNC_SIZE; |
723 | #ifndef CONFIG_64BIT | 722 | #ifndef CONFIG_64BIT |
724 | if (MACHINE_HAS_IEEE) | 723 | if (MACHINE_HAS_IEEE) |
725 | lowcore->extended_save_area_addr = (u32) save_area; | 724 | lowcore->extended_save_area_addr = (u32) save_area; |
726 | #endif | 725 | #endif |
727 | set_prefix((u32)(unsigned long) lowcore); | 726 | set_prefix((u32)(unsigned long) lowcore); |
728 | local_mcck_enable(); | 727 | local_mcck_enable(); |
729 | local_irq_enable(); | 728 | local_irq_enable(); |
730 | #ifdef CONFIG_64BIT | 729 | #ifdef CONFIG_64BIT |
731 | if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore)) | 730 | if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore)) |
732 | BUG(); | 731 | BUG(); |
733 | #endif | 732 | #endif |
734 | } | 733 | } |
735 | 734 | ||
736 | void __init smp_prepare_boot_cpu(void) | 735 | void __init smp_prepare_boot_cpu(void) |
737 | { | 736 | { |
738 | BUG_ON(smp_processor_id() != 0); | 737 | BUG_ON(smp_processor_id() != 0); |
739 | 738 | ||
740 | current_thread_info()->cpu = 0; | 739 | current_thread_info()->cpu = 0; |
741 | set_cpu_present(0, true); | 740 | set_cpu_present(0, true); |
742 | set_cpu_online(0, true); | 741 | set_cpu_online(0, true); |
743 | S390_lowcore.percpu_offset = __per_cpu_offset[0]; | 742 | S390_lowcore.percpu_offset = __per_cpu_offset[0]; |
744 | current_set[0] = current; | 743 | current_set[0] = current; |
745 | smp_cpu_state[0] = CPU_STATE_CONFIGURED; | 744 | smp_cpu_state[0] = CPU_STATE_CONFIGURED; |
746 | smp_cpu_polarization[0] = POLARIZATION_UNKNWN; | 745 | smp_cpu_polarization[0] = POLARIZATION_UNKNWN; |
747 | } | 746 | } |
748 | 747 | ||
749 | void __init smp_cpus_done(unsigned int max_cpus) | 748 | void __init smp_cpus_done(unsigned int max_cpus) |
750 | { | 749 | { |
751 | } | 750 | } |
752 | 751 | ||
753 | void __init smp_setup_processor_id(void) | 752 | void __init smp_setup_processor_id(void) |
754 | { | 753 | { |
755 | S390_lowcore.cpu_nr = 0; | 754 | S390_lowcore.cpu_nr = 0; |
756 | __cpu_logical_map[0] = stap(); | 755 | __cpu_logical_map[0] = stap(); |
757 | } | 756 | } |
758 | 757 | ||
759 | /* | 758 | /* |
760 | * the frequency of the profiling timer can be changed | 759 | * the frequency of the profiling timer can be changed |
761 | * by writing a multiplier value into /proc/profile. | 760 | * by writing a multiplier value into /proc/profile. |
762 | * | 761 | * |
763 | * usually you want to run this on all CPUs ;) | 762 | * usually you want to run this on all CPUs ;) |
764 | */ | 763 | */ |
765 | int setup_profiling_timer(unsigned int multiplier) | 764 | int setup_profiling_timer(unsigned int multiplier) |
766 | { | 765 | { |
767 | return 0; | 766 | return 0; |
768 | } | 767 | } |
769 | 768 | ||
770 | #ifdef CONFIG_HOTPLUG_CPU | 769 | #ifdef CONFIG_HOTPLUG_CPU |
771 | static ssize_t cpu_configure_show(struct sys_device *dev, | 770 | static ssize_t cpu_configure_show(struct sys_device *dev, |
772 | struct sysdev_attribute *attr, char *buf) | 771 | struct sysdev_attribute *attr, char *buf) |
773 | { | 772 | { |
774 | ssize_t count; | 773 | ssize_t count; |
775 | 774 | ||
776 | mutex_lock(&smp_cpu_state_mutex); | 775 | mutex_lock(&smp_cpu_state_mutex); |
777 | count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]); | 776 | count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]); |
778 | mutex_unlock(&smp_cpu_state_mutex); | 777 | mutex_unlock(&smp_cpu_state_mutex); |
779 | return count; | 778 | return count; |
780 | } | 779 | } |
781 | 780 | ||
782 | static ssize_t cpu_configure_store(struct sys_device *dev, | 781 | static ssize_t cpu_configure_store(struct sys_device *dev, |
783 | struct sysdev_attribute *attr, | 782 | struct sysdev_attribute *attr, |
784 | const char *buf, size_t count) | 783 | const char *buf, size_t count) |
785 | { | 784 | { |
786 | int cpu = dev->id; | 785 | int cpu = dev->id; |
787 | int val, rc; | 786 | int val, rc; |
788 | char delim; | 787 | char delim; |
789 | 788 | ||
790 | if (sscanf(buf, "%d %c", &val, &delim) != 1) | 789 | if (sscanf(buf, "%d %c", &val, &delim) != 1) |
791 | return -EINVAL; | 790 | return -EINVAL; |
792 | if (val != 0 && val != 1) | 791 | if (val != 0 && val != 1) |
793 | return -EINVAL; | 792 | return -EINVAL; |
794 | 793 | ||
795 | get_online_cpus(); | 794 | get_online_cpus(); |
796 | mutex_lock(&smp_cpu_state_mutex); | 795 | mutex_lock(&smp_cpu_state_mutex); |
797 | rc = -EBUSY; | 796 | rc = -EBUSY; |
798 | /* disallow configuration changes of online cpus and cpu 0 */ | 797 | /* disallow configuration changes of online cpus and cpu 0 */ |
799 | if (cpu_online(cpu) || cpu == 0) | 798 | if (cpu_online(cpu) || cpu == 0) |
800 | goto out; | 799 | goto out; |
801 | rc = 0; | 800 | rc = 0; |
802 | switch (val) { | 801 | switch (val) { |
803 | case 0: | 802 | case 0: |
804 | if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) { | 803 | if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) { |
805 | rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]); | 804 | rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]); |
806 | if (!rc) { | 805 | if (!rc) { |
807 | smp_cpu_state[cpu] = CPU_STATE_STANDBY; | 806 | smp_cpu_state[cpu] = CPU_STATE_STANDBY; |
808 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; | 807 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; |
809 | } | 808 | } |
810 | } | 809 | } |
811 | break; | 810 | break; |
812 | case 1: | 811 | case 1: |
813 | if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) { | 812 | if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) { |
814 | rc = sclp_cpu_configure(__cpu_logical_map[cpu]); | 813 | rc = sclp_cpu_configure(__cpu_logical_map[cpu]); |
815 | if (!rc) { | 814 | if (!rc) { |
816 | smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; | 815 | smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; |
817 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; | 816 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; |
818 | } | 817 | } |
819 | } | 818 | } |
820 | break; | 819 | break; |
821 | default: | 820 | default: |
822 | break; | 821 | break; |
823 | } | 822 | } |
824 | out: | 823 | out: |
825 | mutex_unlock(&smp_cpu_state_mutex); | 824 | mutex_unlock(&smp_cpu_state_mutex); |
826 | put_online_cpus(); | 825 | put_online_cpus(); |
827 | return rc ? rc : count; | 826 | return rc ? rc : count; |
828 | } | 827 | } |
829 | static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); | 828 | static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); |
830 | #endif /* CONFIG_HOTPLUG_CPU */ | 829 | #endif /* CONFIG_HOTPLUG_CPU */ |
831 | 830 | ||
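Note: cpu_configure_store (and dispatching_store further down) validate input with sscanf(buf, "%d %c", ...) != 1: the space before %c consumes trailing whitespace, so the %c conversion succeeds only when non-whitespace garbage follows the integer, and a return of exactly 1 means a lone integer. A runnable check of the idiom:

#include <stdio.h>

/* Accept only "one 0/1 integer, optionally followed by whitespace". */
static int parse_flag(const char *buf, int *val)
{
	char delim;

	if (sscanf(buf, "%d %c", val, &delim) != 1)
		return -1;	/* -EINVAL in the original */
	if (*val != 0 && *val != 1)
		return -1;
	return 0;
}

int main(void)
{
	const char *inputs[] = { "1\n", "0", "1 junk", "2", "x" };
	int val;

	for (unsigned i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++)
		printf("input %u -> %s\n", i,
		       parse_flag(inputs[i], &val) ? "rejected" : "accepted");
	return 0;	/* inputs 0 and 1 are accepted, the rest rejected */
}
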
832 | static ssize_t cpu_polarization_show(struct sys_device *dev, | 831 | static ssize_t cpu_polarization_show(struct sys_device *dev, |
833 | struct sysdev_attribute *attr, char *buf) | 832 | struct sysdev_attribute *attr, char *buf) |
834 | { | 833 | { |
835 | int cpu = dev->id; | 834 | int cpu = dev->id; |
836 | ssize_t count; | 835 | ssize_t count; |
837 | 836 | ||
838 | mutex_lock(&smp_cpu_state_mutex); | 837 | mutex_lock(&smp_cpu_state_mutex); |
839 | switch (smp_cpu_polarization[cpu]) { | 838 | switch (smp_cpu_polarization[cpu]) { |
840 | case POLARIZATION_HRZ: | 839 | case POLARIZATION_HRZ: |
841 | count = sprintf(buf, "horizontal\n"); | 840 | count = sprintf(buf, "horizontal\n"); |
842 | break; | 841 | break; |
843 | case POLARIZATION_VL: | 842 | case POLARIZATION_VL: |
844 | count = sprintf(buf, "vertical:low\n"); | 843 | count = sprintf(buf, "vertical:low\n"); |
845 | break; | 844 | break; |
846 | case POLARIZATION_VM: | 845 | case POLARIZATION_VM: |
847 | count = sprintf(buf, "vertical:medium\n"); | 846 | count = sprintf(buf, "vertical:medium\n"); |
848 | break; | 847 | break; |
849 | case POLARIZATION_VH: | 848 | case POLARIZATION_VH: |
850 | count = sprintf(buf, "vertical:high\n"); | 849 | count = sprintf(buf, "vertical:high\n"); |
851 | break; | 850 | break; |
852 | default: | 851 | default: |
853 | count = sprintf(buf, "unknown\n"); | 852 | count = sprintf(buf, "unknown\n"); |
854 | break; | 853 | break; |
855 | } | 854 | } |
856 | mutex_unlock(&smp_cpu_state_mutex); | 855 | mutex_unlock(&smp_cpu_state_mutex); |
857 | return count; | 856 | return count; |
858 | } | 857 | } |
859 | static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL); | 858 | static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL); |
860 | 859 | ||
861 | static ssize_t show_cpu_address(struct sys_device *dev, | 860 | static ssize_t show_cpu_address(struct sys_device *dev, |
862 | struct sysdev_attribute *attr, char *buf) | 861 | struct sysdev_attribute *attr, char *buf) |
863 | { | 862 | { |
864 | return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); | 863 | return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); |
865 | } | 864 | } |
866 | static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL); | 865 | static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL); |
867 | 866 | ||
868 | 867 | ||
869 | static struct attribute *cpu_common_attrs[] = { | 868 | static struct attribute *cpu_common_attrs[] = { |
870 | #ifdef CONFIG_HOTPLUG_CPU | 869 | #ifdef CONFIG_HOTPLUG_CPU |
871 | &attr_configure.attr, | 870 | &attr_configure.attr, |
872 | #endif | 871 | #endif |
873 | &attr_address.attr, | 872 | &attr_address.attr, |
874 | &attr_polarization.attr, | 873 | &attr_polarization.attr, |
875 | NULL, | 874 | NULL, |
876 | }; | 875 | }; |
877 | 876 | ||
878 | static struct attribute_group cpu_common_attr_group = { | 877 | static struct attribute_group cpu_common_attr_group = { |
879 | .attrs = cpu_common_attrs, | 878 | .attrs = cpu_common_attrs, |
880 | }; | 879 | }; |
881 | 880 | ||
882 | static ssize_t show_capability(struct sys_device *dev, | 881 | static ssize_t show_capability(struct sys_device *dev, |
883 | struct sysdev_attribute *attr, char *buf) | 882 | struct sysdev_attribute *attr, char *buf) |
884 | { | 883 | { |
885 | unsigned int capability; | 884 | unsigned int capability; |
886 | int rc; | 885 | int rc; |
887 | 886 | ||
888 | rc = get_cpu_capability(&capability); | 887 | rc = get_cpu_capability(&capability); |
889 | if (rc) | 888 | if (rc) |
890 | return rc; | 889 | return rc; |
891 | return sprintf(buf, "%u\n", capability); | 890 | return sprintf(buf, "%u\n", capability); |
892 | } | 891 | } |
893 | static SYSDEV_ATTR(capability, 0444, show_capability, NULL); | 892 | static SYSDEV_ATTR(capability, 0444, show_capability, NULL); |
894 | 893 | ||
895 | static ssize_t show_idle_count(struct sys_device *dev, | 894 | static ssize_t show_idle_count(struct sys_device *dev, |
896 | struct sysdev_attribute *attr, char *buf) | 895 | struct sysdev_attribute *attr, char *buf) |
897 | { | 896 | { |
898 | struct s390_idle_data *idle; | 897 | struct s390_idle_data *idle; |
899 | unsigned long long idle_count; | 898 | unsigned long long idle_count; |
900 | unsigned int sequence; | 899 | unsigned int sequence; |
901 | 900 | ||
902 | idle = &per_cpu(s390_idle, dev->id); | 901 | idle = &per_cpu(s390_idle, dev->id); |
903 | repeat: | 902 | repeat: |
904 | sequence = idle->sequence; | 903 | sequence = idle->sequence; |
905 | smp_rmb(); | 904 | smp_rmb(); |
906 | if (sequence & 1) | 905 | if (sequence & 1) |
907 | goto repeat; | 906 | goto repeat; |
908 | idle_count = idle->idle_count; | 907 | idle_count = idle->idle_count; |
909 | if (idle->idle_enter) | 908 | if (idle->idle_enter) |
910 | idle_count++; | 909 | idle_count++; |
911 | smp_rmb(); | 910 | smp_rmb(); |
912 | if (idle->sequence != sequence) | 911 | if (idle->sequence != sequence) |
913 | goto repeat; | 912 | goto repeat; |
914 | return sprintf(buf, "%llu\n", idle_count); | 913 | return sprintf(buf, "%llu\n", idle_count); |
915 | } | 914 | } |
916 | static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL); | 915 | static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL); |
917 | 916 | ||
918 | static ssize_t show_idle_time(struct sys_device *dev, | 917 | static ssize_t show_idle_time(struct sys_device *dev, |
919 | struct sysdev_attribute *attr, char *buf) | 918 | struct sysdev_attribute *attr, char *buf) |
920 | { | 919 | { |
921 | struct s390_idle_data *idle; | 920 | struct s390_idle_data *idle; |
922 | unsigned long long now, idle_time, idle_enter; | 921 | unsigned long long now, idle_time, idle_enter; |
923 | unsigned int sequence; | 922 | unsigned int sequence; |
924 | 923 | ||
925 | idle = &per_cpu(s390_idle, dev->id); | 924 | idle = &per_cpu(s390_idle, dev->id); |
926 | now = get_clock(); | 925 | now = get_clock(); |
927 | repeat: | 926 | repeat: |
928 | sequence = idle->sequence; | 927 | sequence = idle->sequence; |
929 | smp_rmb(); | 928 | smp_rmb(); |
930 | if (sequence & 1) | 929 | if (sequence & 1) |
931 | goto repeat; | 930 | goto repeat; |
932 | idle_time = idle->idle_time; | 931 | idle_time = idle->idle_time; |
933 | idle_enter = idle->idle_enter; | 932 | idle_enter = idle->idle_enter; |
934 | if (idle_enter != 0ULL && idle_enter < now) | 933 | if (idle_enter != 0ULL && idle_enter < now) |
935 | idle_time += now - idle_enter; | 934 | idle_time += now - idle_enter; |
936 | smp_rmb(); | 935 | smp_rmb(); |
937 | if (idle->sequence != sequence) | 936 | if (idle->sequence != sequence) |
938 | goto repeat; | 937 | goto repeat; |
939 | return sprintf(buf, "%llu\n", idle_time >> 12); | 938 | return sprintf(buf, "%llu\n", idle_time >> 12); |
940 | } | 939 | } |
941 | static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL); | 940 | static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL); |
942 | 941 | ||
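Note: show_idle_count and show_idle_time read the per-cpu counters under a hand-rolled sequence count: an odd ->sequence means an update is in flight, and the reader retries until it sees the same even value before and after the loads, with smp_rmb() ordering them. A user-space sketch of the reader protocol using C11 acquire loads in place of the barriers; the single-shot writer is a stub for what runs concurrently in the kernel:

#include <stdatomic.h>
#include <stdio.h>

struct idle_data {
	atomic_uint sequence;	/* even: stable, odd: update in flight */
	unsigned long long idle_count;
};

/* Writer: go odd, update, go even -- mirrors the presumed update side. */
static void writer_update(struct idle_data *d)
{
	atomic_fetch_add_explicit(&d->sequence, 1, memory_order_release);
	d->idle_count++;
	atomic_fetch_add_explicit(&d->sequence, 1, memory_order_release);
}

/* Reader: the retry loop from the two show functions. */
static unsigned long long read_idle_count(struct idle_data *d)
{
	unsigned int seq;
	unsigned long long count;

	for (;;) {
		seq = atomic_load_explicit(&d->sequence, memory_order_acquire);
		if (seq & 1)
			continue;	/* writer in flight: try again */
		count = d->idle_count;
		if (atomic_load_explicit(&d->sequence,
					 memory_order_acquire) == seq)
			return count;	/* stable snapshot */
	}
}

int main(void)
{
	struct idle_data d = { 0 };

	writer_update(&d);
	printf("idle_count = %llu\n", read_idle_count(&d));
	return 0;
}
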
943 | static struct attribute *cpu_online_attrs[] = { | 942 | static struct attribute *cpu_online_attrs[] = { |
944 | &attr_capability.attr, | 943 | &attr_capability.attr, |
945 | &attr_idle_count.attr, | 944 | &attr_idle_count.attr, |
946 | &attr_idle_time_us.attr, | 945 | &attr_idle_time_us.attr, |
947 | NULL, | 946 | NULL, |
948 | }; | 947 | }; |
949 | 948 | ||
950 | static struct attribute_group cpu_online_attr_group = { | 949 | static struct attribute_group cpu_online_attr_group = { |
951 | .attrs = cpu_online_attrs, | 950 | .attrs = cpu_online_attrs, |
952 | }; | 951 | }; |
953 | 952 | ||
954 | static int __cpuinit smp_cpu_notify(struct notifier_block *self, | 953 | static int __cpuinit smp_cpu_notify(struct notifier_block *self, |
955 | unsigned long action, void *hcpu) | 954 | unsigned long action, void *hcpu) |
956 | { | 955 | { |
957 | unsigned int cpu = (unsigned int)(long)hcpu; | 956 | unsigned int cpu = (unsigned int)(long)hcpu; |
958 | struct cpu *c = &per_cpu(cpu_devices, cpu); | 957 | struct cpu *c = &per_cpu(cpu_devices, cpu); |
959 | struct sys_device *s = &c->sysdev; | 958 | struct sys_device *s = &c->sysdev; |
960 | struct s390_idle_data *idle; | 959 | struct s390_idle_data *idle; |
961 | int err = 0; | 960 | int err = 0; |
962 | 961 | ||
963 | switch (action) { | 962 | switch (action) { |
964 | case CPU_ONLINE: | 963 | case CPU_ONLINE: |
965 | case CPU_ONLINE_FROZEN: | 964 | case CPU_ONLINE_FROZEN: |
966 | idle = &per_cpu(s390_idle, cpu); | 965 | idle = &per_cpu(s390_idle, cpu); |
967 | memset(idle, 0, sizeof(struct s390_idle_data)); | 966 | memset(idle, 0, sizeof(struct s390_idle_data)); |
968 | err = sysfs_create_group(&s->kobj, &cpu_online_attr_group); | 967 | err = sysfs_create_group(&s->kobj, &cpu_online_attr_group); |
969 | break; | 968 | break; |
970 | case CPU_DEAD: | 969 | case CPU_DEAD: |
971 | case CPU_DEAD_FROZEN: | 970 | case CPU_DEAD_FROZEN: |
972 | sysfs_remove_group(&s->kobj, &cpu_online_attr_group); | 971 | sysfs_remove_group(&s->kobj, &cpu_online_attr_group); |
973 | break; | 972 | break; |
974 | } | 973 | } |
975 | return notifier_from_errno(err); | 974 | return notifier_from_errno(err); |
976 | } | 975 | } |
977 | 976 | ||
978 | static struct notifier_block __cpuinitdata smp_cpu_nb = { | 977 | static struct notifier_block __cpuinitdata smp_cpu_nb = { |
979 | .notifier_call = smp_cpu_notify, | 978 | .notifier_call = smp_cpu_notify, |
980 | }; | 979 | }; |
981 | 980 | ||
982 | static int __devinit smp_add_present_cpu(int cpu) | 981 | static int __devinit smp_add_present_cpu(int cpu) |
983 | { | 982 | { |
984 | struct cpu *c = &per_cpu(cpu_devices, cpu); | 983 | struct cpu *c = &per_cpu(cpu_devices, cpu); |
985 | struct sys_device *s = &c->sysdev; | 984 | struct sys_device *s = &c->sysdev; |
986 | int rc; | 985 | int rc; |
987 | 986 | ||
988 | c->hotpluggable = 1; | 987 | c->hotpluggable = 1; |
989 | rc = register_cpu(c, cpu); | 988 | rc = register_cpu(c, cpu); |
990 | if (rc) | 989 | if (rc) |
991 | goto out; | 990 | goto out; |
992 | rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group); | 991 | rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group); |
993 | if (rc) | 992 | if (rc) |
994 | goto out_cpu; | 993 | goto out_cpu; |
995 | if (!cpu_online(cpu)) | 994 | if (!cpu_online(cpu)) |
996 | goto out; | 995 | goto out; |
997 | rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group); | 996 | rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group); |
998 | if (!rc) | 997 | if (!rc) |
999 | return 0; | 998 | return 0; |
1000 | sysfs_remove_group(&s->kobj, &cpu_common_attr_group); | 999 | sysfs_remove_group(&s->kobj, &cpu_common_attr_group); |
1001 | out_cpu: | 1000 | out_cpu: |
1002 | #ifdef CONFIG_HOTPLUG_CPU | 1001 | #ifdef CONFIG_HOTPLUG_CPU |
1003 | unregister_cpu(c); | 1002 | unregister_cpu(c); |
1004 | #endif | 1003 | #endif |
1005 | out: | 1004 | out: |
1006 | return rc; | 1005 | return rc; |
1007 | } | 1006 | } |
1008 | 1007 | ||
1009 | #ifdef CONFIG_HOTPLUG_CPU | 1008 | #ifdef CONFIG_HOTPLUG_CPU |
1010 | 1009 | ||
1011 | int __ref smp_rescan_cpus(void) | 1010 | int __ref smp_rescan_cpus(void) |
1012 | { | 1011 | { |
1013 | cpumask_t newcpus; | 1012 | cpumask_t newcpus; |
1014 | int cpu; | 1013 | int cpu; |
1015 | int rc; | 1014 | int rc; |
1016 | 1015 | ||
1017 | get_online_cpus(); | 1016 | get_online_cpus(); |
1018 | mutex_lock(&smp_cpu_state_mutex); | 1017 | mutex_lock(&smp_cpu_state_mutex); |
1019 | cpumask_copy(&newcpus, cpu_present_mask); | 1018 | cpumask_copy(&newcpus, cpu_present_mask); |
1020 | rc = __smp_rescan_cpus(); | 1019 | rc = __smp_rescan_cpus(); |
1021 | if (rc) | 1020 | if (rc) |
1022 | goto out; | 1021 | goto out; |
1023 | cpumask_andnot(&newcpus, cpu_present_mask, &newcpus); | 1022 | cpumask_andnot(&newcpus, cpu_present_mask, &newcpus); |
1024 | for_each_cpu(cpu, &newcpus) { | 1023 | for_each_cpu(cpu, &newcpus) { |
1025 | rc = smp_add_present_cpu(cpu); | 1024 | rc = smp_add_present_cpu(cpu); |
1026 | if (rc) | 1025 | if (rc) |
1027 | set_cpu_present(cpu, false); | 1026 | set_cpu_present(cpu, false); |
1028 | } | 1027 | } |
1029 | rc = 0; | 1028 | rc = 0; |
1030 | out: | 1029 | out: |
1031 | mutex_unlock(&smp_cpu_state_mutex); | 1030 | mutex_unlock(&smp_cpu_state_mutex); |
1032 | put_online_cpus(); | 1031 | put_online_cpus(); |
1033 | if (!cpumask_empty(&newcpus)) | 1032 | if (!cpumask_empty(&newcpus)) |
1034 | topology_schedule_update(); | 1033 | topology_schedule_update(); |
1035 | return rc; | 1034 | return rc; |
1036 | } | 1035 | } |
1037 | 1036 | ||
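Note: smp_rescan_cpus snapshots the present mask, lets __smp_rescan_cpus grow it, and recovers exactly the newcomers with cpumask_andnot() so each can get its sysfs files, knocking a cpu back out of the present mask when registration fails. The snapshot-and-diff step with plain bitmasks; the failing stub is invented:

#include <stdio.h>

static int add_present_cpu_stub(int cpu)
{
	return cpu == 3 ? -1 : 0;	/* pretend sysfs setup fails for cpu 3 */
}

int main(void)
{
	unsigned int present = 0x03;		/* cpus 0..1 before the rescan */
	unsigned int snapshot = present;	/* cpumask_copy(&newcpus, ...) */

	present |= 0x0c;			/* __smp_rescan_cpus() found 2..3 */

	unsigned int newcpus = present & ~snapshot;	/* cpumask_andnot() */
	for (int cpu = 0; cpu < 8; cpu++) {
		if (!(newcpus & (1u << cpu)))
			continue;
		if (add_present_cpu_stub(cpu))
			present &= ~(1u << cpu);	/* roll back on failure */
	}
	printf("present = %#x\n", present);	/* 0x7: cpu 3 rolled back */
	return 0;
}
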
1038 | static ssize_t __ref rescan_store(struct sysdev_class *class, | 1037 | static ssize_t __ref rescan_store(struct sysdev_class *class, |
1039 | struct sysdev_class_attribute *attr, | 1038 | struct sysdev_class_attribute *attr, |
1040 | const char *buf, | 1039 | const char *buf, |
1041 | size_t count) | 1040 | size_t count) |
1042 | { | 1041 | { |
1043 | int rc; | 1042 | int rc; |
1044 | 1043 | ||
1045 | rc = smp_rescan_cpus(); | 1044 | rc = smp_rescan_cpus(); |
1046 | return rc ? rc : count; | 1045 | return rc ? rc : count; |
1047 | } | 1046 | } |
1048 | static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store); | 1047 | static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store); |
1049 | #endif /* CONFIG_HOTPLUG_CPU */ | 1048 | #endif /* CONFIG_HOTPLUG_CPU */ |
1050 | 1049 | ||
1051 | static ssize_t dispatching_show(struct sysdev_class *class, | 1050 | static ssize_t dispatching_show(struct sysdev_class *class, |
1052 | struct sysdev_class_attribute *attr, | 1051 | struct sysdev_class_attribute *attr, |
1053 | char *buf) | 1052 | char *buf) |
1054 | { | 1053 | { |
1055 | ssize_t count; | 1054 | ssize_t count; |
1056 | 1055 | ||
1057 | mutex_lock(&smp_cpu_state_mutex); | 1056 | mutex_lock(&smp_cpu_state_mutex); |
1058 | count = sprintf(buf, "%d\n", cpu_management); | 1057 | count = sprintf(buf, "%d\n", cpu_management); |
1059 | mutex_unlock(&smp_cpu_state_mutex); | 1058 | mutex_unlock(&smp_cpu_state_mutex); |
1060 | return count; | 1059 | return count; |
1061 | } | 1060 | } |
1062 | 1061 | ||
1063 | static ssize_t dispatching_store(struct sysdev_class *dev, | 1062 | static ssize_t dispatching_store(struct sysdev_class *dev, |
1064 | struct sysdev_class_attribute *attr, | 1063 | struct sysdev_class_attribute *attr, |
1065 | const char *buf, | 1064 | const char *buf, |
1066 | size_t count) | 1065 | size_t count) |
1067 | { | 1066 | { |
1068 | int val, rc; | 1067 | int val, rc; |
1069 | char delim; | 1068 | char delim; |
1070 | 1069 | ||
1071 | if (sscanf(buf, "%d %c", &val, &delim) != 1) | 1070 | if (sscanf(buf, "%d %c", &val, &delim) != 1) |
1072 | return -EINVAL; | 1071 | return -EINVAL; |
1073 | if (val != 0 && val != 1) | 1072 | if (val != 0 && val != 1) |
1074 | return -EINVAL; | 1073 | return -EINVAL; |
1075 | rc = 0; | 1074 | rc = 0; |
1076 | get_online_cpus(); | 1075 | get_online_cpus(); |
1077 | mutex_lock(&smp_cpu_state_mutex); | 1076 | mutex_lock(&smp_cpu_state_mutex); |
1078 | if (cpu_management == val) | 1077 | if (cpu_management == val) |
1079 | goto out; | 1078 | goto out; |
1080 | rc = topology_set_cpu_management(val); | 1079 | rc = topology_set_cpu_management(val); |
1081 | if (!rc) | 1080 | if (!rc) |
1082 | cpu_management = val; | 1081 | cpu_management = val; |
1083 | out: | 1082 | out: |
1084 | mutex_unlock(&smp_cpu_state_mutex); | 1083 | mutex_unlock(&smp_cpu_state_mutex); |
1085 | put_online_cpus(); | 1084 | put_online_cpus(); |
1086 | return rc ? rc : count; | 1085 | return rc ? rc : count; |
1087 | } | 1086 | } |
1088 | static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show, | 1087 | static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show, |
1089 | dispatching_store); | 1088 | dispatching_store); |
1090 | 1089 | ||
1091 | static int __init topology_init(void) | 1090 | static int __init topology_init(void) |
1092 | { | 1091 | { |
1093 | int cpu; | 1092 | int cpu; |
1094 | int rc; | 1093 | int rc; |
1095 | 1094 | ||
1096 | register_cpu_notifier(&smp_cpu_nb); | 1095 | register_cpu_notifier(&smp_cpu_nb); |
1097 | 1096 | ||
1098 | #ifdef CONFIG_HOTPLUG_CPU | 1097 | #ifdef CONFIG_HOTPLUG_CPU |
1099 | rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan); | 1098 | rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan); |
1100 | if (rc) | 1099 | if (rc) |
1101 | return rc; | 1100 | return rc; |
1102 | #endif | 1101 | #endif |
1103 | rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching); | 1102 | rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching); |
1104 | if (rc) | 1103 | if (rc) |
1105 | return rc; | 1104 | return rc; |
1106 | for_each_present_cpu(cpu) { | 1105 | for_each_present_cpu(cpu) { |
1107 | rc = smp_add_present_cpu(cpu); | 1106 | rc = smp_add_present_cpu(cpu); |
1108 | if (rc) | 1107 | if (rc) |
1109 | return rc; | 1108 | return rc; |
1110 | } | 1109 | } |
1111 | return 0; | 1110 | return 0; |
1112 | } | 1111 | } |
1113 | subsys_initcall(topology_init); | 1112 | subsys_initcall(topology_init); |
1114 | 1113 |
arch/s390/kernel/time.c
1 | /* | 1 | /* |
2 | * arch/s390/kernel/time.c | 2 | * arch/s390/kernel/time.c |
3 | * Time of day based timer functions. | 3 | * Time of day based timer functions. |
4 | * | 4 | * |
5 | * S390 version | 5 | * S390 version |
6 | * Copyright IBM Corp. 1999, 2008 | 6 | * Copyright IBM Corp. 1999, 2008 |
7 | * Author(s): Hartmut Penner (hp@de.ibm.com), | 7 | * Author(s): Hartmut Penner (hp@de.ibm.com), |
8 | * Martin Schwidefsky (schwidefsky@de.ibm.com), | 8 | * Martin Schwidefsky (schwidefsky@de.ibm.com), |
9 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) | 9 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) |
10 | * | 10 | * |
11 | * Derived from "arch/i386/kernel/time.c" | 11 | * Derived from "arch/i386/kernel/time.c" |
12 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds | 12 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #define KMSG_COMPONENT "time" | 15 | #define KMSG_COMPONENT "time" |
16 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 16 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
17 | 17 | ||
18 | #include <linux/kernel_stat.h> | 18 | #include <linux/kernel_stat.h> |
19 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/param.h> | 23 | #include <linux/param.h> |
24 | #include <linux/string.h> | 24 | #include <linux/string.h> |
25 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
26 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
27 | #include <linux/cpu.h> | 27 | #include <linux/cpu.h> |
28 | #include <linux/stop_machine.h> | 28 | #include <linux/stop_machine.h> |
29 | #include <linux/time.h> | 29 | #include <linux/time.h> |
30 | #include <linux/sysdev.h> | 30 | #include <linux/sysdev.h> |
31 | #include <linux/delay.h> | 31 | #include <linux/delay.h> |
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/smp.h> | 33 | #include <linux/smp.h> |
34 | #include <linux/types.h> | 34 | #include <linux/types.h> |
35 | #include <linux/profile.h> | 35 | #include <linux/profile.h> |
36 | #include <linux/timex.h> | 36 | #include <linux/timex.h> |
37 | #include <linux/notifier.h> | 37 | #include <linux/notifier.h> |
38 | #include <linux/clocksource.h> | 38 | #include <linux/clocksource.h> |
39 | #include <linux/clockchips.h> | 39 | #include <linux/clockchips.h> |
40 | #include <linux/gfp.h> | 40 | #include <linux/gfp.h> |
41 | #include <linux/kprobes.h> | 41 | #include <linux/kprobes.h> |
42 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
43 | #include <asm/delay.h> | 43 | #include <asm/delay.h> |
44 | #include <asm/s390_ext.h> | ||
45 | #include <asm/div64.h> | 44 | #include <asm/div64.h> |
46 | #include <asm/vdso.h> | 45 | #include <asm/vdso.h> |
47 | #include <asm/irq.h> | 46 | #include <asm/irq.h> |
48 | #include <asm/irq_regs.h> | 47 | #include <asm/irq_regs.h> |
49 | #include <asm/timer.h> | 48 | #include <asm/timer.h> |
50 | #include <asm/etr.h> | 49 | #include <asm/etr.h> |
51 | #include <asm/cio.h> | 50 | #include <asm/cio.h> |
52 | 51 | ||
53 | /* change this if you have some constant time drift */ | 52 | /* change this if you have some constant time drift */ |
54 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) | 53 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) |
55 | #define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) | 54 | #define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) |
56 | 55 | ||
57 | u64 sched_clock_base_cc = -1; /* Force to data section. */ | 56 | u64 sched_clock_base_cc = -1; /* Force to data section. */ |
58 | EXPORT_SYMBOL_GPL(sched_clock_base_cc); | 57 | EXPORT_SYMBOL_GPL(sched_clock_base_cc); |
59 | 58 | ||
60 | static DEFINE_PER_CPU(struct clock_event_device, comparators); | 59 | static DEFINE_PER_CPU(struct clock_event_device, comparators); |
61 | 60 | ||
62 | /* | 61 | /* |
63 | * Scheduler clock - returns current time in nanosec units. | 62 | * Scheduler clock - returns current time in nanosec units. |
64 | */ | 63 | */ |
65 | unsigned long long notrace __kprobes sched_clock(void) | 64 | unsigned long long notrace __kprobes sched_clock(void) |
66 | { | 65 | { |
67 | return (get_clock_monotonic() * 125) >> 9; | 66 | return (get_clock_monotonic() * 125) >> 9; |
68 | } | 67 | } |
69 | 68 | ||
70 | /* | 69 | /* |
71 | * Monotonic_clock - returns # of nanoseconds passed since time_init() | 70 | * Monotonic_clock - returns # of nanoseconds passed since time_init() |
72 | */ | 71 | */ |
73 | unsigned long long monotonic_clock(void) | 72 | unsigned long long monotonic_clock(void) |
74 | { | 73 | { |
75 | return sched_clock(); | 74 | return sched_clock(); |
76 | } | 75 | } |
77 | EXPORT_SYMBOL(monotonic_clock); | 76 | EXPORT_SYMBOL(monotonic_clock); |
78 | 77 | ||
79 | void tod_to_timeval(__u64 todval, struct timespec *xt) | 78 | void tod_to_timeval(__u64 todval, struct timespec *xt) |
80 | { | 79 | { |
81 | unsigned long long sec; | 80 | unsigned long long sec; |
82 | 81 | ||
83 | sec = todval >> 12; | 82 | sec = todval >> 12; |
84 | do_div(sec, 1000000); | 83 | do_div(sec, 1000000); |
85 | xt->tv_sec = sec; | 84 | xt->tv_sec = sec; |
86 | todval -= (sec * 1000000) << 12; | 85 | todval -= (sec * 1000000) << 12; |
87 | xt->tv_nsec = ((todval * 1000) >> 12); | 86 | xt->tv_nsec = ((todval * 1000) >> 12); |
88 | } | 87 | } |
89 | EXPORT_SYMBOL(tod_to_timeval); | 88 | EXPORT_SYMBOL(tod_to_timeval); |
90 | 89 | ||
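Note: the shifting here follows from the TOD clock format: bit 51 ticks once per microsecond, i.e. 4096 TOD units per microsecond, so todval >> 12 is microseconds, and sched_clock's (tod * 125) >> 9 equals tod * 1000 / 4096 nanoseconds while keeping the 64-bit product from overflowing. A worked check of both conversions, assuming that unit interpretation:

#include <stdio.h>

int main(void)
{
	/* One second worth of TOD units: 4096 per microsecond. */
	unsigned long long tod = 1000000ULL << 12;

	/* tod_to_timeval: shift out the sub-microsecond bits first */
	unsigned long long sec = (tod >> 12) / 1000000;
	unsigned long long nsec = ((tod - ((sec * 1000000) << 12)) * 1000) >> 12;

	/* sched_clock: 125/512 == 1000/4096, with a smaller multiplier */
	unsigned long long ns = (tod * 125) >> 9;

	printf("sec=%llu nsec=%llu ns=%llu\n", sec, nsec, ns);
	/* prints: sec=1 nsec=0 ns=1000000000 */
	return 0;
}
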
91 | void clock_comparator_work(void) | 90 | void clock_comparator_work(void) |
92 | { | 91 | { |
93 | struct clock_event_device *cd; | 92 | struct clock_event_device *cd; |
94 | 93 | ||
95 | S390_lowcore.clock_comparator = -1ULL; | 94 | S390_lowcore.clock_comparator = -1ULL; |
96 | set_clock_comparator(S390_lowcore.clock_comparator); | 95 | set_clock_comparator(S390_lowcore.clock_comparator); |
97 | cd = &__get_cpu_var(comparators); | 96 | cd = &__get_cpu_var(comparators); |
98 | cd->event_handler(cd); | 97 | cd->event_handler(cd); |
99 | } | 98 | } |
100 | 99 | ||
101 | /* | 100 | /* |
102 | * Fixup the clock comparator. | 101 | * Fixup the clock comparator. |
103 | */ | 102 | */ |
104 | static void fixup_clock_comparator(unsigned long long delta) | 103 | static void fixup_clock_comparator(unsigned long long delta) |
105 | { | 104 | { |
106 | /* If nobody is waiting there's nothing to fix. */ | 105 | /* If nobody is waiting there's nothing to fix. */ |
107 | if (S390_lowcore.clock_comparator == -1ULL) | 106 | if (S390_lowcore.clock_comparator == -1ULL) |
108 | return; | 107 | return; |
109 | S390_lowcore.clock_comparator += delta; | 108 | S390_lowcore.clock_comparator += delta; |
110 | set_clock_comparator(S390_lowcore.clock_comparator); | 109 | set_clock_comparator(S390_lowcore.clock_comparator); |
111 | } | 110 | } |
112 | 111 | ||
113 | static int s390_next_event(unsigned long delta, | 112 | static int s390_next_event(unsigned long delta, |
114 | struct clock_event_device *evt) | 113 | struct clock_event_device *evt) |
115 | { | 114 | { |
116 | S390_lowcore.clock_comparator = get_clock() + delta; | 115 | S390_lowcore.clock_comparator = get_clock() + delta; |
117 | set_clock_comparator(S390_lowcore.clock_comparator); | 116 | set_clock_comparator(S390_lowcore.clock_comparator); |
118 | return 0; | 117 | return 0; |
119 | } | 118 | } |
120 | 119 | ||
121 | static void s390_set_mode(enum clock_event_mode mode, | 120 | static void s390_set_mode(enum clock_event_mode mode, |
122 | struct clock_event_device *evt) | 121 | struct clock_event_device *evt) |
123 | { | 122 | { |
124 | } | 123 | } |
125 | 124 | ||
126 | /* | 125 | /* |
127 | * Set up lowcore and control register of the current cpu to | 126 | * Set up lowcore and control register of the current cpu to |
128 | * enable TOD clock and clock comparator interrupts. | 127 | * enable TOD clock and clock comparator interrupts. |
129 | */ | 128 | */ |
130 | void init_cpu_timer(void) | 129 | void init_cpu_timer(void) |
131 | { | 130 | { |
132 | struct clock_event_device *cd; | 131 | struct clock_event_device *cd; |
133 | int cpu; | 132 | int cpu; |
134 | 133 | ||
135 | S390_lowcore.clock_comparator = -1ULL; | 134 | S390_lowcore.clock_comparator = -1ULL; |
136 | set_clock_comparator(S390_lowcore.clock_comparator); | 135 | set_clock_comparator(S390_lowcore.clock_comparator); |
137 | 136 | ||
138 | cpu = smp_processor_id(); | 137 | cpu = smp_processor_id(); |
139 | cd = &per_cpu(comparators, cpu); | 138 | cd = &per_cpu(comparators, cpu); |
140 | cd->name = "comparator"; | 139 | cd->name = "comparator"; |
141 | cd->features = CLOCK_EVT_FEAT_ONESHOT; | 140 | cd->features = CLOCK_EVT_FEAT_ONESHOT; |
142 | cd->mult = 16777; | 141 | cd->mult = 16777; |
143 | cd->shift = 12; | 142 | cd->shift = 12; |
144 | cd->min_delta_ns = 1; | 143 | cd->min_delta_ns = 1; |
145 | cd->max_delta_ns = LONG_MAX; | 144 | cd->max_delta_ns = LONG_MAX; |
146 | cd->rating = 400; | 145 | cd->rating = 400; |
147 | cd->cpumask = cpumask_of(cpu); | 146 | cd->cpumask = cpumask_of(cpu); |
148 | cd->set_next_event = s390_next_event; | 147 | cd->set_next_event = s390_next_event; |
149 | cd->set_mode = s390_set_mode; | 148 | cd->set_mode = s390_set_mode; |
150 | 149 | ||
151 | clockevents_register_device(cd); | 150 | clockevents_register_device(cd); |
152 | 151 | ||
153 | /* Enable clock comparator timer interrupt. */ | 152 | /* Enable clock comparator timer interrupt. */ |
154 | __ctl_set_bit(0,11); | 153 | __ctl_set_bit(0,11); |
155 | 154 | ||
156 | /* Always allow the timing alert external interrupt. */ | 155 | /* Always allow the timing alert external interrupt. */ |
157 | __ctl_set_bit(0, 4); | 156 | __ctl_set_bit(0, 4); |
158 | } | 157 | } |
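The mult/shift pair programmed above runs the conversion the other way: the clockevents core computes device ticks as (delta_ns * mult) >> shift, and 16777 / 2^12 ≈ 4.096 is the number of TOD units per nanosecond. A quick standalone check (values taken from the code above, expected result computed by hand):

#include <assert.h>

int main(void)
{
	unsigned long long ns = 1000000;               /* 1 ms */
	unsigned long long ticks = (ns * 16777) >> 12; /* mult=16777, shift=12 */

	/* 1 ms is exactly 4096000 TOD units; the mult/shift
	 * approximation lands just below that. */
	assert(ticks == 4095947);
	return 0;
}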
159 | 158 | ||
160 | static void clock_comparator_interrupt(unsigned int ext_int_code, | 159 | static void clock_comparator_interrupt(unsigned int ext_int_code, |
161 | unsigned int param32, | 160 | unsigned int param32, |
162 | unsigned long param64) | 161 | unsigned long param64) |
163 | { | 162 | { |
164 | kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++; | 163 | kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++; |
165 | if (S390_lowcore.clock_comparator == -1ULL) | 164 | if (S390_lowcore.clock_comparator == -1ULL) |
166 | set_clock_comparator(S390_lowcore.clock_comparator); | 165 | set_clock_comparator(S390_lowcore.clock_comparator); |
167 | } | 166 | } |
168 | 167 | ||
169 | static void etr_timing_alert(struct etr_irq_parm *); | 168 | static void etr_timing_alert(struct etr_irq_parm *); |
170 | static void stp_timing_alert(struct stp_irq_parm *); | 169 | static void stp_timing_alert(struct stp_irq_parm *); |
171 | 170 | ||
172 | static void timing_alert_interrupt(unsigned int ext_int_code, | 171 | static void timing_alert_interrupt(unsigned int ext_int_code, |
173 | unsigned int param32, unsigned long param64) | 172 | unsigned int param32, unsigned long param64) |
174 | { | 173 | { |
175 | kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++; | 174 | kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++; |
176 | if (param32 & 0x00c40000) | 175 | if (param32 & 0x00c40000) |
177 | etr_timing_alert((struct etr_irq_parm *) ¶m32); | 176 | etr_timing_alert((struct etr_irq_parm *) ¶m32); |
178 | if (param32 & 0x00038000) | 177 | if (param32 & 0x00038000) |
179 | stp_timing_alert((struct stp_irq_parm *) ¶m32); | 178 | stp_timing_alert((struct stp_irq_parm *) ¶m32); |
180 | } | 179 | } |
181 | 180 | ||
182 | static void etr_reset(void); | 181 | static void etr_reset(void); |
183 | static void stp_reset(void); | 182 | static void stp_reset(void); |
184 | 183 | ||
185 | void read_persistent_clock(struct timespec *ts) | 184 | void read_persistent_clock(struct timespec *ts) |
186 | { | 185 | { |
187 | tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts); | 186 | tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts); |
188 | } | 187 | } |
189 | 188 | ||
190 | void read_boot_clock(struct timespec *ts) | 189 | void read_boot_clock(struct timespec *ts) |
191 | { | 190 | { |
192 | tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, ts); | 191 | tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, ts); |
193 | } | 192 | } |
194 | 193 | ||
195 | static cycle_t read_tod_clock(struct clocksource *cs) | 194 | static cycle_t read_tod_clock(struct clocksource *cs) |
196 | { | 195 | { |
197 | return get_clock(); | 196 | return get_clock(); |
198 | } | 197 | } |
199 | 198 | ||
200 | static struct clocksource clocksource_tod = { | 199 | static struct clocksource clocksource_tod = { |
201 | .name = "tod", | 200 | .name = "tod", |
202 | .rating = 400, | 201 | .rating = 400, |
203 | .read = read_tod_clock, | 202 | .read = read_tod_clock, |
204 | .mask = -1ULL, | 203 | .mask = -1ULL, |
205 | .mult = 1000, | 204 | .mult = 1000, |
206 | .shift = 12, | 205 | .shift = 12, |
207 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 206 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
208 | }; | 207 | }; |
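The same ratio appears in the clocksource definition: with mult = 1000 and shift = 12, the timekeeping core computes ns = (cycles * 1000) >> 12, i.e. cycles / 4.096. A one-line standalone check:

#include <assert.h>

int main(void)
{
	/* clocksource conversion: ns = (cycles * mult) >> shift */
	assert(((4096ULL * 1000) >> 12) == 1000); /* 1 us of TOD cycles */
	return 0;
}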
209 | 208 | ||
210 | struct clocksource * __init clocksource_default_clock(void) | 209 | struct clocksource * __init clocksource_default_clock(void) |
211 | { | 210 | { |
212 | return &clocksource_tod; | 211 | return &clocksource_tod; |
213 | } | 212 | } |
214 | 213 | ||
215 | void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, | 214 | void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, |
216 | struct clocksource *clock, u32 mult) | 215 | struct clocksource *clock, u32 mult) |
217 | { | 216 | { |
218 | if (clock != &clocksource_tod) | 217 | if (clock != &clocksource_tod) |
219 | return; | 218 | return; |
220 | 219 | ||
221 | /* Make userspace gettimeofday spin until we're done. */ | 220 | /* Make userspace gettimeofday spin until we're done. */ |
222 | ++vdso_data->tb_update_count; | 221 | ++vdso_data->tb_update_count; |
223 | smp_wmb(); | 222 | smp_wmb(); |
224 | vdso_data->xtime_tod_stamp = clock->cycle_last; | 223 | vdso_data->xtime_tod_stamp = clock->cycle_last; |
225 | vdso_data->xtime_clock_sec = wall_time->tv_sec; | 224 | vdso_data->xtime_clock_sec = wall_time->tv_sec; |
226 | vdso_data->xtime_clock_nsec = wall_time->tv_nsec; | 225 | vdso_data->xtime_clock_nsec = wall_time->tv_nsec; |
227 | vdso_data->wtom_clock_sec = wtm->tv_sec; | 226 | vdso_data->wtom_clock_sec = wtm->tv_sec; |
228 | vdso_data->wtom_clock_nsec = wtm->tv_nsec; | 227 | vdso_data->wtom_clock_nsec = wtm->tv_nsec; |
229 | vdso_data->ntp_mult = mult; | 228 | vdso_data->ntp_mult = mult; |
230 | smp_wmb(); | 229 | smp_wmb(); |
231 | ++vdso_data->tb_update_count; | 230 | ++vdso_data->tb_update_count; |
232 | } | 231 | } |
233 | 232 | ||
234 | extern struct timezone sys_tz; | 233 | extern struct timezone sys_tz; |
235 | 234 | ||
236 | void update_vsyscall_tz(void) | 235 | void update_vsyscall_tz(void) |
237 | { | 236 | { |
238 | /* Make userspace gettimeofday spin until we're done. */ | 237 | /* Make userspace gettimeofday spin until we're done. */ |
239 | ++vdso_data->tb_update_count; | 238 | ++vdso_data->tb_update_count; |
240 | smp_wmb(); | 239 | smp_wmb(); |
241 | vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; | 240 | vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; |
242 | vdso_data->tz_dsttime = sys_tz.tz_dsttime; | 241 | vdso_data->tz_dsttime = sys_tz.tz_dsttime; |
243 | smp_wmb(); | 242 | smp_wmb(); |
244 | ++vdso_data->tb_update_count; | 243 | ++vdso_data->tb_update_count; |
245 | } | 244 | } |
246 | 245 | ||
247 | /* | 246 | /* |
248 | * Initialize the TOD clock and the CPU timer of | 247 | * Initialize the TOD clock and the CPU timer of |
249 | * the boot cpu. | 248 | * the boot cpu. |
250 | */ | 249 | */ |
251 | void __init time_init(void) | 250 | void __init time_init(void) |
252 | { | 251 | { |
253 | /* Reset time synchronization interfaces. */ | 252 | /* Reset time synchronization interfaces. */ |
254 | etr_reset(); | 253 | etr_reset(); |
255 | stp_reset(); | 254 | stp_reset(); |
256 | 255 | ||
257 | /* request the clock comparator external interrupt */ | 256 | /* request the clock comparator external interrupt */ |
258 | if (register_external_interrupt(0x1004, clock_comparator_interrupt)) | 257 | if (register_external_interrupt(0x1004, clock_comparator_interrupt)) |
259 | panic("Couldn't request external interrupt 0x1004"); | 258 | panic("Couldn't request external interrupt 0x1004"); |
260 | 259 | ||
261 | /* request the timing alert external interrupt */ | 260 | /* request the timing alert external interrupt */ |
262 | if (register_external_interrupt(0x1406, timing_alert_interrupt)) | 261 | if (register_external_interrupt(0x1406, timing_alert_interrupt)) |
263 | panic("Couldn't request external interrupt 0x1406"); | 262 | panic("Couldn't request external interrupt 0x1406"); |
264 | 263 | ||
265 | if (clocksource_register(&clocksource_tod) != 0) | 264 | if (clocksource_register(&clocksource_tod) != 0) |
266 | panic("Could not register TOD clock source"); | 265 | panic("Could not register TOD clock source"); |
267 | 266 | ||
268 | /* Enable TOD clock interrupts on the boot cpu. */ | 267 | /* Enable TOD clock interrupts on the boot cpu. */ |
269 | init_cpu_timer(); | 268 | init_cpu_timer(); |
270 | 269 | ||
271 | /* Enable cpu timer interrupts on the boot cpu. */ | 270 | /* Enable cpu timer interrupts on the boot cpu. */ |
272 | vtime_init(); | 271 | vtime_init(); |
273 | } | 272 | } |
274 | 273 | ||
275 | /* | 274 | /* |
276 | * The time is "clock". old is what we think the time is. | 275 | * The time is "clock". old is what we think the time is. |
277 | * Adjust the value by a multiple of jiffies and add the delta to ntp. | 276 | * Adjust the value by a multiple of jiffies and add the delta to ntp. |
278 | * "delay" is an approximation of how long the synchronization took. If | 277 | * "delay" is an approximation of how long the synchronization took. If |
279 | * the time correction is positive, then "delay" is subtracted from | 278 | * the time correction is positive, then "delay" is subtracted from |
280 | * the time difference and only the remaining part is passed to ntp. | 279 | * the time difference and only the remaining part is passed to ntp. |
281 | */ | 280 | */ |
282 | static unsigned long long adjust_time(unsigned long long old, | 281 | static unsigned long long adjust_time(unsigned long long old, |
283 | unsigned long long clock, | 282 | unsigned long long clock, |
284 | unsigned long long delay) | 283 | unsigned long long delay) |
285 | { | 284 | { |
286 | unsigned long long delta, ticks; | 285 | unsigned long long delta, ticks; |
287 | struct timex adjust; | 286 | struct timex adjust; |
288 | 287 | ||
289 | if (clock > old) { | 288 | if (clock > old) { |
290 | /* It is later than we thought. */ | 289 | /* It is later than we thought. */ |
291 | delta = ticks = clock - old; | 290 | delta = ticks = clock - old; |
292 | delta = ticks = (delta < delay) ? 0 : delta - delay; | 291 | delta = ticks = (delta < delay) ? 0 : delta - delay; |
293 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); | 292 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); |
294 | adjust.offset = ticks * (1000000 / HZ); | 293 | adjust.offset = ticks * (1000000 / HZ); |
295 | } else { | 294 | } else { |
296 | /* It is earlier than we thought. */ | 295 | /* It is earlier than we thought. */ |
297 | delta = ticks = old - clock; | 296 | delta = ticks = old - clock; |
298 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); | 297 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); |
299 | delta = -delta; | 298 | delta = -delta; |
300 | adjust.offset = -ticks * (1000000 / HZ); | 299 | adjust.offset = -ticks * (1000000 / HZ); |
301 | } | 300 | } |
302 | sched_clock_base_cc += delta; | 301 | sched_clock_base_cc += delta; |
303 | if (adjust.offset != 0) { | 302 | if (adjust.offset != 0) { |
304 | pr_notice("The ETR interface has adjusted the clock " | 303 | pr_notice("The ETR interface has adjusted the clock " |
305 | "by %li microseconds\n", adjust.offset); | 304 | "by %li microseconds\n", adjust.offset); |
306 | adjust.modes = ADJ_OFFSET_SINGLESHOT; | 305 | adjust.modes = ADJ_OFFSET_SINGLESHOT; |
307 | do_adjtimex(&adjust); | 306 | do_adjtimex(&adjust); |
308 | } | 307 | } |
309 | return delta; | 308 | return delta; |
310 | } | 309 | } |
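In adjust_time, do_div leaves the whole-jiffy quotient in ticks and returns the sub-jiffy remainder, which is subtracted from delta; the jiffy-aligned delta is applied to sched_clock_base_cc while the jiffy count goes to ntp as microseconds. A worked example (HZ = 100 and CLK_TICKS_PER_JIFFY = 4096000000 / HZ are assumed configuration values):

#include <assert.h>

#define HZ 100                                   /* assumed for the example */
#define CLK_TICKS_PER_JIFFY (4096000000ULL / HZ) /* TOD units per jiffy */

int main(void)
{
	unsigned long long delta = 100000000;  /* measured offset, TOD units */
	unsigned long long ticks = delta / CLK_TICKS_PER_JIFFY; /* 2 jiffies */

	delta -= delta % CLK_TICKS_PER_JIFFY;  /* jiffy multiple: 81920000 */
	assert(ticks == 2 && delta == 81920000);
	assert(ticks * (1000000 / HZ) == 20000); /* 20000 us handed to ntp */
	return 0;
}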
311 | 310 | ||
312 | static DEFINE_PER_CPU(atomic_t, clock_sync_word); | 311 | static DEFINE_PER_CPU(atomic_t, clock_sync_word); |
313 | static DEFINE_MUTEX(clock_sync_mutex); | 312 | static DEFINE_MUTEX(clock_sync_mutex); |
314 | static unsigned long clock_sync_flags; | 313 | static unsigned long clock_sync_flags; |
315 | 314 | ||
316 | #define CLOCK_SYNC_HAS_ETR 0 | 315 | #define CLOCK_SYNC_HAS_ETR 0 |
317 | #define CLOCK_SYNC_HAS_STP 1 | 316 | #define CLOCK_SYNC_HAS_STP 1 |
318 | #define CLOCK_SYNC_ETR 2 | 317 | #define CLOCK_SYNC_ETR 2 |
319 | #define CLOCK_SYNC_STP 3 | 318 | #define CLOCK_SYNC_STP 3 |
320 | 319 | ||
321 | /* | 320 | /* |
322 | * The synchronous get_clock function. It will write the current clock | 321 | * The synchronous get_clock function. It will write the current clock |
323 | * value to the clock pointer and return 0 if the clock is in sync with | 322 | * value to the clock pointer and return 0 if the clock is in sync with |
324 | * the external time source. It returns -ENOSYS if no external time | 323 | * the external time source. It returns -ENOSYS if no external time |
325 | * source is available, -EACCES if one is available but not active, | 324 | * source is available, -EACCES if one is available but not active, |
326 | * and -EAGAIN if the clock is not in sync with the external reference. | 325 | * and -EAGAIN if the clock is not in sync with the external reference. |
327 | */ | 326 | */ |
328 | int get_sync_clock(unsigned long long *clock) | 327 | int get_sync_clock(unsigned long long *clock) |
329 | { | 328 | { |
330 | atomic_t *sw_ptr; | 329 | atomic_t *sw_ptr; |
331 | unsigned int sw0, sw1; | 330 | unsigned int sw0, sw1; |
332 | 331 | ||
333 | sw_ptr = &get_cpu_var(clock_sync_word); | 332 | sw_ptr = &get_cpu_var(clock_sync_word); |
334 | sw0 = atomic_read(sw_ptr); | 333 | sw0 = atomic_read(sw_ptr); |
335 | *clock = get_clock(); | 334 | *clock = get_clock(); |
336 | sw1 = atomic_read(sw_ptr); | 335 | sw1 = atomic_read(sw_ptr); |
337 | put_cpu_var(clock_sync_word); | 336 | put_cpu_var(clock_sync_word); |
338 | if (sw0 == sw1 && (sw0 & 0x80000000U)) | 337 | if (sw0 == sw1 && (sw0 & 0x80000000U)) |
339 | /* Success: time is in sync. */ | 338 | /* Success: time is in sync. */ |
340 | return 0; | 339 | return 0; |
341 | if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) && | 340 | if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) && |
342 | !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) | 341 | !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) |
343 | return -ENOSYS; | 342 | return -ENOSYS; |
344 | if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) && | 343 | if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) && |
345 | !test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) | 344 | !test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) |
346 | return -EACCES; | 345 | return -EACCES; |
347 | return -EAGAIN; | 346 | return -EAGAIN; |
348 | } | 347 | } |
349 | EXPORT_SYMBOL(get_sync_clock); | 348 | EXPORT_SYMBOL(get_sync_clock); |
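Callers would typically treat the return codes as follows: retry later on -EAGAIN, fall back to the local clock on -ENOSYS or -EACCES. A hedged sketch of the calling convention (the wrapper is illustrative, not from this patch):

/* Sketch: use the TOD value only when it is in sync with the external source. */
static void use_sync_clock(void)
{
	unsigned long long tod;

	switch (get_sync_clock(&tod)) {
	case 0:          /* tod is in sync, safe to use */
		break;
	case -EAGAIN:    /* temporarily out of sync, retry later */
		break;
	default:         /* -ENOSYS or -EACCES: no usable external source */
		break;
	}
}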
350 | 349 | ||
351 | /* | 350 | /* |
352 | * Make get_sync_clock return -EAGAIN. | 351 | * Make get_sync_clock return -EAGAIN. |
353 | */ | 352 | */ |
354 | static void disable_sync_clock(void *dummy) | 353 | static void disable_sync_clock(void *dummy) |
355 | { | 354 | { |
356 | atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word); | 355 | atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word); |
357 | /* | 356 | /* |
358 | * Clear the in-sync bit 2^31. All get_sync_clock calls will | 357 | * Clear the in-sync bit 2^31. All get_sync_clock calls will |
359 | * fail until the sync bit is turned back on. In addition | 358 | * fail until the sync bit is turned back on. In addition |
360 | * increase the "sequence" counter so that a get_sync_clock call | 359 | * increase the "sequence" counter so that a get_sync_clock call |
361 | * racing with an etr event and the subsequent recovery is detected. | 360 | * racing with an etr event and the subsequent recovery is detected. |
362 | */ | 361 | */ |
363 | atomic_clear_mask(0x80000000, sw_ptr); | 362 | atomic_clear_mask(0x80000000, sw_ptr); |
364 | atomic_inc(sw_ptr); | 363 | atomic_inc(sw_ptr); |
365 | } | 364 | } |
366 | 365 | ||
367 | /* | 366 | /* |
368 | * Make get_sync_clock return 0 again. | 367 | * Make get_sync_clock return 0 again. |
369 | * Needs to be called from a context disabled for preemption. | 368 | * Needs to be called from a context disabled for preemption. |
370 | */ | 369 | */ |
371 | static void enable_sync_clock(void) | 370 | static void enable_sync_clock(void) |
372 | { | 371 | { |
373 | atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word); | 372 | atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word); |
374 | atomic_set_mask(0x80000000, sw_ptr); | 373 | atomic_set_mask(0x80000000, sw_ptr); |
375 | } | 374 | } |
376 | 375 | ||
377 | /* | 376 | /* |
378 | * Function to check if the clock is in sync. | 377 | * Function to check if the clock is in sync. |
379 | */ | 378 | */ |
380 | static inline int check_sync_clock(void) | 379 | static inline int check_sync_clock(void) |
381 | { | 380 | { |
382 | atomic_t *sw_ptr; | 381 | atomic_t *sw_ptr; |
383 | int rc; | 382 | int rc; |
384 | 383 | ||
385 | sw_ptr = &get_cpu_var(clock_sync_word); | 384 | sw_ptr = &get_cpu_var(clock_sync_word); |
386 | rc = (atomic_read(sw_ptr) & 0x80000000U) != 0; | 385 | rc = (atomic_read(sw_ptr) & 0x80000000U) != 0; |
387 | put_cpu_var(clock_sync_word); | 386 | put_cpu_var(clock_sync_word); |
388 | return rc; | 387 | return rc; |
389 | } | 388 | } |
390 | 389 | ||
391 | /* Single threaded workqueue used for etr and stp sync events */ | 390 | /* Single threaded workqueue used for etr and stp sync events */ |
392 | static struct workqueue_struct *time_sync_wq; | 391 | static struct workqueue_struct *time_sync_wq; |
393 | 392 | ||
394 | static void __init time_init_wq(void) | 393 | static void __init time_init_wq(void) |
395 | { | 394 | { |
396 | if (time_sync_wq) | 395 | if (time_sync_wq) |
397 | return; | 396 | return; |
398 | time_sync_wq = create_singlethread_workqueue("timesync"); | 397 | time_sync_wq = create_singlethread_workqueue("timesync"); |
399 | } | 398 | } |
400 | 399 | ||
401 | /* | 400 | /* |
402 | * External Time Reference (ETR) code. | 401 | * External Time Reference (ETR) code. |
403 | */ | 402 | */ |
404 | static int etr_port0_online; | 403 | static int etr_port0_online; |
405 | static int etr_port1_online; | 404 | static int etr_port1_online; |
406 | static int etr_steai_available; | 405 | static int etr_steai_available; |
407 | 406 | ||
408 | static int __init early_parse_etr(char *p) | 407 | static int __init early_parse_etr(char *p) |
409 | { | 408 | { |
410 | if (strncmp(p, "off", 3) == 0) | 409 | if (strncmp(p, "off", 3) == 0) |
411 | etr_port0_online = etr_port1_online = 0; | 410 | etr_port0_online = etr_port1_online = 0; |
412 | else if (strncmp(p, "port0", 5) == 0) | 411 | else if (strncmp(p, "port0", 5) == 0) |
413 | etr_port0_online = 1; | 412 | etr_port0_online = 1; |
414 | else if (strncmp(p, "port1", 5) == 0) | 413 | else if (strncmp(p, "port1", 5) == 0) |
415 | etr_port1_online = 1; | 414 | etr_port1_online = 1; |
416 | else if (strncmp(p, "on", 2) == 0) | 415 | else if (strncmp(p, "on", 2) == 0) |
417 | etr_port0_online = etr_port1_online = 1; | 416 | etr_port0_online = etr_port1_online = 1; |
418 | return 0; | 417 | return 0; |
419 | } | 418 | } |
420 | early_param("etr", early_parse_etr); | 419 | early_param("etr", early_parse_etr); |
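Per the parser above, the ETR ports can be preselected on the kernel command line:

	etr=off      keep both ports offline (the default)
	etr=port0    bring only port 0 online
	etr=port1    bring only port 1 online
	etr=on       bring both ports online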
421 | 420 | ||
422 | enum etr_event { | 421 | enum etr_event { |
423 | ETR_EVENT_PORT0_CHANGE, | 422 | ETR_EVENT_PORT0_CHANGE, |
424 | ETR_EVENT_PORT1_CHANGE, | 423 | ETR_EVENT_PORT1_CHANGE, |
425 | ETR_EVENT_PORT_ALERT, | 424 | ETR_EVENT_PORT_ALERT, |
426 | ETR_EVENT_SYNC_CHECK, | 425 | ETR_EVENT_SYNC_CHECK, |
427 | ETR_EVENT_SWITCH_LOCAL, | 426 | ETR_EVENT_SWITCH_LOCAL, |
428 | ETR_EVENT_UPDATE, | 427 | ETR_EVENT_UPDATE, |
429 | }; | 428 | }; |
430 | 429 | ||
431 | /* | 430 | /* |
432 | * Valid bit combinations of the eacr register are (x = don't care): | 431 | * Valid bit combinations of the eacr register are (x = don't care): |
433 | * e0 e1 dp p0 p1 ea es sl | 432 | * e0 e1 dp p0 p1 ea es sl |
434 | * 0 0 x 0 0 0 0 0 initial, disabled state | 433 | * 0 0 x 0 0 0 0 0 initial, disabled state |
435 | * 0 0 x 0 1 1 0 0 port 1 online | 434 | * 0 0 x 0 1 1 0 0 port 1 online |
436 | * 0 0 x 1 0 1 0 0 port 0 online | 435 | * 0 0 x 1 0 1 0 0 port 0 online |
437 | * 0 0 x 1 1 1 0 0 both ports online | 436 | * 0 0 x 1 1 1 0 0 both ports online |
438 | * 0 1 x 0 1 1 0 0 port 1 online and usable, ETR or PPS mode | 437 | * 0 1 x 0 1 1 0 0 port 1 online and usable, ETR or PPS mode |
439 | * 0 1 x 0 1 1 0 1 port 1 online, usable and ETR mode | 438 | * 0 1 x 0 1 1 0 1 port 1 online, usable and ETR mode |
440 | * 0 1 x 0 1 1 1 0 port 1 online, usable, PPS mode, in-sync | 439 | * 0 1 x 0 1 1 1 0 port 1 online, usable, PPS mode, in-sync |
441 | * 0 1 x 0 1 1 1 1 port 1 online, usable, ETR mode, in-sync | 440 | * 0 1 x 0 1 1 1 1 port 1 online, usable, ETR mode, in-sync |
442 | * 0 1 x 1 1 1 0 0 both ports online, port 1 usable | 441 | * 0 1 x 1 1 1 0 0 both ports online, port 1 usable |
443 | * 0 1 x 1 1 1 1 0 both ports online, port 1 usable, PPS mode, in-sync | 442 | * 0 1 x 1 1 1 1 0 both ports online, port 1 usable, PPS mode, in-sync |
444 | * 0 1 x 1 1 1 1 1 both ports online, port 1 usable, ETR mode, in-sync | 443 | * 0 1 x 1 1 1 1 1 both ports online, port 1 usable, ETR mode, in-sync |
445 | * 1 0 x 1 0 1 0 0 port 0 online and usable, ETR or PPS mode | 444 | * 1 0 x 1 0 1 0 0 port 0 online and usable, ETR or PPS mode |
446 | * 1 0 x 1 0 1 0 1 port 0 online, usable and ETR mode | 445 | * 1 0 x 1 0 1 0 1 port 0 online, usable and ETR mode |
447 | * 1 0 x 1 0 1 1 0 port 0 online, usable, PPS mode, in-sync | 446 | * 1 0 x 1 0 1 1 0 port 0 online, usable, PPS mode, in-sync |
448 | * 1 0 x 1 0 1 1 1 port 0 online, usable, ETR mode, in-sync | 447 | * 1 0 x 1 0 1 1 1 port 0 online, usable, ETR mode, in-sync |
449 | * 1 0 x 1 1 1 0 0 both ports online, port 0 usable | 448 | * 1 0 x 1 1 1 0 0 both ports online, port 0 usable |
450 | * 1 0 x 1 1 1 1 0 both ports online, port 0 usable, PPS mode, in-sync | 449 | * 1 0 x 1 1 1 1 0 both ports online, port 0 usable, PPS mode, in-sync |
451 | * 1 0 x 1 1 1 1 1 both ports online, port 0 usable, ETR mode, in-sync | 450 | * 1 0 x 1 1 1 1 1 both ports online, port 0 usable, ETR mode, in-sync |
452 | * 1 1 x 1 1 1 1 0 both ports online & usable, ETR, in-sync | 451 | * 1 1 x 1 1 1 1 0 both ports online & usable, ETR, in-sync |
453 | * 1 1 x 1 1 1 1 1 both ports online & usable, ETR, in-sync | 452 | * 1 1 x 1 1 1 1 1 both ports online & usable, ETR, in-sync |
454 | */ | 453 | */ |
455 | static struct etr_eacr etr_eacr; | 454 | static struct etr_eacr etr_eacr; |
456 | static u64 etr_tolec; /* time of last eacr update */ | 455 | static u64 etr_tolec; /* time of last eacr update */ |
457 | static struct etr_aib etr_port0; | 456 | static struct etr_aib etr_port0; |
458 | static int etr_port0_uptodate; | 457 | static int etr_port0_uptodate; |
459 | static struct etr_aib etr_port1; | 458 | static struct etr_aib etr_port1; |
460 | static int etr_port1_uptodate; | 459 | static int etr_port1_uptodate; |
461 | static unsigned long etr_events; | 460 | static unsigned long etr_events; |
462 | static struct timer_list etr_timer; | 461 | static struct timer_list etr_timer; |
463 | 462 | ||
464 | static void etr_timeout(unsigned long dummy); | 463 | static void etr_timeout(unsigned long dummy); |
465 | static void etr_work_fn(struct work_struct *work); | 464 | static void etr_work_fn(struct work_struct *work); |
466 | static DEFINE_MUTEX(etr_work_mutex); | 465 | static DEFINE_MUTEX(etr_work_mutex); |
467 | static DECLARE_WORK(etr_work, etr_work_fn); | 466 | static DECLARE_WORK(etr_work, etr_work_fn); |
468 | 467 | ||
469 | /* | 468 | /* |
470 | * Reset ETR attachment. | 469 | * Reset ETR attachment. |
471 | */ | 470 | */ |
472 | static void etr_reset(void) | 471 | static void etr_reset(void) |
473 | { | 472 | { |
474 | etr_eacr = (struct etr_eacr) { | 473 | etr_eacr = (struct etr_eacr) { |
475 | .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0, | 474 | .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0, |
476 | .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0, | 475 | .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0, |
477 | .es = 0, .sl = 0 }; | 476 | .es = 0, .sl = 0 }; |
478 | if (etr_setr(&etr_eacr) == 0) { | 477 | if (etr_setr(&etr_eacr) == 0) { |
479 | etr_tolec = get_clock(); | 478 | etr_tolec = get_clock(); |
480 | set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags); | 479 | set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags); |
481 | if (etr_port0_online && etr_port1_online) | 480 | if (etr_port0_online && etr_port1_online) |
482 | set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); | 481 | set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); |
483 | } else if (etr_port0_online || etr_port1_online) { | 482 | } else if (etr_port0_online || etr_port1_online) { |
484 | pr_warning("The real or virtual hardware system does " | 483 | pr_warning("The real or virtual hardware system does " |
485 | "not provide an ETR interface\n"); | 484 | "not provide an ETR interface\n"); |
486 | etr_port0_online = etr_port1_online = 0; | 485 | etr_port0_online = etr_port1_online = 0; |
487 | } | 486 | } |
488 | } | 487 | } |
489 | 488 | ||
490 | static int __init etr_init(void) | 489 | static int __init etr_init(void) |
491 | { | 490 | { |
492 | struct etr_aib aib; | 491 | struct etr_aib aib; |
493 | 492 | ||
494 | if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags)) | 493 | if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags)) |
495 | return 0; | 494 | return 0; |
496 | time_init_wq(); | 495 | time_init_wq(); |
497 | /* Check if this machine has the steai instruction. */ | 496 | /* Check if this machine has the steai instruction. */ |
498 | if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) | 497 | if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) |
499 | etr_steai_available = 1; | 498 | etr_steai_available = 1; |
500 | setup_timer(&etr_timer, etr_timeout, 0UL); | 499 | setup_timer(&etr_timer, etr_timeout, 0UL); |
501 | if (etr_port0_online) { | 500 | if (etr_port0_online) { |
502 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); | 501 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); |
503 | queue_work(time_sync_wq, &etr_work); | 502 | queue_work(time_sync_wq, &etr_work); |
504 | } | 503 | } |
505 | if (etr_port1_online) { | 504 | if (etr_port1_online) { |
506 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); | 505 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); |
507 | queue_work(time_sync_wq, &etr_work); | 506 | queue_work(time_sync_wq, &etr_work); |
508 | } | 507 | } |
509 | return 0; | 508 | return 0; |
510 | } | 509 | } |
511 | 510 | ||
512 | arch_initcall(etr_init); | 511 | arch_initcall(etr_init); |
513 | 512 | ||
514 | /* | 513 | /* |
515 | * Two sorts of ETR machine checks. The architecture reads: | 514 | * Two sorts of ETR machine checks. The architecture reads: |
516 | * "When a machine-check interruption occurs and if a switch-to-local or | 515 | * "When a machine-check interruption occurs and if a switch-to-local or |
517 | * ETR-sync-check interrupt request is pending but disabled, this pending | 516 | * ETR-sync-check interrupt request is pending but disabled, this pending |
518 | * disabled interruption request is indicated and is cleared". | 517 | * disabled interruption request is indicated and is cleared". |
519 | * Which means that we can get etr_switch_to_local events from the machine | 518 | * Which means that we can get etr_switch_to_local events from the machine |
520 | * check handler although the interruption condition is disabled. Lovely.. | 519 | * check handler although the interruption condition is disabled. Lovely.. |
521 | */ | 520 | */ |
522 | 521 | ||
523 | /* | 522 | /* |
524 | * Switch to local machine check. This is called when the last usable | 523 | * Switch to local machine check. This is called when the last usable |
525 | * ETR port goes inactive. After switch to local the clock is not in sync. | 524 | * ETR port goes inactive. After switch to local the clock is not in sync. |
526 | */ | 525 | */ |
527 | void etr_switch_to_local(void) | 526 | void etr_switch_to_local(void) |
528 | { | 527 | { |
529 | if (!etr_eacr.sl) | 528 | if (!etr_eacr.sl) |
530 | return; | 529 | return; |
531 | disable_sync_clock(NULL); | 530 | disable_sync_clock(NULL); |
532 | if (!test_and_set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) { | 531 | if (!test_and_set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) { |
533 | etr_eacr.es = etr_eacr.sl = 0; | 532 | etr_eacr.es = etr_eacr.sl = 0; |
534 | etr_setr(&etr_eacr); | 533 | etr_setr(&etr_eacr); |
535 | queue_work(time_sync_wq, &etr_work); | 534 | queue_work(time_sync_wq, &etr_work); |
536 | } | 535 | } |
537 | } | 536 | } |
538 | 537 | ||
539 | /* | 538 | /* |
540 | * ETR sync check machine check. This is called when the ETR OTE and the | 539 | * ETR sync check machine check. This is called when the ETR OTE and the |
541 | * local clock OTE are farther apart than the ETR sync check tolerance. | 540 | * local clock OTE are farther apart than the ETR sync check tolerance. |
542 | * After an ETR sync check the clock is not in sync. The machine check | 541 | * After an ETR sync check the clock is not in sync. The machine check |
543 | * is broadcast to all cpus at the same time. | 542 | * is broadcast to all cpus at the same time. |
544 | */ | 543 | */ |
545 | void etr_sync_check(void) | 544 | void etr_sync_check(void) |
546 | { | 545 | { |
547 | if (!etr_eacr.es) | 546 | if (!etr_eacr.es) |
548 | return; | 547 | return; |
549 | disable_sync_clock(NULL); | 548 | disable_sync_clock(NULL); |
550 | if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) { | 549 | if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) { |
551 | etr_eacr.es = 0; | 550 | etr_eacr.es = 0; |
552 | etr_setr(&etr_eacr); | 551 | etr_setr(&etr_eacr); |
553 | queue_work(time_sync_wq, &etr_work); | 552 | queue_work(time_sync_wq, &etr_work); |
554 | } | 553 | } |
555 | } | 554 | } |
556 | 555 | ||
557 | /* | 556 | /* |
558 | * ETR timing alert. There are two causes: | 557 | * ETR timing alert. There are two causes: |
559 | * 1) port state change, check the usability of the port | 558 | * 1) port state change, check the usability of the port |
560 | * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the | 559 | * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the |
561 | * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3) | 560 | * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3) |
562 | * or ETR-data word 4 (edf4) has changed. | 561 | * or ETR-data word 4 (edf4) has changed. |
563 | */ | 562 | */ |
564 | static void etr_timing_alert(struct etr_irq_parm *intparm) | 563 | static void etr_timing_alert(struct etr_irq_parm *intparm) |
565 | { | 564 | { |
566 | if (intparm->pc0) | 565 | if (intparm->pc0) |
567 | /* ETR port 0 state change. */ | 566 | /* ETR port 0 state change. */ |
568 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); | 567 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); |
569 | if (intparm->pc1) | 568 | if (intparm->pc1) |
570 | /* ETR port 1 state change. */ | 569 | /* ETR port 1 state change. */ |
571 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); | 570 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); |
572 | if (intparm->eai) | 571 | if (intparm->eai) |
573 | /* | 572 | /* |
574 | * ETR port alert on either port 0, 1 or both. | 573 | * ETR port alert on either port 0, 1 or both. |
575 | * Both ports are not up-to-date now. | 574 | * Both ports are not up-to-date now. |
576 | */ | 575 | */ |
577 | set_bit(ETR_EVENT_PORT_ALERT, &etr_events); | 576 | set_bit(ETR_EVENT_PORT_ALERT, &etr_events); |
578 | queue_work(time_sync_wq, &etr_work); | 577 | queue_work(time_sync_wq, &etr_work); |
579 | } | 578 | } |
580 | 579 | ||
581 | static void etr_timeout(unsigned long dummy) | 580 | static void etr_timeout(unsigned long dummy) |
582 | { | 581 | { |
583 | set_bit(ETR_EVENT_UPDATE, &etr_events); | 582 | set_bit(ETR_EVENT_UPDATE, &etr_events); |
584 | queue_work(time_sync_wq, &etr_work); | 583 | queue_work(time_sync_wq, &etr_work); |
585 | } | 584 | } |
586 | 585 | ||
587 | /* | 586 | /* |
588 | * Check if the etr mode is pps. | 587 | * Check if the etr mode is pps. |
589 | */ | 588 | */ |
590 | static inline int etr_mode_is_pps(struct etr_eacr eacr) | 589 | static inline int etr_mode_is_pps(struct etr_eacr eacr) |
591 | { | 590 | { |
592 | return eacr.es && !eacr.sl; | 591 | return eacr.es && !eacr.sl; |
593 | } | 592 | } |
594 | 593 | ||
595 | /* | 594 | /* |
596 | * Check if the etr mode is etr. | 595 | * Check if the etr mode is etr. |
597 | */ | 596 | */ |
598 | static inline int etr_mode_is_etr(struct etr_eacr eacr) | 597 | static inline int etr_mode_is_etr(struct etr_eacr eacr) |
599 | { | 598 | { |
600 | return eacr.es && eacr.sl; | 599 | return eacr.es && eacr.sl; |
601 | } | 600 | } |
602 | 601 | ||
603 | /* | 602 | /* |
604 | * Check if the port can be used for TOD synchronization. | 603 | * Check if the port can be used for TOD synchronization. |
605 | * For PPS mode the port has to receive OTEs. For ETR mode | 604 | * For PPS mode the port has to receive OTEs. For ETR mode |
606 | * the port has to receive OTEs, the ETR stepping bit has to | 605 | * the port has to receive OTEs, the ETR stepping bit has to |
607 | * be zero and the validity bits for data frame 1, 2, and 3 | 606 | * be zero and the validity bits for data frame 1, 2, and 3 |
608 | * have to be 1. | 607 | * have to be 1. |
609 | */ | 608 | */ |
610 | static int etr_port_valid(struct etr_aib *aib, int port) | 609 | static int etr_port_valid(struct etr_aib *aib, int port) |
611 | { | 610 | { |
612 | unsigned int psc; | 611 | unsigned int psc; |
613 | 612 | ||
614 | /* Check that this port is receiving OTEs. */ | 613 | /* Check that this port is receiving OTEs. */ |
615 | if (aib->tsp == 0) | 614 | if (aib->tsp == 0) |
616 | return 0; | 615 | return 0; |
617 | 616 | ||
618 | psc = port ? aib->esw.psc1 : aib->esw.psc0; | 617 | psc = port ? aib->esw.psc1 : aib->esw.psc0; |
619 | if (psc == etr_lpsc_pps_mode) | 618 | if (psc == etr_lpsc_pps_mode) |
620 | return 1; | 619 | return 1; |
621 | if (psc == etr_lpsc_operational_step) | 620 | if (psc == etr_lpsc_operational_step) |
622 | return !aib->esw.y && aib->slsw.v1 && | 621 | return !aib->esw.y && aib->slsw.v1 && |
623 | aib->slsw.v2 && aib->slsw.v3; | 622 | aib->slsw.v2 && aib->slsw.v3; |
624 | return 0; | 623 | return 0; |
625 | } | 624 | } |
626 | 625 | ||
627 | /* | 626 | /* |
628 | * Check if two ports are on the same network. | 627 | * Check if two ports are on the same network. |
629 | */ | 628 | */ |
630 | static int etr_compare_network(struct etr_aib *aib1, struct etr_aib *aib2) | 629 | static int etr_compare_network(struct etr_aib *aib1, struct etr_aib *aib2) |
631 | { | 630 | { |
632 | // FIXME: any other fields we have to compare? | 631 | // FIXME: any other fields we have to compare? |
633 | return aib1->edf1.net_id == aib2->edf1.net_id; | 632 | return aib1->edf1.net_id == aib2->edf1.net_id; |
634 | } | 633 | } |
635 | 634 | ||
636 | /* | 635 | /* |
637 | * Wrapper for etr_stei that converts physical port states | 636 | * Wrapper for etr_stei that converts physical port states |
638 | * to logical port states to be consistent with the output | 637 | * to logical port states to be consistent with the output |
639 | * of stetr (see etr_psc vs. etr_lpsc). | 638 | * of stetr (see etr_psc vs. etr_lpsc). |
640 | */ | 639 | */ |
641 | static void etr_steai_cv(struct etr_aib *aib, unsigned int func) | 640 | static void etr_steai_cv(struct etr_aib *aib, unsigned int func) |
642 | { | 641 | { |
643 | BUG_ON(etr_steai(aib, func) != 0); | 642 | BUG_ON(etr_steai(aib, func) != 0); |
644 | /* Convert port state to logical port state. */ | 643 | /* Convert port state to logical port state. */ |
645 | if (aib->esw.psc0 == 1) | 644 | if (aib->esw.psc0 == 1) |
646 | aib->esw.psc0 = 2; | 645 | aib->esw.psc0 = 2; |
647 | else if (aib->esw.psc0 == 0 && aib->esw.p == 0) | 646 | else if (aib->esw.psc0 == 0 && aib->esw.p == 0) |
648 | aib->esw.psc0 = 1; | 647 | aib->esw.psc0 = 1; |
649 | if (aib->esw.psc1 == 1) | 648 | if (aib->esw.psc1 == 1) |
650 | aib->esw.psc1 = 2; | 649 | aib->esw.psc1 = 2; |
651 | else if (aib->esw.psc1 == 0 && aib->esw.p == 1) | 650 | else if (aib->esw.psc1 == 0 && aib->esw.p == 1) |
652 | aib->esw.psc1 = 1; | 651 | aib->esw.psc1 = 1; |
653 | } | 652 | } |
654 | 653 | ||
655 | /* | 654 | /* |
656 | * Check if the aib a2 is still connected to the same attachment as | 655 | * Check if the aib a2 is still connected to the same attachment as |
657 | * aib a1, that the etv values differ by one and that a2 is valid. | 656 | * aib a1, that the etv values differ by one and that a2 is valid. |
658 | */ | 657 | */ |
659 | static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p) | 658 | static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p) |
660 | { | 659 | { |
661 | int state_a1, state_a2; | 660 | int state_a1, state_a2; |
662 | 661 | ||
663 | /* Paranoia check: e0/e1 should better be the same. */ | 662 | /* Paranoia check: e0/e1 should better be the same. */ |
664 | if (a1->esw.eacr.e0 != a2->esw.eacr.e0 || | 663 | if (a1->esw.eacr.e0 != a2->esw.eacr.e0 || |
665 | a1->esw.eacr.e1 != a2->esw.eacr.e1) | 664 | a1->esw.eacr.e1 != a2->esw.eacr.e1) |
666 | return 0; | 665 | return 0; |
667 | 666 | ||
668 | /* Still connected to the same etr ? */ | 667 | /* Still connected to the same etr ? */ |
669 | state_a1 = p ? a1->esw.psc1 : a1->esw.psc0; | 668 | state_a1 = p ? a1->esw.psc1 : a1->esw.psc0; |
670 | state_a2 = p ? a2->esw.psc1 : a2->esw.psc0; | 669 | state_a2 = p ? a2->esw.psc1 : a2->esw.psc0; |
671 | if (state_a1 == etr_lpsc_operational_step) { | 670 | if (state_a1 == etr_lpsc_operational_step) { |
672 | if (state_a2 != etr_lpsc_operational_step || | 671 | if (state_a2 != etr_lpsc_operational_step || |
673 | a1->edf1.net_id != a2->edf1.net_id || | 672 | a1->edf1.net_id != a2->edf1.net_id || |
674 | a1->edf1.etr_id != a2->edf1.etr_id || | 673 | a1->edf1.etr_id != a2->edf1.etr_id || |
675 | a1->edf1.etr_pn != a2->edf1.etr_pn) | 674 | a1->edf1.etr_pn != a2->edf1.etr_pn) |
676 | return 0; | 675 | return 0; |
677 | } else if (state_a2 != etr_lpsc_pps_mode) | 676 | } else if (state_a2 != etr_lpsc_pps_mode) |
678 | return 0; | 677 | return 0; |
679 | 678 | ||
680 | /* The ETV value of a2 needs to be ETV of a1 + 1. */ | 679 | /* The ETV value of a2 needs to be ETV of a1 + 1. */ |
681 | if (a1->edf2.etv + 1 != a2->edf2.etv) | 680 | if (a1->edf2.etv + 1 != a2->edf2.etv) |
682 | return 0; | 681 | return 0; |
683 | 682 | ||
684 | if (!etr_port_valid(a2, p)) | 683 | if (!etr_port_valid(a2, p)) |
685 | return 0; | 684 | return 0; |
686 | 685 | ||
687 | return 1; | 686 | return 1; |
688 | } | 687 | } |
689 | 688 | ||
690 | struct clock_sync_data { | 689 | struct clock_sync_data { |
691 | atomic_t cpus; | 690 | atomic_t cpus; |
692 | int in_sync; | 691 | int in_sync; |
693 | unsigned long long fixup_cc; | 692 | unsigned long long fixup_cc; |
694 | int etr_port; | 693 | int etr_port; |
695 | struct etr_aib *etr_aib; | 694 | struct etr_aib *etr_aib; |
696 | }; | 695 | }; |
697 | 696 | ||
698 | static void clock_sync_cpu(struct clock_sync_data *sync) | 697 | static void clock_sync_cpu(struct clock_sync_data *sync) |
699 | { | 698 | { |
700 | atomic_dec(&sync->cpus); | 699 | atomic_dec(&sync->cpus); |
701 | enable_sync_clock(); | 700 | enable_sync_clock(); |
702 | /* | 701 | /* |
703 | * This looks like a busy wait loop but it isn't. etr_sync_clock | 702 | * This looks like a busy wait loop but it isn't. etr_sync_clock |
704 | * is called on all other cpus while the TOD clock is stopped. | 703 | * is called on all other cpus while the TOD clock is stopped. |
705 | * __udelay will stop the cpu on an enabled wait psw until the | 704 | * __udelay will stop the cpu on an enabled wait psw until the |
706 | * TOD is running again. | 705 | * TOD is running again. |
707 | */ | 706 | */ |
708 | while (sync->in_sync == 0) { | 707 | while (sync->in_sync == 0) { |
709 | __udelay(1); | 708 | __udelay(1); |
710 | /* | 709 | /* |
711 | * A different cpu changes *in_sync. Therefore use | 710 | * A different cpu changes *in_sync. Therefore use |
712 | * barrier() to force memory access. | 711 | * barrier() to force memory access. |
713 | */ | 712 | */ |
714 | barrier(); | 713 | barrier(); |
715 | } | 714 | } |
716 | if (sync->in_sync != 1) | 715 | if (sync->in_sync != 1) |
717 | /* Didn't work. Clear per-cpu in sync bit again. */ | 716 | /* Didn't work. Clear per-cpu in sync bit again. */ |
718 | disable_sync_clock(NULL); | 717 | disable_sync_clock(NULL); |
719 | /* | 718 | /* |
720 | * This round of TOD syncing is done. Set the clock comparator | 719 | * This round of TOD syncing is done. Set the clock comparator |
721 | * to the next tick and let the processor continue. | 720 | * to the next tick and let the processor continue. |
722 | */ | 721 | */ |
723 | fixup_clock_comparator(sync->fixup_cc); | 722 | fixup_clock_comparator(sync->fixup_cc); |
724 | } | 723 | } |
725 | 724 | ||
726 | /* | 725 | /* |
727 | * Sync the TOD clock using the port referred to by aibp. This port | 726 | * Sync the TOD clock using the port referred to by aibp. This port |
728 | * has to be enabled and the other port has to be disabled. The | 727 | * has to be enabled and the other port has to be disabled. The |
729 | * last eacr update has to be more than 1.6 seconds in the past. | 728 | * last eacr update has to be more than 1.6 seconds in the past. |
730 | */ | 729 | */ |
731 | static int etr_sync_clock(void *data) | 730 | static int etr_sync_clock(void *data) |
732 | { | 731 | { |
733 | static int first; | 732 | static int first; |
734 | unsigned long long clock, old_clock, delay, delta; | 733 | unsigned long long clock, old_clock, delay, delta; |
735 | struct clock_sync_data *etr_sync; | 734 | struct clock_sync_data *etr_sync; |
736 | struct etr_aib *sync_port, *aib; | 735 | struct etr_aib *sync_port, *aib; |
737 | int port; | 736 | int port; |
738 | int rc; | 737 | int rc; |
739 | 738 | ||
740 | etr_sync = data; | 739 | etr_sync = data; |
741 | 740 | ||
742 | if (xchg(&first, 1) == 1) { | 741 | if (xchg(&first, 1) == 1) { |
743 | /* Slave */ | 742 | /* Slave */ |
744 | clock_sync_cpu(etr_sync); | 743 | clock_sync_cpu(etr_sync); |
745 | return 0; | 744 | return 0; |
746 | } | 745 | } |
747 | 746 | ||
748 | /* Wait until all other cpus entered the sync function. */ | 747 | /* Wait until all other cpus entered the sync function. */ |
749 | while (atomic_read(&etr_sync->cpus) != 0) | 748 | while (atomic_read(&etr_sync->cpus) != 0) |
750 | cpu_relax(); | 749 | cpu_relax(); |
751 | 750 | ||
752 | port = etr_sync->etr_port; | 751 | port = etr_sync->etr_port; |
753 | aib = etr_sync->etr_aib; | 752 | aib = etr_sync->etr_aib; |
754 | sync_port = (port == 0) ? &etr_port0 : &etr_port1; | 753 | sync_port = (port == 0) ? &etr_port0 : &etr_port1; |
755 | enable_sync_clock(); | 754 | enable_sync_clock(); |
756 | 755 | ||
757 | /* Set clock to next OTE. */ | 756 | /* Set clock to next OTE. */ |
758 | __ctl_set_bit(14, 21); | 757 | __ctl_set_bit(14, 21); |
759 | __ctl_set_bit(0, 29); | 758 | __ctl_set_bit(0, 29); |
760 | clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32; | 759 | clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32; |
761 | old_clock = get_clock(); | 760 | old_clock = get_clock(); |
762 | if (set_clock(clock) == 0) { | 761 | if (set_clock(clock) == 0) { |
763 | __udelay(1); /* Wait for the clock to start. */ | 762 | __udelay(1); /* Wait for the clock to start. */ |
764 | __ctl_clear_bit(0, 29); | 763 | __ctl_clear_bit(0, 29); |
765 | __ctl_clear_bit(14, 21); | 764 | __ctl_clear_bit(14, 21); |
766 | etr_stetr(aib); | 765 | etr_stetr(aib); |
767 | /* Adjust Linux timing variables. */ | 766 | /* Adjust Linux timing variables. */ |
768 | delay = (unsigned long long) | 767 | delay = (unsigned long long) |
769 | (aib->edf2.etv - sync_port->edf2.etv) << 32; | 768 | (aib->edf2.etv - sync_port->edf2.etv) << 32; |
770 | delta = adjust_time(old_clock, clock, delay); | 769 | delta = adjust_time(old_clock, clock, delay); |
771 | etr_sync->fixup_cc = delta; | 770 | etr_sync->fixup_cc = delta; |
772 | fixup_clock_comparator(delta); | 771 | fixup_clock_comparator(delta); |
773 | /* Verify that the clock is properly set. */ | 772 | /* Verify that the clock is properly set. */ |
774 | if (!etr_aib_follows(sync_port, aib, port)) { | 773 | if (!etr_aib_follows(sync_port, aib, port)) { |
775 | /* Didn't work. */ | 774 | /* Didn't work. */ |
776 | disable_sync_clock(NULL); | 775 | disable_sync_clock(NULL); |
777 | etr_sync->in_sync = -EAGAIN; | 776 | etr_sync->in_sync = -EAGAIN; |
778 | rc = -EAGAIN; | 777 | rc = -EAGAIN; |
779 | } else { | 778 | } else { |
780 | etr_sync->in_sync = 1; | 779 | etr_sync->in_sync = 1; |
781 | rc = 0; | 780 | rc = 0; |
782 | } | 781 | } |
783 | } else { | 782 | } else { |
784 | /* Could not set the clock ?!? */ | 783 | /* Could not set the clock ?!? */ |
785 | __ctl_clear_bit(0, 29); | 784 | __ctl_clear_bit(0, 29); |
786 | __ctl_clear_bit(14, 21); | 785 | __ctl_clear_bit(14, 21); |
787 | disable_sync_clock(NULL); | 786 | disable_sync_clock(NULL); |
788 | etr_sync->in_sync = -EAGAIN; | 787 | etr_sync->in_sync = -EAGAIN; |
789 | rc = -EAGAIN; | 788 | rc = -EAGAIN; |
790 | } | 789 | } |
791 | xchg(&first, 0); | 790 | xchg(&first, 0); |
792 | return rc; | 791 | return rc; |
793 | } | 792 | } |
794 | 793 | ||
795 | static int etr_sync_clock_stop(struct etr_aib *aib, int port) | 794 | static int etr_sync_clock_stop(struct etr_aib *aib, int port) |
796 | { | 795 | { |
797 | struct clock_sync_data etr_sync; | 796 | struct clock_sync_data etr_sync; |
798 | struct etr_aib *sync_port; | 797 | struct etr_aib *sync_port; |
799 | int follows; | 798 | int follows; |
800 | int rc; | 799 | int rc; |
801 | 800 | ||
802 | /* Check if the current aib is adjacent to the sync port aib. */ | 801 | /* Check if the current aib is adjacent to the sync port aib. */ |
803 | sync_port = (port == 0) ? &etr_port0 : &etr_port1; | 802 | sync_port = (port == 0) ? &etr_port0 : &etr_port1; |
804 | follows = etr_aib_follows(sync_port, aib, port); | 803 | follows = etr_aib_follows(sync_port, aib, port); |
805 | memcpy(sync_port, aib, sizeof(*aib)); | 804 | memcpy(sync_port, aib, sizeof(*aib)); |
806 | if (!follows) | 805 | if (!follows) |
807 | return -EAGAIN; | 806 | return -EAGAIN; |
808 | memset(&etr_sync, 0, sizeof(etr_sync)); | 807 | memset(&etr_sync, 0, sizeof(etr_sync)); |
809 | etr_sync.etr_aib = aib; | 808 | etr_sync.etr_aib = aib; |
810 | etr_sync.etr_port = port; | 809 | etr_sync.etr_port = port; |
811 | get_online_cpus(); | 810 | get_online_cpus(); |
812 | atomic_set(&etr_sync.cpus, num_online_cpus() - 1); | 811 | atomic_set(&etr_sync.cpus, num_online_cpus() - 1); |
813 | rc = stop_machine(etr_sync_clock, &etr_sync, cpu_online_mask); | 812 | rc = stop_machine(etr_sync_clock, &etr_sync, cpu_online_mask); |
814 | put_online_cpus(); | 813 | put_online_cpus(); |
815 | return rc; | 814 | return rc; |
816 | } | 815 | } |
817 | 816 | ||
818 | /* | 817 | /* |
819 | * Handle the immediate effects of the different events. | 818 | * Handle the immediate effects of the different events. |
820 | * The port change event is used for online/offline changes. | 819 | * The port change event is used for online/offline changes. |
821 | */ | 820 | */ |
822 | static struct etr_eacr etr_handle_events(struct etr_eacr eacr) | 821 | static struct etr_eacr etr_handle_events(struct etr_eacr eacr) |
823 | { | 822 | { |
824 | if (test_and_clear_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) | 823 | if (test_and_clear_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) |
825 | eacr.es = 0; | 824 | eacr.es = 0; |
826 | if (test_and_clear_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) | 825 | if (test_and_clear_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) |
827 | eacr.es = eacr.sl = 0; | 826 | eacr.es = eacr.sl = 0; |
828 | if (test_and_clear_bit(ETR_EVENT_PORT_ALERT, &etr_events)) | 827 | if (test_and_clear_bit(ETR_EVENT_PORT_ALERT, &etr_events)) |
829 | etr_port0_uptodate = etr_port1_uptodate = 0; | 828 | etr_port0_uptodate = etr_port1_uptodate = 0; |
830 | 829 | ||
831 | if (test_and_clear_bit(ETR_EVENT_PORT0_CHANGE, &etr_events)) { | 830 | if (test_and_clear_bit(ETR_EVENT_PORT0_CHANGE, &etr_events)) { |
832 | if (eacr.e0) | 831 | if (eacr.e0) |
833 | /* | 832 | /* |
834 | * Port change of an enabled port. We have to | 833 | * Port change of an enabled port. We have to |
835 | * assume that this can have caused a stepping | 834 | * assume that this can have caused a stepping |
836 | * port switch. | 835 | * port switch. |
837 | */ | 836 | */ |
838 | etr_tolec = get_clock(); | 837 | etr_tolec = get_clock(); |
839 | eacr.p0 = etr_port0_online; | 838 | eacr.p0 = etr_port0_online; |
840 | if (!eacr.p0) | 839 | if (!eacr.p0) |
841 | eacr.e0 = 0; | 840 | eacr.e0 = 0; |
842 | etr_port0_uptodate = 0; | 841 | etr_port0_uptodate = 0; |
843 | } | 842 | } |
844 | if (test_and_clear_bit(ETR_EVENT_PORT1_CHANGE, &etr_events)) { | 843 | if (test_and_clear_bit(ETR_EVENT_PORT1_CHANGE, &etr_events)) { |
845 | if (eacr.e1) | 844 | if (eacr.e1) |
846 | /* | 845 | /* |
847 | * Port change of an enabled port. We have to | 846 | * Port change of an enabled port. We have to |
848 | * assume that this can have caused a stepping | 847 | * assume that this can have caused a stepping |
849 | * port switch. | 848 | * port switch. |
850 | */ | 849 | */ |
851 | etr_tolec = get_clock(); | 850 | etr_tolec = get_clock(); |
852 | eacr.p1 = etr_port1_online; | 851 | eacr.p1 = etr_port1_online; |
853 | if (!eacr.p1) | 852 | if (!eacr.p1) |
854 | eacr.e1 = 0; | 853 | eacr.e1 = 0; |
855 | etr_port1_uptodate = 0; | 854 | etr_port1_uptodate = 0; |
856 | } | 855 | } |
857 | clear_bit(ETR_EVENT_UPDATE, &etr_events); | 856 | clear_bit(ETR_EVENT_UPDATE, &etr_events); |
858 | return eacr; | 857 | return eacr; |
859 | } | 858 | } |
860 | 859 | ||
861 | /* | 860 | /* |
861 | * Set up a timer that expires 1.6 seconds after etr_tolec if | 860 | * Set up a timer that expires 1.6 seconds after etr_tolec if |
863 | * one of the ports needs an update. | 862 | * one of the ports needs an update. |
864 | */ | 863 | */ |
865 | static void etr_set_tolec_timeout(unsigned long long now) | 864 | static void etr_set_tolec_timeout(unsigned long long now) |
866 | { | 865 | { |
867 | unsigned long micros; | 866 | unsigned long micros; |
868 | 867 | ||
869 | if ((!etr_eacr.p0 || etr_port0_uptodate) && | 868 | if ((!etr_eacr.p0 || etr_port0_uptodate) && |
870 | (!etr_eacr.p1 || etr_port1_uptodate)) | 869 | (!etr_eacr.p1 || etr_port1_uptodate)) |
871 | return; | 870 | return; |
872 | micros = (now > etr_tolec) ? ((now - etr_tolec) >> 12) : 0; | 871 | micros = (now > etr_tolec) ? ((now - etr_tolec) >> 12) : 0; |
873 | micros = (micros > 1600000) ? 0 : 1600000 - micros; | 872 | micros = (micros > 1600000) ? 0 : 1600000 - micros; |
874 | mod_timer(&etr_timer, jiffies + (micros * HZ) / 1000000 + 1); | 873 | mod_timer(&etr_timer, jiffies + (micros * HZ) / 1000000 + 1); |
875 | } | 874 | } |
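The arithmetic above arms the timer for the moment 1.6 seconds after the last eacr update: now - etr_tolec is shifted right by 12 to convert TOD units to microseconds, clamped, and converted to jiffies. A standalone check of the conversion (assumes only the 4096-units-per-microsecond TOD rate):

#include <assert.h>

int main(void)
{
	unsigned long long etr_tolec = 0;
	unsigned long long now = etr_tolec + (600000ULL << 12); /* +0.6 s */
	unsigned long micros = (now - etr_tolec) >> 12; /* elapsed, in us */

	micros = (micros > 1600000) ? 0 : 1600000 - micros;
	assert(micros == 1000000); /* fire again in 1.0 s */
	return 0;
}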
876 | 875 | ||
877 | /* | 876 | /* |
878 | * Set up a timer that expires after 1/2 second. | 877 | * Set up a timer that expires after 1/2 second. |
879 | */ | 878 | */ |
880 | static void etr_set_sync_timeout(void) | 879 | static void etr_set_sync_timeout(void) |
881 | { | 880 | { |
882 | mod_timer(&etr_timer, jiffies + HZ/2); | 881 | mod_timer(&etr_timer, jiffies + HZ/2); |
883 | } | 882 | } |
884 | 883 | ||
885 | /* | 884 | /* |
886 | * Update the aib information for one or both ports. | 885 | * Update the aib information for one or both ports. |
887 | */ | 886 | */ |
888 | static struct etr_eacr etr_handle_update(struct etr_aib *aib, | 887 | static struct etr_eacr etr_handle_update(struct etr_aib *aib, |
889 | struct etr_eacr eacr) | 888 | struct etr_eacr eacr) |
890 | { | 889 | { |
891 | /* With both ports disabled the aib information is useless. */ | 890 | /* With both ports disabled the aib information is useless. */ |
892 | if (!eacr.e0 && !eacr.e1) | 891 | if (!eacr.e0 && !eacr.e1) |
893 | return eacr; | 892 | return eacr; |
894 | 893 | ||
895 | /* Update port0 or port1 with aib stored in etr_work_fn. */ | 894 | /* Update port0 or port1 with aib stored in etr_work_fn. */ |
896 | if (aib->esw.q == 0) { | 895 | if (aib->esw.q == 0) { |
897 | /* Information for port 0 stored. */ | 896 | /* Information for port 0 stored. */ |
898 | if (eacr.p0 && !etr_port0_uptodate) { | 897 | if (eacr.p0 && !etr_port0_uptodate) { |
899 | etr_port0 = *aib; | 898 | etr_port0 = *aib; |
900 | if (etr_port0_online) | 899 | if (etr_port0_online) |
901 | etr_port0_uptodate = 1; | 900 | etr_port0_uptodate = 1; |
902 | } | 901 | } |
903 | } else { | 902 | } else { |
904 | /* Information for port 1 stored. */ | 903 | /* Information for port 1 stored. */ |
905 | if (eacr.p1 && !etr_port1_uptodate) { | 904 | if (eacr.p1 && !etr_port1_uptodate) { |
906 | etr_port1 = *aib; | 905 | etr_port1 = *aib; |
907 | if (etr_port1_online) | 906 | if (etr_port1_online) |
908 | etr_port1_uptodate = 1; | 907 | etr_port1_uptodate = 1; |
909 | } | 908 | } |
910 | } | 909 | } |
911 | 910 | ||
912 | /* | 911 | /* |
913 | * Do not try to get the alternate port aib if the clock | 912 | * Do not try to get the alternate port aib if the clock |
914 | * is not in sync yet. | 913 | * is not in sync yet. |
915 | */ | 914 | */ |
916 | if (!eacr.es || !check_sync_clock()) | 915 | if (!eacr.es || !check_sync_clock()) |
917 | return eacr; | 916 | return eacr; |
918 | 917 | ||
919 | /* | 918 | /* |
920 | * If steai is available we can get the information about | 919 | * If steai is available we can get the information about |
921 | * the other port immediately. If only stetr is available the | 920 | * the other port immediately. If only stetr is available the |
922 | * data-port bit toggle has to be used. | 921 | * data-port bit toggle has to be used. |
923 | */ | 922 | */ |
924 | if (etr_steai_available) { | 923 | if (etr_steai_available) { |
925 | if (eacr.p0 && !etr_port0_uptodate) { | 924 | if (eacr.p0 && !etr_port0_uptodate) { |
926 | etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0); | 925 | etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0); |
927 | etr_port0_uptodate = 1; | 926 | etr_port0_uptodate = 1; |
928 | } | 927 | } |
929 | if (eacr.p1 && !etr_port1_uptodate) { | 928 | if (eacr.p1 && !etr_port1_uptodate) { |
930 | etr_steai_cv(&etr_port1, ETR_STEAI_PORT_1); | 929 | etr_steai_cv(&etr_port1, ETR_STEAI_PORT_1); |
931 | etr_port1_uptodate = 1; | 930 | etr_port1_uptodate = 1; |
932 | } | 931 | } |
933 | } else { | 932 | } else { |
934 | /* | 933 | /* |
935 | * One port was updated above; if the other | 934 | * One port was updated above; if the other |
936 | * port is not up-to-date, toggle the dp bit. | 935 | * port is not up-to-date, toggle the dp bit. |
937 | */ | 936 | */ |
938 | if ((eacr.p0 && !etr_port0_uptodate) || | 937 | if ((eacr.p0 && !etr_port0_uptodate) || |
939 | (eacr.p1 && !etr_port1_uptodate)) | 938 | (eacr.p1 && !etr_port1_uptodate)) |
940 | eacr.dp ^= 1; | 939 | eacr.dp ^= 1; |
941 | else | 940 | else |
942 | eacr.dp = 0; | 941 | eacr.dp = 0; |
943 | } | 942 | } |
944 | return eacr; | 943 | return eacr; |
945 | } | 944 | } |
946 | 945 | ||
947 | /* | 946 | /* |
947 | * Write the new etr control register if it differs from the current | 946 | * Write the new etr control register if it differs from the current |
948 | * one and update etr_tolec if the data port may have changed. | 947 | * one and update etr_tolec if the data port may have changed. |
950 | */ | 949 | */ |
951 | static void etr_update_eacr(struct etr_eacr eacr) | 950 | static void etr_update_eacr(struct etr_eacr eacr) |
952 | { | 951 | { |
953 | int dp_changed; | 952 | int dp_changed; |
954 | 953 | ||
955 | if (memcmp(&etr_eacr, &eacr, sizeof(eacr)) == 0) | 954 | if (memcmp(&etr_eacr, &eacr, sizeof(eacr)) == 0) |
956 | /* No change, return. */ | 955 | /* No change, return. */ |
957 | return; | 956 | return; |
958 | /* | 957 | /* |
959 | * The disable of an active port or the change of the data port | 958 | * The disable of an active port or the change of the data port |
960 | * bit can/will cause a change in the data port. | 959 | * bit can/will cause a change in the data port. |
961 | */ | 960 | */ |
962 | dp_changed = etr_eacr.e0 > eacr.e0 || etr_eacr.e1 > eacr.e1 || | 961 | dp_changed = etr_eacr.e0 > eacr.e0 || etr_eacr.e1 > eacr.e1 || |
963 | (etr_eacr.dp ^ eacr.dp) != 0; | 962 | (etr_eacr.dp ^ eacr.dp) != 0; |
964 | etr_eacr = eacr; | 963 | etr_eacr = eacr; |
965 | etr_setr(&etr_eacr); | 964 | etr_setr(&etr_eacr); |
966 | if (dp_changed) | 965 | if (dp_changed) |
967 | etr_tolec = get_clock(); | 966 | etr_tolec = get_clock(); |
968 | } | 967 | } |
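
A note on the change detection above: etr_update_eacr() first compares the whole register image with memcmp() to skip redundant writes, and the dp_changed expression then exploits integer comparison of one-bit fields: old.e0 > new.e0 is true only for a 1 -> 0 transition, i.e. when an enabled port is being disabled. A minimal user-space sketch of the same idiom (the struct is an illustrative subset, not the real ETR register layout):

#include <assert.h>

struct eacr_bits {		/* illustrative subset of the register */
	unsigned int e0 : 1;	/* port 0 stepping enabled */
	unsigned int e1 : 1;	/* port 1 stepping enabled */
	unsigned int dp : 1;	/* data port selector */
};

/*
 * True when the update disables an active port or flips the data
 * port bit, i.e. whenever the data port contents may change.
 */
static int dp_may_change(struct eacr_bits old, struct eacr_bits new)
{
	return old.e0 > new.e0 || old.e1 > new.e1 || (old.dp ^ new.dp) != 0;
}

int main(void)
{
	struct eacr_bits on  = { .e0 = 1, .e1 = 1, .dp = 0 };
	struct eacr_bits off = { .e0 = 0, .e1 = 1, .dp = 0 };

	assert(dp_may_change(on, off));		/* port 0 disabled: 1 -> 0 */
	assert(!dp_may_change(off, on));	/* enabling alone does not */
	return 0;
}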
969 | 968 | ||
970 | /* | 969 | /* |
971 | * ETR work. In this function you'll find the main logic. In | 970 | * ETR work. In this function you'll find the main logic. In |
972 | * particular this is the only function that calls etr_update_eacr(); | 971 | * particular this is the only function that calls etr_update_eacr(); |
973 | * it "controls" the etr control register. | 972 | * it "controls" the etr control register. |
974 | */ | 973 | */ |
975 | static void etr_work_fn(struct work_struct *work) | 974 | static void etr_work_fn(struct work_struct *work) |
976 | { | 975 | { |
977 | unsigned long long now; | 976 | unsigned long long now; |
978 | struct etr_eacr eacr; | 977 | struct etr_eacr eacr; |
979 | struct etr_aib aib; | 978 | struct etr_aib aib; |
980 | int sync_port; | 979 | int sync_port; |
981 | 980 | ||
982 | /* Prevent concurrent execution. */ | 981 | /* Prevent concurrent execution. */ |
983 | mutex_lock(&etr_work_mutex); | 982 | mutex_lock(&etr_work_mutex); |
984 | 983 | ||
985 | /* Create working copy of etr_eacr. */ | 984 | /* Create working copy of etr_eacr. */ |
986 | eacr = etr_eacr; | 985 | eacr = etr_eacr; |
987 | 986 | ||
988 | /* Check for the different events and their immediate effects. */ | 987 | /* Check for the different events and their immediate effects. */ |
989 | eacr = etr_handle_events(eacr); | 988 | eacr = etr_handle_events(eacr); |
990 | 989 | ||
991 | /* Check if ETR is supposed to be active. */ | 990 | /* Check if ETR is supposed to be active. */ |
992 | eacr.ea = eacr.p0 || eacr.p1; | 991 | eacr.ea = eacr.p0 || eacr.p1; |
993 | if (!eacr.ea) { | 992 | if (!eacr.ea) { |
994 | /* Both ports offline. Reset everything. */ | 993 | /* Both ports offline. Reset everything. */ |
995 | eacr.dp = eacr.es = eacr.sl = 0; | 994 | eacr.dp = eacr.es = eacr.sl = 0; |
996 | on_each_cpu(disable_sync_clock, NULL, 1); | 995 | on_each_cpu(disable_sync_clock, NULL, 1); |
997 | del_timer_sync(&etr_timer); | 996 | del_timer_sync(&etr_timer); |
998 | etr_update_eacr(eacr); | 997 | etr_update_eacr(eacr); |
999 | goto out_unlock; | 998 | goto out_unlock; |
1000 | } | 999 | } |
1001 | 1000 | ||
1002 | /* Store aib to get the current ETR status word. */ | 1001 | /* Store aib to get the current ETR status word. */ |
1003 | BUG_ON(etr_stetr(&aib) != 0); | 1002 | BUG_ON(etr_stetr(&aib) != 0); |
1004 | etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */ | 1003 | etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */ |
1005 | now = get_clock(); | 1004 | now = get_clock(); |
1006 | 1005 | ||
1007 | /* | 1006 | /* |
1008 | * Update the port information if the last stepping port change | 1007 | * Update the port information if the last stepping port change |
1009 | * or data port change is older than 1.6 seconds. | 1008 | * or data port change is older than 1.6 seconds. |
1010 | */ | 1009 | */ |
1011 | if (now >= etr_tolec + (1600000 << 12)) | 1010 | if (now >= etr_tolec + (1600000 << 12)) |
1012 | eacr = etr_handle_update(&aib, eacr); | 1011 | eacr = etr_handle_update(&aib, eacr); |
1013 | 1012 | ||
1014 | /* | 1013 | /* |
1015 | * Select ports to enable. The preferred synchronization mode is PPS. | 1014 | * Select ports to enable. The preferred synchronization mode is PPS. |
1016 | * Whether a port can be enabled depends on a number of things: | 1015 | * Whether a port can be enabled depends on a number of things: |
1017 | * 1) The port needs to be online and uptodate. A port is not | 1016 | * 1) The port needs to be online and uptodate. A port is not |
1018 | * disabled just because it is not uptodate, but it is only | 1017 | * disabled just because it is not uptodate, but it is only |
1019 | * enabled if it is uptodate. | 1018 | * enabled if it is uptodate. |
1020 | * 2) The port needs to have the same mode (pps / etr). | 1019 | * 2) The port needs to have the same mode (pps / etr). |
1021 | * 3) The port needs to be usable -> etr_port_valid() == 1 | 1020 | * 3) The port needs to be usable -> etr_port_valid() == 1 |
1022 | * 4) To enable the second port the clock needs to be in sync. | 1021 | * 4) To enable the second port the clock needs to be in sync. |
1023 | * 5) If both ports are usable and are ETR ports, the network id | 1022 | * 5) If both ports are usable and are ETR ports, the network id |
1024 | * has to be the same. | 1023 | * has to be the same. |
1025 | * The eacr.sl bit is used to indicate etr mode vs. pps mode. | 1024 | * The eacr.sl bit is used to indicate etr mode vs. pps mode. |
1026 | */ | 1025 | */ |
1027 | if (eacr.p0 && aib.esw.psc0 == etr_lpsc_pps_mode) { | 1026 | if (eacr.p0 && aib.esw.psc0 == etr_lpsc_pps_mode) { |
1028 | eacr.sl = 0; | 1027 | eacr.sl = 0; |
1029 | eacr.e0 = 1; | 1028 | eacr.e0 = 1; |
1030 | if (!etr_mode_is_pps(etr_eacr)) | 1029 | if (!etr_mode_is_pps(etr_eacr)) |
1031 | eacr.es = 0; | 1030 | eacr.es = 0; |
1032 | if (!eacr.es || !eacr.p1 || aib.esw.psc1 != etr_lpsc_pps_mode) | 1031 | if (!eacr.es || !eacr.p1 || aib.esw.psc1 != etr_lpsc_pps_mode) |
1033 | eacr.e1 = 0; | 1032 | eacr.e1 = 0; |
1034 | // FIXME: uptodate checks ? | 1033 | // FIXME: uptodate checks ? |
1035 | else if (etr_port0_uptodate && etr_port1_uptodate) | 1034 | else if (etr_port0_uptodate && etr_port1_uptodate) |
1036 | eacr.e1 = 1; | 1035 | eacr.e1 = 1; |
1037 | sync_port = (etr_port0_uptodate && | 1036 | sync_port = (etr_port0_uptodate && |
1038 | etr_port_valid(&etr_port0, 0)) ? 0 : -1; | 1037 | etr_port_valid(&etr_port0, 0)) ? 0 : -1; |
1039 | } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) { | 1038 | } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) { |
1040 | eacr.sl = 0; | 1039 | eacr.sl = 0; |
1041 | eacr.e0 = 0; | 1040 | eacr.e0 = 0; |
1042 | eacr.e1 = 1; | 1041 | eacr.e1 = 1; |
1043 | if (!etr_mode_is_pps(etr_eacr)) | 1042 | if (!etr_mode_is_pps(etr_eacr)) |
1044 | eacr.es = 0; | 1043 | eacr.es = 0; |
1045 | sync_port = (etr_port1_uptodate && | 1044 | sync_port = (etr_port1_uptodate && |
1046 | etr_port_valid(&etr_port1, 1)) ? 1 : -1; | 1045 | etr_port_valid(&etr_port1, 1)) ? 1 : -1; |
1047 | } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) { | 1046 | } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) { |
1048 | eacr.sl = 1; | 1047 | eacr.sl = 1; |
1049 | eacr.e0 = 1; | 1048 | eacr.e0 = 1; |
1050 | if (!etr_mode_is_etr(etr_eacr)) | 1049 | if (!etr_mode_is_etr(etr_eacr)) |
1051 | eacr.es = 0; | 1050 | eacr.es = 0; |
1052 | if (!eacr.es || !eacr.p1 || | 1051 | if (!eacr.es || !eacr.p1 || |
1053 | aib.esw.psc1 != etr_lpsc_operational_alt) | 1052 | aib.esw.psc1 != etr_lpsc_operational_alt) |
1054 | eacr.e1 = 0; | 1053 | eacr.e1 = 0; |
1055 | else if (etr_port0_uptodate && etr_port1_uptodate && | 1054 | else if (etr_port0_uptodate && etr_port1_uptodate && |
1056 | etr_compare_network(&etr_port0, &etr_port1)) | 1055 | etr_compare_network(&etr_port0, &etr_port1)) |
1057 | eacr.e1 = 1; | 1056 | eacr.e1 = 1; |
1058 | sync_port = (etr_port0_uptodate && | 1057 | sync_port = (etr_port0_uptodate && |
1059 | etr_port_valid(&etr_port0, 0)) ? 0 : -1; | 1058 | etr_port_valid(&etr_port0, 0)) ? 0 : -1; |
1060 | } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) { | 1059 | } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) { |
1061 | eacr.sl = 1; | 1060 | eacr.sl = 1; |
1062 | eacr.e0 = 0; | 1061 | eacr.e0 = 0; |
1063 | eacr.e1 = 1; | 1062 | eacr.e1 = 1; |
1064 | if (!etr_mode_is_etr(etr_eacr)) | 1063 | if (!etr_mode_is_etr(etr_eacr)) |
1065 | eacr.es = 0; | 1064 | eacr.es = 0; |
1066 | sync_port = (etr_port1_uptodate && | 1065 | sync_port = (etr_port1_uptodate && |
1067 | etr_port_valid(&etr_port1, 1)) ? 1 : -1; | 1066 | etr_port_valid(&etr_port1, 1)) ? 1 : -1; |
1068 | } else { | 1067 | } else { |
1069 | /* Both ports not usable. */ | 1068 | /* Both ports not usable. */ |
1070 | eacr.es = eacr.sl = 0; | 1069 | eacr.es = eacr.sl = 0; |
1071 | sync_port = -1; | 1070 | sync_port = -1; |
1072 | } | 1071 | } |
1073 | 1072 | ||
1074 | /* | 1073 | /* |
1075 | * If the clock is in sync, just update the eacr and return. | 1074 | * If the clock is in sync, just update the eacr and return. |
1076 | * If there is no valid sync port, wait for a port update. | 1075 | * If there is no valid sync port, wait for a port update. |
1077 | */ | 1076 | */ |
1078 | if ((eacr.es && check_sync_clock()) || sync_port < 0) { | 1077 | if ((eacr.es && check_sync_clock()) || sync_port < 0) { |
1079 | etr_update_eacr(eacr); | 1078 | etr_update_eacr(eacr); |
1080 | etr_set_tolec_timeout(now); | 1079 | etr_set_tolec_timeout(now); |
1081 | goto out_unlock; | 1080 | goto out_unlock; |
1082 | } | 1081 | } |
1083 | 1082 | ||
1084 | /* | 1083 | /* |
1085 | * Prepare control register for clock syncing | 1084 | * Prepare control register for clock syncing |
1086 | * (reset data port bit, set sync check control). | 1085 | * (reset data port bit, set sync check control). |
1087 | */ | 1086 | */ |
1088 | eacr.dp = 0; | 1087 | eacr.dp = 0; |
1089 | eacr.es = 1; | 1088 | eacr.es = 1; |
1090 | 1089 | ||
1091 | /* | 1090 | /* |
1092 | * Update eacr and try to synchronize the clock. If the update | 1091 | * Update eacr and try to synchronize the clock. If the update |
1093 | * of eacr caused a stepping port switch (or if we have to | 1092 | * of eacr caused a stepping port switch (or if we have to |
1094 | * assume that a stepping port switch has occurred) or the | 1093 | * assume that a stepping port switch has occurred) or the |
1095 | * clock syncing failed, reset the sync check control bit | 1094 | * clock syncing failed, reset the sync check control bit |
1096 | * and set up a timer to try again after 0.5 seconds. | 1095 | * and set up a timer to try again after 0.5 seconds. |
1097 | */ | 1096 | */ |
1098 | etr_update_eacr(eacr); | 1097 | etr_update_eacr(eacr); |
1099 | if (now < etr_tolec + (1600000 << 12) || | 1098 | if (now < etr_tolec + (1600000 << 12) || |
1100 | etr_sync_clock_stop(&aib, sync_port) != 0) { | 1099 | etr_sync_clock_stop(&aib, sync_port) != 0) { |
1101 | /* Sync failed. Try again in 1/2 second. */ | 1100 | /* Sync failed. Try again in 1/2 second. */ |
1102 | eacr.es = 0; | 1101 | eacr.es = 0; |
1103 | etr_update_eacr(eacr); | 1102 | etr_update_eacr(eacr); |
1104 | etr_set_sync_timeout(); | 1103 | etr_set_sync_timeout(); |
1105 | } else | 1104 | } else |
1106 | etr_set_tolec_timeout(now); | 1105 | etr_set_tolec_timeout(now); |
1107 | out_unlock: | 1106 | out_unlock: |
1108 | mutex_unlock(&etr_work_mutex); | 1107 | mutex_unlock(&etr_work_mutex); |
1109 | } | 1108 | } |
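
The four-way if/else chain above encodes a fixed preference order for the synchronization source. Reduced to its selection skeleton (a sketch only; the real code also manages the e0/e1/es/sl bits and the uptodate and network-id checks):

#include <assert.h>

enum sync_mode { MODE_NONE, MODE_PPS, MODE_ETR };

struct port_state {
	int online;		/* stands in for eacr.p0 / eacr.p1 */
	enum sync_mode mode;	/* derived from aib.esw.psc0 / psc1 */
};

/*
 * PPS beats ETR mode, and port 0 beats port 1 within the same mode.
 * Returns the port to sync from, or -1 if neither port is usable.
 */
static int pick_sync_port(struct port_state p0, struct port_state p1)
{
	if (p0.online && p0.mode == MODE_PPS)
		return 0;
	if (p1.online && p1.mode == MODE_PPS)
		return 1;
	if (p0.online && p0.mode == MODE_ETR)
		return 0;
	if (p1.online && p1.mode == MODE_ETR)
		return 1;
	return -1;
}

int main(void)
{
	struct port_state pps = { 1, MODE_PPS };
	struct port_state etr = { 1, MODE_ETR };
	struct port_state off = { 0, MODE_NONE };

	assert(pick_sync_port(etr, pps) == 1);	/* PPS wins, even on port 1 */
	assert(pick_sync_port(off, off) == -1);	/* no usable port */
	return 0;
}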
1110 | 1109 | ||
1111 | /* | 1110 | /* |
1112 | * Sysfs interface functions | 1111 | * Sysfs interface functions |
1113 | */ | 1112 | */ |
1114 | static struct sysdev_class etr_sysclass = { | 1113 | static struct sysdev_class etr_sysclass = { |
1115 | .name = "etr", | 1114 | .name = "etr", |
1116 | }; | 1115 | }; |
1117 | 1116 | ||
1118 | static struct sys_device etr_port0_dev = { | 1117 | static struct sys_device etr_port0_dev = { |
1119 | .id = 0, | 1118 | .id = 0, |
1120 | .cls = &etr_sysclass, | 1119 | .cls = &etr_sysclass, |
1121 | }; | 1120 | }; |
1122 | 1121 | ||
1123 | static struct sys_device etr_port1_dev = { | 1122 | static struct sys_device etr_port1_dev = { |
1124 | .id = 1, | 1123 | .id = 1, |
1125 | .cls = &etr_sysclass, | 1124 | .cls = &etr_sysclass, |
1126 | }; | 1125 | }; |
1127 | 1126 | ||
1128 | /* | 1127 | /* |
1129 | * ETR class attributes | 1128 | * ETR class attributes |
1130 | */ | 1129 | */ |
1131 | static ssize_t etr_stepping_port_show(struct sysdev_class *class, | 1130 | static ssize_t etr_stepping_port_show(struct sysdev_class *class, |
1132 | struct sysdev_class_attribute *attr, | 1131 | struct sysdev_class_attribute *attr, |
1133 | char *buf) | 1132 | char *buf) |
1134 | { | 1133 | { |
1135 | return sprintf(buf, "%i\n", etr_port0.esw.p); | 1134 | return sprintf(buf, "%i\n", etr_port0.esw.p); |
1136 | } | 1135 | } |
1137 | 1136 | ||
1138 | static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL); | 1137 | static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL); |
1139 | 1138 | ||
1140 | static ssize_t etr_stepping_mode_show(struct sysdev_class *class, | 1139 | static ssize_t etr_stepping_mode_show(struct sysdev_class *class, |
1141 | struct sysdev_class_attribute *attr, | 1140 | struct sysdev_class_attribute *attr, |
1142 | char *buf) | 1141 | char *buf) |
1143 | { | 1142 | { |
1144 | char *mode_str; | 1143 | char *mode_str; |
1145 | 1144 | ||
1146 | if (etr_mode_is_pps(etr_eacr)) | 1145 | if (etr_mode_is_pps(etr_eacr)) |
1147 | mode_str = "pps"; | 1146 | mode_str = "pps"; |
1148 | else if (etr_mode_is_etr(etr_eacr)) | 1147 | else if (etr_mode_is_etr(etr_eacr)) |
1149 | mode_str = "etr"; | 1148 | mode_str = "etr"; |
1150 | else | 1149 | else |
1151 | mode_str = "local"; | 1150 | mode_str = "local"; |
1152 | return sprintf(buf, "%s\n", mode_str); | 1151 | return sprintf(buf, "%s\n", mode_str); |
1153 | } | 1152 | } |
1154 | 1153 | ||
1155 | static SYSDEV_CLASS_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL); | 1154 | static SYSDEV_CLASS_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL); |
1156 | 1155 | ||
1157 | /* | 1156 | /* |
1158 | * ETR port attributes | 1157 | * ETR port attributes |
1159 | */ | 1158 | */ |
1160 | static inline struct etr_aib *etr_aib_from_dev(struct sys_device *dev) | 1159 | static inline struct etr_aib *etr_aib_from_dev(struct sys_device *dev) |
1161 | { | 1160 | { |
1162 | if (dev == &etr_port0_dev) | 1161 | if (dev == &etr_port0_dev) |
1163 | return etr_port0_online ? &etr_port0 : NULL; | 1162 | return etr_port0_online ? &etr_port0 : NULL; |
1164 | else | 1163 | else |
1165 | return etr_port1_online ? &etr_port1 : NULL; | 1164 | return etr_port1_online ? &etr_port1 : NULL; |
1166 | } | 1165 | } |
1167 | 1166 | ||
1168 | static ssize_t etr_online_show(struct sys_device *dev, | 1167 | static ssize_t etr_online_show(struct sys_device *dev, |
1169 | struct sysdev_attribute *attr, | 1168 | struct sysdev_attribute *attr, |
1170 | char *buf) | 1169 | char *buf) |
1171 | { | 1170 | { |
1172 | unsigned int online; | 1171 | unsigned int online; |
1173 | 1172 | ||
1174 | online = (dev == &etr_port0_dev) ? etr_port0_online : etr_port1_online; | 1173 | online = (dev == &etr_port0_dev) ? etr_port0_online : etr_port1_online; |
1175 | return sprintf(buf, "%i\n", online); | 1174 | return sprintf(buf, "%i\n", online); |
1176 | } | 1175 | } |
1177 | 1176 | ||
1178 | static ssize_t etr_online_store(struct sys_device *dev, | 1177 | static ssize_t etr_online_store(struct sys_device *dev, |
1179 | struct sysdev_attribute *attr, | 1178 | struct sysdev_attribute *attr, |
1180 | const char *buf, size_t count) | 1179 | const char *buf, size_t count) |
1181 | { | 1180 | { |
1182 | unsigned int value; | 1181 | unsigned int value; |
1183 | 1182 | ||
1184 | value = simple_strtoul(buf, NULL, 0); | 1183 | value = simple_strtoul(buf, NULL, 0); |
1185 | if (value != 0 && value != 1) | 1184 | if (value != 0 && value != 1) |
1186 | return -EINVAL; | 1185 | return -EINVAL; |
1187 | if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags)) | 1186 | if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags)) |
1188 | return -EOPNOTSUPP; | 1187 | return -EOPNOTSUPP; |
1189 | mutex_lock(&clock_sync_mutex); | 1188 | mutex_lock(&clock_sync_mutex); |
1190 | if (dev == &etr_port0_dev) { | 1189 | if (dev == &etr_port0_dev) { |
1191 | if (etr_port0_online == value) | 1190 | if (etr_port0_online == value) |
1192 | goto out; /* Nothing to do. */ | 1191 | goto out; /* Nothing to do. */ |
1193 | etr_port0_online = value; | 1192 | etr_port0_online = value; |
1194 | if (etr_port0_online && etr_port1_online) | 1193 | if (etr_port0_online && etr_port1_online) |
1195 | set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); | 1194 | set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); |
1196 | else | 1195 | else |
1197 | clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); | 1196 | clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); |
1198 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); | 1197 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); |
1199 | queue_work(time_sync_wq, &etr_work); | 1198 | queue_work(time_sync_wq, &etr_work); |
1200 | } else { | 1199 | } else { |
1201 | if (etr_port1_online == value) | 1200 | if (etr_port1_online == value) |
1202 | goto out; /* Nothing to do. */ | 1201 | goto out; /* Nothing to do. */ |
1203 | etr_port1_online = value; | 1202 | etr_port1_online = value; |
1204 | if (etr_port0_online && etr_port1_online) | 1203 | if (etr_port0_online && etr_port1_online) |
1205 | set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); | 1204 | set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); |
1206 | else | 1205 | else |
1207 | clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); | 1206 | clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); |
1208 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); | 1207 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); |
1209 | queue_work(time_sync_wq, &etr_work); | 1208 | queue_work(time_sync_wq, &etr_work); |
1210 | } | 1209 | } |
1211 | out: | 1210 | out: |
1212 | mutex_unlock(&clock_sync_mutex); | 1211 | mutex_unlock(&clock_sync_mutex); |
1213 | return count; | 1212 | return count; |
1214 | } | 1213 | } |
1215 | 1214 | ||
1216 | static SYSDEV_ATTR(online, 0600, etr_online_show, etr_online_store); | 1215 | static SYSDEV_ATTR(online, 0600, etr_online_show, etr_online_store); |
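
Given the sysdev class name "etr" and device ids 0 and 1, this attribute should surface as /sys/devices/system/etr/etr0/online and .../etr1/online (the paths follow from the usual sysdev naming convention and are an assumption, not verified here). A user-space sketch of taking port 0 online:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path assumed from the sysdev class/id naming convention. */
	int fd = open("/sys/devices/system/etr/etr0/online", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/*
	 * etr_online_store() accepts only "0" or "1" (-EINVAL otherwise)
	 * and fails with -EOPNOTSUPP when the machine has no ETR.
	 */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}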
1217 | 1216 | ||
1218 | static ssize_t etr_stepping_control_show(struct sys_device *dev, | 1217 | static ssize_t etr_stepping_control_show(struct sys_device *dev, |
1219 | struct sysdev_attribute *attr, | 1218 | struct sysdev_attribute *attr, |
1220 | char *buf) | 1219 | char *buf) |
1221 | { | 1220 | { |
1222 | return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ? | 1221 | return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ? |
1223 | etr_eacr.e0 : etr_eacr.e1); | 1222 | etr_eacr.e0 : etr_eacr.e1); |
1224 | } | 1223 | } |
1225 | 1224 | ||
1226 | static SYSDEV_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL); | 1225 | static SYSDEV_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL); |
1227 | 1226 | ||
1228 | static ssize_t etr_mode_code_show(struct sys_device *dev, | 1227 | static ssize_t etr_mode_code_show(struct sys_device *dev, |
1229 | struct sysdev_attribute *attr, char *buf) | 1228 | struct sysdev_attribute *attr, char *buf) |
1230 | { | 1229 | { |
1231 | if (!etr_port0_online && !etr_port1_online) | 1230 | if (!etr_port0_online && !etr_port1_online) |
1232 | /* Status word is not uptodate if both ports are offline. */ | 1231 | /* Status word is not uptodate if both ports are offline. */ |
1233 | return -ENODATA; | 1232 | return -ENODATA; |
1234 | return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ? | 1233 | return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ? |
1235 | etr_port0.esw.psc0 : etr_port0.esw.psc1); | 1234 | etr_port0.esw.psc0 : etr_port0.esw.psc1); |
1236 | } | 1235 | } |
1237 | 1236 | ||
1238 | static SYSDEV_ATTR(state_code, 0400, etr_mode_code_show, NULL); | 1237 | static SYSDEV_ATTR(state_code, 0400, etr_mode_code_show, NULL); |
1239 | 1238 | ||
1240 | static ssize_t etr_untuned_show(struct sys_device *dev, | 1239 | static ssize_t etr_untuned_show(struct sys_device *dev, |
1241 | struct sysdev_attribute *attr, char *buf) | 1240 | struct sysdev_attribute *attr, char *buf) |
1242 | { | 1241 | { |
1243 | struct etr_aib *aib = etr_aib_from_dev(dev); | 1242 | struct etr_aib *aib = etr_aib_from_dev(dev); |
1244 | 1243 | ||
1245 | if (!aib || !aib->slsw.v1) | 1244 | if (!aib || !aib->slsw.v1) |
1246 | return -ENODATA; | 1245 | return -ENODATA; |
1247 | return sprintf(buf, "%i\n", aib->edf1.u); | 1246 | return sprintf(buf, "%i\n", aib->edf1.u); |
1248 | } | 1247 | } |
1249 | 1248 | ||
1250 | static SYSDEV_ATTR(untuned, 0400, etr_untuned_show, NULL); | 1249 | static SYSDEV_ATTR(untuned, 0400, etr_untuned_show, NULL); |
1251 | 1250 | ||
1252 | static ssize_t etr_network_id_show(struct sys_device *dev, | 1251 | static ssize_t etr_network_id_show(struct sys_device *dev, |
1253 | struct sysdev_attribute *attr, char *buf) | 1252 | struct sysdev_attribute *attr, char *buf) |
1254 | { | 1253 | { |
1255 | struct etr_aib *aib = etr_aib_from_dev(dev); | 1254 | struct etr_aib *aib = etr_aib_from_dev(dev); |
1256 | 1255 | ||
1257 | if (!aib || !aib->slsw.v1) | 1256 | if (!aib || !aib->slsw.v1) |
1258 | return -ENODATA; | 1257 | return -ENODATA; |
1259 | return sprintf(buf, "%i\n", aib->edf1.net_id); | 1258 | return sprintf(buf, "%i\n", aib->edf1.net_id); |
1260 | } | 1259 | } |
1261 | 1260 | ||
1262 | static SYSDEV_ATTR(network, 0400, etr_network_id_show, NULL); | 1261 | static SYSDEV_ATTR(network, 0400, etr_network_id_show, NULL); |
1263 | 1262 | ||
1264 | static ssize_t etr_id_show(struct sys_device *dev, | 1263 | static ssize_t etr_id_show(struct sys_device *dev, |
1265 | struct sysdev_attribute *attr, char *buf) | 1264 | struct sysdev_attribute *attr, char *buf) |
1266 | { | 1265 | { |
1267 | struct etr_aib *aib = etr_aib_from_dev(dev); | 1266 | struct etr_aib *aib = etr_aib_from_dev(dev); |
1268 | 1267 | ||
1269 | if (!aib || !aib->slsw.v1) | 1268 | if (!aib || !aib->slsw.v1) |
1270 | return -ENODATA; | 1269 | return -ENODATA; |
1271 | return sprintf(buf, "%i\n", aib->edf1.etr_id); | 1270 | return sprintf(buf, "%i\n", aib->edf1.etr_id); |
1272 | } | 1271 | } |
1273 | 1272 | ||
1274 | static SYSDEV_ATTR(id, 0400, etr_id_show, NULL); | 1273 | static SYSDEV_ATTR(id, 0400, etr_id_show, NULL); |
1275 | 1274 | ||
1276 | static ssize_t etr_port_number_show(struct sys_device *dev, | 1275 | static ssize_t etr_port_number_show(struct sys_device *dev, |
1277 | struct sysdev_attribute *attr, char *buf) | 1276 | struct sysdev_attribute *attr, char *buf) |
1278 | { | 1277 | { |
1279 | struct etr_aib *aib = etr_aib_from_dev(dev); | 1278 | struct etr_aib *aib = etr_aib_from_dev(dev); |
1280 | 1279 | ||
1281 | if (!aib || !aib->slsw.v1) | 1280 | if (!aib || !aib->slsw.v1) |
1282 | return -ENODATA; | 1281 | return -ENODATA; |
1283 | return sprintf(buf, "%i\n", aib->edf1.etr_pn); | 1282 | return sprintf(buf, "%i\n", aib->edf1.etr_pn); |
1284 | } | 1283 | } |
1285 | 1284 | ||
1286 | static SYSDEV_ATTR(port, 0400, etr_port_number_show, NULL); | 1285 | static SYSDEV_ATTR(port, 0400, etr_port_number_show, NULL); |
1287 | 1286 | ||
1288 | static ssize_t etr_coupled_show(struct sys_device *dev, | 1287 | static ssize_t etr_coupled_show(struct sys_device *dev, |
1289 | struct sysdev_attribute *attr, char *buf) | 1288 | struct sysdev_attribute *attr, char *buf) |
1290 | { | 1289 | { |
1291 | struct etr_aib *aib = etr_aib_from_dev(dev); | 1290 | struct etr_aib *aib = etr_aib_from_dev(dev); |
1292 | 1291 | ||
1293 | if (!aib || !aib->slsw.v3) | 1292 | if (!aib || !aib->slsw.v3) |
1294 | return -ENODATA; | 1293 | return -ENODATA; |
1295 | return sprintf(buf, "%i\n", aib->edf3.c); | 1294 | return sprintf(buf, "%i\n", aib->edf3.c); |
1296 | } | 1295 | } |
1297 | 1296 | ||
1298 | static SYSDEV_ATTR(coupled, 0400, etr_coupled_show, NULL); | 1297 | static SYSDEV_ATTR(coupled, 0400, etr_coupled_show, NULL); |
1299 | 1298 | ||
1300 | static ssize_t etr_local_time_show(struct sys_device *dev, | 1299 | static ssize_t etr_local_time_show(struct sys_device *dev, |
1301 | struct sysdev_attribute *attr, char *buf) | 1300 | struct sysdev_attribute *attr, char *buf) |
1302 | { | 1301 | { |
1303 | struct etr_aib *aib = etr_aib_from_dev(dev); | 1302 | struct etr_aib *aib = etr_aib_from_dev(dev); |
1304 | 1303 | ||
1305 | if (!aib || !aib->slsw.v3) | 1304 | if (!aib || !aib->slsw.v3) |
1306 | return -ENODATA; | 1305 | return -ENODATA; |
1307 | return sprintf(buf, "%i\n", aib->edf3.blto); | 1306 | return sprintf(buf, "%i\n", aib->edf3.blto); |
1308 | } | 1307 | } |
1309 | 1308 | ||
1310 | static SYSDEV_ATTR(local_time, 0400, etr_local_time_show, NULL); | 1309 | static SYSDEV_ATTR(local_time, 0400, etr_local_time_show, NULL); |
1311 | 1310 | ||
1312 | static ssize_t etr_utc_offset_show(struct sys_device *dev, | 1311 | static ssize_t etr_utc_offset_show(struct sys_device *dev, |
1313 | struct sysdev_attribute *attr, char *buf) | 1312 | struct sysdev_attribute *attr, char *buf) |
1314 | { | 1313 | { |
1315 | struct etr_aib *aib = etr_aib_from_dev(dev); | 1314 | struct etr_aib *aib = etr_aib_from_dev(dev); |
1316 | 1315 | ||
1317 | if (!aib || !aib->slsw.v3) | 1316 | if (!aib || !aib->slsw.v3) |
1318 | return -ENODATA; | 1317 | return -ENODATA; |
1319 | return sprintf(buf, "%i\n", aib->edf3.buo); | 1318 | return sprintf(buf, "%i\n", aib->edf3.buo); |
1320 | } | 1319 | } |
1321 | 1320 | ||
1322 | static SYSDEV_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL); | 1321 | static SYSDEV_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL); |
1323 | 1322 | ||
1324 | static struct sysdev_attribute *etr_port_attributes[] = { | 1323 | static struct sysdev_attribute *etr_port_attributes[] = { |
1325 | &attr_online, | 1324 | &attr_online, |
1326 | &attr_stepping_control, | 1325 | &attr_stepping_control, |
1327 | &attr_state_code, | 1326 | &attr_state_code, |
1328 | &attr_untuned, | 1327 | &attr_untuned, |
1329 | &attr_network, | 1328 | &attr_network, |
1330 | &attr_id, | 1329 | &attr_id, |
1331 | &attr_port, | 1330 | &attr_port, |
1332 | &attr_coupled, | 1331 | &attr_coupled, |
1333 | &attr_local_time, | 1332 | &attr_local_time, |
1334 | &attr_utc_offset, | 1333 | &attr_utc_offset, |
1335 | NULL | 1334 | NULL |
1336 | }; | 1335 | }; |
1337 | 1336 | ||
1338 | static int __init etr_register_port(struct sys_device *dev) | 1337 | static int __init etr_register_port(struct sys_device *dev) |
1339 | { | 1338 | { |
1340 | struct sysdev_attribute **attr; | 1339 | struct sysdev_attribute **attr; |
1341 | int rc; | 1340 | int rc; |
1342 | 1341 | ||
1343 | rc = sysdev_register(dev); | 1342 | rc = sysdev_register(dev); |
1344 | if (rc) | 1343 | if (rc) |
1345 | goto out; | 1344 | goto out; |
1346 | for (attr = etr_port_attributes; *attr; attr++) { | 1345 | for (attr = etr_port_attributes; *attr; attr++) { |
1347 | rc = sysdev_create_file(dev, *attr); | 1346 | rc = sysdev_create_file(dev, *attr); |
1348 | if (rc) | 1347 | if (rc) |
1349 | goto out_unreg; | 1348 | goto out_unreg; |
1350 | } | 1349 | } |
1351 | return 0; | 1350 | return 0; |
1352 | out_unreg: | 1351 | out_unreg: |
1353 | for (; attr >= etr_port_attributes; attr--) | 1352 | for (; attr >= etr_port_attributes; attr--) |
1354 | sysdev_remove_file(dev, *attr); | 1353 | sysdev_remove_file(dev, *attr); |
1355 | sysdev_unregister(dev); | 1354 | sysdev_unregister(dev); |
1356 | out: | 1355 | out: |
1357 | return rc; | 1356 | return rc; |
1358 | } | 1357 | } |
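
etr_register_port() uses a common unwind idiom: walk the attribute array forward, and on failure walk the same pointer backward so that the files already created are removed again (the entry whose creation failed is also passed to the remove call, which the sysfs code tolerates). A standalone sketch of the pattern with stand-in functions (generic names, not the sysdev API):

#include <stdio.h>
#include <string.h>

static int create(const char *name)	/* stand-in for sysdev_create_file */
{
	if (strcmp(name, "port") == 0)	/* simulate a failure */
		return -1;
	printf("create %s\n", name);
	return 0;
}

static void remove_file(const char *name)	/* stand-in for remove */
{
	printf("remove %s\n", name);
}

int main(void)
{
	static const char *attrs[] = { "online", "state_code", "port", NULL };
	const char **attr;

	for (attr = attrs; *attr; attr++)
		if (create(*attr))
			goto out_unreg;
	return 0;
out_unreg:
	/* attr points at the failed entry; unwind in reverse order. */
	for (; attr >= attrs; attr--)
		remove_file(*attr);
	return 1;
}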
1359 | 1358 | ||
1360 | static void __init etr_unregister_port(struct sys_device *dev) | 1359 | static void __init etr_unregister_port(struct sys_device *dev) |
1361 | { | 1360 | { |
1362 | struct sysdev_attribute **attr; | 1361 | struct sysdev_attribute **attr; |
1363 | 1362 | ||
1364 | for (attr = etr_port_attributes; *attr; attr++) | 1363 | for (attr = etr_port_attributes; *attr; attr++) |
1365 | sysdev_remove_file(dev, *attr); | 1364 | sysdev_remove_file(dev, *attr); |
1366 | sysdev_unregister(dev); | 1365 | sysdev_unregister(dev); |
1367 | } | 1366 | } |
1368 | 1367 | ||
1369 | static int __init etr_init_sysfs(void) | 1368 | static int __init etr_init_sysfs(void) |
1370 | { | 1369 | { |
1371 | int rc; | 1370 | int rc; |
1372 | 1371 | ||
1373 | rc = sysdev_class_register(&etr_sysclass); | 1372 | rc = sysdev_class_register(&etr_sysclass); |
1374 | if (rc) | 1373 | if (rc) |
1375 | goto out; | 1374 | goto out; |
1376 | rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_port); | 1375 | rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_port); |
1377 | if (rc) | 1376 | if (rc) |
1378 | goto out_unreg_class; | 1377 | goto out_unreg_class; |
1379 | rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_mode); | 1378 | rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_mode); |
1380 | if (rc) | 1379 | if (rc) |
1381 | goto out_remove_stepping_port; | 1380 | goto out_remove_stepping_port; |
1382 | rc = etr_register_port(&etr_port0_dev); | 1381 | rc = etr_register_port(&etr_port0_dev); |
1383 | if (rc) | 1382 | if (rc) |
1384 | goto out_remove_stepping_mode; | 1383 | goto out_remove_stepping_mode; |
1385 | rc = etr_register_port(&etr_port1_dev); | 1384 | rc = etr_register_port(&etr_port1_dev); |
1386 | if (rc) | 1385 | if (rc) |
1387 | goto out_remove_port0; | 1386 | goto out_remove_port0; |
1388 | return 0; | 1387 | return 0; |
1389 | 1388 | ||
1390 | out_remove_port0: | 1389 | out_remove_port0: |
1391 | etr_unregister_port(&etr_port0_dev); | 1390 | etr_unregister_port(&etr_port0_dev); |
1392 | out_remove_stepping_mode: | 1391 | out_remove_stepping_mode: |
1393 | sysdev_class_remove_file(&etr_sysclass, &attr_stepping_mode); | 1392 | sysdev_class_remove_file(&etr_sysclass, &attr_stepping_mode); |
1394 | out_remove_stepping_port: | 1393 | out_remove_stepping_port: |
1395 | sysdev_class_remove_file(&etr_sysclass, &attr_stepping_port); | 1394 | sysdev_class_remove_file(&etr_sysclass, &attr_stepping_port); |
1396 | out_unreg_class: | 1395 | out_unreg_class: |
1397 | sysdev_class_unregister(&etr_sysclass); | 1396 | sysdev_class_unregister(&etr_sysclass); |
1398 | out: | 1397 | out: |
1399 | return rc; | 1398 | return rc; |
1400 | } | 1399 | } |
1401 | 1400 | ||
1402 | device_initcall(etr_init_sysfs); | 1401 | device_initcall(etr_init_sysfs); |
1403 | 1402 | ||
1404 | /* | 1403 | /* |
1405 | * Server Time Protocol (STP) code. | 1404 | * Server Time Protocol (STP) code. |
1406 | */ | 1405 | */ |
1407 | static int stp_online; | 1406 | static int stp_online; |
1408 | static struct stp_sstpi stp_info; | 1407 | static struct stp_sstpi stp_info; |
1409 | static void *stp_page; | 1408 | static void *stp_page; |
1410 | 1409 | ||
1411 | static void stp_work_fn(struct work_struct *work); | 1410 | static void stp_work_fn(struct work_struct *work); |
1412 | static DEFINE_MUTEX(stp_work_mutex); | 1411 | static DEFINE_MUTEX(stp_work_mutex); |
1413 | static DECLARE_WORK(stp_work, stp_work_fn); | 1412 | static DECLARE_WORK(stp_work, stp_work_fn); |
1414 | static struct timer_list stp_timer; | 1413 | static struct timer_list stp_timer; |
1415 | 1414 | ||
1416 | static int __init early_parse_stp(char *p) | 1415 | static int __init early_parse_stp(char *p) |
1417 | { | 1416 | { |
1418 | if (strncmp(p, "off", 3) == 0) | 1417 | if (strncmp(p, "off", 3) == 0) |
1419 | stp_online = 0; | 1418 | stp_online = 0; |
1420 | else if (strncmp(p, "on", 2) == 0) | 1419 | else if (strncmp(p, "on", 2) == 0) |
1421 | stp_online = 1; | 1420 | stp_online = 1; |
1422 | return 0; | 1421 | return 0; |
1423 | } | 1422 | } |
1424 | early_param("stp", early_parse_stp); | 1423 | early_param("stp", early_parse_stp); |
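
Note that strncmp(p, "off", 3) is a prefix match, so any boot value starting with "off" or "on" selects that setting; this is common for early parameters but differs from an exact comparison. A two-line illustration (user-space, illustrative only):

#include <assert.h>
#include <string.h>

int main(void)
{
	assert(strncmp("offbeat", "off", 3) == 0);	/* prefix matches */
	assert(strcmp("offbeat", "off") != 0);		/* exact match does not */
	return 0;
}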
1425 | 1424 | ||
1426 | /* | 1425 | /* |
1427 | * Reset STP attachment. | 1426 | * Reset STP attachment. |
1428 | */ | 1427 | */ |
1429 | static void __init stp_reset(void) | 1428 | static void __init stp_reset(void) |
1430 | { | 1429 | { |
1431 | int rc; | 1430 | int rc; |
1432 | 1431 | ||
1433 | stp_page = (void *) get_zeroed_page(GFP_ATOMIC); | 1432 | stp_page = (void *) get_zeroed_page(GFP_ATOMIC); |
1434 | rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000); | 1433 | rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000); |
1435 | if (rc == 0) | 1434 | if (rc == 0) |
1436 | set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags); | 1435 | set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags); |
1437 | else if (stp_online) { | 1436 | else if (stp_online) { |
1438 | pr_warning("The real or virtual hardware system does " | 1437 | pr_warning("The real or virtual hardware system does " |
1439 | "not provide an STP interface\n"); | 1438 | "not provide an STP interface\n"); |
1440 | free_page((unsigned long) stp_page); | 1439 | free_page((unsigned long) stp_page); |
1441 | stp_page = NULL; | 1440 | stp_page = NULL; |
1442 | stp_online = 0; | 1441 | stp_online = 0; |
1443 | } | 1442 | } |
1444 | } | 1443 | } |
1445 | 1444 | ||
1446 | static void stp_timeout(unsigned long dummy) | 1445 | static void stp_timeout(unsigned long dummy) |
1447 | { | 1446 | { |
1448 | queue_work(time_sync_wq, &stp_work); | 1447 | queue_work(time_sync_wq, &stp_work); |
1449 | } | 1448 | } |
1450 | 1449 | ||
1451 | static int __init stp_init(void) | 1450 | static int __init stp_init(void) |
1452 | { | 1451 | { |
1453 | if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) | 1452 | if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) |
1454 | return 0; | 1453 | return 0; |
1455 | setup_timer(&stp_timer, stp_timeout, 0UL); | 1454 | setup_timer(&stp_timer, stp_timeout, 0UL); |
1456 | time_init_wq(); | 1455 | time_init_wq(); |
1457 | if (!stp_online) | 1456 | if (!stp_online) |
1458 | return 0; | 1457 | return 0; |
1459 | queue_work(time_sync_wq, &stp_work); | 1458 | queue_work(time_sync_wq, &stp_work); |
1460 | return 0; | 1459 | return 0; |
1461 | } | 1460 | } |
1462 | 1461 | ||
1463 | arch_initcall(stp_init); | 1462 | arch_initcall(stp_init); |
1464 | 1463 | ||
1465 | /* | 1464 | /* |
1466 | * STP timing alert. There are three causes: | 1465 | * STP timing alert. There are three causes: |
1467 | * 1) timing status change | 1466 | * 1) timing status change |
1468 | * 2) link availability change | 1467 | * 2) link availability change |
1469 | * 3) time control parameter change | 1468 | * 3) time control parameter change |
1470 | * In all three cases we are only interested in the clock source state. | 1469 | * In all three cases we are only interested in the clock source state. |
1471 | * If an STP clock source is now available, use it. | 1470 | * If an STP clock source is now available, use it. |
1472 | */ | 1471 | */ |
1473 | static void stp_timing_alert(struct stp_irq_parm *intparm) | 1472 | static void stp_timing_alert(struct stp_irq_parm *intparm) |
1474 | { | 1473 | { |
1475 | if (intparm->tsc || intparm->lac || intparm->tcpc) | 1474 | if (intparm->tsc || intparm->lac || intparm->tcpc) |
1476 | queue_work(time_sync_wq, &stp_work); | 1475 | queue_work(time_sync_wq, &stp_work); |
1477 | } | 1476 | } |
1478 | 1477 | ||
1479 | /* | 1478 | /* |
1480 | * STP sync check machine check. This is called when the timing state | 1479 | * STP sync check machine check. This is called when the timing state |
1481 | * changes from the synchronized state to the unsynchronized state. | 1480 | * changes from the synchronized state to the unsynchronized state. |
1482 | * After an STP sync check the clock is not in sync. The machine check | 1481 | * After an STP sync check the clock is not in sync. The machine check |
1483 | * is broadcast to all cpus at the same time. | 1482 | * is broadcast to all cpus at the same time. |
1484 | */ | 1483 | */ |
1485 | void stp_sync_check(void) | 1484 | void stp_sync_check(void) |
1486 | { | 1485 | { |
1487 | disable_sync_clock(NULL); | 1486 | disable_sync_clock(NULL); |
1488 | queue_work(time_sync_wq, &stp_work); | 1487 | queue_work(time_sync_wq, &stp_work); |
1489 | } | 1488 | } |
1490 | 1489 | ||
1491 | /* | 1490 | /* |
1492 | * STP island condition machine check. This is called when an attached | 1491 | * STP island condition machine check. This is called when an attached |
1493 | * server attempts to communicate over an STP link and the servers | 1492 | * server attempts to communicate over an STP link and the servers |
1494 | * have matching CTN ids and valid stratum-1 configurations, | 1493 | * have matching CTN ids and valid stratum-1 configurations, |
1495 | * but the configurations do not match. | 1494 | * but the configurations do not match. |
1496 | */ | 1495 | */ |
1497 | void stp_island_check(void) | 1496 | void stp_island_check(void) |
1498 | { | 1497 | { |
1499 | disable_sync_clock(NULL); | 1498 | disable_sync_clock(NULL); |
1500 | queue_work(time_sync_wq, &stp_work); | 1499 | queue_work(time_sync_wq, &stp_work); |
1501 | } | 1500 | } |
1502 | 1501 | ||
1503 | 1502 | ||
1504 | static int stp_sync_clock(void *data) | 1503 | static int stp_sync_clock(void *data) |
1505 | { | 1504 | { |
1506 | static int first; | 1505 | static int first; |
1507 | unsigned long long old_clock, delta; | 1506 | unsigned long long old_clock, delta; |
1508 | struct clock_sync_data *stp_sync; | 1507 | struct clock_sync_data *stp_sync; |
1509 | int rc; | 1508 | int rc; |
1510 | 1509 | ||
1511 | stp_sync = data; | 1510 | stp_sync = data; |
1512 | 1511 | ||
1513 | if (xchg(&first, 1) == 1) { | 1512 | if (xchg(&first, 1) == 1) { |
1514 | /* Slave */ | 1513 | /* Slave */ |
1515 | clock_sync_cpu(stp_sync); | 1514 | clock_sync_cpu(stp_sync); |
1516 | return 0; | 1515 | return 0; |
1517 | } | 1516 | } |
1518 | 1517 | ||
1519 | /* Wait until all other cpus entered the sync function. */ | 1518 | /* Wait until all other cpus entered the sync function. */ |
1520 | while (atomic_read(&stp_sync->cpus) != 0) | 1519 | while (atomic_read(&stp_sync->cpus) != 0) |
1521 | cpu_relax(); | 1520 | cpu_relax(); |
1522 | 1521 | ||
1523 | enable_sync_clock(); | 1522 | enable_sync_clock(); |
1524 | 1523 | ||
1525 | rc = 0; | 1524 | rc = 0; |
1526 | if (stp_info.todoff[0] || stp_info.todoff[1] || | 1525 | if (stp_info.todoff[0] || stp_info.todoff[1] || |
1527 | stp_info.todoff[2] || stp_info.todoff[3] || | 1526 | stp_info.todoff[2] || stp_info.todoff[3] || |
1528 | stp_info.tmd != 2) { | 1527 | stp_info.tmd != 2) { |
1529 | old_clock = get_clock(); | 1528 | old_clock = get_clock(); |
1530 | rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0); | 1529 | rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0); |
1531 | if (rc == 0) { | 1530 | if (rc == 0) { |
1532 | delta = adjust_time(old_clock, get_clock(), 0); | 1531 | delta = adjust_time(old_clock, get_clock(), 0); |
1533 | fixup_clock_comparator(delta); | 1532 | fixup_clock_comparator(delta); |
1534 | rc = chsc_sstpi(stp_page, &stp_info, | 1533 | rc = chsc_sstpi(stp_page, &stp_info, |
1535 | sizeof(struct stp_sstpi)); | 1534 | sizeof(struct stp_sstpi)); |
1536 | if (rc == 0 && stp_info.tmd != 2) | 1535 | if (rc == 0 && stp_info.tmd != 2) |
1537 | rc = -EAGAIN; | 1536 | rc = -EAGAIN; |
1538 | } | 1537 | } |
1539 | } | 1538 | } |
1540 | if (rc) { | 1539 | if (rc) { |
1541 | disable_sync_clock(NULL); | 1540 | disable_sync_clock(NULL); |
1542 | stp_sync->in_sync = -EAGAIN; | 1541 | stp_sync->in_sync = -EAGAIN; |
1543 | } else | 1542 | } else |
1544 | stp_sync->in_sync = 1; | 1543 | stp_sync->in_sync = 1; |
1545 | xchg(&first, 0); | 1544 | xchg(&first, 0); |
1546 | return 0; | 1545 | return 0; |
1547 | } | 1546 | } |
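
Under stop_machine() every online cpu enters stp_sync_clock() in parallel, and xchg(&first, 1) acts as a one-shot election: exactly one cpu reads back 0 and becomes the master that steers the clock, while the rest take the slave path through clock_sync_cpu(). A user-space sketch of the idiom, with C11 atomic_exchange() standing in for the kernel's xchg():

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int first;

static void *contender(void *arg)
{
	/* atomic_exchange() returns the previous value, so only the
	 * single thread that observes 0 wins the election. */
	if (atomic_exchange(&first, 1) == 0)
		printf("thread %ld: master\n", (long) arg);
	else
		printf("thread %ld: slave\n", (long) arg);
	return NULL;
}

int main(void)			/* build with -pthread */
{
	pthread_t t[4];
	long i;

	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, contender, (void *) i);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}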
1548 | 1547 | ||
1549 | /* | 1548 | /* |
1550 | * STP work. Check for the STP state and take over the clock | 1549 | * STP work. Check for the STP state and take over the clock |
1551 | * synchronization if the STP clock source is usable. | 1550 | * synchronization if the STP clock source is usable. |
1552 | */ | 1551 | */ |
1553 | static void stp_work_fn(struct work_struct *work) | 1552 | static void stp_work_fn(struct work_struct *work) |
1554 | { | 1553 | { |
1555 | struct clock_sync_data stp_sync; | 1554 | struct clock_sync_data stp_sync; |
1556 | int rc; | 1555 | int rc; |
1557 | 1556 | ||
1558 | /* Prevent concurrent execution. */ | 1557 | /* Prevent concurrent execution. */ |
1559 | mutex_lock(&stp_work_mutex); | 1558 | mutex_lock(&stp_work_mutex); |
1560 | 1559 | ||
1561 | if (!stp_online) { | 1560 | if (!stp_online) { |
1562 | chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000); | 1561 | chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000); |
1563 | del_timer_sync(&stp_timer); | 1562 | del_timer_sync(&stp_timer); |
1564 | goto out_unlock; | 1563 | goto out_unlock; |
1565 | } | 1564 | } |
1566 | 1565 | ||
1567 | rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0); | 1566 | rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0); |
1568 | if (rc) | 1567 | if (rc) |
1569 | goto out_unlock; | 1568 | goto out_unlock; |
1570 | 1569 | ||
1571 | rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); | 1570 | rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); |
1572 | if (rc || stp_info.c == 0) | 1571 | if (rc || stp_info.c == 0) |
1573 | goto out_unlock; | 1572 | goto out_unlock; |
1574 | 1573 | ||
1575 | /* Skip synchronization if the clock is already in sync. */ | 1574 | /* Skip synchronization if the clock is already in sync. */ |
1576 | if (check_sync_clock()) | 1575 | if (check_sync_clock()) |
1577 | goto out_unlock; | 1576 | goto out_unlock; |
1578 | 1577 | ||
1579 | memset(&stp_sync, 0, sizeof(stp_sync)); | 1578 | memset(&stp_sync, 0, sizeof(stp_sync)); |
1580 | get_online_cpus(); | 1579 | get_online_cpus(); |
1581 | atomic_set(&stp_sync.cpus, num_online_cpus() - 1); | 1580 | atomic_set(&stp_sync.cpus, num_online_cpus() - 1); |
1582 | stop_machine(stp_sync_clock, &stp_sync, cpu_online_mask); | 1581 | stop_machine(stp_sync_clock, &stp_sync, cpu_online_mask); |
1583 | put_online_cpus(); | 1582 | put_online_cpus(); |
1584 | 1583 | ||
1585 | if (!check_sync_clock()) | 1584 | if (!check_sync_clock()) |
1586 | /* | 1585 | /* |
1587 | * There is a usable clock but the synchronization failed. | 1586 | * There is a usable clock but the synchronization failed. |
1588 | * Retry after a second. | 1587 | * Retry after a second. |
1589 | */ | 1588 | */ |
1590 | mod_timer(&stp_timer, jiffies + HZ); | 1589 | mod_timer(&stp_timer, jiffies + HZ); |
1591 | 1590 | ||
1592 | out_unlock: | 1591 | out_unlock: |
1593 | mutex_unlock(&stp_work_mutex); | 1592 | mutex_unlock(&stp_work_mutex); |
1594 | } | 1593 | } |
1595 | 1594 | ||
1596 | /* | 1595 | /* |
1597 | * STP class sysfs interface functions | 1596 | * STP class sysfs interface functions |
1598 | */ | 1597 | */ |
1599 | static struct sysdev_class stp_sysclass = { | 1598 | static struct sysdev_class stp_sysclass = { |
1600 | .name = "stp", | 1599 | .name = "stp", |
1601 | }; | 1600 | }; |
1602 | 1601 | ||
1603 | static ssize_t stp_ctn_id_show(struct sysdev_class *class, | 1602 | static ssize_t stp_ctn_id_show(struct sysdev_class *class, |
1604 | struct sysdev_class_attribute *attr, | 1603 | struct sysdev_class_attribute *attr, |
1605 | char *buf) | 1604 | char *buf) |
1606 | { | 1605 | { |
1607 | if (!stp_online) | 1606 | if (!stp_online) |
1608 | return -ENODATA; | 1607 | return -ENODATA; |
1609 | return sprintf(buf, "%016llx\n", | 1608 | return sprintf(buf, "%016llx\n", |
1610 | *(unsigned long long *) stp_info.ctnid); | 1609 | *(unsigned long long *) stp_info.ctnid); |
1611 | } | 1610 | } |
1612 | 1611 | ||
1613 | static SYSDEV_CLASS_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL); | 1612 | static SYSDEV_CLASS_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL); |
1614 | 1613 | ||
1615 | static ssize_t stp_ctn_type_show(struct sysdev_class *class, | 1614 | static ssize_t stp_ctn_type_show(struct sysdev_class *class, |
1616 | struct sysdev_class_attribute *attr, | 1615 | struct sysdev_class_attribute *attr, |
1617 | char *buf) | 1616 | char *buf) |
1618 | { | 1617 | { |
1619 | if (!stp_online) | 1618 | if (!stp_online) |
1620 | return -ENODATA; | 1619 | return -ENODATA; |
1621 | return sprintf(buf, "%i\n", stp_info.ctn); | 1620 | return sprintf(buf, "%i\n", stp_info.ctn); |
1622 | } | 1621 | } |
1623 | 1622 | ||
1624 | static SYSDEV_CLASS_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL); | 1623 | static SYSDEV_CLASS_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL); |
1625 | 1624 | ||
1626 | static ssize_t stp_dst_offset_show(struct sysdev_class *class, | 1625 | static ssize_t stp_dst_offset_show(struct sysdev_class *class, |
1627 | struct sysdev_class_attribute *attr, | 1626 | struct sysdev_class_attribute *attr, |
1628 | char *buf) | 1627 | char *buf) |
1629 | { | 1628 | { |
1630 | if (!stp_online || !(stp_info.vbits & 0x2000)) | 1629 | if (!stp_online || !(stp_info.vbits & 0x2000)) |
1631 | return -ENODATA; | 1630 | return -ENODATA; |
1632 | return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto); | 1631 | return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto); |
1633 | } | 1632 | } |
1634 | 1633 | ||
1635 | static SYSDEV_CLASS_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL); | 1634 | static SYSDEV_CLASS_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL); |
1636 | 1635 | ||
1637 | static ssize_t stp_leap_seconds_show(struct sysdev_class *class, | 1636 | static ssize_t stp_leap_seconds_show(struct sysdev_class *class, |
1638 | struct sysdev_class_attribute *attr, | 1637 | struct sysdev_class_attribute *attr, |
1639 | char *buf) | 1638 | char *buf) |
1640 | { | 1639 | { |
1641 | if (!stp_online || !(stp_info.vbits & 0x8000)) | 1640 | if (!stp_online || !(stp_info.vbits & 0x8000)) |
1642 | return -ENODATA; | 1641 | return -ENODATA; |
1643 | return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps); | 1642 | return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps); |
1644 | } | 1643 | } |
1645 | 1644 | ||
1646 | static SYSDEV_CLASS_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL); | 1645 | static SYSDEV_CLASS_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL); |
1647 | 1646 | ||
1648 | static ssize_t stp_stratum_show(struct sysdev_class *class, | 1647 | static ssize_t stp_stratum_show(struct sysdev_class *class, |
1649 | struct sysdev_class_attribute *attr, | 1648 | struct sysdev_class_attribute *attr, |
1650 | char *buf) | 1649 | char *buf) |
1651 | { | 1650 | { |
1652 | if (!stp_online) | 1651 | if (!stp_online) |
1653 | return -ENODATA; | 1652 | return -ENODATA; |
1654 | return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum); | 1653 | return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum); |
1655 | } | 1654 | } |
1656 | 1655 | ||
1657 | static SYSDEV_CLASS_ATTR(stratum, 0400, stp_stratum_show, NULL); | 1656 | static SYSDEV_CLASS_ATTR(stratum, 0400, stp_stratum_show, NULL); |
1658 | 1657 | ||
1659 | static ssize_t stp_time_offset_show(struct sysdev_class *class, | 1658 | static ssize_t stp_time_offset_show(struct sysdev_class *class, |
1660 | struct sysdev_class_attribute *attr, | 1659 | struct sysdev_class_attribute *attr, |
1661 | char *buf) | 1660 | char *buf) |
1662 | { | 1661 | { |
1663 | if (!stp_online || !(stp_info.vbits & 0x0800)) | 1662 | if (!stp_online || !(stp_info.vbits & 0x0800)) |
1664 | return -ENODATA; | 1663 | return -ENODATA; |
1665 | return sprintf(buf, "%i\n", (int) stp_info.tto); | 1664 | return sprintf(buf, "%i\n", (int) stp_info.tto); |
1666 | } | 1665 | } |
1667 | 1666 | ||
1668 | static SYSDEV_CLASS_ATTR(time_offset, 0400, stp_time_offset_show, NULL); | 1667 | static SYSDEV_CLASS_ATTR(time_offset, 0400, stp_time_offset_show, NULL); |
1669 | 1668 | ||
1670 | static ssize_t stp_time_zone_offset_show(struct sysdev_class *class, | 1669 | static ssize_t stp_time_zone_offset_show(struct sysdev_class *class, |
1671 | struct sysdev_class_attribute *attr, | 1670 | struct sysdev_class_attribute *attr, |
1672 | char *buf) | 1671 | char *buf) |
1673 | { | 1672 | { |
1674 | if (!stp_online || !(stp_info.vbits & 0x4000)) | 1673 | if (!stp_online || !(stp_info.vbits & 0x4000)) |
1675 | return -ENODATA; | 1674 | return -ENODATA; |
1676 | return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo); | 1675 | return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo); |
1677 | } | 1676 | } |
1678 | 1677 | ||
1679 | static SYSDEV_CLASS_ATTR(time_zone_offset, 0400, | 1678 | static SYSDEV_CLASS_ATTR(time_zone_offset, 0400, |
1680 | stp_time_zone_offset_show, NULL); | 1679 | stp_time_zone_offset_show, NULL); |
1681 | 1680 | ||
1682 | static ssize_t stp_timing_mode_show(struct sysdev_class *class, | 1681 | static ssize_t stp_timing_mode_show(struct sysdev_class *class, |
1683 | struct sysdev_class_attribute *attr, | 1682 | struct sysdev_class_attribute *attr, |
1684 | char *buf) | 1683 | char *buf) |
1685 | { | 1684 | { |
1686 | if (!stp_online) | 1685 | if (!stp_online) |
1687 | return -ENODATA; | 1686 | return -ENODATA; |
1688 | return sprintf(buf, "%i\n", stp_info.tmd); | 1687 | return sprintf(buf, "%i\n", stp_info.tmd); |
1689 | } | 1688 | } |
1690 | 1689 | ||
1691 | static SYSDEV_CLASS_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL); | 1690 | static SYSDEV_CLASS_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL); |
1692 | 1691 | ||
1693 | static ssize_t stp_timing_state_show(struct sysdev_class *class, | 1692 | static ssize_t stp_timing_state_show(struct sysdev_class *class, |
1694 | struct sysdev_class_attribute *attr, | 1693 | struct sysdev_class_attribute *attr, |
1695 | char *buf) | 1694 | char *buf) |
1696 | { | 1695 | { |
1697 | if (!stp_online) | 1696 | if (!stp_online) |
1698 | return -ENODATA; | 1697 | return -ENODATA; |
1699 | return sprintf(buf, "%i\n", stp_info.tst); | 1698 | return sprintf(buf, "%i\n", stp_info.tst); |
1700 | } | 1699 | } |
1701 | 1700 | ||
1702 | static SYSDEV_CLASS_ATTR(timing_state, 0400, stp_timing_state_show, NULL); | 1701 | static SYSDEV_CLASS_ATTR(timing_state, 0400, stp_timing_state_show, NULL); |
1703 | 1702 | ||
1704 | static ssize_t stp_online_show(struct sysdev_class *class, | 1703 | static ssize_t stp_online_show(struct sysdev_class *class, |
1705 | struct sysdev_class_attribute *attr, | 1704 | struct sysdev_class_attribute *attr, |
1706 | char *buf) | 1705 | char *buf) |
1707 | { | 1706 | { |
1708 | return sprintf(buf, "%i\n", stp_online); | 1707 | return sprintf(buf, "%i\n", stp_online); |
1709 | } | 1708 | } |
1710 | 1709 | ||
1711 | static ssize_t stp_online_store(struct sysdev_class *class, | 1710 | static ssize_t stp_online_store(struct sysdev_class *class, |
1712 | struct sysdev_class_attribute *attr, | 1711 | struct sysdev_class_attribute *attr, |
1713 | const char *buf, size_t count) | 1712 | const char *buf, size_t count) |
1714 | { | 1713 | { |
1715 | unsigned int value; | 1714 | unsigned int value; |
1716 | 1715 | ||
1717 | value = simple_strtoul(buf, NULL, 0); | 1716 | value = simple_strtoul(buf, NULL, 0); |
1718 | if (value != 0 && value != 1) | 1717 | if (value != 0 && value != 1) |
1719 | return -EINVAL; | 1718 | return -EINVAL; |
1720 | if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) | 1719 | if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) |
1721 | return -EOPNOTSUPP; | 1720 | return -EOPNOTSUPP; |
1722 | mutex_lock(&clock_sync_mutex); | 1721 | mutex_lock(&clock_sync_mutex); |
1723 | stp_online = value; | 1722 | stp_online = value; |
1724 | if (stp_online) | 1723 | if (stp_online) |
1725 | set_bit(CLOCK_SYNC_STP, &clock_sync_flags); | 1724 | set_bit(CLOCK_SYNC_STP, &clock_sync_flags); |
1726 | else | 1725 | else |
1727 | clear_bit(CLOCK_SYNC_STP, &clock_sync_flags); | 1726 | clear_bit(CLOCK_SYNC_STP, &clock_sync_flags); |
1728 | queue_work(time_sync_wq, &stp_work); | 1727 | queue_work(time_sync_wq, &stp_work); |
1729 | mutex_unlock(&clock_sync_mutex); | 1728 | mutex_unlock(&clock_sync_mutex); |
1730 | return count; | 1729 | return count; |
1731 | } | 1730 | } |
1732 | 1731 | ||
1733 | /* | 1732 | /* |
1734 | * Can't use SYSDEV_CLASS_ATTR because the attribute should be named | 1733 | * Can't use SYSDEV_CLASS_ATTR because the attribute should be named |
1735 | * stp/online, but attr_online already exists in this file. | 1734 | * stp/online, but attr_online already exists in this file. |
1736 | */ | 1735 | */ |
1737 | static struct sysdev_class_attribute attr_stp_online = { | 1736 | static struct sysdev_class_attribute attr_stp_online = { |
1738 | .attr = { .name = "online", .mode = 0600 }, | 1737 | .attr = { .name = "online", .mode = 0600 }, |
1739 | .show = stp_online_show, | 1738 | .show = stp_online_show, |
1740 | .store = stp_online_store, | 1739 | .store = stp_online_store, |
1741 | }; | 1740 | }; |
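
The clash described in the comment is pure token pasting: both SYSDEV_ATTR() and SYSDEV_CLASS_ATTR() define a variable named attr_<name> (visible in the &attr_online and &attr_ctn_id references elsewhere in this file), so each attribute name can be used only once per file. A stubbed illustration of the collision (stand-in macro, not the real <linux/sysdev.h> definitions):

#include <stdio.h>

struct attr_stub { const char *name; };

#define DEFINE_ATTR(_name) struct attr_stub attr_##_name = { #_name }

DEFINE_ATTR(online);	/* what SYSDEV_ATTR(online, ...) above emits */
/*
 * DEFINE_ATTR(online); -- a second expansion, as SYSDEV_CLASS_ATTR(online,
 * ...) would produce, redefines attr_online and fails to compile; hence
 * the open-coded struct with .attr.name = "online" instead.
 */

int main(void)
{
	printf("%s\n", attr_online.name);
	return 0;
}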
1742 | 1741 | ||
1743 | static struct sysdev_class_attribute *stp_attributes[] = { | 1742 | static struct sysdev_class_attribute *stp_attributes[] = { |
1744 | &attr_ctn_id, | 1743 | &attr_ctn_id, |
1745 | &attr_ctn_type, | 1744 | &attr_ctn_type, |
1746 | &attr_dst_offset, | 1745 | &attr_dst_offset, |
1747 | &attr_leap_seconds, | 1746 | &attr_leap_seconds, |
1748 | &attr_stp_online, | 1747 | &attr_stp_online, |
1749 | &attr_stratum, | 1748 | &attr_stratum, |
1750 | &attr_time_offset, | 1749 | &attr_time_offset, |
1751 | &attr_time_zone_offset, | 1750 | &attr_time_zone_offset, |
1752 | &attr_timing_mode, | 1751 | &attr_timing_mode, |
1753 | &attr_timing_state, | 1752 | &attr_timing_state, |
1754 | NULL | 1753 | NULL |
1755 | }; | 1754 | }; |
1756 | 1755 | ||
1757 | static int __init stp_init_sysfs(void) | 1756 | static int __init stp_init_sysfs(void) |
1758 | { | 1757 | { |
1759 | struct sysdev_class_attribute **attr; | 1758 | struct sysdev_class_attribute **attr; |
1760 | int rc; | 1759 | int rc; |
1761 | 1760 | ||
1762 | rc = sysdev_class_register(&stp_sysclass); | 1761 | rc = sysdev_class_register(&stp_sysclass); |
1763 | if (rc) | 1762 | if (rc) |
1764 | goto out; | 1763 | goto out; |
1765 | for (attr = stp_attributes; *attr; attr++) { | 1764 | for (attr = stp_attributes; *attr; attr++) { |
1766 | rc = sysdev_class_create_file(&stp_sysclass, *attr); | 1765 | rc = sysdev_class_create_file(&stp_sysclass, *attr); |
1767 | if (rc) | 1766 | if (rc) |
1768 | goto out_unreg; | 1767 | goto out_unreg; |
1769 | } | 1768 | } |
1770 | return 0; | 1769 | return 0; |
1771 | out_unreg: | 1770 | out_unreg: |
1772 | for (; attr >= stp_attributes; attr--) | 1771 | for (; attr >= stp_attributes; attr--) |
1773 | sysdev_class_remove_file(&stp_sysclass, *attr); | 1772 | sysdev_class_remove_file(&stp_sysclass, *attr); |
1774 | sysdev_class_unregister(&stp_sysclass); | 1773 | sysdev_class_unregister(&stp_sysclass); |
1775 | out: | 1774 | out: |
1776 | return rc; | 1775 | return rc; |
1777 | } | 1776 | } |
1778 | 1777 | ||
1779 | device_initcall(stp_init_sysfs); | 1778 | device_initcall(stp_init_sysfs); |
1780 | 1779 |
arch/s390/kernel/topology.c
1 | /* | 1 | /* |
2 | * Copyright IBM Corp. 2007 | 2 | * Copyright IBM Corp. 2007 |
3 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | 3 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #define KMSG_COMPONENT "cpu" | 6 | #define KMSG_COMPONENT "cpu" |
7 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 7 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
8 | 8 | ||
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/device.h> | 12 | #include <linux/device.h> |
13 | #include <linux/bootmem.h> | 13 | #include <linux/bootmem.h> |
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/workqueue.h> | 15 | #include <linux/workqueue.h> |
16 | #include <linux/cpu.h> | 16 | #include <linux/cpu.h> |
17 | #include <linux/smp.h> | 17 | #include <linux/smp.h> |
18 | #include <linux/cpuset.h> | 18 | #include <linux/cpuset.h> |
19 | #include <asm/delay.h> | 19 | #include <asm/delay.h> |
20 | #include <asm/s390_ext.h> | ||
21 | 20 | ||
22 | #define PTF_HORIZONTAL (0UL) | 21 | #define PTF_HORIZONTAL (0UL) |
23 | #define PTF_VERTICAL (1UL) | 22 | #define PTF_VERTICAL (1UL) |
24 | #define PTF_CHECK (2UL) | 23 | #define PTF_CHECK (2UL) |
25 | 24 | ||
26 | struct mask_info { | 25 | struct mask_info { |
27 | struct mask_info *next; | 26 | struct mask_info *next; |
28 | unsigned char id; | 27 | unsigned char id; |
29 | cpumask_t mask; | 28 | cpumask_t mask; |
30 | }; | 29 | }; |
31 | 30 | ||
32 | static int topology_enabled = 1; | 31 | static int topology_enabled = 1; |
33 | static void topology_work_fn(struct work_struct *work); | 32 | static void topology_work_fn(struct work_struct *work); |
34 | static struct sysinfo_15_1_x *tl_info; | 33 | static struct sysinfo_15_1_x *tl_info; |
35 | static struct timer_list topology_timer; | 34 | static struct timer_list topology_timer; |
36 | static void set_topology_timer(void); | 35 | static void set_topology_timer(void); |
37 | static DECLARE_WORK(topology_work, topology_work_fn); | 36 | static DECLARE_WORK(topology_work, topology_work_fn); |
38 | /* topology_lock protects the core linked list */ | 37 | /* topology_lock protects the core linked list */ |
39 | static DEFINE_SPINLOCK(topology_lock); | 38 | static DEFINE_SPINLOCK(topology_lock); |
40 | 39 | ||
41 | static struct mask_info core_info; | 40 | static struct mask_info core_info; |
42 | cpumask_t cpu_core_map[NR_CPUS]; | 41 | cpumask_t cpu_core_map[NR_CPUS]; |
43 | unsigned char cpu_core_id[NR_CPUS]; | 42 | unsigned char cpu_core_id[NR_CPUS]; |
44 | 43 | ||
45 | #ifdef CONFIG_SCHED_BOOK | 44 | #ifdef CONFIG_SCHED_BOOK |
46 | static struct mask_info book_info; | 45 | static struct mask_info book_info; |
47 | cpumask_t cpu_book_map[NR_CPUS]; | 46 | cpumask_t cpu_book_map[NR_CPUS]; |
48 | unsigned char cpu_book_id[NR_CPUS]; | 47 | unsigned char cpu_book_id[NR_CPUS]; |
49 | #endif | 48 | #endif |
50 | 49 | ||
51 | static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) | 50 | static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) |
52 | { | 51 | { |
53 | cpumask_t mask; | 52 | cpumask_t mask; |
54 | 53 | ||
55 | cpumask_clear(&mask); | 54 | cpumask_clear(&mask); |
56 | if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) { | 55 | if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) { |
57 | cpumask_copy(&mask, cpumask_of(cpu)); | 56 | cpumask_copy(&mask, cpumask_of(cpu)); |
58 | return mask; | 57 | return mask; |
59 | } | 58 | } |
60 | while (info) { | 59 | while (info) { |
61 | if (cpumask_test_cpu(cpu, &info->mask)) { | 60 | if (cpumask_test_cpu(cpu, &info->mask)) { |
62 | mask = info->mask; | 61 | mask = info->mask; |
63 | break; | 62 | break; |
64 | } | 63 | } |
65 | info = info->next; | 64 | info = info->next; |
66 | } | 65 | } |
67 | if (cpumask_empty(&mask)) | 66 | if (cpumask_empty(&mask)) |
68 | cpumask_copy(&mask, cpumask_of(cpu)); | 67 | cpumask_copy(&mask, cpumask_of(cpu)); |
69 | return mask; | 68 | return mask; |
70 | } | 69 | } |
71 | 70 | ||
72 | static void add_cpus_to_mask(struct topology_cpu *tl_cpu, | 71 | static void add_cpus_to_mask(struct topology_cpu *tl_cpu, |
73 | struct mask_info *book, struct mask_info *core) | 72 | struct mask_info *book, struct mask_info *core) |
74 | { | 73 | { |
75 | unsigned int cpu; | 74 | unsigned int cpu; |
76 | 75 | ||
77 | for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS); | 76 | for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS); |
78 | cpu < TOPOLOGY_CPU_BITS; | 77 | cpu < TOPOLOGY_CPU_BITS; |
79 | cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1)) | 78 | cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1)) |
80 | { | 79 | { |
81 | unsigned int rcpu, lcpu; | 80 | unsigned int rcpu, lcpu; |
82 | 81 | ||
83 | rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin; | 82 | rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin; |
84 | for_each_present_cpu(lcpu) { | 83 | for_each_present_cpu(lcpu) { |
85 | if (cpu_logical_map(lcpu) != rcpu) | 84 | if (cpu_logical_map(lcpu) != rcpu) |
86 | continue; | 85 | continue; |
87 | #ifdef CONFIG_SCHED_BOOK | 86 | #ifdef CONFIG_SCHED_BOOK |
88 | cpumask_set_cpu(lcpu, &book->mask); | 87 | cpumask_set_cpu(lcpu, &book->mask); |
89 | cpu_book_id[lcpu] = book->id; | 88 | cpu_book_id[lcpu] = book->id; |
90 | #endif | 89 | #endif |
91 | cpumask_set_cpu(lcpu, &core->mask); | 90 | cpumask_set_cpu(lcpu, &core->mask); |
92 | cpu_core_id[lcpu] = core->id; | 91 | cpu_core_id[lcpu] = core->id; |
93 | smp_cpu_polarization[lcpu] = tl_cpu->pp; | 92 | smp_cpu_polarization[lcpu] = tl_cpu->pp; |
94 | } | 93 | } |
95 | } | 94 | } |
96 | } | 95 | } |
97 | 96 | ||
98 | static void clear_masks(void) | 97 | static void clear_masks(void) |
99 | { | 98 | { |
100 | struct mask_info *info; | 99 | struct mask_info *info; |
101 | 100 | ||
102 | info = &core_info; | 101 | info = &core_info; |
103 | while (info) { | 102 | while (info) { |
104 | cpumask_clear(&info->mask); | 103 | cpumask_clear(&info->mask); |
105 | info = info->next; | 104 | info = info->next; |
106 | } | 105 | } |
107 | #ifdef CONFIG_SCHED_BOOK | 106 | #ifdef CONFIG_SCHED_BOOK |
108 | info = &book_info; | 107 | info = &book_info; |
109 | while (info) { | 108 | while (info) { |
110 | cpumask_clear(&info->mask); | 109 | cpumask_clear(&info->mask); |
111 | info = info->next; | 110 | info = info->next; |
112 | } | 111 | } |
113 | #endif | 112 | #endif |
114 | } | 113 | } |
115 | 114 | ||
116 | static union topology_entry *next_tle(union topology_entry *tle) | 115 | static union topology_entry *next_tle(union topology_entry *tle) |
117 | { | 116 | { |
118 | if (!tle->nl) | 117 | if (!tle->nl) |
119 | return (union topology_entry *)((struct topology_cpu *)tle + 1); | 118 | return (union topology_entry *)((struct topology_cpu *)tle + 1); |
120 | return (union topology_entry *)((struct topology_container *)tle + 1); | 119 | return (union topology_entry *)((struct topology_container *)tle + 1); |
121 | } | 120 | } |
122 | 121 | ||
123 | static void tl_to_cores(struct sysinfo_15_1_x *info) | 122 | static void tl_to_cores(struct sysinfo_15_1_x *info) |
124 | { | 123 | { |
125 | #ifdef CONFIG_SCHED_BOOK | 124 | #ifdef CONFIG_SCHED_BOOK |
126 | struct mask_info *book = &book_info; | 125 | struct mask_info *book = &book_info; |
127 | #else | 126 | #else |
128 | struct mask_info *book = NULL; | 127 | struct mask_info *book = NULL; |
129 | #endif | 128 | #endif |
130 | struct mask_info *core = &core_info; | 129 | struct mask_info *core = &core_info; |
131 | union topology_entry *tle, *end; | 130 | union topology_entry *tle, *end; |
132 | 131 | ||
133 | 132 | ||
134 | spin_lock_irq(&topology_lock); | 133 | spin_lock_irq(&topology_lock); |
135 | clear_masks(); | 134 | clear_masks(); |
136 | tle = info->tle; | 135 | tle = info->tle; |
137 | end = (union topology_entry *)((unsigned long)info + info->length); | 136 | end = (union topology_entry *)((unsigned long)info + info->length); |
138 | while (tle < end) { | 137 | while (tle < end) { |
139 | switch (tle->nl) { | 138 | switch (tle->nl) { |
140 | #ifdef CONFIG_SCHED_BOOK | 139 | #ifdef CONFIG_SCHED_BOOK |
141 | case 2: | 140 | case 2: |
142 | book = book->next; | 141 | book = book->next; |
143 | book->id = tle->container.id; | 142 | book->id = tle->container.id; |
144 | break; | 143 | break; |
145 | #endif | 144 | #endif |
146 | case 1: | 145 | case 1: |
147 | core = core->next; | 146 | core = core->next; |
148 | core->id = tle->container.id; | 147 | core->id = tle->container.id; |
149 | break; | 148 | break; |
150 | case 0: | 149 | case 0: |
151 | add_cpus_to_mask(&tle->cpu, book, core); | 150 | add_cpus_to_mask(&tle->cpu, book, core); |
152 | break; | 151 | break; |
153 | default: | 152 | default: |
154 | clear_masks(); | 153 | clear_masks(); |
155 | goto out; | 154 | goto out; |
156 | } | 155 | } |
157 | tle = next_tle(tle); | 156 | tle = next_tle(tle); |
158 | } | 157 | } |
159 | out: | 158 | out: |
160 | spin_unlock_irq(&topology_lock); | 159 | spin_unlock_irq(&topology_lock); |
161 | } | 160 | } |
162 | 161 | ||
163 | static void topology_update_polarization_simple(void) | 162 | static void topology_update_polarization_simple(void) |
164 | { | 163 | { |
165 | int cpu; | 164 | int cpu; |
166 | 165 | ||
167 | mutex_lock(&smp_cpu_state_mutex); | 166 | mutex_lock(&smp_cpu_state_mutex); |
168 | for_each_possible_cpu(cpu) | 167 | for_each_possible_cpu(cpu) |
169 | smp_cpu_polarization[cpu] = POLARIZATION_HRZ; | 168 | smp_cpu_polarization[cpu] = POLARIZATION_HRZ; |
170 | mutex_unlock(&smp_cpu_state_mutex); | 169 | mutex_unlock(&smp_cpu_state_mutex); |
171 | } | 170 | } |
172 | 171 | ||
173 | static int ptf(unsigned long fc) | 172 | static int ptf(unsigned long fc) |
174 | { | 173 | { |
175 | int rc; | 174 | int rc; |
176 | 175 | ||
177 | asm volatile( | 176 | asm volatile( |
178 | " .insn rre,0xb9a20000,%1,%1\n" | 177 | " .insn rre,0xb9a20000,%1,%1\n" |
179 | " ipm %0\n" | 178 | " ipm %0\n" |
180 | " srl %0,28\n" | 179 | " srl %0,28\n" |
181 | : "=d" (rc) | 180 | : "=d" (rc) |
182 | : "d" (fc) : "cc"); | 181 | : "d" (fc) : "cc"); |
183 | return rc; | 182 | return rc; |
184 | } | 183 | } |
185 | 184 | ||
186 | int topology_set_cpu_management(int fc) | 185 | int topology_set_cpu_management(int fc) |
187 | { | 186 | { |
188 | int cpu; | 187 | int cpu; |
189 | int rc; | 188 | int rc; |
190 | 189 | ||
191 | if (!MACHINE_HAS_TOPOLOGY) | 190 | if (!MACHINE_HAS_TOPOLOGY) |
192 | return -EOPNOTSUPP; | 191 | return -EOPNOTSUPP; |
193 | if (fc) | 192 | if (fc) |
194 | rc = ptf(PTF_VERTICAL); | 193 | rc = ptf(PTF_VERTICAL); |
195 | else | 194 | else |
196 | rc = ptf(PTF_HORIZONTAL); | 195 | rc = ptf(PTF_HORIZONTAL); |
197 | if (rc) | 196 | if (rc) |
198 | return -EBUSY; | 197 | return -EBUSY; |
199 | for_each_possible_cpu(cpu) | 198 | for_each_possible_cpu(cpu) |
200 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; | 199 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; |
201 | return rc; | 200 | return rc; |
202 | } | 201 | } |
203 | 202 | ||
204 | static void update_cpu_core_map(void) | 203 | static void update_cpu_core_map(void) |
205 | { | 204 | { |
206 | unsigned long flags; | 205 | unsigned long flags; |
207 | int cpu; | 206 | int cpu; |
208 | 207 | ||
209 | spin_lock_irqsave(&topology_lock, flags); | 208 | spin_lock_irqsave(&topology_lock, flags); |
210 | for_each_possible_cpu(cpu) { | 209 | for_each_possible_cpu(cpu) { |
211 | cpu_core_map[cpu] = cpu_group_map(&core_info, cpu); | 210 | cpu_core_map[cpu] = cpu_group_map(&core_info, cpu); |
212 | #ifdef CONFIG_SCHED_BOOK | 211 | #ifdef CONFIG_SCHED_BOOK |
213 | cpu_book_map[cpu] = cpu_group_map(&book_info, cpu); | 212 | cpu_book_map[cpu] = cpu_group_map(&book_info, cpu); |
214 | #endif | 213 | #endif |
215 | } | 214 | } |
216 | spin_unlock_irqrestore(&topology_lock, flags); | 215 | spin_unlock_irqrestore(&topology_lock, flags); |
217 | } | 216 | } |
218 | 217 | ||
219 | void store_topology(struct sysinfo_15_1_x *info) | 218 | void store_topology(struct sysinfo_15_1_x *info) |
220 | { | 219 | { |
221 | #ifdef CONFIG_SCHED_BOOK | 220 | #ifdef CONFIG_SCHED_BOOK |
222 | int rc; | 221 | int rc; |
223 | 222 | ||
224 | rc = stsi(info, 15, 1, 3); | 223 | rc = stsi(info, 15, 1, 3); |
225 | if (rc != -ENOSYS) | 224 | if (rc != -ENOSYS) |
226 | return; | 225 | return; |
227 | #endif | 226 | #endif |
228 | stsi(info, 15, 1, 2); | 227 | stsi(info, 15, 1, 2); |
229 | } | 228 | } |
230 | 229 | ||
231 | int arch_update_cpu_topology(void) | 230 | int arch_update_cpu_topology(void) |
232 | { | 231 | { |
233 | struct sysinfo_15_1_x *info = tl_info; | 232 | struct sysinfo_15_1_x *info = tl_info; |
234 | struct sys_device *sysdev; | 233 | struct sys_device *sysdev; |
235 | int cpu; | 234 | int cpu; |
236 | 235 | ||
237 | if (!MACHINE_HAS_TOPOLOGY) { | 236 | if (!MACHINE_HAS_TOPOLOGY) { |
238 | update_cpu_core_map(); | 237 | update_cpu_core_map(); |
239 | topology_update_polarization_simple(); | 238 | topology_update_polarization_simple(); |
240 | return 0; | 239 | return 0; |
241 | } | 240 | } |
242 | store_topology(info); | 241 | store_topology(info); |
243 | tl_to_cores(info); | 242 | tl_to_cores(info); |
244 | update_cpu_core_map(); | 243 | update_cpu_core_map(); |
245 | for_each_online_cpu(cpu) { | 244 | for_each_online_cpu(cpu) { |
246 | sysdev = get_cpu_sysdev(cpu); | 245 | sysdev = get_cpu_sysdev(cpu); |
247 | kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); | 246 | kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); |
248 | } | 247 | } |
249 | return 1; | 248 | return 1; |
250 | } | 249 | } |
251 | 250 | ||
252 | static void topology_work_fn(struct work_struct *work) | 251 | static void topology_work_fn(struct work_struct *work) |
253 | { | 252 | { |
254 | rebuild_sched_domains(); | 253 | rebuild_sched_domains(); |
255 | } | 254 | } |
256 | 255 | ||
257 | void topology_schedule_update(void) | 256 | void topology_schedule_update(void) |
258 | { | 257 | { |
259 | schedule_work(&topology_work); | 258 | schedule_work(&topology_work); |
260 | } | 259 | } |
261 | 260 | ||
262 | static void topology_timer_fn(unsigned long ignored) | 261 | static void topology_timer_fn(unsigned long ignored) |
263 | { | 262 | { |
264 | if (ptf(PTF_CHECK)) | 263 | if (ptf(PTF_CHECK)) |
265 | topology_schedule_update(); | 264 | topology_schedule_update(); |
266 | set_topology_timer(); | 265 | set_topology_timer(); |
267 | } | 266 | } |
268 | 267 | ||
269 | static void set_topology_timer(void) | 268 | static void set_topology_timer(void) |
270 | { | 269 | { |
271 | topology_timer.function = topology_timer_fn; | 270 | topology_timer.function = topology_timer_fn; |
272 | topology_timer.data = 0; | 271 | topology_timer.data = 0; |
273 | topology_timer.expires = jiffies + 60 * HZ; | 272 | topology_timer.expires = jiffies + 60 * HZ; |
274 | add_timer(&topology_timer); | 273 | add_timer(&topology_timer); |
275 | } | 274 | } |
276 | 275 | ||
277 | static int __init early_parse_topology(char *p) | 276 | static int __init early_parse_topology(char *p) |
278 | { | 277 | { |
279 | if (strncmp(p, "off", 3)) | 278 | if (strncmp(p, "off", 3)) |
280 | return 0; | 279 | return 0; |
281 | topology_enabled = 0; | 280 | topology_enabled = 0; |
282 | return 0; | 281 | return 0; |
283 | } | 282 | } |
284 | early_param("topology", early_parse_topology); | 283 | early_param("topology", early_parse_topology); |
285 | 284 | ||
286 | static int __init init_topology_update(void) | 285 | static int __init init_topology_update(void) |
287 | { | 286 | { |
288 | int rc; | 287 | int rc; |
289 | 288 | ||
290 | rc = 0; | 289 | rc = 0; |
291 | if (!MACHINE_HAS_TOPOLOGY) { | 290 | if (!MACHINE_HAS_TOPOLOGY) { |
292 | topology_update_polarization_simple(); | 291 | topology_update_polarization_simple(); |
293 | goto out; | 292 | goto out; |
294 | } | 293 | } |
295 | init_timer_deferrable(&topology_timer); | 294 | init_timer_deferrable(&topology_timer); |
296 | set_topology_timer(); | 295 | set_topology_timer(); |
297 | out: | 296 | out: |
298 | update_cpu_core_map(); | 297 | update_cpu_core_map(); |
299 | return rc; | 298 | return rc; |
300 | } | 299 | } |
301 | __initcall(init_topology_update); | 300 | __initcall(init_topology_update); |
302 | 301 | ||
303 | static void alloc_masks(struct sysinfo_15_1_x *info, struct mask_info *mask, | 302 | static void alloc_masks(struct sysinfo_15_1_x *info, struct mask_info *mask, |
304 | int offset) | 303 | int offset) |
305 | { | 304 | { |
306 | int i, nr_masks; | 305 | int i, nr_masks; |
307 | 306 | ||
308 | nr_masks = info->mag[TOPOLOGY_NR_MAG - offset]; | 307 | nr_masks = info->mag[TOPOLOGY_NR_MAG - offset]; |
309 | for (i = 0; i < info->mnest - offset; i++) | 308 | for (i = 0; i < info->mnest - offset; i++) |
310 | nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i]; | 309 | nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i]; |
311 | nr_masks = max(nr_masks, 1); | 310 | nr_masks = max(nr_masks, 1); |
312 | for (i = 0; i < nr_masks; i++) { | 311 | for (i = 0; i < nr_masks; i++) { |
313 | mask->next = alloc_bootmem(sizeof(struct mask_info)); | 312 | mask->next = alloc_bootmem(sizeof(struct mask_info)); |
314 | mask = mask->next; | 313 | mask = mask->next; |
315 | } | 314 | } |
316 | } | 315 | } |
317 | 316 | ||
318 | void __init s390_init_cpu_topology(void) | 317 | void __init s390_init_cpu_topology(void) |
319 | { | 318 | { |
320 | struct sysinfo_15_1_x *info; | 319 | struct sysinfo_15_1_x *info; |
321 | int i; | 320 | int i; |
322 | 321 | ||
323 | if (!MACHINE_HAS_TOPOLOGY) | 322 | if (!MACHINE_HAS_TOPOLOGY) |
324 | return; | 323 | return; |
325 | tl_info = alloc_bootmem_pages(PAGE_SIZE); | 324 | tl_info = alloc_bootmem_pages(PAGE_SIZE); |
326 | info = tl_info; | 325 | info = tl_info; |
327 | store_topology(info); | 326 | store_topology(info); |
328 | pr_info("The CPU configuration topology of the machine is:"); | 327 | pr_info("The CPU configuration topology of the machine is:"); |
329 | for (i = 0; i < TOPOLOGY_NR_MAG; i++) | 328 | for (i = 0; i < TOPOLOGY_NR_MAG; i++) |
330 | printk(" %d", info->mag[i]); | 329 | printk(" %d", info->mag[i]); |
331 | printk(" / %d\n", info->mnest); | 330 | printk(" / %d\n", info->mnest); |
332 | alloc_masks(info, &core_info, 2); | 331 | alloc_masks(info, &core_info, 2); |
333 | #ifdef CONFIG_SCHED_BOOK | 332 | #ifdef CONFIG_SCHED_BOOK |
334 | alloc_masks(info, &book_info, 3); | 333 | alloc_masks(info, &book_info, 3); |
335 | #endif | 334 | #endif |
336 | } | 335 | } |
337 | 336 |
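
cpu_group_map() above answers "which CPUs share a core (or book) with this one?" by walking the mask_info chain for the first mask containing the cpu, falling back to a singleton mask when topology is disabled or the cpu appears in no recorded mask. Here is a compile-and-run sketch of the same lookup, using a plain uint64_t bit mask as a stand-in for cpumask_t; the struct layout mirrors the one above, but the helpers and values are illustrative, not kernel API:

    #include <stdint.h>
    #include <stdio.h>

    struct mask_info {
            struct mask_info *next;
            unsigned char id;
            uint64_t mask;          /* stand-in for cpumask_t */
    };

    /* Return the group mask containing cpu, or a singleton as fallback. */
    static uint64_t cpu_group_map(const struct mask_info *info, unsigned int cpu)
    {
            for (; info; info = info->next)
                    if (info->mask & (1ULL << cpu))
                            return info->mask;
            return 1ULL << cpu;     /* cpu in no mask: it is its own group */
    }

    int main(void)
    {
            struct mask_info core1 = { NULL, 1, 0x0cULL };   /* cpus 2,3 */
            struct mask_info core0 = { &core1, 0, 0x03ULL }; /* cpus 0,1 */

            printf("cpu 3 -> %#llx\n",
                   (unsigned long long)cpu_group_map(&core0, 3));
            printf("cpu 5 -> %#llx\n",
                   (unsigned long long)cpu_group_map(&core0, 5));
            return 0;
    }

This prints 0xc for cpu 3 (it shares a mask with cpu 2) and 0x20 for cpu 5 (no mask matches, so it becomes a group of one) — the same fallback the cpumask_empty() check above provides.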
arch/s390/kernel/traps.c
1 | /* | 1 | /* |
2 | * arch/s390/kernel/traps.c | 2 | * arch/s390/kernel/traps.c |
3 | * | 3 | * |
4 | * S390 version | 4 | * S390 version |
5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation |
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | 6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), |
7 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | 7 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), |
8 | * | 8 | * |
9 | * Derived from "arch/i386/kernel/traps.c" | 9 | * Derived from "arch/i386/kernel/traps.c" |
10 | * Copyright (C) 1991, 1992 Linus Torvalds | 10 | * Copyright (C) 1991, 1992 Linus Torvalds |
11 | */ | 11 | */ |
12 | 12 | ||
13 | /* | 13 | /* |
14 | * 'Traps.c' handles hardware traps and faults after we have saved some | 14 | * 'Traps.c' handles hardware traps and faults after we have saved some |
15 | * state in 'asm.s'. | 15 | * state in 'asm.s'. |
16 | */ | 16 | */ |
17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/string.h> | 19 | #include <linux/string.h> |
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <linux/tracehook.h> | 21 | #include <linux/tracehook.h> |
22 | #include <linux/timer.h> | 22 | #include <linux/timer.h> |
23 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
24 | #include <linux/smp.h> | 24 | #include <linux/smp.h> |
25 | #include <linux/init.h> | 25 | #include <linux/init.h> |
26 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
27 | #include <linux/seq_file.h> | 27 | #include <linux/seq_file.h> |
28 | #include <linux/delay.h> | 28 | #include <linux/delay.h> |
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/kdebug.h> | 30 | #include <linux/kdebug.h> |
31 | #include <linux/kallsyms.h> | 31 | #include <linux/kallsyms.h> |
32 | #include <linux/reboot.h> | 32 | #include <linux/reboot.h> |
33 | #include <linux/kprobes.h> | 33 | #include <linux/kprobes.h> |
34 | #include <linux/bug.h> | 34 | #include <linux/bug.h> |
35 | #include <linux/utsname.h> | 35 | #include <linux/utsname.h> |
36 | #include <asm/system.h> | 36 | #include <asm/system.h> |
37 | #include <asm/uaccess.h> | 37 | #include <asm/uaccess.h> |
38 | #include <asm/io.h> | 38 | #include <asm/io.h> |
39 | #include <asm/atomic.h> | 39 | #include <asm/atomic.h> |
40 | #include <asm/mathemu.h> | 40 | #include <asm/mathemu.h> |
41 | #include <asm/cpcmd.h> | 41 | #include <asm/cpcmd.h> |
42 | #include <asm/s390_ext.h> | ||
43 | #include <asm/lowcore.h> | 42 | #include <asm/lowcore.h> |
44 | #include <asm/debug.h> | 43 | #include <asm/debug.h> |
45 | #include "entry.h" | 44 | #include "entry.h" |
46 | 45 | ||
47 | pgm_check_handler_t *pgm_check_table[128]; | 46 | pgm_check_handler_t *pgm_check_table[128]; |
48 | 47 | ||
49 | int show_unhandled_signals; | 48 | int show_unhandled_signals; |
50 | 49 | ||
51 | extern pgm_check_handler_t do_protection_exception; | 50 | extern pgm_check_handler_t do_protection_exception; |
52 | extern pgm_check_handler_t do_dat_exception; | 51 | extern pgm_check_handler_t do_dat_exception; |
53 | extern pgm_check_handler_t do_asce_exception; | 52 | extern pgm_check_handler_t do_asce_exception; |
54 | 53 | ||
55 | #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) | 54 | #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) |
56 | 55 | ||
57 | #ifndef CONFIG_64BIT | 56 | #ifndef CONFIG_64BIT |
58 | #define LONG "%08lx " | 57 | #define LONG "%08lx " |
59 | #define FOURLONG "%08lx %08lx %08lx %08lx\n" | 58 | #define FOURLONG "%08lx %08lx %08lx %08lx\n" |
60 | static int kstack_depth_to_print = 12; | 59 | static int kstack_depth_to_print = 12; |
61 | #else /* CONFIG_64BIT */ | 60 | #else /* CONFIG_64BIT */ |
62 | #define LONG "%016lx " | 61 | #define LONG "%016lx " |
63 | #define FOURLONG "%016lx %016lx %016lx %016lx\n" | 62 | #define FOURLONG "%016lx %016lx %016lx %016lx\n" |
64 | static int kstack_depth_to_print = 20; | 63 | static int kstack_depth_to_print = 20; |
65 | #endif /* CONFIG_64BIT */ | 64 | #endif /* CONFIG_64BIT */ |
66 | 65 | ||
67 | /* | 66 | /* |
68 | * For show_trace we have three different stacks to consider: | 67 | * For show_trace we have three different stacks to consider: |
69 | * - the panic stack which is used if the kernel stack has overflowed | 68 | * - the panic stack which is used if the kernel stack has overflowed |
70 | * - the asynchronous interrupt stack (cpu related) | 69 | * - the asynchronous interrupt stack (cpu related) |
71 | * - the synchronous kernel stack (process related) | 70 | * - the synchronous kernel stack (process related) |
72 | * The stack trace can start at any of the three stacks and can potentially | 71 | * The stack trace can start at any of the three stacks and can potentially |
73 | * touch all of them. The order is: panic stack, async stack, sync stack. | 72 | * touch all of them. The order is: panic stack, async stack, sync stack. |
74 | */ | 73 | */ |
75 | static unsigned long | 74 | static unsigned long |
76 | __show_trace(unsigned long sp, unsigned long low, unsigned long high) | 75 | __show_trace(unsigned long sp, unsigned long low, unsigned long high) |
77 | { | 76 | { |
78 | struct stack_frame *sf; | 77 | struct stack_frame *sf; |
79 | struct pt_regs *regs; | 78 | struct pt_regs *regs; |
80 | 79 | ||
81 | while (1) { | 80 | while (1) { |
82 | sp = sp & PSW_ADDR_INSN; | 81 | sp = sp & PSW_ADDR_INSN; |
83 | if (sp < low || sp > high - sizeof(*sf)) | 82 | if (sp < low || sp > high - sizeof(*sf)) |
84 | return sp; | 83 | return sp; |
85 | sf = (struct stack_frame *) sp; | 84 | sf = (struct stack_frame *) sp; |
86 | printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); | 85 | printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); |
87 | print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN); | 86 | print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN); |
88 | /* Follow the backchain. */ | 87 | /* Follow the backchain. */ |
89 | while (1) { | 88 | while (1) { |
90 | low = sp; | 89 | low = sp; |
91 | sp = sf->back_chain & PSW_ADDR_INSN; | 90 | sp = sf->back_chain & PSW_ADDR_INSN; |
92 | if (!sp) | 91 | if (!sp) |
93 | break; | 92 | break; |
94 | if (sp <= low || sp > high - sizeof(*sf)) | 93 | if (sp <= low || sp > high - sizeof(*sf)) |
95 | return sp; | 94 | return sp; |
96 | sf = (struct stack_frame *) sp; | 95 | sf = (struct stack_frame *) sp; |
97 | printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); | 96 | printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); |
98 | print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN); | 97 | print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN); |
99 | } | 98 | } |
100 | /* Zero backchain detected, check for interrupt frame. */ | 99 | /* Zero backchain detected, check for interrupt frame. */ |
101 | sp = (unsigned long) (sf + 1); | 100 | sp = (unsigned long) (sf + 1); |
102 | if (sp <= low || sp > high - sizeof(*regs)) | 101 | if (sp <= low || sp > high - sizeof(*regs)) |
103 | return sp; | 102 | return sp; |
104 | regs = (struct pt_regs *) sp; | 103 | regs = (struct pt_regs *) sp; |
105 | printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN); | 104 | printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN); |
106 | print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN); | 105 | print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN); |
107 | low = sp; | 106 | low = sp; |
108 | sp = regs->gprs[15]; | 107 | sp = regs->gprs[15]; |
109 | } | 108 | } |
110 | } | 109 | } |
111 | 110 | ||
112 | static void show_trace(struct task_struct *task, unsigned long *stack) | 111 | static void show_trace(struct task_struct *task, unsigned long *stack) |
113 | { | 112 | { |
114 | register unsigned long __r15 asm ("15"); | 113 | register unsigned long __r15 asm ("15"); |
115 | unsigned long sp; | 114 | unsigned long sp; |
116 | 115 | ||
117 | sp = (unsigned long) stack; | 116 | sp = (unsigned long) stack; |
118 | if (!sp) | 117 | if (!sp) |
119 | sp = task ? task->thread.ksp : __r15; | 118 | sp = task ? task->thread.ksp : __r15; |
120 | printk("Call Trace:\n"); | 119 | printk("Call Trace:\n"); |
121 | #ifdef CONFIG_CHECK_STACK | 120 | #ifdef CONFIG_CHECK_STACK |
122 | sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, | 121 | sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, |
123 | S390_lowcore.panic_stack); | 122 | S390_lowcore.panic_stack); |
124 | #endif | 123 | #endif |
125 | sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, | 124 | sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, |
126 | S390_lowcore.async_stack); | 125 | S390_lowcore.async_stack); |
127 | if (task) | 126 | if (task) |
128 | __show_trace(sp, (unsigned long) task_stack_page(task), | 127 | __show_trace(sp, (unsigned long) task_stack_page(task), |
129 | (unsigned long) task_stack_page(task) + THREAD_SIZE); | 128 | (unsigned long) task_stack_page(task) + THREAD_SIZE); |
130 | else | 129 | else |
131 | __show_trace(sp, S390_lowcore.thread_info, | 130 | __show_trace(sp, S390_lowcore.thread_info, |
132 | S390_lowcore.thread_info + THREAD_SIZE); | 131 | S390_lowcore.thread_info + THREAD_SIZE); |
133 | if (!task) | 132 | if (!task) |
134 | task = current; | 133 | task = current; |
135 | debug_show_held_locks(task); | 134 | debug_show_held_locks(task); |
136 | } | 135 | } |
137 | 136 | ||
138 | void show_stack(struct task_struct *task, unsigned long *sp) | 137 | void show_stack(struct task_struct *task, unsigned long *sp) |
139 | { | 138 | { |
140 | register unsigned long * __r15 asm ("15"); | 139 | register unsigned long * __r15 asm ("15"); |
141 | unsigned long *stack; | 140 | unsigned long *stack; |
142 | int i; | 141 | int i; |
143 | 142 | ||
144 | if (!sp) | 143 | if (!sp) |
145 | stack = task ? (unsigned long *) task->thread.ksp : __r15; | 144 | stack = task ? (unsigned long *) task->thread.ksp : __r15; |
146 | else | 145 | else |
147 | stack = sp; | 146 | stack = sp; |
148 | 147 | ||
149 | for (i = 0; i < kstack_depth_to_print; i++) { | 148 | for (i = 0; i < kstack_depth_to_print; i++) { |
150 | if (((addr_t) stack & (THREAD_SIZE-1)) == 0) | 149 | if (((addr_t) stack & (THREAD_SIZE-1)) == 0) |
151 | break; | 150 | break; |
152 | if (i && ((i * sizeof (long) % 32) == 0)) | 151 | if (i && ((i * sizeof (long) % 32) == 0)) |
153 | printk("\n "); | 152 | printk("\n "); |
154 | printk(LONG, *stack++); | 153 | printk(LONG, *stack++); |
155 | } | 154 | } |
156 | printk("\n"); | 155 | printk("\n"); |
157 | show_trace(task, sp); | 156 | show_trace(task, sp); |
158 | } | 157 | } |
159 | 158 | ||
160 | static void show_last_breaking_event(struct pt_regs *regs) | 159 | static void show_last_breaking_event(struct pt_regs *regs) |
161 | { | 160 | { |
162 | #ifdef CONFIG_64BIT | 161 | #ifdef CONFIG_64BIT |
163 | printk("Last Breaking-Event-Address:\n"); | 162 | printk("Last Breaking-Event-Address:\n"); |
164 | printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN); | 163 | printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN); |
165 | print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN); | 164 | print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN); |
166 | #endif | 165 | #endif |
167 | } | 166 | } |
168 | 167 | ||
169 | /* | 168 | /* |
170 | * The architecture-independent dump_stack generator | 169 | * The architecture-independent dump_stack generator |
171 | */ | 170 | */ |
172 | void dump_stack(void) | 171 | void dump_stack(void) |
173 | { | 172 | { |
174 | printk("CPU: %d %s %s %.*s\n", | 173 | printk("CPU: %d %s %s %.*s\n", |
175 | task_thread_info(current)->cpu, print_tainted(), | 174 | task_thread_info(current)->cpu, print_tainted(), |
176 | init_utsname()->release, | 175 | init_utsname()->release, |
177 | (int)strcspn(init_utsname()->version, " "), | 176 | (int)strcspn(init_utsname()->version, " "), |
178 | init_utsname()->version); | 177 | init_utsname()->version); |
179 | printk("Process %s (pid: %d, task: %p, ksp: %p)\n", | 178 | printk("Process %s (pid: %d, task: %p, ksp: %p)\n", |
180 | current->comm, current->pid, current, | 179 | current->comm, current->pid, current, |
181 | (void *) current->thread.ksp); | 180 | (void *) current->thread.ksp); |
182 | show_stack(NULL, NULL); | 181 | show_stack(NULL, NULL); |
183 | } | 182 | } |
184 | EXPORT_SYMBOL(dump_stack); | 183 | EXPORT_SYMBOL(dump_stack); |
185 | 184 | ||
186 | static inline int mask_bits(struct pt_regs *regs, unsigned long bits) | 185 | static inline int mask_bits(struct pt_regs *regs, unsigned long bits) |
187 | { | 186 | { |
188 | return (regs->psw.mask & bits) / ((~bits + 1) & bits); | 187 | return (regs->psw.mask & bits) / ((~bits + 1) & bits); |
189 | } | 188 | } |
190 | 189 | ||
191 | void show_registers(struct pt_regs *regs) | 190 | void show_registers(struct pt_regs *regs) |
192 | { | 191 | { |
193 | char *mode; | 192 | char *mode; |
194 | 193 | ||
195 | mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; | 194 | mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; |
196 | printk("%s PSW : %p %p", | 195 | printk("%s PSW : %p %p", |
197 | mode, (void *) regs->psw.mask, | 196 | mode, (void *) regs->psw.mask, |
198 | (void *) regs->psw.addr); | 197 | (void *) regs->psw.addr); |
199 | print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); | 198 | print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); |
200 | printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " | 199 | printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " |
201 | "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), | 200 | "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), |
202 | mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), | 201 | mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), |
203 | mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), | 202 | mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), |
204 | mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), | 203 | mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), |
205 | mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), | 204 | mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), |
206 | mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); | 205 | mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); |
207 | #ifdef CONFIG_64BIT | 206 | #ifdef CONFIG_64BIT |
208 | printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS)); | 207 | printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS)); |
209 | #endif | 208 | #endif |
210 | printk("\n%s GPRS: " FOURLONG, mode, | 209 | printk("\n%s GPRS: " FOURLONG, mode, |
211 | regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); | 210 | regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); |
212 | printk(" " FOURLONG, | 211 | printk(" " FOURLONG, |
213 | regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); | 212 | regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); |
214 | printk(" " FOURLONG, | 213 | printk(" " FOURLONG, |
215 | regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); | 214 | regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); |
216 | printk(" " FOURLONG, | 215 | printk(" " FOURLONG, |
217 | regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); | 216 | regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); |
218 | 217 | ||
219 | show_code(regs); | 218 | show_code(regs); |
220 | } | 219 | } |
221 | 220 | ||
222 | void show_regs(struct pt_regs *regs) | 221 | void show_regs(struct pt_regs *regs) |
223 | { | 222 | { |
224 | print_modules(); | 223 | print_modules(); |
225 | printk("CPU: %d %s %s %.*s\n", | 224 | printk("CPU: %d %s %s %.*s\n", |
226 | task_thread_info(current)->cpu, print_tainted(), | 225 | task_thread_info(current)->cpu, print_tainted(), |
227 | init_utsname()->release, | 226 | init_utsname()->release, |
228 | (int)strcspn(init_utsname()->version, " "), | 227 | (int)strcspn(init_utsname()->version, " "), |
229 | init_utsname()->version); | 228 | init_utsname()->version); |
230 | printk("Process %s (pid: %d, task: %p, ksp: %p)\n", | 229 | printk("Process %s (pid: %d, task: %p, ksp: %p)\n", |
231 | current->comm, current->pid, current, | 230 | current->comm, current->pid, current, |
232 | (void *) current->thread.ksp); | 231 | (void *) current->thread.ksp); |
233 | show_registers(regs); | 232 | show_registers(regs); |
234 | /* Show stack backtrace if pt_regs is from kernel mode */ | 233 | /* Show stack backtrace if pt_regs is from kernel mode */ |
235 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) | 234 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) |
236 | show_trace(NULL, (unsigned long *) regs->gprs[15]); | 235 | show_trace(NULL, (unsigned long *) regs->gprs[15]); |
237 | show_last_breaking_event(regs); | 236 | show_last_breaking_event(regs); |
238 | } | 237 | } |
239 | 238 | ||
240 | static DEFINE_SPINLOCK(die_lock); | 239 | static DEFINE_SPINLOCK(die_lock); |
241 | 240 | ||
242 | void die(const char * str, struct pt_regs * regs, long err) | 241 | void die(const char * str, struct pt_regs * regs, long err) |
243 | { | 242 | { |
244 | static int die_counter; | 243 | static int die_counter; |
245 | 244 | ||
246 | oops_enter(); | 245 | oops_enter(); |
247 | debug_stop_all(); | 246 | debug_stop_all(); |
248 | console_verbose(); | 247 | console_verbose(); |
249 | spin_lock_irq(&die_lock); | 248 | spin_lock_irq(&die_lock); |
250 | bust_spinlocks(1); | 249 | bust_spinlocks(1); |
251 | printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); | 250 | printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); |
252 | #ifdef CONFIG_PREEMPT | 251 | #ifdef CONFIG_PREEMPT |
253 | printk("PREEMPT "); | 252 | printk("PREEMPT "); |
254 | #endif | 253 | #endif |
255 | #ifdef CONFIG_SMP | 254 | #ifdef CONFIG_SMP |
256 | printk("SMP "); | 255 | printk("SMP "); |
257 | #endif | 256 | #endif |
258 | #ifdef CONFIG_DEBUG_PAGEALLOC | 257 | #ifdef CONFIG_DEBUG_PAGEALLOC |
259 | printk("DEBUG_PAGEALLOC"); | 258 | printk("DEBUG_PAGEALLOC"); |
260 | #endif | 259 | #endif |
261 | printk("\n"); | 260 | printk("\n"); |
262 | notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV); | 261 | notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV); |
263 | show_regs(regs); | 262 | show_regs(regs); |
264 | bust_spinlocks(0); | 263 | bust_spinlocks(0); |
265 | add_taint(TAINT_DIE); | 264 | add_taint(TAINT_DIE); |
266 | spin_unlock_irq(&die_lock); | 265 | spin_unlock_irq(&die_lock); |
267 | if (in_interrupt()) | 266 | if (in_interrupt()) |
268 | panic("Fatal exception in interrupt"); | 267 | panic("Fatal exception in interrupt"); |
269 | if (panic_on_oops) | 268 | if (panic_on_oops) |
270 | panic("Fatal exception: panic_on_oops"); | 269 | panic("Fatal exception: panic_on_oops"); |
271 | oops_exit(); | 270 | oops_exit(); |
272 | do_exit(SIGSEGV); | 271 | do_exit(SIGSEGV); |
273 | } | 272 | } |
274 | 273 | ||
275 | static inline void report_user_fault(struct pt_regs *regs, long int_code, | 274 | static inline void report_user_fault(struct pt_regs *regs, long int_code, |
276 | int signr) | 275 | int signr) |
277 | { | 276 | { |
278 | if ((task_pid_nr(current) > 1) && !show_unhandled_signals) | 277 | if ((task_pid_nr(current) > 1) && !show_unhandled_signals) |
279 | return; | 278 | return; |
280 | if (!unhandled_signal(current, signr)) | 279 | if (!unhandled_signal(current, signr)) |
281 | return; | 280 | return; |
282 | if (!printk_ratelimit()) | 281 | if (!printk_ratelimit()) |
283 | return; | 282 | return; |
284 | printk("User process fault: interruption code 0x%lX ", int_code); | 283 | printk("User process fault: interruption code 0x%lX ", int_code); |
285 | print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN); | 284 | print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN); |
286 | printk("\n"); | 285 | printk("\n"); |
287 | show_regs(regs); | 286 | show_regs(regs); |
288 | } | 287 | } |
289 | 288 | ||
290 | int is_valid_bugaddr(unsigned long addr) | 289 | int is_valid_bugaddr(unsigned long addr) |
291 | { | 290 | { |
292 | return 1; | 291 | return 1; |
293 | } | 292 | } |
294 | 293 | ||
295 | static inline void __kprobes do_trap(long pgm_int_code, int signr, char *str, | 294 | static inline void __kprobes do_trap(long pgm_int_code, int signr, char *str, |
296 | struct pt_regs *regs, siginfo_t *info) | 295 | struct pt_regs *regs, siginfo_t *info) |
297 | { | 296 | { |
298 | if (notify_die(DIE_TRAP, str, regs, pgm_int_code, | 297 | if (notify_die(DIE_TRAP, str, regs, pgm_int_code, |
299 | pgm_int_code, signr) == NOTIFY_STOP) | 298 | pgm_int_code, signr) == NOTIFY_STOP) |
300 | return; | 299 | return; |
301 | 300 | ||
302 | if (regs->psw.mask & PSW_MASK_PSTATE) { | 301 | if (regs->psw.mask & PSW_MASK_PSTATE) { |
303 | struct task_struct *tsk = current; | 302 | struct task_struct *tsk = current; |
304 | 303 | ||
305 | tsk->thread.trap_no = pgm_int_code & 0xffff; | 304 | tsk->thread.trap_no = pgm_int_code & 0xffff; |
306 | force_sig_info(signr, info, tsk); | 305 | force_sig_info(signr, info, tsk); |
307 | report_user_fault(regs, pgm_int_code, signr); | 306 | report_user_fault(regs, pgm_int_code, signr); |
308 | } else { | 307 | } else { |
309 | const struct exception_table_entry *fixup; | 308 | const struct exception_table_entry *fixup; |
310 | fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); | 309 | fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); |
311 | if (fixup) | 310 | if (fixup) |
312 | regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; | 311 | regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; |
313 | else { | 312 | else { |
314 | enum bug_trap_type btt; | 313 | enum bug_trap_type btt; |
315 | 314 | ||
316 | btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs); | 315 | btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs); |
317 | if (btt == BUG_TRAP_TYPE_WARN) | 316 | if (btt == BUG_TRAP_TYPE_WARN) |
318 | return; | 317 | return; |
319 | die(str, regs, pgm_int_code); | 318 | die(str, regs, pgm_int_code); |
320 | } | 319 | } |
321 | } | 320 | } |
322 | } | 321 | } |
323 | 322 | ||
324 | static inline void __user *get_psw_address(struct pt_regs *regs, | 323 | static inline void __user *get_psw_address(struct pt_regs *regs, |
325 | long pgm_int_code) | 324 | long pgm_int_code) |
326 | { | 325 | { |
327 | return (void __user *) | 326 | return (void __user *) |
328 | ((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN); | 327 | ((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN); |
329 | } | 328 | } |
330 | 329 | ||
331 | void __kprobes do_per_trap(struct pt_regs *regs) | 330 | void __kprobes do_per_trap(struct pt_regs *regs) |
332 | { | 331 | { |
333 | if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) | 332 | if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) |
334 | return; | 333 | return; |
335 | if (tracehook_consider_fatal_signal(current, SIGTRAP)) | 334 | if (tracehook_consider_fatal_signal(current, SIGTRAP)) |
336 | force_sig(SIGTRAP, current); | 335 | force_sig(SIGTRAP, current); |
337 | } | 336 | } |
338 | 337 | ||
339 | static void default_trap_handler(struct pt_regs *regs, long pgm_int_code, | 338 | static void default_trap_handler(struct pt_regs *regs, long pgm_int_code, |
340 | unsigned long trans_exc_code) | 339 | unsigned long trans_exc_code) |
341 | { | 340 | { |
342 | if (regs->psw.mask & PSW_MASK_PSTATE) { | 341 | if (regs->psw.mask & PSW_MASK_PSTATE) { |
343 | report_user_fault(regs, pgm_int_code, SIGSEGV); | 342 | report_user_fault(regs, pgm_int_code, SIGSEGV); |
344 | do_exit(SIGSEGV); | 343 | do_exit(SIGSEGV); |
345 | } else | 344 | } else |
346 | die("Unknown program exception", regs, pgm_int_code); | 345 | die("Unknown program exception", regs, pgm_int_code); |
347 | } | 346 | } |
348 | 347 | ||
349 | #define DO_ERROR_INFO(name, signr, sicode, str) \ | 348 | #define DO_ERROR_INFO(name, signr, sicode, str) \ |
350 | static void name(struct pt_regs *regs, long pgm_int_code, \ | 349 | static void name(struct pt_regs *regs, long pgm_int_code, \ |
351 | unsigned long trans_exc_code) \ | 350 | unsigned long trans_exc_code) \ |
352 | { \ | 351 | { \ |
353 | siginfo_t info; \ | 352 | siginfo_t info; \ |
354 | info.si_signo = signr; \ | 353 | info.si_signo = signr; \ |
355 | info.si_errno = 0; \ | 354 | info.si_errno = 0; \ |
356 | info.si_code = sicode; \ | 355 | info.si_code = sicode; \ |
357 | info.si_addr = get_psw_address(regs, pgm_int_code); \ | 356 | info.si_addr = get_psw_address(regs, pgm_int_code); \ |
358 | do_trap(pgm_int_code, signr, str, regs, &info); \ | 357 | do_trap(pgm_int_code, signr, str, regs, &info); \ |
359 | } | 358 | } |
360 | 359 | ||
361 | DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR, | 360 | DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR, |
362 | "addressing exception") | 361 | "addressing exception") |
363 | DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN, | 362 | DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN, |
364 | "execute exception") | 363 | "execute exception") |
365 | DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV, | 364 | DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV, |
366 | "fixpoint divide exception") | 365 | "fixpoint divide exception") |
367 | DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF, | 366 | DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF, |
368 | "fixpoint overflow exception") | 367 | "fixpoint overflow exception") |
369 | DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF, | 368 | DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF, |
370 | "HFP overflow exception") | 369 | "HFP overflow exception") |
371 | DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND, | 370 | DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND, |
372 | "HFP underflow exception") | 371 | "HFP underflow exception") |
373 | DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES, | 372 | DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES, |
374 | "HFP significance exception") | 373 | "HFP significance exception") |
375 | DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV, | 374 | DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV, |
376 | "HFP divide exception") | 375 | "HFP divide exception") |
377 | DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV, | 376 | DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV, |
378 | "HFP square root exception") | 377 | "HFP square root exception") |
379 | DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN, | 378 | DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN, |
380 | "operand exception") | 379 | "operand exception") |
381 | DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC, | 380 | DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC, |
382 | "privileged operation") | 381 | "privileged operation") |
383 | DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, | 382 | DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, |
384 | "special operation exception") | 383 | "special operation exception") |
385 | DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN, | 384 | DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN, |
386 | "translation exception") | 385 | "translation exception") |
387 | 386 | ||
388 | static inline void do_fp_trap(struct pt_regs *regs, void __user *location, | 387 | static inline void do_fp_trap(struct pt_regs *regs, void __user *location, |
389 | int fpc, long pgm_int_code) | 388 | int fpc, long pgm_int_code) |
390 | { | 389 | { |
391 | siginfo_t si; | 390 | siginfo_t si; |
392 | 391 | ||
393 | si.si_signo = SIGFPE; | 392 | si.si_signo = SIGFPE; |
394 | si.si_errno = 0; | 393 | si.si_errno = 0; |
395 | si.si_addr = location; | 394 | si.si_addr = location; |
396 | si.si_code = 0; | 395 | si.si_code = 0; |
397 | /* FPC[2] is Data Exception Code */ | 396 | /* FPC[2] is Data Exception Code */ |
398 | if ((fpc & 0x00000300) == 0) { | 397 | if ((fpc & 0x00000300) == 0) { |
399 | /* bits 6 and 7 of DXC are 0 iff IEEE exception */ | 398 | /* bits 6 and 7 of DXC are 0 iff IEEE exception */ |
400 | if (fpc & 0x8000) /* invalid fp operation */ | 399 | if (fpc & 0x8000) /* invalid fp operation */ |
401 | si.si_code = FPE_FLTINV; | 400 | si.si_code = FPE_FLTINV; |
402 | else if (fpc & 0x4000) /* div by 0 */ | 401 | else if (fpc & 0x4000) /* div by 0 */ |
403 | si.si_code = FPE_FLTDIV; | 402 | si.si_code = FPE_FLTDIV; |
404 | else if (fpc & 0x2000) /* overflow */ | 403 | else if (fpc & 0x2000) /* overflow */ |
405 | si.si_code = FPE_FLTOVF; | 404 | si.si_code = FPE_FLTOVF; |
406 | else if (fpc & 0x1000) /* underflow */ | 405 | else if (fpc & 0x1000) /* underflow */ |
407 | si.si_code = FPE_FLTUND; | 406 | si.si_code = FPE_FLTUND; |
408 | else if (fpc & 0x0800) /* inexact */ | 407 | else if (fpc & 0x0800) /* inexact */ |
409 | si.si_code = FPE_FLTRES; | 408 | si.si_code = FPE_FLTRES; |
410 | } | 409 | } |
411 | do_trap(pgm_int_code, SIGFPE, | 410 | do_trap(pgm_int_code, SIGFPE, |
412 | "floating point exception", regs, &si); | 411 | "floating point exception", regs, &si); |
413 | } | 412 | } |
414 | 413 | ||
415 | static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code, | 414 | static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code, |
416 | unsigned long trans_exc_code) | 415 | unsigned long trans_exc_code) |
417 | { | 416 | { |
418 | siginfo_t info; | 417 | siginfo_t info; |
419 | __u8 opcode[6]; | 418 | __u8 opcode[6]; |
420 | __u16 __user *location; | 419 | __u16 __user *location; |
421 | int signal = 0; | 420 | int signal = 0; |
422 | 421 | ||
423 | location = get_psw_address(regs, pgm_int_code); | 422 | location = get_psw_address(regs, pgm_int_code); |
424 | 423 | ||
425 | if (regs->psw.mask & PSW_MASK_PSTATE) { | 424 | if (regs->psw.mask & PSW_MASK_PSTATE) { |
426 | if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) | 425 | if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) |
427 | return; | 426 | return; |
428 | if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { | 427 | if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { |
429 | if (tracehook_consider_fatal_signal(current, SIGTRAP)) | 428 | if (tracehook_consider_fatal_signal(current, SIGTRAP)) |
430 | force_sig(SIGTRAP, current); | 429 | force_sig(SIGTRAP, current); |
431 | else | 430 | else |
432 | signal = SIGILL; | 431 | signal = SIGILL; |
433 | #ifdef CONFIG_MATHEMU | 432 | #ifdef CONFIG_MATHEMU |
434 | } else if (opcode[0] == 0xb3) { | 433 | } else if (opcode[0] == 0xb3) { |
435 | if (get_user(*((__u16 *) (opcode+2)), location+1)) | 434 | if (get_user(*((__u16 *) (opcode+2)), location+1)) |
436 | return; | 435 | return; |
437 | signal = math_emu_b3(opcode, regs); | 436 | signal = math_emu_b3(opcode, regs); |
438 | } else if (opcode[0] == 0xed) { | 437 | } else if (opcode[0] == 0xed) { |
439 | if (get_user(*((__u32 *) (opcode+2)), | 438 | if (get_user(*((__u32 *) (opcode+2)), |
440 | (__u32 __user *)(location+1))) | 439 | (__u32 __user *)(location+1))) |
441 | return; | 440 | return; |
442 | signal = math_emu_ed(opcode, regs); | 441 | signal = math_emu_ed(opcode, regs); |
443 | } else if (*((__u16 *) opcode) == 0xb299) { | 442 | } else if (*((__u16 *) opcode) == 0xb299) { |
444 | if (get_user(*((__u16 *) (opcode+2)), location+1)) | 443 | if (get_user(*((__u16 *) (opcode+2)), location+1)) |
445 | return; | 444 | return; |
446 | signal = math_emu_srnm(opcode, regs); | 445 | signal = math_emu_srnm(opcode, regs); |
447 | } else if (*((__u16 *) opcode) == 0xb29c) { | 446 | } else if (*((__u16 *) opcode) == 0xb29c) { |
448 | if (get_user(*((__u16 *) (opcode+2)), location+1)) | 447 | if (get_user(*((__u16 *) (opcode+2)), location+1)) |
449 | return; | 448 | return; |
450 | signal = math_emu_stfpc(opcode, regs); | 449 | signal = math_emu_stfpc(opcode, regs); |
451 | } else if (*((__u16 *) opcode) == 0xb29d) { | 450 | } else if (*((__u16 *) opcode) == 0xb29d) { |
452 | if (get_user(*((__u16 *) (opcode+2)), location+1)) | 451 | if (get_user(*((__u16 *) (opcode+2)), location+1)) |
453 | return; | 452 | return; |
454 | signal = math_emu_lfpc(opcode, regs); | 453 | signal = math_emu_lfpc(opcode, regs); |
455 | #endif | 454 | #endif |
456 | } else | 455 | } else |
457 | signal = SIGILL; | 456 | signal = SIGILL; |
458 | } else { | 457 | } else { |
459 | /* | 458 | /* |
460 | * If we get an illegal op in kernel mode, send it through the | 459 | * If we get an illegal op in kernel mode, send it through the |
461 | * kprobes notifier. If kprobes doesn't pick it up, SIGILL | 460 | * kprobes notifier. If kprobes doesn't pick it up, SIGILL |
462 | */ | 461 | */ |
463 | if (notify_die(DIE_BPT, "bpt", regs, pgm_int_code, | 462 | if (notify_die(DIE_BPT, "bpt", regs, pgm_int_code, |
464 | 3, SIGTRAP) != NOTIFY_STOP) | 463 | 3, SIGTRAP) != NOTIFY_STOP) |
465 | signal = SIGILL; | 464 | signal = SIGILL; |
466 | } | 465 | } |
467 | 466 | ||
468 | #ifdef CONFIG_MATHEMU | 467 | #ifdef CONFIG_MATHEMU |
469 | if (signal == SIGFPE) | 468 | if (signal == SIGFPE) |
470 | do_fp_trap(regs, location, | 469 | do_fp_trap(regs, location, |
471 | current->thread.fp_regs.fpc, pgm_int_code); | 470 | current->thread.fp_regs.fpc, pgm_int_code); |
472 | else if (signal == SIGSEGV) { | 471 | else if (signal == SIGSEGV) { |
473 | info.si_signo = signal; | 472 | info.si_signo = signal; |
474 | info.si_errno = 0; | 473 | info.si_errno = 0; |
475 | info.si_code = SEGV_MAPERR; | 474 | info.si_code = SEGV_MAPERR; |
476 | info.si_addr = (void __user *) location; | 475 | info.si_addr = (void __user *) location; |
477 | do_trap(pgm_int_code, signal, | 476 | do_trap(pgm_int_code, signal, |
478 | "user address fault", regs, &info); | 477 | "user address fault", regs, &info); |
479 | } else | 478 | } else |
480 | #endif | 479 | #endif |
481 | if (signal) { | 480 | if (signal) { |
482 | info.si_signo = signal; | 481 | info.si_signo = signal; |
483 | info.si_errno = 0; | 482 | info.si_errno = 0; |
484 | info.si_code = ILL_ILLOPC; | 483 | info.si_code = ILL_ILLOPC; |
485 | info.si_addr = (void __user *) location; | 484 | info.si_addr = (void __user *) location; |
486 | do_trap(pgm_int_code, signal, | 485 | do_trap(pgm_int_code, signal, |
487 | "illegal operation", regs, &info); | 486 | "illegal operation", regs, &info); |
488 | } | 487 | } |
489 | } | 488 | } |
490 | 489 | ||
491 | 490 | ||
492 | #ifdef CONFIG_MATHEMU | 491 | #ifdef CONFIG_MATHEMU |
493 | asmlinkage void specification_exception(struct pt_regs *regs, | 492 | asmlinkage void specification_exception(struct pt_regs *regs, |
494 | long pgm_int_code, | 493 | long pgm_int_code, |
495 | unsigned long trans_exc_code) | 494 | unsigned long trans_exc_code) |
496 | { | 495 | { |
497 | __u8 opcode[6]; | 496 | __u8 opcode[6]; |
498 | __u16 __user *location = NULL; | 497 | __u16 __user *location = NULL; |
499 | int signal = 0; | 498 | int signal = 0; |
500 | 499 | ||
501 | location = (__u16 __user *) get_psw_address(regs, pgm_int_code); | 500 | location = (__u16 __user *) get_psw_address(regs, pgm_int_code); |
502 | 501 | ||
503 | if (regs->psw.mask & PSW_MASK_PSTATE) { | 502 | if (regs->psw.mask & PSW_MASK_PSTATE) { |
504 | get_user(*((__u16 *) opcode), location); | 503 | get_user(*((__u16 *) opcode), location); |
505 | switch (opcode[0]) { | 504 | switch (opcode[0]) { |
506 | case 0x28: /* LDR Rx,Ry */ | 505 | case 0x28: /* LDR Rx,Ry */ |
507 | signal = math_emu_ldr(opcode); | 506 | signal = math_emu_ldr(opcode); |
508 | break; | 507 | break; |
509 | case 0x38: /* LER Rx,Ry */ | 508 | case 0x38: /* LER Rx,Ry */ |
510 | signal = math_emu_ler(opcode); | 509 | signal = math_emu_ler(opcode); |
511 | break; | 510 | break; |
512 | case 0x60: /* STD R,D(X,B) */ | 511 | case 0x60: /* STD R,D(X,B) */ |
513 | get_user(*((__u16 *) (opcode+2)), location+1); | 512 | get_user(*((__u16 *) (opcode+2)), location+1); |
514 | signal = math_emu_std(opcode, regs); | 513 | signal = math_emu_std(opcode, regs); |
515 | break; | 514 | break; |
516 | case 0x68: /* LD R,D(X,B) */ | 515 | case 0x68: /* LD R,D(X,B) */ |
517 | get_user(*((__u16 *) (opcode+2)), location+1); | 516 | get_user(*((__u16 *) (opcode+2)), location+1); |
518 | signal = math_emu_ld(opcode, regs); | 517 | signal = math_emu_ld(opcode, regs); |
519 | break; | 518 | break; |
520 | case 0x70: /* STE R,D(X,B) */ | 519 | case 0x70: /* STE R,D(X,B) */ |
521 | get_user(*((__u16 *) (opcode+2)), location+1); | 520 | get_user(*((__u16 *) (opcode+2)), location+1); |
522 | signal = math_emu_ste(opcode, regs); | 521 | signal = math_emu_ste(opcode, regs); |
523 | break; | 522 | break; |
524 | case 0x78: /* LE R,D(X,B) */ | 523 | case 0x78: /* LE R,D(X,B) */ |
525 | get_user(*((__u16 *) (opcode+2)), location+1); | 524 | get_user(*((__u16 *) (opcode+2)), location+1); |
526 | signal = math_emu_le(opcode, regs); | 525 | signal = math_emu_le(opcode, regs); |
527 | break; | 526 | break; |
528 | default: | 527 | default: |
529 | signal = SIGILL; | 528 | signal = SIGILL; |
530 | break; | 529 | break; |
531 | } | 530 | } |
532 | } else | 531 | } else |
533 | signal = SIGILL; | 532 | signal = SIGILL; |
534 | 533 | ||
535 | if (signal == SIGFPE) | 534 | if (signal == SIGFPE) |
536 | do_fp_trap(regs, location, | 535 | do_fp_trap(regs, location, |
537 | current->thread.fp_regs.fpc, pgm_int_code); | 536 | current->thread.fp_regs.fpc, pgm_int_code); |
538 | else if (signal) { | 537 | else if (signal) { |
539 | siginfo_t info; | 538 | siginfo_t info; |
540 | info.si_signo = signal; | 539 | info.si_signo = signal; |
541 | info.si_errno = 0; | 540 | info.si_errno = 0; |
542 | info.si_code = ILL_ILLOPN; | 541 | info.si_code = ILL_ILLOPN; |
543 | info.si_addr = location; | 542 | info.si_addr = location; |
544 | do_trap(pgm_int_code, signal, | 543 | do_trap(pgm_int_code, signal, |
545 | "specification exception", regs, &info); | 544 | "specification exception", regs, &info); |
546 | } | 545 | } |
547 | } | 546 | } |
548 | #else | 547 | #else |
549 | DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, | 548 | DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, |
550 | "specification exception"); | 549 | "specification exception"); |
551 | #endif | 550 | #endif |
552 | 551 | ||
553 | static void data_exception(struct pt_regs *regs, long pgm_int_code, | 552 | static void data_exception(struct pt_regs *regs, long pgm_int_code, |
554 | unsigned long trans_exc_code) | 553 | unsigned long trans_exc_code) |
555 | { | 554 | { |
556 | __u16 __user *location; | 555 | __u16 __user *location; |
557 | int signal = 0; | 556 | int signal = 0; |
558 | 557 | ||
559 | location = get_psw_address(regs, pgm_int_code); | 558 | location = get_psw_address(regs, pgm_int_code); |
560 | 559 | ||
561 | if (MACHINE_HAS_IEEE) | 560 | if (MACHINE_HAS_IEEE) |
562 | asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); | 561 | asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); |
563 | 562 | ||
564 | #ifdef CONFIG_MATHEMU | 563 | #ifdef CONFIG_MATHEMU |
565 | else if (regs->psw.mask & PSW_MASK_PSTATE) { | 564 | else if (regs->psw.mask & PSW_MASK_PSTATE) { |
566 | __u8 opcode[6]; | 565 | __u8 opcode[6]; |
567 | get_user(*((__u16 *) opcode), location); | 566 | get_user(*((__u16 *) opcode), location); |
568 | switch (opcode[0]) { | 567 | switch (opcode[0]) { |
569 | case 0x28: /* LDR Rx,Ry */ | 568 | case 0x28: /* LDR Rx,Ry */ |
570 | signal = math_emu_ldr(opcode); | 569 | signal = math_emu_ldr(opcode); |
571 | break; | 570 | break; |
572 | case 0x38: /* LER Rx,Ry */ | 571 | case 0x38: /* LER Rx,Ry */ |
573 | signal = math_emu_ler(opcode); | 572 | signal = math_emu_ler(opcode); |
574 | break; | 573 | break; |
575 | case 0x60: /* STD R,D(X,B) */ | 574 | case 0x60: /* STD R,D(X,B) */ |
576 | get_user(*((__u16 *) (opcode+2)), location+1); | 575 | get_user(*((__u16 *) (opcode+2)), location+1); |
577 | signal = math_emu_std(opcode, regs); | 576 | signal = math_emu_std(opcode, regs); |
578 | break; | 577 | break; |
579 | case 0x68: /* LD R,D(X,B) */ | 578 | case 0x68: /* LD R,D(X,B) */ |
580 | get_user(*((__u16 *) (opcode+2)), location+1); | 579 | get_user(*((__u16 *) (opcode+2)), location+1); |
581 | signal = math_emu_ld(opcode, regs); | 580 | signal = math_emu_ld(opcode, regs); |
582 | break; | 581 | break; |
583 | case 0x70: /* STE R,D(X,B) */ | 582 | case 0x70: /* STE R,D(X,B) */ |
584 | get_user(*((__u16 *) (opcode+2)), location+1); | 583 | get_user(*((__u16 *) (opcode+2)), location+1); |
585 | signal = math_emu_ste(opcode, regs); | 584 | signal = math_emu_ste(opcode, regs); |
586 | break; | 585 | break; |
587 | case 0x78: /* LE R,D(X,B) */ | 586 | case 0x78: /* LE R,D(X,B) */ |
588 | get_user(*((__u16 *) (opcode+2)), location+1); | 587 | get_user(*((__u16 *) (opcode+2)), location+1); |
589 | signal = math_emu_le(opcode, regs); | 588 | signal = math_emu_le(opcode, regs); |
590 | break; | 589 | break; |
591 | case 0xb3: | 590 | case 0xb3: |
592 | get_user(*((__u16 *) (opcode+2)), location+1); | 591 | get_user(*((__u16 *) (opcode+2)), location+1); |
593 | signal = math_emu_b3(opcode, regs); | 592 | signal = math_emu_b3(opcode, regs); |
594 | break; | 593 | break; |
595 | case 0xed: | 594 | case 0xed: |
596 | get_user(*((__u32 *) (opcode+2)), | 595 | get_user(*((__u32 *) (opcode+2)), |
597 | (__u32 __user *)(location+1)); | 596 | (__u32 __user *)(location+1)); |
598 | signal = math_emu_ed(opcode, regs); | 597 | signal = math_emu_ed(opcode, regs); |
599 | break; | 598 | break; |
600 | case 0xb2: | 599 | case 0xb2: |
601 | if (opcode[1] == 0x99) { | 600 | if (opcode[1] == 0x99) { |
602 | get_user(*((__u16 *) (opcode+2)), location+1); | 601 | get_user(*((__u16 *) (opcode+2)), location+1); |
603 | signal = math_emu_srnm(opcode, regs); | 602 | signal = math_emu_srnm(opcode, regs); |
604 | } else if (opcode[1] == 0x9c) { | 603 | } else if (opcode[1] == 0x9c) { |
605 | get_user(*((__u16 *) (opcode+2)), location+1); | 604 | get_user(*((__u16 *) (opcode+2)), location+1); |
606 | signal = math_emu_stfpc(opcode, regs); | 605 | signal = math_emu_stfpc(opcode, regs); |
607 | } else if (opcode[1] == 0x9d) { | 606 | } else if (opcode[1] == 0x9d) { |
608 | get_user(*((__u16 *) (opcode+2)), location+1); | 607 | get_user(*((__u16 *) (opcode+2)), location+1); |
609 | signal = math_emu_lfpc(opcode, regs); | 608 | signal = math_emu_lfpc(opcode, regs); |
610 | } else | 609 | } else |
611 | signal = SIGILL; | 610 | signal = SIGILL; |
612 | break; | 611 | break; |
613 | default: | 612 | default: |
614 | signal = SIGILL; | 613 | signal = SIGILL; |
615 | break; | 614 | break; |
616 | } | 615 | } |
617 | } | 616 | } |
618 | #endif | 617 | #endif |
619 | if (current->thread.fp_regs.fpc & FPC_DXC_MASK) | 618 | if (current->thread.fp_regs.fpc & FPC_DXC_MASK) |
620 | signal = SIGFPE; | 619 | signal = SIGFPE; |
621 | else | 620 | else |
622 | signal = SIGILL; | 621 | signal = SIGILL; |
623 | if (signal == SIGFPE) | 622 | if (signal == SIGFPE) |
624 | do_fp_trap(regs, location, | 623 | do_fp_trap(regs, location, |
625 | current->thread.fp_regs.fpc, pgm_int_code); | 624 | current->thread.fp_regs.fpc, pgm_int_code); |
626 | else if (signal) { | 625 | else if (signal) { |
627 | siginfo_t info; | 626 | siginfo_t info; |
628 | info.si_signo = signal; | 627 | info.si_signo = signal; |
629 | info.si_errno = 0; | 628 | info.si_errno = 0; |
630 | info.si_code = ILL_ILLOPN; | 629 | info.si_code = ILL_ILLOPN; |
631 | info.si_addr = location; | 630 | info.si_addr = location; |
632 | do_trap(pgm_int_code, signal, "data exception", regs, &info); | 631 | do_trap(pgm_int_code, signal, "data exception", regs, &info); |
633 | } | 632 | } |
634 | } | 633 | } |
635 | 634 | ||
636 | static void space_switch_exception(struct pt_regs *regs, long pgm_int_code, | 635 | static void space_switch_exception(struct pt_regs *regs, long pgm_int_code, |
637 | unsigned long trans_exc_code) | 636 | unsigned long trans_exc_code) |
638 | { | 637 | { |
639 | siginfo_t info; | 638 | siginfo_t info; |
640 | 639 | ||
641 | /* Set user psw back to home space mode. */ | 640 | /* Set user psw back to home space mode. */ |
642 | if (regs->psw.mask & PSW_MASK_PSTATE) | 641 | if (regs->psw.mask & PSW_MASK_PSTATE) |
643 | regs->psw.mask |= PSW_ASC_HOME; | 642 | regs->psw.mask |= PSW_ASC_HOME; |
644 | /* Send SIGILL. */ | 643 | /* Send SIGILL. */ |
645 | info.si_signo = SIGILL; | 644 | info.si_signo = SIGILL; |
646 | info.si_errno = 0; | 645 | info.si_errno = 0; |
647 | info.si_code = ILL_PRVOPC; | 646 | info.si_code = ILL_PRVOPC; |
648 | info.si_addr = get_psw_address(regs, pgm_int_code); | 647 | info.si_addr = get_psw_address(regs, pgm_int_code); |
649 | do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info); | 648 | do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info); |
650 | } | 649 | } |
651 | 650 | ||
652 | asmlinkage void __kprobes kernel_stack_overflow(struct pt_regs * regs) | 651 | asmlinkage void __kprobes kernel_stack_overflow(struct pt_regs * regs) |
653 | { | 652 | { |
654 | bust_spinlocks(1); | 653 | bust_spinlocks(1); |
655 | printk("Kernel stack overflow.\n"); | 654 | printk("Kernel stack overflow.\n"); |
656 | show_regs(regs); | 655 | show_regs(regs); |
657 | bust_spinlocks(0); | 656 | bust_spinlocks(0); |
658 | panic("Corrupt kernel stack, can't continue."); | 657 | panic("Corrupt kernel stack, can't continue."); |
659 | } | 658 | } |
660 | 659 | ||
661 | /* init is done in lowcore.S and head.S */ | 660 | /* init is done in lowcore.S and head.S */ |
662 | 661 | ||
663 | void __init trap_init(void) | 662 | void __init trap_init(void) |
664 | { | 663 | { |
665 | int i; | 664 | int i; |
666 | 665 | ||
667 | for (i = 0; i < 128; i++) | 666 | for (i = 0; i < 128; i++) |
668 | pgm_check_table[i] = &default_trap_handler; | 667 | pgm_check_table[i] = &default_trap_handler; |
669 | pgm_check_table[1] = &illegal_op; | 668 | pgm_check_table[1] = &illegal_op; |
670 | pgm_check_table[2] = &privileged_op; | 669 | pgm_check_table[2] = &privileged_op; |
671 | pgm_check_table[3] = &execute_exception; | 670 | pgm_check_table[3] = &execute_exception; |
672 | pgm_check_table[4] = &do_protection_exception; | 671 | pgm_check_table[4] = &do_protection_exception; |
673 | pgm_check_table[5] = &addressing_exception; | 672 | pgm_check_table[5] = &addressing_exception; |
674 | pgm_check_table[6] = &specification_exception; | 673 | pgm_check_table[6] = &specification_exception; |
675 | pgm_check_table[7] = &data_exception; | 674 | pgm_check_table[7] = &data_exception; |
676 | pgm_check_table[8] = &overflow_exception; | 675 | pgm_check_table[8] = &overflow_exception; |
677 | pgm_check_table[9] = ÷_exception; | 676 | pgm_check_table[9] = ÷_exception; |
678 | pgm_check_table[0x0A] = &overflow_exception; | 677 | pgm_check_table[0x0A] = &overflow_exception; |
679 | pgm_check_table[0x0B] = ÷_exception; | 678 | pgm_check_table[0x0B] = ÷_exception; |
680 | pgm_check_table[0x0C] = &hfp_overflow_exception; | 679 | pgm_check_table[0x0C] = &hfp_overflow_exception; |
681 | pgm_check_table[0x0D] = &hfp_underflow_exception; | 680 | pgm_check_table[0x0D] = &hfp_underflow_exception; |
682 | pgm_check_table[0x0E] = &hfp_significance_exception; | 681 | pgm_check_table[0x0E] = &hfp_significance_exception; |
683 | pgm_check_table[0x0F] = &hfp_divide_exception; | 682 | pgm_check_table[0x0F] = &hfp_divide_exception; |
684 | pgm_check_table[0x10] = &do_dat_exception; | 683 | pgm_check_table[0x10] = &do_dat_exception; |
685 | pgm_check_table[0x11] = &do_dat_exception; | 684 | pgm_check_table[0x11] = &do_dat_exception; |
686 | pgm_check_table[0x12] = &translation_exception; | 685 | pgm_check_table[0x12] = &translation_exception; |
687 | pgm_check_table[0x13] = &special_op_exception; | 686 | pgm_check_table[0x13] = &special_op_exception; |
688 | #ifdef CONFIG_64BIT | 687 | #ifdef CONFIG_64BIT |
689 | pgm_check_table[0x38] = &do_asce_exception; | 688 | pgm_check_table[0x38] = &do_asce_exception; |
690 | pgm_check_table[0x39] = &do_dat_exception; | 689 | pgm_check_table[0x39] = &do_dat_exception; |
691 | pgm_check_table[0x3A] = &do_dat_exception; | 690 | pgm_check_table[0x3A] = &do_dat_exception; |
692 | pgm_check_table[0x3B] = &do_dat_exception; | 691 | pgm_check_table[0x3B] = &do_dat_exception; |
693 | #endif /* CONFIG_64BIT */ | 692 | #endif /* CONFIG_64BIT */ |
694 | pgm_check_table[0x15] = &operand_exception; | 693 | pgm_check_table[0x15] = &operand_exception; |
695 | pgm_check_table[0x1C] = &space_switch_exception; | 694 | pgm_check_table[0x1C] = &space_switch_exception; |
696 | pgm_check_table[0x1D] = &hfp_sqrt_exception; | 695 | pgm_check_table[0x1D] = &hfp_sqrt_exception; |
697 | /* Enable machine checks early. */ | 696 | /* Enable machine checks early. */ |
698 | local_mcck_enable(); | 697 | local_mcck_enable(); |
699 | } | 698 | } |
700 | 699 |
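
The trap_init() sequence above is table-driven dispatch: the low bits of the program interruption code index a 128-entry array of handler pointers, with default_trap_handler as the fallback and specific entries overriding it. A minimal user-space sketch of the same pattern (handler names and codes here are illustrative, not the kernel's):

    #include <stdio.h>

    typedef void (*pgm_handler_t)(int code);

    static void default_handler(int code)
    {
            printf("unhandled program check 0x%02x\n", code);
    }

    static void illegal_op_handler(int code)
    {
            printf("illegal operation\n");
    }

    static pgm_handler_t pgm_table[128];

    int main(void)
    {
            int i;

            for (i = 0; i < 128; i++)
                    pgm_table[i] = default_handler;  /* fallback, as in trap_init() */
            pgm_table[1] = illegal_op_handler;       /* specific code overrides it */

            pgm_table[0x01 & 0x7f](0x01);  /* dispatches to illegal_op_handler */
            pgm_table[0x2a & 0x7f](0x2a);  /* falls through to the default */
            return 0;
    }
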
arch/s390/kernel/vtime.c
1 | /* | 1 | /* |
2 | * arch/s390/kernel/vtime.c | 2 | * arch/s390/kernel/vtime.c |
3 | * Virtual cpu timer based timer functions. | 3 | * Virtual cpu timer based timer functions. |
4 | * | 4 | * |
5 | * S390 version | 5 | * S390 version |
6 | * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation | 6 | * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation |
7 | * Author(s): Jan Glauber <jan.glauber@de.ibm.com> | 7 | * Author(s): Jan Glauber <jan.glauber@de.ibm.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/time.h> | 12 | #include <linux/time.h> |
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/smp.h> | 15 | #include <linux/smp.h> |
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | #include <linux/timex.h> | 17 | #include <linux/timex.h> |
18 | #include <linux/notifier.h> | 18 | #include <linux/notifier.h> |
19 | #include <linux/kernel_stat.h> | 19 | #include <linux/kernel_stat.h> |
20 | #include <linux/rcupdate.h> | 20 | #include <linux/rcupdate.h> |
21 | #include <linux/posix-timers.h> | 21 | #include <linux/posix-timers.h> |
22 | #include <linux/cpu.h> | 22 | #include <linux/cpu.h> |
23 | #include <linux/kprobes.h> | 23 | #include <linux/kprobes.h> |
24 | 24 | ||
25 | #include <asm/s390_ext.h> | ||
26 | #include <asm/timer.h> | 25 | #include <asm/timer.h> |
27 | #include <asm/irq_regs.h> | 26 | #include <asm/irq_regs.h> |
28 | #include <asm/cputime.h> | 27 | #include <asm/cputime.h> |
28 | #include <asm/irq.h> | ||
29 | 29 | ||
30 | static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); | 30 | static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); |
31 | 31 | ||
32 | DEFINE_PER_CPU(struct s390_idle_data, s390_idle); | 32 | DEFINE_PER_CPU(struct s390_idle_data, s390_idle); |
33 | 33 | ||
34 | static inline __u64 get_vtimer(void) | 34 | static inline __u64 get_vtimer(void) |
35 | { | 35 | { |
36 | __u64 timer; | 36 | __u64 timer; |
37 | 37 | ||
38 | asm volatile("STPT %0" : "=m" (timer)); | 38 | asm volatile("STPT %0" : "=m" (timer)); |
39 | return timer; | 39 | return timer; |
40 | } | 40 | } |
41 | 41 | ||
42 | static inline void set_vtimer(__u64 expires) | 42 | static inline void set_vtimer(__u64 expires) |
43 | { | 43 | { |
44 | __u64 timer; | 44 | __u64 timer; |
45 | 45 | ||
46 | asm volatile (" STPT %0\n" /* Store current cpu timer value */ | 46 | asm volatile (" STPT %0\n" /* Store current cpu timer value */ |
47 | " SPT %1" /* Set new value immediately afterwards */ | 47 | " SPT %1" /* Set new value immediately afterwards */ |
48 | : "=m" (timer) : "m" (expires) ); | 48 | : "=m" (timer) : "m" (expires) ); |
49 | S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer; | 49 | S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer; |
50 | S390_lowcore.last_update_timer = expires; | 50 | S390_lowcore.last_update_timer = expires; |
51 | } | 51 | } |
52 | 52 | ||
53 | /* | 53 | /* |
54 | * Update process times based on virtual cpu times stored by entry.S | 54 | * Update process times based on virtual cpu times stored by entry.S |
55 | * to the lowcore fields user_timer, system_timer & steal_timer. | 55 | * to the lowcore fields user_timer, system_timer & steal_timer. |
56 | */ | 56 | */ |
57 | static void do_account_vtime(struct task_struct *tsk, int hardirq_offset) | 57 | static void do_account_vtime(struct task_struct *tsk, int hardirq_offset) |
58 | { | 58 | { |
59 | struct thread_info *ti = task_thread_info(tsk); | 59 | struct thread_info *ti = task_thread_info(tsk); |
60 | __u64 timer, clock, user, system, steal; | 60 | __u64 timer, clock, user, system, steal; |
61 | 61 | ||
62 | timer = S390_lowcore.last_update_timer; | 62 | timer = S390_lowcore.last_update_timer; |
63 | clock = S390_lowcore.last_update_clock; | 63 | clock = S390_lowcore.last_update_clock; |
64 | asm volatile (" STPT %0\n" /* Store current cpu timer value */ | 64 | asm volatile (" STPT %0\n" /* Store current cpu timer value */ |
65 | " STCK %1" /* Store current tod clock value */ | 65 | " STCK %1" /* Store current tod clock value */ |
66 | : "=m" (S390_lowcore.last_update_timer), | 66 | : "=m" (S390_lowcore.last_update_timer), |
67 | "=m" (S390_lowcore.last_update_clock) ); | 67 | "=m" (S390_lowcore.last_update_clock) ); |
68 | S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; | 68 | S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; |
69 | S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; | 69 | S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; |
70 | 70 | ||
71 | user = S390_lowcore.user_timer - ti->user_timer; | 71 | user = S390_lowcore.user_timer - ti->user_timer; |
72 | S390_lowcore.steal_timer -= user; | 72 | S390_lowcore.steal_timer -= user; |
73 | ti->user_timer = S390_lowcore.user_timer; | 73 | ti->user_timer = S390_lowcore.user_timer; |
74 | account_user_time(tsk, user, user); | 74 | account_user_time(tsk, user, user); |
75 | 75 | ||
76 | system = S390_lowcore.system_timer - ti->system_timer; | 76 | system = S390_lowcore.system_timer - ti->system_timer; |
77 | S390_lowcore.steal_timer -= system; | 77 | S390_lowcore.steal_timer -= system; |
78 | ti->system_timer = S390_lowcore.system_timer; | 78 | ti->system_timer = S390_lowcore.system_timer; |
79 | account_system_time(tsk, hardirq_offset, system, system); | 79 | account_system_time(tsk, hardirq_offset, system, system); |
80 | 80 | ||
81 | steal = S390_lowcore.steal_timer; | 81 | steal = S390_lowcore.steal_timer; |
82 | if ((s64) steal > 0) { | 82 | if ((s64) steal > 0) { |
83 | S390_lowcore.steal_timer = 0; | 83 | S390_lowcore.steal_timer = 0; |
84 | account_steal_time(steal); | 84 | account_steal_time(steal); |
85 | } | 85 | } |
86 | } | 86 | } |
87 | 87 | ||
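
A detail worth calling out: the delta arithmetic in do_account_vtime() relies on the s390 CPU timer counting down while the CPU runs (the comment in do_cpu_timer_interrupt() below states this), so consumed time is the earlier sample minus the later one. A toy illustration, with made-up tick values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t last_update_timer = 1000000;  /* earlier sample */
            uint64_t timer_now = 999250;           /* later sample, smaller */

            /* old minus new, exactly as system_timer is advanced above */
            printf("consumed %llu ticks\n",
                   (unsigned long long)(last_update_timer - timer_now));
            return 0;
    }
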
88 | void account_vtime(struct task_struct *prev, struct task_struct *next) | 88 | void account_vtime(struct task_struct *prev, struct task_struct *next) |
89 | { | 89 | { |
90 | struct thread_info *ti; | 90 | struct thread_info *ti; |
91 | 91 | ||
92 | do_account_vtime(prev, 0); | 92 | do_account_vtime(prev, 0); |
93 | ti = task_thread_info(prev); | 93 | ti = task_thread_info(prev); |
94 | ti->user_timer = S390_lowcore.user_timer; | 94 | ti->user_timer = S390_lowcore.user_timer; |
95 | ti->system_timer = S390_lowcore.system_timer; | 95 | ti->system_timer = S390_lowcore.system_timer; |
96 | ti = task_thread_info(next); | 96 | ti = task_thread_info(next); |
97 | S390_lowcore.user_timer = ti->user_timer; | 97 | S390_lowcore.user_timer = ti->user_timer; |
98 | S390_lowcore.system_timer = ti->system_timer; | 98 | S390_lowcore.system_timer = ti->system_timer; |
99 | } | 99 | } |
100 | 100 | ||
101 | void account_process_tick(struct task_struct *tsk, int user_tick) | 101 | void account_process_tick(struct task_struct *tsk, int user_tick) |
102 | { | 102 | { |
103 | do_account_vtime(tsk, HARDIRQ_OFFSET); | 103 | do_account_vtime(tsk, HARDIRQ_OFFSET); |
104 | } | 104 | } |
105 | 105 | ||
106 | /* | 106 | /* |
107 | * Update process times based on virtual cpu times stored by entry.S | 107 | * Update process times based on virtual cpu times stored by entry.S |
108 | * to the lowcore fields user_timer, system_timer & steal_timer. | 108 | * to the lowcore fields user_timer, system_timer & steal_timer. |
109 | */ | 109 | */ |
110 | void account_system_vtime(struct task_struct *tsk) | 110 | void account_system_vtime(struct task_struct *tsk) |
111 | { | 111 | { |
112 | struct thread_info *ti = task_thread_info(tsk); | 112 | struct thread_info *ti = task_thread_info(tsk); |
113 | __u64 timer, system; | 113 | __u64 timer, system; |
114 | 114 | ||
115 | timer = S390_lowcore.last_update_timer; | 115 | timer = S390_lowcore.last_update_timer; |
116 | S390_lowcore.last_update_timer = get_vtimer(); | 116 | S390_lowcore.last_update_timer = get_vtimer(); |
117 | S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; | 117 | S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; |
118 | 118 | ||
119 | system = S390_lowcore.system_timer - ti->system_timer; | 119 | system = S390_lowcore.system_timer - ti->system_timer; |
120 | S390_lowcore.steal_timer -= system; | 120 | S390_lowcore.steal_timer -= system; |
121 | ti->system_timer = S390_lowcore.system_timer; | 121 | ti->system_timer = S390_lowcore.system_timer; |
122 | account_system_time(tsk, 0, system, system); | 122 | account_system_time(tsk, 0, system, system); |
123 | } | 123 | } |
124 | EXPORT_SYMBOL_GPL(account_system_vtime); | 124 | EXPORT_SYMBOL_GPL(account_system_vtime); |
125 | 125 | ||
126 | void __kprobes vtime_start_cpu(__u64 int_clock, __u64 enter_timer) | 126 | void __kprobes vtime_start_cpu(__u64 int_clock, __u64 enter_timer) |
127 | { | 127 | { |
128 | struct s390_idle_data *idle = &__get_cpu_var(s390_idle); | 128 | struct s390_idle_data *idle = &__get_cpu_var(s390_idle); |
129 | struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); | 129 | struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); |
130 | __u64 idle_time, expires; | 130 | __u64 idle_time, expires; |
131 | 131 | ||
132 | if (idle->idle_enter == 0ULL) | 132 | if (idle->idle_enter == 0ULL) |
133 | return; | 133 | return; |
134 | 134 | ||
135 | /* Account time spent with enabled wait psw loaded as idle time. */ | 135 | /* Account time spent with enabled wait psw loaded as idle time. */ |
136 | idle_time = int_clock - idle->idle_enter; | 136 | idle_time = int_clock - idle->idle_enter; |
137 | account_idle_time(idle_time); | 137 | account_idle_time(idle_time); |
138 | S390_lowcore.steal_timer += | 138 | S390_lowcore.steal_timer += |
139 | idle->idle_enter - S390_lowcore.last_update_clock; | 139 | idle->idle_enter - S390_lowcore.last_update_clock; |
140 | S390_lowcore.last_update_clock = int_clock; | 140 | S390_lowcore.last_update_clock = int_clock; |
141 | 141 | ||
142 | /* Account system time spent going idle. */ | 142 | /* Account system time spent going idle. */ |
143 | S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle; | 143 | S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle; |
144 | S390_lowcore.last_update_timer = enter_timer; | 144 | S390_lowcore.last_update_timer = enter_timer; |
145 | 145 | ||
146 | /* Restart vtime CPU timer */ | 146 | /* Restart vtime CPU timer */ |
147 | if (vq->do_spt) { | 147 | if (vq->do_spt) { |
148 | /* Program old expire value but first save progress. */ | 148 | /* Program old expire value but first save progress. */ |
149 | expires = vq->idle - enter_timer; | 149 | expires = vq->idle - enter_timer; |
150 | expires += get_vtimer(); | 150 | expires += get_vtimer(); |
151 | set_vtimer(expires); | 151 | set_vtimer(expires); |
152 | } else { | 152 | } else { |
153 | /* Don't account the CPU timer delta while the cpu was idle. */ | 153 | /* Don't account the CPU timer delta while the cpu was idle. */ |
154 | vq->elapsed -= vq->idle - enter_timer; | 154 | vq->elapsed -= vq->idle - enter_timer; |
155 | } | 155 | } |
156 | 156 | ||
157 | idle->sequence++; | 157 | idle->sequence++; |
158 | smp_wmb(); | 158 | smp_wmb(); |
159 | idle->idle_time += idle_time; | 159 | idle->idle_time += idle_time; |
160 | idle->idle_enter = 0ULL; | 160 | idle->idle_enter = 0ULL; |
161 | idle->idle_count++; | 161 | idle->idle_count++; |
162 | smp_wmb(); | 162 | smp_wmb(); |
163 | idle->sequence++; | 163 | idle->sequence++; |
164 | } | 164 | } |
165 | 165 | ||
166 | void __kprobes vtime_stop_cpu(void) | 166 | void __kprobes vtime_stop_cpu(void) |
167 | { | 167 | { |
168 | struct s390_idle_data *idle = &__get_cpu_var(s390_idle); | 168 | struct s390_idle_data *idle = &__get_cpu_var(s390_idle); |
169 | struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); | 169 | struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); |
170 | psw_t psw; | 170 | psw_t psw; |
171 | 171 | ||
172 | /* Wait for external, I/O or machine check interrupt. */ | 172 | /* Wait for external, I/O or machine check interrupt. */ |
173 | psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT; | 173 | psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT; |
174 | 174 | ||
175 | idle->nohz_delay = 0; | 175 | idle->nohz_delay = 0; |
176 | 176 | ||
177 | /* Check if the CPU timer needs to be reprogrammed. */ | 177 | /* Check if the CPU timer needs to be reprogrammed. */ |
178 | if (vq->do_spt) { | 178 | if (vq->do_spt) { |
179 | __u64 vmax = VTIMER_MAX_SLICE; | 179 | __u64 vmax = VTIMER_MAX_SLICE; |
180 | /* | 180 | /* |
181 | * The inline assembly is equivalent to | 181 | * The inline assembly is equivalent to |
182 | * vq->idle = get_cpu_timer(); | 182 | * vq->idle = get_cpu_timer(); |
183 | * set_cpu_timer(VTIMER_MAX_SLICE); | 183 | * set_cpu_timer(VTIMER_MAX_SLICE); |
184 | * idle->idle_enter = get_clock(); | 184 | * idle->idle_enter = get_clock(); |
185 | * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | | 185 | * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | |
186 | * PSW_MASK_IO | PSW_MASK_EXT); | 186 | * PSW_MASK_IO | PSW_MASK_EXT); |
187 | * The difference is that the inline assembly makes sure that | 187 | * The difference is that the inline assembly makes sure that |
188 | * the last three instructions are stpt, stck and lpsw in that | 188 | * the last three instructions are stpt, stck and lpsw in that |
189 | * order. This is done to increase the precision. | 189 | * order. This is done to increase the precision. |
190 | */ | 190 | */ |
191 | asm volatile( | 191 | asm volatile( |
192 | #ifndef CONFIG_64BIT | 192 | #ifndef CONFIG_64BIT |
193 | " basr 1,0\n" | 193 | " basr 1,0\n" |
194 | "0: ahi 1,1f-0b\n" | 194 | "0: ahi 1,1f-0b\n" |
195 | " st 1,4(%2)\n" | 195 | " st 1,4(%2)\n" |
196 | #else /* CONFIG_64BIT */ | 196 | #else /* CONFIG_64BIT */ |
197 | " larl 1,1f\n" | 197 | " larl 1,1f\n" |
198 | " stg 1,8(%2)\n" | 198 | " stg 1,8(%2)\n" |
199 | #endif /* CONFIG_64BIT */ | 199 | #endif /* CONFIG_64BIT */ |
200 | " stpt 0(%4)\n" | 200 | " stpt 0(%4)\n" |
201 | " spt 0(%5)\n" | 201 | " spt 0(%5)\n" |
202 | " stck 0(%3)\n" | 202 | " stck 0(%3)\n" |
203 | #ifndef CONFIG_64BIT | 203 | #ifndef CONFIG_64BIT |
204 | " lpsw 0(%2)\n" | 204 | " lpsw 0(%2)\n" |
205 | #else /* CONFIG_64BIT */ | 205 | #else /* CONFIG_64BIT */ |
206 | " lpswe 0(%2)\n" | 206 | " lpswe 0(%2)\n" |
207 | #endif /* CONFIG_64BIT */ | 207 | #endif /* CONFIG_64BIT */ |
208 | "1:" | 208 | "1:" |
209 | : "=m" (idle->idle_enter), "=m" (vq->idle) | 209 | : "=m" (idle->idle_enter), "=m" (vq->idle) |
210 | : "a" (&psw), "a" (&idle->idle_enter), | 210 | : "a" (&psw), "a" (&idle->idle_enter), |
211 | "a" (&vq->idle), "a" (&vmax), "m" (vmax), "m" (psw) | 211 | "a" (&vq->idle), "a" (&vmax), "m" (vmax), "m" (psw) |
212 | : "memory", "cc", "1"); | 212 | : "memory", "cc", "1"); |
213 | } else { | 213 | } else { |
214 | /* | 214 | /* |
215 | * The inline assembly is equivalent to | 215 | * The inline assembly is equivalent to |
216 | * vq->idle = get_cpu_timer(); | 216 | * vq->idle = get_cpu_timer(); |
217 | * idle->idle_enter = get_clock(); | 217 | * idle->idle_enter = get_clock(); |
218 | * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | | 218 | * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | |
219 | * PSW_MASK_IO | PSW_MASK_EXT); | 219 | * PSW_MASK_IO | PSW_MASK_EXT); |
220 | * The difference is that the inline assembly makes sure that | 220 | * The difference is that the inline assembly makes sure that |
221 | * the last three instructions are stpt, stck and lpsw in that | 221 | * the last three instructions are stpt, stck and lpsw in that |
222 | * order. This is done to increase the precision. | 222 | * order. This is done to increase the precision. |
223 | */ | 223 | */ |
224 | asm volatile( | 224 | asm volatile( |
225 | #ifndef CONFIG_64BIT | 225 | #ifndef CONFIG_64BIT |
226 | " basr 1,0\n" | 226 | " basr 1,0\n" |
227 | "0: ahi 1,1f-0b\n" | 227 | "0: ahi 1,1f-0b\n" |
228 | " st 1,4(%2)\n" | 228 | " st 1,4(%2)\n" |
229 | #else /* CONFIG_64BIT */ | 229 | #else /* CONFIG_64BIT */ |
230 | " larl 1,1f\n" | 230 | " larl 1,1f\n" |
231 | " stg 1,8(%2)\n" | 231 | " stg 1,8(%2)\n" |
232 | #endif /* CONFIG_64BIT */ | 232 | #endif /* CONFIG_64BIT */ |
233 | " stpt 0(%4)\n" | 233 | " stpt 0(%4)\n" |
234 | " stck 0(%3)\n" | 234 | " stck 0(%3)\n" |
235 | #ifndef CONFIG_64BIT | 235 | #ifndef CONFIG_64BIT |
236 | " lpsw 0(%2)\n" | 236 | " lpsw 0(%2)\n" |
237 | #else /* CONFIG_64BIT */ | 237 | #else /* CONFIG_64BIT */ |
238 | " lpswe 0(%2)\n" | 238 | " lpswe 0(%2)\n" |
239 | #endif /* CONFIG_64BIT */ | 239 | #endif /* CONFIG_64BIT */ |
240 | "1:" | 240 | "1:" |
241 | : "=m" (idle->idle_enter), "=m" (vq->idle) | 241 | : "=m" (idle->idle_enter), "=m" (vq->idle) |
242 | : "a" (&psw), "a" (&idle->idle_enter), | 242 | : "a" (&psw), "a" (&idle->idle_enter), |
243 | "a" (&vq->idle), "m" (psw) | 243 | "a" (&vq->idle), "m" (psw) |
244 | : "memory", "cc", "1"); | 244 | : "memory", "cc", "1"); |
245 | } | 245 | } |
246 | } | 246 | } |
247 | 247 | ||
248 | cputime64_t s390_get_idle_time(int cpu) | 248 | cputime64_t s390_get_idle_time(int cpu) |
249 | { | 249 | { |
250 | struct s390_idle_data *idle; | 250 | struct s390_idle_data *idle; |
251 | unsigned long long now, idle_time, idle_enter; | 251 | unsigned long long now, idle_time, idle_enter; |
252 | unsigned int sequence; | 252 | unsigned int sequence; |
253 | 253 | ||
254 | idle = &per_cpu(s390_idle, cpu); | 254 | idle = &per_cpu(s390_idle, cpu); |
255 | 255 | ||
256 | now = get_clock(); | 256 | now = get_clock(); |
257 | repeat: | 257 | repeat: |
258 | sequence = idle->sequence; | 258 | sequence = idle->sequence; |
259 | smp_rmb(); | 259 | smp_rmb(); |
260 | if (sequence & 1) | 260 | if (sequence & 1) |
261 | goto repeat; | 261 | goto repeat; |
262 | idle_time = 0; | 262 | idle_time = 0; |
263 | idle_enter = idle->idle_enter; | 263 | idle_enter = idle->idle_enter; |
264 | if (idle_enter != 0ULL && idle_enter < now) | 264 | if (idle_enter != 0ULL && idle_enter < now) |
265 | idle_time = now - idle_enter; | 265 | idle_time = now - idle_enter; |
266 | smp_rmb(); | 266 | smp_rmb(); |
267 | if (idle->sequence != sequence) | 267 | if (idle->sequence != sequence) |
268 | goto repeat; | 268 | goto repeat; |
269 | return idle_time; | 269 | return idle_time; |
270 | } | 270 | } |
271 | 271 | ||
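
The idle->sequence handshake used by vtime_start_cpu() (writer) and s390_get_idle_time() (reader) is a lockless seqlock-style protocol: the writer bumps the counter to an odd value, updates the fields, and bumps it back to even; the reader retries whenever it sees an odd counter or the counter changed across its read. A self-contained sketch of the protocol, with a plain struct standing in for the per-cpu idle data and comments marking where the kernel uses smp_wmb()/smp_rmb():

    #include <stdio.h>

    struct idle_data {
            unsigned int sequence;
            unsigned long long idle_time;
    };

    static void writer_update(struct idle_data *d, unsigned long long delta)
    {
            d->sequence++;          /* odd: update in progress (kernel: smp_wmb()) */
            d->idle_time += delta;
            d->sequence++;          /* even again: update complete */
    }

    static unsigned long long reader_snapshot(struct idle_data *d)
    {
            unsigned int seq;
            unsigned long long val;
    repeat:
            seq = d->sequence;      /* kernel: smp_rmb() after this load */
            if (seq & 1)
                    goto repeat;    /* writer active, retry */
            val = d->idle_time;
            if (d->sequence != seq)
                    goto repeat;    /* a writer intervened, retry */
            return val;
    }

    int main(void)
    {
            struct idle_data d = { 0, 0 };

            writer_update(&d, 42);
            printf("idle time: %llu\n", reader_snapshot(&d));
            return 0;
    }
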
272 | /* | 272 | /* |
273 | * Sorted add to a list. The list is searched linearly until the | 273 | * Sorted add to a list. The list is searched linearly until the |
274 | * first bigger element is found. | 274 | * first bigger element is found. |
275 | */ | 275 | */ |
276 | static void list_add_sorted(struct vtimer_list *timer, struct list_head *head) | 276 | static void list_add_sorted(struct vtimer_list *timer, struct list_head *head) |
277 | { | 277 | { |
278 | struct vtimer_list *event; | 278 | struct vtimer_list *event; |
279 | 279 | ||
280 | list_for_each_entry(event, head, entry) { | 280 | list_for_each_entry(event, head, entry) { |
281 | if (event->expires > timer->expires) { | 281 | if (event->expires > timer->expires) { |
282 | list_add_tail(&timer->entry, &event->entry); | 282 | list_add_tail(&timer->entry, &event->entry); |
283 | return; | 283 | return; |
284 | } | 284 | } |
285 | } | 285 | } |
286 | list_add_tail(&timer->entry, head); | 286 | list_add_tail(&timer->entry, head); |
287 | } | 287 | } |
288 | 288 | ||
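
list_add_sorted() keeps the per-cpu timer queue ordered by expiry: walk the list until the first entry with a later expiry and insert in front of it, otherwise append at the tail (equal expiries stay in insertion order). The same idea on a plain singly linked list, as a self-contained stand-in for the kernel's list_head machinery:

    #include <stdio.h>

    struct node {
            unsigned long long expires;
            struct node *next;
    };

    /* Insert before the first node with a bigger expiry, else at the tail. */
    static void add_sorted(struct node **head, struct node *n)
    {
            struct node **pp = head;

            while (*pp && (*pp)->expires <= n->expires)
                    pp = &(*pp)->next;
            n->next = *pp;
            *pp = n;
    }

    int main(void)
    {
            struct node a = { 30, NULL }, b = { 10, NULL }, c = { 20, NULL };
            struct node *head = NULL, *p;

            add_sorted(&head, &a);
            add_sorted(&head, &b);
            add_sorted(&head, &c);
            for (p = head; p; p = p->next)
                    printf("%llu\n", p->expires);   /* prints 10, 20, 30 */
            return 0;
    }
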
289 | /* | 289 | /* |
290 | * Run the callback functions of expired vtimer events. | 290 | * Run the callback functions of expired vtimer events. |
291 | * Called from within the interrupt handler. | 291 | * Called from within the interrupt handler. |
292 | */ | 292 | */ |
293 | static void do_callbacks(struct list_head *cb_list) | 293 | static void do_callbacks(struct list_head *cb_list) |
294 | { | 294 | { |
295 | struct vtimer_queue *vq; | 295 | struct vtimer_queue *vq; |
296 | struct vtimer_list *event, *tmp; | 296 | struct vtimer_list *event, *tmp; |
297 | 297 | ||
298 | if (list_empty(cb_list)) | 298 | if (list_empty(cb_list)) |
299 | return; | 299 | return; |
300 | 300 | ||
301 | vq = &__get_cpu_var(virt_cpu_timer); | 301 | vq = &__get_cpu_var(virt_cpu_timer); |
302 | 302 | ||
303 | list_for_each_entry_safe(event, tmp, cb_list, entry) { | 303 | list_for_each_entry_safe(event, tmp, cb_list, entry) { |
304 | list_del_init(&event->entry); | 304 | list_del_init(&event->entry); |
305 | (event->function)(event->data); | 305 | (event->function)(event->data); |
306 | if (event->interval) { | 306 | if (event->interval) { |
307 | /* Recharge interval timer */ | 307 | /* Recharge interval timer */ |
308 | event->expires = event->interval + vq->elapsed; | 308 | event->expires = event->interval + vq->elapsed; |
309 | spin_lock(&vq->lock); | 309 | spin_lock(&vq->lock); |
310 | list_add_sorted(event, &vq->list); | 310 | list_add_sorted(event, &vq->list); |
311 | spin_unlock(&vq->lock); | 311 | spin_unlock(&vq->lock); |
312 | } | 312 | } |
313 | } | 313 | } |
314 | } | 314 | } |
315 | 315 | ||
316 | /* | 316 | /* |
317 | * Handler for the virtual CPU timer. | 317 | * Handler for the virtual CPU timer. |
318 | */ | 318 | */ |
319 | static void do_cpu_timer_interrupt(unsigned int ext_int_code, | 319 | static void do_cpu_timer_interrupt(unsigned int ext_int_code, |
320 | unsigned int param32, unsigned long param64) | 320 | unsigned int param32, unsigned long param64) |
321 | { | 321 | { |
322 | struct vtimer_queue *vq; | 322 | struct vtimer_queue *vq; |
323 | struct vtimer_list *event, *tmp; | 323 | struct vtimer_list *event, *tmp; |
324 | struct list_head cb_list; /* the callback queue */ | 324 | struct list_head cb_list; /* the callback queue */ |
325 | __u64 elapsed, next; | 325 | __u64 elapsed, next; |
326 | 326 | ||
327 | kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++; | 327 | kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++; |
328 | INIT_LIST_HEAD(&cb_list); | 328 | INIT_LIST_HEAD(&cb_list); |
329 | vq = &__get_cpu_var(virt_cpu_timer); | 329 | vq = &__get_cpu_var(virt_cpu_timer); |
330 | 330 | ||
331 | /* walk timer list, fire all expired events */ | 331 | /* walk timer list, fire all expired events */ |
332 | spin_lock(&vq->lock); | 332 | spin_lock(&vq->lock); |
333 | 333 | ||
334 | elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer); | 334 | elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer); |
335 | BUG_ON((s64) elapsed < 0); | 335 | BUG_ON((s64) elapsed < 0); |
336 | vq->elapsed = 0; | 336 | vq->elapsed = 0; |
337 | list_for_each_entry_safe(event, tmp, &vq->list, entry) { | 337 | list_for_each_entry_safe(event, tmp, &vq->list, entry) { |
338 | if (event->expires < elapsed) | 338 | if (event->expires < elapsed) |
339 | /* move expired timer to the callback queue */ | 339 | /* move expired timer to the callback queue */ |
340 | list_move_tail(&event->entry, &cb_list); | 340 | list_move_tail(&event->entry, &cb_list); |
341 | else | 341 | else |
342 | event->expires -= elapsed; | 342 | event->expires -= elapsed; |
343 | } | 343 | } |
344 | spin_unlock(&vq->lock); | 344 | spin_unlock(&vq->lock); |
345 | 345 | ||
346 | vq->do_spt = list_empty(&cb_list); | 346 | vq->do_spt = list_empty(&cb_list); |
347 | do_callbacks(&cb_list); | 347 | do_callbacks(&cb_list); |
348 | 348 | ||
349 | /* next event is first in list */ | 349 | /* next event is first in list */ |
350 | next = VTIMER_MAX_SLICE; | 350 | next = VTIMER_MAX_SLICE; |
351 | spin_lock(&vq->lock); | 351 | spin_lock(&vq->lock); |
352 | if (!list_empty(&vq->list)) { | 352 | if (!list_empty(&vq->list)) { |
353 | event = list_first_entry(&vq->list, struct vtimer_list, entry); | 353 | event = list_first_entry(&vq->list, struct vtimer_list, entry); |
354 | next = event->expires; | 354 | next = event->expires; |
355 | } else | 355 | } else |
356 | vq->do_spt = 0; | 356 | vq->do_spt = 0; |
357 | spin_unlock(&vq->lock); | 357 | spin_unlock(&vq->lock); |
358 | /* | 358 | /* |
359 | * To improve precision add the time spent by the | 359 | * To improve precision add the time spent by the |
360 | * interrupt handler to the elapsed time. | 360 | * interrupt handler to the elapsed time. |
361 | * Note: the CPU timer counts down and we got an interrupt, | 361 | * Note: the CPU timer counts down and we got an interrupt, |
362 | * so the current content is negative. | 362 | * so the current content is negative. |
363 | */ | 363 | */ |
364 | elapsed = S390_lowcore.async_enter_timer - get_vtimer(); | 364 | elapsed = S390_lowcore.async_enter_timer - get_vtimer(); |
365 | set_vtimer(next - elapsed); | 365 | set_vtimer(next - elapsed); |
366 | vq->timer = next - elapsed; | 366 | vq->timer = next - elapsed; |
367 | vq->elapsed = elapsed; | 367 | vq->elapsed = elapsed; |
368 | } | 368 | } |
369 | 369 | ||
370 | void init_virt_timer(struct vtimer_list *timer) | 370 | void init_virt_timer(struct vtimer_list *timer) |
371 | { | 371 | { |
372 | timer->function = NULL; | 372 | timer->function = NULL; |
373 | INIT_LIST_HEAD(&timer->entry); | 373 | INIT_LIST_HEAD(&timer->entry); |
374 | } | 374 | } |
375 | EXPORT_SYMBOL(init_virt_timer); | 375 | EXPORT_SYMBOL(init_virt_timer); |
376 | 376 | ||
377 | static inline int vtimer_pending(struct vtimer_list *timer) | 377 | static inline int vtimer_pending(struct vtimer_list *timer) |
378 | { | 378 | { |
379 | return (!list_empty(&timer->entry)); | 379 | return (!list_empty(&timer->entry)); |
380 | } | 380 | } |
381 | 381 | ||
382 | /* | 382 | /* |
383 | * this function should only run on the specified CPU | 383 | * this function should only run on the specified CPU |
384 | */ | 384 | */ |
385 | static void internal_add_vtimer(struct vtimer_list *timer) | 385 | static void internal_add_vtimer(struct vtimer_list *timer) |
386 | { | 386 | { |
387 | struct vtimer_queue *vq; | 387 | struct vtimer_queue *vq; |
388 | unsigned long flags; | 388 | unsigned long flags; |
389 | __u64 left, expires; | 389 | __u64 left, expires; |
390 | 390 | ||
391 | vq = &per_cpu(virt_cpu_timer, timer->cpu); | 391 | vq = &per_cpu(virt_cpu_timer, timer->cpu); |
392 | spin_lock_irqsave(&vq->lock, flags); | 392 | spin_lock_irqsave(&vq->lock, flags); |
393 | 393 | ||
394 | BUG_ON(timer->cpu != smp_processor_id()); | 394 | BUG_ON(timer->cpu != smp_processor_id()); |
395 | 395 | ||
396 | if (list_empty(&vq->list)) { | 396 | if (list_empty(&vq->list)) { |
397 | /* First timer on this cpu, just program it. */ | 397 | /* First timer on this cpu, just program it. */ |
398 | list_add(&timer->entry, &vq->list); | 398 | list_add(&timer->entry, &vq->list); |
399 | set_vtimer(timer->expires); | 399 | set_vtimer(timer->expires); |
400 | vq->timer = timer->expires; | 400 | vq->timer = timer->expires; |
401 | vq->elapsed = 0; | 401 | vq->elapsed = 0; |
402 | } else { | 402 | } else { |
403 | /* Check progress of old timers. */ | 403 | /* Check progress of old timers. */ |
404 | expires = timer->expires; | 404 | expires = timer->expires; |
405 | left = get_vtimer(); | 405 | left = get_vtimer(); |
406 | if (likely((s64) expires < (s64) left)) { | 406 | if (likely((s64) expires < (s64) left)) { |
407 | /* The new timer expires before the current timer. */ | 407 | /* The new timer expires before the current timer. */ |
408 | set_vtimer(expires); | 408 | set_vtimer(expires); |
409 | vq->elapsed += vq->timer - left; | 409 | vq->elapsed += vq->timer - left; |
410 | vq->timer = expires; | 410 | vq->timer = expires; |
411 | } else { | 411 | } else { |
412 | vq->elapsed += vq->timer - left; | 412 | vq->elapsed += vq->timer - left; |
413 | vq->timer = left; | 413 | vq->timer = left; |
414 | } | 414 | } |
415 | /* Insert new timer into per cpu list. */ | 415 | /* Insert new timer into per cpu list. */ |
416 | timer->expires += vq->elapsed; | 416 | timer->expires += vq->elapsed; |
417 | list_add_sorted(timer, &vq->list); | 417 | list_add_sorted(timer, &vq->list); |
418 | } | 418 | } |
419 | 419 | ||
420 | spin_unlock_irqrestore(&vq->lock, flags); | 420 | spin_unlock_irqrestore(&vq->lock, flags); |
421 | /* release CPU acquired in prepare_vtimer or mod_virt_timer() */ | 421 | /* release CPU acquired in prepare_vtimer or mod_virt_timer() */ |
422 | put_cpu(); | 422 | put_cpu(); |
423 | } | 423 | } |
424 | 424 | ||
425 | static inline void prepare_vtimer(struct vtimer_list *timer) | 425 | static inline void prepare_vtimer(struct vtimer_list *timer) |
426 | { | 426 | { |
427 | BUG_ON(!timer->function); | 427 | BUG_ON(!timer->function); |
428 | BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE); | 428 | BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE); |
429 | BUG_ON(vtimer_pending(timer)); | 429 | BUG_ON(vtimer_pending(timer)); |
430 | timer->cpu = get_cpu(); | 430 | timer->cpu = get_cpu(); |
431 | } | 431 | } |
432 | 432 | ||
433 | /* | 433 | /* |
434 | * add_virt_timer - add a one-shot virtual CPU timer | 434 | * add_virt_timer - add a one-shot virtual CPU timer |
435 | */ | 435 | */ |
436 | void add_virt_timer(void *new) | 436 | void add_virt_timer(void *new) |
437 | { | 437 | { |
438 | struct vtimer_list *timer; | 438 | struct vtimer_list *timer; |
439 | 439 | ||
440 | timer = (struct vtimer_list *)new; | 440 | timer = (struct vtimer_list *)new; |
441 | prepare_vtimer(timer); | 441 | prepare_vtimer(timer); |
442 | timer->interval = 0; | 442 | timer->interval = 0; |
443 | internal_add_vtimer(timer); | 443 | internal_add_vtimer(timer); |
444 | } | 444 | } |
445 | EXPORT_SYMBOL(add_virt_timer); | 445 | EXPORT_SYMBOL(add_virt_timer); |
446 | 446 | ||
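
Arming a one-shot vtimer with this API looks roughly as follows; this is an untested kernel-context sketch, and the callback name, data value, and expiry are illustrative. prepare_vtimer() enforces that function is set and that expires is nonzero and at most VTIMER_MAX_SLICE:

    #include <asm/timer.h>  /* struct vtimer_list, init_virt_timer(), add_virt_timer() */

    static struct vtimer_list my_timer;     /* hypothetical timer */

    static void my_timer_fn(unsigned long data)
    {
            /* runs from do_cpu_timer_interrupt() once the timer expires */
    }

    static void arm_example(void)
    {
            init_virt_timer(&my_timer);
            my_timer.function = my_timer_fn;
            my_timer.data = 0;
            my_timer.expires = 1ULL << 20;  /* CPU-timer units, illustrative */
            add_virt_timer(&my_timer);      /* one-shot: interval is cleared */
    }
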
447 | /* | 447 | /* |
448 | * add_virt_timer_periodic - add an interval virtual CPU timer | 448 | * add_virt_timer_periodic - add an interval virtual CPU timer |
449 | */ | 449 | */ |
450 | void add_virt_timer_periodic(void *new) | 450 | void add_virt_timer_periodic(void *new) |
451 | { | 451 | { |
452 | struct vtimer_list *timer; | 452 | struct vtimer_list *timer; |
453 | 453 | ||
454 | timer = (struct vtimer_list *)new; | 454 | timer = (struct vtimer_list *)new; |
455 | prepare_vtimer(timer); | 455 | prepare_vtimer(timer); |
456 | timer->interval = timer->expires; | 456 | timer->interval = timer->expires; |
457 | internal_add_vtimer(timer); | 457 | internal_add_vtimer(timer); |
458 | } | 458 | } |
459 | EXPORT_SYMBOL(add_virt_timer_periodic); | 459 | EXPORT_SYMBOL(add_virt_timer_periodic); |
460 | 460 | ||
461 | int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic) | 461 | int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic) |
462 | { | 462 | { |
463 | struct vtimer_queue *vq; | 463 | struct vtimer_queue *vq; |
464 | unsigned long flags; | 464 | unsigned long flags; |
465 | int cpu; | 465 | int cpu; |
466 | 466 | ||
467 | BUG_ON(!timer->function); | 467 | BUG_ON(!timer->function); |
468 | BUG_ON(!expires || expires > VTIMER_MAX_SLICE); | 468 | BUG_ON(!expires || expires > VTIMER_MAX_SLICE); |
469 | 469 | ||
470 | if (timer->expires == expires && vtimer_pending(timer)) | 470 | if (timer->expires == expires && vtimer_pending(timer)) |
471 | return 1; | 471 | return 1; |
472 | 472 | ||
473 | cpu = get_cpu(); | 473 | cpu = get_cpu(); |
474 | vq = &per_cpu(virt_cpu_timer, cpu); | 474 | vq = &per_cpu(virt_cpu_timer, cpu); |
475 | 475 | ||
476 | /* disable interrupts before test if timer is pending */ | 476 | /* disable interrupts before test if timer is pending */ |
477 | spin_lock_irqsave(&vq->lock, flags); | 477 | spin_lock_irqsave(&vq->lock, flags); |
478 | 478 | ||
479 | /* if timer isn't pending add it on the current CPU */ | 479 | /* if timer isn't pending add it on the current CPU */ |
480 | if (!vtimer_pending(timer)) { | 480 | if (!vtimer_pending(timer)) { |
481 | spin_unlock_irqrestore(&vq->lock, flags); | 481 | spin_unlock_irqrestore(&vq->lock, flags); |
482 | 482 | ||
483 | if (periodic) | 483 | if (periodic) |
484 | timer->interval = expires; | 484 | timer->interval = expires; |
485 | else | 485 | else |
486 | timer->interval = 0; | 486 | timer->interval = 0; |
487 | timer->expires = expires; | 487 | timer->expires = expires; |
488 | timer->cpu = cpu; | 488 | timer->cpu = cpu; |
489 | internal_add_vtimer(timer); | 489 | internal_add_vtimer(timer); |
490 | return 0; | 490 | return 0; |
491 | } | 491 | } |
492 | 492 | ||
493 | /* check if we run on the right CPU */ | 493 | /* check if we run on the right CPU */ |
494 | BUG_ON(timer->cpu != cpu); | 494 | BUG_ON(timer->cpu != cpu); |
495 | 495 | ||
496 | list_del_init(&timer->entry); | 496 | list_del_init(&timer->entry); |
497 | timer->expires = expires; | 497 | timer->expires = expires; |
498 | if (periodic) | 498 | if (periodic) |
499 | timer->interval = expires; | 499 | timer->interval = expires; |
500 | 500 | ||
501 | /* the timer can't expire anymore so we can release the lock */ | 501 | /* the timer can't expire anymore so we can release the lock */ |
502 | spin_unlock_irqrestore(&vq->lock, flags); | 502 | spin_unlock_irqrestore(&vq->lock, flags); |
503 | internal_add_vtimer(timer); | 503 | internal_add_vtimer(timer); |
504 | return 1; | 504 | return 1; |
505 | } | 505 | } |
506 | 506 | ||
507 | /* | 507 | /* |
508 | * If we change a pending timer, this function must be called on the | 508 | * If we change a pending timer, this function must be called on the |
509 | * CPU where the timer is running. | 509 | * CPU where the timer is running. |
510 | * | 510 | * |
511 | * returns whether it has modified a pending timer (1) or not (0) | 511 | * returns whether it has modified a pending timer (1) or not (0) |
512 | */ | 512 | */ |
513 | int mod_virt_timer(struct vtimer_list *timer, __u64 expires) | 513 | int mod_virt_timer(struct vtimer_list *timer, __u64 expires) |
514 | { | 514 | { |
515 | return __mod_vtimer(timer, expires, 0); | 515 | return __mod_vtimer(timer, expires, 0); |
516 | } | 516 | } |
517 | EXPORT_SYMBOL(mod_virt_timer); | 517 | EXPORT_SYMBOL(mod_virt_timer); |
518 | 518 | ||
519 | /* | 519 | /* |
520 | * If we change a pending timer, this function must be called on the | 520 | * If we change a pending timer, this function must be called on the |
521 | * CPU where the timer is running. | 521 | * CPU where the timer is running. |
522 | * | 522 | * |
523 | * returns whether it has modified a pending timer (1) or not (0) | 523 | * returns whether it has modified a pending timer (1) or not (0) |
524 | */ | 524 | */ |
525 | int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires) | 525 | int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires) |
526 | { | 526 | { |
527 | return __mod_vtimer(timer, expires, 1); | 527 | return __mod_vtimer(timer, expires, 1); |
528 | } | 528 | } |
529 | EXPORT_SYMBOL(mod_virt_timer_periodic); | 529 | EXPORT_SYMBOL(mod_virt_timer_periodic); |
530 | 530 | ||
531 | /* | 531 | /* |
532 | * delete a virtual timer | 532 | * delete a virtual timer |
533 | * | 533 | * |
534 | * returns whether the deleted timer was pending (1) or not (0) | 534 | * returns whether the deleted timer was pending (1) or not (0) |
535 | */ | 535 | */ |
536 | int del_virt_timer(struct vtimer_list *timer) | 536 | int del_virt_timer(struct vtimer_list *timer) |
537 | { | 537 | { |
538 | unsigned long flags; | 538 | unsigned long flags; |
539 | struct vtimer_queue *vq; | 539 | struct vtimer_queue *vq; |
540 | 540 | ||
541 | /* check if timer is pending */ | 541 | /* check if timer is pending */ |
542 | if (!vtimer_pending(timer)) | 542 | if (!vtimer_pending(timer)) |
543 | return 0; | 543 | return 0; |
544 | 544 | ||
545 | vq = &per_cpu(virt_cpu_timer, timer->cpu); | 545 | vq = &per_cpu(virt_cpu_timer, timer->cpu); |
546 | spin_lock_irqsave(&vq->lock, flags); | 546 | spin_lock_irqsave(&vq->lock, flags); |
547 | 547 | ||
548 | /* we don't interrupt a running timer, just let it expire! */ | 548 | /* we don't interrupt a running timer, just let it expire! */ |
549 | list_del_init(&timer->entry); | 549 | list_del_init(&timer->entry); |
550 | 550 | ||
551 | spin_unlock_irqrestore(&vq->lock, flags); | 551 | spin_unlock_irqrestore(&vq->lock, flags); |
552 | return 1; | 552 | return 1; |
553 | } | 553 | } |
554 | EXPORT_SYMBOL(del_virt_timer); | 554 | EXPORT_SYMBOL(del_virt_timer); |
555 | 555 | ||
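
To summarize the return-value semantics spelled out in the comments above: mod_virt_timer()/mod_virt_timer_periodic() return 1 when they changed a timer that was already pending (and must then be called on that timer's CPU) and 0 when they armed a non-pending timer on the current CPU; del_virt_timer() returns 1 only if the timer was still queued. A hedged kernel-context fragment, continuing the hypothetical timer from the earlier sketch:

    static void modify_and_cancel_example(struct vtimer_list *timer)
    {
            if (mod_virt_timer(timer, 1ULL << 21))
                    ;       /* 1: a pending timer was updated in place */
            else
                    ;       /* 0: timer was not pending, now armed here */

            if (del_virt_timer(timer))
                    ;       /* 1: it was still queued and is now removed */
    }
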
556 | /* | 556 | /* |
557 | * Start the virtual CPU timer on the current CPU. | 557 | * Start the virtual CPU timer on the current CPU. |
558 | */ | 558 | */ |
559 | void init_cpu_vtimer(void) | 559 | void init_cpu_vtimer(void) |
560 | { | 560 | { |
561 | struct vtimer_queue *vq; | 561 | struct vtimer_queue *vq; |
562 | 562 | ||
563 | /* initialize per cpu vtimer structure */ | 563 | /* initialize per cpu vtimer structure */ |
564 | vq = &__get_cpu_var(virt_cpu_timer); | 564 | vq = &__get_cpu_var(virt_cpu_timer); |
565 | INIT_LIST_HEAD(&vq->list); | 565 | INIT_LIST_HEAD(&vq->list); |
566 | spin_lock_init(&vq->lock); | 566 | spin_lock_init(&vq->lock); |
567 | 567 | ||
568 | /* enable cpu timer interrupts */ | 568 | /* enable cpu timer interrupts */ |
569 | __ctl_set_bit(0,10); | 569 | __ctl_set_bit(0,10); |
570 | } | 570 | } |
571 | 571 | ||
572 | static int __cpuinit s390_nohz_notify(struct notifier_block *self, | 572 | static int __cpuinit s390_nohz_notify(struct notifier_block *self, |
573 | unsigned long action, void *hcpu) | 573 | unsigned long action, void *hcpu) |
574 | { | 574 | { |
575 | struct s390_idle_data *idle; | 575 | struct s390_idle_data *idle; |
576 | long cpu = (long) hcpu; | 576 | long cpu = (long) hcpu; |
577 | 577 | ||
578 | idle = &per_cpu(s390_idle, cpu); | 578 | idle = &per_cpu(s390_idle, cpu); |
579 | switch (action) { | 579 | switch (action) { |
580 | case CPU_DYING: | 580 | case CPU_DYING: |
581 | case CPU_DYING_FROZEN: | 581 | case CPU_DYING_FROZEN: |
582 | idle->nohz_delay = 0; | 582 | idle->nohz_delay = 0; |
583 | default: | 583 | default: |
584 | break; | 584 | break; |
585 | } | 585 | } |
586 | return NOTIFY_OK; | 586 | return NOTIFY_OK; |
587 | } | 587 | } |
588 | 588 | ||
589 | void __init vtime_init(void) | 589 | void __init vtime_init(void) |
590 | { | 590 | { |
591 | /* request the cpu timer external interrupt */ | 591 | /* request the cpu timer external interrupt */ |
592 | if (register_external_interrupt(0x1005, do_cpu_timer_interrupt)) | 592 | if (register_external_interrupt(0x1005, do_cpu_timer_interrupt)) |
593 | panic("Couldn't request external interrupt 0x1005"); | 593 | panic("Couldn't request external interrupt 0x1005"); |
594 | 594 | ||
595 | /* Enable cpu timer interrupts on the boot cpu. */ | 595 | /* Enable cpu timer interrupts on the boot cpu. */ |
596 | init_cpu_vtimer(); | 596 | init_cpu_vtimer(); |
597 | cpu_notifier(s390_nohz_notify, 0); | 597 | cpu_notifier(s390_nohz_notify, 0); |
598 | } | 598 | } |
599 | 599 |
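
The registration in vtime_init() shows the general pattern for external interrupt sources after this merge: pick the 16-bit interruption code (0x1005 is the CPU timer), supply a handler with the (ext_int_code, param32, param64) signature, and bail out if registration fails. A hedged sketch for a hypothetical source with code 0x1234:

    #include <linux/init.h>
    #include <linux/errno.h>
    #include <asm/irq.h>    /* register_external_interrupt() now lives here */

    /* Hypothetical handler; the arguments mirror do_cpu_timer_interrupt(). */
    static void my_ext_handler(unsigned int ext_int_code,
                               unsigned int param32, unsigned long param64)
    {
            /* dispatch work for external interrupt code 0x1234 */
    }

    static int __init my_source_init(void)
    {
            if (register_external_interrupt(0x1234, my_ext_handler))
                    return -EINVAL; /* illustrative error handling */
            return 0;
    }
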
arch/s390/mm/fault.c
1 | /* | 1 | /* |
2 | * arch/s390/mm/fault.c | 2 | * arch/s390/mm/fault.c |
3 | * | 3 | * |
4 | * S390 version | 4 | * S390 version |
5 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation |
6 | * Author(s): Hartmut Penner (hp@de.ibm.com) | 6 | * Author(s): Hartmut Penner (hp@de.ibm.com) |
7 | * Ulrich Weigand (uweigand@de.ibm.com) | 7 | * Ulrich Weigand (uweigand@de.ibm.com) |
8 | * | 8 | * |
9 | * Derived from "arch/i386/mm/fault.c" | 9 | * Derived from "arch/i386/mm/fault.c" |
10 | * Copyright (C) 1995 Linus Torvalds | 10 | * Copyright (C) 1995 Linus Torvalds |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/kernel_stat.h> | 13 | #include <linux/kernel_stat.h> |
14 | #include <linux/perf_event.h> | 14 | #include <linux/perf_event.h> |
15 | #include <linux/signal.h> | 15 | #include <linux/signal.h> |
16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/errno.h> | 18 | #include <linux/errno.h> |
19 | #include <linux/string.h> | 19 | #include <linux/string.h> |
20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
21 | #include <linux/ptrace.h> | 21 | #include <linux/ptrace.h> |
22 | #include <linux/mman.h> | 22 | #include <linux/mman.h> |
23 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
24 | #include <linux/compat.h> | 24 | #include <linux/compat.h> |
25 | #include <linux/smp.h> | 25 | #include <linux/smp.h> |
26 | #include <linux/kdebug.h> | 26 | #include <linux/kdebug.h> |
27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
28 | #include <linux/console.h> | 28 | #include <linux/console.h> |
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/hardirq.h> | 30 | #include <linux/hardirq.h> |
31 | #include <linux/kprobes.h> | 31 | #include <linux/kprobes.h> |
32 | #include <linux/uaccess.h> | 32 | #include <linux/uaccess.h> |
33 | #include <linux/hugetlb.h> | 33 | #include <linux/hugetlb.h> |
34 | #include <asm/asm-offsets.h> | 34 | #include <asm/asm-offsets.h> |
35 | #include <asm/system.h> | 35 | #include <asm/system.h> |
36 | #include <asm/pgtable.h> | 36 | #include <asm/pgtable.h> |
37 | #include <asm/s390_ext.h> | 37 | #include <asm/irq.h> |
38 | #include <asm/mmu_context.h> | 38 | #include <asm/mmu_context.h> |
39 | #include <asm/compat.h> | 39 | #include <asm/compat.h> |
40 | #include "../kernel/entry.h" | 40 | #include "../kernel/entry.h" |
41 | 41 | ||
42 | #ifndef CONFIG_64BIT | 42 | #ifndef CONFIG_64BIT |
43 | #define __FAIL_ADDR_MASK 0x7ffff000 | 43 | #define __FAIL_ADDR_MASK 0x7ffff000 |
44 | #define __SUBCODE_MASK 0x0200 | 44 | #define __SUBCODE_MASK 0x0200 |
45 | #define __PF_RES_FIELD 0ULL | 45 | #define __PF_RES_FIELD 0ULL |
46 | #else /* CONFIG_64BIT */ | 46 | #else /* CONFIG_64BIT */ |
47 | #define __FAIL_ADDR_MASK -4096L | 47 | #define __FAIL_ADDR_MASK -4096L |
48 | #define __SUBCODE_MASK 0x0600 | 48 | #define __SUBCODE_MASK 0x0600 |
49 | #define __PF_RES_FIELD 0x8000000000000000ULL | 49 | #define __PF_RES_FIELD 0x8000000000000000ULL |
50 | #endif /* CONFIG_64BIT */ | 50 | #endif /* CONFIG_64BIT */ |
51 | 51 | ||
52 | #define VM_FAULT_BADCONTEXT 0x010000 | 52 | #define VM_FAULT_BADCONTEXT 0x010000 |
53 | #define VM_FAULT_BADMAP 0x020000 | 53 | #define VM_FAULT_BADMAP 0x020000 |
54 | #define VM_FAULT_BADACCESS 0x040000 | 54 | #define VM_FAULT_BADACCESS 0x040000 |
55 | 55 | ||
56 | static unsigned long store_indication; | 56 | static unsigned long store_indication; |
57 | 57 | ||
58 | void fault_init(void) | 58 | void fault_init(void) |
59 | { | 59 | { |
60 | if (test_facility(2) && test_facility(75)) | 60 | if (test_facility(2) && test_facility(75)) |
61 | store_indication = 0xc00; | 61 | store_indication = 0xc00; |
62 | } | 62 | } |
63 | 63 | ||
64 | static inline int notify_page_fault(struct pt_regs *regs) | 64 | static inline int notify_page_fault(struct pt_regs *regs) |
65 | { | 65 | { |
66 | int ret = 0; | 66 | int ret = 0; |
67 | 67 | ||
68 | /* kprobe_running() needs smp_processor_id() */ | 68 | /* kprobe_running() needs smp_processor_id() */ |
69 | if (kprobes_built_in() && !user_mode(regs)) { | 69 | if (kprobes_built_in() && !user_mode(regs)) { |
70 | preempt_disable(); | 70 | preempt_disable(); |
71 | if (kprobe_running() && kprobe_fault_handler(regs, 14)) | 71 | if (kprobe_running() && kprobe_fault_handler(regs, 14)) |
72 | ret = 1; | 72 | ret = 1; |
73 | preempt_enable(); | 73 | preempt_enable(); |
74 | } | 74 | } |
75 | return ret; | 75 | return ret; |
76 | } | 76 | } |
77 | 77 | ||
78 | 78 | ||
79 | /* | 79 | /* |
80 | * Unlock any spinlocks which will prevent us from getting the | 80 | * Unlock any spinlocks which will prevent us from getting the |
81 | * message out. | 81 | * message out. |
82 | */ | 82 | */ |
83 | void bust_spinlocks(int yes) | 83 | void bust_spinlocks(int yes) |
84 | { | 84 | { |
85 | if (yes) { | 85 | if (yes) { |
86 | oops_in_progress = 1; | 86 | oops_in_progress = 1; |
87 | } else { | 87 | } else { |
88 | int loglevel_save = console_loglevel; | 88 | int loglevel_save = console_loglevel; |
89 | console_unblank(); | 89 | console_unblank(); |
90 | oops_in_progress = 0; | 90 | oops_in_progress = 0; |
91 | /* | 91 | /* |
92 | * OK, the message is on the console. Now we call printk() | 92 | * OK, the message is on the console. Now we call printk() |
93 | * without oops_in_progress set so that printk will give klogd | 93 | * without oops_in_progress set so that printk will give klogd |
94 | * a poke. Hold onto your hats... | 94 | * a poke. Hold onto your hats... |
95 | */ | 95 | */ |
96 | console_loglevel = 15; | 96 | console_loglevel = 15; |
97 | printk(" "); | 97 | printk(" "); |
98 | console_loglevel = loglevel_save; | 98 | console_loglevel = loglevel_save; |
99 | } | 99 | } |
100 | } | 100 | } |
101 | 101 | ||
102 | /* | 102 | /* |
103 | * Returns the address space associated with the fault. | 103 | * Returns the address space associated with the fault. |
104 | * Returns 0 for kernel space and 1 for user space. | 104 | * Returns 0 for kernel space and 1 for user space. |
105 | */ | 105 | */ |
106 | static inline int user_space_fault(unsigned long trans_exc_code) | 106 | static inline int user_space_fault(unsigned long trans_exc_code) |
107 | { | 107 | { |
108 | /* | 108 | /* |
109 | * The lowest two bits of the translation exception | 109 | * The lowest two bits of the translation exception |
110 | * identification indicate which paging table was used. | 110 | * identification indicate which paging table was used. |
111 | */ | 111 | */ |
112 | trans_exc_code &= 3; | 112 | trans_exc_code &= 3; |
113 | if (trans_exc_code == 2) | 113 | if (trans_exc_code == 2) |
114 | /* Access via secondary space, set_fs setting decides */ | 114 | /* Access via secondary space, set_fs setting decides */ |
115 | return current->thread.mm_segment.ar4; | 115 | return current->thread.mm_segment.ar4; |
116 | if (user_mode == HOME_SPACE_MODE) | 116 | if (user_mode == HOME_SPACE_MODE) |
117 | /* User space if the access has been done via home space. */ | 117 | /* User space if the access has been done via home space. */ |
118 | return trans_exc_code == 3; | 118 | return trans_exc_code == 3; |
119 | /* | 119 | /* |
120 | * If the user space is not the home space the kernel runs in home | 120 | * If the user space is not the home space the kernel runs in home |
121 | * space. Access via secondary space has already been covered, | 121 | * space. Access via secondary space has already been covered, |
122 | * access via primary space or access register is from user space | 122 | * access via primary space or access register is from user space |
123 | * and access via home space is from the kernel. | 123 | * and access via home space is from the kernel. |
124 | */ | 124 | */ |
125 | return trans_exc_code != 3; | 125 | return trans_exc_code != 3; |
126 | } | 126 | } |
127 | 127 | ||
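
The two low bits of the translation exception code select the address space that was active when the fault hit. For the default layout, where the kernel runs in home space, the decision above boils down to: 0 (primary) and 1 (access register) are user space, 2 (secondary) defers to the set_fs() setting, and 3 (home) is kernel space. A runnable restatement of that mapping:

    #include <stdio.h>

    static const char *fault_space(unsigned long trans_exc_code, int ar4)
    {
            switch (trans_exc_code & 3) {
            case 0: return "primary -> user space";
            case 1: return "access register -> user space";
            case 2: return ar4 ? "secondary -> user (set_fs)"
                               : "secondary -> kernel";
            default: return "home -> kernel space";
            }
    }

    int main(void)
    {
            unsigned long code;

            for (code = 0; code < 4; code++)
                    printf("%lu: %s\n", code, fault_space(code, 1));
            return 0;
    }
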
128 | static inline void report_user_fault(struct pt_regs *regs, long int_code, | 128 | static inline void report_user_fault(struct pt_regs *regs, long int_code, |
129 | int signr, unsigned long address) | 129 | int signr, unsigned long address) |
130 | { | 130 | { |
131 | if ((task_pid_nr(current) > 1) && !show_unhandled_signals) | 131 | if ((task_pid_nr(current) > 1) && !show_unhandled_signals) |
132 | return; | 132 | return; |
133 | if (!unhandled_signal(current, signr)) | 133 | if (!unhandled_signal(current, signr)) |
134 | return; | 134 | return; |
135 | if (!printk_ratelimit()) | 135 | if (!printk_ratelimit()) |
136 | return; | 136 | return; |
137 | printk("User process fault: interruption code 0x%lX ", int_code); | 137 | printk("User process fault: interruption code 0x%lX ", int_code); |
138 | print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN); | 138 | print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN); |
139 | printk("\n"); | 139 | printk("\n"); |
140 | printk("failing address: %lX\n", address); | 140 | printk("failing address: %lX\n", address); |
141 | show_regs(regs); | 141 | show_regs(regs); |
142 | } | 142 | } |
143 | 143 | ||
144 | /* | 144 | /* |
145 | * Send SIGSEGV to task. This is an external routine | 145 | * Send SIGSEGV to task. This is an external routine |
146 | * to keep the stack usage of do_page_fault small. | 146 | * to keep the stack usage of do_page_fault small. |
147 | */ | 147 | */ |
148 | static noinline void do_sigsegv(struct pt_regs *regs, long int_code, | 148 | static noinline void do_sigsegv(struct pt_regs *regs, long int_code, |
149 | int si_code, unsigned long trans_exc_code) | 149 | int si_code, unsigned long trans_exc_code) |
150 | { | 150 | { |
151 | struct siginfo si; | 151 | struct siginfo si; |
152 | unsigned long address; | 152 | unsigned long address; |
153 | 153 | ||
154 | address = trans_exc_code & __FAIL_ADDR_MASK; | 154 | address = trans_exc_code & __FAIL_ADDR_MASK; |
155 | current->thread.prot_addr = address; | 155 | current->thread.prot_addr = address; |
156 | current->thread.trap_no = int_code; | 156 | current->thread.trap_no = int_code; |
157 | report_user_fault(regs, int_code, SIGSEGV, address); | 157 | report_user_fault(regs, int_code, SIGSEGV, address); |
158 | si.si_signo = SIGSEGV; | 158 | si.si_signo = SIGSEGV; |
159 | si.si_code = si_code; | 159 | si.si_code = si_code; |
160 | si.si_addr = (void __user *) address; | 160 | si.si_addr = (void __user *) address; |
161 | force_sig_info(SIGSEGV, &si, current); | 161 | force_sig_info(SIGSEGV, &si, current); |
162 | } | 162 | } |
163 | 163 | ||
164 | static noinline void do_no_context(struct pt_regs *regs, long int_code, | 164 | static noinline void do_no_context(struct pt_regs *regs, long int_code, |
165 | unsigned long trans_exc_code) | 165 | unsigned long trans_exc_code) |
166 | { | 166 | { |
167 | const struct exception_table_entry *fixup; | 167 | const struct exception_table_entry *fixup; |
168 | unsigned long address; | 168 | unsigned long address; |
169 | 169 | ||
170 | /* Are we prepared to handle this kernel fault? */ | 170 | /* Are we prepared to handle this kernel fault? */ |
171 | fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); | 171 | fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); |
172 | if (fixup) { | 172 | if (fixup) { |
173 | regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; | 173 | regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; |
174 | return; | 174 | return; |
175 | } | 175 | } |
176 | 176 | ||
177 | /* | 177 | /* |
178 | * Oops. The kernel tried to access some bad page. We'll have to | 178 | * Oops. The kernel tried to access some bad page. We'll have to |
179 | * terminate things with extreme prejudice. | 179 | * terminate things with extreme prejudice. |
180 | */ | 180 | */ |
181 | address = trans_exc_code & __FAIL_ADDR_MASK; | 181 | address = trans_exc_code & __FAIL_ADDR_MASK; |
182 | if (!user_space_fault(trans_exc_code)) | 182 | if (!user_space_fault(trans_exc_code)) |
183 | printk(KERN_ALERT "Unable to handle kernel pointer dereference" | 183 | printk(KERN_ALERT "Unable to handle kernel pointer dereference" |
184 | " at virtual kernel address %p\n", (void *)address); | 184 | " at virtual kernel address %p\n", (void *)address); |
185 | else | 185 | else |
186 | printk(KERN_ALERT "Unable to handle kernel paging request" | 186 | printk(KERN_ALERT "Unable to handle kernel paging request" |
187 | " at virtual user address %p\n", (void *)address); | 187 | " at virtual user address %p\n", (void *)address); |
188 | 188 | ||
189 | die("Oops", regs, int_code); | 189 | die("Oops", regs, int_code); |
190 | do_exit(SIGKILL); | 190 | do_exit(SIGKILL); |
191 | } | 191 | } |
192 | 192 | ||
193 | static noinline void do_low_address(struct pt_regs *regs, long int_code, | 193 | static noinline void do_low_address(struct pt_regs *regs, long int_code, |
194 | unsigned long trans_exc_code) | 194 | unsigned long trans_exc_code) |
195 | { | 195 | { |
196 | /* Low-address protection hit in kernel mode means a | 196 | /* Low-address protection hit in kernel mode means a |
197 | NULL pointer write access. */ | 197 | NULL pointer write access. */ |
198 | if (regs->psw.mask & PSW_MASK_PSTATE) { | 198 | if (regs->psw.mask & PSW_MASK_PSTATE) { |
199 | /* Low-address protection hit in user mode 'cannot happen'. */ | 199 | /* Low-address protection hit in user mode 'cannot happen'. */ |
200 | die("Low-address protection", regs, int_code); | 200 | die("Low-address protection", regs, int_code); |
201 | do_exit(SIGKILL); | 201 | do_exit(SIGKILL); |
202 | } | 202 | } |
203 | 203 | ||
204 | do_no_context(regs, int_code, trans_exc_code); | 204 | do_no_context(regs, int_code, trans_exc_code); |
205 | } | 205 | } |
206 | 206 | ||
207 | static noinline void do_sigbus(struct pt_regs *regs, long int_code, | 207 | static noinline void do_sigbus(struct pt_regs *regs, long int_code, |
208 | unsigned long trans_exc_code) | 208 | unsigned long trans_exc_code) |
209 | { | 209 | { |
210 | struct task_struct *tsk = current; | 210 | struct task_struct *tsk = current; |
211 | unsigned long address; | 211 | unsigned long address; |
212 | struct siginfo si; | 212 | struct siginfo si; |
213 | 213 | ||
214 | /* | 214 | /* |
215 | * Send a sigbus, regardless of whether we were in kernel | 215 | * Send a sigbus, regardless of whether we were in kernel |
216 | * or user mode. | 216 | * or user mode. |
217 | */ | 217 | */ |
218 | address = trans_exc_code & __FAIL_ADDR_MASK; | 218 | address = trans_exc_code & __FAIL_ADDR_MASK; |
219 | tsk->thread.prot_addr = address; | 219 | tsk->thread.prot_addr = address; |
220 | tsk->thread.trap_no = int_code; | 220 | tsk->thread.trap_no = int_code; |
221 | si.si_signo = SIGBUS; | 221 | si.si_signo = SIGBUS; |
222 | si.si_errno = 0; | 222 | si.si_errno = 0; |
223 | si.si_code = BUS_ADRERR; | 223 | si.si_code = BUS_ADRERR; |
224 | si.si_addr = (void __user *) address; | 224 | si.si_addr = (void __user *) address; |
225 | force_sig_info(SIGBUS, &si, tsk); | 225 | force_sig_info(SIGBUS, &si, tsk); |
226 | } | 226 | } |
227 | 227 | ||
228 | static noinline void do_fault_error(struct pt_regs *regs, long int_code, | 228 | static noinline void do_fault_error(struct pt_regs *regs, long int_code, |
229 | unsigned long trans_exc_code, int fault) | 229 | unsigned long trans_exc_code, int fault) |
230 | { | 230 | { |
231 | int si_code; | 231 | int si_code; |
232 | 232 | ||
233 | switch (fault) { | 233 | switch (fault) { |
234 | case VM_FAULT_BADACCESS: | 234 | case VM_FAULT_BADACCESS: |
235 | case VM_FAULT_BADMAP: | 235 | case VM_FAULT_BADMAP: |
236 | /* Bad memory access. Check if it is kernel or user space. */ | 236 | /* Bad memory access. Check if it is kernel or user space. */ |
237 | if (regs->psw.mask & PSW_MASK_PSTATE) { | 237 | if (regs->psw.mask & PSW_MASK_PSTATE) { |
238 | /* User mode accesses just cause a SIGSEGV */ | 238 | /* User mode accesses just cause a SIGSEGV */ |
239 | si_code = (fault == VM_FAULT_BADMAP) ? | 239 | si_code = (fault == VM_FAULT_BADMAP) ? |
240 | SEGV_MAPERR : SEGV_ACCERR; | 240 | SEGV_MAPERR : SEGV_ACCERR; |
241 | do_sigsegv(regs, int_code, si_code, trans_exc_code); | 241 | do_sigsegv(regs, int_code, si_code, trans_exc_code); |
242 | return; | 242 | return; |
243 | } | 243 | } |
244 | case VM_FAULT_BADCONTEXT: | 244 | case VM_FAULT_BADCONTEXT: |
245 | do_no_context(regs, int_code, trans_exc_code); | 245 | do_no_context(regs, int_code, trans_exc_code); |
246 | break; | 246 | break; |
247 | default: /* fault & VM_FAULT_ERROR */ | 247 | default: /* fault & VM_FAULT_ERROR */ |
248 | if (fault & VM_FAULT_OOM) | 248 | if (fault & VM_FAULT_OOM) |
249 | pagefault_out_of_memory(); | 249 | pagefault_out_of_memory(); |
250 | else if (fault & VM_FAULT_SIGBUS) { | 250 | else if (fault & VM_FAULT_SIGBUS) { |
251 | /* Kernel mode? Handle exceptions or die */ | 251 | /* Kernel mode? Handle exceptions or die */ |
252 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) | 252 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) |
253 | do_no_context(regs, int_code, trans_exc_code); | 253 | do_no_context(regs, int_code, trans_exc_code); |
254 | else | 254 | else |
255 | do_sigbus(regs, int_code, trans_exc_code); | 255 | do_sigbus(regs, int_code, trans_exc_code); |
256 | } else | 256 | } else |
257 | BUG(); | 257 | BUG(); |
258 | break; | 258 | break; |
259 | } | 259 | } |
260 | } | 260 | } |
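
Summarized, the dispatch above reduces to the following mapping (sketch; note the deliberate fall-through from the kernel-mode BADMAP/BADACCESS case into VM_FAULT_BADCONTEXT):

/*
 * VM_FAULT_BADMAP,    user mode        -> SIGSEGV (SEGV_MAPERR)
 * VM_FAULT_BADACCESS, user mode        -> SIGSEGV (SEGV_ACCERR)
 * VM_FAULT_BADMAP/BADACCESS, kernel mode -> fall through to ...
 * VM_FAULT_BADCONTEXT                  -> do_no_context()
 * VM_FAULT_OOM                         -> pagefault_out_of_memory()
 * VM_FAULT_SIGBUS, kernel mode         -> do_no_context()
 * VM_FAULT_SIGBUS, user mode           -> do_sigbus()
 * anything else                        -> BUG()
 */
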
261 | 261 | ||
262 | /* | 262 | /* |
263 | * This routine handles page faults. It determines the address, | 263 | * This routine handles page faults. It determines the address, |
264 | * and the problem, and then passes it off to one of the appropriate | 264 | * and the problem, and then passes it off to one of the appropriate |
265 | * routines. | 265 | * routines. |
266 | * | 266 | * |
267 | * interruption code (int_code): | 267 | * interruption code (int_code): |
268 | * 04 Protection -> Write-Protection (suppression) | 268 | * 04 Protection -> Write-Protection (suppression) |
269 | * 10 Segment translation -> Not present (nullification) | 269 | * 10 Segment translation -> Not present (nullification) |
270 | * 11 Page translation -> Not present (nullification) | 270 | * 11 Page translation -> Not present (nullification) |
271 | * 3b Region third trans. -> Not present (nullification) | 271 | * 3b Region third trans. -> Not present (nullification) |
272 | */ | 272 | */ |
273 | static inline int do_exception(struct pt_regs *regs, int access, | 273 | static inline int do_exception(struct pt_regs *regs, int access, |
274 | unsigned long trans_exc_code) | 274 | unsigned long trans_exc_code) |
275 | { | 275 | { |
276 | struct task_struct *tsk; | 276 | struct task_struct *tsk; |
277 | struct mm_struct *mm; | 277 | struct mm_struct *mm; |
278 | struct vm_area_struct *vma; | 278 | struct vm_area_struct *vma; |
279 | unsigned long address; | 279 | unsigned long address; |
280 | int fault, write; | 280 | int fault, write; |
281 | 281 | ||
282 | if (notify_page_fault(regs)) | 282 | if (notify_page_fault(regs)) |
283 | return 0; | 283 | return 0; |
284 | 284 | ||
285 | tsk = current; | 285 | tsk = current; |
286 | mm = tsk->mm; | 286 | mm = tsk->mm; |
287 | 287 | ||
288 | /* | 288 | /* |
289 | * Verify that the fault happened in user space, that | 289 | * Verify that the fault happened in user space, that |
290 | * we are not in an interrupt and that there is a | 290 | * we are not in an interrupt and that there is a |
291 | * user context. | 291 | * user context. |
292 | */ | 292 | */ |
293 | fault = VM_FAULT_BADCONTEXT; | 293 | fault = VM_FAULT_BADCONTEXT; |
294 | if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) | 294 | if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) |
295 | goto out; | 295 | goto out; |
296 | 296 | ||
297 | address = trans_exc_code & __FAIL_ADDR_MASK; | 297 | address = trans_exc_code & __FAIL_ADDR_MASK; |
298 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | 298 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); |
299 | down_read(&mm->mmap_sem); | 299 | down_read(&mm->mmap_sem); |
300 | 300 | ||
301 | fault = VM_FAULT_BADMAP; | 301 | fault = VM_FAULT_BADMAP; |
302 | vma = find_vma(mm, address); | 302 | vma = find_vma(mm, address); |
303 | if (!vma) | 303 | if (!vma) |
304 | goto out_up; | 304 | goto out_up; |
305 | 305 | ||
306 | if (unlikely(vma->vm_start > address)) { | 306 | if (unlikely(vma->vm_start > address)) { |
307 | if (!(vma->vm_flags & VM_GROWSDOWN)) | 307 | if (!(vma->vm_flags & VM_GROWSDOWN)) |
308 | goto out_up; | 308 | goto out_up; |
309 | if (expand_stack(vma, address)) | 309 | if (expand_stack(vma, address)) |
310 | goto out_up; | 310 | goto out_up; |
311 | } | 311 | } |
312 | 312 | ||
313 | /* | 313 | /* |
314 | * Ok, we have a good vm_area for this memory access, so | 314 | * Ok, we have a good vm_area for this memory access, so |
315 | * we can handle it. | 315 | * we can handle it. |
316 | */ | 316 | */ |
317 | fault = VM_FAULT_BADACCESS; | 317 | fault = VM_FAULT_BADACCESS; |
318 | if (unlikely(!(vma->vm_flags & access))) | 318 | if (unlikely(!(vma->vm_flags & access))) |
319 | goto out_up; | 319 | goto out_up; |
320 | 320 | ||
321 | if (is_vm_hugetlb_page(vma)) | 321 | if (is_vm_hugetlb_page(vma)) |
322 | address &= HPAGE_MASK; | 322 | address &= HPAGE_MASK; |
323 | /* | 323 | /* |
324 | * If for any reason at all we couldn't handle the fault, | 324 | * If for any reason at all we couldn't handle the fault, |
325 | * make sure we exit gracefully rather than endlessly redo | 325 | * make sure we exit gracefully rather than endlessly redo |
326 | * the fault. | 326 | * the fault. |
327 | */ | 327 | */ |
328 | write = (access == VM_WRITE || | 328 | write = (access == VM_WRITE || |
329 | (trans_exc_code & store_indication) == 0x400) ? | 329 | (trans_exc_code & store_indication) == 0x400) ? |
330 | FAULT_FLAG_WRITE : 0; | 330 | FAULT_FLAG_WRITE : 0; |
331 | fault = handle_mm_fault(mm, vma, address, write); | 331 | fault = handle_mm_fault(mm, vma, address, write); |
332 | if (unlikely(fault & VM_FAULT_ERROR)) | 332 | if (unlikely(fault & VM_FAULT_ERROR)) |
333 | goto out_up; | 333 | goto out_up; |
334 | 334 | ||
335 | if (fault & VM_FAULT_MAJOR) { | 335 | if (fault & VM_FAULT_MAJOR) { |
336 | tsk->maj_flt++; | 336 | tsk->maj_flt++; |
337 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | 337 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, |
338 | regs, address); | 338 | regs, address); |
339 | } else { | 339 | } else { |
340 | tsk->min_flt++; | 340 | tsk->min_flt++; |
341 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | 341 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, |
342 | regs, address); | 342 | regs, address); |
343 | } | 343 | } |
344 | /* | 344 | /* |
345 | * The instruction that caused the program check will | 345 | * The instruction that caused the program check will |
346 | * be repeated. Don't signal single step via SIGTRAP. | 346 | * be repeated. Don't signal single step via SIGTRAP. |
347 | */ | 347 | */ |
348 | clear_tsk_thread_flag(tsk, TIF_PER_TRAP); | 348 | clear_tsk_thread_flag(tsk, TIF_PER_TRAP); |
349 | fault = 0; | 349 | fault = 0; |
350 | out_up: | 350 | out_up: |
351 | up_read(&mm->mmap_sem); | 351 | up_read(&mm->mmap_sem); |
352 | out: | 352 | out: |
353 | return fault; | 353 | return fault; |
354 | } | 354 | } |
355 | 355 | ||
356 | void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code, | 356 | void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code, |
357 | unsigned long trans_exc_code) | 357 | unsigned long trans_exc_code) |
358 | { | 358 | { |
359 | int fault; | 359 | int fault; |
360 | 360 | ||
361 | /* Protection exception is suppressing, decrement psw address. */ | 361 | /* Protection exception is suppressing, decrement psw address. */ |
362 | regs->psw.addr -= (pgm_int_code >> 16); | 362 | regs->psw.addr -= (pgm_int_code >> 16); |
363 | /* | 363 | /* |
364 | * Check for low-address protection. This needs to be treated | 364 | * Check for low-address protection. This needs to be treated |
365 | * as a special case because the translation exception code | 365 | * as a special case because the translation exception code |
366 | * field is not guaranteed to contain valid data in this case. | 366 | * field is not guaranteed to contain valid data in this case. |
367 | */ | 367 | */ |
368 | if (unlikely(!(trans_exc_code & 4))) { | 368 | if (unlikely(!(trans_exc_code & 4))) { |
369 | do_low_address(regs, pgm_int_code, trans_exc_code); | 369 | do_low_address(regs, pgm_int_code, trans_exc_code); |
370 | return; | 370 | return; |
371 | } | 371 | } |
372 | fault = do_exception(regs, VM_WRITE, trans_exc_code); | 372 | fault = do_exception(regs, VM_WRITE, trans_exc_code); |
373 | if (unlikely(fault)) | 373 | if (unlikely(fault)) |
374 | do_fault_error(regs, 4, trans_exc_code, fault); | 374 | do_fault_error(regs, 4, trans_exc_code, fault); |
375 | } | 375 | } |
376 | 376 | ||
377 | void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code, | 377 | void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code, |
378 | unsigned long trans_exc_code) | 378 | unsigned long trans_exc_code) |
379 | { | 379 | { |
380 | int access, fault; | 380 | int access, fault; |
381 | 381 | ||
382 | access = VM_READ | VM_EXEC | VM_WRITE; | 382 | access = VM_READ | VM_EXEC | VM_WRITE; |
383 | fault = do_exception(regs, access, trans_exc_code); | 383 | fault = do_exception(regs, access, trans_exc_code); |
384 | if (unlikely(fault)) | 384 | if (unlikely(fault)) |
385 | do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault); | 385 | do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault); |
386 | } | 386 | } |
387 | 387 | ||
388 | #ifdef CONFIG_64BIT | 388 | #ifdef CONFIG_64BIT |
389 | void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code, | 389 | void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code, |
390 | unsigned long trans_exc_code) | 390 | unsigned long trans_exc_code) |
391 | { | 391 | { |
392 | struct mm_struct *mm = current->mm; | 392 | struct mm_struct *mm = current->mm; |
393 | struct vm_area_struct *vma; | 393 | struct vm_area_struct *vma; |
394 | 394 | ||
395 | if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) | 395 | if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) |
396 | goto no_context; | 396 | goto no_context; |
397 | 397 | ||
398 | down_read(&mm->mmap_sem); | 398 | down_read(&mm->mmap_sem); |
399 | vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK); | 399 | vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK); |
400 | up_read(&mm->mmap_sem); | 400 | up_read(&mm->mmap_sem); |
401 | 401 | ||
402 | if (vma) { | 402 | if (vma) { |
403 | update_mm(mm, current); | 403 | update_mm(mm, current); |
404 | return; | 404 | return; |
405 | } | 405 | } |
406 | 406 | ||
407 | /* User mode accesses just cause a SIGSEGV */ | 407 | /* User mode accesses just cause a SIGSEGV */ |
408 | if (regs->psw.mask & PSW_MASK_PSTATE) { | 408 | if (regs->psw.mask & PSW_MASK_PSTATE) { |
409 | do_sigsegv(regs, pgm_int_code, SEGV_MAPERR, trans_exc_code); | 409 | do_sigsegv(regs, pgm_int_code, SEGV_MAPERR, trans_exc_code); |
410 | return; | 410 | return; |
411 | } | 411 | } |
412 | 412 | ||
413 | no_context: | 413 | no_context: |
414 | do_no_context(regs, pgm_int_code, trans_exc_code); | 414 | do_no_context(regs, pgm_int_code, trans_exc_code); |
415 | } | 415 | } |
416 | #endif | 416 | #endif |
417 | 417 | ||
418 | int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write) | 418 | int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write) |
419 | { | 419 | { |
420 | struct pt_regs regs; | 420 | struct pt_regs regs; |
421 | int access, fault; | 421 | int access, fault; |
422 | 422 | ||
423 | regs.psw.mask = psw_kernel_bits; | 423 | regs.psw.mask = psw_kernel_bits; |
424 | if (!irqs_disabled()) | 424 | if (!irqs_disabled()) |
425 | regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; | 425 | regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; |
426 | regs.psw.addr = (unsigned long) __builtin_return_address(0); | 426 | regs.psw.addr = (unsigned long) __builtin_return_address(0); |
427 | regs.psw.addr |= PSW_ADDR_AMODE; | 427 | regs.psw.addr |= PSW_ADDR_AMODE; |
428 | uaddr &= PAGE_MASK; | 428 | uaddr &= PAGE_MASK; |
429 | access = write ? VM_WRITE : VM_READ; | 429 | access = write ? VM_WRITE : VM_READ; |
430 | fault = do_exception(®s, access, uaddr | 2); | 430 | fault = do_exception(®s, access, uaddr | 2); |
431 | if (unlikely(fault)) { | 431 | if (unlikely(fault)) { |
432 | if (fault & VM_FAULT_OOM) { | 432 | if (fault & VM_FAULT_OOM) { |
433 | pagefault_out_of_memory(); | 433 | pagefault_out_of_memory(); |
434 | fault = 0; | 434 | fault = 0; |
435 | } else if (fault & VM_FAULT_SIGBUS) | 435 | } else if (fault & VM_FAULT_SIGBUS) |
436 | do_sigbus(®s, pgm_int_code, uaddr); | 436 | do_sigbus(®s, pgm_int_code, uaddr); |
437 | } | 437 | } |
438 | return fault ? -EFAULT : 0; | 438 | return fault ? -EFAULT : 0; |
439 | } | 439 | } |
440 | 440 | ||
441 | #ifdef CONFIG_PFAULT | 441 | #ifdef CONFIG_PFAULT |
442 | /* | 442 | /* |
443 | * 'pfault' pseudo page fault routines. | 443 | * 'pfault' pseudo page fault routines. |
444 | */ | 444 | */ |
445 | static int pfault_disable; | 445 | static int pfault_disable; |
446 | 446 | ||
447 | static int __init nopfault(char *str) | 447 | static int __init nopfault(char *str) |
448 | { | 448 | { |
449 | pfault_disable = 1; | 449 | pfault_disable = 1; |
450 | return 1; | 450 | return 1; |
451 | } | 451 | } |
452 | 452 | ||
453 | __setup("nopfault", nopfault); | 453 | __setup("nopfault", nopfault); |
454 | 454 | ||
455 | struct pfault_refbk { | 455 | struct pfault_refbk { |
456 | u16 refdiagc; | 456 | u16 refdiagc; |
457 | u16 reffcode; | 457 | u16 reffcode; |
458 | u16 refdwlen; | 458 | u16 refdwlen; |
459 | u16 refversn; | 459 | u16 refversn; |
460 | u64 refgaddr; | 460 | u64 refgaddr; |
461 | u64 refselmk; | 461 | u64 refselmk; |
462 | u64 refcmpmk; | 462 | u64 refcmpmk; |
463 | u64 reserved; | 463 | u64 reserved; |
464 | } __attribute__ ((packed, aligned(8))); | 464 | } __attribute__ ((packed, aligned(8))); |
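
One detail worth checking: refdwlen appears to count 8-byte doublewords, so refdwlen = 5 implies a 40-byte parameter block. A compile-time sketch of that assumption (userspace C11, mirroring the layout above):

#include <stdint.h>

struct pfault_refbk_sketch {
	uint16_t refdiagc, reffcode, refdwlen, refversn;
	uint64_t refgaddr, refselmk, refcmpmk, reserved;
} __attribute__((packed, aligned(8)));

/* 4 halfwords + 4 doublewords = 8 + 32 = 40 bytes = 5 doublewords */
_Static_assert(sizeof(struct pfault_refbk_sketch) == 5 * 8,
	       "DIAG 0x258 parameter block should be 5 doublewords");
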
465 | 465 | ||
466 | int pfault_init(void) | 466 | int pfault_init(void) |
467 | { | 467 | { |
468 | struct pfault_refbk refbk = { | 468 | struct pfault_refbk refbk = { |
469 | .refdiagc = 0x258, | 469 | .refdiagc = 0x258, |
470 | .reffcode = 0, | 470 | .reffcode = 0, |
471 | .refdwlen = 5, | 471 | .refdwlen = 5, |
472 | .refversn = 2, | 472 | .refversn = 2, |
473 | .refgaddr = __LC_CURRENT_PID, | 473 | .refgaddr = __LC_CURRENT_PID, |
474 | .refselmk = 1ULL << 48, | 474 | .refselmk = 1ULL << 48, |
475 | .refcmpmk = 1ULL << 48, | 475 | .refcmpmk = 1ULL << 48, |
476 | .reserved = __PF_RES_FIELD }; | 476 | .reserved = __PF_RES_FIELD }; |
477 | int rc; | 477 | int rc; |
478 | 478 | ||
479 | if (!MACHINE_IS_VM || pfault_disable) | 479 | if (!MACHINE_IS_VM || pfault_disable) |
480 | return -1; | 480 | return -1; |
481 | asm volatile( | 481 | asm volatile( |
482 | " diag %1,%0,0x258\n" | 482 | " diag %1,%0,0x258\n" |
483 | "0: j 2f\n" | 483 | "0: j 2f\n" |
484 | "1: la %0,8\n" | 484 | "1: la %0,8\n" |
485 | "2:\n" | 485 | "2:\n" |
486 | EX_TABLE(0b,1b) | 486 | EX_TABLE(0b,1b) |
487 | : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc"); | 487 | : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc"); |
488 | return rc; | 488 | return rc; |
489 | } | 489 | } |
490 | 490 | ||
491 | void pfault_fini(void) | 491 | void pfault_fini(void) |
492 | { | 492 | { |
493 | struct pfault_refbk refbk = { | 493 | struct pfault_refbk refbk = { |
494 | .refdiagc = 0x258, | 494 | .refdiagc = 0x258, |
495 | .reffcode = 1, | 495 | .reffcode = 1, |
496 | .refdwlen = 5, | 496 | .refdwlen = 5, |
497 | .refversn = 2, | 497 | .refversn = 2, |
498 | }; | 498 | }; |
499 | 499 | ||
500 | if (!MACHINE_IS_VM || pfault_disable) | 500 | if (!MACHINE_IS_VM || pfault_disable) |
501 | return; | 501 | return; |
502 | asm volatile( | 502 | asm volatile( |
503 | " diag %0,0,0x258\n" | 503 | " diag %0,0,0x258\n" |
504 | "0:\n" | 504 | "0:\n" |
505 | EX_TABLE(0b,0b) | 505 | EX_TABLE(0b,0b) |
506 | : : "a" (&refbk), "m" (refbk) : "cc"); | 506 | : : "a" (&refbk), "m" (refbk) : "cc"); |
507 | } | 507 | } |
508 | 508 | ||
509 | static DEFINE_SPINLOCK(pfault_lock); | 509 | static DEFINE_SPINLOCK(pfault_lock); |
510 | static LIST_HEAD(pfault_list); | 510 | static LIST_HEAD(pfault_list); |
511 | 511 | ||
512 | static void pfault_interrupt(unsigned int ext_int_code, | 512 | static void pfault_interrupt(unsigned int ext_int_code, |
513 | unsigned int param32, unsigned long param64) | 513 | unsigned int param32, unsigned long param64) |
514 | { | 514 | { |
515 | struct task_struct *tsk; | 515 | struct task_struct *tsk; |
516 | __u16 subcode; | 516 | __u16 subcode; |
517 | pid_t pid; | 517 | pid_t pid; |
518 | 518 | ||
519 | /* | 519 | /* |
520 | * Get the external interruption subcode & pfault | 520 | * Get the external interruption subcode & pfault |
521 | * initial/completion signal bit. VM stores this | 521 | * initial/completion signal bit. VM stores this |
522 | * in the 'cpu address' field associated with the | 522 | * in the 'cpu address' field associated with the |
523 | * external interrupt. | 523 | * external interrupt. |
524 | */ | 524 | */ |
525 | subcode = ext_int_code >> 16; | 525 | subcode = ext_int_code >> 16; |
526 | if ((subcode & 0xff00) != __SUBCODE_MASK) | 526 | if ((subcode & 0xff00) != __SUBCODE_MASK) |
527 | return; | 527 | return; |
528 | kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++; | 528 | kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++; |
529 | if (subcode & 0x0080) { | 529 | if (subcode & 0x0080) { |
530 | /* Get the token (= pid of the affected task). */ | 530 | /* Get the token (= pid of the affected task). */ |
531 | pid = sizeof(void *) == 4 ? param32 : param64; | 531 | pid = sizeof(void *) == 4 ? param32 : param64; |
532 | rcu_read_lock(); | 532 | rcu_read_lock(); |
533 | tsk = find_task_by_pid_ns(pid, &init_pid_ns); | 533 | tsk = find_task_by_pid_ns(pid, &init_pid_ns); |
534 | if (tsk) | 534 | if (tsk) |
535 | get_task_struct(tsk); | 535 | get_task_struct(tsk); |
536 | rcu_read_unlock(); | 536 | rcu_read_unlock(); |
537 | if (!tsk) | 537 | if (!tsk) |
538 | return; | 538 | return; |
539 | } else { | 539 | } else { |
540 | tsk = current; | 540 | tsk = current; |
541 | } | 541 | } |
542 | spin_lock(&pfault_lock); | 542 | spin_lock(&pfault_lock); |
543 | if (subcode & 0x0080) { | 543 | if (subcode & 0x0080) { |
544 | /* signal bit is set -> a page has been swapped in by VM */ | 544 | /* signal bit is set -> a page has been swapped in by VM */ |
545 | if (tsk->thread.pfault_wait == 1) { | 545 | if (tsk->thread.pfault_wait == 1) { |
546 | /* Initial interrupt was faster than the completion | 546 | /* Initial interrupt was faster than the completion |
547 | * interrupt. pfault_wait is valid. Set pfault_wait | 547 | * interrupt. pfault_wait is valid. Set pfault_wait |
548 | * back to zero and wake up the process. This can | 548 | * back to zero and wake up the process. This can |
549 | * safely be done because the task is still sleeping | 549 | * safely be done because the task is still sleeping |
550 | * and can't produce new pfaults. */ | 550 | * and can't produce new pfaults. */ |
551 | tsk->thread.pfault_wait = 0; | 551 | tsk->thread.pfault_wait = 0; |
552 | list_del(&tsk->thread.list); | 552 | list_del(&tsk->thread.list); |
553 | wake_up_process(tsk); | 553 | wake_up_process(tsk); |
554 | } else { | 554 | } else { |
555 | /* Completion interrupt was faster than initial | 555 | /* Completion interrupt was faster than initial |
556 | * interrupt. Set pfault_wait to -1 so the initial | 556 | * interrupt. Set pfault_wait to -1 so the initial |
557 | * interrupt doesn't put the task to sleep. */ | 557 | * interrupt doesn't put the task to sleep. */ |
558 | tsk->thread.pfault_wait = -1; | 558 | tsk->thread.pfault_wait = -1; |
559 | } | 559 | } |
560 | put_task_struct(tsk); | 560 | put_task_struct(tsk); |
561 | } else { | 561 | } else { |
562 | /* signal bit not set -> a real page is missing. */ | 562 | /* signal bit not set -> a real page is missing. */ |
563 | if (tsk->thread.pfault_wait == -1) { | 563 | if (tsk->thread.pfault_wait == -1) { |
564 | /* Completion interrupt was faster than the initial | 564 | /* Completion interrupt was faster than the initial |
565 | * interrupt (pfault_wait == -1). Set pfault_wait | 565 | * interrupt (pfault_wait == -1). Set pfault_wait |
566 | * back to zero and exit. */ | 566 | * back to zero and exit. */ |
567 | tsk->thread.pfault_wait = 0; | 567 | tsk->thread.pfault_wait = 0; |
568 | } else { | 568 | } else { |
569 | /* Initial interrupt arrived before completion | 569 | /* Initial interrupt arrived before completion |
570 | * interrupt. Let the task sleep. */ | 570 | * interrupt. Let the task sleep. */ |
571 | tsk->thread.pfault_wait = 1; | 571 | tsk->thread.pfault_wait = 1; |
572 | list_add(&tsk->thread.list, &pfault_list); | 572 | list_add(&tsk->thread.list, &pfault_list); |
573 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | 573 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); |
574 | set_tsk_need_resched(tsk); | 574 | set_tsk_need_resched(tsk); |
575 | } | 575 | } |
576 | } | 576 | } |
577 | spin_unlock(&pfault_lock); | 577 | spin_unlock(&pfault_lock); |
578 | } | 578 | } |
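
Since the initial and completion interrupts can arrive in either order, pfault_wait above acts as a small state machine. A minimal sketch of the transitions (illustrative names, not kernel code):

enum pfault_state {
	PF_IDLE    =  0,	/* nothing pending */
	PF_WAITING =  1,	/* initial interrupt seen first, task asleep */
	PF_DONE    = -1,	/* completion interrupt seen first */
};

/* completion interrupt: VM swapped the page back in */
static int pfault_completion(int state)
{
	return state == PF_WAITING ? PF_IDLE /* wake task */ : PF_DONE;
}

/* initial interrupt: a real page is missing */
static int pfault_initial(int state)
{
	return state == PF_DONE ? PF_IDLE /* don't sleep */ : PF_WAITING;
}
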
579 | 579 | ||
580 | static int __cpuinit pfault_cpu_notify(struct notifier_block *self, | 580 | static int __cpuinit pfault_cpu_notify(struct notifier_block *self, |
581 | unsigned long action, void *hcpu) | 581 | unsigned long action, void *hcpu) |
582 | { | 582 | { |
583 | struct thread_struct *thread, *next; | 583 | struct thread_struct *thread, *next; |
584 | struct task_struct *tsk; | 584 | struct task_struct *tsk; |
585 | 585 | ||
586 | switch (action) { | 586 | switch (action) { |
587 | case CPU_DEAD: | 587 | case CPU_DEAD: |
588 | case CPU_DEAD_FROZEN: | 588 | case CPU_DEAD_FROZEN: |
589 | spin_lock_irq(&pfault_lock); | 589 | spin_lock_irq(&pfault_lock); |
590 | list_for_each_entry_safe(thread, next, &pfault_list, list) { | 590 | list_for_each_entry_safe(thread, next, &pfault_list, list) { |
591 | thread->pfault_wait = 0; | 591 | thread->pfault_wait = 0; |
592 | list_del(&thread->list); | 592 | list_del(&thread->list); |
593 | tsk = container_of(thread, struct task_struct, thread); | 593 | tsk = container_of(thread, struct task_struct, thread); |
594 | wake_up_process(tsk); | 594 | wake_up_process(tsk); |
595 | } | 595 | } |
596 | spin_unlock_irq(&pfault_lock); | 596 | spin_unlock_irq(&pfault_lock); |
597 | break; | 597 | break; |
598 | default: | 598 | default: |
599 | break; | 599 | break; |
600 | } | 600 | } |
601 | return NOTIFY_OK; | 601 | return NOTIFY_OK; |
602 | } | 602 | } |
603 | 603 | ||
604 | static int __init pfault_irq_init(void) | 604 | static int __init pfault_irq_init(void) |
605 | { | 605 | { |
606 | int rc; | 606 | int rc; |
607 | 607 | ||
608 | if (!MACHINE_IS_VM) | 608 | if (!MACHINE_IS_VM) |
609 | return 0; | 609 | return 0; |
610 | rc = register_external_interrupt(0x2603, pfault_interrupt); | 610 | rc = register_external_interrupt(0x2603, pfault_interrupt); |
611 | if (rc) | 611 | if (rc) |
612 | goto out_extint; | 612 | goto out_extint; |
613 | rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP; | 613 | rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP; |
614 | if (rc) | 614 | if (rc) |
615 | goto out_pfault; | 615 | goto out_pfault; |
616 | service_subclass_irq_register(); | 616 | service_subclass_irq_register(); |
617 | hotcpu_notifier(pfault_cpu_notify, 0); | 617 | hotcpu_notifier(pfault_cpu_notify, 0); |
618 | return 0; | 618 | return 0; |
619 | 619 | ||
620 | out_pfault: | 620 | out_pfault: |
621 | unregister_external_interrupt(0x2603, pfault_interrupt); | 621 | unregister_external_interrupt(0x2603, pfault_interrupt); |
622 | out_extint: | 622 | out_extint: |
623 | pfault_disable = 1; | 623 | pfault_disable = 1; |
624 | return rc; | 624 | return rc; |
625 | } | 625 | } |
626 | early_initcall(pfault_irq_init); | 626 | early_initcall(pfault_irq_init); |
627 | 627 | ||
628 | #endif /* CONFIG_PFAULT */ | 628 | #endif /* CONFIG_PFAULT */ |
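
pfault_irq_init() above doubles as a usage template for the external-interrupt API that this commit consolidates into <asm/irq.h>. A hedged sketch of a hypothetical consumer — the interrupt code 0x1234 and the handler body are placeholders, not a real subcode:

#include <linux/init.h>
#include <asm/irq.h>

/* placeholder handler; runs in interrupt context, keep it short */
static void demo_ext_handler(unsigned int ext_int_code,
			     unsigned int param32, unsigned long param64)
{
}

static int __init demo_ext_init(void)
{
	int rc;

	rc = register_external_interrupt(0x1234, demo_ext_handler);
	if (rc)
		return rc;
	/* only needed if the source signals via the service subclass */
	service_subclass_irq_register();
	return 0;
}
early_initcall(demo_ext_init);
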
629 | 629 |
arch/s390/oprofile/hwsampler.c
1 | /** | 1 | /** |
2 | * arch/s390/oprofile/hwsampler.c | 2 | * arch/s390/oprofile/hwsampler.c |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2010 | 4 | * Copyright IBM Corp. 2010 |
5 | * Author: Heinz Graalfs <graalfs@de.ibm.com> | 5 | * Author: Heinz Graalfs <graalfs@de.ibm.com> |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/kernel_stat.h> | 8 | #include <linux/kernel_stat.h> |
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/smp.h> | 11 | #include <linux/smp.h> |
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/workqueue.h> | 13 | #include <linux/workqueue.h> |
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/notifier.h> | 15 | #include <linux/notifier.h> |
16 | #include <linux/cpu.h> | 16 | #include <linux/cpu.h> |
17 | #include <linux/semaphore.h> | 17 | #include <linux/semaphore.h> |
18 | #include <linux/oom.h> | 18 | #include <linux/oom.h> |
19 | #include <linux/oprofile.h> | 19 | #include <linux/oprofile.h> |
20 | 20 | ||
21 | #include <asm/lowcore.h> | 21 | #include <asm/lowcore.h> |
22 | #include <asm/s390_ext.h> | 22 | #include <asm/irq.h> |
23 | 23 | ||
24 | #include "hwsampler.h" | 24 | #include "hwsampler.h" |
25 | 25 | ||
26 | #define MAX_NUM_SDB 511 | 26 | #define MAX_NUM_SDB 511 |
27 | #define MIN_NUM_SDB 1 | 27 | #define MIN_NUM_SDB 1 |
28 | 28 | ||
29 | #define ALERT_REQ_MASK 0x4000000000000000ul | 29 | #define ALERT_REQ_MASK 0x4000000000000000ul |
30 | #define BUFFER_FULL_MASK 0x8000000000000000ul | 30 | #define BUFFER_FULL_MASK 0x8000000000000000ul |
31 | 31 | ||
32 | #define EI_IEA (1 << 31) /* invalid entry address */ | 32 | #define EI_IEA (1 << 31) /* invalid entry address */ |
33 | #define EI_ISE (1 << 30) /* incorrect SDBT entry */ | 33 | #define EI_ISE (1 << 30) /* incorrect SDBT entry */ |
34 | #define EI_PRA (1 << 29) /* program request alert */ | 34 | #define EI_PRA (1 << 29) /* program request alert */ |
35 | #define EI_SACA (1 << 23) /* sampler authorization change alert */ | 35 | #define EI_SACA (1 << 23) /* sampler authorization change alert */ |
36 | #define EI_LSDA (1 << 22) /* loss of sample data alert */ | 36 | #define EI_LSDA (1 << 22) /* loss of sample data alert */ |
37 | 37 | ||
38 | DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer); | 38 | DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer); |
39 | 39 | ||
40 | struct hws_execute_parms { | 40 | struct hws_execute_parms { |
41 | void *buffer; | 41 | void *buffer; |
42 | signed int rc; | 42 | signed int rc; |
43 | }; | 43 | }; |
44 | 44 | ||
45 | DEFINE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer); | 45 | DEFINE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer); |
46 | EXPORT_PER_CPU_SYMBOL(sampler_cpu_buffer); | 46 | EXPORT_PER_CPU_SYMBOL(sampler_cpu_buffer); |
47 | 47 | ||
48 | static DEFINE_MUTEX(hws_sem); | 48 | static DEFINE_MUTEX(hws_sem); |
49 | static DEFINE_MUTEX(hws_sem_oom); | 49 | static DEFINE_MUTEX(hws_sem_oom); |
50 | 50 | ||
51 | static unsigned char hws_flush_all; | 51 | static unsigned char hws_flush_all; |
52 | static unsigned int hws_oom; | 52 | static unsigned int hws_oom; |
53 | static struct workqueue_struct *hws_wq; | 53 | static struct workqueue_struct *hws_wq; |
54 | 54 | ||
55 | static unsigned int hws_state; | 55 | static unsigned int hws_state; |
56 | enum { | 56 | enum { |
57 | HWS_INIT = 1, | 57 | HWS_INIT = 1, |
58 | HWS_DEALLOCATED, | 58 | HWS_DEALLOCATED, |
59 | HWS_STOPPED, | 59 | HWS_STOPPED, |
60 | HWS_STARTED, | 60 | HWS_STARTED, |
61 | HWS_STOPPING }; | 61 | HWS_STOPPING }; |
62 | 62 | ||
63 | /* set to 1 if the OOM killer was invoked during memory allocation */ | 63 | /* set to 1 if the OOM killer was invoked during memory allocation */ |
64 | static unsigned char oom_killer_was_active; | 64 | static unsigned char oom_killer_was_active; |
65 | /* number of SDBTs and SDBs as requested via the allocate API */ | 65 | /* number of SDBTs and SDBs as requested via the allocate API */ |
66 | static unsigned long num_sdbt = 100; | 66 | static unsigned long num_sdbt = 100; |
67 | static unsigned long num_sdb = 511; | 67 | static unsigned long num_sdb = 511; |
68 | /* sampling interval (machine cycles) */ | 68 | /* sampling interval (machine cycles) */ |
69 | static unsigned long interval; | 69 | static unsigned long interval; |
70 | 70 | ||
71 | static unsigned long min_sampler_rate; | 71 | static unsigned long min_sampler_rate; |
72 | static unsigned long max_sampler_rate; | 72 | static unsigned long max_sampler_rate; |
73 | 73 | ||
74 | static int ssctl(void *buffer) | 74 | static int ssctl(void *buffer) |
75 | { | 75 | { |
76 | int cc; | 76 | int cc; |
77 | 77 | ||
78 | /* set in order to detect a program check */ | 78 | /* set in order to detect a program check */ |
79 | cc = 1; | 79 | cc = 1; |
80 | 80 | ||
81 | asm volatile( | 81 | asm volatile( |
82 | "0: .insn s,0xB2870000,0(%1)\n" | 82 | "0: .insn s,0xB2870000,0(%1)\n" |
83 | "1: ipm %0\n" | 83 | "1: ipm %0\n" |
84 | " srl %0,28\n" | 84 | " srl %0,28\n" |
85 | "2:\n" | 85 | "2:\n" |
86 | EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) | 86 | EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) |
87 | : "+d" (cc), "+a" (buffer) | 87 | : "+d" (cc), "+a" (buffer) |
88 | : "m" (*((struct hws_ssctl_request_block *)buffer)) | 88 | : "m" (*((struct hws_ssctl_request_block *)buffer)) |
89 | : "cc", "memory"); | 89 | : "cc", "memory"); |
90 | 90 | ||
91 | return cc ? -EINVAL : 0; | 91 | return cc ? -EINVAL : 0; |
92 | } | 92 | } |
93 | 93 | ||
94 | static int qsi(void *buffer) | 94 | static int qsi(void *buffer) |
95 | { | 95 | { |
96 | int cc; | 96 | int cc; |
97 | cc = 1; | 97 | cc = 1; |
98 | 98 | ||
99 | asm volatile( | 99 | asm volatile( |
100 | "0: .insn s,0xB2860000,0(%1)\n" | 100 | "0: .insn s,0xB2860000,0(%1)\n" |
101 | "1: lhi %0,0\n" | 101 | "1: lhi %0,0\n" |
102 | "2:\n" | 102 | "2:\n" |
103 | EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) | 103 | EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) |
104 | : "=d" (cc), "+a" (buffer) | 104 | : "=d" (cc), "+a" (buffer) |
105 | : "m" (*((struct hws_qsi_info_block *)buffer)) | 105 | : "m" (*((struct hws_qsi_info_block *)buffer)) |
106 | : "cc", "memory"); | 106 | : "cc", "memory"); |
107 | 107 | ||
108 | return cc ? -EINVAL : 0; | 108 | return cc ? -EINVAL : 0; |
109 | } | 109 | } |
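
Both wrappers use the same trick: preset the result to a failure value, and let the EX_TABLE fixup route a program check past the success path so the preset survives. A rough userspace analogy with setjmp standing in for the exception table (illustrative only):

#include <errno.h>
#include <setjmp.h>

static jmp_buf ex_fixup;	/* stands in for EX_TABLE */

static int run_checked(void (*insn)(void))
{
	int cc = 1;		/* preset: assume a program check */

	if (setjmp(ex_fixup) == 0) {
		insn();		/* may longjmp(ex_fixup, 1) */
		cc = 0;		/* reached only on success */
	}
	return cc ? -EINVAL : 0;
}
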
110 | 110 | ||
111 | static void execute_qsi(void *parms) | 111 | static void execute_qsi(void *parms) |
112 | { | 112 | { |
113 | struct hws_execute_parms *ep = parms; | 113 | struct hws_execute_parms *ep = parms; |
114 | 114 | ||
115 | ep->rc = qsi(ep->buffer); | 115 | ep->rc = qsi(ep->buffer); |
116 | } | 116 | } |
117 | 117 | ||
118 | static void execute_ssctl(void *parms) | 118 | static void execute_ssctl(void *parms) |
119 | { | 119 | { |
120 | struct hws_execute_parms *ep = parms; | 120 | struct hws_execute_parms *ep = parms; |
121 | 121 | ||
122 | ep->rc = ssctl(ep->buffer); | 122 | ep->rc = ssctl(ep->buffer); |
123 | } | 123 | } |
124 | 124 | ||
125 | static int smp_ctl_ssctl_stop(int cpu) | 125 | static int smp_ctl_ssctl_stop(int cpu) |
126 | { | 126 | { |
127 | int rc; | 127 | int rc; |
128 | struct hws_execute_parms ep; | 128 | struct hws_execute_parms ep; |
129 | struct hws_cpu_buffer *cb; | 129 | struct hws_cpu_buffer *cb; |
130 | 130 | ||
131 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 131 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
132 | 132 | ||
133 | cb->ssctl.es = 0; | 133 | cb->ssctl.es = 0; |
134 | cb->ssctl.cs = 0; | 134 | cb->ssctl.cs = 0; |
135 | 135 | ||
136 | ep.buffer = &cb->ssctl; | 136 | ep.buffer = &cb->ssctl; |
137 | smp_call_function_single(cpu, execute_ssctl, &ep, 1); | 137 | smp_call_function_single(cpu, execute_ssctl, &ep, 1); |
138 | rc = ep.rc; | 138 | rc = ep.rc; |
139 | if (rc) { | 139 | if (rc) { |
140 | printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu); | 140 | printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu); |
141 | dump_stack(); | 141 | dump_stack(); |
142 | } | 142 | } |
143 | 143 | ||
144 | ep.buffer = &cb->qsi; | 144 | ep.buffer = &cb->qsi; |
145 | smp_call_function_single(cpu, execute_qsi, &ep, 1); | 145 | smp_call_function_single(cpu, execute_qsi, &ep, 1); |
146 | 146 | ||
147 | if (cb->qsi.es || cb->qsi.cs) { | 147 | if (cb->qsi.es || cb->qsi.cs) { |
148 | printk(KERN_EMERG "CPUMF sampling did not stop properly.\n"); | 148 | printk(KERN_EMERG "CPUMF sampling did not stop properly.\n"); |
149 | dump_stack(); | 149 | dump_stack(); |
150 | } | 150 | } |
151 | 151 | ||
152 | return rc; | 152 | return rc; |
153 | } | 153 | } |
154 | 154 | ||
155 | static int smp_ctl_ssctl_deactivate(int cpu) | 155 | static int smp_ctl_ssctl_deactivate(int cpu) |
156 | { | 156 | { |
157 | int rc; | 157 | int rc; |
158 | struct hws_execute_parms ep; | 158 | struct hws_execute_parms ep; |
159 | struct hws_cpu_buffer *cb; | 159 | struct hws_cpu_buffer *cb; |
160 | 160 | ||
161 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 161 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
162 | 162 | ||
163 | cb->ssctl.es = 1; | 163 | cb->ssctl.es = 1; |
164 | cb->ssctl.cs = 0; | 164 | cb->ssctl.cs = 0; |
165 | 165 | ||
166 | ep.buffer = &cb->ssctl; | 166 | ep.buffer = &cb->ssctl; |
167 | smp_call_function_single(cpu, execute_ssctl, &ep, 1); | 167 | smp_call_function_single(cpu, execute_ssctl, &ep, 1); |
168 | rc = ep.rc; | 168 | rc = ep.rc; |
169 | if (rc) | 169 | if (rc) |
170 | printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu); | 170 | printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu); |
171 | 171 | ||
172 | ep.buffer = &cb->qsi; | 172 | ep.buffer = &cb->qsi; |
173 | smp_call_function_single(cpu, execute_qsi, &ep, 1); | 173 | smp_call_function_single(cpu, execute_qsi, &ep, 1); |
174 | 174 | ||
175 | if (cb->qsi.cs) | 175 | if (cb->qsi.cs) |
176 | printk(KERN_EMERG "CPUMF sampling was not set inactive.\n"); | 176 | printk(KERN_EMERG "CPUMF sampling was not set inactive.\n"); |
177 | 177 | ||
178 | return rc; | 178 | return rc; |
179 | } | 179 | } |
180 | 180 | ||
181 | static int smp_ctl_ssctl_enable_activate(int cpu, unsigned long interval) | 181 | static int smp_ctl_ssctl_enable_activate(int cpu, unsigned long interval) |
182 | { | 182 | { |
183 | int rc; | 183 | int rc; |
184 | struct hws_execute_parms ep; | 184 | struct hws_execute_parms ep; |
185 | struct hws_cpu_buffer *cb; | 185 | struct hws_cpu_buffer *cb; |
186 | 186 | ||
187 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 187 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
188 | 188 | ||
189 | cb->ssctl.h = 1; | 189 | cb->ssctl.h = 1; |
190 | cb->ssctl.tear = cb->first_sdbt; | 190 | cb->ssctl.tear = cb->first_sdbt; |
191 | cb->ssctl.dear = *(unsigned long *) cb->first_sdbt; | 191 | cb->ssctl.dear = *(unsigned long *) cb->first_sdbt; |
192 | cb->ssctl.interval = interval; | 192 | cb->ssctl.interval = interval; |
193 | cb->ssctl.es = 1; | 193 | cb->ssctl.es = 1; |
194 | cb->ssctl.cs = 1; | 194 | cb->ssctl.cs = 1; |
195 | 195 | ||
196 | ep.buffer = &cb->ssctl; | 196 | ep.buffer = &cb->ssctl; |
197 | smp_call_function_single(cpu, execute_ssctl, &ep, 1); | 197 | smp_call_function_single(cpu, execute_ssctl, &ep, 1); |
198 | rc = ep.rc; | 198 | rc = ep.rc; |
199 | if (rc) | 199 | if (rc) |
200 | printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu); | 200 | printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu); |
201 | 201 | ||
202 | ep.buffer = &cb->qsi; | 202 | ep.buffer = &cb->qsi; |
203 | smp_call_function_single(cpu, execute_qsi, &ep, 1); | 203 | smp_call_function_single(cpu, execute_qsi, &ep, 1); |
204 | if (ep.rc) | 204 | if (ep.rc) |
205 | printk(KERN_ERR "hwsampler: CPU %d CPUMF QSI failed.\n", cpu); | 205 | printk(KERN_ERR "hwsampler: CPU %d CPUMF QSI failed.\n", cpu); |
206 | 206 | ||
207 | return rc; | 207 | return rc; |
208 | } | 208 | } |
209 | 209 | ||
210 | static int smp_ctl_qsi(int cpu) | 210 | static int smp_ctl_qsi(int cpu) |
211 | { | 211 | { |
212 | struct hws_execute_parms ep; | 212 | struct hws_execute_parms ep; |
213 | struct hws_cpu_buffer *cb; | 213 | struct hws_cpu_buffer *cb; |
214 | 214 | ||
215 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 215 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
216 | 216 | ||
217 | ep.buffer = &cb->qsi; | 217 | ep.buffer = &cb->qsi; |
218 | smp_call_function_single(cpu, execute_qsi, &ep, 1); | 218 | smp_call_function_single(cpu, execute_qsi, &ep, 1); |
219 | 219 | ||
220 | return ep.rc; | 220 | return ep.rc; |
221 | } | 221 | } |
222 | 222 | ||
223 | static inline unsigned long *trailer_entry_ptr(unsigned long v) | 223 | static inline unsigned long *trailer_entry_ptr(unsigned long v) |
224 | { | 224 | { |
225 | void *ret; | 225 | void *ret; |
226 | 226 | ||
227 | ret = (void *)v; | 227 | ret = (void *)v; |
228 | ret += PAGE_SIZE; | 228 | ret += PAGE_SIZE; |
229 | ret -= sizeof(struct hws_trailer_entry); | 229 | ret -= sizeof(struct hws_trailer_entry); |
230 | 230 | ||
231 | return (unsigned long *) ret; | 231 | return (unsigned long *) ret; |
232 | } | 232 | } |
233 | 233 | ||
234 | /* prototypes for external interrupt handler and worker */ | 234 | /* prototypes for external interrupt handler and worker */ |
235 | static void hws_ext_handler(unsigned int ext_int_code, | 235 | static void hws_ext_handler(unsigned int ext_int_code, |
236 | unsigned int param32, unsigned long param64); | 236 | unsigned int param32, unsigned long param64); |
237 | 237 | ||
238 | static void worker(struct work_struct *work); | 238 | static void worker(struct work_struct *work); |
239 | 239 | ||
240 | static void add_samples_to_oprofile(unsigned cpu, unsigned long *, | 240 | static void add_samples_to_oprofile(unsigned cpu, unsigned long *, |
241 | unsigned long *dear); | 241 | unsigned long *dear); |
242 | 242 | ||
243 | static void init_all_cpu_buffers(void) | 243 | static void init_all_cpu_buffers(void) |
244 | { | 244 | { |
245 | int cpu; | 245 | int cpu; |
246 | struct hws_cpu_buffer *cb; | 246 | struct hws_cpu_buffer *cb; |
247 | 247 | ||
248 | for_each_online_cpu(cpu) { | 248 | for_each_online_cpu(cpu) { |
249 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 249 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
250 | memset(cb, 0, sizeof(struct hws_cpu_buffer)); | 250 | memset(cb, 0, sizeof(struct hws_cpu_buffer)); |
251 | } | 251 | } |
252 | } | 252 | } |
253 | 253 | ||
254 | static int is_link_entry(unsigned long *s) | 254 | static int is_link_entry(unsigned long *s) |
255 | { | 255 | { |
256 | return *s & 0x1ul ? 1 : 0; | 256 | return *s & 0x1ul ? 1 : 0; |
257 | } | 257 | } |
258 | 258 | ||
259 | static unsigned long *get_next_sdbt(unsigned long *s) | 259 | static unsigned long *get_next_sdbt(unsigned long *s) |
260 | { | 260 | { |
261 | return (unsigned long *) (*s & ~0x1ul); | 261 | return (unsigned long *) (*s & ~0x1ul); |
262 | } | 262 | } |
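
The low bit does double duty here: SDBT pages are 4K-aligned, so bit 0 is free to tag link entries, which allocate_sdbt() below stores as address + 1. A tiny runnable check of the convention:

#include <assert.h>

int main(void)
{
	unsigned long sdbt = 0x20000;	/* page-aligned SDBT address */
	unsigned long link = sdbt + 1;	/* tagged link entry */

	assert(link & 0x1ul);		/* is_link_entry() */
	assert((link & ~0x1ul) == sdbt);	/* get_next_sdbt() */
	return 0;
}
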
263 | 263 | ||
264 | static int prepare_cpu_buffers(void) | 264 | static int prepare_cpu_buffers(void) |
265 | { | 265 | { |
266 | int cpu; | 266 | int cpu; |
267 | int rc; | 267 | int rc; |
268 | struct hws_cpu_buffer *cb; | 268 | struct hws_cpu_buffer *cb; |
269 | 269 | ||
270 | rc = 0; | 270 | rc = 0; |
271 | for_each_online_cpu(cpu) { | 271 | for_each_online_cpu(cpu) { |
272 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 272 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
273 | atomic_set(&cb->ext_params, 0); | 273 | atomic_set(&cb->ext_params, 0); |
274 | cb->worker_entry = 0; | 274 | cb->worker_entry = 0; |
275 | cb->sample_overflow = 0; | 275 | cb->sample_overflow = 0; |
276 | cb->req_alert = 0; | 276 | cb->req_alert = 0; |
277 | cb->incorrect_sdbt_entry = 0; | 277 | cb->incorrect_sdbt_entry = 0; |
278 | cb->invalid_entry_address = 0; | 278 | cb->invalid_entry_address = 0; |
279 | cb->loss_of_sample_data = 0; | 279 | cb->loss_of_sample_data = 0; |
280 | cb->sample_auth_change_alert = 0; | 280 | cb->sample_auth_change_alert = 0; |
281 | cb->finish = 0; | 281 | cb->finish = 0; |
282 | cb->oom = 0; | 282 | cb->oom = 0; |
283 | cb->stop_mode = 0; | 283 | cb->stop_mode = 0; |
284 | } | 284 | } |
285 | 285 | ||
286 | return rc; | 286 | return rc; |
287 | } | 287 | } |
288 | 288 | ||
289 | /* | 289 | /* |
290 | * allocate_sdbt() - allocate sampler memory | 290 | * allocate_sdbt() - allocate sampler memory |
291 | * @cpu: the cpu for which sampler memory is allocated | 291 | * @cpu: the cpu for which sampler memory is allocated |
292 | * | 292 | * |
293 | * A 4K page is allocated for each requested SDBT. | 293 | * A 4K page is allocated for each requested SDBT. |
294 | * A maximum of 511 4K pages are allocated for the SDBs in each of the SDBTs. | 294 | * A maximum of 511 4K pages are allocated for the SDBs in each of the SDBTs. |
295 | * Set the ALERT_REQ mask in each SDB's trailer. | 295 | * Set the ALERT_REQ mask in each SDB's trailer. |
296 | * Returns zero if successful, <0 otherwise. | 296 | * Returns zero if successful, <0 otherwise. |
297 | */ | 297 | */ |
298 | static int allocate_sdbt(int cpu) | 298 | static int allocate_sdbt(int cpu) |
299 | { | 299 | { |
300 | int j, k, rc; | 300 | int j, k, rc; |
301 | unsigned long *sdbt; | 301 | unsigned long *sdbt; |
302 | unsigned long sdb; | 302 | unsigned long sdb; |
303 | unsigned long *tail; | 303 | unsigned long *tail; |
304 | unsigned long *trailer; | 304 | unsigned long *trailer; |
305 | struct hws_cpu_buffer *cb; | 305 | struct hws_cpu_buffer *cb; |
306 | 306 | ||
307 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 307 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
308 | 308 | ||
309 | if (cb->first_sdbt) | 309 | if (cb->first_sdbt) |
310 | return -EINVAL; | 310 | return -EINVAL; |
311 | 311 | ||
312 | sdbt = NULL; | 312 | sdbt = NULL; |
313 | tail = sdbt; | 313 | tail = sdbt; |
314 | 314 | ||
315 | for (j = 0; j < num_sdbt; j++) { | 315 | for (j = 0; j < num_sdbt; j++) { |
316 | sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL); | 316 | sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL); |
317 | 317 | ||
318 | mutex_lock(&hws_sem_oom); | 318 | mutex_lock(&hws_sem_oom); |
319 | /* OOM killer might have been activated */ | 319 | /* OOM killer might have been activated */ |
320 | barrier(); | 320 | barrier(); |
321 | if (oom_killer_was_active || !sdbt) { | 321 | if (oom_killer_was_active || !sdbt) { |
322 | if (sdbt) | 322 | if (sdbt) |
323 | free_page((unsigned long)sdbt); | 323 | free_page((unsigned long)sdbt); |
324 | 324 | ||
325 | goto allocate_sdbt_error; | 325 | goto allocate_sdbt_error; |
326 | } | 326 | } |
327 | if (cb->first_sdbt == 0) | 327 | if (cb->first_sdbt == 0) |
328 | cb->first_sdbt = (unsigned long)sdbt; | 328 | cb->first_sdbt = (unsigned long)sdbt; |
329 | 329 | ||
330 | /* link current page to tail of chain */ | 330 | /* link current page to tail of chain */ |
331 | if (tail) | 331 | if (tail) |
332 | *tail = (unsigned long)(void *)sdbt + 1; | 332 | *tail = (unsigned long)(void *)sdbt + 1; |
333 | 333 | ||
334 | mutex_unlock(&hws_sem_oom); | 334 | mutex_unlock(&hws_sem_oom); |
335 | 335 | ||
336 | for (k = 0; k < num_sdb; k++) { | 336 | for (k = 0; k < num_sdb; k++) { |
337 | /* get and set SDB page */ | 337 | /* get and set SDB page */ |
338 | sdb = get_zeroed_page(GFP_KERNEL); | 338 | sdb = get_zeroed_page(GFP_KERNEL); |
339 | 339 | ||
340 | mutex_lock(&hws_sem_oom); | 340 | mutex_lock(&hws_sem_oom); |
341 | /* OOM killer might have been activated */ | 341 | /* OOM killer might have been activated */ |
342 | barrier(); | 342 | barrier(); |
343 | if (oom_killer_was_active || !sdb) { | 343 | if (oom_killer_was_active || !sdb) { |
344 | if (sdb) | 344 | if (sdb) |
345 | free_page(sdb); | 345 | free_page(sdb); |
346 | 346 | ||
347 | goto allocate_sdbt_error; | 347 | goto allocate_sdbt_error; |
348 | } | 348 | } |
349 | *sdbt = sdb; | 349 | *sdbt = sdb; |
350 | trailer = trailer_entry_ptr(*sdbt); | 350 | trailer = trailer_entry_ptr(*sdbt); |
351 | *trailer = ALERT_REQ_MASK; | 351 | *trailer = ALERT_REQ_MASK; |
352 | sdbt++; | 352 | sdbt++; |
353 | mutex_unlock(&hws_sem_oom); | 353 | mutex_unlock(&hws_sem_oom); |
354 | } | 354 | } |
355 | tail = sdbt; | 355 | tail = sdbt; |
356 | } | 356 | } |
357 | mutex_lock(&hws_sem_oom); | 357 | mutex_lock(&hws_sem_oom); |
358 | if (oom_killer_was_active) | 358 | if (oom_killer_was_active) |
359 | goto allocate_sdbt_error; | 359 | goto allocate_sdbt_error; |
360 | 360 | ||
361 | rc = 0; | 361 | rc = 0; |
362 | if (tail) | 362 | if (tail) |
363 | *tail = (unsigned long) | 363 | *tail = (unsigned long) |
364 | ((void *)cb->first_sdbt) + 1; | 364 | ((void *)cb->first_sdbt) + 1; |
365 | 365 | ||
366 | allocate_sdbt_exit: | 366 | allocate_sdbt_exit: |
367 | mutex_unlock(&hws_sem_oom); | 367 | mutex_unlock(&hws_sem_oom); |
368 | return rc; | 368 | return rc; |
369 | 369 | ||
370 | allocate_sdbt_error: | 370 | allocate_sdbt_error: |
371 | rc = -ENOMEM; | 371 | rc = -ENOMEM; |
372 | goto allocate_sdbt_exit; | 372 | goto allocate_sdbt_exit; |
373 | } | 373 | } |
374 | 374 | ||
375 | /* | 375 | /* |
376 | * deallocate_sdbt() - deallocate all sampler memory | 376 | * deallocate_sdbt() - deallocate all sampler memory |
377 | * | 377 | * |
378 | * For each online CPU all SDBT trees are deallocated. | 378 | * For each online CPU all SDBT trees are deallocated. |
379 | * Returns the number of freed pages. | 379 | * Returns the number of freed pages. |
380 | */ | 380 | */ |
381 | static int deallocate_sdbt(void) | 381 | static int deallocate_sdbt(void) |
382 | { | 382 | { |
383 | int cpu; | 383 | int cpu; |
384 | int counter; | 384 | int counter; |
385 | 385 | ||
386 | counter = 0; | 386 | counter = 0; |
387 | 387 | ||
388 | for_each_online_cpu(cpu) { | 388 | for_each_online_cpu(cpu) { |
389 | unsigned long start; | 389 | unsigned long start; |
390 | unsigned long sdbt; | 390 | unsigned long sdbt; |
391 | unsigned long *curr; | 391 | unsigned long *curr; |
392 | struct hws_cpu_buffer *cb; | 392 | struct hws_cpu_buffer *cb; |
393 | 393 | ||
394 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 394 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
395 | 395 | ||
396 | if (!cb->first_sdbt) | 396 | if (!cb->first_sdbt) |
397 | continue; | 397 | continue; |
398 | 398 | ||
399 | sdbt = cb->first_sdbt; | 399 | sdbt = cb->first_sdbt; |
400 | curr = (unsigned long *) sdbt; | 400 | curr = (unsigned long *) sdbt; |
401 | start = sdbt; | 401 | start = sdbt; |
402 | 402 | ||
403 | /* we'll free the SDBT after all SDBs are processed... */ | 403 | /* we'll free the SDBT after all SDBs are processed... */ |
404 | while (1) { | 404 | while (1) { |
405 | if (!*curr || !sdbt) | 405 | if (!*curr || !sdbt) |
406 | break; | 406 | break; |
407 | 407 | ||
408 | /* link entry: advance to the next SDBT and free this page */ | 408 | /* link entry: advance to the next SDBT and free this page */ |
409 | if (is_link_entry(curr)) { | 409 | if (is_link_entry(curr)) { |
410 | curr = get_next_sdbt(curr); | 410 | curr = get_next_sdbt(curr); |
411 | if (sdbt) | 411 | if (sdbt) |
412 | free_page(sdbt); | 412 | free_page(sdbt); |
413 | 413 | ||
414 | /* we are done if we reach the start */ | 414 | /* we are done if we reach the start */ |
415 | if ((unsigned long) curr == start) | 415 | if ((unsigned long) curr == start) |
416 | break; | 416 | break; |
417 | else | 417 | else |
418 | sdbt = (unsigned long) curr; | 418 | sdbt = (unsigned long) curr; |
419 | } else { | 419 | } else { |
420 | /* process SDB pointer */ | 420 | /* process SDB pointer */ |
421 | if (*curr) { | 421 | if (*curr) { |
422 | free_page(*curr); | 422 | free_page(*curr); |
423 | curr++; | 423 | curr++; |
424 | } | 424 | } |
425 | } | 425 | } |
426 | counter++; | 426 | counter++; |
427 | } | 427 | } |
428 | cb->first_sdbt = 0; | 428 | cb->first_sdbt = 0; |
429 | } | 429 | } |
430 | return counter; | 430 | return counter; |
431 | } | 431 | } |
432 | 432 | ||
433 | static int start_sampling(int cpu) | 433 | static int start_sampling(int cpu) |
434 | { | 434 | { |
435 | int rc; | 435 | int rc; |
436 | struct hws_cpu_buffer *cb; | 436 | struct hws_cpu_buffer *cb; |
437 | 437 | ||
438 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 438 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
439 | rc = smp_ctl_ssctl_enable_activate(cpu, interval); | 439 | rc = smp_ctl_ssctl_enable_activate(cpu, interval); |
440 | if (rc) { | 440 | if (rc) { |
441 | printk(KERN_INFO "hwsampler: CPU %d ssctl failed.\n", cpu); | 441 | printk(KERN_INFO "hwsampler: CPU %d ssctl failed.\n", cpu); |
442 | goto start_exit; | 442 | goto start_exit; |
443 | } | 443 | } |
444 | 444 | ||
445 | rc = -EINVAL; | 445 | rc = -EINVAL; |
446 | if (!cb->qsi.es) { | 446 | if (!cb->qsi.es) { |
447 | printk(KERN_INFO "hwsampler: CPU %d ssctl not enabled.\n", cpu); | 447 | printk(KERN_INFO "hwsampler: CPU %d ssctl not enabled.\n", cpu); |
448 | goto start_exit; | 448 | goto start_exit; |
449 | } | 449 | } |
450 | 450 | ||
451 | if (!cb->qsi.cs) { | 451 | if (!cb->qsi.cs) { |
452 | printk(KERN_INFO "hwsampler: CPU %d ssctl not active.\n", cpu); | 452 | printk(KERN_INFO "hwsampler: CPU %d ssctl not active.\n", cpu); |
453 | goto start_exit; | 453 | goto start_exit; |
454 | } | 454 | } |
455 | 455 | ||
456 | printk(KERN_INFO | 456 | printk(KERN_INFO |
457 | "hwsampler: CPU %d, CPUMF Sampling started, interval %lu.\n", | 457 | "hwsampler: CPU %d, CPUMF Sampling started, interval %lu.\n", |
458 | cpu, interval); | 458 | cpu, interval); |
459 | 459 | ||
460 | rc = 0; | 460 | rc = 0; |
461 | 461 | ||
462 | start_exit: | 462 | start_exit: |
463 | return rc; | 463 | return rc; |
464 | } | 464 | } |
465 | 465 | ||
466 | static int stop_sampling(int cpu) | 466 | static int stop_sampling(int cpu) |
467 | { | 467 | { |
468 | unsigned long v; | 468 | unsigned long v; |
469 | int rc; | 469 | int rc; |
470 | struct hws_cpu_buffer *cb; | 470 | struct hws_cpu_buffer *cb; |
471 | 471 | ||
472 | rc = smp_ctl_qsi(cpu); | 472 | rc = smp_ctl_qsi(cpu); |
473 | WARN_ON(rc); | 473 | WARN_ON(rc); |
474 | 474 | ||
475 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 475 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
476 | if (!rc && !cb->qsi.es) | 476 | if (!rc && !cb->qsi.es) |
477 | printk(KERN_INFO "hwsampler: CPU %d, already stopped.\n", cpu); | 477 | printk(KERN_INFO "hwsampler: CPU %d, already stopped.\n", cpu); |
478 | 478 | ||
479 | rc = smp_ctl_ssctl_stop(cpu); | 479 | rc = smp_ctl_ssctl_stop(cpu); |
480 | if (rc) { | 480 | if (rc) { |
481 | printk(KERN_INFO "hwsampler: CPU %d, ssctl stop error %d.\n", | 481 | printk(KERN_INFO "hwsampler: CPU %d, ssctl stop error %d.\n", |
482 | cpu, rc); | 482 | cpu, rc); |
483 | goto stop_exit; | 483 | goto stop_exit; |
484 | } | 484 | } |
485 | 485 | ||
486 | printk(KERN_INFO "hwsampler: CPU %d, CPUMF Sampling stopped.\n", cpu); | 486 | printk(KERN_INFO "hwsampler: CPU %d, CPUMF Sampling stopped.\n", cpu); |
487 | 487 | ||
488 | stop_exit: | 488 | stop_exit: |
489 | v = cb->req_alert; | 489 | v = cb->req_alert; |
490 | if (v) | 490 | if (v) |
491 | printk(KERN_ERR "hwsampler: CPU %d CPUMF Request alert," | 491 | printk(KERN_ERR "hwsampler: CPU %d CPUMF Request alert," |
492 | " count=%lu.\n", cpu, v); | 492 | " count=%lu.\n", cpu, v); |
493 | 493 | ||
494 | v = cb->loss_of_sample_data; | 494 | v = cb->loss_of_sample_data; |
495 | if (v) | 495 | if (v) |
496 | printk(KERN_ERR "hwsampler: CPU %d CPUMF Loss of sample data," | 496 | printk(KERN_ERR "hwsampler: CPU %d CPUMF Loss of sample data," |
497 | " count=%lu.\n", cpu, v); | 497 | " count=%lu.\n", cpu, v); |
498 | 498 | ||
499 | v = cb->invalid_entry_address; | 499 | v = cb->invalid_entry_address; |
500 | if (v) | 500 | if (v) |
501 | printk(KERN_ERR "hwsampler: CPU %d CPUMF Invalid entry address," | 501 | printk(KERN_ERR "hwsampler: CPU %d CPUMF Invalid entry address," |
502 | " count=%lu.\n", cpu, v); | 502 | " count=%lu.\n", cpu, v); |
503 | 503 | ||
504 | v = cb->incorrect_sdbt_entry; | 504 | v = cb->incorrect_sdbt_entry; |
505 | if (v) | 505 | if (v) |
506 | printk(KERN_ERR | 506 | printk(KERN_ERR |
507 | "hwsampler: CPU %d CPUMF Incorrect SDBT address," | 507 | "hwsampler: CPU %d CPUMF Incorrect SDBT address," |
508 | " count=%lu.\n", cpu, v); | 508 | " count=%lu.\n", cpu, v); |
509 | 509 | ||
510 | v = cb->sample_auth_change_alert; | 510 | v = cb->sample_auth_change_alert; |
511 | if (v) | 511 | if (v) |
512 | printk(KERN_ERR | 512 | printk(KERN_ERR |
513 | "hwsampler: CPU %d CPUMF Sample authorization change," | 513 | "hwsampler: CPU %d CPUMF Sample authorization change," |
514 | " count=%lu.\n", cpu, v); | 514 | " count=%lu.\n", cpu, v); |
515 | 515 | ||
516 | return rc; | 516 | return rc; |
517 | } | 517 | } |
518 | 518 | ||
519 | static int check_hardware_prerequisites(void) | 519 | static int check_hardware_prerequisites(void) |
520 | { | 520 | { |
521 | if (!test_facility(68)) | 521 | if (!test_facility(68)) |
522 | return -EOPNOTSUPP; | 522 | return -EOPNOTSUPP; |
523 | return 0; | 523 | return 0; |
524 | } | 524 | } |
525 | /* | 525 | /* |
526 | * hws_oom_callback() - the OOM callback function | 526 | * hws_oom_callback() - the OOM callback function |
527 | * | 527 | * |
528 | * In case the callback is invoked during memory allocation for the | 528 | * In case the callback is invoked during memory allocation for the |
529 | * hw sampler, all obtained memory is deallocated and a flag is set | 529 | * hw sampler, all obtained memory is deallocated and a flag is set |
530 | * so main sampler memory allocation can exit with a failure code. | 530 | * so main sampler memory allocation can exit with a failure code. |
531 | * In case the callback is invoked during sampling the hw sampler | 531 | * In case the callback is invoked during sampling the hw sampler |
532 | * is deactivated for all CPUs. | 532 | * is deactivated for all CPUs. |
533 | */ | 533 | */ |
534 | static int hws_oom_callback(struct notifier_block *nfb, | 534 | static int hws_oom_callback(struct notifier_block *nfb, |
535 | unsigned long dummy, void *parm) | 535 | unsigned long dummy, void *parm) |
536 | { | 536 | { |
537 | unsigned long *freed; | 537 | unsigned long *freed; |
538 | int cpu; | 538 | int cpu; |
539 | struct hws_cpu_buffer *cb; | 539 | struct hws_cpu_buffer *cb; |
540 | 540 | ||
541 | freed = parm; | 541 | freed = parm; |
542 | 542 | ||
543 | mutex_lock(&hws_sem_oom); | 543 | mutex_lock(&hws_sem_oom); |
544 | 544 | ||
545 | if (hws_state == HWS_DEALLOCATED) { | 545 | if (hws_state == HWS_DEALLOCATED) { |
546 | /* during memory allocation */ | 546 | /* during memory allocation */ |
547 | if (oom_killer_was_active == 0) { | 547 | if (oom_killer_was_active == 0) { |
548 | oom_killer_was_active = 1; | 548 | oom_killer_was_active = 1; |
549 | *freed += deallocate_sdbt(); | 549 | *freed += deallocate_sdbt(); |
550 | } | 550 | } |
551 | } else { | 551 | } else { |
552 | int i; | 552 | int i; |
553 | cpu = get_cpu(); | 553 | cpu = get_cpu(); |
554 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 554 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
555 | 555 | ||
556 | if (!cb->oom) { | 556 | if (!cb->oom) { |
557 | for_each_online_cpu(i) { | 557 | for_each_online_cpu(i) { |
558 | smp_ctl_ssctl_deactivate(i); | 558 | smp_ctl_ssctl_deactivate(i); |
559 | cb->oom = 1; | 559 | cb->oom = 1; |
560 | } | 560 | } |
561 | cb->finish = 1; | 561 | cb->finish = 1; |
562 | 562 | ||
563 | printk(KERN_INFO | 563 | printk(KERN_INFO |
564 | "hwsampler: CPU %d, OOM notify during CPUMF Sampling.\n", | 564 | "hwsampler: CPU %d, OOM notify during CPUMF Sampling.\n", |
565 | cpu); | 565 | cpu); |
566 | } | 566 | } |
567 | } | 567 | } |
568 | 568 | ||
569 | mutex_unlock(&hws_sem_oom); | 569 | mutex_unlock(&hws_sem_oom); |
570 | 570 | ||
571 | return NOTIFY_OK; | 571 | return NOTIFY_OK; |
572 | } | 572 | } |
573 | 573 | ||
574 | static struct notifier_block hws_oom_notifier = { | 574 | static struct notifier_block hws_oom_notifier = { |
575 | .notifier_call = hws_oom_callback | 575 | .notifier_call = hws_oom_callback |
576 | }; | 576 | }; |
577 | 577 | ||
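The OOM notifier above does double duty. While buffers are still being allocated (hws_state == HWS_DEALLOCATED) it tears down the partially built SDB tables and reports the number of freed pages back through the *parm pointer, so the allocator can give up cleanly; once sampling runs, it instead deactivates the sampler on every CPU and flags the buffers for teardown. A minimal sketch of this notifier pattern, with a hypothetical handler and helper (register_oom_notifier() and NOTIFY_OK are the standard kernel API):

    static int my_oom_cb(struct notifier_block *nb,
                         unsigned long unused, void *parm)
    {
        unsigned long *freed = parm;        /* report freed pages here */

        *freed += release_my_caches();      /* hypothetical helper */
        return NOTIFY_OK;
    }

    static struct notifier_block my_oom_nb = {
        .notifier_call = my_oom_cb,
    };

    /* bracket a large allocation: */
    register_oom_notifier(&my_oom_nb);
    /* ... allocate ... */
    unregister_oom_notifier(&my_oom_nb);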
578 | static int hws_cpu_callback(struct notifier_block *nfb, | 578 | static int hws_cpu_callback(struct notifier_block *nfb, |
579 | unsigned long action, void *hcpu) | 579 | unsigned long action, void *hcpu) |
580 | { | 580 | { |
581 | /* We do not have sampler space available for all possible CPUs. | 581 | /* We do not have sampler space available for all possible CPUs. |
582 | All CPUs should be online when hw sampling is activated. */ | 582 | All CPUs should be online when hw sampling is activated. */ |
583 | return NOTIFY_BAD; | 583 | return NOTIFY_BAD; |
584 | } | 584 | } |
585 | 585 | ||
586 | static struct notifier_block hws_cpu_notifier = { | 586 | static struct notifier_block hws_cpu_notifier = { |
587 | .notifier_call = hws_cpu_callback | 587 | .notifier_call = hws_cpu_callback |
588 | }; | 588 | }; |
589 | 589 | ||
590 | /** | 590 | /** |
591 | * hwsampler_deactivate() - set hardware sampling temporarily inactive | 591 | * hwsampler_deactivate() - set hardware sampling temporarily inactive |
592 | * @cpu: specifies the CPU to be set inactive. | 592 | * @cpu: specifies the CPU to be set inactive. |
593 | * | 593 | * |
594 | * Returns 0 on success, !0 on failure. | 594 | * Returns 0 on success, !0 on failure. |
595 | */ | 595 | */ |
596 | int hwsampler_deactivate(unsigned int cpu) | 596 | int hwsampler_deactivate(unsigned int cpu) |
597 | { | 597 | { |
598 | /* | 598 | /* |
599 | * Deactivate hw sampling temporarily and flush the buffer | 599 | * Deactivate hw sampling temporarily and flush the buffer |
600 | * by pushing all the pending samples to oprofile buffer. | 600 | * by pushing all the pending samples to oprofile buffer. |
601 | * | 601 | * |
602 | * This function can be called under one of the following conditions: | 602 | * This function can be called under one of the following conditions: |
603 | * Memory unmap, task is exiting. | 603 | * Memory unmap, task is exiting. |
604 | */ | 604 | */ |
605 | int rc; | 605 | int rc; |
606 | struct hws_cpu_buffer *cb; | 606 | struct hws_cpu_buffer *cb; |
607 | 607 | ||
608 | rc = 0; | 608 | rc = 0; |
609 | mutex_lock(&hws_sem); | 609 | mutex_lock(&hws_sem); |
610 | 610 | ||
611 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 611 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
612 | if (hws_state == HWS_STARTED) { | 612 | if (hws_state == HWS_STARTED) { |
613 | rc = smp_ctl_qsi(cpu); | 613 | rc = smp_ctl_qsi(cpu); |
614 | WARN_ON(rc); | 614 | WARN_ON(rc); |
615 | if (cb->qsi.cs) { | 615 | if (cb->qsi.cs) { |
616 | rc = smp_ctl_ssctl_deactivate(cpu); | 616 | rc = smp_ctl_ssctl_deactivate(cpu); |
617 | if (rc) { | 617 | if (rc) { |
618 | printk(KERN_INFO | 618 | printk(KERN_INFO |
619 | "hwsampler: CPU %d, CPUMF Deactivation failed.\n", cpu); | 619 | "hwsampler: CPU %d, CPUMF Deactivation failed.\n", cpu); |
620 | cb->finish = 1; | 620 | cb->finish = 1; |
621 | hws_state = HWS_STOPPING; | 621 | hws_state = HWS_STOPPING; |
622 | } else { | 622 | } else { |
623 | hws_flush_all = 1; | 623 | hws_flush_all = 1; |
624 | /* Add work to queue to read pending samples.*/ | 624 | /* Add work to queue to read pending samples.*/ |
625 | queue_work_on(cpu, hws_wq, &cb->worker); | 625 | queue_work_on(cpu, hws_wq, &cb->worker); |
626 | } | 626 | } |
627 | } | 627 | } |
628 | } | 628 | } |
629 | mutex_unlock(&hws_sem); | 629 | mutex_unlock(&hws_sem); |
630 | 630 | ||
631 | if (hws_wq) | 631 | if (hws_wq) |
632 | flush_workqueue(hws_wq); | 632 | flush_workqueue(hws_wq); |
633 | 633 | ||
634 | return rc; | 634 | return rc; |
635 | } | 635 | } |
636 | 636 | ||
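hwsampler_deactivate() only quiesces sampling and flushes pending samples; the buffers and the HWS_STARTED state stay intact, so it is meant to be paired with hwsampler_activate() on the same CPU. A usage sketch (illustrative, assuming the caller tracks the CPU number):

    /* quiesce across a critical section, e.g. a memory unmap */
    if (hwsampler_deactivate(cpu) == 0) {
        /* ... pending samples have been pushed to oprofile ... */
        hwsampler_activate(cpu);
    }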
637 | /** | 637 | /** |
638 | * hwsampler_activate() - activate/resume hardware sampling which was deactivated | 638 | * hwsampler_activate() - activate/resume hardware sampling which was deactivated |
639 | * @cpu: specifies the CPU to be set active. | 639 | * @cpu: specifies the CPU to be set active. |
640 | * | 640 | * |
641 | * Returns 0 on success, !0 on failure. | 641 | * Returns 0 on success, !0 on failure. |
642 | */ | 642 | */ |
643 | int hwsampler_activate(unsigned int cpu) | 643 | int hwsampler_activate(unsigned int cpu) |
644 | { | 644 | { |
645 | /* | 645 | /* |
646 | * Re-activate hw sampling. This should be called in pair with | 646 | * Re-activate hw sampling. This should be called in pair with |
647 | * hwsampler_deactivate(). | 647 | * hwsampler_deactivate(). |
648 | */ | 648 | */ |
649 | int rc; | 649 | int rc; |
650 | struct hws_cpu_buffer *cb; | 650 | struct hws_cpu_buffer *cb; |
651 | 651 | ||
652 | rc = 0; | 652 | rc = 0; |
653 | mutex_lock(&hws_sem); | 653 | mutex_lock(&hws_sem); |
654 | 654 | ||
655 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 655 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
656 | if (hws_state == HWS_STARTED) { | 656 | if (hws_state == HWS_STARTED) { |
657 | rc = smp_ctl_qsi(cpu); | 657 | rc = smp_ctl_qsi(cpu); |
658 | WARN_ON(rc); | 658 | WARN_ON(rc); |
659 | if (!cb->qsi.cs) { | 659 | if (!cb->qsi.cs) { |
660 | hws_flush_all = 0; | 660 | hws_flush_all = 0; |
661 | rc = smp_ctl_ssctl_enable_activate(cpu, interval); | 661 | rc = smp_ctl_ssctl_enable_activate(cpu, interval); |
662 | if (rc) { | 662 | if (rc) { |
663 | printk(KERN_ERR | 663 | printk(KERN_ERR |
664 | "CPU %d, CPUMF activate sampling failed.\n", | 664 | "CPU %d, CPUMF activate sampling failed.\n", |
665 | cpu); | 665 | cpu); |
666 | } | 666 | } |
667 | } | 667 | } |
668 | } | 668 | } |
669 | 669 | ||
670 | mutex_unlock(&hws_sem); | 670 | mutex_unlock(&hws_sem); |
671 | 671 | ||
672 | return rc; | 672 | return rc; |
673 | } | 673 | } |
674 | 674 | ||
675 | static void hws_ext_handler(unsigned int ext_int_code, | 675 | static void hws_ext_handler(unsigned int ext_int_code, |
676 | unsigned int param32, unsigned long param64) | 676 | unsigned int param32, unsigned long param64) |
677 | { | 677 | { |
678 | struct hws_cpu_buffer *cb; | 678 | struct hws_cpu_buffer *cb; |
679 | 679 | ||
680 | kstat_cpu(smp_processor_id()).irqs[EXTINT_CPM]++; | 680 | kstat_cpu(smp_processor_id()).irqs[EXTINT_CPM]++; |
681 | cb = &__get_cpu_var(sampler_cpu_buffer); | 681 | cb = &__get_cpu_var(sampler_cpu_buffer); |
682 | atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32); | 682 | atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32); |
683 | if (hws_wq) | 683 | if (hws_wq) |
684 | queue_work(hws_wq, &cb->worker); | 684 | queue_work(hws_wq, &cb->worker); |
685 | } | 685 | } |
686 | 686 | ||
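hws_ext_handler() accumulates the interrupt parameter bits with a plain read followed by atomic_xchg(), which is not a single atomic read-modify-write: if the worker's atomic_xchg(&cb->ext_params, 0) were to slip in between the read and the exchange, bits could in principle be lost. A lock-free OR can be expressed as a compare-and-swap loop (sketch, not the committed code):

    int old, new;

    do {
        old = atomic_read(&cb->ext_params);
        new = old | param32;
    } while (atomic_cmpxchg(&cb->ext_params, old, new) != old);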
687 | static int check_qsi_on_setup(void) | 687 | static int check_qsi_on_setup(void) |
688 | { | 688 | { |
689 | int rc; | 689 | int rc; |
690 | unsigned int cpu; | 690 | unsigned int cpu; |
691 | struct hws_cpu_buffer *cb; | 691 | struct hws_cpu_buffer *cb; |
692 | 692 | ||
693 | for_each_online_cpu(cpu) { | 693 | for_each_online_cpu(cpu) { |
694 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 694 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
695 | rc = smp_ctl_qsi(cpu); | 695 | rc = smp_ctl_qsi(cpu); |
696 | WARN_ON(rc); | 696 | WARN_ON(rc); |
697 | if (rc) | 697 | if (rc) |
698 | return -EOPNOTSUPP; | 698 | return -EOPNOTSUPP; |
699 | 699 | ||
700 | if (!cb->qsi.as) { | 700 | if (!cb->qsi.as) { |
701 | printk(KERN_INFO "hwsampler: CPUMF sampling is not authorized.\n"); | 701 | printk(KERN_INFO "hwsampler: CPUMF sampling is not authorized.\n"); |
702 | return -EINVAL; | 702 | return -EINVAL; |
703 | } | 703 | } |
704 | 704 | ||
705 | if (cb->qsi.es) { | 705 | if (cb->qsi.es) { |
706 | printk(KERN_WARNING "hwsampler: CPUMF is still enabled.\n"); | 706 | printk(KERN_WARNING "hwsampler: CPUMF is still enabled.\n"); |
707 | rc = smp_ctl_ssctl_stop(cpu); | 707 | rc = smp_ctl_ssctl_stop(cpu); |
708 | if (rc) | 708 | if (rc) |
709 | return -EINVAL; | 709 | return -EINVAL; |
710 | 710 | ||
711 | printk(KERN_INFO | 711 | printk(KERN_INFO |
712 | "CPU %d, CPUMF Sampling stopped now.\n", cpu); | 712 | "CPU %d, CPUMF Sampling stopped now.\n", cpu); |
713 | } | 713 | } |
714 | } | 714 | } |
715 | return 0; | 715 | return 0; |
716 | } | 716 | } |
717 | 717 | ||
718 | static int check_qsi_on_start(void) | 718 | static int check_qsi_on_start(void) |
719 | { | 719 | { |
720 | unsigned int cpu; | 720 | unsigned int cpu; |
721 | int rc; | 721 | int rc; |
722 | struct hws_cpu_buffer *cb; | 722 | struct hws_cpu_buffer *cb; |
723 | 723 | ||
724 | for_each_online_cpu(cpu) { | 724 | for_each_online_cpu(cpu) { |
725 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 725 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
726 | rc = smp_ctl_qsi(cpu); | 726 | rc = smp_ctl_qsi(cpu); |
727 | WARN_ON(rc); | 727 | WARN_ON(rc); |
728 | 728 | ||
729 | if (!cb->qsi.as) | 729 | if (!cb->qsi.as) |
730 | return -EINVAL; | 730 | return -EINVAL; |
731 | 731 | ||
732 | if (cb->qsi.es) | 732 | if (cb->qsi.es) |
733 | return -EINVAL; | 733 | return -EINVAL; |
734 | 734 | ||
735 | if (cb->qsi.cs) | 735 | if (cb->qsi.cs) |
736 | return -EINVAL; | 736 | return -EINVAL; |
737 | } | 737 | } |
738 | return 0; | 738 | return 0; |
739 | } | 739 | } |
740 | 740 | ||
741 | static void worker_on_start(unsigned int cpu) | 741 | static void worker_on_start(unsigned int cpu) |
742 | { | 742 | { |
743 | struct hws_cpu_buffer *cb; | 743 | struct hws_cpu_buffer *cb; |
744 | 744 | ||
745 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 745 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
746 | cb->worker_entry = cb->first_sdbt; | 746 | cb->worker_entry = cb->first_sdbt; |
747 | } | 747 | } |
748 | 748 | ||
749 | static int worker_check_error(unsigned int cpu, int ext_params) | 749 | static int worker_check_error(unsigned int cpu, int ext_params) |
750 | { | 750 | { |
751 | int rc; | 751 | int rc; |
752 | unsigned long *sdbt; | 752 | unsigned long *sdbt; |
753 | struct hws_cpu_buffer *cb; | 753 | struct hws_cpu_buffer *cb; |
754 | 754 | ||
755 | rc = 0; | 755 | rc = 0; |
756 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 756 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
757 | sdbt = (unsigned long *) cb->worker_entry; | 757 | sdbt = (unsigned long *) cb->worker_entry; |
758 | 758 | ||
759 | if (!sdbt || !*sdbt) | 759 | if (!sdbt || !*sdbt) |
760 | return -EINVAL; | 760 | return -EINVAL; |
761 | 761 | ||
762 | if (ext_params & EI_PRA) | 762 | if (ext_params & EI_PRA) |
763 | cb->req_alert++; | 763 | cb->req_alert++; |
764 | 764 | ||
765 | if (ext_params & EI_LSDA) | 765 | if (ext_params & EI_LSDA) |
766 | cb->loss_of_sample_data++; | 766 | cb->loss_of_sample_data++; |
767 | 767 | ||
768 | if (ext_params & EI_IEA) { | 768 | if (ext_params & EI_IEA) { |
769 | cb->invalid_entry_address++; | 769 | cb->invalid_entry_address++; |
770 | rc = -EINVAL; | 770 | rc = -EINVAL; |
771 | } | 771 | } |
772 | 772 | ||
773 | if (ext_params & EI_ISE) { | 773 | if (ext_params & EI_ISE) { |
774 | cb->incorrect_sdbt_entry++; | 774 | cb->incorrect_sdbt_entry++; |
775 | rc = -EINVAL; | 775 | rc = -EINVAL; |
776 | } | 776 | } |
777 | 777 | ||
778 | if (ext_params & EI_SACA) { | 778 | if (ext_params & EI_SACA) { |
779 | cb->sample_auth_change_alert++; | 779 | cb->sample_auth_change_alert++; |
780 | rc = -EINVAL; | 780 | rc = -EINVAL; |
781 | } | 781 | } |
782 | 782 | ||
783 | return rc; | 783 | return rc; |
784 | } | 784 | } |
785 | 785 | ||
786 | static void worker_on_finish(unsigned int cpu) | 786 | static void worker_on_finish(unsigned int cpu) |
787 | { | 787 | { |
788 | int rc, i; | 788 | int rc, i; |
789 | struct hws_cpu_buffer *cb; | 789 | struct hws_cpu_buffer *cb; |
790 | 790 | ||
791 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 791 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
792 | 792 | ||
793 | if (cb->finish) { | 793 | if (cb->finish) { |
794 | rc = smp_ctl_qsi(cpu); | 794 | rc = smp_ctl_qsi(cpu); |
795 | WARN_ON(rc); | 795 | WARN_ON(rc); |
796 | if (cb->qsi.es) { | 796 | if (cb->qsi.es) { |
797 | printk(KERN_INFO | 797 | printk(KERN_INFO |
798 | "hwsampler: CPU %d, CPUMF Stop/Deactivate sampling.\n", | 798 | "hwsampler: CPU %d, CPUMF Stop/Deactivate sampling.\n", |
799 | cpu); | 799 | cpu); |
800 | rc = smp_ctl_ssctl_stop(cpu); | 800 | rc = smp_ctl_ssctl_stop(cpu); |
801 | if (rc) | 801 | if (rc) |
802 | printk(KERN_INFO | 802 | printk(KERN_INFO |
803 | "hwsampler: CPU %d, CPUMF Deactivation failed.\n", | 803 | "hwsampler: CPU %d, CPUMF Deactivation failed.\n", |
804 | cpu); | 804 | cpu); |
805 | 805 | ||
806 | for_each_online_cpu(i) { | 806 | for_each_online_cpu(i) { |
807 | if (i == cpu) | 807 | if (i == cpu) |
808 | continue; | 808 | continue; |
809 | if (!cb->finish) { | 809 | if (!cb->finish) { |
810 | cb->finish = 1; | 810 | cb->finish = 1; |
811 | queue_work_on(i, hws_wq, | 811 | queue_work_on(i, hws_wq, |
812 | &cb->worker); | 812 | &cb->worker); |
813 | } | 813 | } |
814 | } | 814 | } |
815 | } | 815 | } |
816 | } | 816 | } |
817 | } | 817 | } |
818 | 818 | ||
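In the for_each_online_cpu() loop of worker_on_finish() above, cb still points at the invoking CPU's buffer, so cb->finish is set to 1 on the first remote CPU and every later iteration sees it already set; only one other CPU ever gets its worker queued. The apparent intent is to flag CPU i's own buffer, roughly (corrected sketch, hypothetical variable name):

    for_each_online_cpu(i) {
        struct hws_cpu_buffer *other;

        if (i == cpu)
            continue;
        other = &per_cpu(sampler_cpu_buffer, i);
        if (!other->finish) {
            other->finish = 1;
            queue_work_on(i, hws_wq, &other->worker);
        }
    }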
819 | static void worker_on_interrupt(unsigned int cpu) | 819 | static void worker_on_interrupt(unsigned int cpu) |
820 | { | 820 | { |
821 | unsigned long *sdbt; | 821 | unsigned long *sdbt; |
822 | unsigned char done; | 822 | unsigned char done; |
823 | struct hws_cpu_buffer *cb; | 823 | struct hws_cpu_buffer *cb; |
824 | 824 | ||
825 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 825 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
826 | 826 | ||
827 | sdbt = (unsigned long *) cb->worker_entry; | 827 | sdbt = (unsigned long *) cb->worker_entry; |
828 | 828 | ||
829 | done = 0; | 829 | done = 0; |
830 | /* do not proceed if stop was entered; | 830 | /* do not proceed if stop was entered; |
831 | * discard the buffers not yet processed */ | 831 | * discard the buffers not yet processed */ |
832 | while (!done && !cb->stop_mode) { | 832 | while (!done && !cb->stop_mode) { |
833 | unsigned long *trailer; | 833 | unsigned long *trailer; |
834 | struct hws_trailer_entry *te; | 834 | struct hws_trailer_entry *te; |
835 | unsigned long *dear = NULL; | 835 | unsigned long *dear = NULL; |
836 | 836 | ||
837 | trailer = trailer_entry_ptr(*sdbt); | 837 | trailer = trailer_entry_ptr(*sdbt); |
838 | /* leave loop if no more work to do */ | 838 | /* leave loop if no more work to do */ |
839 | if (!(*trailer & BUFFER_FULL_MASK)) { | 839 | if (!(*trailer & BUFFER_FULL_MASK)) { |
840 | done = 1; | 840 | done = 1; |
841 | if (!hws_flush_all) | 841 | if (!hws_flush_all) |
842 | continue; | 842 | continue; |
843 | } | 843 | } |
844 | 844 | ||
845 | te = (struct hws_trailer_entry *)trailer; | 845 | te = (struct hws_trailer_entry *)trailer; |
846 | cb->sample_overflow += te->overflow; | 846 | cb->sample_overflow += te->overflow; |
847 | 847 | ||
848 | add_samples_to_oprofile(cpu, sdbt, dear); | 848 | add_samples_to_oprofile(cpu, sdbt, dear); |
849 | 849 | ||
850 | /* reset trailer */ | 850 | /* reset trailer */ |
851 | xchg((unsigned char *) te, 0x40); | 851 | xchg((unsigned char *) te, 0x40); |
852 | 852 | ||
853 | /* advance to next sdb slot in current sdbt */ | 853 | /* advance to next sdb slot in current sdbt */ |
854 | sdbt++; | 854 | sdbt++; |
855 | /* in case link bit is set use address w/o link bit */ | 855 | /* in case link bit is set use address w/o link bit */ |
856 | if (is_link_entry(sdbt)) | 856 | if (is_link_entry(sdbt)) |
857 | sdbt = get_next_sdbt(sdbt); | 857 | sdbt = get_next_sdbt(sdbt); |
858 | 858 | ||
859 | cb->worker_entry = (unsigned long)sdbt; | 859 | cb->worker_entry = (unsigned long)sdbt; |
860 | } | 860 | } |
861 | } | 861 | } |
862 | 862 | ||
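worker_on_interrupt() walks the sample-data-block tables (SDBTs) as a linked ring: each SDBT slot addresses one SDB, the SDB's trailer entry carries the buffer-full flag and overflow count, and a slot with the link bit set chains to the next SDBT. The helpers come from hwsampler.h; their likely shape, shown only for orientation (assumption, not quoted from this commit):

    /* lowest bit of an SDBT entry marks a link to the next table */
    #define is_link_entry(x)  (*(unsigned long *)(x) & 0x1)
    #define get_next_sdbt(x)  ((unsigned long *)(*(unsigned long *)(x) & ~0x1UL))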
863 | static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, | 863 | static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, |
864 | unsigned long *dear) | 864 | unsigned long *dear) |
865 | { | 865 | { |
866 | struct hws_data_entry *sample_data_ptr; | 866 | struct hws_data_entry *sample_data_ptr; |
867 | unsigned long *trailer; | 867 | unsigned long *trailer; |
868 | 868 | ||
869 | trailer = trailer_entry_ptr(*sdbt); | 869 | trailer = trailer_entry_ptr(*sdbt); |
870 | if (dear) { | 870 | if (dear) { |
871 | if (dear > trailer) | 871 | if (dear > trailer) |
872 | return; | 872 | return; |
873 | trailer = dear; | 873 | trailer = dear; |
874 | } | 874 | } |
875 | 875 | ||
876 | sample_data_ptr = (struct hws_data_entry *)(*sdbt); | 876 | sample_data_ptr = (struct hws_data_entry *)(*sdbt); |
877 | 877 | ||
878 | while ((unsigned long *)sample_data_ptr < trailer) { | 878 | while ((unsigned long *)sample_data_ptr < trailer) { |
879 | struct pt_regs *regs = NULL; | 879 | struct pt_regs *regs = NULL; |
880 | struct task_struct *tsk = NULL; | 880 | struct task_struct *tsk = NULL; |
881 | 881 | ||
882 | /* | 882 | /* |
883 | * Check sampling mode, 1 indicates basic (=customer) sampling | 883 | * Check sampling mode, 1 indicates basic (=customer) sampling |
884 | * mode. | 884 | * mode. |
885 | */ | 885 | */ |
886 | if (sample_data_ptr->def != 1) { | 886 | if (sample_data_ptr->def != 1) { |
887 | /* sample slot is not yet written */ | 887 | /* sample slot is not yet written */ |
888 | break; | 888 | break; |
889 | } else { | 889 | } else { |
890 | /* make sure we don't use it twice; | 890 | /* make sure we don't use it twice; |
891 | * the sampler will set it again next time */ | 891 | * the sampler will set it again next time */ |
892 | sample_data_ptr->def = 0; | 892 | sample_data_ptr->def = 0; |
893 | } | 893 | } |
894 | 894 | ||
895 | /* Get pt_regs. */ | 895 | /* Get pt_regs. */ |
896 | if (sample_data_ptr->P == 1) { | 896 | if (sample_data_ptr->P == 1) { |
897 | /* userspace sample */ | 897 | /* userspace sample */ |
898 | unsigned int pid = sample_data_ptr->prim_asn; | 898 | unsigned int pid = sample_data_ptr->prim_asn; |
899 | rcu_read_lock(); | 899 | rcu_read_lock(); |
900 | tsk = pid_task(find_vpid(pid), PIDTYPE_PID); | 900 | tsk = pid_task(find_vpid(pid), PIDTYPE_PID); |
901 | if (tsk) | 901 | if (tsk) |
902 | regs = task_pt_regs(tsk); | 902 | regs = task_pt_regs(tsk); |
903 | rcu_read_unlock(); | 903 | rcu_read_unlock(); |
904 | } else { | 904 | } else { |
905 | /* kernelspace sample */ | 905 | /* kernelspace sample */ |
906 | regs = task_pt_regs(current); | 906 | regs = task_pt_regs(current); |
907 | } | 907 | } |
908 | 908 | ||
909 | mutex_lock(&hws_sem); | 909 | mutex_lock(&hws_sem); |
910 | oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0, | 910 | oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0, |
911 | !sample_data_ptr->P, tsk); | 911 | !sample_data_ptr->P, tsk); |
912 | mutex_unlock(&hws_sem); | 912 | mutex_unlock(&hws_sem); |
913 | 913 | ||
914 | sample_data_ptr++; | 914 | sample_data_ptr++; |
915 | } | 915 | } |
916 | } | 916 | } |
917 | 917 | ||
918 | static void worker(struct work_struct *work) | 918 | static void worker(struct work_struct *work) |
919 | { | 919 | { |
920 | unsigned int cpu; | 920 | unsigned int cpu; |
921 | int ext_params; | 921 | int ext_params; |
922 | struct hws_cpu_buffer *cb; | 922 | struct hws_cpu_buffer *cb; |
923 | 923 | ||
924 | cb = container_of(work, struct hws_cpu_buffer, worker); | 924 | cb = container_of(work, struct hws_cpu_buffer, worker); |
925 | cpu = smp_processor_id(); | 925 | cpu = smp_processor_id(); |
926 | ext_params = atomic_xchg(&cb->ext_params, 0); | 926 | ext_params = atomic_xchg(&cb->ext_params, 0); |
927 | 927 | ||
928 | if (!cb->worker_entry) | 928 | if (!cb->worker_entry) |
929 | worker_on_start(cpu); | 929 | worker_on_start(cpu); |
930 | 930 | ||
931 | if (worker_check_error(cpu, ext_params)) | 931 | if (worker_check_error(cpu, ext_params)) |
932 | return; | 932 | return; |
933 | 933 | ||
934 | if (!cb->finish) | 934 | if (!cb->finish) |
935 | worker_on_interrupt(cpu); | 935 | worker_on_interrupt(cpu); |
936 | 936 | ||
937 | if (cb->finish) | 937 | if (cb->finish) |
938 | worker_on_finish(cpu); | 938 | worker_on_finish(cpu); |
939 | } | 939 | } |
940 | 940 | ||
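worker() is the single funnel for all deferred work: both the 0x1407 interrupt handler and the deactivate/flush path queue the same per-CPU work item, and the function then lazily initializes the walk position, bails out on error bits carried over from the external interrupt, drains full SDBs, and finally runs the teardown path once finish is set. The wiring, as done elsewhere in this file:

    INIT_WORK(&cb->worker, worker);               /* hwsampler_setup() */
    queue_work(hws_wq, &cb->worker);              /* hws_ext_handler() */
    queue_work_on(cpu, hws_wq, &cb->worker);      /* hwsampler_deactivate() */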
941 | /** | 941 | /** |
942 | * hwsampler_allocate() - allocate memory for the hardware sampler | 942 | * hwsampler_allocate() - allocate memory for the hardware sampler |
943 | * @sdbt: number of SDBTs per online CPU (must be > 0) | 943 | * @sdbt: number of SDBTs per online CPU (must be > 0) |
944 | * @sdb: number of SDBs per SDBT (minimum 1, maximum 511) | 944 | * @sdb: number of SDBs per SDBT (minimum 1, maximum 511) |
945 | * | 945 | * |
946 | * Returns 0 on success, !0 on failure. | 946 | * Returns 0 on success, !0 on failure. |
947 | */ | 947 | */ |
948 | int hwsampler_allocate(unsigned long sdbt, unsigned long sdb) | 948 | int hwsampler_allocate(unsigned long sdbt, unsigned long sdb) |
949 | { | 949 | { |
950 | int cpu, rc; | 950 | int cpu, rc; |
951 | mutex_lock(&hws_sem); | 951 | mutex_lock(&hws_sem); |
952 | 952 | ||
953 | rc = -EINVAL; | 953 | rc = -EINVAL; |
954 | if (hws_state != HWS_DEALLOCATED) | 954 | if (hws_state != HWS_DEALLOCATED) |
955 | goto allocate_exit; | 955 | goto allocate_exit; |
956 | 956 | ||
957 | if (sdbt < 1) | 957 | if (sdbt < 1) |
958 | goto allocate_exit; | 958 | goto allocate_exit; |
959 | 959 | ||
960 | if (sdb > MAX_NUM_SDB || sdb < MIN_NUM_SDB) | 960 | if (sdb > MAX_NUM_SDB || sdb < MIN_NUM_SDB) |
961 | goto allocate_exit; | 961 | goto allocate_exit; |
962 | 962 | ||
963 | num_sdbt = sdbt; | 963 | num_sdbt = sdbt; |
964 | num_sdb = sdb; | 964 | num_sdb = sdb; |
965 | 965 | ||
966 | oom_killer_was_active = 0; | 966 | oom_killer_was_active = 0; |
967 | register_oom_notifier(&hws_oom_notifier); | 967 | register_oom_notifier(&hws_oom_notifier); |
968 | 968 | ||
969 | for_each_online_cpu(cpu) { | 969 | for_each_online_cpu(cpu) { |
970 | if (allocate_sdbt(cpu)) { | 970 | if (allocate_sdbt(cpu)) { |
971 | unregister_oom_notifier(&hws_oom_notifier); | 971 | unregister_oom_notifier(&hws_oom_notifier); |
972 | goto allocate_error; | 972 | goto allocate_error; |
973 | } | 973 | } |
974 | } | 974 | } |
975 | unregister_oom_notifier(&hws_oom_notifier); | 975 | unregister_oom_notifier(&hws_oom_notifier); |
976 | if (oom_killer_was_active) | 976 | if (oom_killer_was_active) |
977 | goto allocate_error; | 977 | goto allocate_error; |
978 | 978 | ||
979 | hws_state = HWS_STOPPED; | 979 | hws_state = HWS_STOPPED; |
980 | rc = 0; | 980 | rc = 0; |
981 | 981 | ||
982 | allocate_exit: | 982 | allocate_exit: |
983 | mutex_unlock(&hws_sem); | 983 | mutex_unlock(&hws_sem); |
984 | return rc; | 984 | return rc; |
985 | 985 | ||
986 | allocate_error: | 986 | allocate_error: |
987 | rc = -ENOMEM; | 987 | rc = -ENOMEM; |
988 | printk(KERN_ERR "hwsampler: CPUMF Memory allocation failed.\n"); | 988 | printk(KERN_ERR "hwsampler: CPUMF Memory allocation failed.\n"); |
989 | goto allocate_exit; | 989 | goto allocate_exit; |
990 | } | 990 | } |
991 | 991 | ||
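hwsampler_allocate() brackets the per-CPU allocate_sdbt() calls with the OOM notifier: if the OOM killer fires while the (potentially large) sample buffers are being built, the callback frees everything already obtained and oom_killer_was_active makes the function fail with -ENOMEM instead of continuing half-allocated. A caller's view (sizes are illustrative; 1..511 SDBs per SDBT per the kerneldoc above):

    if (hwsampler_allocate(32 /* SDBTs per CPU */, 511 /* SDBs per SDBT */))
        pr_err("hwsampler: buffer allocation failed\n");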
992 | /** | 992 | /** |
993 | * hwsampler_deallocate() - deallocate hardware sampler memory | 993 | * hwsampler_deallocate() - deallocate hardware sampler memory |
994 | * | 994 | * |
995 | * Returns 0 on success, !0 on failure. | 995 | * Returns 0 on success, !0 on failure. |
996 | */ | 996 | */ |
997 | int hwsampler_deallocate(void) | 997 | int hwsampler_deallocate(void) |
998 | { | 998 | { |
999 | int rc; | 999 | int rc; |
1000 | 1000 | ||
1001 | mutex_lock(&hws_sem); | 1001 | mutex_lock(&hws_sem); |
1002 | 1002 | ||
1003 | rc = -EINVAL; | 1003 | rc = -EINVAL; |
1004 | if (hws_state != HWS_STOPPED) | 1004 | if (hws_state != HWS_STOPPED) |
1005 | goto deallocate_exit; | 1005 | goto deallocate_exit; |
1006 | 1006 | ||
1007 | ctl_clear_bit(0, 5); /* set bit 58 CR0 off */ | 1007 | ctl_clear_bit(0, 5); /* set bit 58 CR0 off */ |
1008 | deallocate_sdbt(); | 1008 | deallocate_sdbt(); |
1009 | 1009 | ||
1010 | hws_state = HWS_DEALLOCATED; | 1010 | hws_state = HWS_DEALLOCATED; |
1011 | rc = 0; | 1011 | rc = 0; |
1012 | 1012 | ||
1013 | deallocate_exit: | 1013 | deallocate_exit: |
1014 | mutex_unlock(&hws_sem); | 1014 | mutex_unlock(&hws_sem); |
1015 | 1015 | ||
1016 | return rc; | 1016 | return rc; |
1017 | } | 1017 | } |
1018 | 1018 | ||
1019 | unsigned long hwsampler_query_min_interval(void) | 1019 | unsigned long hwsampler_query_min_interval(void) |
1020 | { | 1020 | { |
1021 | return min_sampler_rate; | 1021 | return min_sampler_rate; |
1022 | } | 1022 | } |
1023 | 1023 | ||
1024 | unsigned long hwsampler_query_max_interval(void) | 1024 | unsigned long hwsampler_query_max_interval(void) |
1025 | { | 1025 | { |
1026 | return max_sampler_rate; | 1026 | return max_sampler_rate; |
1027 | } | 1027 | } |
1028 | 1028 | ||
1029 | unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu) | 1029 | unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu) |
1030 | { | 1030 | { |
1031 | struct hws_cpu_buffer *cb; | 1031 | struct hws_cpu_buffer *cb; |
1032 | 1032 | ||
1033 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 1033 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
1034 | 1034 | ||
1035 | return cb->sample_overflow; | 1035 | return cb->sample_overflow; |
1036 | } | 1036 | } |
1037 | 1037 | ||
1038 | int hwsampler_setup(void) | 1038 | int hwsampler_setup(void) |
1039 | { | 1039 | { |
1040 | int rc; | 1040 | int rc; |
1041 | int cpu; | 1041 | int cpu; |
1042 | struct hws_cpu_buffer *cb; | 1042 | struct hws_cpu_buffer *cb; |
1043 | 1043 | ||
1044 | mutex_lock(&hws_sem); | 1044 | mutex_lock(&hws_sem); |
1045 | 1045 | ||
1046 | rc = -EINVAL; | 1046 | rc = -EINVAL; |
1047 | if (hws_state) | 1047 | if (hws_state) |
1048 | goto setup_exit; | 1048 | goto setup_exit; |
1049 | 1049 | ||
1050 | hws_state = HWS_INIT; | 1050 | hws_state = HWS_INIT; |
1051 | 1051 | ||
1052 | init_all_cpu_buffers(); | 1052 | init_all_cpu_buffers(); |
1053 | 1053 | ||
1054 | rc = check_hardware_prerequisites(); | 1054 | rc = check_hardware_prerequisites(); |
1055 | if (rc) | 1055 | if (rc) |
1056 | goto setup_exit; | 1056 | goto setup_exit; |
1057 | 1057 | ||
1058 | rc = check_qsi_on_setup(); | 1058 | rc = check_qsi_on_setup(); |
1059 | if (rc) | 1059 | if (rc) |
1060 | goto setup_exit; | 1060 | goto setup_exit; |
1061 | 1061 | ||
1062 | rc = -EINVAL; | 1062 | rc = -EINVAL; |
1063 | hws_wq = create_workqueue("hwsampler"); | 1063 | hws_wq = create_workqueue("hwsampler"); |
1064 | if (!hws_wq) | 1064 | if (!hws_wq) |
1065 | goto setup_exit; | 1065 | goto setup_exit; |
1066 | 1066 | ||
1067 | register_cpu_notifier(&hws_cpu_notifier); | 1067 | register_cpu_notifier(&hws_cpu_notifier); |
1068 | 1068 | ||
1069 | for_each_online_cpu(cpu) { | 1069 | for_each_online_cpu(cpu) { |
1070 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 1070 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
1071 | INIT_WORK(&cb->worker, worker); | 1071 | INIT_WORK(&cb->worker, worker); |
1072 | rc = smp_ctl_qsi(cpu); | 1072 | rc = smp_ctl_qsi(cpu); |
1073 | WARN_ON(rc); | 1073 | WARN_ON(rc); |
1074 | if (min_sampler_rate != cb->qsi.min_sampl_rate) { | 1074 | if (min_sampler_rate != cb->qsi.min_sampl_rate) { |
1075 | if (min_sampler_rate) { | 1075 | if (min_sampler_rate) { |
1076 | printk(KERN_WARNING | 1076 | printk(KERN_WARNING |
1077 | "hwsampler: different min sampler rate values.\n"); | 1077 | "hwsampler: different min sampler rate values.\n"); |
1078 | if (min_sampler_rate < cb->qsi.min_sampl_rate) | 1078 | if (min_sampler_rate < cb->qsi.min_sampl_rate) |
1079 | min_sampler_rate = | 1079 | min_sampler_rate = |
1080 | cb->qsi.min_sampl_rate; | 1080 | cb->qsi.min_sampl_rate; |
1081 | } else | 1081 | } else |
1082 | min_sampler_rate = cb->qsi.min_sampl_rate; | 1082 | min_sampler_rate = cb->qsi.min_sampl_rate; |
1083 | } | 1083 | } |
1084 | if (max_sampler_rate != cb->qsi.max_sampl_rate) { | 1084 | if (max_sampler_rate != cb->qsi.max_sampl_rate) { |
1085 | if (max_sampler_rate) { | 1085 | if (max_sampler_rate) { |
1086 | printk(KERN_WARNING | 1086 | printk(KERN_WARNING |
1087 | "hwsampler: different max sampler rate values.\n"); | 1087 | "hwsampler: different max sampler rate values.\n"); |
1088 | if (max_sampler_rate > cb->qsi.max_sampl_rate) | 1088 | if (max_sampler_rate > cb->qsi.max_sampl_rate) |
1089 | max_sampler_rate = | 1089 | max_sampler_rate = |
1090 | cb->qsi.max_sampl_rate; | 1090 | cb->qsi.max_sampl_rate; |
1091 | } else | 1091 | } else |
1092 | max_sampler_rate = cb->qsi.max_sampl_rate; | 1092 | max_sampler_rate = cb->qsi.max_sampl_rate; |
1093 | } | 1093 | } |
1094 | } | 1094 | } |
1095 | register_external_interrupt(0x1407, hws_ext_handler); | 1095 | register_external_interrupt(0x1407, hws_ext_handler); |
1096 | 1096 | ||
1097 | hws_state = HWS_DEALLOCATED; | 1097 | hws_state = HWS_DEALLOCATED; |
1098 | rc = 0; | 1098 | rc = 0; |
1099 | 1099 | ||
1100 | setup_exit: | 1100 | setup_exit: |
1101 | mutex_unlock(&hws_sem); | 1101 | mutex_unlock(&hws_sem); |
1102 | return rc; | 1102 | return rc; |
1103 | } | 1103 | } |
1104 | 1104 | ||
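When the per-CPU QSI data disagrees, hwsampler_setup() narrows the global interval range to what every CPU supports: the minimum rate is raised to the largest per-CPU minimum, and the maximum is lowered to the smallest per-CPU maximum. Once seeded by the first CPU, the two branches are equivalent to (sketch):

    min_sampler_rate = max(min_sampler_rate, cb->qsi.min_sampl_rate);
    max_sampler_rate = min(max_sampler_rate, cb->qsi.max_sampl_rate);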
1105 | int hwsampler_shutdown(void) | 1105 | int hwsampler_shutdown(void) |
1106 | { | 1106 | { |
1107 | int rc; | 1107 | int rc; |
1108 | 1108 | ||
1109 | mutex_lock(&hws_sem); | 1109 | mutex_lock(&hws_sem); |
1110 | 1110 | ||
1111 | rc = -EINVAL; | 1111 | rc = -EINVAL; |
1112 | if (hws_state == HWS_DEALLOCATED || hws_state == HWS_STOPPED) { | 1112 | if (hws_state == HWS_DEALLOCATED || hws_state == HWS_STOPPED) { |
1113 | mutex_unlock(&hws_sem); | 1113 | mutex_unlock(&hws_sem); |
1114 | 1114 | ||
1115 | if (hws_wq) | 1115 | if (hws_wq) |
1116 | flush_workqueue(hws_wq); | 1116 | flush_workqueue(hws_wq); |
1117 | 1117 | ||
1118 | mutex_lock(&hws_sem); | 1118 | mutex_lock(&hws_sem); |
1119 | 1119 | ||
1120 | if (hws_state == HWS_STOPPED) { | 1120 | if (hws_state == HWS_STOPPED) { |
1121 | ctl_clear_bit(0, 5); /* set bit 58 CR0 off */ | 1121 | ctl_clear_bit(0, 5); /* set bit 58 CR0 off */ |
1122 | deallocate_sdbt(); | 1122 | deallocate_sdbt(); |
1123 | } | 1123 | } |
1124 | if (hws_wq) { | 1124 | if (hws_wq) { |
1125 | destroy_workqueue(hws_wq); | 1125 | destroy_workqueue(hws_wq); |
1126 | hws_wq = NULL; | 1126 | hws_wq = NULL; |
1127 | } | 1127 | } |
1128 | 1128 | ||
1129 | unregister_external_interrupt(0x1407, hws_ext_handler); | 1129 | unregister_external_interrupt(0x1407, hws_ext_handler); |
1130 | hws_state = HWS_INIT; | 1130 | hws_state = HWS_INIT; |
1131 | rc = 0; | 1131 | rc = 0; |
1132 | } | 1132 | } |
1133 | mutex_unlock(&hws_sem); | 1133 | mutex_unlock(&hws_sem); |
1134 | 1134 | ||
1135 | unregister_cpu_notifier(&hws_cpu_notifier); | 1135 | unregister_cpu_notifier(&hws_cpu_notifier); |
1136 | 1136 | ||
1137 | return rc; | 1137 | return rc; |
1138 | } | 1138 | } |
1139 | 1139 | ||
1140 | /** | 1140 | /** |
1141 | * hwsampler_start_all() - start hardware sampling on all online CPUs | 1141 | * hwsampler_start_all() - start hardware sampling on all online CPUs |
1142 | * @rate: specifies the used interval when samples are taken | 1142 | * @rate: specifies the used interval when samples are taken |
1143 | * | 1143 | * |
1144 | * Returns 0 on success, !0 on failure. | 1144 | * Returns 0 on success, !0 on failure. |
1145 | */ | 1145 | */ |
1146 | int hwsampler_start_all(unsigned long rate) | 1146 | int hwsampler_start_all(unsigned long rate) |
1147 | { | 1147 | { |
1148 | int rc, cpu; | 1148 | int rc, cpu; |
1149 | 1149 | ||
1150 | mutex_lock(&hws_sem); | 1150 | mutex_lock(&hws_sem); |
1151 | 1151 | ||
1152 | hws_oom = 0; | 1152 | hws_oom = 0; |
1153 | 1153 | ||
1154 | rc = -EINVAL; | 1154 | rc = -EINVAL; |
1155 | if (hws_state != HWS_STOPPED) | 1155 | if (hws_state != HWS_STOPPED) |
1156 | goto start_all_exit; | 1156 | goto start_all_exit; |
1157 | 1157 | ||
1158 | interval = rate; | 1158 | interval = rate; |
1159 | 1159 | ||
1160 | /* fail if rate is not valid */ | 1160 | /* fail if rate is not valid */ |
1161 | if (interval < min_sampler_rate || interval > max_sampler_rate) | 1161 | if (interval < min_sampler_rate || interval > max_sampler_rate) |
1162 | goto start_all_exit; | 1162 | goto start_all_exit; |
1163 | 1163 | ||
1164 | rc = check_qsi_on_start(); | 1164 | rc = check_qsi_on_start(); |
1165 | if (rc) | 1165 | if (rc) |
1166 | goto start_all_exit; | 1166 | goto start_all_exit; |
1167 | 1167 | ||
1168 | rc = prepare_cpu_buffers(); | 1168 | rc = prepare_cpu_buffers(); |
1169 | if (rc) | 1169 | if (rc) |
1170 | goto start_all_exit; | 1170 | goto start_all_exit; |
1171 | 1171 | ||
1172 | for_each_online_cpu(cpu) { | 1172 | for_each_online_cpu(cpu) { |
1173 | rc = start_sampling(cpu); | 1173 | rc = start_sampling(cpu); |
1174 | if (rc) | 1174 | if (rc) |
1175 | break; | 1175 | break; |
1176 | } | 1176 | } |
1177 | if (rc) { | 1177 | if (rc) { |
1178 | for_each_online_cpu(cpu) { | 1178 | for_each_online_cpu(cpu) { |
1179 | stop_sampling(cpu); | 1179 | stop_sampling(cpu); |
1180 | } | 1180 | } |
1181 | goto start_all_exit; | 1181 | goto start_all_exit; |
1182 | } | 1182 | } |
1183 | hws_state = HWS_STARTED; | 1183 | hws_state = HWS_STARTED; |
1184 | rc = 0; | 1184 | rc = 0; |
1185 | 1185 | ||
1186 | start_all_exit: | 1186 | start_all_exit: |
1187 | mutex_unlock(&hws_sem); | 1187 | mutex_unlock(&hws_sem); |
1188 | 1188 | ||
1189 | if (rc) | 1189 | if (rc) |
1190 | return rc; | 1190 | return rc; |
1191 | 1191 | ||
1192 | register_oom_notifier(&hws_oom_notifier); | 1192 | register_oom_notifier(&hws_oom_notifier); |
1193 | hws_oom = 1; | 1193 | hws_oom = 1; |
1194 | hws_flush_all = 0; | 1194 | hws_flush_all = 0; |
1195 | /* now allow 0x1407 CPUMF external interrupts */ | 1195 | /* now allow 0x1407 CPUMF external interrupts */ |
1196 | ctl_set_bit(0, 5); /* set CR0 bit 58 */ | 1196 | ctl_set_bit(0, 5); /* set CR0 bit 58 */ |
1197 | 1197 | ||
1198 | return 0; | 1198 | return 0; |
1199 | } | 1199 | } |
1200 | 1200 | ||
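Only after every CPU has started successfully does hwsampler_start_all() open the interrupt gate. ctl_set_bit() counts bits from the least-significant end, while the architecture numbers them from the most-significant, so bit 58 of the 64-bit CR0 is 63 - 58 = 5 in the API's numbering:

    ctl_set_bit(0, 5);      /* CR0 bit 58: admit 0x1407 CPUMF interrupts */
    /* ... sampling runs ... */
    ctl_clear_bit(0, 5);    /* closed again on deallocate/shutdown */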
1201 | /** | 1201 | /** |
1202 | * hwsampler_stop_all() - stop hardware sampling on all online CPUs | 1202 | * hwsampler_stop_all() - stop hardware sampling on all online CPUs |
1203 | * | 1203 | * |
1204 | * Returns 0 on success, !0 on failure. | 1204 | * Returns 0 on success, !0 on failure. |
1205 | */ | 1205 | */ |
1206 | int hwsampler_stop_all(void) | 1206 | int hwsampler_stop_all(void) |
1207 | { | 1207 | { |
1208 | int tmp_rc, rc, cpu; | 1208 | int tmp_rc, rc, cpu; |
1209 | struct hws_cpu_buffer *cb; | 1209 | struct hws_cpu_buffer *cb; |
1210 | 1210 | ||
1211 | mutex_lock(&hws_sem); | 1211 | mutex_lock(&hws_sem); |
1212 | 1212 | ||
1213 | rc = 0; | 1213 | rc = 0; |
1214 | if (hws_state == HWS_INIT) { | 1214 | if (hws_state == HWS_INIT) { |
1215 | mutex_unlock(&hws_sem); | 1215 | mutex_unlock(&hws_sem); |
1216 | return rc; | 1216 | return rc; |
1217 | } | 1217 | } |
1218 | hws_state = HWS_STOPPING; | 1218 | hws_state = HWS_STOPPING; |
1219 | mutex_unlock(&hws_sem); | 1219 | mutex_unlock(&hws_sem); |
1220 | 1220 | ||
1221 | for_each_online_cpu(cpu) { | 1221 | for_each_online_cpu(cpu) { |
1222 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 1222 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
1223 | cb->stop_mode = 1; | 1223 | cb->stop_mode = 1; |
1224 | tmp_rc = stop_sampling(cpu); | 1224 | tmp_rc = stop_sampling(cpu); |
1225 | if (tmp_rc) | 1225 | if (tmp_rc) |
1226 | rc = tmp_rc; | 1226 | rc = tmp_rc; |
1227 | } | 1227 | } |
1228 | 1228 | ||
1229 | if (hws_wq) | 1229 | if (hws_wq) |
1230 | flush_workqueue(hws_wq); | 1230 | flush_workqueue(hws_wq); |
1231 | 1231 | ||
1232 | mutex_lock(&hws_sem); | 1232 | mutex_lock(&hws_sem); |
1233 | if (hws_oom) { | 1233 | if (hws_oom) { |
1234 | unregister_oom_notifier(&hws_oom_notifier); | 1234 | unregister_oom_notifier(&hws_oom_notifier); |
1235 | hws_oom = 0; | 1235 | hws_oom = 0; |
1236 | } | 1236 | } |
1237 | hws_state = HWS_STOPPED; | 1237 | hws_state = HWS_STOPPED; |
1238 | mutex_unlock(&hws_sem); | 1238 | mutex_unlock(&hws_sem); |
1239 | 1239 | ||
1240 | return rc; | 1240 | return rc; |
1241 | } | 1241 | } |
1242 | 1242 |
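Taken together, the entry points form a small state machine: HWS_INIT -> HWS_DEALLOCATED (setup) -> HWS_STOPPED (allocate) -> HWS_STARTED (start_all), with stop_all and deallocate walking back down. An illustrative call sequence, roughly as the oprofile glue would drive it (sketch; sizes and error handling are placeholders):

    if (hwsampler_setup())
        return;                               /* facility missing or busy */
    if (hwsampler_allocate(n_sdbt, n_sdb))    /* caller-chosen sizes */
        goto out_shutdown;
    if (hwsampler_start_all(interval))        /* within query_min/max range */
        goto out_deallocate;
    /* ... profiling runs, samples flow into oprofile ... */
    hwsampler_stop_all();
out_deallocate:
    hwsampler_deallocate();
out_shutdown:
    hwsampler_shutdown();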
drivers/s390/block/dasd_diag.c
1 | /* | 1 | /* |
2 | * File...........: linux/drivers/s390/block/dasd_diag.c | 2 | * File...........: linux/drivers/s390/block/dasd_diag.c |
3 | * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> | 3 | * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> |
4 | * Based on.......: linux/drivers/s390/block/mdisk.c | 4 | * Based on.......: linux/drivers/s390/block/mdisk.c |
5 | * ...............: by Hartmunt Penner <hpenner@de.ibm.com> | 5 | * ...............: by Hartmunt Penner <hpenner@de.ibm.com> |
6 | * Bugreports.to..: <Linux390@de.ibm.com> | 6 | * Bugreports.to..: <Linux390@de.ibm.com> |
7 | * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 | 7 | * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 |
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "dasd" | 11 | #define KMSG_COMPONENT "dasd" |
12 | 12 | ||
13 | #include <linux/kernel_stat.h> | 13 | #include <linux/kernel_stat.h> |
14 | #include <linux/stddef.h> | 14 | #include <linux/stddef.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/hdreg.h> | 17 | #include <linux/hdreg.h> |
18 | #include <linux/bio.h> | 18 | #include <linux/bio.h> |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/jiffies.h> | 21 | #include <linux/jiffies.h> |
22 | 22 | ||
23 | #include <asm/dasd.h> | 23 | #include <asm/dasd.h> |
24 | #include <asm/debug.h> | 24 | #include <asm/debug.h> |
25 | #include <asm/ebcdic.h> | 25 | #include <asm/ebcdic.h> |
26 | #include <asm/io.h> | 26 | #include <asm/io.h> |
27 | #include <asm/s390_ext.h> | 27 | #include <asm/irq.h> |
28 | #include <asm/vtoc.h> | 28 | #include <asm/vtoc.h> |
29 | #include <asm/diag.h> | 29 | #include <asm/diag.h> |
30 | 30 | ||
31 | #include "dasd_int.h" | 31 | #include "dasd_int.h" |
32 | #include "dasd_diag.h" | 32 | #include "dasd_diag.h" |
33 | 33 | ||
34 | #define PRINTK_HEADER "dasd(diag):" | 34 | #define PRINTK_HEADER "dasd(diag):" |
35 | 35 | ||
36 | MODULE_LICENSE("GPL"); | 36 | MODULE_LICENSE("GPL"); |
37 | 37 | ||
38 | /* The maximum number of blocks per request (max_blocks) is dependent on the | 38 | /* The maximum number of blocks per request (max_blocks) is dependent on the |
39 | * amount of storage that is available in the static I/O buffer for each | 39 | * amount of storage that is available in the static I/O buffer for each |
40 | * device. Currently each device gets 2 pages. We want to fit two requests | 40 | * device. Currently each device gets 2 pages. We want to fit two requests |
41 | * into the available memory so that we can immediately start the next if one | 41 | * into the available memory so that we can immediately start the next if one |
42 | * finishes. */ | 42 | * finishes. */ |
43 | #define DIAG_MAX_BLOCKS (((2 * PAGE_SIZE - sizeof(struct dasd_ccw_req) - \ | 43 | #define DIAG_MAX_BLOCKS (((2 * PAGE_SIZE - sizeof(struct dasd_ccw_req) - \ |
44 | sizeof(struct dasd_diag_req)) / \ | 44 | sizeof(struct dasd_diag_req)) / \ |
45 | sizeof(struct dasd_diag_bio)) / 2) | 45 | sizeof(struct dasd_diag_bio)) / 2) |
46 | #define DIAG_MAX_RETRIES 32 | 46 | #define DIAG_MAX_RETRIES 32 |
47 | #define DIAG_TIMEOUT 50 | 47 | #define DIAG_TIMEOUT 50 |
48 | 48 | ||
49 | static struct dasd_discipline dasd_diag_discipline; | 49 | static struct dasd_discipline dasd_diag_discipline; |
50 | 50 | ||
51 | struct dasd_diag_private { | 51 | struct dasd_diag_private { |
52 | struct dasd_diag_characteristics rdc_data; | 52 | struct dasd_diag_characteristics rdc_data; |
53 | struct dasd_diag_rw_io iob; | 53 | struct dasd_diag_rw_io iob; |
54 | struct dasd_diag_init_io iib; | 54 | struct dasd_diag_init_io iib; |
55 | blocknum_t pt_block; | 55 | blocknum_t pt_block; |
56 | struct ccw_dev_id dev_id; | 56 | struct ccw_dev_id dev_id; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | struct dasd_diag_req { | 59 | struct dasd_diag_req { |
60 | unsigned int block_count; | 60 | unsigned int block_count; |
61 | struct dasd_diag_bio bio[0]; | 61 | struct dasd_diag_bio bio[0]; |
62 | }; | 62 | }; |
63 | 63 | ||
64 | static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */ | 64 | static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */ |
65 | 65 | ||
66 | /* Perform DIAG250 call with block I/O parameter list iob (input and output) | 66 | /* Perform DIAG250 call with block I/O parameter list iob (input and output) |
67 | * and function code cmd. | 67 | * and function code cmd. |
68 | * In case of an exception return 3. Otherwise return result of bitwise OR of | 68 | * In case of an exception return 3. Otherwise return result of bitwise OR of |
69 | * resulting condition code and DIAG return code. */ | 69 | * resulting condition code and DIAG return code. */ |
70 | static inline int dia250(void *iob, int cmd) | 70 | static inline int dia250(void *iob, int cmd) |
71 | { | 71 | { |
72 | register unsigned long reg2 asm ("2") = (unsigned long) iob; | 72 | register unsigned long reg2 asm ("2") = (unsigned long) iob; |
73 | typedef union { | 73 | typedef union { |
74 | struct dasd_diag_init_io init_io; | 74 | struct dasd_diag_init_io init_io; |
75 | struct dasd_diag_rw_io rw_io; | 75 | struct dasd_diag_rw_io rw_io; |
76 | } addr_type; | 76 | } addr_type; |
77 | int rc; | 77 | int rc; |
78 | 78 | ||
79 | rc = 3; | 79 | rc = 3; |
80 | asm volatile( | 80 | asm volatile( |
81 | " diag 2,%2,0x250\n" | 81 | " diag 2,%2,0x250\n" |
82 | "0: ipm %0\n" | 82 | "0: ipm %0\n" |
83 | " srl %0,28\n" | 83 | " srl %0,28\n" |
84 | " or %0,3\n" | 84 | " or %0,3\n" |
85 | "1:\n" | 85 | "1:\n" |
86 | EX_TABLE(0b,1b) | 86 | EX_TABLE(0b,1b) |
87 | : "+d" (rc), "=m" (*(addr_type *) iob) | 87 | : "+d" (rc), "=m" (*(addr_type *) iob) |
88 | : "d" (cmd), "d" (reg2), "m" (*(addr_type *) iob) | 88 | : "d" (cmd), "d" (reg2), "m" (*(addr_type *) iob) |
89 | : "3", "cc"); | 89 | : "3", "cc"); |
90 | return rc; | 90 | return rc; |
91 | } | 91 | } |
92 | 92 | ||
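dia250() packs the whole machine state into one int: IPM captures the condition code into the top nibble, SRL shifts it down into the low two bits, and the OR with general register 3 merges in the return code the DIAG leaves there; a program exception is caught by the EX_TABLE entry and yields the preset rc = 3. Callers therefore mask the condition-code bits, as mdsk_init_io() below does:

    rc = dia250(iib, INIT_BIO);
    if ((rc & 3) == 0 && end_block)     /* condition code 0: accepted */
        *end_block = iib->end_block;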
93 | /* Initialize block I/O to DIAG device using the specified blocksize and | 93 | /* Initialize block I/O to DIAG device using the specified blocksize and |
94 | * block offset. On success, return zero and set end_block to contain the | 94 | * block offset. On success, return zero and set end_block to contain the |
95 | * number of blocks on the device minus the specified offset. Return non-zero | 95 | * number of blocks on the device minus the specified offset. Return non-zero |
96 | * otherwise. */ | 96 | * otherwise. */ |
97 | static inline int | 97 | static inline int |
98 | mdsk_init_io(struct dasd_device *device, unsigned int blocksize, | 98 | mdsk_init_io(struct dasd_device *device, unsigned int blocksize, |
99 | blocknum_t offset, blocknum_t *end_block) | 99 | blocknum_t offset, blocknum_t *end_block) |
100 | { | 100 | { |
101 | struct dasd_diag_private *private; | 101 | struct dasd_diag_private *private; |
102 | struct dasd_diag_init_io *iib; | 102 | struct dasd_diag_init_io *iib; |
103 | int rc; | 103 | int rc; |
104 | 104 | ||
105 | private = (struct dasd_diag_private *) device->private; | 105 | private = (struct dasd_diag_private *) device->private; |
106 | iib = &private->iib; | 106 | iib = &private->iib; |
107 | memset(iib, 0, sizeof (struct dasd_diag_init_io)); | 107 | memset(iib, 0, sizeof (struct dasd_diag_init_io)); |
108 | 108 | ||
109 | iib->dev_nr = private->dev_id.devno; | 109 | iib->dev_nr = private->dev_id.devno; |
110 | iib->block_size = blocksize; | 110 | iib->block_size = blocksize; |
111 | iib->offset = offset; | 111 | iib->offset = offset; |
112 | iib->flaga = DASD_DIAG_FLAGA_DEFAULT; | 112 | iib->flaga = DASD_DIAG_FLAGA_DEFAULT; |
113 | 113 | ||
114 | rc = dia250(iib, INIT_BIO); | 114 | rc = dia250(iib, INIT_BIO); |
115 | 115 | ||
116 | if ((rc & 3) == 0 && end_block) | 116 | if ((rc & 3) == 0 && end_block) |
117 | *end_block = iib->end_block; | 117 | *end_block = iib->end_block; |
118 | 118 | ||
119 | return rc; | 119 | return rc; |
120 | } | 120 | } |
121 | 121 | ||
122 | /* Remove block I/O environment for device. Return zero on success, non-zero | 122 | /* Remove block I/O environment for device. Return zero on success, non-zero |
123 | * otherwise. */ | 123 | * otherwise. */ |
124 | static inline int | 124 | static inline int |
125 | mdsk_term_io(struct dasd_device * device) | 125 | mdsk_term_io(struct dasd_device * device) |
126 | { | 126 | { |
127 | struct dasd_diag_private *private; | 127 | struct dasd_diag_private *private; |
128 | struct dasd_diag_init_io *iib; | 128 | struct dasd_diag_init_io *iib; |
129 | int rc; | 129 | int rc; |
130 | 130 | ||
131 | private = (struct dasd_diag_private *) device->private; | 131 | private = (struct dasd_diag_private *) device->private; |
132 | iib = &private->iib; | 132 | iib = &private->iib; |
133 | memset(iib, 0, sizeof (struct dasd_diag_init_io)); | 133 | memset(iib, 0, sizeof (struct dasd_diag_init_io)); |
134 | iib->dev_nr = private->dev_id.devno; | 134 | iib->dev_nr = private->dev_id.devno; |
135 | rc = dia250(iib, TERM_BIO); | 135 | rc = dia250(iib, TERM_BIO); |
136 | return rc; | 136 | return rc; |
137 | } | 137 | } |
138 | 138 | ||
139 | /* Error recovery for failed DIAG requests - try to reestablish the DIAG | 139 | /* Error recovery for failed DIAG requests - try to reestablish the DIAG |
140 | * environment. */ | 140 | * environment. */ |
141 | static void | 141 | static void |
142 | dasd_diag_erp(struct dasd_device *device) | 142 | dasd_diag_erp(struct dasd_device *device) |
143 | { | 143 | { |
144 | int rc; | 144 | int rc; |
145 | 145 | ||
146 | mdsk_term_io(device); | 146 | mdsk_term_io(device); |
147 | rc = mdsk_init_io(device, device->block->bp_block, 0, NULL); | 147 | rc = mdsk_init_io(device, device->block->bp_block, 0, NULL); |
148 | if (rc == 4) { | 148 | if (rc == 4) { |
149 | if (!(test_and_set_bit(DASD_FLAG_DEVICE_RO, &device->flags))) | 149 | if (!(test_and_set_bit(DASD_FLAG_DEVICE_RO, &device->flags))) |
150 | pr_warning("%s: The access mode of a DIAG device " | 150 | pr_warning("%s: The access mode of a DIAG device " |
151 | "changed to read-only\n", | 151 | "changed to read-only\n", |
152 | dev_name(&device->cdev->dev)); | 152 | dev_name(&device->cdev->dev)); |
153 | rc = 0; | 153 | rc = 0; |
154 | } | 154 | } |
155 | if (rc) | 155 | if (rc) |
156 | pr_warning("%s: DIAG ERP failed with " | 156 | pr_warning("%s: DIAG ERP failed with " |
157 | "rc=%d\n", dev_name(&device->cdev->dev), rc); | 157 | "rc=%d\n", dev_name(&device->cdev->dev), rc); |
158 | } | 158 | } |
159 | 159 | ||
160 | /* Start a given request at the device. Return zero on success, non-zero | 160 | /* Start a given request at the device. Return zero on success, non-zero |
161 | * otherwise. */ | 161 | * otherwise. */ |
162 | static int | 162 | static int |
163 | dasd_start_diag(struct dasd_ccw_req * cqr) | 163 | dasd_start_diag(struct dasd_ccw_req * cqr) |
164 | { | 164 | { |
165 | struct dasd_device *device; | 165 | struct dasd_device *device; |
166 | struct dasd_diag_private *private; | 166 | struct dasd_diag_private *private; |
167 | struct dasd_diag_req *dreq; | 167 | struct dasd_diag_req *dreq; |
168 | int rc; | 168 | int rc; |
169 | 169 | ||
170 | device = cqr->startdev; | 170 | device = cqr->startdev; |
171 | if (cqr->retries < 0) { | 171 | if (cqr->retries < 0) { |
172 | DBF_DEV_EVENT(DBF_ERR, device, "DIAG start_IO: request %p " | 172 | DBF_DEV_EVENT(DBF_ERR, device, "DIAG start_IO: request %p " |
173 | "- no retry left)", cqr); | 173 | "- no retry left)", cqr); |
174 | cqr->status = DASD_CQR_ERROR; | 174 | cqr->status = DASD_CQR_ERROR; |
175 | return -EIO; | 175 | return -EIO; |
176 | } | 176 | } |
177 | private = (struct dasd_diag_private *) device->private; | 177 | private = (struct dasd_diag_private *) device->private; |
178 | dreq = (struct dasd_diag_req *) cqr->data; | 178 | dreq = (struct dasd_diag_req *) cqr->data; |
179 | 179 | ||
180 | private->iob.dev_nr = private->dev_id.devno; | 180 | private->iob.dev_nr = private->dev_id.devno; |
181 | private->iob.key = 0; | 181 | private->iob.key = 0; |
182 | private->iob.flags = DASD_DIAG_RWFLAG_ASYNC; | 182 | private->iob.flags = DASD_DIAG_RWFLAG_ASYNC; |
183 | private->iob.block_count = dreq->block_count; | 183 | private->iob.block_count = dreq->block_count; |
184 | private->iob.interrupt_params = (addr_t) cqr; | 184 | private->iob.interrupt_params = (addr_t) cqr; |
185 | private->iob.bio_list = dreq->bio; | 185 | private->iob.bio_list = dreq->bio; |
186 | private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; | 186 | private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; |
187 | 187 | ||
188 | cqr->startclk = get_clock(); | 188 | cqr->startclk = get_clock(); |
189 | cqr->starttime = jiffies; | 189 | cqr->starttime = jiffies; |
190 | cqr->retries--; | 190 | cqr->retries--; |
191 | 191 | ||
192 | rc = dia250(&private->iob, RW_BIO); | 192 | rc = dia250(&private->iob, RW_BIO); |
193 | switch (rc) { | 193 | switch (rc) { |
194 | case 0: /* Synchronous I/O finished successfully */ | 194 | case 0: /* Synchronous I/O finished successfully */ |
195 | cqr->stopclk = get_clock(); | 195 | cqr->stopclk = get_clock(); |
196 | cqr->status = DASD_CQR_SUCCESS; | 196 | cqr->status = DASD_CQR_SUCCESS; |
197 | /* Indicate to calling function that only a dasd_schedule_bh() | 197 | /* Indicate to calling function that only a dasd_schedule_bh() |
198 | and no timer is needed */ | 198 | and no timer is needed */ |
199 | rc = -EACCES; | 199 | rc = -EACCES; |
200 | break; | 200 | break; |
201 | case 8: /* Asynchronous I/O was started */ | 201 | case 8: /* Asynchronous I/O was started */ |
202 | cqr->status = DASD_CQR_IN_IO; | 202 | cqr->status = DASD_CQR_IN_IO; |
203 | rc = 0; | 203 | rc = 0; |
204 | break; | 204 | break; |
205 | default: /* Error condition */ | 205 | default: /* Error condition */ |
206 | cqr->status = DASD_CQR_QUEUED; | 206 | cqr->status = DASD_CQR_QUEUED; |
207 | DBF_DEV_EVENT(DBF_WARNING, device, "dia250 returned rc=%d", rc); | 207 | DBF_DEV_EVENT(DBF_WARNING, device, "dia250 returned rc=%d", rc); |
208 | dasd_diag_erp(device); | 208 | dasd_diag_erp(device); |
209 | rc = -EIO; | 209 | rc = -EIO; |
210 | break; | 210 | break; |
211 | } | 211 | } |
212 | cqr->intrc = rc; | 212 | cqr->intrc = rc; |
213 | return rc; | 213 | return rc; |
214 | } | 214 | } |
215 | 215 | ||
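dasd_start_diag() translates the DIAG 250 result into the return convention the common DASD code expects: 0 means the I/O went asynchronous and dasd_ext_handler() will complete it, the synchronous-success case is signalled with the -EACCES sentinel so the caller only schedules the bottom half and arms no timer, and anything else ends in ERP with -EIO. A caller's-eye sketch (illustrative; the real dispatch lives in the DASD core):

    rc = dasd_start_diag(cqr);
    switch (rc) {
    case 0:         /* in flight: completion comes via 0x1407 interrupt */
        break;
    case -EACCES:   /* already complete: bottom half only, no timer */
        break;
    default:        /* -EIO: ERP ran, request is queued again */
        break;
    }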
216 | /* Terminate given request at the device. */ | 216 | /* Terminate given request at the device. */ |
217 | static int | 217 | static int |
218 | dasd_diag_term_IO(struct dasd_ccw_req * cqr) | 218 | dasd_diag_term_IO(struct dasd_ccw_req * cqr) |
219 | { | 219 | { |
220 | struct dasd_device *device; | 220 | struct dasd_device *device; |
221 | 221 | ||
222 | device = cqr->startdev; | 222 | device = cqr->startdev; |
223 | mdsk_term_io(device); | 223 | mdsk_term_io(device); |
224 | mdsk_init_io(device, device->block->bp_block, 0, NULL); | 224 | mdsk_init_io(device, device->block->bp_block, 0, NULL); |
225 | cqr->status = DASD_CQR_CLEAR_PENDING; | 225 | cqr->status = DASD_CQR_CLEAR_PENDING; |
226 | cqr->stopclk = get_clock(); | 226 | cqr->stopclk = get_clock(); |
227 | dasd_schedule_device_bh(device); | 227 | dasd_schedule_device_bh(device); |
228 | return 0; | 228 | return 0; |
229 | } | 229 | } |
230 | 230 | ||
231 | /* Handle external interruption. */ | 231 | /* Handle external interruption. */ |
232 | static void dasd_ext_handler(unsigned int ext_int_code, | 232 | static void dasd_ext_handler(unsigned int ext_int_code, |
233 | unsigned int param32, unsigned long param64) | 233 | unsigned int param32, unsigned long param64) |
234 | { | 234 | { |
235 | struct dasd_ccw_req *cqr, *next; | 235 | struct dasd_ccw_req *cqr, *next; |
236 | struct dasd_device *device; | 236 | struct dasd_device *device; |
237 | unsigned long long expires; | 237 | unsigned long long expires; |
238 | unsigned long flags; | 238 | unsigned long flags; |
239 | addr_t ip; | 239 | addr_t ip; |
240 | int rc; | 240 | int rc; |
241 | 241 | ||
242 | switch (ext_int_code >> 24) { | 242 | switch (ext_int_code >> 24) { |
243 | case DASD_DIAG_CODE_31BIT: | 243 | case DASD_DIAG_CODE_31BIT: |
244 | ip = (addr_t) param32; | 244 | ip = (addr_t) param32; |
245 | break; | 245 | break; |
246 | case DASD_DIAG_CODE_64BIT: | 246 | case DASD_DIAG_CODE_64BIT: |
247 | ip = (addr_t) param64; | 247 | ip = (addr_t) param64; |
248 | break; | 248 | break; |
249 | default: | 249 | default: |
250 | return; | 250 | return; |
251 | } | 251 | } |
252 | kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++; | 252 | kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++; |
253 | if (!ip) { /* no intparm: unsolicited interrupt */ | 253 | if (!ip) { /* no intparm: unsolicited interrupt */ |
254 | DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited " | 254 | DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited " |
255 | "interrupt"); | 255 | "interrupt"); |
256 | return; | 256 | return; |
257 | } | 257 | } |
258 | cqr = (struct dasd_ccw_req *) ip; | 258 | cqr = (struct dasd_ccw_req *) ip; |
259 | device = (struct dasd_device *) cqr->startdev; | 259 | device = (struct dasd_device *) cqr->startdev; |
260 | if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { | 260 | if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { |
261 | DBF_DEV_EVENT(DBF_WARNING, device, | 261 | DBF_DEV_EVENT(DBF_WARNING, device, |
262 | " magic number of dasd_ccw_req 0x%08X doesn't" | 262 | " magic number of dasd_ccw_req 0x%08X doesn't" |
263 | " match discipline 0x%08X", | 263 | " match discipline 0x%08X", |
264 | cqr->magic, *(int *) (&device->discipline->name)); | 264 | cqr->magic, *(int *) (&device->discipline->name)); |
265 | return; | 265 | return; |
266 | } | 266 | } |
267 | 267 | ||
268 | /* get irq lock to modify request queue */ | 268 | /* get irq lock to modify request queue */ |
269 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | 269 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); |
270 | 270 | ||
271 | /* Check for a pending clear operation */ | 271 | /* Check for a pending clear operation */ |
272 | if (cqr->status == DASD_CQR_CLEAR_PENDING) { | 272 | if (cqr->status == DASD_CQR_CLEAR_PENDING) { |
273 | cqr->status = DASD_CQR_CLEARED; | 273 | cqr->status = DASD_CQR_CLEARED; |
274 | dasd_device_clear_timer(device); | 274 | dasd_device_clear_timer(device); |
275 | dasd_schedule_device_bh(device); | 275 | dasd_schedule_device_bh(device); |
276 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | 276 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); |
277 | return; | 277 | return; |
278 | } | 278 | } |
279 | 279 | ||
280 | cqr->stopclk = get_clock(); | 280 | cqr->stopclk = get_clock(); |
281 | 281 | ||
282 | expires = 0; | 282 | expires = 0; |
283 | if ((ext_int_code & 0xff0000) == 0) { | 283 | if ((ext_int_code & 0xff0000) == 0) { |
284 | cqr->status = DASD_CQR_SUCCESS; | 284 | cqr->status = DASD_CQR_SUCCESS; |
285 | /* Start first request on queue if possible -> fast_io. */ | 285 | /* Start first request on queue if possible -> fast_io. */ |
286 | if (!list_empty(&device->ccw_queue)) { | 286 | if (!list_empty(&device->ccw_queue)) { |
287 | next = list_entry(device->ccw_queue.next, | 287 | next = list_entry(device->ccw_queue.next, |
288 | struct dasd_ccw_req, devlist); | 288 | struct dasd_ccw_req, devlist); |
289 | if (next->status == DASD_CQR_QUEUED) { | 289 | if (next->status == DASD_CQR_QUEUED) { |
290 | rc = dasd_start_diag(next); | 290 | rc = dasd_start_diag(next); |
291 | if (rc == 0) | 291 | if (rc == 0) |
292 | expires = next->expires; | 292 | expires = next->expires; |
293 | } | 293 | } |
294 | } | 294 | } |
295 | } else { | 295 | } else { |
296 | cqr->status = DASD_CQR_QUEUED; | 296 | cqr->status = DASD_CQR_QUEUED; |
297 | DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for " | 297 | DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for " |
298 | "request %p was %d (%d retries left)", cqr, | 298 | "request %p was %d (%d retries left)", cqr, |
299 | (ext_int_code >> 16) & 0xff, cqr->retries); | 299 | (ext_int_code >> 16) & 0xff, cqr->retries); |
300 | dasd_diag_erp(device); | 300 | dasd_diag_erp(device); |
301 | } | 301 | } |
302 | 302 | ||
303 | if (expires != 0) | 303 | if (expires != 0) |
304 | dasd_device_set_timer(device, expires); | 304 | dasd_device_set_timer(device, expires); |
305 | else | 305 | else |
306 | dasd_device_clear_timer(device); | 306 | dasd_device_clear_timer(device); |
307 | dasd_schedule_device_bh(device); | 307 | dasd_schedule_device_bh(device); |
308 | 308 | ||
309 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | 309 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); |
310 | } | 310 | } |
311 | 311 | ||
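For orientation: the top byte of ext_int_code tells the handler whether the interrupt parameter (the address of the completed dasd_ccw_req) arrived in the 32-bit or the 64-bit register, and (ext_int_code >> 16) & 0xff is the DIAG status byte tested further down. A user-space sketch of the decode step, using illustrative stand-ins for the DASD_DIAG_CODE_* constants defined elsewhere in the driver:

#include <stdint.h>

#define CODE_31BIT 0x03 /* illustrative; see DASD_DIAG_CODE_31BIT */
#define CODE_64BIT 0x07 /* illustrative; see DASD_DIAG_CODE_64BIT */

/* Return the interrupt parameter carrying the dasd_ccw_req address,
 * or 0 for an interruption code this handler does not understand. */
static uint64_t diag_intparm(uint32_t ext_int_code,
                             uint32_t param32, uint64_t param64)
{
        switch (ext_int_code >> 24) {
        case CODE_31BIT:
                return param32;
        case CODE_64BIT:
                return param64;
        default:
                return 0;
        }
}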
312 | /* Check whether device can be controlled by DIAG discipline. Return zero on | 312 | /* Check whether device can be controlled by DIAG discipline. Return zero on |
313 | * success, non-zero otherwise. */ | 313 | * success, non-zero otherwise. */ |
314 | static int | 314 | static int |
315 | dasd_diag_check_device(struct dasd_device *device) | 315 | dasd_diag_check_device(struct dasd_device *device) |
316 | { | 316 | { |
317 | struct dasd_block *block; | 317 | struct dasd_block *block; |
318 | struct dasd_diag_private *private; | 318 | struct dasd_diag_private *private; |
319 | struct dasd_diag_characteristics *rdc_data; | 319 | struct dasd_diag_characteristics *rdc_data; |
320 | struct dasd_diag_bio bio; | 320 | struct dasd_diag_bio bio; |
321 | struct vtoc_cms_label *label; | 321 | struct vtoc_cms_label *label; |
322 | blocknum_t end_block; | 322 | blocknum_t end_block; |
323 | unsigned int sb, bsize; | 323 | unsigned int sb, bsize; |
324 | int rc; | 324 | int rc; |
325 | 325 | ||
326 | private = (struct dasd_diag_private *) device->private; | 326 | private = (struct dasd_diag_private *) device->private; |
327 | if (private == NULL) { | 327 | if (private == NULL) { |
328 | private = kzalloc(sizeof(struct dasd_diag_private),GFP_KERNEL); | 328 | private = kzalloc(sizeof(struct dasd_diag_private),GFP_KERNEL); |
329 | if (private == NULL) { | 329 | if (private == NULL) { |
330 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 330 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
331 | "Allocating memory for private DASD data " | 331 | "Allocating memory for private DASD data " |
332 | "failed\n"); | 332 | "failed\n"); |
333 | return -ENOMEM; | 333 | return -ENOMEM; |
334 | } | 334 | } |
335 | ccw_device_get_id(device->cdev, &private->dev_id); | 335 | ccw_device_get_id(device->cdev, &private->dev_id); |
336 | device->private = (void *) private; | 336 | device->private = (void *) private; |
337 | } | 337 | } |
338 | block = dasd_alloc_block(); | 338 | block = dasd_alloc_block(); |
339 | if (IS_ERR(block)) { | 339 | if (IS_ERR(block)) { |
340 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 340 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
341 | "could not allocate dasd block structure"); | 341 | "could not allocate dasd block structure"); |
342 | device->private = NULL; | 342 | device->private = NULL; |
343 | kfree(private); | 343 | kfree(private); |
344 | return PTR_ERR(block); | 344 | return PTR_ERR(block); |
345 | } | 345 | } |
346 | device->block = block; | 346 | device->block = block; |
347 | block->base = device; | 347 | block->base = device; |
348 | 348 | ||
349 | /* Read Device Characteristics */ | 349 | /* Read Device Characteristics */ |
350 | rdc_data = (void *) &(private->rdc_data); | 350 | rdc_data = (void *) &(private->rdc_data); |
351 | rdc_data->dev_nr = private->dev_id.devno; | 351 | rdc_data->dev_nr = private->dev_id.devno; |
352 | rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics); | 352 | rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics); |
353 | 353 | ||
354 | rc = diag210((struct diag210 *) rdc_data); | 354 | rc = diag210((struct diag210 *) rdc_data); |
355 | if (rc) { | 355 | if (rc) { |
356 | DBF_DEV_EVENT(DBF_WARNING, device, "failed to retrieve device " | 356 | DBF_DEV_EVENT(DBF_WARNING, device, "failed to retrieve device " |
357 | "information (rc=%d)", rc); | 357 | "information (rc=%d)", rc); |
358 | rc = -EOPNOTSUPP; | 358 | rc = -EOPNOTSUPP; |
359 | goto out; | 359 | goto out; |
360 | } | 360 | } |
361 | 361 | ||
362 | device->default_expires = DIAG_TIMEOUT; | 362 | device->default_expires = DIAG_TIMEOUT; |
363 | 363 | ||
364 | /* Figure out position of label block */ | 364 | /* Figure out position of label block */ |
365 | switch (private->rdc_data.vdev_class) { | 365 | switch (private->rdc_data.vdev_class) { |
366 | case DEV_CLASS_FBA: | 366 | case DEV_CLASS_FBA: |
367 | private->pt_block = 1; | 367 | private->pt_block = 1; |
368 | break; | 368 | break; |
369 | case DEV_CLASS_ECKD: | 369 | case DEV_CLASS_ECKD: |
370 | private->pt_block = 2; | 370 | private->pt_block = 2; |
371 | break; | 371 | break; |
372 | default: | 372 | default: |
373 | pr_warning("%s: Device type %d is not supported " | 373 | pr_warning("%s: Device type %d is not supported " |
374 | "in DIAG mode\n", dev_name(&device->cdev->dev), | 374 | "in DIAG mode\n", dev_name(&device->cdev->dev), |
375 | private->rdc_data.vdev_class); | 375 | private->rdc_data.vdev_class); |
376 | rc = -EOPNOTSUPP; | 376 | rc = -EOPNOTSUPP; |
377 | goto out; | 377 | goto out; |
378 | } | 378 | } |
379 | 379 | ||
380 | DBF_DEV_EVENT(DBF_INFO, device, | 380 | DBF_DEV_EVENT(DBF_INFO, device, |
381 | "%04X: %04X on real %04X/%02X", | 381 | "%04X: %04X on real %04X/%02X", |
382 | rdc_data->dev_nr, | 382 | rdc_data->dev_nr, |
383 | rdc_data->vdev_type, | 383 | rdc_data->vdev_type, |
384 | rdc_data->rdev_type, rdc_data->rdev_model); | 384 | rdc_data->rdev_type, rdc_data->rdev_model); |
385 | 385 | ||
386 | /* terminate all outstanding operations */ | 386 | /* terminate all outstanding operations */ |
387 | mdsk_term_io(device); | 387 | mdsk_term_io(device); |
388 | 388 | ||
389 | /* figure out blocksize of device */ | 389 | /* figure out blocksize of device */ |
390 | label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL); | 390 | label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL); |
391 | if (label == NULL) { | 391 | if (label == NULL) { |
392 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 392 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
393 | "No memory to allocate initialization request"); | 393 | "No memory to allocate initialization request"); |
394 | rc = -ENOMEM; | 394 | rc = -ENOMEM; |
395 | goto out; | 395 | goto out; |
396 | } | 396 | } |
397 | rc = 0; | 397 | rc = 0; |
398 | end_block = 0; | 398 | end_block = 0; |
399 | /* try all sizes - needed for ECKD devices */ | 399 | /* try all sizes - needed for ECKD devices */ |
400 | for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) { | 400 | for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) { |
401 | mdsk_init_io(device, bsize, 0, &end_block); | 401 | mdsk_init_io(device, bsize, 0, &end_block); |
402 | memset(&bio, 0, sizeof (struct dasd_diag_bio)); | 402 | memset(&bio, 0, sizeof (struct dasd_diag_bio)); |
403 | bio.type = MDSK_READ_REQ; | 403 | bio.type = MDSK_READ_REQ; |
404 | bio.block_number = private->pt_block + 1; | 404 | bio.block_number = private->pt_block + 1; |
405 | bio.buffer = label; | 405 | bio.buffer = label; |
406 | memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io)); | 406 | memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io)); |
407 | private->iob.dev_nr = rdc_data->dev_nr; | 407 | private->iob.dev_nr = rdc_data->dev_nr; |
408 | private->iob.key = 0; | 408 | private->iob.key = 0; |
409 | private->iob.flags = 0; /* do synchronous io */ | 409 | private->iob.flags = 0; /* do synchronous io */ |
410 | private->iob.block_count = 1; | 410 | private->iob.block_count = 1; |
411 | private->iob.interrupt_params = 0; | 411 | private->iob.interrupt_params = 0; |
412 | private->iob.bio_list = &bio; | 412 | private->iob.bio_list = &bio; |
413 | private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; | 413 | private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; |
414 | rc = dia250(&private->iob, RW_BIO); | 414 | rc = dia250(&private->iob, RW_BIO); |
415 | if (rc == 3) { | 415 | if (rc == 3) { |
416 | pr_warning("%s: A 64-bit DIAG call failed\n", | 416 | pr_warning("%s: A 64-bit DIAG call failed\n", |
417 | dev_name(&device->cdev->dev)); | 417 | dev_name(&device->cdev->dev)); |
418 | rc = -EOPNOTSUPP; | 418 | rc = -EOPNOTSUPP; |
419 | goto out_label; | 419 | goto out_label; |
420 | } | 420 | } |
421 | mdsk_term_io(device); | 421 | mdsk_term_io(device); |
422 | if (rc == 0) | 422 | if (rc == 0) |
423 | break; | 423 | break; |
424 | } | 424 | } |
425 | if (bsize > PAGE_SIZE) { | 425 | if (bsize > PAGE_SIZE) { |
426 | pr_warning("%s: Accessing the DASD failed because of an " | 426 | pr_warning("%s: Accessing the DASD failed because of an " |
427 | "incorrect format (rc=%d)\n", | 427 | "incorrect format (rc=%d)\n", |
428 | dev_name(&device->cdev->dev), rc); | 428 | dev_name(&device->cdev->dev), rc); |
429 | rc = -EIO; | 429 | rc = -EIO; |
430 | goto out_label; | 430 | goto out_label; |
431 | } | 431 | } |
432 | /* check for label block */ | 432 | /* check for label block */ |
433 | if (memcmp(label->label_id, DASD_DIAG_CMS1, | 433 | if (memcmp(label->label_id, DASD_DIAG_CMS1, |
434 | sizeof(DASD_DIAG_CMS1)) == 0) { | 434 | sizeof(DASD_DIAG_CMS1)) == 0) { |
435 | /* get formatted blocksize from label block */ | 435 | /* get formatted blocksize from label block */ |
436 | bsize = (unsigned int) label->block_size; | 436 | bsize = (unsigned int) label->block_size; |
437 | block->blocks = (unsigned long) label->block_count; | 437 | block->blocks = (unsigned long) label->block_count; |
438 | } else | 438 | } else |
439 | block->blocks = end_block; | 439 | block->blocks = end_block; |
440 | block->bp_block = bsize; | 440 | block->bp_block = bsize; |
441 | block->s2b_shift = 0; /* bits to shift 512 to get a block */ | 441 | block->s2b_shift = 0; /* bits to shift 512 to get a block */ |
442 | for (sb = 512; sb < bsize; sb = sb << 1) | 442 | for (sb = 512; sb < bsize; sb = sb << 1) |
443 | block->s2b_shift++; | 443 | block->s2b_shift++; |
444 | rc = mdsk_init_io(device, block->bp_block, 0, NULL); | 444 | rc = mdsk_init_io(device, block->bp_block, 0, NULL); |
445 | if (rc && (rc != 4)) { | 445 | if (rc && (rc != 4)) { |
446 | pr_warning("%s: DIAG initialization failed with rc=%d\n", | 446 | pr_warning("%s: DIAG initialization failed with rc=%d\n", |
447 | dev_name(&device->cdev->dev), rc); | 447 | dev_name(&device->cdev->dev), rc); |
448 | rc = -EIO; | 448 | rc = -EIO; |
449 | } else { | 449 | } else { |
450 | if (rc == 4) | 450 | if (rc == 4) |
451 | set_bit(DASD_FLAG_DEVICE_RO, &device->flags); | 451 | set_bit(DASD_FLAG_DEVICE_RO, &device->flags); |
452 | pr_info("%s: New DASD with %ld byte/block, total size %ld " | 452 | pr_info("%s: New DASD with %ld byte/block, total size %ld " |
453 | "KB%s\n", dev_name(&device->cdev->dev), | 453 | "KB%s\n", dev_name(&device->cdev->dev), |
454 | (unsigned long) block->bp_block, | 454 | (unsigned long) block->bp_block, |
455 | (unsigned long) (block->blocks << | 455 | (unsigned long) (block->blocks << |
456 | block->s2b_shift) >> 1, | 456 | block->s2b_shift) >> 1, |
457 | (rc == 4) ? ", read-only device" : ""); | 457 | (rc == 4) ? ", read-only device" : ""); |
458 | rc = 0; | 458 | rc = 0; |
459 | } | 459 | } |
460 | out_label: | 460 | out_label: |
461 | free_page((long) label); | 461 | free_page((long) label); |
462 | out: | 462 | out: |
463 | if (rc) { | 463 | if (rc) { |
464 | device->block = NULL; | 464 | device->block = NULL; |
465 | dasd_free_block(block); | 465 | dasd_free_block(block); |
466 | device->private = NULL; | 466 | device->private = NULL; |
467 | kfree(private); | 467 | kfree(private); |
468 | } | 468 | } |
469 | return rc; | 469 | return rc; |
470 | } | 470 | } |
471 | 471 | ||
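Two computations above are easy to miss: the probe loop simply retries the synchronous label read with every power-of-two block size from 512 bytes up to PAGE_SIZE until one succeeds, and s2b_shift comes out as log2(bsize / 512). The latter in isolation:

#include <stdio.h>

/* s2b_shift as computed above: how many times 512 must be doubled to
 * reach the device block size, i.e. log2(bsize / 512). */
static unsigned int s2b_shift(unsigned int bsize)
{
        unsigned int sb, shift = 0;

        for (sb = 512; sb < bsize; sb <<= 1)
                shift++;
        return shift;
}

int main(void)
{
        /* prints "0 1 3" for 512-, 1024- and 4096-byte blocks */
        printf("%u %u %u\n", s2b_shift(512), s2b_shift(1024),
               s2b_shift(4096));
        return 0;
}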
472 | /* Fill in virtual disk geometry for device. Return zero on success, non-zero | 472 | /* Fill in virtual disk geometry for device. Return zero on success, non-zero |
473 | * otherwise. */ | 473 | * otherwise. */ |
474 | static int | 474 | static int |
475 | dasd_diag_fill_geometry(struct dasd_block *block, struct hd_geometry *geo) | 475 | dasd_diag_fill_geometry(struct dasd_block *block, struct hd_geometry *geo) |
476 | { | 476 | { |
477 | if (dasd_check_blocksize(block->bp_block) != 0) | 477 | if (dasd_check_blocksize(block->bp_block) != 0) |
478 | return -EINVAL; | 478 | return -EINVAL; |
479 | geo->cylinders = (block->blocks << block->s2b_shift) >> 10; | 479 | geo->cylinders = (block->blocks << block->s2b_shift) >> 10; |
480 | geo->heads = 16; | 480 | geo->heads = 16; |
481 | geo->sectors = 128 >> block->s2b_shift; | 481 | geo->sectors = 128 >> block->s2b_shift; |
482 | return 0; | 482 | return 0; |
483 | } | 483 | } |
484 | 484 | ||
485 | static dasd_erp_fn_t | 485 | static dasd_erp_fn_t |
486 | dasd_diag_erp_action(struct dasd_ccw_req * cqr) | 486 | dasd_diag_erp_action(struct dasd_ccw_req * cqr) |
487 | { | 487 | { |
488 | return dasd_default_erp_action; | 488 | return dasd_default_erp_action; |
489 | } | 489 | } |
490 | 490 | ||
491 | static dasd_erp_fn_t | 491 | static dasd_erp_fn_t |
492 | dasd_diag_erp_postaction(struct dasd_ccw_req * cqr) | 492 | dasd_diag_erp_postaction(struct dasd_ccw_req * cqr) |
493 | { | 493 | { |
494 | return dasd_default_erp_postaction; | 494 | return dasd_default_erp_postaction; |
495 | } | 495 | } |
496 | 496 | ||
497 | /* Create DASD request from block device request. Return pointer to new | 497 | /* Create DASD request from block device request. Return pointer to new |
498 | * request on success, ERR_PTR otherwise. */ | 498 | * request on success, ERR_PTR otherwise. */ |
499 | static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, | 499 | static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, |
500 | struct dasd_block *block, | 500 | struct dasd_block *block, |
501 | struct request *req) | 501 | struct request *req) |
502 | { | 502 | { |
503 | struct dasd_ccw_req *cqr; | 503 | struct dasd_ccw_req *cqr; |
504 | struct dasd_diag_req *dreq; | 504 | struct dasd_diag_req *dreq; |
505 | struct dasd_diag_bio *dbio; | 505 | struct dasd_diag_bio *dbio; |
506 | struct req_iterator iter; | 506 | struct req_iterator iter; |
507 | struct bio_vec *bv; | 507 | struct bio_vec *bv; |
508 | char *dst; | 508 | char *dst; |
509 | unsigned int count, datasize; | 509 | unsigned int count, datasize; |
510 | sector_t recid, first_rec, last_rec; | 510 | sector_t recid, first_rec, last_rec; |
511 | unsigned int blksize, off; | 511 | unsigned int blksize, off; |
512 | unsigned char rw_cmd; | 512 | unsigned char rw_cmd; |
513 | 513 | ||
514 | if (rq_data_dir(req) == READ) | 514 | if (rq_data_dir(req) == READ) |
515 | rw_cmd = MDSK_READ_REQ; | 515 | rw_cmd = MDSK_READ_REQ; |
516 | else if (rq_data_dir(req) == WRITE) | 516 | else if (rq_data_dir(req) == WRITE) |
517 | rw_cmd = MDSK_WRITE_REQ; | 517 | rw_cmd = MDSK_WRITE_REQ; |
518 | else | 518 | else |
519 | return ERR_PTR(-EINVAL); | 519 | return ERR_PTR(-EINVAL); |
520 | blksize = block->bp_block; | 520 | blksize = block->bp_block; |
521 | /* Calculate record id of first and last block. */ | 521 | /* Calculate record id of first and last block. */ |
522 | first_rec = blk_rq_pos(req) >> block->s2b_shift; | 522 | first_rec = blk_rq_pos(req) >> block->s2b_shift; |
523 | last_rec = | 523 | last_rec = |
524 | (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; | 524 | (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; |
525 | /* Check struct bio and count the number of blocks for the request. */ | 525 | /* Check struct bio and count the number of blocks for the request. */ |
526 | count = 0; | 526 | count = 0; |
527 | rq_for_each_segment(bv, req, iter) { | 527 | rq_for_each_segment(bv, req, iter) { |
528 | if (bv->bv_len & (blksize - 1)) | 528 | if (bv->bv_len & (blksize - 1)) |
529 | /* Fba can only do full blocks. */ | 529 | /* Fba can only do full blocks. */ |
530 | return ERR_PTR(-EINVAL); | 530 | return ERR_PTR(-EINVAL); |
531 | count += bv->bv_len >> (block->s2b_shift + 9); | 531 | count += bv->bv_len >> (block->s2b_shift + 9); |
532 | } | 532 | } |
533 | /* Paranoia. */ | 533 | /* Paranoia. */ |
534 | if (count != last_rec - first_rec + 1) | 534 | if (count != last_rec - first_rec + 1) |
535 | return ERR_PTR(-EINVAL); | 535 | return ERR_PTR(-EINVAL); |
536 | /* Build the request */ | 536 | /* Build the request */ |
537 | datasize = sizeof(struct dasd_diag_req) + | 537 | datasize = sizeof(struct dasd_diag_req) + |
538 | count*sizeof(struct dasd_diag_bio); | 538 | count*sizeof(struct dasd_diag_bio); |
539 | cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev); | 539 | cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev); |
540 | if (IS_ERR(cqr)) | 540 | if (IS_ERR(cqr)) |
541 | return cqr; | 541 | return cqr; |
542 | 542 | ||
543 | dreq = (struct dasd_diag_req *) cqr->data; | 543 | dreq = (struct dasd_diag_req *) cqr->data; |
544 | dreq->block_count = count; | 544 | dreq->block_count = count; |
545 | dbio = dreq->bio; | 545 | dbio = dreq->bio; |
546 | recid = first_rec; | 546 | recid = first_rec; |
547 | rq_for_each_segment(bv, req, iter) { | 547 | rq_for_each_segment(bv, req, iter) { |
548 | dst = page_address(bv->bv_page) + bv->bv_offset; | 548 | dst = page_address(bv->bv_page) + bv->bv_offset; |
549 | for (off = 0; off < bv->bv_len; off += blksize) { | 549 | for (off = 0; off < bv->bv_len; off += blksize) { |
550 | memset(dbio, 0, sizeof (struct dasd_diag_bio)); | 550 | memset(dbio, 0, sizeof (struct dasd_diag_bio)); |
551 | dbio->type = rw_cmd; | 551 | dbio->type = rw_cmd; |
552 | dbio->block_number = recid + 1; | 552 | dbio->block_number = recid + 1; |
553 | dbio->buffer = dst; | 553 | dbio->buffer = dst; |
554 | dbio++; | 554 | dbio++; |
555 | dst += blksize; | 555 | dst += blksize; |
556 | recid++; | 556 | recid++; |
557 | } | 557 | } |
558 | } | 558 | } |
559 | cqr->retries = DIAG_MAX_RETRIES; | 559 | cqr->retries = DIAG_MAX_RETRIES; |
560 | cqr->buildclk = get_clock(); | 560 | cqr->buildclk = get_clock(); |
561 | if (blk_noretry_request(req) || | 561 | if (blk_noretry_request(req) || |
562 | block->base->features & DASD_FEATURE_FAILFAST) | 562 | block->base->features & DASD_FEATURE_FAILFAST) |
563 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 563 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
564 | cqr->startdev = memdev; | 564 | cqr->startdev = memdev; |
565 | cqr->memdev = memdev; | 565 | cqr->memdev = memdev; |
566 | cqr->block = block; | 566 | cqr->block = block; |
567 | cqr->expires = memdev->default_expires * HZ; | 567 | cqr->expires = memdev->default_expires * HZ; |
568 | cqr->status = DASD_CQR_FILLED; | 568 | cqr->status = DASD_CQR_FILLED; |
569 | return cqr; | 569 | return cqr; |
570 | } | 570 | } |
571 | 571 | ||
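The record arithmetic above translates between the block layer's 512-byte sectors and DIAG's one-based device-block numbers. A self-contained illustration with made-up request values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int s2b_shift = 2;          /* 2048-byte device blocks */
        uint64_t start = 8, nr_sectors = 16; /* request in 512-byte sectors */

        uint64_t first_rec = start >> s2b_shift;
        uint64_t last_rec = (start + nr_sectors - 1) >> s2b_shift;

        /* DIAG block numbers are one-based, hence "recid + 1" above */
        printf("diag blocks %llu..%llu (%llu bios)\n",
               (unsigned long long)(first_rec + 1),
               (unsigned long long)(last_rec + 1),
               (unsigned long long)(last_rec - first_rec + 1));
        return 0;
}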
572 | /* Release DASD request. Return non-zero if request was successful, zero | 572 | /* Release DASD request. Return non-zero if request was successful, zero |
573 | * otherwise. */ | 573 | * otherwise. */ |
574 | static int | 574 | static int |
575 | dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req) | 575 | dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req) |
576 | { | 576 | { |
577 | int status; | 577 | int status; |
578 | 578 | ||
579 | status = cqr->status == DASD_CQR_DONE; | 579 | status = cqr->status == DASD_CQR_DONE; |
580 | dasd_sfree_request(cqr, cqr->memdev); | 580 | dasd_sfree_request(cqr, cqr->memdev); |
581 | return status; | 581 | return status; |
582 | } | 582 | } |
583 | 583 | ||
584 | static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr) | 584 | static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr) |
585 | { | 585 | { |
586 | cqr->status = DASD_CQR_FILLED; | 586 | cqr->status = DASD_CQR_FILLED; |
587 | } | 587 | } |
588 | 588 | ||
589 | /* Fill in IOCTL data for device. */ | 589 | /* Fill in IOCTL data for device. */ |
590 | static int | 590 | static int |
591 | dasd_diag_fill_info(struct dasd_device * device, | 591 | dasd_diag_fill_info(struct dasd_device * device, |
592 | struct dasd_information2_t * info) | 592 | struct dasd_information2_t * info) |
593 | { | 593 | { |
594 | struct dasd_diag_private *private; | 594 | struct dasd_diag_private *private; |
595 | 595 | ||
596 | private = (struct dasd_diag_private *) device->private; | 596 | private = (struct dasd_diag_private *) device->private; |
597 | info->label_block = (unsigned int) private->pt_block; | 597 | info->label_block = (unsigned int) private->pt_block; |
598 | info->FBA_layout = 1; | 598 | info->FBA_layout = 1; |
599 | info->format = DASD_FORMAT_LDL; | 599 | info->format = DASD_FORMAT_LDL; |
600 | info->characteristics_size = sizeof (struct dasd_diag_characteristics); | 600 | info->characteristics_size = sizeof (struct dasd_diag_characteristics); |
601 | memcpy(info->characteristics, | 601 | memcpy(info->characteristics, |
602 | &((struct dasd_diag_private *) device->private)->rdc_data, | 602 | &((struct dasd_diag_private *) device->private)->rdc_data, |
603 | sizeof (struct dasd_diag_characteristics)); | 603 | sizeof (struct dasd_diag_characteristics)); |
604 | info->confdata_size = 0; | 604 | info->confdata_size = 0; |
605 | return 0; | 605 | return 0; |
606 | } | 606 | } |
607 | 607 | ||
608 | static void | 608 | static void |
609 | dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, | 609 | dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, |
610 | struct irb *stat) | 610 | struct irb *stat) |
611 | { | 611 | { |
612 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 612 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
613 | "dump sense not available for DIAG data"); | 613 | "dump sense not available for DIAG data"); |
614 | } | 614 | } |
615 | 615 | ||
616 | static struct dasd_discipline dasd_diag_discipline = { | 616 | static struct dasd_discipline dasd_diag_discipline = { |
617 | .owner = THIS_MODULE, | 617 | .owner = THIS_MODULE, |
618 | .name = "DIAG", | 618 | .name = "DIAG", |
619 | .ebcname = "DIAG", | 619 | .ebcname = "DIAG", |
620 | .max_blocks = DIAG_MAX_BLOCKS, | 620 | .max_blocks = DIAG_MAX_BLOCKS, |
621 | .check_device = dasd_diag_check_device, | 621 | .check_device = dasd_diag_check_device, |
622 | .verify_path = dasd_generic_verify_path, | 622 | .verify_path = dasd_generic_verify_path, |
623 | .fill_geometry = dasd_diag_fill_geometry, | 623 | .fill_geometry = dasd_diag_fill_geometry, |
624 | .start_IO = dasd_start_diag, | 624 | .start_IO = dasd_start_diag, |
625 | .term_IO = dasd_diag_term_IO, | 625 | .term_IO = dasd_diag_term_IO, |
626 | .handle_terminated_request = dasd_diag_handle_terminated_request, | 626 | .handle_terminated_request = dasd_diag_handle_terminated_request, |
627 | .erp_action = dasd_diag_erp_action, | 627 | .erp_action = dasd_diag_erp_action, |
628 | .erp_postaction = dasd_diag_erp_postaction, | 628 | .erp_postaction = dasd_diag_erp_postaction, |
629 | .build_cp = dasd_diag_build_cp, | 629 | .build_cp = dasd_diag_build_cp, |
630 | .free_cp = dasd_diag_free_cp, | 630 | .free_cp = dasd_diag_free_cp, |
631 | .dump_sense = dasd_diag_dump_sense, | 631 | .dump_sense = dasd_diag_dump_sense, |
632 | .fill_info = dasd_diag_fill_info, | 632 | .fill_info = dasd_diag_fill_info, |
633 | }; | 633 | }; |
634 | 634 | ||
635 | static int __init | 635 | static int __init |
636 | dasd_diag_init(void) | 636 | dasd_diag_init(void) |
637 | { | 637 | { |
638 | if (!MACHINE_IS_VM) { | 638 | if (!MACHINE_IS_VM) { |
639 | pr_info("Discipline %s cannot be used without z/VM\n", | 639 | pr_info("Discipline %s cannot be used without z/VM\n", |
640 | dasd_diag_discipline.name); | 640 | dasd_diag_discipline.name); |
641 | return -ENODEV; | 641 | return -ENODEV; |
642 | } | 642 | } |
643 | ASCEBC(dasd_diag_discipline.ebcname, 4); | 643 | ASCEBC(dasd_diag_discipline.ebcname, 4); |
644 | 644 | ||
645 | service_subclass_irq_register(); | 645 | service_subclass_irq_register(); |
646 | register_external_interrupt(0x2603, dasd_ext_handler); | 646 | register_external_interrupt(0x2603, dasd_ext_handler); |
647 | dasd_diag_discipline_pointer = &dasd_diag_discipline; | 647 | dasd_diag_discipline_pointer = &dasd_diag_discipline; |
648 | return 0; | 648 | return 0; |
649 | } | 649 | } |
650 | 650 | ||
651 | static void __exit | 651 | static void __exit |
652 | dasd_diag_cleanup(void) | 652 | dasd_diag_cleanup(void) |
653 | { | 653 | { |
654 | unregister_external_interrupt(0x2603, dasd_ext_handler); | 654 | unregister_external_interrupt(0x2603, dasd_ext_handler); |
655 | service_subclass_irq_unregister(); | 655 | service_subclass_irq_unregister(); |
656 | dasd_diag_discipline_pointer = NULL; | 656 | dasd_diag_discipline_pointer = NULL; |
657 | } | 657 | } |
658 | 658 | ||
659 | module_init(dasd_diag_init); | 659 | module_init(dasd_diag_init); |
660 | module_exit(dasd_diag_cleanup); | 660 | module_exit(dasd_diag_cleanup); |
661 | 661 |
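dasd_diag_init() and dasd_diag_cleanup() above are a compact example of the external-interrupt interface this commit consolidates into asm/irq.h: enable the service-signal subclass, then attach a handler to external interrupt code 0x2603 (DIAG I/O completion). A bare-bones module following the same pattern; everything except the four API calls and the 0x2603 code is invented for the sketch:

#include <linux/module.h>
#include <asm/irq.h>

static void sketch_ext_handler(unsigned int ext_int_code,
                               unsigned int param32, unsigned long param64)
{
        /* runs in external-interrupt context, keep it short */
}

static int __init sketch_init(void)
{
        service_subclass_irq_register();
        return register_external_interrupt(0x2603, sketch_ext_handler);
}

static void __exit sketch_exit(void)
{
        unregister_external_interrupt(0x2603, sketch_ext_handler);
        service_subclass_irq_unregister();
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");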
drivers/s390/char/sclp.c
1 | /* | 1 | /* |
2 | * core function to access sclp interface | 2 | * core function to access sclp interface |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 1999, 2009 | 4 | * Copyright IBM Corp. 1999, 2009 |
5 | * | 5 | * |
6 | * Author(s): Martin Peschke <mpeschke@de.ibm.com> | 6 | * Author(s): Martin Peschke <mpeschke@de.ibm.com> |
7 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 7 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/kernel_stat.h> | 10 | #include <linux/kernel_stat.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/err.h> | 12 | #include <linux/err.h> |
13 | #include <linux/spinlock.h> | 13 | #include <linux/spinlock.h> |
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/timer.h> | 15 | #include <linux/timer.h> |
16 | #include <linux/reboot.h> | 16 | #include <linux/reboot.h> |
17 | #include <linux/jiffies.h> | 17 | #include <linux/jiffies.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/suspend.h> | 19 | #include <linux/suspend.h> |
20 | #include <linux/completion.h> | 20 | #include <linux/completion.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <asm/s390_ext.h> | ||
23 | #include <asm/types.h> | 22 | #include <asm/types.h> |
24 | #include <asm/irq.h> | 23 | #include <asm/irq.h> |
25 | 24 | ||
26 | #include "sclp.h" | 25 | #include "sclp.h" |
27 | 26 | ||
28 | #define SCLP_HEADER "sclp: " | 27 | #define SCLP_HEADER "sclp: " |
29 | 28 | ||
30 | /* Lock to protect internal data consistency. */ | 29 | /* Lock to protect internal data consistency. */ |
31 | static DEFINE_SPINLOCK(sclp_lock); | 30 | static DEFINE_SPINLOCK(sclp_lock); |
32 | 31 | ||
33 | /* Mask of events that we can send to the sclp interface. */ | 32 | /* Mask of events that we can send to the sclp interface. */ |
34 | static sccb_mask_t sclp_receive_mask; | 33 | static sccb_mask_t sclp_receive_mask; |
35 | 34 | ||
36 | /* Mask of events that we can receive from the sclp interface. */ | 35 | /* Mask of events that we can receive from the sclp interface. */ |
37 | static sccb_mask_t sclp_send_mask; | 36 | static sccb_mask_t sclp_send_mask; |
38 | 37 | ||
39 | /* List of registered event listeners and senders. */ | 38 | /* List of registered event listeners and senders. */ |
40 | static struct list_head sclp_reg_list; | 39 | static struct list_head sclp_reg_list; |
41 | 40 | ||
42 | /* List of queued requests. */ | 41 | /* List of queued requests. */ |
43 | static struct list_head sclp_req_queue; | 42 | static struct list_head sclp_req_queue; |
44 | 43 | ||
45 | /* Data for read and init requests. */ | 44 | /* Data for read and init requests. */ |
46 | static struct sclp_req sclp_read_req; | 45 | static struct sclp_req sclp_read_req; |
47 | static struct sclp_req sclp_init_req; | 46 | static struct sclp_req sclp_init_req; |
48 | static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); | 47 | static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); |
49 | static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); | 48 | static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); |
50 | 49 | ||
51 | /* Suspend request */ | 50 | /* Suspend request */ |
52 | static DECLARE_COMPLETION(sclp_request_queue_flushed); | 51 | static DECLARE_COMPLETION(sclp_request_queue_flushed); |
53 | 52 | ||
54 | static void sclp_suspend_req_cb(struct sclp_req *req, void *data) | 53 | static void sclp_suspend_req_cb(struct sclp_req *req, void *data) |
55 | { | 54 | { |
56 | complete(&sclp_request_queue_flushed); | 55 | complete(&sclp_request_queue_flushed); |
57 | } | 56 | } |
58 | 57 | ||
59 | static struct sclp_req sclp_suspend_req; | 58 | static struct sclp_req sclp_suspend_req; |
60 | 59 | ||
61 | /* Timer for request retries. */ | 60 | /* Timer for request retries. */ |
62 | static struct timer_list sclp_request_timer; | 61 | static struct timer_list sclp_request_timer; |
63 | 62 | ||
64 | /* Internal state: is the driver initialized? */ | 63 | /* Internal state: is the driver initialized? */ |
65 | static volatile enum sclp_init_state_t { | 64 | static volatile enum sclp_init_state_t { |
66 | sclp_init_state_uninitialized, | 65 | sclp_init_state_uninitialized, |
67 | sclp_init_state_initializing, | 66 | sclp_init_state_initializing, |
68 | sclp_init_state_initialized | 67 | sclp_init_state_initialized |
69 | } sclp_init_state = sclp_init_state_uninitialized; | 68 | } sclp_init_state = sclp_init_state_uninitialized; |
70 | 69 | ||
71 | /* Internal state: is a request active at the sclp? */ | 70 | /* Internal state: is a request active at the sclp? */ |
72 | static volatile enum sclp_running_state_t { | 71 | static volatile enum sclp_running_state_t { |
73 | sclp_running_state_idle, | 72 | sclp_running_state_idle, |
74 | sclp_running_state_running, | 73 | sclp_running_state_running, |
75 | sclp_running_state_reset_pending | 74 | sclp_running_state_reset_pending |
76 | } sclp_running_state = sclp_running_state_idle; | 75 | } sclp_running_state = sclp_running_state_idle; |
77 | 76 | ||
78 | /* Internal state: is a read request pending? */ | 77 | /* Internal state: is a read request pending? */ |
79 | static volatile enum sclp_reading_state_t { | 78 | static volatile enum sclp_reading_state_t { |
80 | sclp_reading_state_idle, | 79 | sclp_reading_state_idle, |
81 | sclp_reading_state_reading | 80 | sclp_reading_state_reading |
82 | } sclp_reading_state = sclp_reading_state_idle; | 81 | } sclp_reading_state = sclp_reading_state_idle; |
83 | 82 | ||
84 | /* Internal state: is the driver currently serving requests? */ | 83 | /* Internal state: is the driver currently serving requests? */ |
85 | static volatile enum sclp_activation_state_t { | 84 | static volatile enum sclp_activation_state_t { |
86 | sclp_activation_state_active, | 85 | sclp_activation_state_active, |
87 | sclp_activation_state_deactivating, | 86 | sclp_activation_state_deactivating, |
88 | sclp_activation_state_inactive, | 87 | sclp_activation_state_inactive, |
89 | sclp_activation_state_activating | 88 | sclp_activation_state_activating |
90 | } sclp_activation_state = sclp_activation_state_active; | 89 | } sclp_activation_state = sclp_activation_state_active; |
91 | 90 | ||
92 | /* Internal state: is an init mask request pending? */ | 91 | /* Internal state: is an init mask request pending? */ |
93 | static volatile enum sclp_mask_state_t { | 92 | static volatile enum sclp_mask_state_t { |
94 | sclp_mask_state_idle, | 93 | sclp_mask_state_idle, |
95 | sclp_mask_state_initializing | 94 | sclp_mask_state_initializing |
96 | } sclp_mask_state = sclp_mask_state_idle; | 95 | } sclp_mask_state = sclp_mask_state_idle; |
97 | 96 | ||
98 | /* Internal state: is the driver suspended? */ | 97 | /* Internal state: is the driver suspended? */ |
99 | static enum sclp_suspend_state_t { | 98 | static enum sclp_suspend_state_t { |
100 | sclp_suspend_state_running, | 99 | sclp_suspend_state_running, |
101 | sclp_suspend_state_suspended, | 100 | sclp_suspend_state_suspended, |
102 | } sclp_suspend_state = sclp_suspend_state_running; | 101 | } sclp_suspend_state = sclp_suspend_state_running; |
103 | 102 | ||
104 | /* Maximum retry counts */ | 103 | /* Maximum retry counts */ |
105 | #define SCLP_INIT_RETRY 3 | 104 | #define SCLP_INIT_RETRY 3 |
106 | #define SCLP_MASK_RETRY 3 | 105 | #define SCLP_MASK_RETRY 3 |
107 | 106 | ||
108 | /* Timeout intervals in seconds. */ | 107 | /* Timeout intervals in seconds. */ |
109 | #define SCLP_BUSY_INTERVAL 10 | 108 | #define SCLP_BUSY_INTERVAL 10 |
110 | #define SCLP_RETRY_INTERVAL 30 | 109 | #define SCLP_RETRY_INTERVAL 30 |
111 | 110 | ||
112 | static void sclp_process_queue(void); | 111 | static void sclp_process_queue(void); |
113 | static void __sclp_make_read_req(void); | 112 | static void __sclp_make_read_req(void); |
114 | static int sclp_init_mask(int calculate); | 113 | static int sclp_init_mask(int calculate); |
115 | static int sclp_init(void); | 114 | static int sclp_init(void); |
116 | 115 | ||
117 | /* Perform service call. Return 0 on success, non-zero otherwise. */ | 116 | /* Perform service call. Return 0 on success, non-zero otherwise. */ |
118 | int | 117 | int |
119 | sclp_service_call(sclp_cmdw_t command, void *sccb) | 118 | sclp_service_call(sclp_cmdw_t command, void *sccb) |
120 | { | 119 | { |
121 | int cc; | 120 | int cc; |
122 | 121 | ||
123 | asm volatile( | 122 | asm volatile( |
124 | " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */ | 123 | " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */ |
125 | " ipm %0\n" | 124 | " ipm %0\n" |
126 | " srl %0,28" | 125 | " srl %0,28" |
127 | : "=&d" (cc) : "d" (command), "a" (__pa(sccb)) | 126 | : "=&d" (cc) : "d" (command), "a" (__pa(sccb)) |
128 | : "cc", "memory"); | 127 | : "cc", "memory"); |
129 | if (cc == 3) | 128 | if (cc == 3) |
130 | return -EIO; | 129 | return -EIO; |
131 | if (cc == 2) | 130 | if (cc == 2) |
132 | return -EBUSY; | 131 | return -EBUSY; |
133 | return 0; | 132 | return 0; |
134 | } | 133 | } |
135 | 134 | ||
136 | 135 | ||
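In the asm block above, SERVC reports its outcome in the condition code; IPM copies the condition code into bits 2-3 of the result register and the shift right by 28 turns it into a plain integer 0-3. The errno mapping on its own:

#include <errno.h>

/* Service-call condition codes as handled above: cc 3 means the SCLP is
 * not operational, cc 2 means it is busy, anything else is success. */
static int servc_cc_to_errno(int cc)
{
        if (cc == 3)
                return -EIO;
        if (cc == 2)
                return -EBUSY;
        return 0;
}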
137 | static void | 136 | static void |
138 | __sclp_queue_read_req(void) | 137 | __sclp_queue_read_req(void) |
139 | { | 138 | { |
140 | if (sclp_reading_state == sclp_reading_state_idle) { | 139 | if (sclp_reading_state == sclp_reading_state_idle) { |
141 | sclp_reading_state = sclp_reading_state_reading; | 140 | sclp_reading_state = sclp_reading_state_reading; |
142 | __sclp_make_read_req(); | 141 | __sclp_make_read_req(); |
143 | /* Add request to head of queue */ | 142 | /* Add request to head of queue */ |
144 | list_add(&sclp_read_req.list, &sclp_req_queue); | 143 | list_add(&sclp_read_req.list, &sclp_req_queue); |
145 | } | 144 | } |
146 | } | 145 | } |
147 | 146 | ||
148 | /* Set up request retry timer. Called while sclp_lock is locked. */ | 147 | /* Set up request retry timer. Called while sclp_lock is locked. */ |
149 | static inline void | 148 | static inline void |
150 | __sclp_set_request_timer(unsigned long time, void (*function)(unsigned long), | 149 | __sclp_set_request_timer(unsigned long time, void (*function)(unsigned long), |
151 | unsigned long data) | 150 | unsigned long data) |
152 | { | 151 | { |
153 | del_timer(&sclp_request_timer); | 152 | del_timer(&sclp_request_timer); |
154 | sclp_request_timer.function = function; | 153 | sclp_request_timer.function = function; |
155 | sclp_request_timer.data = data; | 154 | sclp_request_timer.data = data; |
156 | sclp_request_timer.expires = jiffies + time; | 155 | sclp_request_timer.expires = jiffies + time; |
157 | add_timer(&sclp_request_timer); | 156 | add_timer(&sclp_request_timer); |
158 | } | 157 | } |
159 | 158 | ||
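__sclp_set_request_timer() re-arms one shared timer_list using the classic timer API of this era, where function, data and expiry are assigned by hand before add_timer(). A hedged standalone sketch of that pattern:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list sketch_timer;  /* init_timer() it once at setup */

static void sketch_timeout(unsigned long data)
{
        /* timer expired; "data" carries whatever the arming site set */
}

/* Re-arm: delete any pending instance first, exactly as above, so the
 * timer fires at most once per arming. */
static void sketch_arm(unsigned long delay_jiffies, unsigned long data)
{
        del_timer(&sketch_timer);
        sketch_timer.function = sketch_timeout;
        sketch_timer.data = data;
        sketch_timer.expires = jiffies + delay_jiffies;
        add_timer(&sketch_timer);
}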
160 | /* Request timeout handler. Restart the request queue. If DATA is non-zero, | 159 | /* Request timeout handler. Restart the request queue. If DATA is non-zero, |
161 | * force restart of running request. */ | 160 | * force restart of running request. */ |
162 | static void | 161 | static void |
163 | sclp_request_timeout(unsigned long data) | 162 | sclp_request_timeout(unsigned long data) |
164 | { | 163 | { |
165 | unsigned long flags; | 164 | unsigned long flags; |
166 | 165 | ||
167 | spin_lock_irqsave(&sclp_lock, flags); | 166 | spin_lock_irqsave(&sclp_lock, flags); |
168 | if (data) { | 167 | if (data) { |
169 | if (sclp_running_state == sclp_running_state_running) { | 168 | if (sclp_running_state == sclp_running_state_running) { |
170 | /* Break running state and queue NOP read event request | 169 | /* Break running state and queue NOP read event request |
171 | * to get a defined interface state. */ | 170 | * to get a defined interface state. */ |
172 | __sclp_queue_read_req(); | 171 | __sclp_queue_read_req(); |
173 | sclp_running_state = sclp_running_state_idle; | 172 | sclp_running_state = sclp_running_state_idle; |
174 | } | 173 | } |
175 | } else { | 174 | } else { |
176 | __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ, | 175 | __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ, |
177 | sclp_request_timeout, 0); | 176 | sclp_request_timeout, 0); |
178 | } | 177 | } |
179 | spin_unlock_irqrestore(&sclp_lock, flags); | 178 | spin_unlock_irqrestore(&sclp_lock, flags); |
180 | sclp_process_queue(); | 179 | sclp_process_queue(); |
181 | } | 180 | } |
182 | 181 | ||
183 | /* Try to start a request. Return zero if the request was successfully | 182 | /* Try to start a request. Return zero if the request was successfully |
184 | * started or if it will be started at a later time. Return non-zero otherwise. | 183 | * started or if it will be started at a later time. Return non-zero otherwise. |
185 | * Called while sclp_lock is locked. */ | 184 | * Called while sclp_lock is locked. */ |
186 | static int | 185 | static int |
187 | __sclp_start_request(struct sclp_req *req) | 186 | __sclp_start_request(struct sclp_req *req) |
188 | { | 187 | { |
189 | int rc; | 188 | int rc; |
190 | 189 | ||
191 | if (sclp_running_state != sclp_running_state_idle) | 190 | if (sclp_running_state != sclp_running_state_idle) |
192 | return 0; | 191 | return 0; |
193 | del_timer(&sclp_request_timer); | 192 | del_timer(&sclp_request_timer); |
194 | rc = sclp_service_call(req->command, req->sccb); | 193 | rc = sclp_service_call(req->command, req->sccb); |
195 | req->start_count++; | 194 | req->start_count++; |
196 | 195 | ||
197 | if (rc == 0) { | 196 | if (rc == 0) { |
198 | /* Successfully started request */ | 197 | /* Successfully started request */ |
199 | req->status = SCLP_REQ_RUNNING; | 198 | req->status = SCLP_REQ_RUNNING; |
200 | sclp_running_state = sclp_running_state_running; | 199 | sclp_running_state = sclp_running_state_running; |
201 | __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ, | 200 | __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ, |
202 | sclp_request_timeout, 1); | 201 | sclp_request_timeout, 1); |
203 | return 0; | 202 | return 0; |
204 | } else if (rc == -EBUSY) { | 203 | } else if (rc == -EBUSY) { |
205 | /* Try again later */ | 204 | /* Try again later */ |
206 | __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ, | 205 | __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ, |
207 | sclp_request_timeout, 0); | 206 | sclp_request_timeout, 0); |
208 | return 0; | 207 | return 0; |
209 | } | 208 | } |
210 | /* Request failed */ | 209 | /* Request failed */ |
211 | req->status = SCLP_REQ_FAILED; | 210 | req->status = SCLP_REQ_FAILED; |
212 | return rc; | 211 | return rc; |
213 | } | 212 | } |
214 | 213 | ||
215 | /* Try to start queued requests. */ | 214 | /* Try to start queued requests. */ |
216 | static void | 215 | static void |
217 | sclp_process_queue(void) | 216 | sclp_process_queue(void) |
218 | { | 217 | { |
219 | struct sclp_req *req; | 218 | struct sclp_req *req; |
220 | int rc; | 219 | int rc; |
221 | unsigned long flags; | 220 | unsigned long flags; |
222 | 221 | ||
223 | spin_lock_irqsave(&sclp_lock, flags); | 222 | spin_lock_irqsave(&sclp_lock, flags); |
224 | if (sclp_running_state != sclp_running_state_idle) { | 223 | if (sclp_running_state != sclp_running_state_idle) { |
225 | spin_unlock_irqrestore(&sclp_lock, flags); | 224 | spin_unlock_irqrestore(&sclp_lock, flags); |
226 | return; | 225 | return; |
227 | } | 226 | } |
228 | del_timer(&sclp_request_timer); | 227 | del_timer(&sclp_request_timer); |
229 | while (!list_empty(&sclp_req_queue)) { | 228 | while (!list_empty(&sclp_req_queue)) { |
230 | req = list_entry(sclp_req_queue.next, struct sclp_req, list); | 229 | req = list_entry(sclp_req_queue.next, struct sclp_req, list); |
231 | if (!req->sccb) | 230 | if (!req->sccb) |
232 | goto do_post; | 231 | goto do_post; |
233 | rc = __sclp_start_request(req); | 232 | rc = __sclp_start_request(req); |
234 | if (rc == 0) | 233 | if (rc == 0) |
235 | break; | 234 | break; |
236 | /* Request failed */ | 235 | /* Request failed */ |
237 | if (req->start_count > 1) { | 236 | if (req->start_count > 1) { |
238 | /* Cannot abort already submitted request - could still | 237 | /* Cannot abort already submitted request - could still |
239 | * be active at the SCLP */ | 238 | * be active at the SCLP */ |
240 | __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ, | 239 | __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ, |
241 | sclp_request_timeout, 0); | 240 | sclp_request_timeout, 0); |
242 | break; | 241 | break; |
243 | } | 242 | } |
244 | do_post: | 243 | do_post: |
245 | /* Post-processing for aborted request */ | 244 | /* Post-processing for aborted request */ |
246 | list_del(&req->list); | 245 | list_del(&req->list); |
247 | if (req->callback) { | 246 | if (req->callback) { |
248 | spin_unlock_irqrestore(&sclp_lock, flags); | 247 | spin_unlock_irqrestore(&sclp_lock, flags); |
249 | req->callback(req, req->callback_data); | 248 | req->callback(req, req->callback_data); |
250 | spin_lock_irqsave(&sclp_lock, flags); | 249 | spin_lock_irqsave(&sclp_lock, flags); |
251 | } | 250 | } |
252 | } | 251 | } |
253 | spin_unlock_irqrestore(&sclp_lock, flags); | 252 | spin_unlock_irqrestore(&sclp_lock, flags); |
254 | } | 253 | } |
255 | 254 | ||
256 | static int __sclp_can_add_request(struct sclp_req *req) | 255 | static int __sclp_can_add_request(struct sclp_req *req) |
257 | { | 256 | { |
258 | if (req == &sclp_suspend_req || req == &sclp_init_req) | 257 | if (req == &sclp_suspend_req || req == &sclp_init_req) |
259 | return 1; | 258 | return 1; |
260 | if (sclp_suspend_state != sclp_suspend_state_running) | 259 | if (sclp_suspend_state != sclp_suspend_state_running) |
261 | return 0; | 260 | return 0; |
262 | if (sclp_init_state != sclp_init_state_initialized) | 261 | if (sclp_init_state != sclp_init_state_initialized) |
263 | return 0; | 262 | return 0; |
264 | if (sclp_activation_state != sclp_activation_state_active) | 263 | if (sclp_activation_state != sclp_activation_state_active) |
265 | return 0; | 264 | return 0; |
266 | return 1; | 265 | return 1; |
267 | } | 266 | } |
268 | 267 | ||
269 | /* Queue a new request. Return zero on success, non-zero otherwise. */ | 268 | /* Queue a new request. Return zero on success, non-zero otherwise. */ |
270 | int | 269 | int |
271 | sclp_add_request(struct sclp_req *req) | 270 | sclp_add_request(struct sclp_req *req) |
272 | { | 271 | { |
273 | unsigned long flags; | 272 | unsigned long flags; |
274 | int rc; | 273 | int rc; |
275 | 274 | ||
276 | spin_lock_irqsave(&sclp_lock, flags); | 275 | spin_lock_irqsave(&sclp_lock, flags); |
277 | if (!__sclp_can_add_request(req)) { | 276 | if (!__sclp_can_add_request(req)) { |
278 | spin_unlock_irqrestore(&sclp_lock, flags); | 277 | spin_unlock_irqrestore(&sclp_lock, flags); |
279 | return -EIO; | 278 | return -EIO; |
280 | } | 279 | } |
281 | req->status = SCLP_REQ_QUEUED; | 280 | req->status = SCLP_REQ_QUEUED; |
282 | req->start_count = 0; | 281 | req->start_count = 0; |
283 | list_add_tail(&req->list, &sclp_req_queue); | 282 | list_add_tail(&req->list, &sclp_req_queue); |
284 | rc = 0; | 283 | rc = 0; |
285 | /* Start if request is first in list */ | 284 | /* Start if request is first in list */ |
286 | if (sclp_running_state == sclp_running_state_idle && | 285 | if (sclp_running_state == sclp_running_state_idle && |
287 | req->list.prev == &sclp_req_queue) { | 286 | req->list.prev == &sclp_req_queue) { |
288 | if (!req->sccb) { | 287 | if (!req->sccb) { |
289 | list_del(&req->list); | 288 | list_del(&req->list); |
290 | rc = -ENODATA; | 289 | rc = -ENODATA; |
291 | goto out; | 290 | goto out; |
292 | } | 291 | } |
293 | rc = __sclp_start_request(req); | 292 | rc = __sclp_start_request(req); |
294 | if (rc) | 293 | if (rc) |
295 | list_del(&req->list); | 294 | list_del(&req->list); |
296 | } | 295 | } |
297 | out: | 296 | out: |
298 | spin_unlock_irqrestore(&sclp_lock, flags); | 297 | spin_unlock_irqrestore(&sclp_lock, flags); |
299 | return rc; | 298 | return rc; |
300 | } | 299 | } |
301 | 300 | ||
302 | EXPORT_SYMBOL(sclp_add_request); | 301 | EXPORT_SYMBOL(sclp_add_request); |
303 | 302 | ||
304 | /* Dispatch events found in request buffer to registered listeners. Return 0 | 303 | /* Dispatch events found in request buffer to registered listeners. Return 0 |
305 | * if all events were dispatched, non-zero otherwise. */ | 304 | * if all events were dispatched, non-zero otherwise. */ |
306 | static int | 305 | static int |
307 | sclp_dispatch_evbufs(struct sccb_header *sccb) | 306 | sclp_dispatch_evbufs(struct sccb_header *sccb) |
308 | { | 307 | { |
309 | unsigned long flags; | 308 | unsigned long flags; |
310 | struct evbuf_header *evbuf; | 309 | struct evbuf_header *evbuf; |
311 | struct list_head *l; | 310 | struct list_head *l; |
312 | struct sclp_register *reg; | 311 | struct sclp_register *reg; |
313 | int offset; | 312 | int offset; |
314 | int rc; | 313 | int rc; |
315 | 314 | ||
316 | spin_lock_irqsave(&sclp_lock, flags); | 315 | spin_lock_irqsave(&sclp_lock, flags); |
317 | rc = 0; | 316 | rc = 0; |
318 | for (offset = sizeof(struct sccb_header); offset < sccb->length; | 317 | for (offset = sizeof(struct sccb_header); offset < sccb->length; |
319 | offset += evbuf->length) { | 318 | offset += evbuf->length) { |
320 | evbuf = (struct evbuf_header *) ((addr_t) sccb + offset); | 319 | evbuf = (struct evbuf_header *) ((addr_t) sccb + offset); |
321 | /* Check for malformed hardware response */ | 320 | /* Check for malformed hardware response */ |
322 | if (evbuf->length == 0) | 321 | if (evbuf->length == 0) |
323 | break; | 322 | break; |
324 | /* Search for event handler */ | 323 | /* Search for event handler */ |
325 | reg = NULL; | 324 | reg = NULL; |
326 | list_for_each(l, &sclp_reg_list) { | 325 | list_for_each(l, &sclp_reg_list) { |
327 | reg = list_entry(l, struct sclp_register, list); | 326 | reg = list_entry(l, struct sclp_register, list); |
328 | if (reg->receive_mask & (1 << (32 - evbuf->type))) | 327 | if (reg->receive_mask & (1 << (32 - evbuf->type))) |
329 | break; | 328 | break; |
330 | else | 329 | else |
331 | reg = NULL; | 330 | reg = NULL; |
332 | } | 331 | } |
333 | if (reg && reg->receiver_fn) { | 332 | if (reg && reg->receiver_fn) { |
334 | spin_unlock_irqrestore(&sclp_lock, flags); | 333 | spin_unlock_irqrestore(&sclp_lock, flags); |
335 | reg->receiver_fn(evbuf); | 334 | reg->receiver_fn(evbuf); |
336 | spin_lock_irqsave(&sclp_lock, flags); | 335 | spin_lock_irqsave(&sclp_lock, flags); |
337 | } else if (reg == NULL) | 336 | } else if (reg == NULL) |
338 | rc = -ENOSYS; | 337 | rc = -ENOSYS; |
339 | } | 338 | } |
340 | spin_unlock_irqrestore(&sclp_lock, flags); | 339 | spin_unlock_irqrestore(&sclp_lock, flags); |
341 | return rc; | 340 | return rc; |
342 | } | 341 | } |
343 | 342 | ||
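The dispatcher walks the event buffers packed behind the sccb header and matches each one against every listener's receive_mask; SCLP numbers event types from 1 at the most significant bit of the 32-bit mask, which is where the 1 << (32 - evbuf->type) test comes from. In isolation:

#include <stdint.h>

/* Does a listener's 32-bit receive mask cover SCLP event type "type"
 * (1 = most significant bit, 32 = least)? Mirrors the test above. */
static int wants_event(uint32_t receive_mask, unsigned int type)
{
        return (receive_mask & (1u << (32 - type))) != 0;
}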
344 | /* Read event data request callback. */ | 343 | /* Read event data request callback. */ |
345 | static void | 344 | static void |
346 | sclp_read_cb(struct sclp_req *req, void *data) | 345 | sclp_read_cb(struct sclp_req *req, void *data) |
347 | { | 346 | { |
348 | unsigned long flags; | 347 | unsigned long flags; |
349 | struct sccb_header *sccb; | 348 | struct sccb_header *sccb; |
350 | 349 | ||
351 | sccb = (struct sccb_header *) req->sccb; | 350 | sccb = (struct sccb_header *) req->sccb; |
352 | if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 || | 351 | if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 || |
353 | sccb->response_code == 0x220)) | 352 | sccb->response_code == 0x220)) |
354 | sclp_dispatch_evbufs(sccb); | 353 | sclp_dispatch_evbufs(sccb); |
355 | spin_lock_irqsave(&sclp_lock, flags); | 354 | spin_lock_irqsave(&sclp_lock, flags); |
356 | sclp_reading_state = sclp_reading_state_idle; | 355 | sclp_reading_state = sclp_reading_state_idle; |
357 | spin_unlock_irqrestore(&sclp_lock, flags); | 356 | spin_unlock_irqrestore(&sclp_lock, flags); |
358 | } | 357 | } |
359 | 358 | ||
360 | /* Prepare read event data request. Called while sclp_lock is locked. */ | 359 | /* Prepare read event data request. Called while sclp_lock is locked. */ |
361 | static void __sclp_make_read_req(void) | 360 | static void __sclp_make_read_req(void) |
362 | { | 361 | { |
363 | struct sccb_header *sccb; | 362 | struct sccb_header *sccb; |
364 | 363 | ||
365 | sccb = (struct sccb_header *) sclp_read_sccb; | 364 | sccb = (struct sccb_header *) sclp_read_sccb; |
366 | clear_page(sccb); | 365 | clear_page(sccb); |
367 | memset(&sclp_read_req, 0, sizeof(struct sclp_req)); | 366 | memset(&sclp_read_req, 0, sizeof(struct sclp_req)); |
368 | sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA; | 367 | sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA; |
369 | sclp_read_req.status = SCLP_REQ_QUEUED; | 368 | sclp_read_req.status = SCLP_REQ_QUEUED; |
370 | sclp_read_req.start_count = 0; | 369 | sclp_read_req.start_count = 0; |
371 | sclp_read_req.callback = sclp_read_cb; | 370 | sclp_read_req.callback = sclp_read_cb; |
372 | sclp_read_req.sccb = sccb; | 371 | sclp_read_req.sccb = sccb; |
373 | sccb->length = PAGE_SIZE; | 372 | sccb->length = PAGE_SIZE; |
374 | sccb->function_code = 0; | 373 | sccb->function_code = 0; |
375 | sccb->control_mask[2] = 0x80; | 374 | sccb->control_mask[2] = 0x80; |
376 | } | 375 | } |
377 | 376 | ||
378 | /* Search request list for request with matching sccb. Return request if found, | 377 | /* Search request list for request with matching sccb. Return request if found, |
379 | * NULL otherwise. Called while sclp_lock is locked. */ | 378 | * NULL otherwise. Called while sclp_lock is locked. */ |
380 | static inline struct sclp_req * | 379 | static inline struct sclp_req * |
381 | __sclp_find_req(u32 sccb) | 380 | __sclp_find_req(u32 sccb) |
382 | { | 381 | { |
383 | struct list_head *l; | 382 | struct list_head *l; |
384 | struct sclp_req *req; | 383 | struct sclp_req *req; |
385 | 384 | ||
386 | list_for_each(l, &sclp_req_queue) { | 385 | list_for_each(l, &sclp_req_queue) { |
387 | req = list_entry(l, struct sclp_req, list); | 386 | req = list_entry(l, struct sclp_req, list); |
388 | if (sccb == (u32) (addr_t) req->sccb) | 387 | if (sccb == (u32) (addr_t) req->sccb) |
389 | return req; | 388 | return req; |
390 | } | 389 | } |
391 | return NULL; | 390 | return NULL; |
392 | } | 391 | } |
393 | 392 | ||
394 | /* Handler for external interruption. Perform request post-processing. | 393 | /* Handler for external interruption. Perform request post-processing. |
395 | * Prepare read event data request if necessary. Start processing of next | 394 | * Prepare read event data request if necessary. Start processing of next |
396 | * request on queue. */ | 395 | * request on queue. */ |
397 | static void sclp_interrupt_handler(unsigned int ext_int_code, | 396 | static void sclp_interrupt_handler(unsigned int ext_int_code, |
398 | unsigned int param32, unsigned long param64) | 397 | unsigned int param32, unsigned long param64) |
399 | { | 398 | { |
400 | struct sclp_req *req; | 399 | struct sclp_req *req; |
401 | u32 finished_sccb; | 400 | u32 finished_sccb; |
402 | u32 evbuf_pending; | 401 | u32 evbuf_pending; |
403 | 402 | ||
404 | kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++; | 403 | kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++; |
405 | spin_lock(&sclp_lock); | 404 | spin_lock(&sclp_lock); |
406 | finished_sccb = param32 & 0xfffffff8; | 405 | finished_sccb = param32 & 0xfffffff8; |
407 | evbuf_pending = param32 & 0x3; | 406 | evbuf_pending = param32 & 0x3; |
408 | if (finished_sccb) { | 407 | if (finished_sccb) { |
409 | del_timer(&sclp_request_timer); | 408 | del_timer(&sclp_request_timer); |
410 | sclp_running_state = sclp_running_state_reset_pending; | 409 | sclp_running_state = sclp_running_state_reset_pending; |
411 | req = __sclp_find_req(finished_sccb); | 410 | req = __sclp_find_req(finished_sccb); |
412 | if (req) { | 411 | if (req) { |
413 | /* Request post-processing */ | 412 | /* Request post-processing */ |
414 | list_del(&req->list); | 413 | list_del(&req->list); |
415 | req->status = SCLP_REQ_DONE; | 414 | req->status = SCLP_REQ_DONE; |
416 | if (req->callback) { | 415 | if (req->callback) { |
417 | spin_unlock(&sclp_lock); | 416 | spin_unlock(&sclp_lock); |
418 | req->callback(req, req->callback_data); | 417 | req->callback(req, req->callback_data); |
419 | spin_lock(&sclp_lock); | 418 | spin_lock(&sclp_lock); |
420 | } | 419 | } |
421 | } | 420 | } |
422 | sclp_running_state = sclp_running_state_idle; | 421 | sclp_running_state = sclp_running_state_idle; |
423 | } | 422 | } |
424 | if (evbuf_pending && | 423 | if (evbuf_pending && |
425 | sclp_activation_state == sclp_activation_state_active) | 424 | sclp_activation_state == sclp_activation_state_active) |
426 | __sclp_queue_read_req(); | 425 | __sclp_queue_read_req(); |
427 | spin_unlock(&sclp_lock); | 426 | spin_unlock(&sclp_lock); |
428 | sclp_process_queue(); | 427 | sclp_process_queue(); |
429 | } | 428 | } |
430 | 429 | ||
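The interrupt parameter above packs two facts into one 32-bit word: SCCBs are 8-byte aligned, so masking with 0xfffffff8 recovers the address of the completed buffer, while the two low bits flag pending event buffers. A minimal userspace sketch of just that decoding (nothing here beyond what the handler itself shows):

    #include <stdint.h>
    #include <stdio.h>

    /* Decode an SCLP interrupt parameter the way sclp_interrupt_handler()
     * does: SCCBs are 8-byte aligned, so the low 3 bits are free for flags. */
    static void decode_param32(uint32_t param32)
    {
    	uint32_t finished_sccb = param32 & 0xfffffff8;
    	uint32_t evbuf_pending = param32 & 0x3;

    	printf("sccb=0x%08x evbuf_pending=%u\n", finished_sccb, evbuf_pending);
    }

    int main(void)
    {
    	decode_param32(0x00123458u | 0x1u); /* completed SCCB, events pending */
    	decode_param32(0x00000002u);        /* no SCCB, events pending only   */
    	return 0;
    }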
431 | /* Convert interval in jiffies to TOD ticks. */ | 430 | /* Convert interval in jiffies to TOD ticks. */ |
432 | static inline u64 | 431 | static inline u64 |
433 | sclp_tod_from_jiffies(unsigned long jiffies) | 432 | sclp_tod_from_jiffies(unsigned long jiffies) |
434 | { | 433 | { |
435 | return (u64) (jiffies / HZ) << 32; | 434 | return (u64) (jiffies / HZ) << 32; |
436 | } | 435 | } |
437 | 436 | ||
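The shift by 32 works because on s390 bit 51 of the 64-bit TOD clock increments once per microsecond, so one second is 10^6 * 2^12 = 4096000000 TOD units; 2^32 overestimates that by about 5%, erring on the long side, which is safe for a timeout. A standalone check of the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* 1 us = 2^12 TOD units (TOD clock bit 51), so 1 s = 10^6 * 4096 units. */
    int main(void)
    {
    	uint64_t exact  = 1000000ULL * 4096; /* 4096000000 */
    	uint64_t approx = 1ULL << 32;        /* 4294967296 */

    	printf("exact=%llu approx=%llu error=%.1f%%\n",
    	       (unsigned long long) exact, (unsigned long long) approx,
    	       100.0 * (double) (approx - exact) / (double) exact);
    	return 0;
    }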
438 | /* Wait until a currently running request has finished. Note: while this function | 437 | /* Wait until a currently running request has finished. Note: while this function |
439 | * is running, no timers are served on the calling CPU. */ | 438 | * is running, no timers are served on the calling CPU. */ |
440 | void | 439 | void |
441 | sclp_sync_wait(void) | 440 | sclp_sync_wait(void) |
442 | { | 441 | { |
443 | unsigned long long old_tick; | 442 | unsigned long long old_tick; |
444 | unsigned long flags; | 443 | unsigned long flags; |
445 | unsigned long cr0, cr0_sync; | 444 | unsigned long cr0, cr0_sync; |
446 | u64 timeout; | 445 | u64 timeout; |
447 | int irq_context; | 446 | int irq_context; |
448 | 447 | ||
449 | /* We'll be disabling timer interrupts, so we need a custom timeout | 448 | /* We'll be disabling timer interrupts, so we need a custom timeout |
450 | * mechanism */ | 449 | * mechanism */ |
451 | timeout = 0; | 450 | timeout = 0; |
452 | if (timer_pending(&sclp_request_timer)) { | 451 | if (timer_pending(&sclp_request_timer)) { |
453 | /* Get timeout TOD value */ | 452 | /* Get timeout TOD value */ |
454 | timeout = get_clock() + | 453 | timeout = get_clock() + |
455 | sclp_tod_from_jiffies(sclp_request_timer.expires - | 454 | sclp_tod_from_jiffies(sclp_request_timer.expires - |
456 | jiffies); | 455 | jiffies); |
457 | } | 456 | } |
458 | local_irq_save(flags); | 457 | local_irq_save(flags); |
459 | /* Prevent bottom half from executing once we force interrupts open */ | 458 | /* Prevent bottom half from executing once we force interrupts open */ |
460 | irq_context = in_interrupt(); | 459 | irq_context = in_interrupt(); |
461 | if (!irq_context) | 460 | if (!irq_context) |
462 | local_bh_disable(); | 461 | local_bh_disable(); |
463 | /* Enable service-signal interruption, disable timer interrupts */ | 462 | /* Enable service-signal interruption, disable timer interrupts */ |
464 | old_tick = local_tick_disable(); | 463 | old_tick = local_tick_disable(); |
465 | trace_hardirqs_on(); | 464 | trace_hardirqs_on(); |
466 | __ctl_store(cr0, 0, 0); | 465 | __ctl_store(cr0, 0, 0); |
467 | cr0_sync = cr0; | 466 | cr0_sync = cr0; |
468 | cr0_sync &= 0xffff00a0; | 467 | cr0_sync &= 0xffff00a0; |
469 | cr0_sync |= 0x00000200; | 468 | cr0_sync |= 0x00000200; |
470 | __ctl_load(cr0_sync, 0, 0); | 469 | __ctl_load(cr0_sync, 0, 0); |
471 | __arch_local_irq_stosm(0x01); | 470 | __arch_local_irq_stosm(0x01); |
472 | /* Loop until driver state indicates finished request */ | 471 | /* Loop until driver state indicates finished request */ |
473 | while (sclp_running_state != sclp_running_state_idle) { | 472 | while (sclp_running_state != sclp_running_state_idle) { |
474 | /* Check for expired request timer */ | 473 | /* Check for expired request timer */ |
475 | if (timer_pending(&sclp_request_timer) && | 474 | if (timer_pending(&sclp_request_timer) && |
476 | get_clock() > timeout && | 475 | get_clock() > timeout && |
477 | del_timer(&sclp_request_timer)) | 476 | del_timer(&sclp_request_timer)) |
478 | sclp_request_timer.function(sclp_request_timer.data); | 477 | sclp_request_timer.function(sclp_request_timer.data); |
479 | cpu_relax(); | 478 | cpu_relax(); |
480 | } | 479 | } |
481 | local_irq_disable(); | 480 | local_irq_disable(); |
482 | __ctl_load(cr0, 0, 0); | 481 | __ctl_load(cr0, 0, 0); |
483 | if (!irq_context) | 482 | if (!irq_context) |
484 | _local_bh_enable(); | 483 | _local_bh_enable(); |
485 | local_tick_enable(old_tick); | 484 | local_tick_enable(old_tick); |
486 | local_irq_restore(flags); | 485 | local_irq_restore(flags); |
487 | } | 486 | } |
488 | EXPORT_SYMBOL(sclp_sync_wait); | 487 | EXPORT_SYMBOL(sclp_sync_wait); |
489 | 488 | ||
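The control-register juggling above opens exactly one interrupt source while the loop polls: 0x00000200 is the service-signal subclass-mask bit of CR0 (bit 22 in IBM's MSB-first numbering, so value 1 << (31 - 22)), and the AND with 0xffff00a0 clears the remaining external-interrupt subclass bits; the exact meaning of the bits kept by the 0xa0 part is architecture-defined and not relied on here. A sketch of the mask arithmetic alone (the real __ctl_store/__ctl_load are s390 privileged operations):

    #include <stdint.h>
    #include <stdio.h>

    /* CR0 bits are numbered from the MSB on s390: bit 22 of the 32-bit
     * register is 1u << (31 - 22) == 0x200, the service-signal subclass. */
    int main(void)
    {
    	uint32_t cr0      = 0xffffffffu;        /* example: all subclasses on */
    	uint32_t cr0_sync = (cr0 & 0xffff00a0u) /* drop other ext. subclasses */
    	                  | 0x00000200u;        /* keep service signal open   */

    	printf("cr0=0x%08x cr0_sync=0x%08x\n", cr0, cr0_sync);
    	return 0;
    }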
490 | /* Dispatch changes in send and receive mask to registered listeners. */ | 489 | /* Dispatch changes in send and receive mask to registered listeners. */ |
491 | static void | 490 | static void |
492 | sclp_dispatch_state_change(void) | 491 | sclp_dispatch_state_change(void) |
493 | { | 492 | { |
494 | struct list_head *l; | 493 | struct list_head *l; |
495 | struct sclp_register *reg; | 494 | struct sclp_register *reg; |
496 | unsigned long flags; | 495 | unsigned long flags; |
497 | sccb_mask_t receive_mask; | 496 | sccb_mask_t receive_mask; |
498 | sccb_mask_t send_mask; | 497 | sccb_mask_t send_mask; |
499 | 498 | ||
500 | do { | 499 | do { |
501 | spin_lock_irqsave(&sclp_lock, flags); | 500 | spin_lock_irqsave(&sclp_lock, flags); |
502 | reg = NULL; | 501 | reg = NULL; |
503 | list_for_each(l, &sclp_reg_list) { | 502 | list_for_each(l, &sclp_reg_list) { |
504 | reg = list_entry(l, struct sclp_register, list); | 503 | reg = list_entry(l, struct sclp_register, list); |
505 | receive_mask = reg->send_mask & sclp_receive_mask; | 504 | receive_mask = reg->send_mask & sclp_receive_mask; |
506 | send_mask = reg->receive_mask & sclp_send_mask; | 505 | send_mask = reg->receive_mask & sclp_send_mask; |
507 | if (reg->sclp_receive_mask != receive_mask || | 506 | if (reg->sclp_receive_mask != receive_mask || |
508 | reg->sclp_send_mask != send_mask) { | 507 | reg->sclp_send_mask != send_mask) { |
509 | reg->sclp_receive_mask = receive_mask; | 508 | reg->sclp_receive_mask = receive_mask; |
510 | reg->sclp_send_mask = send_mask; | 509 | reg->sclp_send_mask = send_mask; |
511 | break; | 510 | break; |
512 | } else | 511 | } else |
513 | reg = NULL; | 512 | reg = NULL; |
514 | } | 513 | } |
515 | spin_unlock_irqrestore(&sclp_lock, flags); | 514 | spin_unlock_irqrestore(&sclp_lock, flags); |
516 | if (reg && reg->state_change_fn) | 515 | if (reg && reg->state_change_fn) |
517 | reg->state_change_fn(reg); | 516 | reg->state_change_fn(reg); |
518 | } while (reg); | 517 | } while (reg); |
519 | } | 518 | } |
520 | 519 | ||
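Note the shape of sclp_dispatch_state_change(): the lock cannot be held across state_change_fn(), so the scan restarts from the head of the list after every callback, using the updated per-listener masks to skip listeners already notified. A generic userspace analogue of that drop-lock-and-rescan pattern (all names here are illustrative, not from this driver; build with cc -pthread):

    #include <pthread.h>
    #include <stdio.h>

    /* Illustrative drop-lock-and-rescan notification; 'pending' plays the
     * role of the mask comparison that picks the next listener to call. */
    struct listener { int pending; void (*fn)(struct listener *); };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct listener listeners[3];

    static void dispatch(void)
    {
    	struct listener *l;

    	do {
    		l = NULL;
    		pthread_mutex_lock(&lock);
    		for (int i = 0; i < 3; i++)
    			if (listeners[i].pending) {
    				listeners[i].pending = 0; /* mark as notified */
    				l = &listeners[i];
    				break;
    			}
    		pthread_mutex_unlock(&lock);
    		if (l && l->fn)
    			l->fn(l); /* callback runs without the lock held */
    	} while (l);
    }

    static void hello(struct listener *l)
    {
    	printf("notified listener %p\n", (void *) l);
    }

    int main(void)
    {
    	for (int i = 0; i < 3; i++)
    		listeners[i] = (struct listener) { .pending = 1, .fn = hello };
    	dispatch();
    	return 0;
    }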
521 | struct sclp_statechangebuf { | 520 | struct sclp_statechangebuf { |
522 | struct evbuf_header header; | 521 | struct evbuf_header header; |
523 | u8 validity_sclp_active_facility_mask : 1; | 522 | u8 validity_sclp_active_facility_mask : 1; |
524 | u8 validity_sclp_receive_mask : 1; | 523 | u8 validity_sclp_receive_mask : 1; |
525 | u8 validity_sclp_send_mask : 1; | 524 | u8 validity_sclp_send_mask : 1; |
526 | u8 validity_read_data_function_mask : 1; | 525 | u8 validity_read_data_function_mask : 1; |
527 | u16 _zeros : 12; | 526 | u16 _zeros : 12; |
528 | u16 mask_length; | 527 | u16 mask_length; |
529 | u64 sclp_active_facility_mask; | 528 | u64 sclp_active_facility_mask; |
530 | sccb_mask_t sclp_receive_mask; | 529 | sccb_mask_t sclp_receive_mask; |
531 | sccb_mask_t sclp_send_mask; | 530 | sccb_mask_t sclp_send_mask; |
532 | u32 read_data_function_mask; | 531 | u32 read_data_function_mask; |
533 | } __attribute__((packed)); | 532 | } __attribute__((packed)); |
534 | 533 | ||
535 | 534 | ||
536 | /* State change event callback. Inform listeners of changes. */ | 535 | /* State change event callback. Inform listeners of changes. */ |
537 | static void | 536 | static void |
538 | sclp_state_change_cb(struct evbuf_header *evbuf) | 537 | sclp_state_change_cb(struct evbuf_header *evbuf) |
539 | { | 538 | { |
540 | unsigned long flags; | 539 | unsigned long flags; |
541 | struct sclp_statechangebuf *scbuf; | 540 | struct sclp_statechangebuf *scbuf; |
542 | 541 | ||
543 | scbuf = (struct sclp_statechangebuf *) evbuf; | 542 | scbuf = (struct sclp_statechangebuf *) evbuf; |
544 | if (scbuf->mask_length != sizeof(sccb_mask_t)) | 543 | if (scbuf->mask_length != sizeof(sccb_mask_t)) |
545 | return; | 544 | return; |
546 | spin_lock_irqsave(&sclp_lock, flags); | 545 | spin_lock_irqsave(&sclp_lock, flags); |
547 | if (scbuf->validity_sclp_receive_mask) | 546 | if (scbuf->validity_sclp_receive_mask) |
548 | sclp_receive_mask = scbuf->sclp_receive_mask; | 547 | sclp_receive_mask = scbuf->sclp_receive_mask; |
549 | if (scbuf->validity_sclp_send_mask) | 548 | if (scbuf->validity_sclp_send_mask) |
550 | sclp_send_mask = scbuf->sclp_send_mask; | 549 | sclp_send_mask = scbuf->sclp_send_mask; |
551 | spin_unlock_irqrestore(&sclp_lock, flags); | 550 | spin_unlock_irqrestore(&sclp_lock, flags); |
552 | if (scbuf->validity_sclp_active_facility_mask) | 551 | if (scbuf->validity_sclp_active_facility_mask) |
553 | sclp_facilities = scbuf->sclp_active_facility_mask; | 552 | sclp_facilities = scbuf->sclp_active_facility_mask; |
554 | sclp_dispatch_state_change(); | 553 | sclp_dispatch_state_change(); |
555 | } | 554 | } |
556 | 555 | ||
557 | static struct sclp_register sclp_state_change_event = { | 556 | static struct sclp_register sclp_state_change_event = { |
558 | .receive_mask = EVTYP_STATECHANGE_MASK, | 557 | .receive_mask = EVTYP_STATECHANGE_MASK, |
559 | .receiver_fn = sclp_state_change_cb | 558 | .receiver_fn = sclp_state_change_cb |
560 | }; | 559 | }; |
561 | 560 | ||
562 | /* Calculate receive and send mask of currently registered listeners. | 561 | /* Calculate receive and send mask of currently registered listeners. |
563 | * Called while sclp_lock is locked. */ | 562 | * Called while sclp_lock is locked. */ |
564 | static inline void | 563 | static inline void |
565 | __sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask) | 564 | __sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask) |
566 | { | 565 | { |
567 | struct list_head *l; | 566 | struct list_head *l; |
568 | struct sclp_register *t; | 567 | struct sclp_register *t; |
569 | 568 | ||
570 | *receive_mask = 0; | 569 | *receive_mask = 0; |
571 | *send_mask = 0; | 570 | *send_mask = 0; |
572 | list_for_each(l, &sclp_reg_list) { | 571 | list_for_each(l, &sclp_reg_list) { |
573 | t = list_entry(l, struct sclp_register, list); | 572 | t = list_entry(l, struct sclp_register, list); |
574 | *receive_mask |= t->receive_mask; | 573 | *receive_mask |= t->receive_mask; |
575 | *send_mask |= t->send_mask; | 574 | *send_mask |= t->send_mask; |
576 | } | 575 | } |
577 | } | 576 | } |
578 | 577 | ||
579 | /* Register event listener. Return 0 on success, non-zero otherwise. */ | 578 | /* Register event listener. Return 0 on success, non-zero otherwise. */ |
580 | int | 579 | int |
581 | sclp_register(struct sclp_register *reg) | 580 | sclp_register(struct sclp_register *reg) |
582 | { | 581 | { |
583 | unsigned long flags; | 582 | unsigned long flags; |
584 | sccb_mask_t receive_mask; | 583 | sccb_mask_t receive_mask; |
585 | sccb_mask_t send_mask; | 584 | sccb_mask_t send_mask; |
586 | int rc; | 585 | int rc; |
587 | 586 | ||
588 | rc = sclp_init(); | 587 | rc = sclp_init(); |
589 | if (rc) | 588 | if (rc) |
590 | return rc; | 589 | return rc; |
591 | spin_lock_irqsave(&sclp_lock, flags); | 590 | spin_lock_irqsave(&sclp_lock, flags); |
592 | /* Check event mask for collisions */ | 591 | /* Check event mask for collisions */ |
593 | __sclp_get_mask(&receive_mask, &send_mask); | 592 | __sclp_get_mask(&receive_mask, &send_mask); |
594 | if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) { | 593 | if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) { |
595 | spin_unlock_irqrestore(&sclp_lock, flags); | 594 | spin_unlock_irqrestore(&sclp_lock, flags); |
596 | return -EBUSY; | 595 | return -EBUSY; |
597 | } | 596 | } |
598 | /* Trigger initial state change callback */ | 597 | /* Trigger initial state change callback */ |
599 | reg->sclp_receive_mask = 0; | 598 | reg->sclp_receive_mask = 0; |
600 | reg->sclp_send_mask = 0; | 599 | reg->sclp_send_mask = 0; |
601 | reg->pm_event_posted = 0; | 600 | reg->pm_event_posted = 0; |
602 | list_add(®->list, &sclp_reg_list); | 601 | list_add(®->list, &sclp_reg_list); |
603 | spin_unlock_irqrestore(&sclp_lock, flags); | 602 | spin_unlock_irqrestore(&sclp_lock, flags); |
604 | rc = sclp_init_mask(1); | 603 | rc = sclp_init_mask(1); |
605 | if (rc) { | 604 | if (rc) { |
606 | spin_lock_irqsave(&sclp_lock, flags); | 605 | spin_lock_irqsave(&sclp_lock, flags); |
607 | list_del(®->list); | 606 | list_del(®->list); |
608 | spin_unlock_irqrestore(&sclp_lock, flags); | 607 | spin_unlock_irqrestore(&sclp_lock, flags); |
609 | } | 608 | } |
610 | return rc; | 609 | return rc; |
611 | } | 610 | } |
612 | 611 | ||
613 | EXPORT_SYMBOL(sclp_register); | 612 | EXPORT_SYMBOL(sclp_register); |
614 | 613 | ||
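For comparison, a kernel-side registration sketch modeled on sclp_state_change_event above; EVTYP_EXAMPLE_MASK and both functions are hypothetical stand-ins, and a real caller must pick masks that do not collide with listeners already registered, or sclp_register() fails with -EBUSY:

    /* Hypothetical listener; EVTYP_EXAMPLE_MASK and these functions are
     * illustrative names, not part of this driver. */
    static void my_receiver(struct evbuf_header *evbuf)
    {
    	/* handle the event, then mark the buffer processed so that
    	 * sclp_remove_processed() can reclaim it (flag bit 0x80) */
    	evbuf->flags |= 0x80;
    }

    static struct sclp_register my_listener = {
    	.receive_mask = EVTYP_EXAMPLE_MASK,
    	.receiver_fn  = my_receiver,
    };

    static int __init my_init(void)
    {
    	return sclp_register(&my_listener); /* -EBUSY on a mask collision */
    }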
615 | /* Unregister event listener. */ | 614 | /* Unregister event listener. */ |
616 | void | 615 | void |
617 | sclp_unregister(struct sclp_register *reg) | 616 | sclp_unregister(struct sclp_register *reg) |
618 | { | 617 | { |
619 | unsigned long flags; | 618 | unsigned long flags; |
620 | 619 | ||
621 | spin_lock_irqsave(&sclp_lock, flags); | 620 | spin_lock_irqsave(&sclp_lock, flags); |
622 | list_del(®->list); | 621 | list_del(®->list); |
623 | spin_unlock_irqrestore(&sclp_lock, flags); | 622 | spin_unlock_irqrestore(&sclp_lock, flags); |
624 | sclp_init_mask(1); | 623 | sclp_init_mask(1); |
625 | } | 624 | } |
626 | 625 | ||
627 | EXPORT_SYMBOL(sclp_unregister); | 626 | EXPORT_SYMBOL(sclp_unregister); |
628 | 627 | ||
629 | /* Remove event buffers which are marked processed. Return the number of | 628 | /* Remove event buffers which are marked processed. Return the number of |
630 | * remaining event buffers. */ | 629 | * remaining event buffers. */ |
631 | int | 630 | int |
632 | sclp_remove_processed(struct sccb_header *sccb) | 631 | sclp_remove_processed(struct sccb_header *sccb) |
633 | { | 632 | { |
634 | struct evbuf_header *evbuf; | 633 | struct evbuf_header *evbuf; |
635 | int unprocessed; | 634 | int unprocessed; |
636 | u16 remaining; | 635 | u16 remaining; |
637 | 636 | ||
638 | evbuf = (struct evbuf_header *) (sccb + 1); | 637 | evbuf = (struct evbuf_header *) (sccb + 1); |
639 | unprocessed = 0; | 638 | unprocessed = 0; |
640 | remaining = sccb->length - sizeof(struct sccb_header); | 639 | remaining = sccb->length - sizeof(struct sccb_header); |
641 | while (remaining > 0) { | 640 | while (remaining > 0) { |
642 | remaining -= evbuf->length; | 641 | remaining -= evbuf->length; |
643 | if (evbuf->flags & 0x80) { | 642 | if (evbuf->flags & 0x80) { |
644 | sccb->length -= evbuf->length; | 643 | sccb->length -= evbuf->length; |
645 | memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length), | 644 | memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length), |
646 | remaining); | 645 | remaining); |
647 | } else { | 646 | } else { |
648 | unprocessed++; | 647 | unprocessed++; |
649 | evbuf = (struct evbuf_header *) | 648 | evbuf = (struct evbuf_header *) |
650 | ((addr_t) evbuf + evbuf->length); | 649 | ((addr_t) evbuf + evbuf->length); |
651 | } | 650 | } |
652 | } | 651 | } |
653 | return unprocessed; | 652 | return unprocessed; |
654 | } | 653 | } |
655 | 654 | ||
656 | EXPORT_SYMBOL(sclp_remove_processed); | 655 | EXPORT_SYMBOL(sclp_remove_processed); |
657 | 656 | ||
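The loop above squeezes processed event buffers (flag bit 0x80) out of the SCCB and shrinks sccb->length as it goes. A self-contained userspace harness of the same compaction, with the structs reduced to the fields the loop touches and memmove used since source and destination can overlap in general:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct sccb_header  { uint16_t length; };                  /* reduced stand-in */
    struct evbuf_header { uint16_t length; uint8_t type, flags; };

    static int remove_processed(struct sccb_header *sccb)
    {
    	struct evbuf_header *evbuf = (struct evbuf_header *) (sccb + 1);
    	int unprocessed = 0;
    	uint16_t remaining = sccb->length - sizeof(struct sccb_header);

    	while (remaining > 0) {
    		remaining -= evbuf->length;
    		if (evbuf->flags & 0x80) {   /* processed: squeeze it out */
    			sccb->length -= evbuf->length;
    			memmove(evbuf, (char *) evbuf + evbuf->length, remaining);
    		} else {                     /* unprocessed: step past it */
    			unprocessed++;
    			evbuf = (struct evbuf_header *)
    				((char *) evbuf + evbuf->length);
    		}
    	}
    	return unprocessed;
    }

    int main(void)
    {
    	_Alignas(8) uint8_t page[64] = { 0 };
    	struct sccb_header *sccb = (struct sccb_header *) page;
    	struct evbuf_header *e0 = (struct evbuf_header *) (page + sizeof(*sccb));
    	struct evbuf_header *e1 = (struct evbuf_header *) (page + sizeof(*sccb) + 8);

    	*e0 = (struct evbuf_header) { .length = 8, .flags = 0x80 }; /* processed */
    	*e1 = (struct evbuf_header) { .length = 8, .flags = 0x00 }; /* pending   */
    	sccb->length = sizeof(*sccb) + 16;

    	printf("unprocessed=%d, new length=%u\n",
    	       remove_processed(sccb), (unsigned) sccb->length);
    	return 0;
    }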
658 | struct init_sccb { | 657 | struct init_sccb { |
659 | struct sccb_header header; | 658 | struct sccb_header header; |
660 | u16 _reserved; | 659 | u16 _reserved; |
661 | u16 mask_length; | 660 | u16 mask_length; |
662 | sccb_mask_t receive_mask; | 661 | sccb_mask_t receive_mask; |
663 | sccb_mask_t send_mask; | 662 | sccb_mask_t send_mask; |
664 | sccb_mask_t sclp_receive_mask; | 663 | sccb_mask_t sclp_receive_mask; |
665 | sccb_mask_t sclp_send_mask; | 664 | sccb_mask_t sclp_send_mask; |
666 | } __attribute__((packed)); | 665 | } __attribute__((packed)); |
667 | 666 | ||
668 | /* Prepare init mask request. Called while sclp_lock is locked. */ | 667 | /* Prepare init mask request. Called while sclp_lock is locked. */ |
669 | static inline void | 668 | static inline void |
670 | __sclp_make_init_req(u32 receive_mask, u32 send_mask) | 669 | __sclp_make_init_req(u32 receive_mask, u32 send_mask) |
671 | { | 670 | { |
672 | struct init_sccb *sccb; | 671 | struct init_sccb *sccb; |
673 | 672 | ||
674 | sccb = (struct init_sccb *) sclp_init_sccb; | 673 | sccb = (struct init_sccb *) sclp_init_sccb; |
675 | clear_page(sccb); | 674 | clear_page(sccb); |
676 | memset(&sclp_init_req, 0, sizeof(struct sclp_req)); | 675 | memset(&sclp_init_req, 0, sizeof(struct sclp_req)); |
677 | sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK; | 676 | sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK; |
678 | sclp_init_req.status = SCLP_REQ_FILLED; | 677 | sclp_init_req.status = SCLP_REQ_FILLED; |
679 | sclp_init_req.start_count = 0; | 678 | sclp_init_req.start_count = 0; |
680 | sclp_init_req.callback = NULL; | 679 | sclp_init_req.callback = NULL; |
681 | sclp_init_req.callback_data = NULL; | 680 | sclp_init_req.callback_data = NULL; |
682 | sclp_init_req.sccb = sccb; | 681 | sclp_init_req.sccb = sccb; |
683 | sccb->header.length = sizeof(struct init_sccb); | 682 | sccb->header.length = sizeof(struct init_sccb); |
684 | sccb->mask_length = sizeof(sccb_mask_t); | 683 | sccb->mask_length = sizeof(sccb_mask_t); |
685 | sccb->receive_mask = receive_mask; | 684 | sccb->receive_mask = receive_mask; |
686 | sccb->send_mask = send_mask; | 685 | sccb->send_mask = send_mask; |
687 | sccb->sclp_receive_mask = 0; | 686 | sccb->sclp_receive_mask = 0; |
688 | sccb->sclp_send_mask = 0; | 687 | sccb->sclp_send_mask = 0; |
689 | } | 688 | } |
690 | 689 | ||
691 | /* Start init mask request. If calculate is non-zero, calculate the mask as | 690 | /* Start init mask request. If calculate is non-zero, calculate the mask as |
692 | * requested by registered listeners. Use zero mask otherwise. Return 0 on | 691 | * requested by registered listeners. Use zero mask otherwise. Return 0 on |
693 | * success, non-zero otherwise. */ | 692 | * success, non-zero otherwise. */ |
694 | static int | 693 | static int |
695 | sclp_init_mask(int calculate) | 694 | sclp_init_mask(int calculate) |
696 | { | 695 | { |
697 | unsigned long flags; | 696 | unsigned long flags; |
698 | struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb; | 697 | struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb; |
699 | sccb_mask_t receive_mask; | 698 | sccb_mask_t receive_mask; |
700 | sccb_mask_t send_mask; | 699 | sccb_mask_t send_mask; |
701 | int retry; | 700 | int retry; |
702 | int rc; | 701 | int rc; |
703 | unsigned long wait; | 702 | unsigned long wait; |
704 | 703 | ||
705 | spin_lock_irqsave(&sclp_lock, flags); | 704 | spin_lock_irqsave(&sclp_lock, flags); |
706 | /* Check if interface is in appropriate state */ | 705 | /* Check if interface is in appropriate state */ |
707 | if (sclp_mask_state != sclp_mask_state_idle) { | 706 | if (sclp_mask_state != sclp_mask_state_idle) { |
708 | spin_unlock_irqrestore(&sclp_lock, flags); | 707 | spin_unlock_irqrestore(&sclp_lock, flags); |
709 | return -EBUSY; | 708 | return -EBUSY; |
710 | } | 709 | } |
711 | if (sclp_activation_state == sclp_activation_state_inactive) { | 710 | if (sclp_activation_state == sclp_activation_state_inactive) { |
712 | spin_unlock_irqrestore(&sclp_lock, flags); | 711 | spin_unlock_irqrestore(&sclp_lock, flags); |
713 | return -EINVAL; | 712 | return -EINVAL; |
714 | } | 713 | } |
715 | sclp_mask_state = sclp_mask_state_initializing; | 714 | sclp_mask_state = sclp_mask_state_initializing; |
716 | /* Determine mask */ | 715 | /* Determine mask */ |
717 | if (calculate) | 716 | if (calculate) |
718 | __sclp_get_mask(&receive_mask, &send_mask); | 717 | __sclp_get_mask(&receive_mask, &send_mask); |
719 | else { | 718 | else { |
720 | receive_mask = 0; | 719 | receive_mask = 0; |
721 | send_mask = 0; | 720 | send_mask = 0; |
722 | } | 721 | } |
723 | rc = -EIO; | 722 | rc = -EIO; |
724 | for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) { | 723 | for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) { |
725 | /* Prepare request */ | 724 | /* Prepare request */ |
726 | __sclp_make_init_req(receive_mask, send_mask); | 725 | __sclp_make_init_req(receive_mask, send_mask); |
727 | spin_unlock_irqrestore(&sclp_lock, flags); | 726 | spin_unlock_irqrestore(&sclp_lock, flags); |
728 | if (sclp_add_request(&sclp_init_req)) { | 727 | if (sclp_add_request(&sclp_init_req)) { |
729 | /* Try again later */ | 728 | /* Try again later */ |
730 | wait = jiffies + SCLP_BUSY_INTERVAL * HZ; | 729 | wait = jiffies + SCLP_BUSY_INTERVAL * HZ; |
731 | while (time_before(jiffies, wait)) | 730 | while (time_before(jiffies, wait)) |
732 | sclp_sync_wait(); | 731 | sclp_sync_wait(); |
733 | spin_lock_irqsave(&sclp_lock, flags); | 732 | spin_lock_irqsave(&sclp_lock, flags); |
734 | continue; | 733 | continue; |
735 | } | 734 | } |
736 | while (sclp_init_req.status != SCLP_REQ_DONE && | 735 | while (sclp_init_req.status != SCLP_REQ_DONE && |
737 | sclp_init_req.status != SCLP_REQ_FAILED) | 736 | sclp_init_req.status != SCLP_REQ_FAILED) |
738 | sclp_sync_wait(); | 737 | sclp_sync_wait(); |
739 | spin_lock_irqsave(&sclp_lock, flags); | 738 | spin_lock_irqsave(&sclp_lock, flags); |
740 | if (sclp_init_req.status == SCLP_REQ_DONE && | 739 | if (sclp_init_req.status == SCLP_REQ_DONE && |
741 | sccb->header.response_code == 0x20) { | 740 | sccb->header.response_code == 0x20) { |
742 | /* Successful request */ | 741 | /* Successful request */ |
743 | if (calculate) { | 742 | if (calculate) { |
744 | sclp_receive_mask = sccb->sclp_receive_mask; | 743 | sclp_receive_mask = sccb->sclp_receive_mask; |
745 | sclp_send_mask = sccb->sclp_send_mask; | 744 | sclp_send_mask = sccb->sclp_send_mask; |
746 | } else { | 745 | } else { |
747 | sclp_receive_mask = 0; | 746 | sclp_receive_mask = 0; |
748 | sclp_send_mask = 0; | 747 | sclp_send_mask = 0; |
749 | } | 748 | } |
750 | spin_unlock_irqrestore(&sclp_lock, flags); | 749 | spin_unlock_irqrestore(&sclp_lock, flags); |
751 | sclp_dispatch_state_change(); | 750 | sclp_dispatch_state_change(); |
752 | spin_lock_irqsave(&sclp_lock, flags); | 751 | spin_lock_irqsave(&sclp_lock, flags); |
753 | rc = 0; | 752 | rc = 0; |
754 | break; | 753 | break; |
755 | } | 754 | } |
756 | } | 755 | } |
757 | sclp_mask_state = sclp_mask_state_idle; | 756 | sclp_mask_state = sclp_mask_state_idle; |
758 | spin_unlock_irqrestore(&sclp_lock, flags); | 757 | spin_unlock_irqrestore(&sclp_lock, flags); |
759 | return rc; | 758 | return rc; |
760 | } | 759 | } |
761 | 760 | ||
762 | /* Deactivate SCLP interface. On success, new requests will be rejected, | 761 | /* Deactivate SCLP interface. On success, new requests will be rejected, |
763 | * events will no longer be dispatched. Return 0 on success, non-zero | 762 | * events will no longer be dispatched. Return 0 on success, non-zero |
764 | * otherwise. */ | 763 | * otherwise. */ |
765 | int | 764 | int |
766 | sclp_deactivate(void) | 765 | sclp_deactivate(void) |
767 | { | 766 | { |
768 | unsigned long flags; | 767 | unsigned long flags; |
769 | int rc; | 768 | int rc; |
770 | 769 | ||
771 | spin_lock_irqsave(&sclp_lock, flags); | 770 | spin_lock_irqsave(&sclp_lock, flags); |
772 | /* Deactivate can only be called when active */ | 771 | /* Deactivate can only be called when active */ |
773 | if (sclp_activation_state != sclp_activation_state_active) { | 772 | if (sclp_activation_state != sclp_activation_state_active) { |
774 | spin_unlock_irqrestore(&sclp_lock, flags); | 773 | spin_unlock_irqrestore(&sclp_lock, flags); |
775 | return -EINVAL; | 774 | return -EINVAL; |
776 | } | 775 | } |
777 | sclp_activation_state = sclp_activation_state_deactivating; | 776 | sclp_activation_state = sclp_activation_state_deactivating; |
778 | spin_unlock_irqrestore(&sclp_lock, flags); | 777 | spin_unlock_irqrestore(&sclp_lock, flags); |
779 | rc = sclp_init_mask(0); | 778 | rc = sclp_init_mask(0); |
780 | spin_lock_irqsave(&sclp_lock, flags); | 779 | spin_lock_irqsave(&sclp_lock, flags); |
781 | if (rc == 0) | 780 | if (rc == 0) |
782 | sclp_activation_state = sclp_activation_state_inactive; | 781 | sclp_activation_state = sclp_activation_state_inactive; |
783 | else | 782 | else |
784 | sclp_activation_state = sclp_activation_state_active; | 783 | sclp_activation_state = sclp_activation_state_active; |
785 | spin_unlock_irqrestore(&sclp_lock, flags); | 784 | spin_unlock_irqrestore(&sclp_lock, flags); |
786 | return rc; | 785 | return rc; |
787 | } | 786 | } |
788 | 787 | ||
789 | EXPORT_SYMBOL(sclp_deactivate); | 788 | EXPORT_SYMBOL(sclp_deactivate); |
790 | 789 | ||
791 | /* Reactivate SCLP interface after sclp_deactivate. On success, new | 790 | /* Reactivate SCLP interface after sclp_deactivate. On success, new |
792 | * requests will be accepted, events will be dispatched again. Return 0 on | 791 | * requests will be accepted, events will be dispatched again. Return 0 on |
793 | * success, non-zero otherwise. */ | 792 | * success, non-zero otherwise. */ |
794 | int | 793 | int |
795 | sclp_reactivate(void) | 794 | sclp_reactivate(void) |
796 | { | 795 | { |
797 | unsigned long flags; | 796 | unsigned long flags; |
798 | int rc; | 797 | int rc; |
799 | 798 | ||
800 | spin_lock_irqsave(&sclp_lock, flags); | 799 | spin_lock_irqsave(&sclp_lock, flags); |
801 | /* Reactivate can only be called when inactive */ | 800 | /* Reactivate can only be called when inactive */ |
802 | if (sclp_activation_state != sclp_activation_state_inactive) { | 801 | if (sclp_activation_state != sclp_activation_state_inactive) { |
803 | spin_unlock_irqrestore(&sclp_lock, flags); | 802 | spin_unlock_irqrestore(&sclp_lock, flags); |
804 | return -EINVAL; | 803 | return -EINVAL; |
805 | } | 804 | } |
806 | sclp_activation_state = sclp_activation_state_activating; | 805 | sclp_activation_state = sclp_activation_state_activating; |
807 | spin_unlock_irqrestore(&sclp_lock, flags); | 806 | spin_unlock_irqrestore(&sclp_lock, flags); |
808 | rc = sclp_init_mask(1); | 807 | rc = sclp_init_mask(1); |
809 | spin_lock_irqsave(&sclp_lock, flags); | 808 | spin_lock_irqsave(&sclp_lock, flags); |
810 | if (rc == 0) | 809 | if (rc == 0) |
811 | sclp_activation_state = sclp_activation_state_active; | 810 | sclp_activation_state = sclp_activation_state_active; |
812 | else | 811 | else |
813 | sclp_activation_state = sclp_activation_state_inactive; | 812 | sclp_activation_state = sclp_activation_state_inactive; |
814 | spin_unlock_irqrestore(&sclp_lock, flags); | 813 | spin_unlock_irqrestore(&sclp_lock, flags); |
815 | return rc; | 814 | return rc; |
816 | } | 815 | } |
817 | 816 | ||
818 | EXPORT_SYMBOL(sclp_reactivate); | 817 | EXPORT_SYMBOL(sclp_reactivate); |
819 | 818 | ||
820 | /* Handler for external interruption used during initialization. Modify | 819 | /* Handler for external interruption used during initialization. Modify |
821 | * request state to done. */ | 820 | * request state to done. */ |
822 | static void sclp_check_handler(unsigned int ext_int_code, | 821 | static void sclp_check_handler(unsigned int ext_int_code, |
823 | unsigned int param32, unsigned long param64) | 822 | unsigned int param32, unsigned long param64) |
824 | { | 823 | { |
825 | u32 finished_sccb; | 824 | u32 finished_sccb; |
826 | 825 | ||
827 | kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++; | 826 | kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++; |
828 | finished_sccb = param32 & 0xfffffff8; | 827 | finished_sccb = param32 & 0xfffffff8; |
829 | /* Is this the interrupt we are waiting for? */ | 828 | /* Is this the interrupt we are waiting for? */ |
830 | if (finished_sccb == 0) | 829 | if (finished_sccb == 0) |
831 | return; | 830 | return; |
832 | if (finished_sccb != (u32) (addr_t) sclp_init_sccb) | 831 | if (finished_sccb != (u32) (addr_t) sclp_init_sccb) |
833 | panic("sclp: unsolicited interrupt for buffer at 0x%x\n", | 832 | panic("sclp: unsolicited interrupt for buffer at 0x%x\n", |
834 | finished_sccb); | 833 | finished_sccb); |
835 | spin_lock(&sclp_lock); | 834 | spin_lock(&sclp_lock); |
836 | if (sclp_running_state == sclp_running_state_running) { | 835 | if (sclp_running_state == sclp_running_state_running) { |
837 | sclp_init_req.status = SCLP_REQ_DONE; | 836 | sclp_init_req.status = SCLP_REQ_DONE; |
838 | sclp_running_state = sclp_running_state_idle; | 837 | sclp_running_state = sclp_running_state_idle; |
839 | } | 838 | } |
840 | spin_unlock(&sclp_lock); | 839 | spin_unlock(&sclp_lock); |
841 | } | 840 | } |
842 | 841 | ||
843 | /* Initial init mask request timed out. Modify request state to failed. */ | 842 | /* Initial init mask request timed out. Modify request state to failed. */ |
844 | static void | 843 | static void |
845 | sclp_check_timeout(unsigned long data) | 844 | sclp_check_timeout(unsigned long data) |
846 | { | 845 | { |
847 | unsigned long flags; | 846 | unsigned long flags; |
848 | 847 | ||
849 | spin_lock_irqsave(&sclp_lock, flags); | 848 | spin_lock_irqsave(&sclp_lock, flags); |
850 | if (sclp_running_state == sclp_running_state_running) { | 849 | if (sclp_running_state == sclp_running_state_running) { |
851 | sclp_init_req.status = SCLP_REQ_FAILED; | 850 | sclp_init_req.status = SCLP_REQ_FAILED; |
852 | sclp_running_state = sclp_running_state_idle; | 851 | sclp_running_state = sclp_running_state_idle; |
853 | } | 852 | } |
854 | spin_unlock_irqrestore(&sclp_lock, flags); | 853 | spin_unlock_irqrestore(&sclp_lock, flags); |
855 | } | 854 | } |
856 | 855 | ||
857 | /* Perform a check of the SCLP interface. Return zero if the interface is | 856 | /* Perform a check of the SCLP interface. Return zero if the interface is |
858 | * available and there are no pending requests from a previous instance. | 857 | * available and there are no pending requests from a previous instance. |
859 | * Return non-zero otherwise. */ | 858 | * Return non-zero otherwise. */ |
860 | static int | 859 | static int |
861 | sclp_check_interface(void) | 860 | sclp_check_interface(void) |
862 | { | 861 | { |
863 | struct init_sccb *sccb; | 862 | struct init_sccb *sccb; |
864 | unsigned long flags; | 863 | unsigned long flags; |
865 | int retry; | 864 | int retry; |
866 | int rc; | 865 | int rc; |
867 | 866 | ||
868 | spin_lock_irqsave(&sclp_lock, flags); | 867 | spin_lock_irqsave(&sclp_lock, flags); |
869 | /* Prepare init mask command */ | 868 | /* Prepare init mask command */ |
870 | rc = register_external_interrupt(0x2401, sclp_check_handler); | 869 | rc = register_external_interrupt(0x2401, sclp_check_handler); |
871 | if (rc) { | 870 | if (rc) { |
872 | spin_unlock_irqrestore(&sclp_lock, flags); | 871 | spin_unlock_irqrestore(&sclp_lock, flags); |
873 | return rc; | 872 | return rc; |
874 | } | 873 | } |
875 | for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) { | 874 | for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) { |
876 | __sclp_make_init_req(0, 0); | 875 | __sclp_make_init_req(0, 0); |
877 | sccb = (struct init_sccb *) sclp_init_req.sccb; | 876 | sccb = (struct init_sccb *) sclp_init_req.sccb; |
878 | rc = sclp_service_call(sclp_init_req.command, sccb); | 877 | rc = sclp_service_call(sclp_init_req.command, sccb); |
879 | if (rc == -EIO) | 878 | if (rc == -EIO) |
880 | break; | 879 | break; |
881 | sclp_init_req.status = SCLP_REQ_RUNNING; | 880 | sclp_init_req.status = SCLP_REQ_RUNNING; |
882 | sclp_running_state = sclp_running_state_running; | 881 | sclp_running_state = sclp_running_state_running; |
883 | __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ, | 882 | __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ, |
884 | sclp_check_timeout, 0); | 883 | sclp_check_timeout, 0); |
885 | spin_unlock_irqrestore(&sclp_lock, flags); | 884 | spin_unlock_irqrestore(&sclp_lock, flags); |
886 | /* Enable service-signal interruption - needs to happen | 885 | /* Enable service-signal interruption - needs to happen |
887 | * with IRQs enabled. */ | 886 | * with IRQs enabled. */ |
888 | service_subclass_irq_register(); | 887 | service_subclass_irq_register(); |
889 | /* Wait for signal from interrupt or timeout */ | 888 | /* Wait for signal from interrupt or timeout */ |
890 | sclp_sync_wait(); | 889 | sclp_sync_wait(); |
891 | /* Disable service-signal interruption - needs to happen | 890 | /* Disable service-signal interruption - needs to happen |
892 | * with IRQs enabled. */ | 891 | * with IRQs enabled. */ |
893 | service_subclass_irq_unregister(); | 892 | service_subclass_irq_unregister(); |
894 | spin_lock_irqsave(&sclp_lock, flags); | 893 | spin_lock_irqsave(&sclp_lock, flags); |
895 | del_timer(&sclp_request_timer); | 894 | del_timer(&sclp_request_timer); |
896 | if (sclp_init_req.status == SCLP_REQ_DONE && | 895 | if (sclp_init_req.status == SCLP_REQ_DONE && |
897 | sccb->header.response_code == 0x20) { | 896 | sccb->header.response_code == 0x20) { |
898 | rc = 0; | 897 | rc = 0; |
899 | break; | 898 | break; |
900 | } else | 899 | } else |
901 | rc = -EBUSY; | 900 | rc = -EBUSY; |
902 | } | 901 | } |
903 | unregister_external_interrupt(0x2401, sclp_check_handler); | 902 | unregister_external_interrupt(0x2401, sclp_check_handler); |
904 | spin_unlock_irqrestore(&sclp_lock, flags); | 903 | spin_unlock_irqrestore(&sclp_lock, flags); |
905 | return rc; | 904 | return rc; |
906 | } | 905 | } |
907 | 906 | ||
908 | /* Reboot event handler. Reset send and receive mask to prevent pending SCLP | 907 | /* Reboot event handler. Reset send and receive mask to prevent pending SCLP |
909 | * events from interfering with rebooted system. */ | 908 | * events from interfering with rebooted system. */ |
910 | static int | 909 | static int |
911 | sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) | 910 | sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) |
912 | { | 911 | { |
913 | sclp_deactivate(); | 912 | sclp_deactivate(); |
914 | return NOTIFY_DONE; | 913 | return NOTIFY_DONE; |
915 | } | 914 | } |
916 | 915 | ||
917 | static struct notifier_block sclp_reboot_notifier = { | 916 | static struct notifier_block sclp_reboot_notifier = { |
918 | .notifier_call = sclp_reboot_event | 917 | .notifier_call = sclp_reboot_event |
919 | }; | 918 | }; |
920 | 919 | ||
921 | /* | 920 | /* |
922 | * Suspend/resume SCLP notifier implementation | 921 | * Suspend/resume SCLP notifier implementation |
923 | */ | 922 | */ |
924 | 923 | ||
925 | static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback) | 924 | static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback) |
926 | { | 925 | { |
927 | struct sclp_register *reg; | 926 | struct sclp_register *reg; |
928 | unsigned long flags; | 927 | unsigned long flags; |
929 | 928 | ||
930 | if (!rollback) { | 929 | if (!rollback) { |
931 | spin_lock_irqsave(&sclp_lock, flags); | 930 | spin_lock_irqsave(&sclp_lock, flags); |
932 | list_for_each_entry(reg, &sclp_reg_list, list) | 931 | list_for_each_entry(reg, &sclp_reg_list, list) |
933 | reg->pm_event_posted = 0; | 932 | reg->pm_event_posted = 0; |
934 | spin_unlock_irqrestore(&sclp_lock, flags); | 933 | spin_unlock_irqrestore(&sclp_lock, flags); |
935 | } | 934 | } |
936 | do { | 935 | do { |
937 | spin_lock_irqsave(&sclp_lock, flags); | 936 | spin_lock_irqsave(&sclp_lock, flags); |
938 | list_for_each_entry(reg, &sclp_reg_list, list) { | 937 | list_for_each_entry(reg, &sclp_reg_list, list) { |
939 | if (rollback && reg->pm_event_posted) | 938 | if (rollback && reg->pm_event_posted) |
940 | goto found; | 939 | goto found; |
941 | if (!rollback && !reg->pm_event_posted) | 940 | if (!rollback && !reg->pm_event_posted) |
942 | goto found; | 941 | goto found; |
943 | } | 942 | } |
944 | spin_unlock_irqrestore(&sclp_lock, flags); | 943 | spin_unlock_irqrestore(&sclp_lock, flags); |
945 | return; | 944 | return; |
946 | found: | 945 | found: |
947 | spin_unlock_irqrestore(&sclp_lock, flags); | 946 | spin_unlock_irqrestore(&sclp_lock, flags); |
948 | if (reg->pm_event_fn) | 947 | if (reg->pm_event_fn) |
949 | reg->pm_event_fn(reg, sclp_pm_event); | 948 | reg->pm_event_fn(reg, sclp_pm_event); |
950 | reg->pm_event_posted = rollback ? 0 : 1; | 949 | reg->pm_event_posted = rollback ? 0 : 1; |
951 | } while (1); | 950 | } while (1); |
952 | } | 951 | } |
953 | 952 | ||
954 | /* | 953 | /* |
955 | * Suspend/resume callbacks for platform device | 954 | * Suspend/resume callbacks for platform device |
956 | */ | 955 | */ |
957 | 956 | ||
958 | static int sclp_freeze(struct device *dev) | 957 | static int sclp_freeze(struct device *dev) |
959 | { | 958 | { |
960 | unsigned long flags; | 959 | unsigned long flags; |
961 | int rc; | 960 | int rc; |
962 | 961 | ||
963 | sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0); | 962 | sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0); |
964 | 963 | ||
965 | spin_lock_irqsave(&sclp_lock, flags); | 964 | spin_lock_irqsave(&sclp_lock, flags); |
966 | sclp_suspend_state = sclp_suspend_state_suspended; | 965 | sclp_suspend_state = sclp_suspend_state_suspended; |
967 | spin_unlock_irqrestore(&sclp_lock, flags); | 966 | spin_unlock_irqrestore(&sclp_lock, flags); |
968 | 967 | ||
969 | /* Init suspend data */ | 968 | /* Init suspend data */ |
970 | memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req)); | 969 | memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req)); |
971 | sclp_suspend_req.callback = sclp_suspend_req_cb; | 970 | sclp_suspend_req.callback = sclp_suspend_req_cb; |
972 | sclp_suspend_req.status = SCLP_REQ_FILLED; | 971 | sclp_suspend_req.status = SCLP_REQ_FILLED; |
973 | init_completion(&sclp_request_queue_flushed); | 972 | init_completion(&sclp_request_queue_flushed); |
974 | 973 | ||
975 | rc = sclp_add_request(&sclp_suspend_req); | 974 | rc = sclp_add_request(&sclp_suspend_req); |
976 | if (rc == 0) | 975 | if (rc == 0) |
977 | wait_for_completion(&sclp_request_queue_flushed); | 976 | wait_for_completion(&sclp_request_queue_flushed); |
978 | else if (rc != -ENODATA) | 977 | else if (rc != -ENODATA) |
979 | goto fail_thaw; | 978 | goto fail_thaw; |
980 | 979 | ||
981 | rc = sclp_deactivate(); | 980 | rc = sclp_deactivate(); |
982 | if (rc) | 981 | if (rc) |
983 | goto fail_thaw; | 982 | goto fail_thaw; |
984 | return 0; | 983 | return 0; |
985 | 984 | ||
986 | fail_thaw: | 985 | fail_thaw: |
987 | spin_lock_irqsave(&sclp_lock, flags); | 986 | spin_lock_irqsave(&sclp_lock, flags); |
988 | sclp_suspend_state = sclp_suspend_state_running; | 987 | sclp_suspend_state = sclp_suspend_state_running; |
989 | spin_unlock_irqrestore(&sclp_lock, flags); | 988 | spin_unlock_irqrestore(&sclp_lock, flags); |
990 | sclp_pm_event(SCLP_PM_EVENT_THAW, 1); | 989 | sclp_pm_event(SCLP_PM_EVENT_THAW, 1); |
991 | return rc; | 990 | return rc; |
992 | } | 991 | } |
993 | 992 | ||
994 | static int sclp_undo_suspend(enum sclp_pm_event event) | 993 | static int sclp_undo_suspend(enum sclp_pm_event event) |
995 | { | 994 | { |
996 | unsigned long flags; | 995 | unsigned long flags; |
997 | int rc; | 996 | int rc; |
998 | 997 | ||
999 | rc = sclp_reactivate(); | 998 | rc = sclp_reactivate(); |
1000 | if (rc) | 999 | if (rc) |
1001 | return rc; | 1000 | return rc; |
1002 | 1001 | ||
1003 | spin_lock_irqsave(&sclp_lock, flags); | 1002 | spin_lock_irqsave(&sclp_lock, flags); |
1004 | sclp_suspend_state = sclp_suspend_state_running; | 1003 | sclp_suspend_state = sclp_suspend_state_running; |
1005 | spin_unlock_irqrestore(&sclp_lock, flags); | 1004 | spin_unlock_irqrestore(&sclp_lock, flags); |
1006 | 1005 | ||
1007 | sclp_pm_event(event, 0); | 1006 | sclp_pm_event(event, 0); |
1008 | return 0; | 1007 | return 0; |
1009 | } | 1008 | } |
1010 | 1009 | ||
1011 | static int sclp_thaw(struct device *dev) | 1010 | static int sclp_thaw(struct device *dev) |
1012 | { | 1011 | { |
1013 | return sclp_undo_suspend(SCLP_PM_EVENT_THAW); | 1012 | return sclp_undo_suspend(SCLP_PM_EVENT_THAW); |
1014 | } | 1013 | } |
1015 | 1014 | ||
1016 | static int sclp_restore(struct device *dev) | 1015 | static int sclp_restore(struct device *dev) |
1017 | { | 1016 | { |
1018 | return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE); | 1017 | return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE); |
1019 | } | 1018 | } |
1020 | 1019 | ||
1021 | static const struct dev_pm_ops sclp_pm_ops = { | 1020 | static const struct dev_pm_ops sclp_pm_ops = { |
1022 | .freeze = sclp_freeze, | 1021 | .freeze = sclp_freeze, |
1023 | .thaw = sclp_thaw, | 1022 | .thaw = sclp_thaw, |
1024 | .restore = sclp_restore, | 1023 | .restore = sclp_restore, |
1025 | }; | 1024 | }; |
1026 | 1025 | ||
1027 | static struct platform_driver sclp_pdrv = { | 1026 | static struct platform_driver sclp_pdrv = { |
1028 | .driver = { | 1027 | .driver = { |
1029 | .name = "sclp", | 1028 | .name = "sclp", |
1030 | .owner = THIS_MODULE, | 1029 | .owner = THIS_MODULE, |
1031 | .pm = &sclp_pm_ops, | 1030 | .pm = &sclp_pm_ops, |
1032 | }, | 1031 | }, |
1033 | }; | 1032 | }; |
1034 | 1033 | ||
1035 | static struct platform_device *sclp_pdev; | 1034 | static struct platform_device *sclp_pdev; |
1036 | 1035 | ||
1037 | /* Initialize SCLP driver. Return zero if driver is operational, non-zero | 1036 | /* Initialize SCLP driver. Return zero if driver is operational, non-zero |
1038 | * otherwise. */ | 1037 | * otherwise. */ |
1039 | static int | 1038 | static int |
1040 | sclp_init(void) | 1039 | sclp_init(void) |
1041 | { | 1040 | { |
1042 | unsigned long flags; | 1041 | unsigned long flags; |
1043 | int rc = 0; | 1042 | int rc = 0; |
1044 | 1043 | ||
1045 | spin_lock_irqsave(&sclp_lock, flags); | 1044 | spin_lock_irqsave(&sclp_lock, flags); |
1046 | /* Check for previous or running initialization */ | 1045 | /* Check for previous or running initialization */ |
1047 | if (sclp_init_state != sclp_init_state_uninitialized) | 1046 | if (sclp_init_state != sclp_init_state_uninitialized) |
1048 | goto fail_unlock; | 1047 | goto fail_unlock; |
1049 | sclp_init_state = sclp_init_state_initializing; | 1048 | sclp_init_state = sclp_init_state_initializing; |
1050 | /* Set up variables */ | 1049 | /* Set up variables */ |
1051 | INIT_LIST_HEAD(&sclp_req_queue); | 1050 | INIT_LIST_HEAD(&sclp_req_queue); |
1052 | INIT_LIST_HEAD(&sclp_reg_list); | 1051 | INIT_LIST_HEAD(&sclp_reg_list); |
1053 | list_add(&sclp_state_change_event.list, &sclp_reg_list); | 1052 | list_add(&sclp_state_change_event.list, &sclp_reg_list); |
1054 | init_timer(&sclp_request_timer); | 1053 | init_timer(&sclp_request_timer); |
1055 | /* Check interface */ | 1054 | /* Check interface */ |
1056 | spin_unlock_irqrestore(&sclp_lock, flags); | 1055 | spin_unlock_irqrestore(&sclp_lock, flags); |
1057 | rc = sclp_check_interface(); | 1056 | rc = sclp_check_interface(); |
1058 | spin_lock_irqsave(&sclp_lock, flags); | 1057 | spin_lock_irqsave(&sclp_lock, flags); |
1059 | if (rc) | 1058 | if (rc) |
1060 | goto fail_init_state_uninitialized; | 1059 | goto fail_init_state_uninitialized; |
1061 | /* Register reboot handler */ | 1060 | /* Register reboot handler */ |
1062 | rc = register_reboot_notifier(&sclp_reboot_notifier); | 1061 | rc = register_reboot_notifier(&sclp_reboot_notifier); |
1063 | if (rc) | 1062 | if (rc) |
1064 | goto fail_init_state_uninitialized; | 1063 | goto fail_init_state_uninitialized; |
1065 | /* Register interrupt handler */ | 1064 | /* Register interrupt handler */ |
1066 | rc = register_external_interrupt(0x2401, sclp_interrupt_handler); | 1065 | rc = register_external_interrupt(0x2401, sclp_interrupt_handler); |
1067 | if (rc) | 1066 | if (rc) |
1068 | goto fail_unregister_reboot_notifier; | 1067 | goto fail_unregister_reboot_notifier; |
1069 | sclp_init_state = sclp_init_state_initialized; | 1068 | sclp_init_state = sclp_init_state_initialized; |
1070 | spin_unlock_irqrestore(&sclp_lock, flags); | 1069 | spin_unlock_irqrestore(&sclp_lock, flags); |
1071 | /* Enable service-signal external interruption - needs to happen with | 1070 | /* Enable service-signal external interruption - needs to happen with |
1072 | * IRQs enabled. */ | 1071 | * IRQs enabled. */ |
1073 | service_subclass_irq_register(); | 1072 | service_subclass_irq_register(); |
1074 | sclp_init_mask(1); | 1073 | sclp_init_mask(1); |
1075 | return 0; | 1074 | return 0; |
1076 | 1075 | ||
1077 | fail_unregister_reboot_notifier: | 1076 | fail_unregister_reboot_notifier: |
1078 | unregister_reboot_notifier(&sclp_reboot_notifier); | 1077 | unregister_reboot_notifier(&sclp_reboot_notifier); |
1079 | fail_init_state_uninitialized: | 1078 | fail_init_state_uninitialized: |
1080 | sclp_init_state = sclp_init_state_uninitialized; | 1079 | sclp_init_state = sclp_init_state_uninitialized; |
1081 | fail_unlock: | 1080 | fail_unlock: |
1082 | spin_unlock_irqrestore(&sclp_lock, flags); | 1081 | spin_unlock_irqrestore(&sclp_lock, flags); |
1083 | return rc; | 1082 | return rc; |
1084 | } | 1083 | } |
1085 | 1084 | ||
1086 | /* | 1085 | /* |
1087 | * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able | 1086 | * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able |
1088 | * to print the panic message. | 1087 | * to print the panic message. |
1089 | */ | 1088 | */ |
1090 | static int sclp_panic_notify(struct notifier_block *self, | 1089 | static int sclp_panic_notify(struct notifier_block *self, |
1091 | unsigned long event, void *data) | 1090 | unsigned long event, void *data) |
1092 | { | 1091 | { |
1093 | if (sclp_suspend_state == sclp_suspend_state_suspended) | 1092 | if (sclp_suspend_state == sclp_suspend_state_suspended) |
1094 | sclp_undo_suspend(SCLP_PM_EVENT_THAW); | 1093 | sclp_undo_suspend(SCLP_PM_EVENT_THAW); |
1095 | return NOTIFY_OK; | 1094 | return NOTIFY_OK; |
1096 | } | 1095 | } |
1097 | 1096 | ||
1098 | static struct notifier_block sclp_on_panic_nb = { | 1097 | static struct notifier_block sclp_on_panic_nb = { |
1099 | .notifier_call = sclp_panic_notify, | 1098 | .notifier_call = sclp_panic_notify, |
1100 | .priority = SCLP_PANIC_PRIO, | 1099 | .priority = SCLP_PANIC_PRIO, |
1101 | }; | 1100 | }; |
1102 | 1101 | ||
1103 | static __init int sclp_initcall(void) | 1102 | static __init int sclp_initcall(void) |
1104 | { | 1103 | { |
1105 | int rc; | 1104 | int rc; |
1106 | 1105 | ||
1107 | rc = platform_driver_register(&sclp_pdrv); | 1106 | rc = platform_driver_register(&sclp_pdrv); |
1108 | if (rc) | 1107 | if (rc) |
1109 | return rc; | 1108 | return rc; |
1110 | sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0); | 1109 | sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0); |
1111 | rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0; | 1110 | rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0; |
1112 | if (rc) | 1111 | if (rc) |
1113 | goto fail_platform_driver_unregister; | 1112 | goto fail_platform_driver_unregister; |
1114 | rc = atomic_notifier_chain_register(&panic_notifier_list, | 1113 | rc = atomic_notifier_chain_register(&panic_notifier_list, |
1115 | &sclp_on_panic_nb); | 1114 | &sclp_on_panic_nb); |
1116 | if (rc) | 1115 | if (rc) |
1117 | goto fail_platform_device_unregister; | 1116 | goto fail_platform_device_unregister; |
1118 | 1117 | ||
1119 | return sclp_init(); | 1118 | return sclp_init(); |
1120 | 1119 | ||
1121 | fail_platform_device_unregister: | 1120 | fail_platform_device_unregister: |
1122 | platform_device_unregister(sclp_pdev); | 1121 | platform_device_unregister(sclp_pdev); |
1123 | fail_platform_driver_unregister: | 1122 | fail_platform_driver_unregister: |
1124 | platform_driver_unregister(&sclp_pdrv); | 1123 | platform_driver_unregister(&sclp_pdrv); |
1125 | return rc; | 1124 | return rc; |
1126 | } | 1125 | } |
1127 | 1126 | ||
1128 | arch_initcall(sclp_initcall); | 1127 | arch_initcall(sclp_initcall); |
1129 | 1128 |
drivers/s390/kvm/kvm_virtio.c
1 | /* | 1 | /* |
2 | * kvm_virtio.c - virtio for kvm on s390 | 2 | * kvm_virtio.c - virtio for kvm on s390 |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2008 | 4 | * Copyright IBM Corp. 2008 |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License (version 2 only) | 7 | * it under the terms of the GNU General Public License (version 2 only) |
8 | * as published by the Free Software Foundation. | 8 | * as published by the Free Software Foundation. |
9 | * | 9 | * |
10 | * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> | 10 | * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/kernel_stat.h> | 13 | #include <linux/kernel_stat.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/bootmem.h> | 15 | #include <linux/bootmem.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/virtio.h> | 17 | #include <linux/virtio.h> |
18 | #include <linux/virtio_config.h> | 18 | #include <linux/virtio_config.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/virtio_console.h> | 20 | #include <linux/virtio_console.h> |
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
22 | #include <linux/virtio_ring.h> | 22 | #include <linux/virtio_ring.h> |
23 | #include <linux/pfn.h> | 23 | #include <linux/pfn.h> |
24 | #include <asm/io.h> | 24 | #include <asm/io.h> |
25 | #include <asm/kvm_para.h> | 25 | #include <asm/kvm_para.h> |
26 | #include <asm/kvm_virtio.h> | 26 | #include <asm/kvm_virtio.h> |
27 | #include <asm/setup.h> | 27 | #include <asm/setup.h> |
28 | #include <asm/s390_ext.h> | ||
29 | #include <asm/irq.h> | 28 | #include <asm/irq.h> |
30 | 29 | ||
31 | #define VIRTIO_SUBCODE_64 0x0D00 | 30 | #define VIRTIO_SUBCODE_64 0x0D00 |
32 | 31 | ||
33 | /* | 32 | /* |
34 | * The pointer to our (page) of device descriptions. | 33 | * The pointer to our (page) of device descriptions. |
35 | */ | 34 | */ |
36 | static void *kvm_devices; | 35 | static void *kvm_devices; |
37 | struct work_struct hotplug_work; | 36 | struct work_struct hotplug_work; |
38 | 37 | ||
39 | struct kvm_device { | 38 | struct kvm_device { |
40 | struct virtio_device vdev; | 39 | struct virtio_device vdev; |
41 | struct kvm_device_desc *desc; | 40 | struct kvm_device_desc *desc; |
42 | }; | 41 | }; |
43 | 42 | ||
44 | #define to_kvmdev(vd) container_of(vd, struct kvm_device, vdev) | 43 | #define to_kvmdev(vd) container_of(vd, struct kvm_device, vdev) |
45 | 44 | ||
46 | /* | 45 | /* |
47 | * memory layout: | 46 | * memory layout: |
48 | * - kvm_device_descriptor | 47 | * - kvm_device_descriptor |
49 | * struct kvm_device_desc | 48 | * struct kvm_device_desc |
50 | * - configuration | 49 | * - configuration |
51 | * struct kvm_vqconfig | 50 | * struct kvm_vqconfig |
52 | * - feature bits | 51 | * - feature bits |
53 | * - config space | 52 | * - config space |
54 | */ | 53 | */ |
55 | static struct kvm_vqconfig *kvm_vq_config(const struct kvm_device_desc *desc) | 54 | static struct kvm_vqconfig *kvm_vq_config(const struct kvm_device_desc *desc) |
56 | { | 55 | { |
57 | return (struct kvm_vqconfig *)(desc + 1); | 56 | return (struct kvm_vqconfig *)(desc + 1); |
58 | } | 57 | } |
59 | 58 | ||
60 | static u8 *kvm_vq_features(const struct kvm_device_desc *desc) | 59 | static u8 *kvm_vq_features(const struct kvm_device_desc *desc) |
61 | { | 60 | { |
62 | return (u8 *)(kvm_vq_config(desc) + desc->num_vq); | 61 | return (u8 *)(kvm_vq_config(desc) + desc->num_vq); |
63 | } | 62 | } |
64 | 63 | ||
65 | static u8 *kvm_vq_configspace(const struct kvm_device_desc *desc) | 64 | static u8 *kvm_vq_configspace(const struct kvm_device_desc *desc) |
66 | { | 65 | { |
67 | return kvm_vq_features(desc) + desc->feature_len * 2; | 66 | return kvm_vq_features(desc) + desc->feature_len * 2; |
68 | } | 67 | } |
69 | 68 | ||
70 | /* | 69 | /* |
71 | * The total size of the config page used by this device (incl. desc) | 70 | * The total size of the config page used by this device (incl. desc) |
72 | */ | 71 | */ |
73 | static unsigned desc_size(const struct kvm_device_desc *desc) | 72 | static unsigned desc_size(const struct kvm_device_desc *desc) |
74 | { | 73 | { |
75 | return sizeof(*desc) | 74 | return sizeof(*desc) |
76 | + desc->num_vq * sizeof(struct kvm_vqconfig) | 75 | + desc->num_vq * sizeof(struct kvm_vqconfig) |
77 | + desc->feature_len * 2 | 76 | + desc->feature_len * 2 |
78 | + desc->config_len; | 77 | + desc->config_len; |
79 | } | 78 | } |
80 | 79 | ||
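In other words, each device's descriptor, virtqueue configs, feature bitmap and config space form one contiguous blob, with the feature area doubled because the second half holds the bits the guest accepts (see kvm_finalize_features() below). A sketch of the offset arithmetic with assumed struct sizes (the real layouts live in <asm/kvm_virtio.h>):

    #include <stdio.h>

    /* Assumed sizes for illustration only. */
    #define DESC_SIZE     24 /* assumed sizeof(struct kvm_device_desc) */
    #define VQCONFIG_SIZE 24 /* assumed sizeof(struct kvm_vqconfig)    */

    int main(void)
    {
    	unsigned num_vq = 2, feature_len = 4, config_len = 8;

    	unsigned vq_off     = DESC_SIZE;                       /* kvm_vq_config()      */
    	unsigned feat_off   = vq_off + num_vq * VQCONFIG_SIZE; /* kvm_vq_features()    */
    	unsigned cfgspc_off = feat_off + feature_len * 2;      /* kvm_vq_configspace() */
    	unsigned total      = cfgspc_off + config_len;         /* desc_size()          */

    	printf("vqs@%u features@%u config@%u total=%u\n",
    	       vq_off, feat_off, cfgspc_off, total);
    	return 0;
    }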
81 | /* This gets the device's feature bits. */ | 80 | /* This gets the device's feature bits. */ |
82 | static u32 kvm_get_features(struct virtio_device *vdev) | 81 | static u32 kvm_get_features(struct virtio_device *vdev) |
83 | { | 82 | { |
84 | unsigned int i; | 83 | unsigned int i; |
85 | u32 features = 0; | 84 | u32 features = 0; |
86 | struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; | 85 | struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; |
87 | u8 *in_features = kvm_vq_features(desc); | 86 | u8 *in_features = kvm_vq_features(desc); |
88 | 87 | ||
89 | for (i = 0; i < min(desc->feature_len * 8, 32); i++) | 88 | for (i = 0; i < min(desc->feature_len * 8, 32); i++) |
90 | if (in_features[i / 8] & (1 << (i % 8))) | 89 | if (in_features[i / 8] & (1 << (i % 8))) |
91 | features |= (1 << i); | 90 | features |= (1 << i); |
92 | return features; | 91 | return features; |
93 | } | 92 | } |
94 | 93 | ||
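The loop above gathers up to 32 feature bits from a byte array laid out bitwise little-endian: bit i lives in byte i/8 at position i%8. A standalone check of that indexing:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t gather_features(const uint8_t *in, unsigned feature_len)
    {
    	uint32_t features = 0;
    	unsigned n = feature_len * 8 < 32 ? feature_len * 8 : 32;

    	for (unsigned i = 0; i < n; i++)
    		if (in[i / 8] & (1u << (i % 8)))
    			features |= 1u << i;
    	return features;
    }

    int main(void)
    {
    	uint8_t in[4] = { 0x01, 0x80, 0x00, 0x10 }; /* bits 0, 15, 28 set */

    	printf("features=0x%08x\n", gather_features(in, sizeof(in)));
    	return 0; /* prints features=0x10008001 */
    }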
95 | static void kvm_finalize_features(struct virtio_device *vdev) | 94 | static void kvm_finalize_features(struct virtio_device *vdev) |
96 | { | 95 | { |
97 | unsigned int i, bits; | 96 | unsigned int i, bits; |
98 | struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; | 97 | struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; |
99 | /* Second half of bitmap is features we accept. */ | 98 | /* Second half of bitmap is features we accept. */ |
100 | u8 *out_features = kvm_vq_features(desc) + desc->feature_len; | 99 | u8 *out_features = kvm_vq_features(desc) + desc->feature_len; |
101 | 100 | ||
102 | /* Give virtio_ring a chance to accept features. */ | 101 | /* Give virtio_ring a chance to accept features. */ |
103 | vring_transport_features(vdev); | 102 | vring_transport_features(vdev); |
104 | 103 | ||
105 | memset(out_features, 0, desc->feature_len); | 104 | memset(out_features, 0, desc->feature_len); |
106 | bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8; | 105 | bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8; |
107 | for (i = 0; i < bits; i++) { | 106 | for (i = 0; i < bits; i++) { |
108 | if (test_bit(i, vdev->features)) | 107 | if (test_bit(i, vdev->features)) |
109 | out_features[i / 8] |= (1 << (i % 8)); | 108 | out_features[i / 8] |= (1 << (i % 8)); |
110 | } | 109 | } |
111 | } | 110 | } |
112 | 111 | ||
113 | /* | 112 | /* |
114 | * Reading and writing elements in config space | 113 | * Reading and writing elements in config space |
115 | */ | 114 | */ |
116 | static void kvm_get(struct virtio_device *vdev, unsigned int offset, | 115 | static void kvm_get(struct virtio_device *vdev, unsigned int offset, |
117 | void *buf, unsigned len) | 116 | void *buf, unsigned len) |
118 | { | 117 | { |
119 | struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; | 118 | struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; |
120 | 119 | ||
121 | BUG_ON(offset + len > desc->config_len); | 120 | BUG_ON(offset + len > desc->config_len); |
122 | memcpy(buf, kvm_vq_configspace(desc) + offset, len); | 121 | memcpy(buf, kvm_vq_configspace(desc) + offset, len); |
123 | } | 122 | } |
124 | 123 | ||
125 | static void kvm_set(struct virtio_device *vdev, unsigned int offset, | 124 | static void kvm_set(struct virtio_device *vdev, unsigned int offset, |
126 | const void *buf, unsigned len) | 125 | const void *buf, unsigned len) |
127 | { | 126 | { |
128 | struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; | 127 | struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; |
129 | 128 | ||
130 | BUG_ON(offset + len > desc->config_len); | 129 | BUG_ON(offset + len > desc->config_len); |
131 | memcpy(kvm_vq_configspace(desc) + offset, buf, len); | 130 | memcpy(kvm_vq_configspace(desc) + offset, buf, len); |
132 | } | 131 | } |
133 | 132 | ||
134 | /* | 133 | /* |
135 | * The operations to get and set the status word just access | 134 | * The operations to get and set the status word just access |
136 | * the status field of the device descriptor. set_status will also | 135 | * the status field of the device descriptor. set_status will also |
137 | * make a hypercall to the host, to tell it about status changes | 136 | * make a hypercall to the host, to tell it about status changes |
138 | */ | 137 | */ |
139 | static u8 kvm_get_status(struct virtio_device *vdev) | 138 | static u8 kvm_get_status(struct virtio_device *vdev) |
140 | { | 139 | { |
141 | return to_kvmdev(vdev)->desc->status; | 140 | return to_kvmdev(vdev)->desc->status; |
142 | } | 141 | } |
143 | 142 | ||
144 | static void kvm_set_status(struct virtio_device *vdev, u8 status) | 143 | static void kvm_set_status(struct virtio_device *vdev, u8 status) |
145 | { | 144 | { |
146 | BUG_ON(!status); | 145 | BUG_ON(!status); |
147 | to_kvmdev(vdev)->desc->status = status; | 146 | to_kvmdev(vdev)->desc->status = status; |
148 | kvm_hypercall1(KVM_S390_VIRTIO_SET_STATUS, | 147 | kvm_hypercall1(KVM_S390_VIRTIO_SET_STATUS, |
149 | (unsigned long) to_kvmdev(vdev)->desc); | 148 | (unsigned long) to_kvmdev(vdev)->desc); |
150 | } | 149 | } |
151 | 150 | ||
152 | /* | 151 | /* |
153 | * To reset the device, we use the KVM_VIRTIO_RESET hypercall, using the | 152 | * To reset the device, we use the KVM_VIRTIO_RESET hypercall, using the |
154 | * descriptor address. The Host will zero the status and all the | 153 | * descriptor address. The Host will zero the status and all the |
155 | * features. | 154 | * features. |
156 | */ | 155 | */ |
157 | static void kvm_reset(struct virtio_device *vdev) | 156 | static void kvm_reset(struct virtio_device *vdev) |
158 | { | 157 | { |
159 | kvm_hypercall1(KVM_S390_VIRTIO_RESET, | 158 | kvm_hypercall1(KVM_S390_VIRTIO_RESET, |
160 | (unsigned long) to_kvmdev(vdev)->desc); | 159 | (unsigned long) to_kvmdev(vdev)->desc); |
161 | } | 160 | } |
162 | 161 | ||
163 | /* | 162 | /* |
164 | * When the virtio_ring code wants to notify the Host, it calls us here and we | 163 | * When the virtio_ring code wants to notify the Host, it calls us here and we |
165 | * make a hypercall. We hand the address of the virtqueue so the Host | 164 | * make a hypercall. We hand the address of the virtqueue so the Host |
166 | * knows which virtqueue we're talking about. | 165 | * knows which virtqueue we're talking about. |
167 | */ | 166 | */ |
168 | static void kvm_notify(struct virtqueue *vq) | 167 | static void kvm_notify(struct virtqueue *vq) |
169 | { | 168 | { |
170 | struct kvm_vqconfig *config = vq->priv; | 169 | struct kvm_vqconfig *config = vq->priv; |
171 | 170 | ||
172 | kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address); | 171 | kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address); |
173 | } | 172 | } |
174 | 173 | ||
175 | /* | 174 | /* |
176 | * This routine finds the virtqueue with the given index in the | 175 | * This routine finds the virtqueue with the given index in the |
177 | * configuration of this device and sets it up. | 176 | * configuration of this device and sets it up. |
178 | */ | 177 | */ |
179 | static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, | 178 | static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, |
180 | unsigned index, | 179 | unsigned index, |
181 | void (*callback)(struct virtqueue *vq), | 180 | void (*callback)(struct virtqueue *vq), |
182 | const char *name) | 181 | const char *name) |
183 | { | 182 | { |
184 | struct kvm_device *kdev = to_kvmdev(vdev); | 183 | struct kvm_device *kdev = to_kvmdev(vdev); |
185 | struct kvm_vqconfig *config; | 184 | struct kvm_vqconfig *config; |
186 | struct virtqueue *vq; | 185 | struct virtqueue *vq; |
187 | int err; | 186 | int err; |
188 | 187 | ||
189 | if (index >= kdev->desc->num_vq) | 188 | if (index >= kdev->desc->num_vq) |
190 | return ERR_PTR(-ENOENT); | 189 | return ERR_PTR(-ENOENT); |
191 | 190 | ||
192 | config = kvm_vq_config(kdev->desc)+index; | 191 | config = kvm_vq_config(kdev->desc)+index; |
193 | 192 | ||
194 | err = vmem_add_mapping(config->address, | 193 | err = vmem_add_mapping(config->address, |
195 | vring_size(config->num, | 194 | vring_size(config->num, |
196 | KVM_S390_VIRTIO_RING_ALIGN)); | 195 | KVM_S390_VIRTIO_RING_ALIGN)); |
197 | if (err) | 196 | if (err) |
198 | goto out; | 197 | goto out; |
199 | 198 | ||
200 | vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN, | 199 | vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN, |
201 | vdev, (void *) config->address, | 200 | vdev, (void *) config->address, |
202 | kvm_notify, callback, name); | 201 | kvm_notify, callback, name); |
203 | if (!vq) { | 202 | if (!vq) { |
204 | err = -ENOMEM; | 203 | err = -ENOMEM; |
205 | goto unmap; | 204 | goto unmap; |
206 | } | 205 | } |
207 | 206 | ||
208 | /* | 207 | /* |
209 | * register a callback token | 208 | * register a callback token |
210 | * The host will send this via the external interrupt parameter | 209 | * The host will send this via the external interrupt parameter |
211 | */ | 210 | */ |
212 | config->token = (u64) vq; | 211 | config->token = (u64) vq; |
213 | 212 | ||
214 | vq->priv = config; | 213 | vq->priv = config; |
215 | return vq; | 214 | return vq; |
216 | unmap: | 215 | unmap: |
217 | vmem_remove_mapping(config->address, | 216 | vmem_remove_mapping(config->address, |
218 | vring_size(config->num, | 217 | vring_size(config->num, |
219 | KVM_S390_VIRTIO_RING_ALIGN)); | 218 | KVM_S390_VIRTIO_RING_ALIGN)); |
220 | out: | 219 | out: |
221 | return ERR_PTR(err); | 220 | return ERR_PTR(err); |
222 | } | 221 | } |
223 | 222 | ||
224 | static void kvm_del_vq(struct virtqueue *vq) | 223 | static void kvm_del_vq(struct virtqueue *vq) |
225 | { | 224 | { |
226 | struct kvm_vqconfig *config = vq->priv; | 225 | struct kvm_vqconfig *config = vq->priv; |
227 | 226 | ||
228 | vring_del_virtqueue(vq); | 227 | vring_del_virtqueue(vq); |
229 | vmem_remove_mapping(config->address, | 228 | vmem_remove_mapping(config->address, |
230 | vring_size(config->num, | 229 | vring_size(config->num, |
231 | KVM_S390_VIRTIO_RING_ALIGN)); | 230 | KVM_S390_VIRTIO_RING_ALIGN)); |
232 | } | 231 | } |
233 | 232 | ||
234 | static void kvm_del_vqs(struct virtio_device *vdev) | 233 | static void kvm_del_vqs(struct virtio_device *vdev) |
235 | { | 234 | { |
236 | struct virtqueue *vq, *n; | 235 | struct virtqueue *vq, *n; |
237 | 236 | ||
238 | list_for_each_entry_safe(vq, n, &vdev->vqs, list) | 237 | list_for_each_entry_safe(vq, n, &vdev->vqs, list) |
239 | kvm_del_vq(vq); | 238 | kvm_del_vq(vq); |
240 | } | 239 | } |
241 | 240 | ||
242 | static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs, | 241 | static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs, |
243 | struct virtqueue *vqs[], | 242 | struct virtqueue *vqs[], |
244 | vq_callback_t *callbacks[], | 243 | vq_callback_t *callbacks[], |
245 | const char *names[]) | 244 | const char *names[]) |
246 | { | 245 | { |
247 | struct kvm_device *kdev = to_kvmdev(vdev); | 246 | struct kvm_device *kdev = to_kvmdev(vdev); |
248 | int i; | 247 | int i; |
249 | 248 | ||
250 | /* The device must supply at least this many virtqueues. */ | 249 | /* The device must supply at least this many virtqueues. */ |
251 | if (nvqs > kdev->desc->num_vq) | 250 | if (nvqs > kdev->desc->num_vq) |
252 | return -ENOENT; | 251 | return -ENOENT; |
253 | 252 | ||
254 | for (i = 0; i < nvqs; ++i) { | 253 | for (i = 0; i < nvqs; ++i) { |
255 | vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]); | 254 | vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]); |
256 | if (IS_ERR(vqs[i])) | 255 | if (IS_ERR(vqs[i])) |
257 | goto error; | 256 | goto error; |
258 | } | 257 | } |
259 | return 0; | 258 | return 0; |
260 | 259 | ||
261 | error: | 260 | error: |
262 | kvm_del_vqs(vdev); | 261 | kvm_del_vqs(vdev); |
263 | return PTR_ERR(vqs[i]); | 262 | return PTR_ERR(vqs[i]); |
264 | } | 263 | } |
265 | 264 | ||
266 | /* | 265 | /* |
267 | * The config ops structure as defined by virtio config | 266 | * The config ops structure as defined by virtio config |
268 | */ | 267 | */ |
269 | static struct virtio_config_ops kvm_vq_configspace_ops = { | 268 | static struct virtio_config_ops kvm_vq_configspace_ops = { |
270 | .get_features = kvm_get_features, | 269 | .get_features = kvm_get_features, |
271 | .finalize_features = kvm_finalize_features, | 270 | .finalize_features = kvm_finalize_features, |
272 | .get = kvm_get, | 271 | .get = kvm_get, |
273 | .set = kvm_set, | 272 | .set = kvm_set, |
274 | .get_status = kvm_get_status, | 273 | .get_status = kvm_get_status, |
275 | .set_status = kvm_set_status, | 274 | .set_status = kvm_set_status, |
276 | .reset = kvm_reset, | 275 | .reset = kvm_reset, |
277 | .find_vqs = kvm_find_vqs, | 276 | .find_vqs = kvm_find_vqs, |
278 | .del_vqs = kvm_del_vqs, | 277 | .del_vqs = kvm_del_vqs, |
279 | }; | 278 | }; |
280 | 279 | ||
281 | /* | 280 | /* |
282 | * The root device for the kvm virtio devices. | 281 | * The root device for the kvm virtio devices. |
283 | * This makes them appear as /sys/devices/kvm_s390/0,1,2 not /sys/devices/0,1,2. | 282 | * This makes them appear as /sys/devices/kvm_s390/0,1,2 not /sys/devices/0,1,2. |
284 | */ | 283 | */ |
285 | static struct device *kvm_root; | 284 | static struct device *kvm_root; |
286 | 285 | ||
287 | /* | 286 | /* |
288 | * adds a new device and registers it with virtio | 287 | * adds a new device and registers it with virtio |
289 | * appropriate drivers are loaded by the device model | 288 | * appropriate drivers are loaded by the device model |
290 | */ | 289 | */ |
291 | static void add_kvm_device(struct kvm_device_desc *d, unsigned int offset) | 290 | static void add_kvm_device(struct kvm_device_desc *d, unsigned int offset) |
292 | { | 291 | { |
293 | struct kvm_device *kdev; | 292 | struct kvm_device *kdev; |
294 | 293 | ||
295 | kdev = kzalloc(sizeof(*kdev), GFP_KERNEL); | 294 | kdev = kzalloc(sizeof(*kdev), GFP_KERNEL); |
296 | if (!kdev) { | 295 | if (!kdev) { |
297 | printk(KERN_EMERG "Cannot allocate kvm dev %u type %u\n", | 296 | printk(KERN_EMERG "Cannot allocate kvm dev %u type %u\n", |
298 | offset, d->type); | 297 | offset, d->type); |
299 | return; | 298 | return; |
300 | } | 299 | } |
301 | 300 | ||
302 | kdev->vdev.dev.parent = kvm_root; | 301 | kdev->vdev.dev.parent = kvm_root; |
303 | kdev->vdev.id.device = d->type; | 302 | kdev->vdev.id.device = d->type; |
304 | kdev->vdev.config = &kvm_vq_configspace_ops; | 303 | kdev->vdev.config = &kvm_vq_configspace_ops; |
305 | kdev->desc = d; | 304 | kdev->desc = d; |
306 | 305 | ||
307 | if (register_virtio_device(&kdev->vdev) != 0) { | 306 | if (register_virtio_device(&kdev->vdev) != 0) { |
308 | printk(KERN_ERR "Failed to register kvm device %u type %u\n", | 307 | printk(KERN_ERR "Failed to register kvm device %u type %u\n", |
309 | offset, d->type); | 308 | offset, d->type); |
310 | kfree(kdev); | 309 | kfree(kdev); |
311 | } | 310 | } |
312 | } | 311 | } |
313 | 312 | ||
314 | /* | 313 | /* |
315 | * scan_devices() simply iterates through the device page. | 314 | * scan_devices() simply iterates through the device page. |
316 | * The type 0 is reserved to mean "end of devices". | 315 | * The type 0 is reserved to mean "end of devices". |
317 | */ | 316 | */ |
318 | static void scan_devices(void) | 317 | static void scan_devices(void) |
319 | { | 318 | { |
320 | unsigned int i; | 319 | unsigned int i; |
321 | struct kvm_device_desc *d; | 320 | struct kvm_device_desc *d; |
322 | 321 | ||
323 | for (i = 0; i < PAGE_SIZE; i += desc_size(d)) { | 322 | for (i = 0; i < PAGE_SIZE; i += desc_size(d)) { |
324 | d = kvm_devices + i; | 323 | d = kvm_devices + i; |
325 | 324 | ||
326 | if (d->type == 0) | 325 | if (d->type == 0) |
327 | break; | 326 | break; |
328 | 327 | ||
329 | add_kvm_device(d, i); | 328 | add_kvm_device(d, i); |
330 | } | 329 | } |
331 | } | 330 | } |
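Both scan_devices() and hotplug_devices() below step through the device page in desc_size() strides. That helper is defined earlier in kvm_virtio.c, outside this hunk; in essence it sums the variable-length parts that follow each descriptor header, along these lines:

        /* Sketch of the stride computation, following the layout the loops
         * assume: header, num_vq vqconfig entries, two feature bitmaps of
         * feature_len bytes each, then config_len bytes of config space.
         */
        static unsigned desc_size(const struct kvm_device_desc *d)
        {
                return sizeof(*d)
                        + d->num_vq * sizeof(struct kvm_vqconfig)
                        + d->feature_len * 2
                        + d->config_len;
        }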
332 | 331 | ||
333 | /* | 332 | /* |
334 | * match for a kvm device with a specific desc pointer | 333 | * match for a kvm device with a specific desc pointer |
335 | */ | 334 | */ |
336 | static int match_desc(struct device *dev, void *data) | 335 | static int match_desc(struct device *dev, void *data) |
337 | { | 336 | { |
338 | if ((ulong)to_kvmdev(dev_to_virtio(dev))->desc == (ulong)data) | 337 | if ((ulong)to_kvmdev(dev_to_virtio(dev))->desc == (ulong)data) |
339 | return 1; | 338 | return 1; |
340 | 339 | ||
341 | return 0; | 340 | return 0; |
342 | } | 341 | } |
343 | 342 | ||
344 | /* | 343 | /* |
345 | * hotplug_devices tries to find changes in the device page. | 344 | * hotplug_devices tries to find changes in the device page. |
346 | */ | 345 | */ |
347 | static void hotplug_devices(struct work_struct *dummy) | 346 | static void hotplug_devices(struct work_struct *dummy) |
348 | { | 347 | { |
349 | unsigned int i; | 348 | unsigned int i; |
350 | struct kvm_device_desc *d; | 349 | struct kvm_device_desc *d; |
351 | struct device *dev; | 350 | struct device *dev; |
352 | 351 | ||
353 | for (i = 0; i < PAGE_SIZE; i += desc_size(d)) { | 352 | for (i = 0; i < PAGE_SIZE; i += desc_size(d)) { |
354 | d = kvm_devices + i; | 353 | d = kvm_devices + i; |
355 | 354 | ||
356 | /* end of list */ | 355 | /* end of list */ |
357 | if (d->type == 0) | 356 | if (d->type == 0) |
358 | break; | 357 | break; |
359 | 358 | ||
360 | /* device already exists */ | 359 | /* device already exists */ |
361 | dev = device_find_child(kvm_root, d, match_desc); | 360 | dev = device_find_child(kvm_root, d, match_desc); |
362 | if (dev) { | 361 | if (dev) { |
363 | /* XXX check for hotplug remove */ | 362 | /* XXX check for hotplug remove */ |
364 | put_device(dev); | 363 | put_device(dev); |
365 | continue; | 364 | continue; |
366 | } | 365 | } |
367 | 366 | ||
368 | /* new device */ | 367 | /* new device */ |
369 | printk(KERN_INFO "Adding new virtio device %p\n", d); | 368 | printk(KERN_INFO "Adding new virtio device %p\n", d); |
370 | add_kvm_device(d, i); | 369 | add_kvm_device(d, i); |
371 | } | 370 | } |
372 | } | 371 | } |
373 | 372 | ||
374 | /* | 373 | /* |
375 | * we emulate the request_irq behaviour on top of s390 extints | 374 | * we emulate the request_irq behaviour on top of s390 extints |
376 | */ | 375 | */ |
377 | static void kvm_extint_handler(unsigned int ext_int_code, | 376 | static void kvm_extint_handler(unsigned int ext_int_code, |
378 | unsigned int param32, unsigned long param64) | 377 | unsigned int param32, unsigned long param64) |
379 | { | 378 | { |
380 | struct virtqueue *vq; | 379 | struct virtqueue *vq; |
381 | u16 subcode; | 380 | u16 subcode; |
382 | u32 param; | 381 | u32 param; |
383 | 382 | ||
384 | subcode = ext_int_code >> 16; | 383 | subcode = ext_int_code >> 16; |
385 | if ((subcode & 0xff00) != VIRTIO_SUBCODE_64) | 384 | if ((subcode & 0xff00) != VIRTIO_SUBCODE_64) |
386 | return; | 385 | return; |
387 | kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++; | 386 | kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++; |
388 | 387 | ||
389 | /* The LSB might be overloaded, we have to mask it */ | 388 | /* The LSB might be overloaded, we have to mask it */ |
390 | vq = (struct virtqueue *)(param64 & ~1UL); | 389 | vq = (struct virtqueue *)(param64 & ~1UL); |
391 | 390 | ||
392 | /* We use ext_params to decide what this interrupt means */ | 391 | /* We use ext_params to decide what this interrupt means */ |
393 | param = param32 & VIRTIO_PARAM_MASK; | 392 | param = param32 & VIRTIO_PARAM_MASK; |
394 | 393 | ||
395 | switch (param) { | 394 | switch (param) { |
396 | case VIRTIO_PARAM_CONFIG_CHANGED: | 395 | case VIRTIO_PARAM_CONFIG_CHANGED: |
397 | { | 396 | { |
398 | struct virtio_driver *drv; | 397 | struct virtio_driver *drv; |
399 | drv = container_of(vq->vdev->dev.driver, | 398 | drv = container_of(vq->vdev->dev.driver, |
400 | struct virtio_driver, driver); | 399 | struct virtio_driver, driver); |
401 | if (drv->config_changed) | 400 | if (drv->config_changed) |
402 | drv->config_changed(vq->vdev); | 401 | drv->config_changed(vq->vdev); |
403 | 402 | ||
404 | break; | 403 | break; |
405 | } | 404 | } |
406 | case VIRTIO_PARAM_DEV_ADD: | 405 | case VIRTIO_PARAM_DEV_ADD: |
407 | schedule_work(&hotplug_work); | 406 | schedule_work(&hotplug_work); |
408 | break; | 407 | break; |
409 | case VIRTIO_PARAM_VRING_INTERRUPT: | 408 | case VIRTIO_PARAM_VRING_INTERRUPT: |
410 | default: | 409 | default: |
411 | vring_interrupt(0, vq); | 410 | vring_interrupt(0, vq); |
412 | break; | 411 | break; |
413 | } | 412 | } |
414 | } | 413 | } |
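kvm_extint_handler() is exactly the usage pattern the interface now declared in asm/irq.h is meant for: pick an external interrupt subcode, register a handler for it, and decode the 32- and 64-bit parameters by convention with the interrupt source. A minimal consumer sketch; the subcode 0x1234 and the names my_handler/my_init are placeholders, not part of the source:

        static void my_handler(unsigned int ext_int_code,
                               unsigned int param32, unsigned long param64)
        {
                /* decode param32/param64 as defined by the interrupt source */
        }

        static int __init my_init(void)
        {
                /* enable the service-signal subclass, then hook the subcode */
                service_subclass_irq_register();
                return register_external_interrupt(0x1234, my_handler);
        }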
415 | 414 | ||
416 | /* | 415 | /* |
417 | * Init function for virtio | 416 | * Init function for virtio |
418 | * devices are in a single page above the top of "normal" memory | 417 | * devices are in a single page above the top of "normal" memory |
419 | */ | 418 | */ |
420 | static int __init kvm_devices_init(void) | 419 | static int __init kvm_devices_init(void) |
421 | { | 420 | { |
422 | int rc; | 421 | int rc; |
423 | 422 | ||
424 | if (!MACHINE_IS_KVM) | 423 | if (!MACHINE_IS_KVM) |
425 | return -ENODEV; | 424 | return -ENODEV; |
426 | 425 | ||
427 | kvm_root = root_device_register("kvm_s390"); | 426 | kvm_root = root_device_register("kvm_s390"); |
428 | if (IS_ERR(kvm_root)) { | 427 | if (IS_ERR(kvm_root)) { |
429 | rc = PTR_ERR(kvm_root); | 428 | rc = PTR_ERR(kvm_root); |
430 | printk(KERN_ERR "Could not register kvm_s390 root device\n"); | 429 | printk(KERN_ERR "Could not register kvm_s390 root device\n"); |
431 | return rc; | 430 | return rc; |
432 | } | 431 | } |
433 | 432 | ||
434 | rc = vmem_add_mapping(real_memory_size, PAGE_SIZE); | 433 | rc = vmem_add_mapping(real_memory_size, PAGE_SIZE); |
435 | if (rc) { | 434 | if (rc) { |
436 | root_device_unregister(kvm_root); | 435 | root_device_unregister(kvm_root); |
437 | return rc; | 436 | return rc; |
438 | } | 437 | } |
439 | 438 | ||
440 | kvm_devices = (void *) real_memory_size; | 439 | kvm_devices = (void *) real_memory_size; |
441 | 440 | ||
442 | INIT_WORK(&hotplug_work, hotplug_devices); | 441 | INIT_WORK(&hotplug_work, hotplug_devices); |
443 | 442 | ||
444 | service_subclass_irq_register(); | 443 | service_subclass_irq_register(); |
445 | register_external_interrupt(0x2603, kvm_extint_handler); | 444 | register_external_interrupt(0x2603, kvm_extint_handler); |
446 | 445 | ||
447 | scan_devices(); | 446 | scan_devices(); |
448 | return 0; | 447 | return 0; |
449 | } | 448 | } |
450 | 449 | ||
451 | /* code for early console output with virtio_console */ | 450 | /* code for early console output with virtio_console */ |
452 | static __init int early_put_chars(u32 vtermno, const char *buf, int count) | 451 | static __init int early_put_chars(u32 vtermno, const char *buf, int count) |
453 | { | 452 | { |
454 | char scratch[17]; | 453 | char scratch[17]; |
455 | unsigned int len = count; | 454 | unsigned int len = count; |
456 | 455 | ||
457 | if (len > sizeof(scratch) - 1) | 456 | if (len > sizeof(scratch) - 1) |
458 | len = sizeof(scratch) - 1; | 457 | len = sizeof(scratch) - 1; |
459 | scratch[len] = '\0'; | 458 | scratch[len] = '\0'; |
460 | memcpy(scratch, buf, len); | 459 | memcpy(scratch, buf, len); |
461 | kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, __pa(scratch)); | 460 | kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, __pa(scratch)); |
462 | return len; | 461 | return len; |
463 | } | 462 | } |
464 | 463 | ||
465 | static int __init s390_virtio_console_init(void) | 464 | static int __init s390_virtio_console_init(void) |
466 | { | 465 | { |
467 | if (!MACHINE_IS_KVM) | 466 | if (!MACHINE_IS_KVM) |
468 | return -ENODEV; | 467 | return -ENODEV; |
469 | return virtio_cons_early_init(early_put_chars); | 468 | return virtio_cons_early_init(early_put_chars); |
470 | } | 469 | } |
471 | console_initcall(s390_virtio_console_init); | 470 | console_initcall(s390_virtio_console_init); |
472 | 471 | ||
473 | 472 | ||
474 | /* | 473 | /* |
475 | * We do this after core stuff, but before the drivers. | 474 | * We do this after core stuff, but before the drivers. |
476 | */ | 475 | */ |
477 | postcore_initcall(kvm_devices_init); | 476 | postcore_initcall(kvm_devices_init); |
478 | 477 |
net/iucv/iucv.c
1 | /* | 1 | /* |
2 | * IUCV base infrastructure. | 2 | * IUCV base infrastructure. |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2001, 2009 | 4 | * Copyright IBM Corp. 2001, 2009 |
5 | * | 5 | * |
6 | * Author(s): | 6 | * Author(s): |
7 | * Original source: | 7 | * Original source: |
8 | * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 | 8 | * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 |
9 | * Xenia Tkatschow (xenia@us.ibm.com) | 9 | * Xenia Tkatschow (xenia@us.ibm.com) |
10 | * 2Gb awareness and general cleanup: | 10 | * 2Gb awareness and general cleanup: |
11 | * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) | 11 | * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) |
12 | * Rewritten for af_iucv: | 12 | * Rewritten for af_iucv: |
13 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 13 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
14 | * PM functions: | 14 | * PM functions: |
15 | * Ursula Braun (ursula.braun@de.ibm.com) | 15 | * Ursula Braun (ursula.braun@de.ibm.com) |
16 | * | 16 | * |
17 | * Documentation used: | 17 | * Documentation used: |
18 | * The original source | 18 | * The original source |
19 | * CP Programming Service, IBM document # SC24-5760 | 19 | * CP Programming Service, IBM document # SC24-5760 |
20 | * | 20 | * |
21 | * This program is free software; you can redistribute it and/or modify | 21 | * This program is free software; you can redistribute it and/or modify |
22 | * it under the terms of the GNU General Public License as published by | 22 | * it under the terms of the GNU General Public License as published by |
23 | * the Free Software Foundation; either version 2, or (at your option) | 23 | * the Free Software Foundation; either version 2, or (at your option) |
24 | * any later version. | 24 | * any later version. |
25 | * | 25 | * |
26 | * This program is distributed in the hope that it will be useful, | 26 | * This program is distributed in the hope that it will be useful, |
27 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 27 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
28 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 28 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
29 | * GNU General Public License for more details. | 29 | * GNU General Public License for more details. |
30 | * | 30 | * |
31 | * You should have received a copy of the GNU General Public License | 31 | * You should have received a copy of the GNU General Public License |
32 | * along with this program; if not, write to the Free Software | 32 | * along with this program; if not, write to the Free Software |
33 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 33 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
34 | */ | 34 | */ |
35 | 35 | ||
36 | #define KMSG_COMPONENT "iucv" | 36 | #define KMSG_COMPONENT "iucv" |
37 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 37 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
38 | 38 | ||
39 | #include <linux/kernel_stat.h> | 39 | #include <linux/kernel_stat.h> |
40 | #include <linux/module.h> | 40 | #include <linux/module.h> |
41 | #include <linux/moduleparam.h> | 41 | #include <linux/moduleparam.h> |
42 | #include <linux/spinlock.h> | 42 | #include <linux/spinlock.h> |
43 | #include <linux/kernel.h> | 43 | #include <linux/kernel.h> |
44 | #include <linux/slab.h> | 44 | #include <linux/slab.h> |
45 | #include <linux/init.h> | 45 | #include <linux/init.h> |
46 | #include <linux/interrupt.h> | 46 | #include <linux/interrupt.h> |
47 | #include <linux/list.h> | 47 | #include <linux/list.h> |
48 | #include <linux/errno.h> | 48 | #include <linux/errno.h> |
49 | #include <linux/err.h> | 49 | #include <linux/err.h> |
50 | #include <linux/device.h> | 50 | #include <linux/device.h> |
51 | #include <linux/cpu.h> | 51 | #include <linux/cpu.h> |
52 | #include <linux/reboot.h> | 52 | #include <linux/reboot.h> |
53 | #include <net/iucv/iucv.h> | 53 | #include <net/iucv/iucv.h> |
54 | #include <asm/atomic.h> | 54 | #include <asm/atomic.h> |
55 | #include <asm/ebcdic.h> | 55 | #include <asm/ebcdic.h> |
56 | #include <asm/io.h> | 56 | #include <asm/io.h> |
57 | #include <asm/s390_ext.h> | 57 | #include <asm/irq.h> |
58 | #include <asm/smp.h> | 58 | #include <asm/smp.h> |
59 | 59 | ||
60 | /* | 60 | /* |
61 | * FLAGS: | 61 | * FLAGS: |
62 | * All flags are defined in the field IPFLAGS1 of each function | 62 | * All flags are defined in the field IPFLAGS1 of each function |
63 | * and can be found in CP Programming Services. | 63 | * and can be found in CP Programming Services. |
64 | * IPSRCCLS - Indicates you have specified a source class. | 64 | * IPSRCCLS - Indicates you have specified a source class. |
65 | * IPTRGCLS - Indicates you have specified a target class. | 65 | * IPTRGCLS - Indicates you have specified a target class. |
66 | * IPFGPID - Indicates you have specified a pathid. | 66 | * IPFGPID - Indicates you have specified a pathid. |
67 | * IPFGMID - Indicates you have specified a message ID. | 67 | * IPFGMID - Indicates you have specified a message ID. |
68 | * IPNORPY - Indicates a one-way message. No reply expected. | 68 | * IPNORPY - Indicates a one-way message. No reply expected. |
69 | * IPALL - Indicates that all paths are affected. | 69 | * IPALL - Indicates that all paths are affected. |
70 | */ | 70 | */ |
71 | #define IUCV_IPSRCCLS 0x01 | 71 | #define IUCV_IPSRCCLS 0x01 |
72 | #define IUCV_IPTRGCLS 0x01 | 72 | #define IUCV_IPTRGCLS 0x01 |
73 | #define IUCV_IPFGPID 0x02 | 73 | #define IUCV_IPFGPID 0x02 |
74 | #define IUCV_IPFGMID 0x04 | 74 | #define IUCV_IPFGMID 0x04 |
75 | #define IUCV_IPNORPY 0x10 | 75 | #define IUCV_IPNORPY 0x10 |
76 | #define IUCV_IPALL 0x80 | 76 | #define IUCV_IPALL 0x80 |
77 | 77 | ||
78 | static int iucv_bus_match(struct device *dev, struct device_driver *drv) | 78 | static int iucv_bus_match(struct device *dev, struct device_driver *drv) |
79 | { | 79 | { |
80 | return 0; | 80 | return 0; |
81 | } | 81 | } |
82 | 82 | ||
83 | enum iucv_pm_states { | 83 | enum iucv_pm_states { |
84 | IUCV_PM_INITIAL = 0, | 84 | IUCV_PM_INITIAL = 0, |
85 | IUCV_PM_FREEZING = 1, | 85 | IUCV_PM_FREEZING = 1, |
86 | IUCV_PM_THAWING = 2, | 86 | IUCV_PM_THAWING = 2, |
87 | IUCV_PM_RESTORING = 3, | 87 | IUCV_PM_RESTORING = 3, |
88 | }; | 88 | }; |
89 | static enum iucv_pm_states iucv_pm_state; | 89 | static enum iucv_pm_states iucv_pm_state; |
90 | 90 | ||
91 | static int iucv_pm_prepare(struct device *); | 91 | static int iucv_pm_prepare(struct device *); |
92 | static void iucv_pm_complete(struct device *); | 92 | static void iucv_pm_complete(struct device *); |
93 | static int iucv_pm_freeze(struct device *); | 93 | static int iucv_pm_freeze(struct device *); |
94 | static int iucv_pm_thaw(struct device *); | 94 | static int iucv_pm_thaw(struct device *); |
95 | static int iucv_pm_restore(struct device *); | 95 | static int iucv_pm_restore(struct device *); |
96 | 96 | ||
97 | static const struct dev_pm_ops iucv_pm_ops = { | 97 | static const struct dev_pm_ops iucv_pm_ops = { |
98 | .prepare = iucv_pm_prepare, | 98 | .prepare = iucv_pm_prepare, |
99 | .complete = iucv_pm_complete, | 99 | .complete = iucv_pm_complete, |
100 | .freeze = iucv_pm_freeze, | 100 | .freeze = iucv_pm_freeze, |
101 | .thaw = iucv_pm_thaw, | 101 | .thaw = iucv_pm_thaw, |
102 | .restore = iucv_pm_restore, | 102 | .restore = iucv_pm_restore, |
103 | }; | 103 | }; |
104 | 104 | ||
105 | struct bus_type iucv_bus = { | 105 | struct bus_type iucv_bus = { |
106 | .name = "iucv", | 106 | .name = "iucv", |
107 | .match = iucv_bus_match, | 107 | .match = iucv_bus_match, |
108 | .pm = &iucv_pm_ops, | 108 | .pm = &iucv_pm_ops, |
109 | }; | 109 | }; |
110 | EXPORT_SYMBOL(iucv_bus); | 110 | EXPORT_SYMBOL(iucv_bus); |
111 | 111 | ||
112 | struct device *iucv_root; | 112 | struct device *iucv_root; |
113 | EXPORT_SYMBOL(iucv_root); | 113 | EXPORT_SYMBOL(iucv_root); |
114 | 114 | ||
115 | static int iucv_available; | 115 | static int iucv_available; |
116 | 116 | ||
117 | /* General IUCV interrupt structure */ | 117 | /* General IUCV interrupt structure */ |
118 | struct iucv_irq_data { | 118 | struct iucv_irq_data { |
119 | u16 ippathid; | 119 | u16 ippathid; |
120 | u8 ipflags1; | 120 | u8 ipflags1; |
121 | u8 iptype; | 121 | u8 iptype; |
122 | u32 res2[8]; | 122 | u32 res2[8]; |
123 | }; | 123 | }; |
124 | 124 | ||
125 | struct iucv_irq_list { | 125 | struct iucv_irq_list { |
126 | struct list_head list; | 126 | struct list_head list; |
127 | struct iucv_irq_data data; | 127 | struct iucv_irq_data data; |
128 | }; | 128 | }; |
129 | 129 | ||
130 | static struct iucv_irq_data *iucv_irq_data[NR_CPUS]; | 130 | static struct iucv_irq_data *iucv_irq_data[NR_CPUS]; |
131 | static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE }; | 131 | static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE }; |
132 | static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE }; | 132 | static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE }; |
133 | 133 | ||
134 | /* | 134 | /* |
135 | * Queue of interrupt buffers for delivery via the tasklet | 134 | * Queue of interrupt buffers for delivery via the tasklet |
136 | * (fast but can't call smp_call_function). | 136 | * (fast but can't call smp_call_function). |
137 | */ | 137 | */ |
138 | static LIST_HEAD(iucv_task_queue); | 138 | static LIST_HEAD(iucv_task_queue); |
139 | 139 | ||
140 | /* | 140 | /* |
141 | * The tasklet for fast delivery of iucv interrupts. | 141 | * The tasklet for fast delivery of iucv interrupts. |
142 | */ | 142 | */ |
143 | static void iucv_tasklet_fn(unsigned long); | 143 | static void iucv_tasklet_fn(unsigned long); |
144 | static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn,0); | 144 | static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn,0); |
145 | 145 | ||
146 | /* | 146 | /* |
147 | * Queue of interrupt buffers for delivery via a work queue | 147 | * Queue of interrupt buffers for delivery via a work queue |
148 | * (slower but can call smp_call_function). | 148 | * (slower but can call smp_call_function). |
149 | */ | 149 | */ |
150 | static LIST_HEAD(iucv_work_queue); | 150 | static LIST_HEAD(iucv_work_queue); |
151 | 151 | ||
152 | /* | 152 | /* |
153 | * The work element to deliver path pending interrupts. | 153 | * The work element to deliver path pending interrupts. |
154 | */ | 154 | */ |
155 | static void iucv_work_fn(struct work_struct *work); | 155 | static void iucv_work_fn(struct work_struct *work); |
156 | static DECLARE_WORK(iucv_work, iucv_work_fn); | 156 | static DECLARE_WORK(iucv_work, iucv_work_fn); |
157 | 157 | ||
158 | /* | 158 | /* |
159 | * Spinlock protecting task and work queue. | 159 | * Spinlock protecting task and work queue. |
160 | */ | 160 | */ |
161 | static DEFINE_SPINLOCK(iucv_queue_lock); | 161 | static DEFINE_SPINLOCK(iucv_queue_lock); |
162 | 162 | ||
163 | enum iucv_command_codes { | 163 | enum iucv_command_codes { |
164 | IUCV_QUERY = 0, | 164 | IUCV_QUERY = 0, |
165 | IUCV_RETRIEVE_BUFFER = 2, | 165 | IUCV_RETRIEVE_BUFFER = 2, |
166 | IUCV_SEND = 4, | 166 | IUCV_SEND = 4, |
167 | IUCV_RECEIVE = 5, | 167 | IUCV_RECEIVE = 5, |
168 | IUCV_REPLY = 6, | 168 | IUCV_REPLY = 6, |
169 | IUCV_REJECT = 8, | 169 | IUCV_REJECT = 8, |
170 | IUCV_PURGE = 9, | 170 | IUCV_PURGE = 9, |
171 | IUCV_ACCEPT = 10, | 171 | IUCV_ACCEPT = 10, |
172 | IUCV_CONNECT = 11, | 172 | IUCV_CONNECT = 11, |
173 | IUCV_DECLARE_BUFFER = 12, | 173 | IUCV_DECLARE_BUFFER = 12, |
174 | IUCV_QUIESCE = 13, | 174 | IUCV_QUIESCE = 13, |
175 | IUCV_RESUME = 14, | 175 | IUCV_RESUME = 14, |
176 | IUCV_SEVER = 15, | 176 | IUCV_SEVER = 15, |
177 | IUCV_SETMASK = 16, | 177 | IUCV_SETMASK = 16, |
178 | IUCV_SETCONTROLMASK = 17, | 178 | IUCV_SETCONTROLMASK = 17, |
179 | }; | 179 | }; |
180 | 180 | ||
181 | /* | 181 | /* |
182 | * Error messages that are used with the iucv_sever function. They get | 182 | * Error messages that are used with the iucv_sever function. They get |
183 | * converted to EBCDIC. | 183 | * converted to EBCDIC. |
184 | */ | 184 | */ |
185 | static char iucv_error_no_listener[16] = "NO LISTENER"; | 185 | static char iucv_error_no_listener[16] = "NO LISTENER"; |
186 | static char iucv_error_no_memory[16] = "NO MEMORY"; | 186 | static char iucv_error_no_memory[16] = "NO MEMORY"; |
187 | static char iucv_error_pathid[16] = "INVALID PATHID"; | 187 | static char iucv_error_pathid[16] = "INVALID PATHID"; |
188 | 188 | ||
189 | /* | 189 | /* |
190 | * iucv_handler_list: List of registered handlers. | 190 | * iucv_handler_list: List of registered handlers. |
191 | */ | 191 | */ |
192 | static LIST_HEAD(iucv_handler_list); | 192 | static LIST_HEAD(iucv_handler_list); |
193 | 193 | ||
194 | /* | 194 | /* |
195 | * iucv_path_table: an array of iucv_path structures. | 195 | * iucv_path_table: an array of iucv_path structures. |
196 | */ | 196 | */ |
197 | static struct iucv_path **iucv_path_table; | 197 | static struct iucv_path **iucv_path_table; |
198 | static unsigned long iucv_max_pathid; | 198 | static unsigned long iucv_max_pathid; |
199 | 199 | ||
200 | /* | 200 | /* |
201 | * iucv_table_lock: spinlock protecting iucv_handler_list and iucv_path_table | 201 | * iucv_table_lock: spinlock protecting iucv_handler_list and iucv_path_table |
202 | */ | 202 | */ |
203 | static DEFINE_SPINLOCK(iucv_table_lock); | 203 | static DEFINE_SPINLOCK(iucv_table_lock); |
204 | 204 | ||
205 | /* | 205 | /* |
206 | * iucv_active_cpu: contains the number of the cpu executing the tasklet | 206 | * iucv_active_cpu: contains the number of the cpu executing the tasklet |
207 | * or the work handler. Needed for iucv_path_sever called from tasklet. | 207 | * or the work handler. Needed for iucv_path_sever called from tasklet. |
208 | */ | 208 | */ |
209 | static int iucv_active_cpu = -1; | 209 | static int iucv_active_cpu = -1; |
210 | 210 | ||
211 | /* | 211 | /* |
212 | * Mutex and wait queue for iucv_register/iucv_unregister. | 212 | * Mutex and wait queue for iucv_register/iucv_unregister. |
213 | */ | 213 | */ |
214 | static DEFINE_MUTEX(iucv_register_mutex); | 214 | static DEFINE_MUTEX(iucv_register_mutex); |
215 | 215 | ||
216 | /* | 216 | /* |
217 | * Counter for number of non-smp capable handlers. | 217 | * Counter for number of non-smp capable handlers. |
218 | */ | 218 | */ |
219 | static int iucv_nonsmp_handler; | 219 | static int iucv_nonsmp_handler; |
220 | 220 | ||
221 | /* | 221 | /* |
222 | * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect, | 222 | * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect, |
223 | * iucv_path_quiesce and iucv_path_sever. | 223 | * iucv_path_quiesce and iucv_path_sever. |
224 | */ | 224 | */ |
225 | struct iucv_cmd_control { | 225 | struct iucv_cmd_control { |
226 | u16 ippathid; | 226 | u16 ippathid; |
227 | u8 ipflags1; | 227 | u8 ipflags1; |
228 | u8 iprcode; | 228 | u8 iprcode; |
229 | u16 ipmsglim; | 229 | u16 ipmsglim; |
230 | u16 res1; | 230 | u16 res1; |
231 | u8 ipvmid[8]; | 231 | u8 ipvmid[8]; |
232 | u8 ipuser[16]; | 232 | u8 ipuser[16]; |
233 | u8 iptarget[8]; | 233 | u8 iptarget[8]; |
234 | } __attribute__ ((packed,aligned(8))); | 234 | } __attribute__ ((packed,aligned(8))); |
235 | 235 | ||
236 | /* | 236 | /* |
237 | * Data in parameter list iucv structure. Used by iucv_message_send, | 237 | * Data in parameter list iucv structure. Used by iucv_message_send, |
238 | * iucv_message_send2way and iucv_message_reply. | 238 | * iucv_message_send2way and iucv_message_reply. |
239 | */ | 239 | */ |
240 | struct iucv_cmd_dpl { | 240 | struct iucv_cmd_dpl { |
241 | u16 ippathid; | 241 | u16 ippathid; |
242 | u8 ipflags1; | 242 | u8 ipflags1; |
243 | u8 iprcode; | 243 | u8 iprcode; |
244 | u32 ipmsgid; | 244 | u32 ipmsgid; |
245 | u32 iptrgcls; | 245 | u32 iptrgcls; |
246 | u8 iprmmsg[8]; | 246 | u8 iprmmsg[8]; |
247 | u32 ipsrccls; | 247 | u32 ipsrccls; |
248 | u32 ipmsgtag; | 248 | u32 ipmsgtag; |
249 | u32 ipbfadr2; | 249 | u32 ipbfadr2; |
250 | u32 ipbfln2f; | 250 | u32 ipbfln2f; |
251 | u32 res; | 251 | u32 res; |
252 | } __attribute__ ((packed,aligned(8))); | 252 | } __attribute__ ((packed,aligned(8))); |
253 | 253 | ||
254 | /* | 254 | /* |
255 | * Data in buffer iucv structure. Used by iucv_message_receive, | 255 | * Data in buffer iucv structure. Used by iucv_message_receive, |
256 | * iucv_message_reject, iucv_message_send, iucv_message_send2way | 256 | * iucv_message_reject, iucv_message_send, iucv_message_send2way |
257 | * and iucv_declare_cpu. | 257 | * and iucv_declare_cpu. |
258 | */ | 258 | */ |
259 | struct iucv_cmd_db { | 259 | struct iucv_cmd_db { |
260 | u16 ippathid; | 260 | u16 ippathid; |
261 | u8 ipflags1; | 261 | u8 ipflags1; |
262 | u8 iprcode; | 262 | u8 iprcode; |
263 | u32 ipmsgid; | 263 | u32 ipmsgid; |
264 | u32 iptrgcls; | 264 | u32 iptrgcls; |
265 | u32 ipbfadr1; | 265 | u32 ipbfadr1; |
266 | u32 ipbfln1f; | 266 | u32 ipbfln1f; |
267 | u32 ipsrccls; | 267 | u32 ipsrccls; |
268 | u32 ipmsgtag; | 268 | u32 ipmsgtag; |
269 | u32 ipbfadr2; | 269 | u32 ipbfadr2; |
270 | u32 ipbfln2f; | 270 | u32 ipbfln2f; |
271 | u32 res; | 271 | u32 res; |
272 | } __attribute__ ((packed,aligned(8))); | 272 | } __attribute__ ((packed,aligned(8))); |
273 | 273 | ||
274 | /* | 274 | /* |
275 | * Purge message iucv structure. Used by iucv_message_purge. | 275 | * Purge message iucv structure. Used by iucv_message_purge. |
276 | */ | 276 | */ |
277 | struct iucv_cmd_purge { | 277 | struct iucv_cmd_purge { |
278 | u16 ippathid; | 278 | u16 ippathid; |
279 | u8 ipflags1; | 279 | u8 ipflags1; |
280 | u8 iprcode; | 280 | u8 iprcode; |
281 | u32 ipmsgid; | 281 | u32 ipmsgid; |
282 | u8 ipaudit[3]; | 282 | u8 ipaudit[3]; |
283 | u8 res1[5]; | 283 | u8 res1[5]; |
284 | u32 res2; | 284 | u32 res2; |
285 | u32 ipsrccls; | 285 | u32 ipsrccls; |
286 | u32 ipmsgtag; | 286 | u32 ipmsgtag; |
287 | u32 res3[3]; | 287 | u32 res3[3]; |
288 | } __attribute__ ((packed,aligned(8))); | 288 | } __attribute__ ((packed,aligned(8))); |
289 | 289 | ||
290 | /* | 290 | /* |
291 | * Set mask iucv structure. Used by iucv_enable_cpu. | 291 | * Set mask iucv structure. Used by iucv_enable_cpu. |
292 | */ | 292 | */ |
293 | struct iucv_cmd_set_mask { | 293 | struct iucv_cmd_set_mask { |
294 | u8 ipmask; | 294 | u8 ipmask; |
295 | u8 res1[2]; | 295 | u8 res1[2]; |
296 | u8 iprcode; | 296 | u8 iprcode; |
297 | u32 res2[9]; | 297 | u32 res2[9]; |
298 | } __attribute__ ((packed,aligned(8))); | 298 | } __attribute__ ((packed,aligned(8))); |
299 | 299 | ||
300 | union iucv_param { | 300 | union iucv_param { |
301 | struct iucv_cmd_control ctrl; | 301 | struct iucv_cmd_control ctrl; |
302 | struct iucv_cmd_dpl dpl; | 302 | struct iucv_cmd_dpl dpl; |
303 | struct iucv_cmd_db db; | 303 | struct iucv_cmd_db db; |
304 | struct iucv_cmd_purge purge; | 304 | struct iucv_cmd_purge purge; |
305 | struct iucv_cmd_set_mask set_mask; | 305 | struct iucv_cmd_set_mask set_mask; |
306 | }; | 306 | }; |
307 | 307 | ||
308 | /* | 308 | /* |
309 | * Anchor for per-cpu IUCV command parameter block. | 309 | * Anchor for per-cpu IUCV command parameter block. |
310 | */ | 310 | */ |
311 | static union iucv_param *iucv_param[NR_CPUS]; | 311 | static union iucv_param *iucv_param[NR_CPUS]; |
312 | static union iucv_param *iucv_param_irq[NR_CPUS]; | 312 | static union iucv_param *iucv_param_irq[NR_CPUS]; |
313 | 313 | ||
314 | /** | 314 | /** |
315 | * iucv_call_b2f0 | 315 | * iucv_call_b2f0 |
316 | * @command: identifier of IUCV call to CP. | 316 | * @command: identifier of IUCV call to CP. |
317 | * @parm: pointer to a union iucv_param block | 317 | * @parm: pointer to a union iucv_param block |
318 | * | 318 | * |
319 | * Calls CP to execute IUCV commands. | 319 | * Calls CP to execute IUCV commands. |
320 | * | 320 | * |
321 | * Returns the result of the CP IUCV call. | 321 | * Returns the result of the CP IUCV call. |
322 | */ | 322 | */ |
323 | static inline int iucv_call_b2f0(int command, union iucv_param *parm) | 323 | static inline int iucv_call_b2f0(int command, union iucv_param *parm) |
324 | { | 324 | { |
325 | register unsigned long reg0 asm ("0"); | 325 | register unsigned long reg0 asm ("0"); |
326 | register unsigned long reg1 asm ("1"); | 326 | register unsigned long reg1 asm ("1"); |
327 | int ccode; | 327 | int ccode; |
328 | 328 | ||
329 | reg0 = command; | 329 | reg0 = command; |
330 | reg1 = virt_to_phys(parm); | 330 | reg1 = virt_to_phys(parm); |
331 | asm volatile( | 331 | asm volatile( |
332 | " .long 0xb2f01000\n" | 332 | " .long 0xb2f01000\n" |
333 | " ipm %0\n" | 333 | " ipm %0\n" |
334 | " srl %0,28\n" | 334 | " srl %0,28\n" |
335 | : "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1) | 335 | : "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1) |
336 | : "m" (*parm) : "cc"); | 336 | : "m" (*parm) : "cc"); |
337 | return (ccode == 1) ? parm->ctrl.iprcode : ccode; | 337 | return (ccode == 1) ? parm->ctrl.iprcode : ccode; |
338 | } | 338 | } |
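Every IUCV primitive in this file funnels through iucv_call_b2f0(): the command code goes in general register 0, the real address of the parameter block in general register 1, and the B2F0 instruction yields a condition code, with iprcode carrying the detailed reason when the condition code is 1. A sketch of the calling pattern; example_sever is hypothetical, but real callers such as the path functions build the block the same way, and like them it assumes the caller runs with interrupts disabled when using the iucv_param_irq block:

        static int example_sever(u16 pathid, u8 *userdata)
        {
                union iucv_param *parm = iucv_param_irq[smp_processor_id()];

                /* parameter blocks live in DMA-able per-cpu memory and are
                 * cleared before each call */
                memset(parm, 0, sizeof(union iucv_param));
                memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
                parm->ctrl.ippathid = pathid;
                return iucv_call_b2f0(IUCV_SEVER, parm);
        }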
339 | 339 | ||
340 | /** | 340 | /** |
341 | * iucv_query_maxconn | 341 | * iucv_query_maxconn |
342 | * | 342 | * |
343 | * Determines the maximum number of connections that may be established. | 343 | * Determines the maximum number of connections that may be established. |
344 | * | 344 | * |
345 | * Returns 0 after storing the maximum number of connections in | 345 | * Returns 0 after storing the maximum number of connections in |
346 | * iucv_max_pathid, or -EPERM if IUCV is not available. | 346 | * iucv_max_pathid, or -EPERM if IUCV is not available. |
347 | */ | 347 | */ |
348 | static int iucv_query_maxconn(void) | 348 | static int iucv_query_maxconn(void) |
349 | { | 349 | { |
350 | register unsigned long reg0 asm ("0"); | 350 | register unsigned long reg0 asm ("0"); |
351 | register unsigned long reg1 asm ("1"); | 351 | register unsigned long reg1 asm ("1"); |
352 | void *param; | 352 | void *param; |
353 | int ccode; | 353 | int ccode; |
354 | 354 | ||
355 | param = kzalloc(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA); | 355 | param = kzalloc(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA); |
356 | if (!param) | 356 | if (!param) |
357 | return -ENOMEM; | 357 | return -ENOMEM; |
358 | reg0 = IUCV_QUERY; | 358 | reg0 = IUCV_QUERY; |
359 | reg1 = (unsigned long) param; | 359 | reg1 = (unsigned long) param; |
360 | asm volatile ( | 360 | asm volatile ( |
361 | " .long 0xb2f01000\n" | 361 | " .long 0xb2f01000\n" |
362 | " ipm %0\n" | 362 | " ipm %0\n" |
363 | " srl %0,28\n" | 363 | " srl %0,28\n" |
364 | : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc"); | 364 | : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc"); |
365 | if (ccode == 0) | 365 | if (ccode == 0) |
366 | iucv_max_pathid = reg1; | 366 | iucv_max_pathid = reg1; |
367 | kfree(param); | 367 | kfree(param); |
368 | return ccode ? -EPERM : 0; | 368 | return ccode ? -EPERM : 0; |
369 | } | 369 | } |
370 | 370 | ||
371 | /** | 371 | /** |
372 | * iucv_allow_cpu | 372 | * iucv_allow_cpu |
373 | * @data: unused | 373 | * @data: unused |
374 | * | 374 | * |
375 | * Allow iucv interrupts on this cpu. | 375 | * Allow iucv interrupts on this cpu. |
376 | */ | 376 | */ |
377 | static void iucv_allow_cpu(void *data) | 377 | static void iucv_allow_cpu(void *data) |
378 | { | 378 | { |
379 | int cpu = smp_processor_id(); | 379 | int cpu = smp_processor_id(); |
380 | union iucv_param *parm; | 380 | union iucv_param *parm; |
381 | 381 | ||
382 | /* | 382 | /* |
383 | * Enable all iucv interrupts. | 383 | * Enable all iucv interrupts. |
384 | * ipmask contains bits for the different interrupts | 384 | * ipmask contains bits for the different interrupts |
385 | * 0x80 - Flag to allow nonpriority message pending interrupts | 385 | * 0x80 - Flag to allow nonpriority message pending interrupts |
386 | * 0x40 - Flag to allow priority message pending interrupts | 386 | * 0x40 - Flag to allow priority message pending interrupts |
387 | * 0x20 - Flag to allow nonpriority message completion interrupts | 387 | * 0x20 - Flag to allow nonpriority message completion interrupts |
388 | * 0x10 - Flag to allow priority message completion interrupts | 388 | * 0x10 - Flag to allow priority message completion interrupts |
389 | * 0x08 - Flag to allow IUCV control interrupts | 389 | * 0x08 - Flag to allow IUCV control interrupts |
390 | */ | 390 | */ |
391 | parm = iucv_param_irq[cpu]; | 391 | parm = iucv_param_irq[cpu]; |
392 | memset(parm, 0, sizeof(union iucv_param)); | 392 | memset(parm, 0, sizeof(union iucv_param)); |
393 | parm->set_mask.ipmask = 0xf8; | 393 | parm->set_mask.ipmask = 0xf8; |
394 | iucv_call_b2f0(IUCV_SETMASK, parm); | 394 | iucv_call_b2f0(IUCV_SETMASK, parm); |
395 | 395 | ||
396 | /* | 396 | /* |
397 | * Enable all iucv control interrupts. | 397 | * Enable all iucv control interrupts. |
398 | * ipmask contains bits for the different interrupts | 398 | * ipmask contains bits for the different interrupts |
399 | * 0x80 - Flag to allow pending connections interrupts | 399 | * 0x80 - Flag to allow pending connections interrupts |
400 | * 0x40 - Flag to allow connection complete interrupts | 400 | * 0x40 - Flag to allow connection complete interrupts |
401 | * 0x20 - Flag to allow connection severed interrupts | 401 | * 0x20 - Flag to allow connection severed interrupts |
402 | * 0x10 - Flag to allow connection quiesced interrupts | 402 | * 0x10 - Flag to allow connection quiesced interrupts |
403 | * 0x08 - Flag to allow connection resumed interrupts | 403 | * 0x08 - Flag to allow connection resumed interrupts |
404 | */ | 404 | */ |
405 | memset(parm, 0, sizeof(union iucv_param)); | 405 | memset(parm, 0, sizeof(union iucv_param)); |
406 | parm->set_mask.ipmask = 0xf8; | 406 | parm->set_mask.ipmask = 0xf8; |
407 | iucv_call_b2f0(IUCV_SETCONTROLMASK, parm); | 407 | iucv_call_b2f0(IUCV_SETCONTROLMASK, parm); |
408 | /* Set indication that iucv interrupts are allowed for this cpu. */ | 408 | /* Set indication that iucv interrupts are allowed for this cpu. */ |
409 | cpumask_set_cpu(cpu, &iucv_irq_cpumask); | 409 | cpumask_set_cpu(cpu, &iucv_irq_cpumask); |
410 | } | 410 | } |
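The two 0xf8 masks above pack the five flags documented in the preceding comments into a single byte each; IUCV_SETMASK and IUCV_SETCONTROLMASK reuse the same bit positions with the message-interrupt and connection-interrupt meanings respectively. Purely to make 0xf8 readable, the SETMASK bits could be named as below; these constants do not exist in the source:

        /* Illustrative names only -- iucv.c uses the raw 0xf8 value. */
        #define IUCV_MASK_NONPRIO_PENDING   0x80  /* nonpriority message pending */
        #define IUCV_MASK_PRIO_PENDING      0x40  /* priority message pending */
        #define IUCV_MASK_NONPRIO_COMPLETE  0x20  /* nonpriority message completion */
        #define IUCV_MASK_PRIO_COMPLETE     0x10  /* priority message completion */
        #define IUCV_MASK_CONTROL           0x08  /* IUCV control interrupts */
        /* 0xf8 == all five bits set, i.e. allow everything */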
411 | 411 | ||
412 | /** | 412 | /** |
413 | * iucv_block_cpu | 413 | * iucv_block_cpu |
414 | * @data: unused | 414 | * @data: unused |
415 | * | 415 | * |
416 | * Block iucv interrupts on this cpu. | 416 | * Block iucv interrupts on this cpu. |
417 | */ | 417 | */ |
418 | static void iucv_block_cpu(void *data) | 418 | static void iucv_block_cpu(void *data) |
419 | { | 419 | { |
420 | int cpu = smp_processor_id(); | 420 | int cpu = smp_processor_id(); |
421 | union iucv_param *parm; | 421 | union iucv_param *parm; |
422 | 422 | ||
423 | /* Disable all iucv interrupts. */ | 423 | /* Disable all iucv interrupts. */ |
424 | parm = iucv_param_irq[cpu]; | 424 | parm = iucv_param_irq[cpu]; |
425 | memset(parm, 0, sizeof(union iucv_param)); | 425 | memset(parm, 0, sizeof(union iucv_param)); |
426 | iucv_call_b2f0(IUCV_SETMASK, parm); | 426 | iucv_call_b2f0(IUCV_SETMASK, parm); |
427 | 427 | ||
428 | /* Clear indication that iucv interrupts are allowed for this cpu. */ | 428 | /* Clear indication that iucv interrupts are allowed for this cpu. */ |
429 | cpumask_clear_cpu(cpu, &iucv_irq_cpumask); | 429 | cpumask_clear_cpu(cpu, &iucv_irq_cpumask); |
430 | } | 430 | } |
431 | 431 | ||
432 | /** | 432 | /** |
433 | * iucv_block_cpu_almost | 433 | * iucv_block_cpu_almost |
434 | * @data: unused | 434 | * @data: unused |
435 | * | 435 | * |
436 | * Allow connection-severed interrupts only on this cpu. | 436 | * Allow connection-severed interrupts only on this cpu. |
437 | */ | 437 | */ |
438 | static void iucv_block_cpu_almost(void *data) | 438 | static void iucv_block_cpu_almost(void *data) |
439 | { | 439 | { |
440 | int cpu = smp_processor_id(); | 440 | int cpu = smp_processor_id(); |
441 | union iucv_param *parm; | 441 | union iucv_param *parm; |
442 | 442 | ||
443 | /* Allow iucv control interrupts only */ | 443 | /* Allow iucv control interrupts only */ |
444 | parm = iucv_param_irq[cpu]; | 444 | parm = iucv_param_irq[cpu]; |
445 | memset(parm, 0, sizeof(union iucv_param)); | 445 | memset(parm, 0, sizeof(union iucv_param)); |
446 | parm->set_mask.ipmask = 0x08; | 446 | parm->set_mask.ipmask = 0x08; |
447 | iucv_call_b2f0(IUCV_SETMASK, parm); | 447 | iucv_call_b2f0(IUCV_SETMASK, parm); |
448 | /* Allow iucv-severed interrupt only */ | 448 | /* Allow iucv-severed interrupt only */ |
449 | memset(parm, 0, sizeof(union iucv_param)); | 449 | memset(parm, 0, sizeof(union iucv_param)); |
450 | parm->set_mask.ipmask = 0x20; | 450 | parm->set_mask.ipmask = 0x20; |
451 | iucv_call_b2f0(IUCV_SETCONTROLMASK, parm); | 451 | iucv_call_b2f0(IUCV_SETCONTROLMASK, parm); |
452 | 452 | ||
453 | /* Clear indication that iucv interrupts are allowed for this cpu. */ | 453 | /* Clear indication that iucv interrupts are allowed for this cpu. */ |
454 | cpumask_clear_cpu(cpu, &iucv_irq_cpumask); | 454 | cpumask_clear_cpu(cpu, &iucv_irq_cpumask); |
455 | } | 455 | } |
456 | 456 | ||
457 | /** | 457 | /** |
458 | * iucv_declare_cpu | 458 | * iucv_declare_cpu |
459 | * @data: unused | 459 | * @data: unused |
460 | * | 460 | * |
461 | * Declare an interrupt buffer on this cpu. | 461 | * Declare an interrupt buffer on this cpu. |
462 | */ | 462 | */ |
463 | static void iucv_declare_cpu(void *data) | 463 | static void iucv_declare_cpu(void *data) |
464 | { | 464 | { |
465 | int cpu = smp_processor_id(); | 465 | int cpu = smp_processor_id(); |
466 | union iucv_param *parm; | 466 | union iucv_param *parm; |
467 | int rc; | 467 | int rc; |
468 | 468 | ||
469 | if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask)) | 469 | if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask)) |
470 | return; | 470 | return; |
471 | 471 | ||
472 | /* Declare interrupt buffer. */ | 472 | /* Declare interrupt buffer. */ |
473 | parm = iucv_param_irq[cpu]; | 473 | parm = iucv_param_irq[cpu]; |
474 | memset(parm, 0, sizeof(union iucv_param)); | 474 | memset(parm, 0, sizeof(union iucv_param)); |
475 | parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]); | 475 | parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]); |
476 | rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm); | 476 | rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm); |
477 | if (rc) { | 477 | if (rc) { |
478 | char *err = "Unknown"; | 478 | char *err = "Unknown"; |
479 | switch (rc) { | 479 | switch (rc) { |
480 | case 0x03: | 480 | case 0x03: |
481 | err = "Directory error"; | 481 | err = "Directory error"; |
482 | break; | 482 | break; |
483 | case 0x0a: | 483 | case 0x0a: |
484 | err = "Invalid length"; | 484 | err = "Invalid length"; |
485 | break; | 485 | break; |
486 | case 0x13: | 486 | case 0x13: |
487 | err = "Buffer already exists"; | 487 | err = "Buffer already exists"; |
488 | break; | 488 | break; |
489 | case 0x3e: | 489 | case 0x3e: |
490 | err = "Buffer overlap"; | 490 | err = "Buffer overlap"; |
491 | break; | 491 | break; |
492 | case 0x5c: | 492 | case 0x5c: |
493 | err = "Paging or storage error"; | 493 | err = "Paging or storage error"; |
494 | break; | 494 | break; |
495 | } | 495 | } |
496 | pr_warning("Defining an interrupt buffer on CPU %i" | 496 | pr_warning("Defining an interrupt buffer on CPU %i" |
497 | " failed with 0x%02x (%s)\n", cpu, rc, err); | 497 | " failed with 0x%02x (%s)\n", cpu, rc, err); |
498 | return; | 498 | return; |
499 | } | 499 | } |
500 | 500 | ||
501 | /* Set indication that an iucv buffer exists for this cpu. */ | 501 | /* Set indication that an iucv buffer exists for this cpu. */ |
502 | cpumask_set_cpu(cpu, &iucv_buffer_cpumask); | 502 | cpumask_set_cpu(cpu, &iucv_buffer_cpumask); |
503 | 503 | ||
504 | if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask)) | 504 | if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask)) |
505 | /* Enable iucv interrupts on this cpu. */ | 505 | /* Enable iucv interrupts on this cpu. */ |
506 | iucv_allow_cpu(NULL); | 506 | iucv_allow_cpu(NULL); |
507 | else | 507 | else |
508 | /* Disable iucv interrupts on this cpu. */ | 508 | /* Disable iucv interrupts on this cpu. */ |
509 | iucv_block_cpu(NULL); | 509 | iucv_block_cpu(NULL); |
510 | } | 510 | } |
511 | 511 | ||
512 | /** | 512 | /** |
513 | * iucv_retrieve_cpu | 513 | * iucv_retrieve_cpu |
514 | * @data: unused | 514 | * @data: unused |
515 | * | 515 | * |
516 | * Retrieve interrupt buffer on this cpu. | 516 | * Retrieve interrupt buffer on this cpu. |
517 | */ | 517 | */ |
518 | static void iucv_retrieve_cpu(void *data) | 518 | static void iucv_retrieve_cpu(void *data) |
519 | { | 519 | { |
520 | int cpu = smp_processor_id(); | 520 | int cpu = smp_processor_id(); |
521 | union iucv_param *parm; | 521 | union iucv_param *parm; |
522 | 522 | ||
523 | if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask)) | 523 | if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask)) |
524 | return; | 524 | return; |
525 | 525 | ||
526 | /* Block iucv interrupts. */ | 526 | /* Block iucv interrupts. */ |
527 | iucv_block_cpu(NULL); | 527 | iucv_block_cpu(NULL); |
528 | 528 | ||
529 | /* Retrieve interrupt buffer. */ | 529 | /* Retrieve interrupt buffer. */ |
530 | parm = iucv_param_irq[cpu]; | 530 | parm = iucv_param_irq[cpu]; |
531 | iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm); | 531 | iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm); |
532 | 532 | ||
533 | /* Clear indication that an iucv buffer exists for this cpu. */ | 533 | /* Clear indication that an iucv buffer exists for this cpu. */ |
534 | cpumask_clear_cpu(cpu, &iucv_buffer_cpumask); | 534 | cpumask_clear_cpu(cpu, &iucv_buffer_cpumask); |
535 | } | 535 | } |
536 | 536 | ||
537 | /** | 537 | /** |
538 | * iucv_setmask_mp | 538 | * iucv_setmask_mp |
539 | * | 539 | * |
540 | * Allow iucv interrupts on all cpus. | 540 | * Allow iucv interrupts on all cpus. |
541 | */ | 541 | */ |
542 | static void iucv_setmask_mp(void) | 542 | static void iucv_setmask_mp(void) |
543 | { | 543 | { |
544 | int cpu; | 544 | int cpu; |
545 | 545 | ||
546 | get_online_cpus(); | 546 | get_online_cpus(); |
547 | for_each_online_cpu(cpu) | 547 | for_each_online_cpu(cpu) |
548 | /* Enable all cpus with a declared buffer. */ | 548 | /* Enable all cpus with a declared buffer. */ |
549 | if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) && | 549 | if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) && |
550 | !cpumask_test_cpu(cpu, &iucv_irq_cpumask)) | 550 | !cpumask_test_cpu(cpu, &iucv_irq_cpumask)) |
551 | smp_call_function_single(cpu, iucv_allow_cpu, | 551 | smp_call_function_single(cpu, iucv_allow_cpu, |
552 | NULL, 1); | 552 | NULL, 1); |
553 | put_online_cpus(); | 553 | put_online_cpus(); |
554 | } | 554 | } |
555 | 555 | ||
556 | /** | 556 | /** |
557 | * iucv_setmask_up | 557 | * iucv_setmask_up |
558 | * | 558 | * |
559 | * Allow iucv interrupts on a single cpu. | 559 | * Allow iucv interrupts on a single cpu. |
560 | */ | 560 | */ |
561 | static void iucv_setmask_up(void) | 561 | static void iucv_setmask_up(void) |
562 | { | 562 | { |
563 | cpumask_t cpumask; | 563 | cpumask_t cpumask; |
564 | int cpu; | 564 | int cpu; |
565 | 565 | ||
566 | /* Disable all cpus but the first in iucv_irq_cpumask. */ | 566 | /* Disable all cpus but the first in iucv_irq_cpumask. */ |
567 | cpumask_copy(&cpumask, &iucv_irq_cpumask); | 567 | cpumask_copy(&cpumask, &iucv_irq_cpumask); |
568 | cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask); | 568 | cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask); |
569 | for_each_cpu(cpu, &cpumask) | 569 | for_each_cpu(cpu, &cpumask) |
570 | smp_call_function_single(cpu, iucv_block_cpu, NULL, 1); | 570 | smp_call_function_single(cpu, iucv_block_cpu, NULL, 1); |
571 | } | 571 | } |
572 | 572 | ||
573 | /** | 573 | /** |
574 | * iucv_enable | 574 | * iucv_enable |
575 | * | 575 | * |
576 | * This function makes iucv ready for use. It allocates the pathid | 576 | * This function makes iucv ready for use. It allocates the pathid |
577 | * table, declares an iucv interrupt buffer and enables the iucv | 577 | * table, declares an iucv interrupt buffer and enables the iucv |
578 | * interrupts. Called when the first user has registered an iucv | 578 | * interrupts. Called when the first user has registered an iucv |
579 | * handler. | 579 | * handler. |
580 | */ | 580 | */ |
581 | static int iucv_enable(void) | 581 | static int iucv_enable(void) |
582 | { | 582 | { |
583 | size_t alloc_size; | 583 | size_t alloc_size; |
584 | int cpu, rc; | 584 | int cpu, rc; |
585 | 585 | ||
586 | get_online_cpus(); | 586 | get_online_cpus(); |
587 | rc = -ENOMEM; | 587 | rc = -ENOMEM; |
588 | alloc_size = iucv_max_pathid * sizeof(struct iucv_path); | 588 | alloc_size = iucv_max_pathid * sizeof(struct iucv_path); |
589 | iucv_path_table = kzalloc(alloc_size, GFP_KERNEL); | 589 | iucv_path_table = kzalloc(alloc_size, GFP_KERNEL); |
590 | if (!iucv_path_table) | 590 | if (!iucv_path_table) |
591 | goto out; | 591 | goto out; |
592 | /* Declare per cpu buffers. */ | 592 | /* Declare per cpu buffers. */ |
593 | rc = -EIO; | 593 | rc = -EIO; |
594 | for_each_online_cpu(cpu) | 594 | for_each_online_cpu(cpu) |
595 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); | 595 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); |
596 | if (cpumask_empty(&iucv_buffer_cpumask)) | 596 | if (cpumask_empty(&iucv_buffer_cpumask)) |
597 | /* No cpu could declare an iucv buffer. */ | 597 | /* No cpu could declare an iucv buffer. */ |
598 | goto out; | 598 | goto out; |
599 | put_online_cpus(); | 599 | put_online_cpus(); |
600 | return 0; | 600 | return 0; |
601 | out: | 601 | out: |
602 | kfree(iucv_path_table); | 602 | kfree(iucv_path_table); |
603 | iucv_path_table = NULL; | 603 | iucv_path_table = NULL; |
604 | put_online_cpus(); | 604 | put_online_cpus(); |
605 | return rc; | 605 | return rc; |
606 | } | 606 | } |
607 | 607 | ||
608 | /** | 608 | /** |
609 | * iucv_disable | 609 | * iucv_disable |
610 | * | 610 | * |
611 | * This function shuts down iucv. It disables iucv interrupts, retrieves | 611 | * This function shuts down iucv. It disables iucv interrupts, retrieves |
612 | * the iucv interrupt buffer and frees the pathid table. Called after the | 612 | * the iucv interrupt buffer and frees the pathid table. Called after the |
613 | * last user has unregistered its iucv handler. | 613 | * last user has unregistered its iucv handler. |
614 | */ | 614 | */ |
615 | static void iucv_disable(void) | 615 | static void iucv_disable(void) |
616 | { | 616 | { |
617 | get_online_cpus(); | 617 | get_online_cpus(); |
618 | on_each_cpu(iucv_retrieve_cpu, NULL, 1); | 618 | on_each_cpu(iucv_retrieve_cpu, NULL, 1); |
619 | kfree(iucv_path_table); | 619 | kfree(iucv_path_table); |
620 | iucv_path_table = NULL; | 620 | iucv_path_table = NULL; |
621 | put_online_cpus(); | 621 | put_online_cpus(); |
622 | } | 622 | } |
623 | 623 | ||
624 | static int __cpuinit iucv_cpu_notify(struct notifier_block *self, | 624 | static int __cpuinit iucv_cpu_notify(struct notifier_block *self, |
625 | unsigned long action, void *hcpu) | 625 | unsigned long action, void *hcpu) |
626 | { | 626 | { |
627 | cpumask_t cpumask; | 627 | cpumask_t cpumask; |
628 | long cpu = (long) hcpu; | 628 | long cpu = (long) hcpu; |
629 | 629 | ||
630 | switch (action) { | 630 | switch (action) { |
631 | case CPU_UP_PREPARE: | 631 | case CPU_UP_PREPARE: |
632 | case CPU_UP_PREPARE_FROZEN: | 632 | case CPU_UP_PREPARE_FROZEN: |
633 | iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), | 633 | iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), |
634 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 634 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); |
635 | if (!iucv_irq_data[cpu]) | 635 | if (!iucv_irq_data[cpu]) |
636 | return notifier_from_errno(-ENOMEM); | 636 | return notifier_from_errno(-ENOMEM); |
637 | 637 | ||
638 | iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), | 638 | iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), |
639 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 639 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); |
640 | if (!iucv_param[cpu]) { | 640 | if (!iucv_param[cpu]) { |
641 | kfree(iucv_irq_data[cpu]); | 641 | kfree(iucv_irq_data[cpu]); |
642 | iucv_irq_data[cpu] = NULL; | 642 | iucv_irq_data[cpu] = NULL; |
643 | return notifier_from_errno(-ENOMEM); | 643 | return notifier_from_errno(-ENOMEM); |
644 | } | 644 | } |
645 | iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), | 645 | iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), |
646 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 646 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); |
647 | if (!iucv_param_irq[cpu]) { | 647 | if (!iucv_param_irq[cpu]) { |
648 | kfree(iucv_param[cpu]); | 648 | kfree(iucv_param[cpu]); |
649 | iucv_param[cpu] = NULL; | 649 | iucv_param[cpu] = NULL; |
650 | kfree(iucv_irq_data[cpu]); | 650 | kfree(iucv_irq_data[cpu]); |
651 | iucv_irq_data[cpu] = NULL; | 651 | iucv_irq_data[cpu] = NULL; |
652 | return notifier_from_errno(-ENOMEM); | 652 | return notifier_from_errno(-ENOMEM); |
653 | } | 653 | } |
654 | break; | 654 | break; |
655 | case CPU_UP_CANCELED: | 655 | case CPU_UP_CANCELED: |
656 | case CPU_UP_CANCELED_FROZEN: | 656 | case CPU_UP_CANCELED_FROZEN: |
657 | case CPU_DEAD: | 657 | case CPU_DEAD: |
658 | case CPU_DEAD_FROZEN: | 658 | case CPU_DEAD_FROZEN: |
659 | kfree(iucv_param_irq[cpu]); | 659 | kfree(iucv_param_irq[cpu]); |
660 | iucv_param_irq[cpu] = NULL; | 660 | iucv_param_irq[cpu] = NULL; |
661 | kfree(iucv_param[cpu]); | 661 | kfree(iucv_param[cpu]); |
662 | iucv_param[cpu] = NULL; | 662 | iucv_param[cpu] = NULL; |
663 | kfree(iucv_irq_data[cpu]); | 663 | kfree(iucv_irq_data[cpu]); |
664 | iucv_irq_data[cpu] = NULL; | 664 | iucv_irq_data[cpu] = NULL; |
665 | break; | 665 | break; |
666 | case CPU_ONLINE: | 666 | case CPU_ONLINE: |
667 | case CPU_ONLINE_FROZEN: | 667 | case CPU_ONLINE_FROZEN: |
668 | case CPU_DOWN_FAILED: | 668 | case CPU_DOWN_FAILED: |
669 | case CPU_DOWN_FAILED_FROZEN: | 669 | case CPU_DOWN_FAILED_FROZEN: |
670 | if (!iucv_path_table) | 670 | if (!iucv_path_table) |
671 | break; | 671 | break; |
672 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); | 672 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); |
673 | break; | 673 | break; |
674 | case CPU_DOWN_PREPARE: | 674 | case CPU_DOWN_PREPARE: |
675 | case CPU_DOWN_PREPARE_FROZEN: | 675 | case CPU_DOWN_PREPARE_FROZEN: |
676 | if (!iucv_path_table) | 676 | if (!iucv_path_table) |
677 | break; | 677 | break; |
678 | cpumask_copy(&cpumask, &iucv_buffer_cpumask); | 678 | cpumask_copy(&cpumask, &iucv_buffer_cpumask); |
679 | cpumask_clear_cpu(cpu, &cpumask); | 679 | cpumask_clear_cpu(cpu, &cpumask); |
680 | if (cpumask_empty(&cpumask)) | 680 | if (cpumask_empty(&cpumask)) |
681 | /* Can't offline last IUCV enabled cpu. */ | 681 | /* Can't offline last IUCV enabled cpu. */ |
682 | return notifier_from_errno(-EINVAL); | 682 | return notifier_from_errno(-EINVAL); |
683 | smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1); | 683 | smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1); |
684 | if (cpumask_empty(&iucv_irq_cpumask)) | 684 | if (cpumask_empty(&iucv_irq_cpumask)) |
685 | smp_call_function_single( | 685 | smp_call_function_single( |
686 | cpumask_first(&iucv_buffer_cpumask), | 686 | cpumask_first(&iucv_buffer_cpumask), |
687 | iucv_allow_cpu, NULL, 1); | 687 | iucv_allow_cpu, NULL, 1); |
688 | break; | 688 | break; |
689 | } | 689 | } |
690 | return NOTIFY_OK; | 690 | return NOTIFY_OK; |
691 | } | 691 | } |
692 | 692 | ||
693 | static struct notifier_block __refdata iucv_cpu_notifier = { | 693 | static struct notifier_block __refdata iucv_cpu_notifier = { |
694 | .notifier_call = iucv_cpu_notify, | 694 | .notifier_call = iucv_cpu_notify, |
695 | }; | 695 | }; |
696 | 696 | ||
697 | /** | 697 | /** |
698 | * iucv_sever_pathid | 698 | * iucv_sever_pathid |
699 | * @pathid: path identification number. | 699 | * @pathid: path identification number. |
700 | * @userdata: 16 bytes of user data. | 700 | * @userdata: 16 bytes of user data. |
701 | * | 701 | * |
702 | * Sever an iucv path to free up the pathid. Used internally. | 702 | * Sever an iucv path to free up the pathid. Used internally. |
703 | */ | 703 | */ |
704 | static int iucv_sever_pathid(u16 pathid, u8 userdata[16]) | 704 | static int iucv_sever_pathid(u16 pathid, u8 userdata[16]) |
705 | { | 705 | { |
706 | union iucv_param *parm; | 706 | union iucv_param *parm; |
707 | 707 | ||
708 | parm = iucv_param_irq[smp_processor_id()]; | 708 | parm = iucv_param_irq[smp_processor_id()]; |
709 | memset(parm, 0, sizeof(union iucv_param)); | 709 | memset(parm, 0, sizeof(union iucv_param)); |
710 | if (userdata) | 710 | if (userdata) |
711 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | 711 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); |
712 | parm->ctrl.ippathid = pathid; | 712 | parm->ctrl.ippathid = pathid; |
713 | return iucv_call_b2f0(IUCV_SEVER, parm); | 713 | return iucv_call_b2f0(IUCV_SEVER, parm); |
714 | } | 714 | } |
715 | 715 | ||
716 | /** | 716 | /** |
717 | * __iucv_cleanup_queue | 717 | * __iucv_cleanup_queue |
718 | * @dummy: unused dummy argument | 718 | * @dummy: unused dummy argument |
719 | * | 719 | * |
720 | * Nop function called via smp_call_function to force work items from | 720 | * Nop function called via smp_call_function to force work items from |
721 | * pending external iucv interrupts to the work queue. | 721 | * pending external iucv interrupts to the work queue. |
722 | */ | 722 | */ |
723 | static void __iucv_cleanup_queue(void *dummy) | 723 | static void __iucv_cleanup_queue(void *dummy) |
724 | { | 724 | { |
725 | } | 725 | } |
726 | 726 | ||
727 | /** | 727 | /** |
728 | * iucv_cleanup_queue | 728 | * iucv_cleanup_queue |
729 | * | 729 | * |
730 | * Function called after a path has been severed to find all remaining | 730 | * Function called after a path has been severed to find all remaining |
731 | * work items for the now stale pathid. The caller needs to hold the | 731 | * work items for the now stale pathid. The caller needs to hold the |
732 | * iucv_table_lock. | 732 | * iucv_table_lock. |
733 | */ | 733 | */ |
734 | static void iucv_cleanup_queue(void) | 734 | static void iucv_cleanup_queue(void) |
735 | { | 735 | { |
736 | struct iucv_irq_list *p, *n; | 736 | struct iucv_irq_list *p, *n; |
737 | 737 | ||
738 | /* | 738 | /* |
739 | * When a path is severed, the pathid can be reused immediately | 739 | * When a path is severed, the pathid can be reused immediately |
740 | * on an iucv connect or a connection pending interrupt. Remove | 740 | * on an iucv connect or a connection pending interrupt. Remove |
741 | * all entries from the task queue that refer to a stale pathid | 741 | * all entries from the task queue that refer to a stale pathid |
742 | * (iucv_path_table[ix] == NULL). Only then do the iucv connect | 742 | * (iucv_path_table[ix] == NULL). Only then do the iucv connect |
743 | * or deliver the connection pending interrupt. To get all the | 743 | * or deliver the connection pending interrupt. To get all the |
744 | * pending interrupts, force them to the work queue by calling | 744 | * pending interrupts, force them to the work queue by calling |
745 | * an empty function on all cpus. | 745 | * an empty function on all cpus. |
746 | */ | 746 | */ |
747 | smp_call_function(__iucv_cleanup_queue, NULL, 1); | 747 | smp_call_function(__iucv_cleanup_queue, NULL, 1); |
748 | spin_lock_irq(&iucv_queue_lock); | 748 | spin_lock_irq(&iucv_queue_lock); |
749 | list_for_each_entry_safe(p, n, &iucv_task_queue, list) { | 749 | list_for_each_entry_safe(p, n, &iucv_task_queue, list) { |
750 | /* Remove stale work items from the task queue. */ | 750 | /* Remove stale work items from the task queue. */ |
751 | if (iucv_path_table[p->data.ippathid] == NULL) { | 751 | if (iucv_path_table[p->data.ippathid] == NULL) { |
752 | list_del(&p->list); | 752 | list_del(&p->list); |
753 | kfree(p); | 753 | kfree(p); |
754 | } | 754 | } |
755 | } | 755 | } |
756 | spin_unlock_irq(&iucv_queue_lock); | 756 | spin_unlock_irq(&iucv_queue_lock); |
757 | } | 757 | } |
758 | 758 | ||
759 | /** | 759 | /** |
760 | * iucv_register: | 760 | * iucv_register: |
761 | * @handler: address of iucv handler structure | 761 | * @handler: address of iucv handler structure |
762 | * @smp: != 0 indicates that the handler can deal with out-of-order messages | 762 | * @smp: != 0 indicates that the handler can deal with out-of-order messages |
763 | * | 763 | * |
764 | * Registers a driver with IUCV. | 764 | * Registers a driver with IUCV. |
765 | * | 765 | * |
766 | * Returns 0 on success, -ENOMEM if the memory allocation for the pathid | 766 | * Returns 0 on success, -ENOMEM if the memory allocation for the pathid |
767 | * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus. | 767 | * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus. |
768 | */ | 768 | */ |
769 | int iucv_register(struct iucv_handler *handler, int smp) | 769 | int iucv_register(struct iucv_handler *handler, int smp) |
770 | { | 770 | { |
771 | int rc; | 771 | int rc; |
772 | 772 | ||
773 | if (!iucv_available) | 773 | if (!iucv_available) |
774 | return -ENOSYS; | 774 | return -ENOSYS; |
775 | mutex_lock(&iucv_register_mutex); | 775 | mutex_lock(&iucv_register_mutex); |
776 | if (!smp) | 776 | if (!smp) |
777 | iucv_nonsmp_handler++; | 777 | iucv_nonsmp_handler++; |
778 | if (list_empty(&iucv_handler_list)) { | 778 | if (list_empty(&iucv_handler_list)) { |
779 | rc = iucv_enable(); | 779 | rc = iucv_enable(); |
780 | if (rc) | 780 | if (rc) |
781 | goto out_mutex; | 781 | goto out_mutex; |
782 | } else if (!smp && iucv_nonsmp_handler == 1) | 782 | } else if (!smp && iucv_nonsmp_handler == 1) |
783 | iucv_setmask_up(); | 783 | iucv_setmask_up(); |
784 | INIT_LIST_HEAD(&handler->paths); | 784 | INIT_LIST_HEAD(&handler->paths); |
785 | 785 | ||
786 | spin_lock_bh(&iucv_table_lock); | 786 | spin_lock_bh(&iucv_table_lock); |
787 | list_add_tail(&handler->list, &iucv_handler_list); | 787 | list_add_tail(&handler->list, &iucv_handler_list); |
788 | spin_unlock_bh(&iucv_table_lock); | 788 | spin_unlock_bh(&iucv_table_lock); |
789 | rc = 0; | 789 | rc = 0; |
790 | out_mutex: | 790 | out_mutex: |
791 | mutex_unlock(&iucv_register_mutex); | 791 | mutex_unlock(&iucv_register_mutex); |
792 | return rc; | 792 | return rc; |
793 | } | 793 | } |
794 | EXPORT_SYMBOL(iucv_register); | 794 | EXPORT_SYMBOL(iucv_register); |
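
For context, a minimal sketch of how a driver would use this pair of calls (not part of this patch; the handler name and callbacks are hypothetical, while the struct iucv_handler layout is the one declared in include/net/iucv/iucv.h):

	#include <net/iucv/iucv.h>

	static int my_path_pending(struct iucv_path *, u8 *, u8 *);
	static void my_message_pending(struct iucv_path *, struct iucv_message *);

	static struct iucv_handler my_handler = {
		.path_pending	 = my_path_pending,
		.message_pending = my_message_pending,
	};

	static int __init my_init(void)
	{
		/* smp != 0: this handler copes with out-of-order message delivery. */
		return iucv_register(&my_handler, 1);
	}

	static void __exit my_exit(void)
	{
		/* Severs any paths still owned by the handler, see iucv_unregister. */
		iucv_unregister(&my_handler, 1);
	}
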
795 | 795 | ||
796 | /** | 796 | /** |
797 | * iucv_unregister | 797 | * iucv_unregister |
798 | * @handler: address of iucv handler structure | 798 | * @handler: address of iucv handler structure |
799 | * @smp: != 0 indicates that the handler can deal with out-of-order messages | 799 | * @smp: != 0 indicates that the handler can deal with out-of-order messages |
800 | * | 800 | * |
801 | * Unregisters a driver from IUCV. | 801 | * Unregisters a driver from IUCV. |
802 | */ | 802 | */ |
803 | void iucv_unregister(struct iucv_handler *handler, int smp) | 803 | void iucv_unregister(struct iucv_handler *handler, int smp) |
804 | { | 804 | { |
805 | struct iucv_path *p, *n; | 805 | struct iucv_path *p, *n; |
806 | 806 | ||
807 | mutex_lock(&iucv_register_mutex); | 807 | mutex_lock(&iucv_register_mutex); |
808 | spin_lock_bh(&iucv_table_lock); | 808 | spin_lock_bh(&iucv_table_lock); |
809 | /* Remove handler from the iucv_handler_list. */ | 809 | /* Remove handler from the iucv_handler_list. */ |
810 | list_del_init(&handler->list); | 810 | list_del_init(&handler->list); |
811 | /* Sever all pathids still referring to the handler. */ | 811 | /* Sever all pathids still referring to the handler. */ |
812 | list_for_each_entry_safe(p, n, &handler->paths, list) { | 812 | list_for_each_entry_safe(p, n, &handler->paths, list) { |
813 | iucv_sever_pathid(p->pathid, NULL); | 813 | iucv_sever_pathid(p->pathid, NULL); |
814 | iucv_path_table[p->pathid] = NULL; | 814 | iucv_path_table[p->pathid] = NULL; |
815 | list_del(&p->list); | 815 | list_del(&p->list); |
816 | iucv_path_free(p); | 816 | iucv_path_free(p); |
817 | } | 817 | } |
818 | spin_unlock_bh(&iucv_table_lock); | 818 | spin_unlock_bh(&iucv_table_lock); |
819 | if (!smp) | 819 | if (!smp) |
820 | iucv_nonsmp_handler--; | 820 | iucv_nonsmp_handler--; |
821 | if (list_empty(&iucv_handler_list)) | 821 | if (list_empty(&iucv_handler_list)) |
822 | iucv_disable(); | 822 | iucv_disable(); |
823 | else if (!smp && iucv_nonsmp_handler == 0) | 823 | else if (!smp && iucv_nonsmp_handler == 0) |
824 | iucv_setmask_mp(); | 824 | iucv_setmask_mp(); |
825 | mutex_unlock(&iucv_register_mutex); | 825 | mutex_unlock(&iucv_register_mutex); |
826 | } | 826 | } |
827 | EXPORT_SYMBOL(iucv_unregister); | 827 | EXPORT_SYMBOL(iucv_unregister); |
828 | 828 | ||
829 | static int iucv_reboot_event(struct notifier_block *this, | 829 | static int iucv_reboot_event(struct notifier_block *this, |
830 | unsigned long event, void *ptr) | 830 | unsigned long event, void *ptr) |
831 | { | 831 | { |
832 | int i; | 832 | int i; |
833 | 833 | ||
834 | get_online_cpus(); | 834 | get_online_cpus(); |
835 | on_each_cpu(iucv_block_cpu, NULL, 1); | 835 | on_each_cpu(iucv_block_cpu, NULL, 1); |
836 | preempt_disable(); | 836 | preempt_disable(); |
837 | for (i = 0; i < iucv_max_pathid; i++) { | 837 | for (i = 0; i < iucv_max_pathid; i++) { |
838 | if (iucv_path_table[i]) | 838 | if (iucv_path_table[i]) |
839 | iucv_sever_pathid(i, NULL); | 839 | iucv_sever_pathid(i, NULL); |
840 | } | 840 | } |
841 | preempt_enable(); | 841 | preempt_enable(); |
842 | put_online_cpus(); | 842 | put_online_cpus(); |
843 | iucv_disable(); | 843 | iucv_disable(); |
844 | return NOTIFY_DONE; | 844 | return NOTIFY_DONE; |
845 | } | 845 | } |
846 | 846 | ||
847 | static struct notifier_block iucv_reboot_notifier = { | 847 | static struct notifier_block iucv_reboot_notifier = { |
848 | .notifier_call = iucv_reboot_event, | 848 | .notifier_call = iucv_reboot_event, |
849 | }; | 849 | }; |
850 | 850 | ||
851 | /** | 851 | /** |
852 | * iucv_path_accept | 852 | * iucv_path_accept |
853 | * @path: address of iucv path structure | 853 | * @path: address of iucv path structure |
854 | * @handler: address of iucv handler structure | 854 | * @handler: address of iucv handler structure |
855 | * @userdata: 16 bytes of data reflected to the communication partner | 855 | * @userdata: 16 bytes of data reflected to the communication partner |
856 | * @private: private data passed to interrupt handlers for this path | 856 | * @private: private data passed to interrupt handlers for this path |
857 | * | 857 | * |
858 | * This function is issued after the user received a connection pending | 858 | * This function is issued after the user received a connection pending |
859 | * external interrupt and now wishes to complete the IUCV communication path. | 859 | * external interrupt and now wishes to complete the IUCV communication path. |
860 | * | 860 | * |
861 | * Returns the result of the CP IUCV call. | 861 | * Returns the result of the CP IUCV call. |
862 | */ | 862 | */ |
863 | int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, | 863 | int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, |
864 | u8 userdata[16], void *private) | 864 | u8 userdata[16], void *private) |
865 | { | 865 | { |
866 | union iucv_param *parm; | 866 | union iucv_param *parm; |
867 | int rc; | 867 | int rc; |
868 | 868 | ||
869 | local_bh_disable(); | 869 | local_bh_disable(); |
870 | if (cpumask_empty(&iucv_buffer_cpumask)) { | 870 | if (cpumask_empty(&iucv_buffer_cpumask)) { |
871 | rc = -EIO; | 871 | rc = -EIO; |
872 | goto out; | 872 | goto out; |
873 | } | 873 | } |
874 | /* Prepare parameter block. */ | 874 | /* Prepare parameter block. */ |
875 | parm = iucv_param[smp_processor_id()]; | 875 | parm = iucv_param[smp_processor_id()]; |
876 | memset(parm, 0, sizeof(union iucv_param)); | 876 | memset(parm, 0, sizeof(union iucv_param)); |
877 | parm->ctrl.ippathid = path->pathid; | 877 | parm->ctrl.ippathid = path->pathid; |
878 | parm->ctrl.ipmsglim = path->msglim; | 878 | parm->ctrl.ipmsglim = path->msglim; |
879 | if (userdata) | 879 | if (userdata) |
880 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | 880 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); |
881 | parm->ctrl.ipflags1 = path->flags; | 881 | parm->ctrl.ipflags1 = path->flags; |
882 | 882 | ||
883 | rc = iucv_call_b2f0(IUCV_ACCEPT, parm); | 883 | rc = iucv_call_b2f0(IUCV_ACCEPT, parm); |
884 | if (!rc) { | 884 | if (!rc) { |
885 | path->private = private; | 885 | path->private = private; |
886 | path->msglim = parm->ctrl.ipmsglim; | 886 | path->msglim = parm->ctrl.ipmsglim; |
887 | path->flags = parm->ctrl.ipflags1; | 887 | path->flags = parm->ctrl.ipflags1; |
888 | } | 888 | } |
889 | out: | 889 | out: |
890 | local_bh_enable(); | 890 | local_bh_enable(); |
891 | return rc; | 891 | return rc; |
892 | } | 892 | } |
893 | EXPORT_SYMBOL(iucv_path_accept); | 893 | EXPORT_SYMBOL(iucv_path_accept); |
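
As a sketch of the receiving side (hypothetical callback code, assuming the IUCV core severs and frees a path that no handler accepts): a path_pending callback typically either accepts the offered path or refuses it by returning nonzero:

	static int my_path_pending(struct iucv_path *path, u8 *ipvmid, u8 *ipuser)
	{
		/* Returning nonzero lets the IUCV core sever and free the path. */
		if (memcmp(ipvmid, "PEERUSER", 8))	/* hypothetical peer check */
			return -EINVAL;
		path->msglim = 10;	/* outstanding messages we are willing to take */
		path->flags = 0;
		return iucv_path_accept(path, &my_handler, NULL, NULL);
	}
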
894 | 894 | ||
895 | /** | 895 | /** |
896 | * iucv_path_connect | 896 | * iucv_path_connect |
897 | * @path: address of iucv path structure | 897 | * @path: address of iucv path structure |
898 | * @handler: address of iucv handler structure | 898 | * @handler: address of iucv handler structure |
899 | * @userid: 8-byte user identification | 899 | * @userid: 8-byte user identification |
900 | * @system: 8-byte target system identification | 900 | * @system: 8-byte target system identification |
901 | * @userdata: 16 bytes of data reflected to the communication partner | 901 | * @userdata: 16 bytes of data reflected to the communication partner |
902 | * @private: private data passed to interrupt handlers for this path | 902 | * @private: private data passed to interrupt handlers for this path |
903 | * | 903 | * |
904 | * This function establishes an IUCV path. Although the connect may complete | 904 | * This function establishes an IUCV path. Although the connect may complete |
905 | * successfully, you are not able to use the path until you receive an IUCV | 905 | * successfully, you are not able to use the path until you receive an IUCV |
906 | * Connection Complete external interrupt. | 906 | * Connection Complete external interrupt. |
907 | * | 907 | * |
908 | * Returns the result of the CP IUCV call. | 908 | * Returns the result of the CP IUCV call. |
909 | */ | 909 | */ |
910 | int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler, | 910 | int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler, |
911 | u8 userid[8], u8 system[8], u8 userdata[16], | 911 | u8 userid[8], u8 system[8], u8 userdata[16], |
912 | void *private) | 912 | void *private) |
913 | { | 913 | { |
914 | union iucv_param *parm; | 914 | union iucv_param *parm; |
915 | int rc; | 915 | int rc; |
916 | 916 | ||
917 | spin_lock_bh(&iucv_table_lock); | 917 | spin_lock_bh(&iucv_table_lock); |
918 | iucv_cleanup_queue(); | 918 | iucv_cleanup_queue(); |
919 | if (cpumask_empty(&iucv_buffer_cpumask)) { | 919 | if (cpumask_empty(&iucv_buffer_cpumask)) { |
920 | rc = -EIO; | 920 | rc = -EIO; |
921 | goto out; | 921 | goto out; |
922 | } | 922 | } |
923 | parm = iucv_param[smp_processor_id()]; | 923 | parm = iucv_param[smp_processor_id()]; |
924 | memset(parm, 0, sizeof(union iucv_param)); | 924 | memset(parm, 0, sizeof(union iucv_param)); |
925 | parm->ctrl.ipmsglim = path->msglim; | 925 | parm->ctrl.ipmsglim = path->msglim; |
926 | parm->ctrl.ipflags1 = path->flags; | 926 | parm->ctrl.ipflags1 = path->flags; |
927 | if (userid) { | 927 | if (userid) { |
928 | memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid)); | 928 | memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid)); |
929 | ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid)); | 929 | ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid)); |
930 | EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid)); | 930 | EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid)); |
931 | } | 931 | } |
932 | if (system) { | 932 | if (system) { |
933 | memcpy(parm->ctrl.iptarget, system, | 933 | memcpy(parm->ctrl.iptarget, system, |
934 | sizeof(parm->ctrl.iptarget)); | 934 | sizeof(parm->ctrl.iptarget)); |
935 | ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget)); | 935 | ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget)); |
936 | EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget)); | 936 | EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget)); |
937 | } | 937 | } |
938 | if (userdata) | 938 | if (userdata) |
939 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | 939 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); |
940 | 940 | ||
941 | rc = iucv_call_b2f0(IUCV_CONNECT, parm); | 941 | rc = iucv_call_b2f0(IUCV_CONNECT, parm); |
942 | if (!rc) { | 942 | if (!rc) { |
943 | if (parm->ctrl.ippathid < iucv_max_pathid) { | 943 | if (parm->ctrl.ippathid < iucv_max_pathid) { |
944 | path->pathid = parm->ctrl.ippathid; | 944 | path->pathid = parm->ctrl.ippathid; |
945 | path->msglim = parm->ctrl.ipmsglim; | 945 | path->msglim = parm->ctrl.ipmsglim; |
946 | path->flags = parm->ctrl.ipflags1; | 946 | path->flags = parm->ctrl.ipflags1; |
947 | path->handler = handler; | 947 | path->handler = handler; |
948 | path->private = private; | 948 | path->private = private; |
949 | list_add_tail(&path->list, &handler->paths); | 949 | list_add_tail(&path->list, &handler->paths); |
950 | iucv_path_table[path->pathid] = path; | 950 | iucv_path_table[path->pathid] = path; |
951 | } else { | 951 | } else { |
952 | iucv_sever_pathid(parm->ctrl.ippathid, | 952 | iucv_sever_pathid(parm->ctrl.ippathid, |
953 | iucv_error_pathid); | 953 | iucv_error_pathid); |
954 | rc = -EIO; | 954 | rc = -EIO; |
955 | } | 955 | } |
956 | } | 956 | } |
957 | out: | 957 | out: |
958 | spin_unlock_bh(&iucv_table_lock); | 958 | spin_unlock_bh(&iucv_table_lock); |
959 | return rc; | 959 | return rc; |
960 | } | 960 | } |
961 | EXPORT_SYMBOL(iucv_path_connect); | 961 | EXPORT_SYMBOL(iucv_path_connect); |
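
The initiating side, as a sketch (the peer user id is hypothetical; iucv_path_alloc and iucv_path_free are inline helpers from include/net/iucv/iucv.h):

	static struct iucv_path *my_connect(void)
	{
		u8 peer[8] = "PEERUSER";	/* hypothetical z/VM user id, blank padded */
		struct iucv_path *path;
		int rc;

		path = iucv_path_alloc(16, 0, GFP_KERNEL);	/* msglim 16, flags 0 */
		if (!path)
			return NULL;
		rc = iucv_path_connect(path, &my_handler, peer, NULL, NULL, NULL);
		if (rc) {
			iucv_path_free(path);
			return NULL;
		}
		/* Usable only after the path_complete callback has fired. */
		return path;
	}
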
962 | 962 | ||
963 | /** | 963 | /** |
964 | * iucv_path_quiesce: | 964 | * iucv_path_quiesce: |
965 | * @path: address of iucv path structure | 965 | * @path: address of iucv path structure |
966 | * @userdata: 16 bytes of data reflected to the communication partner | 966 | * @userdata: 16 bytes of data reflected to the communication partner |
967 | * | 967 | * |
968 | * This function temporarily suspends incoming messages on an IUCV path. | 968 | * This function temporarily suspends incoming messages on an IUCV path. |
969 | * You can later reactivate the path by invoking iucv_path_resume. | 969 | * You can later reactivate the path by invoking iucv_path_resume. |
970 | * | 970 | * |
971 | * Returns the result from the CP IUCV call. | 971 | * Returns the result from the CP IUCV call. |
972 | */ | 972 | */ |
973 | int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16]) | 973 | int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16]) |
974 | { | 974 | { |
975 | union iucv_param *parm; | 975 | union iucv_param *parm; |
976 | int rc; | 976 | int rc; |
977 | 977 | ||
978 | local_bh_disable(); | 978 | local_bh_disable(); |
979 | if (cpumask_empty(&iucv_buffer_cpumask)) { | 979 | if (cpumask_empty(&iucv_buffer_cpumask)) { |
980 | rc = -EIO; | 980 | rc = -EIO; |
981 | goto out; | 981 | goto out; |
982 | } | 982 | } |
983 | parm = iucv_param[smp_processor_id()]; | 983 | parm = iucv_param[smp_processor_id()]; |
984 | memset(parm, 0, sizeof(union iucv_param)); | 984 | memset(parm, 0, sizeof(union iucv_param)); |
985 | if (userdata) | 985 | if (userdata) |
986 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | 986 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); |
987 | parm->ctrl.ippathid = path->pathid; | 987 | parm->ctrl.ippathid = path->pathid; |
988 | rc = iucv_call_b2f0(IUCV_QUIESCE, parm); | 988 | rc = iucv_call_b2f0(IUCV_QUIESCE, parm); |
989 | out: | 989 | out: |
990 | local_bh_enable(); | 990 | local_bh_enable(); |
991 | return rc; | 991 | return rc; |
992 | } | 992 | } |
993 | EXPORT_SYMBOL(iucv_path_quiesce); | 993 | EXPORT_SYMBOL(iucv_path_quiesce); |
994 | 994 | ||
995 | /** | 995 | /** |
996 | * iucv_path_resume: | 996 | * iucv_path_resume: |
997 | * @path: address of iucv path structure | 997 | * @path: address of iucv path structure |
998 | * @userdata: 16 bytes of data reflected to the communication partner | 998 | * @userdata: 16 bytes of data reflected to the communication partner |
999 | * | 999 | * |
1000 | * This function resumes incoming messages on an IUCV path that has | 1000 | * This function resumes incoming messages on an IUCV path that has |
1001 | * been stopped with iucv_path_quiesce. | 1001 | * been stopped with iucv_path_quiesce. |
1002 | * | 1002 | * |
1003 | * Returns the result from the CP IUCV call. | 1003 | * Returns the result from the CP IUCV call. |
1004 | */ | 1004 | */ |
1005 | int iucv_path_resume(struct iucv_path *path, u8 userdata[16]) | 1005 | int iucv_path_resume(struct iucv_path *path, u8 userdata[16]) |
1006 | { | 1006 | { |
1007 | union iucv_param *parm; | 1007 | union iucv_param *parm; |
1008 | int rc; | 1008 | int rc; |
1009 | 1009 | ||
1010 | local_bh_disable(); | 1010 | local_bh_disable(); |
1011 | if (cpumask_empty(&iucv_buffer_cpumask)) { | 1011 | if (cpumask_empty(&iucv_buffer_cpumask)) { |
1012 | rc = -EIO; | 1012 | rc = -EIO; |
1013 | goto out; | 1013 | goto out; |
1014 | } | 1014 | } |
1015 | parm = iucv_param[smp_processor_id()]; | 1015 | parm = iucv_param[smp_processor_id()]; |
1016 | memset(parm, 0, sizeof(union iucv_param)); | 1016 | memset(parm, 0, sizeof(union iucv_param)); |
1017 | if (userdata) | 1017 | if (userdata) |
1018 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | 1018 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); |
1019 | parm->ctrl.ippathid = path->pathid; | 1019 | parm->ctrl.ippathid = path->pathid; |
1020 | rc = iucv_call_b2f0(IUCV_RESUME, parm); | 1020 | rc = iucv_call_b2f0(IUCV_RESUME, parm); |
1021 | out: | 1021 | out: |
1022 | local_bh_enable(); | 1022 | local_bh_enable(); |
1023 | return rc; | 1023 | return rc; |
1024 | } | 1024 | } |
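
A sketch of pairing the two calls for simple flow control (function name hypothetical):

	static int my_throttle(struct iucv_path *path, bool stop)
	{
		/* NULL userdata: nothing is reflected to the partner. */
		return stop ? iucv_path_quiesce(path, NULL)
			    : iucv_path_resume(path, NULL);
	}
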
1025 | 1025 | ||
1026 | /** | 1026 | /** |
1027 | * iucv_path_sever | 1027 | * iucv_path_sever |
1028 | * @path: address of iucv path structure | 1028 | * @path: address of iucv path structure |
1029 | * @userdata: 16 bytes of data reflected to the communication partner | 1029 | * @userdata: 16 bytes of data reflected to the communication partner |
1030 | * | 1030 | * |
1031 | * This function terminates an IUCV path. | 1031 | * This function terminates an IUCV path. |
1032 | * | 1032 | * |
1033 | * Returns the result from the CP IUCV call. | 1033 | * Returns the result from the CP IUCV call. |
1034 | */ | 1034 | */ |
1035 | int iucv_path_sever(struct iucv_path *path, u8 userdata[16]) | 1035 | int iucv_path_sever(struct iucv_path *path, u8 userdata[16]) |
1036 | { | 1036 | { |
1037 | int rc; | 1037 | int rc; |
1038 | 1038 | ||
1039 | preempt_disable(); | 1039 | preempt_disable(); |
1040 | if (cpumask_empty(&iucv_buffer_cpumask)) { | 1040 | if (cpumask_empty(&iucv_buffer_cpumask)) { |
1041 | rc = -EIO; | 1041 | rc = -EIO; |
1042 | goto out; | 1042 | goto out; |
1043 | } | 1043 | } |
1044 | if (iucv_active_cpu != smp_processor_id()) | 1044 | if (iucv_active_cpu != smp_processor_id()) |
1045 | spin_lock_bh(&iucv_table_lock); | 1045 | spin_lock_bh(&iucv_table_lock); |
1046 | rc = iucv_sever_pathid(path->pathid, userdata); | 1046 | rc = iucv_sever_pathid(path->pathid, userdata); |
1047 | iucv_path_table[path->pathid] = NULL; | 1047 | iucv_path_table[path->pathid] = NULL; |
1048 | list_del_init(&path->list); | 1048 | list_del_init(&path->list); |
1049 | if (iucv_active_cpu != smp_processor_id()) | 1049 | if (iucv_active_cpu != smp_processor_id()) |
1050 | spin_unlock_bh(&iucv_table_lock); | 1050 | spin_unlock_bh(&iucv_table_lock); |
1051 | out: | 1051 | out: |
1052 | preempt_enable(); | 1052 | preempt_enable(); |
1053 | return rc; | 1053 | return rc; |
1054 | } | 1054 | } |
1055 | EXPORT_SYMBOL(iucv_path_sever); | 1055 | EXPORT_SYMBOL(iucv_path_sever); |
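
A teardown sketch; note that the path structure itself stays owned by the caller and still has to be freed:

	static void my_close(struct iucv_path *path)
	{
		iucv_path_sever(path, NULL);	/* terminate; entry removed from table */
		iucv_path_free(path);		/* structure remains the caller's to free */
	}
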
1056 | 1056 | ||
1057 | /** | 1057 | /** |
1058 | * iucv_message_purge | 1058 | * iucv_message_purge |
1059 | * @path: address of iucv path structure | 1059 | * @path: address of iucv path structure |
1060 | * @msg: address of iucv msg structure | 1060 | * @msg: address of iucv msg structure |
1061 | * @srccls: source class of message | 1061 | * @srccls: source class of message |
1062 | * | 1062 | * |
1063 | * Cancels a message you have sent. | 1063 | * Cancels a message you have sent. |
1064 | * | 1064 | * |
1065 | * Returns the result from the CP IUCV call. | 1065 | * Returns the result from the CP IUCV call. |
1066 | */ | 1066 | */ |
1067 | int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg, | 1067 | int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg, |
1068 | u32 srccls) | 1068 | u32 srccls) |
1069 | { | 1069 | { |
1070 | union iucv_param *parm; | 1070 | union iucv_param *parm; |
1071 | int rc; | 1071 | int rc; |
1072 | 1072 | ||
1073 | local_bh_disable(); | 1073 | local_bh_disable(); |
1074 | if (cpumask_empty(&iucv_buffer_cpumask)) { | 1074 | if (cpumask_empty(&iucv_buffer_cpumask)) { |
1075 | rc = -EIO; | 1075 | rc = -EIO; |
1076 | goto out; | 1076 | goto out; |
1077 | } | 1077 | } |
1078 | parm = iucv_param[smp_processor_id()]; | 1078 | parm = iucv_param[smp_processor_id()]; |
1079 | memset(parm, 0, sizeof(union iucv_param)); | 1079 | memset(parm, 0, sizeof(union iucv_param)); |
1080 | parm->purge.ippathid = path->pathid; | 1080 | parm->purge.ippathid = path->pathid; |
1081 | parm->purge.ipmsgid = msg->id; | 1081 | parm->purge.ipmsgid = msg->id; |
1082 | parm->purge.ipsrccls = srccls; | 1082 | parm->purge.ipsrccls = srccls; |
1083 | parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID; | 1083 | parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID; |
1084 | rc = iucv_call_b2f0(IUCV_PURGE, parm); | 1084 | rc = iucv_call_b2f0(IUCV_PURGE, parm); |
1085 | if (!rc) { | 1085 | if (!rc) { |
1086 | msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8; | 1086 | msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8; |
1087 | msg->tag = parm->purge.ipmsgtag; | 1087 | msg->tag = parm->purge.ipmsgtag; |
1088 | } | 1088 | } |
1089 | out: | 1089 | out: |
1090 | local_bh_enable(); | 1090 | local_bh_enable(); |
1091 | return rc; | 1091 | return rc; |
1092 | } | 1092 | } |
1093 | EXPORT_SYMBOL(iucv_message_purge); | 1093 | EXPORT_SYMBOL(iucv_message_purge); |
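
A cancellation sketch (source class 0 assumed; on success CP fills in the audit and tag fields):

	static int my_cancel(struct iucv_path *path, struct iucv_message *msg)
	{
		int rc;

		rc = iucv_message_purge(path, msg, 0);
		if (!rc)
			pr_debug("purged msg tag %u, audit 0x%x\n", msg->tag, msg->audit);
		return rc;
	}
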
1094 | 1094 | ||
1095 | /** | 1095 | /** |
1096 | * iucv_message_receive_iprmdata | 1096 | * iucv_message_receive_iprmdata |
1097 | * @path: address of iucv path structure | 1097 | * @path: address of iucv path structure |
1098 | * @msg: address of iucv msg structure | 1098 | * @msg: address of iucv msg structure |
1099 | * @flags: how the message is received (IUCV_IPBUFLST) | 1099 | * @flags: how the message is received (IUCV_IPBUFLST) |
1100 | * @buffer: address of data buffer or address of struct iucv_array | 1100 | * @buffer: address of data buffer or address of struct iucv_array |
1101 | * @size: length of data buffer | 1101 | * @size: length of data buffer |
1102 | * @residual: optional pointer receiving the residual byte count | 1102 | * @residual: optional pointer receiving the residual byte count |
1103 | * | 1103 | * |
1104 | * Internal function used by iucv_message_receive and __iucv_message_receive | 1104 | * Internal function used by iucv_message_receive and __iucv_message_receive |
1105 | * to receive RMDATA data stored in struct iucv_message. | 1105 | * to receive RMDATA data stored in struct iucv_message. |
1106 | */ | 1106 | */ |
1107 | static int iucv_message_receive_iprmdata(struct iucv_path *path, | 1107 | static int iucv_message_receive_iprmdata(struct iucv_path *path, |
1108 | struct iucv_message *msg, | 1108 | struct iucv_message *msg, |
1109 | u8 flags, void *buffer, | 1109 | u8 flags, void *buffer, |
1110 | size_t size, size_t *residual) | 1110 | size_t size, size_t *residual) |
1111 | { | 1111 | { |
1112 | struct iucv_array *array; | 1112 | struct iucv_array *array; |
1113 | u8 *rmmsg; | 1113 | u8 *rmmsg; |
1114 | size_t copy; | 1114 | size_t copy; |
1115 | 1115 | ||
1116 | /* | 1116 | /* |
1117 | * Message is 8 bytes long and has been stored to the | 1117 | * Message is 8 bytes long and has been stored to the |
1118 | * message descriptor itself. | 1118 | * message descriptor itself. |
1119 | */ | 1119 | */ |
1120 | if (residual) | 1120 | if (residual) |
1121 | *residual = abs(size - 8); | 1121 | *residual = abs(size - 8); |
1122 | rmmsg = msg->rmmsg; | 1122 | rmmsg = msg->rmmsg; |
1123 | if (flags & IUCV_IPBUFLST) { | 1123 | if (flags & IUCV_IPBUFLST) { |
1124 | /* Copy to struct iucv_array. */ | 1124 | /* Copy to struct iucv_array. */ |
1125 | size = (size < 8) ? size : 8; | 1125 | size = (size < 8) ? size : 8; |
1126 | for (array = buffer; size > 0; array++) { | 1126 | for (array = buffer; size > 0; array++) { |
1127 | copy = min_t(size_t, size, array->length); | 1127 | copy = min_t(size_t, size, array->length); |
1128 | memcpy((u8 *)(addr_t) array->address, | 1128 | memcpy((u8 *)(addr_t) array->address, |
1129 | rmmsg, copy); | 1129 | rmmsg, copy); |
1130 | rmmsg += copy; | 1130 | rmmsg += copy; |
1131 | size -= copy; | 1131 | size -= copy; |
1132 | } | 1132 | } |
1133 | } else { | 1133 | } else { |
1134 | /* Copy to direct buffer. */ | 1134 | /* Copy to direct buffer. */ |
1135 | memcpy(buffer, rmmsg, min_t(size_t, size, 8)); | 1135 | memcpy(buffer, rmmsg, min_t(size_t, size, 8)); |
1136 | } | 1136 | } |
1137 | return 0; | 1137 | return 0; |
1138 | } | 1138 | } |
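
To make the IUCV_IPBUFLST case concrete, a sketch of receiving into a two-fragment scatter list (buffer names hypothetical; struct iucv_array is declared in include/net/iucv/iucv.h):

	static int my_gather_receive(struct iucv_path *path, struct iucv_message *msg,
				     void *hdr, u32 hdr_len, void *data, u32 data_len)
	{
		struct iucv_array ia[2];

		ia[0].address = (u32)(addr_t) hdr;	/* first fragment: header */
		ia[0].length  = hdr_len;
		ia[1].address = (u32)(addr_t) data;	/* second fragment: payload */
		ia[1].length  = data_len;
		return iucv_message_receive(path, msg, IUCV_IPBUFLST, ia,
					    hdr_len + data_len, NULL);
	}
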
1139 | 1139 | ||
1140 | /** | 1140 | /** |
1141 | * __iucv_message_receive | 1141 | * __iucv_message_receive |
1142 | * @path: address of iucv path structure | 1142 | * @path: address of iucv path structure |
1143 | * @msg: address of iucv msg structure | 1143 | * @msg: address of iucv msg structure |
1144 | * @flags: how the message is received (IUCV_IPBUFLST) | 1144 | * @flags: how the message is received (IUCV_IPBUFLST) |
1145 | * @buffer: address of data buffer or address of struct iucv_array | 1145 | * @buffer: address of data buffer or address of struct iucv_array |
1146 | * @size: length of data buffer | 1146 | * @size: length of data buffer |
1147 | * @residual: optional pointer receiving the residual byte count | 1147 | * @residual: optional pointer receiving the residual byte count |
1148 | * | 1148 | * |
1149 | * This function receives messages that are being sent to you over | 1149 | * This function receives messages that are being sent to you over |
1150 | * established paths. This function will deal with RMDATA messages | 1150 | * established paths. This function will deal with RMDATA messages |
1151 | * embedded in struct iucv_message as well. | 1151 | * embedded in struct iucv_message as well. |
1152 | * | 1152 | * |
1153 | * Locking: no locking | 1153 | * Locking: no locking |
1154 | * | 1154 | * |
1155 | * Returns the result from the CP IUCV call. | 1155 | * Returns the result from the CP IUCV call. |
1156 | */ | 1156 | */ |
1157 | int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, | 1157 | int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, |
1158 | u8 flags, void *buffer, size_t size, size_t *residual) | 1158 | u8 flags, void *buffer, size_t size, size_t *residual) |
1159 | { | 1159 | { |
1160 | union iucv_param *parm; | 1160 | union iucv_param *parm; |
1161 | int rc; | 1161 | int rc; |
1162 | 1162 | ||
1163 | if (msg->flags & IUCV_IPRMDATA) | 1163 | if (msg->flags & IUCV_IPRMDATA) |
1164 | return iucv_message_receive_iprmdata(path, msg, flags, | 1164 | return iucv_message_receive_iprmdata(path, msg, flags, |
1165 | buffer, size, residual); | 1165 | buffer, size, residual); |
1166 | if (cpumask_empty(&iucv_buffer_cpumask)) { | 1166 | if (cpumask_empty(&iucv_buffer_cpumask)) { |
1167 | rc = -EIO; | 1167 | rc = -EIO; |
1168 | goto out; | 1168 | goto out; |
1169 | } | 1169 | } |
1170 | parm = iucv_param[smp_processor_id()]; | 1170 | parm = iucv_param[smp_processor_id()]; |
1171 | memset(parm, 0, sizeof(union iucv_param)); | 1171 | memset(parm, 0, sizeof(union iucv_param)); |
1172 | parm->db.ipbfadr1 = (u32)(addr_t) buffer; | 1172 | parm->db.ipbfadr1 = (u32)(addr_t) buffer; |
1173 | parm->db.ipbfln1f = (u32) size; | 1173 | parm->db.ipbfln1f = (u32) size; |
1174 | parm->db.ipmsgid = msg->id; | 1174 | parm->db.ipmsgid = msg->id; |
1175 | parm->db.ippathid = path->pathid; | 1175 | parm->db.ippathid = path->pathid; |
1176 | parm->db.iptrgcls = msg->class; | 1176 | parm->db.iptrgcls = msg->class; |
1177 | parm->db.ipflags1 = (flags | IUCV_IPFGPID | | 1177 | parm->db.ipflags1 = (flags | IUCV_IPFGPID | |
1178 | IUCV_IPFGMID | IUCV_IPTRGCLS); | 1178 | IUCV_IPFGMID | IUCV_IPTRGCLS); |
1179 | rc = iucv_call_b2f0(IUCV_RECEIVE, parm); | 1179 | rc = iucv_call_b2f0(IUCV_RECEIVE, parm); |
1180 | if (!rc || rc == 5) { | 1180 | if (!rc || rc == 5) { |
1181 | msg->flags = parm->db.ipflags1; | 1181 | msg->flags = parm->db.ipflags1; |
1182 | if (residual) | 1182 | if (residual) |
1183 | *residual = parm->db.ipbfln1f; | 1183 | *residual = parm->db.ipbfln1f; |
1184 | } | 1184 | } |
1185 | out: | 1185 | out: |
1186 | return rc; | 1186 | return rc; |
1187 | } | 1187 | } |
1188 | EXPORT_SYMBOL(__iucv_message_receive); | 1188 | EXPORT_SYMBOL(__iucv_message_receive); |
1189 | 1189 | ||
1190 | /** | 1190 | /** |
1191 | * iucv_message_receive | 1191 | * iucv_message_receive |
1192 | * @path: address of iucv path structure | 1192 | * @path: address of iucv path structure |
1193 | * @msg: address of iucv msg structure | 1193 | * @msg: address of iucv msg structure |
1194 | * @flags: how the message is received (IUCV_IPBUFLST) | 1194 | * @flags: how the message is received (IUCV_IPBUFLST) |
1195 | * @buffer: address of data buffer or address of struct iucv_array | 1195 | * @buffer: address of data buffer or address of struct iucv_array |
1196 | * @size: length of data buffer | 1196 | * @size: length of data buffer |
1197 | * @residual: optional pointer receiving the residual byte count | 1197 | * @residual: optional pointer receiving the residual byte count |
1198 | * | 1198 | * |
1199 | * This function receives messages that are being sent to you over | 1199 | * This function receives messages that are being sent to you over |
1200 | * established paths. This function will deal with RMDATA messages | 1200 | * established paths. This function will deal with RMDATA messages |
1201 | * embedded in struct iucv_message as well. | 1201 | * embedded in struct iucv_message as well. |
1202 | * | 1202 | * |
1203 | * Locking: local_bh_enable/local_bh_disable | 1203 | * Locking: local_bh_enable/local_bh_disable |
1204 | * | 1204 | * |
1205 | * Returns the result from the CP IUCV call. | 1205 | * Returns the result from the CP IUCV call. |
1206 | */ | 1206 | */ |
1207 | int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, | 1207 | int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, |
1208 | u8 flags, void *buffer, size_t size, size_t *residual) | 1208 | u8 flags, void *buffer, size_t size, size_t *residual) |
1209 | { | 1209 | { |
1210 | int rc; | 1210 | int rc; |
1211 | 1211 | ||
1212 | if (msg->flags & IUCV_IPRMDATA) | 1212 | if (msg->flags & IUCV_IPRMDATA) |
1213 | return iucv_message_receive_iprmdata(path, msg, flags, | 1213 | return iucv_message_receive_iprmdata(path, msg, flags, |
1214 | buffer, size, residual); | 1214 | buffer, size, residual); |
1215 | local_bh_disable(); | 1215 | local_bh_disable(); |
1216 | rc = __iucv_message_receive(path, msg, flags, buffer, size, residual); | 1216 | rc = __iucv_message_receive(path, msg, flags, buffer, size, residual); |
1217 | local_bh_enable(); | 1217 | local_bh_enable(); |
1218 | return rc; | 1218 | return rc; |
1219 | } | 1219 | } |
1220 | EXPORT_SYMBOL(iucv_message_receive); | 1220 | EXPORT_SYMBOL(iucv_message_receive); |
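
A sketch of the usual call site, the message_pending callback (which runs in tasklet context, hence GFP_ATOMIC); a message that cannot be buffered is refused with iucv_message_reject:

	static void my_message_pending(struct iucv_path *path, struct iucv_message *msg)
	{
		u8 *buf;

		buf = kmalloc(msg->length, GFP_ATOMIC);
		if (!buf) {
			iucv_message_reject(path, msg);
			return;
		}
		if (iucv_message_receive(path, msg, 0, buf, msg->length, NULL))
			kfree(buf);
		/* otherwise hand buf to the upper layer (omitted) */
	}
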
1221 | 1221 | ||
1222 | /** | 1222 | /** |
1223 | * iucv_message_reject | 1223 | * iucv_message_reject |
1224 | * @path: address of iucv path structure | 1224 | * @path: address of iucv path structure |
1225 | * @msg: address of iucv msg structure | 1225 | * @msg: address of iucv msg structure |
1226 | * | 1226 | * |
1227 | * The reject function refuses a specified message. Between the time you | 1227 | * The reject function refuses a specified message. Between the time you |
1228 | * are notified of a message and the time that you complete the message, | 1228 | * are notified of a message and the time that you complete the message, |
1229 | * the message may be rejected. | 1229 | * the message may be rejected. |
1230 | * | 1230 | * |
1231 | * Returns the result from the CP IUCV call. | 1231 | * Returns the result from the CP IUCV call. |
1232 | */ | 1232 | */ |
1233 | int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg) | 1233 | int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg) |
1234 | { | 1234 | { |
1235 | union iucv_param *parm; | 1235 | union iucv_param *parm; |
1236 | int rc; | 1236 | int rc; |
1237 | 1237 | ||
1238 | local_bh_disable(); | 1238 | local_bh_disable(); |
1239 | if (cpumask_empty(&iucv_buffer_cpumask)) { | 1239 | if (cpumask_empty(&iucv_buffer_cpumask)) { |
1240 | rc = -EIO; | 1240 | rc = -EIO; |
1241 | goto out; | 1241 | goto out; |
1242 | } | 1242 | } |
1243 | parm = iucv_param[smp_processor_id()]; | 1243 | parm = iucv_param[smp_processor_id()]; |
1244 | memset(parm, 0, sizeof(union iucv_param)); | 1244 | memset(parm, 0, sizeof(union iucv_param)); |
1245 | parm->db.ippathid = path->pathid; | 1245 | parm->db.ippathid = path->pathid; |
1246 | parm->db.ipmsgid = msg->id; | 1246 | parm->db.ipmsgid = msg->id; |
1247 | parm->db.iptrgcls = msg->class; | 1247 | parm->db.iptrgcls = msg->class; |
1248 | parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID); | 1248 | parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID); |
1249 | rc = iucv_call_b2f0(IUCV_REJECT, parm); | 1249 | rc = iucv_call_b2f0(IUCV_REJECT, parm); |
1250 | out: | 1250 | out: |
1251 | local_bh_enable(); | 1251 | local_bh_enable(); |
1252 | return rc; | 1252 | return rc; |
1253 | } | 1253 | } |
1254 | EXPORT_SYMBOL(iucv_message_reject); | 1254 | EXPORT_SYMBOL(iucv_message_reject); |
1255 | 1255 | ||
1256 | /** | 1256 | /** |
1257 | * iucv_message_reply | 1257 | * iucv_message_reply |
1258 | * @path: address of iucv path structure | 1258 | * @path: address of iucv path structure |
1259 | * @msg: address of iucv msg structure | 1259 | * @msg: address of iucv msg structure |
1260 | * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) | 1260 | * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) |
1261 | * @reply: address of reply data buffer or address of struct iucv_array | 1261 | * @reply: address of reply data buffer or address of struct iucv_array |
1262 | * @size: length of reply data buffer | 1262 | * @size: length of reply data buffer |
1263 | * | 1263 | * |
1264 | * This function responds to the two-way messages that you receive. You | 1264 | * This function responds to the two-way messages that you receive. You |
1265 | * must completely identify the message to which you wish to reply, i.e., | 1265 | * must completely identify the message to which you wish to reply, i.e., |
1266 | * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into | 1266 | * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into |
1267 | * the parameter list. | 1267 | * the parameter list. |
1268 | * | 1268 | * |
1269 | * Returns the result from the CP IUCV call. | 1269 | * Returns the result from the CP IUCV call. |
1270 | */ | 1270 | */ |
1271 | int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg, | 1271 | int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg, |
1272 | u8 flags, void *reply, size_t size) | 1272 | u8 flags, void *reply, size_t size) |
1273 | { | 1273 | { |
1274 | union iucv_param *parm; | 1274 | union iucv_param *parm; |
1275 | int rc; | 1275 | int rc; |
1276 | 1276 | ||
1277 | local_bh_disable(); | 1277 | local_bh_disable(); |
1278 | if (cpumask_empty(&iucv_buffer_cpumask)) { | 1278 | if (cpumask_empty(&iucv_buffer_cpumask)) { |
1279 | rc = -EIO; | 1279 | rc = -EIO; |
1280 | goto out; | 1280 | goto out; |
1281 | } | 1281 | } |
1282 | parm = iucv_param[smp_processor_id()]; | 1282 | parm = iucv_param[smp_processor_id()]; |
1283 | memset(parm, 0, sizeof(union iucv_param)); | 1283 | memset(parm, 0, sizeof(union iucv_param)); |
1284 | if (flags & IUCV_IPRMDATA) { | 1284 | if (flags & IUCV_IPRMDATA) { |
1285 | parm->dpl.ippathid = path->pathid; | 1285 | parm->dpl.ippathid = path->pathid; |
1286 | parm->dpl.ipflags1 = flags; | 1286 | parm->dpl.ipflags1 = flags; |
1287 | parm->dpl.ipmsgid = msg->id; | 1287 | parm->dpl.ipmsgid = msg->id; |
1288 | parm->dpl.iptrgcls = msg->class; | 1288 | parm->dpl.iptrgcls = msg->class; |
1289 | memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8)); | 1289 | memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8)); |
1290 | } else { | 1290 | } else { |
1291 | parm->db.ipbfadr1 = (u32)(addr_t) reply; | 1291 | parm->db.ipbfadr1 = (u32)(addr_t) reply; |
1292 | parm->db.ipbfln1f = (u32) size; | 1292 | parm->db.ipbfln1f = (u32) size; |
1293 | parm->db.ippathid = path->pathid; | 1293 | parm->db.ippathid = path->pathid; |
1294 | parm->db.ipflags1 = flags; | 1294 | parm->db.ipflags1 = flags; |
1295 | parm->db.ipmsgid = msg->id; | 1295 | parm->db.ipmsgid = msg->id; |
1296 | parm->db.iptrgcls = msg->class; | 1296 | parm->db.iptrgcls = msg->class; |
1297 | } | 1297 | } |
1298 | rc = iucv_call_b2f0(IUCV_REPLY, parm); | 1298 | rc = iucv_call_b2f0(IUCV_REPLY, parm); |
1299 | out: | 1299 | out: |
1300 | local_bh_enable(); | 1300 | local_bh_enable(); |
1301 | return rc; | 1301 | return rc; |
1302 | } | 1302 | } |
1303 | EXPORT_SYMBOL(iucv_message_reply); | 1303 | EXPORT_SYMBOL(iucv_message_reply); |
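
A reply sketch, assuming the path was established with PRMDATA allowed so an 8-byte answer travels in the parameter list itself:

	static void my_answer(struct iucv_path *path, struct iucv_message *msg)
	{
		u8 ok[8] = "OK      ";	/* exactly 8 bytes, fits IUCV_IPRMDATA */

		if (iucv_message_reply(path, msg, IUCV_IPRMDATA, ok, 8))
			pr_warn("iucv: reply failed\n");
	}
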
1304 | 1304 | ||
1305 | /** | 1305 | /** |
1306 | * __iucv_message_send | 1306 | * __iucv_message_send |
1307 | * @path: address of iucv path structure | 1307 | * @path: address of iucv path structure |
1308 | * @msg: address of iucv msg structure | 1308 | * @msg: address of iucv msg structure |
1309 | * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) | 1309 | * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) |
1310 | * @srccls: source class of message | 1310 | * @srccls: source class of message |
1311 | * @buffer: address of send buffer or address of struct iucv_array | 1311 | * @buffer: address of send buffer or address of struct iucv_array |
1312 | * @size: length of send buffer | 1312 | * @size: length of send buffer |
1313 | * | 1313 | * |
1314 | * This function transmits data to another application. Data to be | 1314 | * This function transmits data to another application. Data to be |
1315 | * transmitted is in a buffer. This is a one-way message; the | 1315 | * transmitted is in a buffer. This is a one-way message; the |
1316 | * receiver will not reply to it. | 1316 | * receiver will not reply to it. |
1317 | * | 1317 | * |
1318 | * Locking: no locking | 1318 | * Locking: no locking |
1319 | * | 1319 | * |
1320 | * Returns the result from the CP IUCV call. | 1320 | * Returns the result from the CP IUCV call. |
1321 | */ | 1321 | */ |
1322 | int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg, | 1322 | int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg, |
1323 | u8 flags, u32 srccls, void *buffer, size_t size) | 1323 | u8 flags, u32 srccls, void *buffer, size_t size) |
1324 | { | 1324 | { |
1325 | union iucv_param *parm; | 1325 | union iucv_param *parm; |
1326 | int rc; | 1326 | int rc; |
1327 | 1327 | ||
1328 | if (cpumask_empty(&iucv_buffer_cpumask)) { | 1328 | if (cpumask_empty(&iucv_buffer_cpumask)) { |
1329 | rc = -EIO; | 1329 | rc = -EIO; |
1330 | goto out; | 1330 | goto out; |
1331 | } | 1331 | } |
1332 | parm = iucv_param[smp_processor_id()]; | 1332 | parm = iucv_param[smp_processor_id()]; |
1333 | memset(parm, 0, sizeof(union iucv_param)); | 1333 | memset(parm, 0, sizeof(union iucv_param)); |
1334 | if (flags & IUCV_IPRMDATA) { | 1334 | if (flags & IUCV_IPRMDATA) { |
1335 | /* Message of 8 bytes can be placed into the parameter list. */ | 1335 | /* Message of 8 bytes can be placed into the parameter list. */ |
1336 | parm->dpl.ippathid = path->pathid; | 1336 | parm->dpl.ippathid = path->pathid; |
1337 | parm->dpl.ipflags1 = flags | IUCV_IPNORPY; | 1337 | parm->dpl.ipflags1 = flags | IUCV_IPNORPY; |
1338 | parm->dpl.iptrgcls = msg->class; | 1338 | parm->dpl.iptrgcls = msg->class; |
1339 | parm->dpl.ipsrccls = srccls; | 1339 | parm->dpl.ipsrccls = srccls; |
1340 | parm->dpl.ipmsgtag = msg->tag; | 1340 | parm->dpl.ipmsgtag = msg->tag; |
1341 | memcpy(parm->dpl.iprmmsg, buffer, 8); | 1341 | memcpy(parm->dpl.iprmmsg, buffer, 8); |
1342 | } else { | 1342 | } else { |
1343 | parm->db.ipbfadr1 = (u32)(addr_t) buffer; | 1343 | parm->db.ipbfadr1 = (u32)(addr_t) buffer; |
1344 | parm->db.ipbfln1f = (u32) size; | 1344 | parm->db.ipbfln1f = (u32) size; |
1345 | parm->db.ippathid = path->pathid; | 1345 | parm->db.ippathid = path->pathid; |
1346 | parm->db.ipflags1 = flags | IUCV_IPNORPY; | 1346 | parm->db.ipflags1 = flags | IUCV_IPNORPY; |
1347 | parm->db.iptrgcls = msg->class; | 1347 | parm->db.iptrgcls = msg->class; |
1348 | parm->db.ipsrccls = srccls; | 1348 | parm->db.ipsrccls = srccls; |
1349 | parm->db.ipmsgtag = msg->tag; | 1349 | parm->db.ipmsgtag = msg->tag; |
1350 | } | 1350 | } |
1351 | rc = iucv_call_b2f0(IUCV_SEND, parm); | 1351 | rc = iucv_call_b2f0(IUCV_SEND, parm); |
1352 | if (!rc) | 1352 | if (!rc) |
1353 | msg->id = parm->db.ipmsgid; | 1353 | msg->id = parm->db.ipmsgid; |
1354 | out: | 1354 | out: |
1355 | return rc; | 1355 | return rc; |
1356 | } | 1356 | } |
1357 | EXPORT_SYMBOL(__iucv_message_send); | 1357 | EXPORT_SYMBOL(__iucv_message_send); |
1358 | 1358 | ||
1359 | /** | 1359 | /** |
1360 | * iucv_message_send | 1360 | * iucv_message_send |
1361 | * @path: address of iucv path structure | 1361 | * @path: address of iucv path structure |
1362 | * @msg: address of iucv msg structure | 1362 | * @msg: address of iucv msg structure |
1363 | * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) | 1363 | * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) |
1364 | * @srccls: source class of message | 1364 | * @srccls: source class of message |
1365 | * @buffer: address of send buffer or address of struct iucv_array | 1365 | * @buffer: address of send buffer or address of struct iucv_array |
1366 | * @size: length of send buffer | 1366 | * @size: length of send buffer |
1367 | * | 1367 | * |
1368 | * This function transmits data to another application. Data to be | 1368 | * This function transmits data to another application. Data to be |
1369 | * transmitted is in a buffer. This is a one-way message; the | 1369 | * transmitted is in a buffer. This is a one-way message; the |
1370 | * receiver will not reply to it. | 1370 | * receiver will not reply to it. |
1371 | * | 1371 | * |
1372 | * Locking: local_bh_enable/local_bh_disable | 1372 | * Locking: local_bh_enable/local_bh_disable |
1373 | * | 1373 | * |
1374 | * Returns the result from the CP IUCV call. | 1374 | * Returns the result from the CP IUCV call. |
1375 | */ | 1375 | */ |
1376 | int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, | 1376 | int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, |
1377 | u8 flags, u32 srccls, void *buffer, size_t size) | 1377 | u8 flags, u32 srccls, void *buffer, size_t size) |
1378 | { | 1378 | { |
1379 | int rc; | 1379 | int rc; |
1380 | 1380 | ||
1381 | local_bh_disable(); | 1381 | local_bh_disable(); |
1382 | rc = __iucv_message_send(path, msg, flags, srccls, buffer, size); | 1382 | rc = __iucv_message_send(path, msg, flags, srccls, buffer, size); |
1383 | local_bh_enable(); | 1383 | local_bh_enable(); |
1384 | return rc; | 1384 | return rc; |
1385 | } | 1385 | } |
1386 | EXPORT_SYMBOL(iucv_message_send); | 1386 | EXPORT_SYMBOL(iucv_message_send); |
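
A one-way send sketch (class and tag values hypothetical; the tag is echoed back in the message_complete callback):

	static int my_send(struct iucv_path *path, void *data, size_t len)
	{
		struct iucv_message msg = {
			.class = 0,	/* target class, matched by the receiver */
			.tag   = 42,	/* opaque tag echoed on completion */
		};

		return iucv_message_send(path, &msg, 0, 0, data, len);
	}
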
1387 | 1387 | ||
1388 | /** | 1388 | /** |
1389 | * iucv_message_send2way | 1389 | * iucv_message_send2way |
1390 | * @path: address of iucv path structure | 1390 | * @path: address of iucv path structure |
1391 | * @msg: address of iucv msg structure | 1391 | * @msg: address of iucv msg structure |
1392 | * @flags: how the message is sent and the reply is received | 1392 | * @flags: how the message is sent and the reply is received |
1393 | * (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_IPANSLST) | 1393 | * (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_IPANSLST) |
1394 | * @srccls: source class of message | 1394 | * @srccls: source class of message |
1395 | * @buffer: address of send buffer or address of struct iucv_array | 1395 | * @buffer: address of send buffer or address of struct iucv_array |
1396 | * @size: length of send buffer | 1396 | * @size: length of send buffer |
1397 | * @answer: address of answer buffer or address of struct iucv_array | 1397 | * @answer: address of answer buffer or address of struct iucv_array |
1398 | * @asize: size of reply buffer | 1398 | * @asize: size of reply buffer |
1399 | * | 1399 | * |
1400 | * This function transmits data to another application. Data to be | 1400 | * This function transmits data to another application. Data to be |
1401 | * transmitted is in a buffer. The receiver of the send is expected to | 1401 | * transmitted is in a buffer. The receiver of the send is expected to |
1402 | * reply to the message and a buffer is provided into which IUCV moves | 1402 | * reply to the message and a buffer is provided into which IUCV moves |
1403 | * the reply to this message. | 1403 | * the reply to this message. |
1404 | * | 1404 | * |
1405 | * Returns the result from the CP IUCV call. | 1405 | * Returns the result from the CP IUCV call. |
1406 | */ | 1406 | */ |
1407 | int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg, | 1407 | int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg, |
1408 | u8 flags, u32 srccls, void *buffer, size_t size, | 1408 | u8 flags, u32 srccls, void *buffer, size_t size, |
1409 | void *answer, size_t asize, size_t *residual) | 1409 | void *answer, size_t asize, size_t *residual) |
1410 | { | 1410 | { |
1411 | union iucv_param *parm; | 1411 | union iucv_param *parm; |
1412 | int rc; | 1412 | int rc; |
1413 | 1413 | ||
1414 | local_bh_disable(); | 1414 | local_bh_disable(); |
1415 | if (cpumask_empty(&iucv_buffer_cpumask)) { | 1415 | if (cpumask_empty(&iucv_buffer_cpumask)) { |
1416 | rc = -EIO; | 1416 | rc = -EIO; |
1417 | goto out; | 1417 | goto out; |
1418 | } | 1418 | } |
1419 | parm = iucv_param[smp_processor_id()]; | 1419 | parm = iucv_param[smp_processor_id()]; |
1420 | memset(parm, 0, sizeof(union iucv_param)); | 1420 | memset(parm, 0, sizeof(union iucv_param)); |
1421 | if (flags & IUCV_IPRMDATA) { | 1421 | if (flags & IUCV_IPRMDATA) { |
1422 | parm->dpl.ippathid = path->pathid; | 1422 | parm->dpl.ippathid = path->pathid; |
1423 | parm->dpl.ipflags1 = path->flags; /* priority message */ | 1423 | parm->dpl.ipflags1 = path->flags; /* priority message */ |
1424 | parm->dpl.iptrgcls = msg->class; | 1424 | parm->dpl.iptrgcls = msg->class; |
1425 | parm->dpl.ipsrccls = srccls; | 1425 | parm->dpl.ipsrccls = srccls; |
1426 | parm->dpl.ipmsgtag = msg->tag; | 1426 | parm->dpl.ipmsgtag = msg->tag; |
1427 | parm->dpl.ipbfadr2 = (u32)(addr_t) answer; | 1427 | parm->dpl.ipbfadr2 = (u32)(addr_t) answer; |
1428 | parm->dpl.ipbfln2f = (u32) asize; | 1428 | parm->dpl.ipbfln2f = (u32) asize; |
1429 | memcpy(parm->dpl.iprmmsg, buffer, 8); | 1429 | memcpy(parm->dpl.iprmmsg, buffer, 8); |
1430 | } else { | 1430 | } else { |
1431 | parm->db.ippathid = path->pathid; | 1431 | parm->db.ippathid = path->pathid; |
1432 | parm->db.ipflags1 = path->flags; /* priority message */ | 1432 | parm->db.ipflags1 = path->flags; /* priority message */ |
1433 | parm->db.iptrgcls = msg->class; | 1433 | parm->db.iptrgcls = msg->class; |
1434 | parm->db.ipsrccls = srccls; | 1434 | parm->db.ipsrccls = srccls; |
1435 | parm->db.ipmsgtag = msg->tag; | 1435 | parm->db.ipmsgtag = msg->tag; |
1436 | parm->db.ipbfadr1 = (u32)(addr_t) buffer; | 1436 | parm->db.ipbfadr1 = (u32)(addr_t) buffer; |
1437 | parm->db.ipbfln1f = (u32) size; | 1437 | parm->db.ipbfln1f = (u32) size; |
1438 | parm->db.ipbfadr2 = (u32)(addr_t) answer; | 1438 | parm->db.ipbfadr2 = (u32)(addr_t) answer; |
1439 | parm->db.ipbfln2f = (u32) asize; | 1439 | parm->db.ipbfln2f = (u32) asize; |
1440 | } | 1440 | } |
1441 | rc = iucv_call_b2f0(IUCV_SEND, parm); | 1441 | rc = iucv_call_b2f0(IUCV_SEND, parm); |
1442 | if (!rc) | 1442 | if (!rc) |
1443 | msg->id = parm->db.ipmsgid; | 1443 | msg->id = parm->db.ipmsgid; |
1444 | out: | 1444 | out: |
1445 | local_bh_enable(); | 1445 | local_bh_enable(); |
1446 | return rc; | 1446 | return rc; |
1447 | } | 1447 | } |
1448 | EXPORT_SYMBOL(iucv_message_send2way); | 1448 | EXPORT_SYMBOL(iucv_message_send2way); |
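
Correspondingly, a hypothetical two-way exchange could look as follows; note that the reply only lands in answer once the message_complete callback fires for this message id, and that the residual argument is accepted but never evaluated by the implementation above:

	static int example_send_twoway(struct iucv_path *path, void *answer, size_t asize)
	{
		struct iucv_message msg = { .tag = 0x43 };
		static char query[] = "ping";

		/* On success, msg.id identifies this request in message_complete. */
		return iucv_message_send2way(path, &msg, 0, 0, query, sizeof(query),
					     answer, asize, NULL);
	}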
1449 | 1449 | ||
1450 | /** | 1450 | /** |
1451 | * iucv_path_pending | 1451 | * iucv_path_pending |
1452 | * @data: Pointer to external interrupt buffer | 1452 | * @data: Pointer to external interrupt buffer |
1453 | * | 1453 | * |
1454 | * Process connection pending work item. Called from tasklet while holding | 1454 | * Process connection pending work item. Called from tasklet while holding |
1455 | * iucv_table_lock. | 1455 | * iucv_table_lock. |
1456 | */ | 1456 | */ |
1457 | struct iucv_path_pending { | 1457 | struct iucv_path_pending { |
1458 | u16 ippathid; | 1458 | u16 ippathid; |
1459 | u8 ipflags1; | 1459 | u8 ipflags1; |
1460 | u8 iptype; | 1460 | u8 iptype; |
1461 | u16 ipmsglim; | 1461 | u16 ipmsglim; |
1462 | u16 res1; | 1462 | u16 res1; |
1463 | u8 ipvmid[8]; | 1463 | u8 ipvmid[8]; |
1464 | u8 ipuser[16]; | 1464 | u8 ipuser[16]; |
1465 | u32 res3; | 1465 | u32 res3; |
1466 | u8 ippollfg; | 1466 | u8 ippollfg; |
1467 | u8 res4[3]; | 1467 | u8 res4[3]; |
1468 | } __packed; | 1468 | } __packed; |
1469 | 1469 | ||
1470 | static void iucv_path_pending(struct iucv_irq_data *data) | 1470 | static void iucv_path_pending(struct iucv_irq_data *data) |
1471 | { | 1471 | { |
1472 | struct iucv_path_pending *ipp = (void *) data; | 1472 | struct iucv_path_pending *ipp = (void *) data; |
1473 | struct iucv_handler *handler; | 1473 | struct iucv_handler *handler; |
1474 | struct iucv_path *path; | 1474 | struct iucv_path *path; |
1475 | char *error; | 1475 | char *error; |
1476 | 1476 | ||
1477 | BUG_ON(iucv_path_table[ipp->ippathid]); | 1477 | BUG_ON(iucv_path_table[ipp->ippathid]); |
1478 | /* New pathid, handler found. Create a new path struct. */ | 1478 | /* New pathid, handler found. Create a new path struct. */ |
1479 | error = iucv_error_no_memory; | 1479 | error = iucv_error_no_memory; |
1480 | path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC); | 1480 | path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC); |
1481 | if (!path) | 1481 | if (!path) |
1482 | goto out_sever; | 1482 | goto out_sever; |
1483 | path->pathid = ipp->ippathid; | 1483 | path->pathid = ipp->ippathid; |
1484 | iucv_path_table[path->pathid] = path; | 1484 | iucv_path_table[path->pathid] = path; |
1485 | EBCASC(ipp->ipvmid, 8); | 1485 | EBCASC(ipp->ipvmid, 8); |
1486 | 1486 | ||
1487 | /* Call the registered handlers until one is found that wants the path. */ | 1487 | /* Call the registered handlers until one is found that wants the path. */ |
1488 | list_for_each_entry(handler, &iucv_handler_list, list) { | 1488 | list_for_each_entry(handler, &iucv_handler_list, list) { |
1489 | if (!handler->path_pending) | 1489 | if (!handler->path_pending) |
1490 | continue; | 1490 | continue; |
1491 | /* | 1491 | /* |
1492 | * Add path to handler to allow a call to iucv_path_sever | 1492 | * Add path to handler to allow a call to iucv_path_sever |
1493 | * inside the path_pending function. If the handler returns | 1493 | * inside the path_pending function. If the handler returns |
1494 | * an error, remove the path from the handler again. | 1494 | * an error, remove the path from the handler again. |
1495 | */ | 1495 | */ |
1496 | list_add(&path->list, &handler->paths); | 1496 | list_add(&path->list, &handler->paths); |
1497 | path->handler = handler; | 1497 | path->handler = handler; |
1498 | if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser)) | 1498 | if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser)) |
1499 | return; | 1499 | return; |
1500 | list_del(&path->list); | 1500 | list_del(&path->list); |
1501 | path->handler = NULL; | 1501 | path->handler = NULL; |
1502 | } | 1502 | } |
1503 | /* No handler wanted the path. */ | 1503 | /* No handler wanted the path. */ |
1504 | iucv_path_table[path->pathid] = NULL; | 1504 | iucv_path_table[path->pathid] = NULL; |
1505 | iucv_path_free(path); | 1505 | iucv_path_free(path); |
1506 | error = iucv_error_no_listener; | 1506 | error = iucv_error_no_listener; |
1507 | out_sever: | 1507 | out_sever: |
1508 | iucv_sever_pathid(ipp->ippathid, error); | 1508 | iucv_sever_pathid(ipp->ippathid, error); |
1509 | } | 1509 | } |
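
To illustrate the accept/reject contract of the loop above, a hypothetical path_pending callback might look like this; returning 0 keeps the path attached to the handler, any other value detaches it and lets the next handler inspect the path (the peer name and handler struct are made up):

	static struct iucv_handler example_handler;	/* assume .path_pending = example_path_pending */

	static int example_path_pending(struct iucv_path *path, u8 *ipvmid, u8 *ipuser)
	{
		if (memcmp(ipvmid, "MYPEER  ", 8))
			return -ENOENT;	/* not ours, offer the path to the next handler */
		/* Accept the connection; user data and private pointer are optional. */
		return iucv_path_accept(path, &example_handler, NULL, NULL);
	}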
1510 | 1510 | ||
1511 | /** | 1511 | /** |
1512 | * iucv_path_complete | 1512 | * iucv_path_complete |
1513 | * @data: Pointer to external interrupt buffer | 1513 | * @data: Pointer to external interrupt buffer |
1514 | * | 1514 | * |
1515 | * Process connection complete work item. Called from tasklet while holding | 1515 | * Process connection complete work item. Called from tasklet while holding |
1516 | * iucv_table_lock. | 1516 | * iucv_table_lock. |
1517 | */ | 1517 | */ |
1518 | struct iucv_path_complete { | 1518 | struct iucv_path_complete { |
1519 | u16 ippathid; | 1519 | u16 ippathid; |
1520 | u8 ipflags1; | 1520 | u8 ipflags1; |
1521 | u8 iptype; | 1521 | u8 iptype; |
1522 | u16 ipmsglim; | 1522 | u16 ipmsglim; |
1523 | u16 res1; | 1523 | u16 res1; |
1524 | u8 res2[8]; | 1524 | u8 res2[8]; |
1525 | u8 ipuser[16]; | 1525 | u8 ipuser[16]; |
1526 | u32 res3; | 1526 | u32 res3; |
1527 | u8 ippollfg; | 1527 | u8 ippollfg; |
1528 | u8 res4[3]; | 1528 | u8 res4[3]; |
1529 | } __packed; | 1529 | } __packed; |
1530 | 1530 | ||
1531 | static void iucv_path_complete(struct iucv_irq_data *data) | 1531 | static void iucv_path_complete(struct iucv_irq_data *data) |
1532 | { | 1532 | { |
1533 | struct iucv_path_complete *ipc = (void *) data; | 1533 | struct iucv_path_complete *ipc = (void *) data; |
1534 | struct iucv_path *path = iucv_path_table[ipc->ippathid]; | 1534 | struct iucv_path *path = iucv_path_table[ipc->ippathid]; |
1535 | 1535 | ||
1536 | if (path) | 1536 | if (path) |
1537 | path->flags = ipc->ipflags1; | 1537 | path->flags = ipc->ipflags1; |
1538 | if (path && path->handler && path->handler->path_complete) | 1538 | if (path && path->handler && path->handler->path_complete) |
1539 | path->handler->path_complete(path, ipc->ipuser); | 1539 | path->handler->path_complete(path, ipc->ipuser); |
1540 | } | 1540 | } |
1541 | 1541 | ||
1542 | /** | 1542 | /** |
1543 | * iucv_path_severed | 1543 | * iucv_path_severed |
1544 | * @data: Pointer to external interrupt buffer | 1544 | * @data: Pointer to external interrupt buffer |
1545 | * | 1545 | * |
1546 | * Process connection severed work item. Called from tasklet while holding | 1546 | * Process connection severed work item. Called from tasklet while holding |
1547 | * iucv_table_lock. | 1547 | * iucv_table_lock. |
1548 | */ | 1548 | */ |
1549 | struct iucv_path_severed { | 1549 | struct iucv_path_severed { |
1550 | u16 ippathid; | 1550 | u16 ippathid; |
1551 | u8 res1; | 1551 | u8 res1; |
1552 | u8 iptype; | 1552 | u8 iptype; |
1553 | u32 res2; | 1553 | u32 res2; |
1554 | u8 res3[8]; | 1554 | u8 res3[8]; |
1555 | u8 ipuser[16]; | 1555 | u8 ipuser[16]; |
1556 | u32 res4; | 1556 | u32 res4; |
1557 | u8 ippollfg; | 1557 | u8 ippollfg; |
1558 | u8 res5[3]; | 1558 | u8 res5[3]; |
1559 | } __packed; | 1559 | } __packed; |
1560 | 1560 | ||
1561 | static void iucv_path_severed(struct iucv_irq_data *data) | 1561 | static void iucv_path_severed(struct iucv_irq_data *data) |
1562 | { | 1562 | { |
1563 | struct iucv_path_severed *ips = (void *) data; | 1563 | struct iucv_path_severed *ips = (void *) data; |
1564 | struct iucv_path *path = iucv_path_table[ips->ippathid]; | 1564 | struct iucv_path *path = iucv_path_table[ips->ippathid]; |
1565 | 1565 | ||
1566 | if (!path || !path->handler) /* Already severed */ | 1566 | if (!path || !path->handler) /* Already severed */ |
1567 | return; | 1567 | return; |
1568 | if (path->handler->path_severed) | 1568 | if (path->handler->path_severed) |
1569 | path->handler->path_severed(path, ips->ipuser); | 1569 | path->handler->path_severed(path, ips->ipuser); |
1570 | else { | 1570 | else { |
1571 | iucv_sever_pathid(path->pathid, NULL); | 1571 | iucv_sever_pathid(path->pathid, NULL); |
1572 | iucv_path_table[path->pathid] = NULL; | 1572 | iucv_path_table[path->pathid] = NULL; |
1573 | list_del(&path->list); | 1573 | list_del(&path->list); |
1574 | iucv_path_free(path); | 1574 | iucv_path_free(path); |
1575 | } | 1575 | } |
1576 | } | 1576 | } |
1577 | 1577 | ||
1578 | /** | 1578 | /** |
1579 | * iucv_path_quiesced | 1579 | * iucv_path_quiesced |
1580 | * @data: Pointer to external interrupt buffer | 1580 | * @data: Pointer to external interrupt buffer |
1581 | * | 1581 | * |
1582 | * Process connection quiesced work item. Called from tasklet while holding | 1582 | * Process connection quiesced work item. Called from tasklet while holding |
1583 | * iucv_table_lock. | 1583 | * iucv_table_lock. |
1584 | */ | 1584 | */ |
1585 | struct iucv_path_quiesced { | 1585 | struct iucv_path_quiesced { |
1586 | u16 ippathid; | 1586 | u16 ippathid; |
1587 | u8 res1; | 1587 | u8 res1; |
1588 | u8 iptype; | 1588 | u8 iptype; |
1589 | u32 res2; | 1589 | u32 res2; |
1590 | u8 res3[8]; | 1590 | u8 res3[8]; |
1591 | u8 ipuser[16]; | 1591 | u8 ipuser[16]; |
1592 | u32 res4; | 1592 | u32 res4; |
1593 | u8 ippollfg; | 1593 | u8 ippollfg; |
1594 | u8 res5[3]; | 1594 | u8 res5[3]; |
1595 | } __packed; | 1595 | } __packed; |
1596 | 1596 | ||
1597 | static void iucv_path_quiesced(struct iucv_irq_data *data) | 1597 | static void iucv_path_quiesced(struct iucv_irq_data *data) |
1598 | { | 1598 | { |
1599 | struct iucv_path_quiesced *ipq = (void *) data; | 1599 | struct iucv_path_quiesced *ipq = (void *) data; |
1600 | struct iucv_path *path = iucv_path_table[ipq->ippathid]; | 1600 | struct iucv_path *path = iucv_path_table[ipq->ippathid]; |
1601 | 1601 | ||
1602 | if (path && path->handler && path->handler->path_quiesced) | 1602 | if (path && path->handler && path->handler->path_quiesced) |
1603 | path->handler->path_quiesced(path, ipq->ipuser); | 1603 | path->handler->path_quiesced(path, ipq->ipuser); |
1604 | } | 1604 | } |
1605 | 1605 | ||
1606 | /** | 1606 | /** |
1607 | * iucv_path_resumed | 1607 | * iucv_path_resumed |
1608 | * @data: Pointer to external interrupt buffer | 1608 | * @data: Pointer to external interrupt buffer |
1609 | * | 1609 | * |
1610 | * Process connection resumed work item. Called from tasklet while holding | 1610 | * Process connection resumed work item. Called from tasklet while holding |
1611 | * iucv_table_lock. | 1611 | * iucv_table_lock. |
1612 | */ | 1612 | */ |
1613 | struct iucv_path_resumed { | 1613 | struct iucv_path_resumed { |
1614 | u16 ippathid; | 1614 | u16 ippathid; |
1615 | u8 res1; | 1615 | u8 res1; |
1616 | u8 iptype; | 1616 | u8 iptype; |
1617 | u32 res2; | 1617 | u32 res2; |
1618 | u8 res3[8]; | 1618 | u8 res3[8]; |
1619 | u8 ipuser[16]; | 1619 | u8 ipuser[16]; |
1620 | u32 res4; | 1620 | u32 res4; |
1621 | u8 ippollfg; | 1621 | u8 ippollfg; |
1622 | u8 res5[3]; | 1622 | u8 res5[3]; |
1623 | } __packed; | 1623 | } __packed; |
1624 | 1624 | ||
1625 | static void iucv_path_resumed(struct iucv_irq_data *data) | 1625 | static void iucv_path_resumed(struct iucv_irq_data *data) |
1626 | { | 1626 | { |
1627 | struct iucv_path_resumed *ipr = (void *) data; | 1627 | struct iucv_path_resumed *ipr = (void *) data; |
1628 | struct iucv_path *path = iucv_path_table[ipr->ippathid]; | 1628 | struct iucv_path *path = iucv_path_table[ipr->ippathid]; |
1629 | 1629 | ||
1630 | if (path && path->handler && path->handler->path_resumed) | 1630 | if (path && path->handler && path->handler->path_resumed) |
1631 | path->handler->path_resumed(path, ipr->ipuser); | 1631 | path->handler->path_resumed(path, ipr->ipuser); |
1632 | } | 1632 | } |
1633 | 1633 | ||
1634 | /** | 1634 | /** |
1635 | * iucv_message_complete | 1635 | * iucv_message_complete |
1636 | * @data: Pointer to external interrupt buffer | 1636 | * @data: Pointer to external interrupt buffer |
1637 | * | 1637 | * |
1638 | * Process message complete work item. Called from tasklet while holding | 1638 | * Process message complete work item. Called from tasklet while holding |
1639 | * iucv_table_lock. | 1639 | * iucv_table_lock. |
1640 | */ | 1640 | */ |
1641 | struct iucv_message_complete { | 1641 | struct iucv_message_complete { |
1642 | u16 ippathid; | 1642 | u16 ippathid; |
1643 | u8 ipflags1; | 1643 | u8 ipflags1; |
1644 | u8 iptype; | 1644 | u8 iptype; |
1645 | u32 ipmsgid; | 1645 | u32 ipmsgid; |
1646 | u32 ipaudit; | 1646 | u32 ipaudit; |
1647 | u8 iprmmsg[8]; | 1647 | u8 iprmmsg[8]; |
1648 | u32 ipsrccls; | 1648 | u32 ipsrccls; |
1649 | u32 ipmsgtag; | 1649 | u32 ipmsgtag; |
1650 | u32 res; | 1650 | u32 res; |
1651 | u32 ipbfln2f; | 1651 | u32 ipbfln2f; |
1652 | u8 ippollfg; | 1652 | u8 ippollfg; |
1653 | u8 res2[3]; | 1653 | u8 res2[3]; |
1654 | } __packed; | 1654 | } __packed; |
1655 | 1655 | ||
1656 | static void iucv_message_complete(struct iucv_irq_data *data) | 1656 | static void iucv_message_complete(struct iucv_irq_data *data) |
1657 | { | 1657 | { |
1658 | struct iucv_message_complete *imc = (void *) data; | 1658 | struct iucv_message_complete *imc = (void *) data; |
1659 | struct iucv_path *path = iucv_path_table[imc->ippathid]; | 1659 | struct iucv_path *path = iucv_path_table[imc->ippathid]; |
1660 | struct iucv_message msg; | 1660 | struct iucv_message msg; |
1661 | 1661 | ||
1662 | if (path && path->handler && path->handler->message_complete) { | 1662 | if (path && path->handler && path->handler->message_complete) { |
1663 | msg.flags = imc->ipflags1; | 1663 | msg.flags = imc->ipflags1; |
1664 | msg.id = imc->ipmsgid; | 1664 | msg.id = imc->ipmsgid; |
1665 | msg.audit = imc->ipaudit; | 1665 | msg.audit = imc->ipaudit; |
1666 | memcpy(msg.rmmsg, imc->iprmmsg, 8); | 1666 | memcpy(msg.rmmsg, imc->iprmmsg, 8); |
1667 | msg.class = imc->ipsrccls; | 1667 | msg.class = imc->ipsrccls; |
1668 | msg.tag = imc->ipmsgtag; | 1668 | msg.tag = imc->ipmsgtag; |
1669 | msg.length = imc->ipbfln2f; | 1669 | msg.length = imc->ipbfln2f; |
1670 | path->handler->message_complete(path, &msg); | 1670 | path->handler->message_complete(path, &msg); |
1671 | } | 1671 | } |
1672 | } | 1672 | } |
1673 | 1673 | ||
1674 | /** | 1674 | /** |
1675 | * iucv_message_pending | 1675 | * iucv_message_pending |
1676 | * @data: Pointer to external interrupt buffer | 1676 | * @data: Pointer to external interrupt buffer |
1677 | * | 1677 | * |
1678 | * Process message pending work item. Called from tasklet while holding | 1678 | * Process message pending work item. Called from tasklet while holding |
1679 | * iucv_table_lock. | 1679 | * iucv_table_lock. |
1680 | */ | 1680 | */ |
1681 | struct iucv_message_pending { | 1681 | struct iucv_message_pending { |
1682 | u16 ippathid; | 1682 | u16 ippathid; |
1683 | u8 ipflags1; | 1683 | u8 ipflags1; |
1684 | u8 iptype; | 1684 | u8 iptype; |
1685 | u32 ipmsgid; | 1685 | u32 ipmsgid; |
1686 | u32 iptrgcls; | 1686 | u32 iptrgcls; |
1687 | union { | 1687 | union { |
1688 | u32 iprmmsg1_u32; | 1688 | u32 iprmmsg1_u32; |
1689 | u8 iprmmsg1[4]; | 1689 | u8 iprmmsg1[4]; |
1690 | } ln1msg1; | 1690 | } ln1msg1; |
1691 | union { | 1691 | union { |
1692 | u32 ipbfln1f; | 1692 | u32 ipbfln1f; |
1693 | u8 iprmmsg2[4]; | 1693 | u8 iprmmsg2[4]; |
1694 | } ln1msg2; | 1694 | } ln1msg2; |
1695 | u32 res1[3]; | 1695 | u32 res1[3]; |
1696 | u32 ipbfln2f; | 1696 | u32 ipbfln2f; |
1697 | u8 ippollfg; | 1697 | u8 ippollfg; |
1698 | u8 res2[3]; | 1698 | u8 res2[3]; |
1699 | } __packed; | 1699 | } __packed; |
1700 | 1700 | ||
1701 | static void iucv_message_pending(struct iucv_irq_data *data) | 1701 | static void iucv_message_pending(struct iucv_irq_data *data) |
1702 | { | 1702 | { |
1703 | struct iucv_message_pending *imp = (void *) data; | 1703 | struct iucv_message_pending *imp = (void *) data; |
1704 | struct iucv_path *path = iucv_path_table[imp->ippathid]; | 1704 | struct iucv_path *path = iucv_path_table[imp->ippathid]; |
1705 | struct iucv_message msg; | 1705 | struct iucv_message msg; |
1706 | 1706 | ||
1707 | if (path && path->handler && path->handler->message_pending) { | 1707 | if (path && path->handler && path->handler->message_pending) { |
1708 | msg.flags = imp->ipflags1; | 1708 | msg.flags = imp->ipflags1; |
1709 | msg.id = imp->ipmsgid; | 1709 | msg.id = imp->ipmsgid; |
1710 | msg.class = imp->iptrgcls; | 1710 | msg.class = imp->iptrgcls; |
1711 | if (imp->ipflags1 & IUCV_IPRMDATA) { | 1711 | if (imp->ipflags1 & IUCV_IPRMDATA) { |
1712 | memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8); | 1712 | memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8); |
1713 | msg.length = 8; | 1713 | msg.length = 8; |
1714 | } else | 1714 | } else |
1715 | msg.length = imp->ln1msg2.ipbfln1f; | 1715 | msg.length = imp->ln1msg2.ipbfln1f; |
1716 | msg.reply_size = imp->ipbfln2f; | 1716 | msg.reply_size = imp->ipbfln2f; |
1717 | path->handler->message_pending(path, &msg); | 1717 | path->handler->message_pending(path, &msg); |
1718 | } | 1718 | } |
1719 | } | 1719 | } |
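
As a counterpart to the dispatch above, a hypothetical message_pending callback could handle both delivery variants; for IUCV_IPRMDATA the (at most eight bytes of) data already sits in msg->rmmsg, otherwise it has to be fetched with iucv_message_receive() (buffer sizing here is illustrative):

	static void example_message_pending(struct iucv_path *path, struct iucv_message *msg)
	{
		char buf[64];

		if (msg->flags & IUCV_IPRMDATA)
			memcpy(buf, msg->rmmsg, 8);		/* inline message data */
		else if (msg->length <= sizeof(buf))
			iucv_message_receive(path, msg, 0, buf, msg->length, NULL);
		else
			iucv_message_reject(path, msg);		/* too large for this sketch */
	}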
1720 | 1720 | ||
1721 | /** | 1721 | /** |
1722 | * iucv_tasklet_fn: | 1722 | * iucv_tasklet_fn: |
1723 | * | 1723 | * |
1724 | * This tasklet loops over the queue of irq buffers created by | 1724 | * This tasklet loops over the queue of irq buffers created by |
1725 | * iucv_external_interrupt, calls the appropriate action handler | 1725 | * iucv_external_interrupt, calls the appropriate action handler |
1726 | * and then frees the buffer. | 1726 | * and then frees the buffer. |
1727 | */ | 1727 | */ |
1728 | static void iucv_tasklet_fn(unsigned long ignored) | 1728 | static void iucv_tasklet_fn(unsigned long ignored) |
1729 | { | 1729 | { |
1730 | typedef void iucv_irq_fn(struct iucv_irq_data *); | 1730 | typedef void iucv_irq_fn(struct iucv_irq_data *); |
1731 | static iucv_irq_fn *irq_fn[] = { | 1731 | static iucv_irq_fn *irq_fn[] = { |
1732 | [0x02] = iucv_path_complete, | 1732 | [0x02] = iucv_path_complete, |
1733 | [0x03] = iucv_path_severed, | 1733 | [0x03] = iucv_path_severed, |
1734 | [0x04] = iucv_path_quiesced, | 1734 | [0x04] = iucv_path_quiesced, |
1735 | [0x05] = iucv_path_resumed, | 1735 | [0x05] = iucv_path_resumed, |
1736 | [0x06] = iucv_message_complete, | 1736 | [0x06] = iucv_message_complete, |
1737 | [0x07] = iucv_message_complete, | 1737 | [0x07] = iucv_message_complete, |
1738 | [0x08] = iucv_message_pending, | 1738 | [0x08] = iucv_message_pending, |
1739 | [0x09] = iucv_message_pending, | 1739 | [0x09] = iucv_message_pending, |
1740 | }; | 1740 | }; |
1741 | LIST_HEAD(task_queue); | 1741 | LIST_HEAD(task_queue); |
1742 | struct iucv_irq_list *p, *n; | 1742 | struct iucv_irq_list *p, *n; |
1743 | 1743 | ||
1744 | /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */ | 1744 | /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */ |
1745 | if (!spin_trylock(&iucv_table_lock)) { | 1745 | if (!spin_trylock(&iucv_table_lock)) { |
1746 | tasklet_schedule(&iucv_tasklet); | 1746 | tasklet_schedule(&iucv_tasklet); |
1747 | return; | 1747 | return; |
1748 | } | 1748 | } |
1749 | iucv_active_cpu = smp_processor_id(); | 1749 | iucv_active_cpu = smp_processor_id(); |
1750 | 1750 | ||
1751 | spin_lock_irq(&iucv_queue_lock); | 1751 | spin_lock_irq(&iucv_queue_lock); |
1752 | list_splice_init(&iucv_task_queue, &task_queue); | 1752 | list_splice_init(&iucv_task_queue, &task_queue); |
1753 | spin_unlock_irq(&iucv_queue_lock); | 1753 | spin_unlock_irq(&iucv_queue_lock); |
1754 | 1754 | ||
1755 | list_for_each_entry_safe(p, n, &task_queue, list) { | 1755 | list_for_each_entry_safe(p, n, &task_queue, list) { |
1756 | list_del_init(&p->list); | 1756 | list_del_init(&p->list); |
1757 | irq_fn[p->data.iptype](&p->data); | 1757 | irq_fn[p->data.iptype](&p->data); |
1758 | kfree(p); | 1758 | kfree(p); |
1759 | } | 1759 | } |
1760 | 1760 | ||
1761 | iucv_active_cpu = -1; | 1761 | iucv_active_cpu = -1; |
1762 | spin_unlock(&iucv_table_lock); | 1762 | spin_unlock(&iucv_table_lock); |
1763 | } | 1763 | } |
1764 | 1764 | ||
1765 | /** | 1765 | /** |
1766 | * iucv_work_fn: | 1766 | * iucv_work_fn: |
1767 | * | 1767 | * |
1768 | * This work function loops over the queue of path pending irq blocks | 1768 | * This work function loops over the queue of path pending irq blocks |
1769 | * created by iucv_external_interrupt, calls the appropriate action | 1769 | * created by iucv_external_interrupt, calls the appropriate action |
1770 | * handler and then frees the buffer. | 1770 | * handler and then frees the buffer. |
1771 | */ | 1771 | */ |
1772 | static void iucv_work_fn(struct work_struct *work) | 1772 | static void iucv_work_fn(struct work_struct *work) |
1773 | { | 1773 | { |
1774 | LIST_HEAD(work_queue); | 1774 | LIST_HEAD(work_queue); |
1775 | struct iucv_irq_list *p, *n; | 1775 | struct iucv_irq_list *p, *n; |
1776 | 1776 | ||
1777 | /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */ | 1777 | /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */ |
1778 | spin_lock_bh(&iucv_table_lock); | 1778 | spin_lock_bh(&iucv_table_lock); |
1779 | iucv_active_cpu = smp_processor_id(); | 1779 | iucv_active_cpu = smp_processor_id(); |
1780 | 1780 | ||
1781 | spin_lock_irq(&iucv_queue_lock); | 1781 | spin_lock_irq(&iucv_queue_lock); |
1782 | list_splice_init(&iucv_work_queue, &work_queue); | 1782 | list_splice_init(&iucv_work_queue, &work_queue); |
1783 | spin_unlock_irq(&iucv_queue_lock); | 1783 | spin_unlock_irq(&iucv_queue_lock); |
1784 | 1784 | ||
1785 | iucv_cleanup_queue(); | 1785 | iucv_cleanup_queue(); |
1786 | list_for_each_entry_safe(p, n, &work_queue, list) { | 1786 | list_for_each_entry_safe(p, n, &work_queue, list) { |
1787 | list_del_init(&p->list); | 1787 | list_del_init(&p->list); |
1788 | iucv_path_pending(&p->data); | 1788 | iucv_path_pending(&p->data); |
1789 | kfree(p); | 1789 | kfree(p); |
1790 | } | 1790 | } |
1791 | 1791 | ||
1792 | iucv_active_cpu = -1; | 1792 | iucv_active_cpu = -1; |
1793 | spin_unlock_bh(&iucv_table_lock); | 1793 | spin_unlock_bh(&iucv_table_lock); |
1794 | } | 1794 | } |
1795 | 1795 | ||
1796 | /** | 1796 | /** |
1797 | * iucv_external_interrupt | 1797 | * iucv_external_interrupt |
1798 | * @code: irq code | 1798 | * @code: irq code |
1799 | * | 1799 | * |
1800 | * Handles external interrupts coming in from CP. | 1800 | * Handles external interrupts coming in from CP. |
1801 | * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn() or, for path pending interrupts, iucv_work_fn(). | 1801 | * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn() or, for path pending interrupts, iucv_work_fn(). |
1802 | */ | 1802 | */ |
1803 | static void iucv_external_interrupt(unsigned int ext_int_code, | 1803 | static void iucv_external_interrupt(unsigned int ext_int_code, |
1804 | unsigned int param32, unsigned long param64) | 1804 | unsigned int param32, unsigned long param64) |
1805 | { | 1805 | { |
1806 | struct iucv_irq_data *p; | 1806 | struct iucv_irq_data *p; |
1807 | struct iucv_irq_list *work; | 1807 | struct iucv_irq_list *work; |
1808 | 1808 | ||
1809 | kstat_cpu(smp_processor_id()).irqs[EXTINT_IUC]++; | 1809 | kstat_cpu(smp_processor_id()).irqs[EXTINT_IUC]++; |
1810 | p = iucv_irq_data[smp_processor_id()]; | 1810 | p = iucv_irq_data[smp_processor_id()]; |
1811 | if (p->ippathid >= iucv_max_pathid) { | 1811 | if (p->ippathid >= iucv_max_pathid) { |
1812 | WARN_ON(p->ippathid >= iucv_max_pathid); | 1812 | WARN_ON(p->ippathid >= iucv_max_pathid); |
1813 | iucv_sever_pathid(p->ippathid, iucv_error_no_listener); | 1813 | iucv_sever_pathid(p->ippathid, iucv_error_no_listener); |
1814 | return; | 1814 | return; |
1815 | } | 1815 | } |
1816 | BUG_ON(p->iptype < 0x01 || p->iptype > 0x09); | 1816 | BUG_ON(p->iptype < 0x01 || p->iptype > 0x09); |
1817 | work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC); | 1817 | work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC); |
1818 | if (!work) { | 1818 | if (!work) { |
1819 | pr_warning("iucv_external_interrupt: out of memory\n"); | 1819 | pr_warning("iucv_external_interrupt: out of memory\n"); |
1820 | return; | 1820 | return; |
1821 | } | 1821 | } |
1822 | memcpy(&work->data, p, sizeof(work->data)); | 1822 | memcpy(&work->data, p, sizeof(work->data)); |
1823 | spin_lock(&iucv_queue_lock); | 1823 | spin_lock(&iucv_queue_lock); |
1824 | if (p->iptype == 0x01) { | 1824 | if (p->iptype == 0x01) { |
1825 | /* Path pending interrupt. */ | 1825 | /* Path pending interrupt. */ |
1826 | list_add_tail(&work->list, &iucv_work_queue); | 1826 | list_add_tail(&work->list, &iucv_work_queue); |
1827 | schedule_work(&iucv_work); | 1827 | schedule_work(&iucv_work); |
1828 | } else { | 1828 | } else { |
1829 | /* The other interrupts. */ | 1829 | /* The other interrupts. */ |
1830 | list_add_tail(&work->list, &iucv_task_queue); | 1830 | list_add_tail(&work->list, &iucv_task_queue); |
1831 | tasklet_schedule(&iucv_tasklet); | 1831 | tasklet_schedule(&iucv_tasklet); |
1832 | } | 1832 | } |
1833 | spin_unlock(&iucv_queue_lock); | 1833 | spin_unlock(&iucv_queue_lock); |
1834 | } | 1834 | } |
1835 | 1835 | ||
1836 | static int iucv_pm_prepare(struct device *dev) | 1836 | static int iucv_pm_prepare(struct device *dev) |
1837 | { | 1837 | { |
1838 | int rc = 0; | 1838 | int rc = 0; |
1839 | 1839 | ||
1840 | #ifdef CONFIG_PM_DEBUG | 1840 | #ifdef CONFIG_PM_DEBUG |
1841 | printk(KERN_INFO "iucv_pm_prepare\n"); | 1841 | printk(KERN_INFO "iucv_pm_prepare\n"); |
1842 | #endif | 1842 | #endif |
1843 | if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) | 1843 | if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) |
1844 | rc = dev->driver->pm->prepare(dev); | 1844 | rc = dev->driver->pm->prepare(dev); |
1845 | return rc; | 1845 | return rc; |
1846 | } | 1846 | } |
1847 | 1847 | ||
1848 | static void iucv_pm_complete(struct device *dev) | 1848 | static void iucv_pm_complete(struct device *dev) |
1849 | { | 1849 | { |
1850 | #ifdef CONFIG_PM_DEBUG | 1850 | #ifdef CONFIG_PM_DEBUG |
1851 | printk(KERN_INFO "iucv_pm_complete\n"); | 1851 | printk(KERN_INFO "iucv_pm_complete\n"); |
1852 | #endif | 1852 | #endif |
1853 | if (dev->driver && dev->driver->pm && dev->driver->pm->complete) | 1853 | if (dev->driver && dev->driver->pm && dev->driver->pm->complete) |
1854 | dev->driver->pm->complete(dev); | 1854 | dev->driver->pm->complete(dev); |
1855 | } | 1855 | } |
1856 | 1856 | ||
1857 | /** | 1857 | /** |
1858 | * iucv_path_table_empty() - determine if iucv path table is empty | 1858 | * iucv_path_table_empty() - determine if iucv path table is empty |
1859 | * | 1859 | * |
1860 | * Returns 0 if there are still iucv paths defined | 1860 | * Returns 0 if there are still iucv paths defined |
1861 | * 1 if there are no iucv paths defined | 1861 | * 1 if there are no iucv paths defined |
1862 | */ | 1862 | */ |
1863 | int iucv_path_table_empty(void) | 1863 | int iucv_path_table_empty(void) |
1864 | { | 1864 | { |
1865 | int i; | 1865 | int i; |
1866 | 1866 | ||
1867 | for (i = 0; i < iucv_max_pathid; i++) { | 1867 | for (i = 0; i < iucv_max_pathid; i++) { |
1868 | if (iucv_path_table[i]) | 1868 | if (iucv_path_table[i]) |
1869 | return 0; | 1869 | return 0; |
1870 | } | 1870 | } |
1871 | return 1; | 1871 | return 1; |
1872 | } | 1872 | } |
1873 | 1873 | ||
1874 | /** | 1874 | /** |
1875 | * iucv_pm_freeze() - Freeze PM callback | 1875 | * iucv_pm_freeze() - Freeze PM callback |
1876 | * @dev: iucv-based device | 1876 | * @dev: iucv-based device |
1877 | * | 1877 | * |
1878 | * disable iucv interrupts | 1878 | * disable iucv interrupts |
1879 | * invoke callback function of the iucv-based driver | 1879 | * invoke callback function of the iucv-based driver |
1880 | * shut down iucv if no iucv paths are established anymore | 1880 | * shut down iucv if no iucv paths are established anymore |
1881 | */ | 1881 | */ |
1882 | static int iucv_pm_freeze(struct device *dev) | 1882 | static int iucv_pm_freeze(struct device *dev) |
1883 | { | 1883 | { |
1884 | int cpu; | 1884 | int cpu; |
1885 | struct iucv_irq_list *p, *n; | 1885 | struct iucv_irq_list *p, *n; |
1886 | int rc = 0; | 1886 | int rc = 0; |
1887 | 1887 | ||
1888 | #ifdef CONFIG_PM_DEBUG | 1888 | #ifdef CONFIG_PM_DEBUG |
1889 | printk(KERN_WARNING "iucv_pm_freeze\n"); | 1889 | printk(KERN_WARNING "iucv_pm_freeze\n"); |
1890 | #endif | 1890 | #endif |
1891 | if (iucv_pm_state != IUCV_PM_FREEZING) { | 1891 | if (iucv_pm_state != IUCV_PM_FREEZING) { |
1892 | for_each_cpu(cpu, &iucv_irq_cpumask) | 1892 | for_each_cpu(cpu, &iucv_irq_cpumask) |
1893 | smp_call_function_single(cpu, iucv_block_cpu_almost, | 1893 | smp_call_function_single(cpu, iucv_block_cpu_almost, |
1894 | NULL, 1); | 1894 | NULL, 1); |
1895 | cancel_work_sync(&iucv_work); | 1895 | cancel_work_sync(&iucv_work); |
1896 | list_for_each_entry_safe(p, n, &iucv_work_queue, list) { | 1896 | list_for_each_entry_safe(p, n, &iucv_work_queue, list) { |
1897 | list_del_init(&p->list); | 1897 | list_del_init(&p->list); |
1898 | iucv_sever_pathid(p->data.ippathid, | 1898 | iucv_sever_pathid(p->data.ippathid, |
1899 | iucv_error_no_listener); | 1899 | iucv_error_no_listener); |
1900 | kfree(p); | 1900 | kfree(p); |
1901 | } | 1901 | } |
1902 | } | 1902 | } |
1903 | iucv_pm_state = IUCV_PM_FREEZING; | 1903 | iucv_pm_state = IUCV_PM_FREEZING; |
1904 | if (dev->driver && dev->driver->pm && dev->driver->pm->freeze) | 1904 | if (dev->driver && dev->driver->pm && dev->driver->pm->freeze) |
1905 | rc = dev->driver->pm->freeze(dev); | 1905 | rc = dev->driver->pm->freeze(dev); |
1906 | if (iucv_path_table_empty()) | 1906 | if (iucv_path_table_empty()) |
1907 | iucv_disable(); | 1907 | iucv_disable(); |
1908 | return rc; | 1908 | return rc; |
1909 | } | 1909 | } |
1910 | 1910 | ||
1911 | /** | 1911 | /** |
1912 | * iucv_pm_thaw() - Thaw PM callback | 1912 | * iucv_pm_thaw() - Thaw PM callback |
1913 | * @dev: iucv-based device | 1913 | * @dev: iucv-based device |
1914 | * | 1914 | * |
1915 | * make iucv ready for use again: allocate path table, declare interrupt buffers | 1915 | * make iucv ready for use again: allocate path table, declare interrupt buffers |
1916 | * and enable iucv interrupts | 1916 | * and enable iucv interrupts |
1917 | * invoke callback function of the iucv-based driver | 1917 | * invoke callback function of the iucv-based driver |
1918 | */ | 1918 | */ |
1919 | static int iucv_pm_thaw(struct device *dev) | 1919 | static int iucv_pm_thaw(struct device *dev) |
1920 | { | 1920 | { |
1921 | int rc = 0; | 1921 | int rc = 0; |
1922 | 1922 | ||
1923 | #ifdef CONFIG_PM_DEBUG | 1923 | #ifdef CONFIG_PM_DEBUG |
1924 | printk(KERN_WARNING "iucv_pm_thaw\n"); | 1924 | printk(KERN_WARNING "iucv_pm_thaw\n"); |
1925 | #endif | 1925 | #endif |
1926 | iucv_pm_state = IUCV_PM_THAWING; | 1926 | iucv_pm_state = IUCV_PM_THAWING; |
1927 | if (!iucv_path_table) { | 1927 | if (!iucv_path_table) { |
1928 | rc = iucv_enable(); | 1928 | rc = iucv_enable(); |
1929 | if (rc) | 1929 | if (rc) |
1930 | goto out; | 1930 | goto out; |
1931 | } | 1931 | } |
1932 | if (cpumask_empty(&iucv_irq_cpumask)) { | 1932 | if (cpumask_empty(&iucv_irq_cpumask)) { |
1933 | if (iucv_nonsmp_handler) | 1933 | if (iucv_nonsmp_handler) |
1934 | /* enable interrupts on one cpu */ | 1934 | /* enable interrupts on one cpu */ |
1935 | iucv_allow_cpu(NULL); | 1935 | iucv_allow_cpu(NULL); |
1936 | else | 1936 | else |
1937 | /* enable interrupts on all cpus */ | 1937 | /* enable interrupts on all cpus */ |
1938 | iucv_setmask_mp(); | 1938 | iucv_setmask_mp(); |
1939 | } | 1939 | } |
1940 | if (dev->driver && dev->driver->pm && dev->driver->pm->thaw) | 1940 | if (dev->driver && dev->driver->pm && dev->driver->pm->thaw) |
1941 | rc = dev->driver->pm->thaw(dev); | 1941 | rc = dev->driver->pm->thaw(dev); |
1942 | out: | 1942 | out: |
1943 | return rc; | 1943 | return rc; |
1944 | } | 1944 | } |
1945 | 1945 | ||
1946 | /** | 1946 | /** |
1947 | * iucv_pm_restore() - Restore PM callback | 1947 | * iucv_pm_restore() - Restore PM callback |
1948 | * @dev: iucv-based device | 1948 | * @dev: iucv-based device |
1949 | * | 1949 | * |
1950 | * make iucv ready for use again: allocate path table, declare interrupt buffers | 1950 | * make iucv ready for use again: allocate path table, declare interrupt buffers |
1951 | * and enable iucv interrupts | 1951 | * and enable iucv interrupts |
1952 | * invoke callback function of the iucv-based driver | 1952 | * invoke callback function of the iucv-based driver |
1953 | */ | 1953 | */ |
1954 | static int iucv_pm_restore(struct device *dev) | 1954 | static int iucv_pm_restore(struct device *dev) |
1955 | { | 1955 | { |
1956 | int rc = 0; | 1956 | int rc = 0; |
1957 | 1957 | ||
1958 | #ifdef CONFIG_PM_DEBUG | 1958 | #ifdef CONFIG_PM_DEBUG |
1959 | printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table); | 1959 | printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table); |
1960 | #endif | 1960 | #endif |
1961 | if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table) | 1961 | if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table) |
1962 | pr_warning("Suspending Linux did not completely close all IUCV " | 1962 | pr_warning("Suspending Linux did not completely close all IUCV " |
1963 | "connections\n"); | 1963 | "connections\n"); |
1964 | iucv_pm_state = IUCV_PM_RESTORING; | 1964 | iucv_pm_state = IUCV_PM_RESTORING; |
1965 | if (cpumask_empty(&iucv_irq_cpumask)) { | 1965 | if (cpumask_empty(&iucv_irq_cpumask)) { |
1966 | rc = iucv_query_maxconn(); | 1966 | rc = iucv_query_maxconn(); |
1967 | rc = iucv_enable(); | 1967 | rc = iucv_enable(); |
1968 | if (rc) | 1968 | if (rc) |
1969 | goto out; | 1969 | goto out; |
1970 | } | 1970 | } |
1971 | if (dev->driver && dev->driver->pm && dev->driver->pm->restore) | 1971 | if (dev->driver && dev->driver->pm && dev->driver->pm->restore) |
1972 | rc = dev->driver->pm->restore(dev); | 1972 | rc = dev->driver->pm->restore(dev); |
1973 | out: | 1973 | out: |
1974 | return rc; | 1974 | return rc; |
1975 | } | 1975 | } |
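
None of the four PM callbacks above is called directly; elsewhere in iucv.c (outside this hunk) they are wired into the dev_pm_ops of the iucv bus, roughly along these lines:

	static const struct dev_pm_ops iucv_pm_ops = {
		.prepare  = iucv_pm_prepare,
		.complete = iucv_pm_complete,
		.freeze   = iucv_pm_freeze,
		.thaw     = iucv_pm_thaw,
		.restore  = iucv_pm_restore,
	};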
1976 | 1976 | ||
1977 | /** | 1977 | /** |
1978 | * iucv_init | 1978 | * iucv_init |
1979 | * | 1979 | * |
1980 | * Allocates and initializes various data structures. | 1980 | * Allocates and initializes various data structures. |
1981 | */ | 1981 | */ |
1982 | static int __init iucv_init(void) | 1982 | static int __init iucv_init(void) |
1983 | { | 1983 | { |
1984 | int rc; | 1984 | int rc; |
1985 | int cpu; | 1985 | int cpu; |
1986 | 1986 | ||
1987 | if (!MACHINE_IS_VM) { | 1987 | if (!MACHINE_IS_VM) { |
1988 | rc = -EPROTONOSUPPORT; | 1988 | rc = -EPROTONOSUPPORT; |
1989 | goto out; | 1989 | goto out; |
1990 | } | 1990 | } |
1991 | rc = iucv_query_maxconn(); | 1991 | rc = iucv_query_maxconn(); |
1992 | if (rc) | 1992 | if (rc) |
1993 | goto out; | 1993 | goto out; |
1994 | rc = register_external_interrupt(0x4000, iucv_external_interrupt); | 1994 | rc = register_external_interrupt(0x4000, iucv_external_interrupt); |
1995 | if (rc) | 1995 | if (rc) |
1996 | goto out; | 1996 | goto out; |
1997 | iucv_root = root_device_register("iucv"); | 1997 | iucv_root = root_device_register("iucv"); |
1998 | if (IS_ERR(iucv_root)) { | 1998 | if (IS_ERR(iucv_root)) { |
1999 | rc = PTR_ERR(iucv_root); | 1999 | rc = PTR_ERR(iucv_root); |
2000 | goto out_int; | 2000 | goto out_int; |
2001 | } | 2001 | } |
2002 | 2002 | ||
2003 | for_each_online_cpu(cpu) { | 2003 | for_each_online_cpu(cpu) { |
2004 | /* Note: GFP_DMA used to get memory below 2G */ | 2004 | /* Note: GFP_DMA used to get memory below 2G */ |
2005 | iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), | 2005 | iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), |
2006 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 2006 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); |
2007 | if (!iucv_irq_data[cpu]) { | 2007 | if (!iucv_irq_data[cpu]) { |
2008 | rc = -ENOMEM; | 2008 | rc = -ENOMEM; |
2009 | goto out_free; | 2009 | goto out_free; |
2010 | } | 2010 | } |
2011 | 2011 | ||
2012 | /* Allocate parameter blocks. */ | 2012 | /* Allocate parameter blocks. */ |
2013 | iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), | 2013 | iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), |
2014 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 2014 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); |
2015 | if (!iucv_param[cpu]) { | 2015 | if (!iucv_param[cpu]) { |
2016 | rc = -ENOMEM; | 2016 | rc = -ENOMEM; |
2017 | goto out_free; | 2017 | goto out_free; |
2018 | } | 2018 | } |
2019 | iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), | 2019 | iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), |
2020 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 2020 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); |
2021 | if (!iucv_param_irq[cpu]) { | 2021 | if (!iucv_param_irq[cpu]) { |
2022 | rc = -ENOMEM; | 2022 | rc = -ENOMEM; |
2023 | goto out_free; | 2023 | goto out_free; |
2024 | } | 2024 | } |
2025 | 2025 | ||
2026 | } | 2026 | } |
2027 | rc = register_hotcpu_notifier(&iucv_cpu_notifier); | 2027 | rc = register_hotcpu_notifier(&iucv_cpu_notifier); |
2028 | if (rc) | 2028 | if (rc) |
2029 | goto out_free; | 2029 | goto out_free; |
2030 | rc = register_reboot_notifier(&iucv_reboot_notifier); | 2030 | rc = register_reboot_notifier(&iucv_reboot_notifier); |
2031 | if (rc) | 2031 | if (rc) |
2032 | goto out_cpu; | 2032 | goto out_cpu; |
2033 | ASCEBC(iucv_error_no_listener, 16); | 2033 | ASCEBC(iucv_error_no_listener, 16); |
2034 | ASCEBC(iucv_error_no_memory, 16); | 2034 | ASCEBC(iucv_error_no_memory, 16); |
2035 | ASCEBC(iucv_error_pathid, 16); | 2035 | ASCEBC(iucv_error_pathid, 16); |
2036 | iucv_available = 1; | 2036 | iucv_available = 1; |
2037 | rc = bus_register(&iucv_bus); | 2037 | rc = bus_register(&iucv_bus); |
2038 | if (rc) | 2038 | if (rc) |
2039 | goto out_reboot; | 2039 | goto out_reboot; |
2040 | return 0; | 2040 | return 0; |
2041 | 2041 | ||
2042 | out_reboot: | 2042 | out_reboot: |
2043 | unregister_reboot_notifier(&iucv_reboot_notifier); | 2043 | unregister_reboot_notifier(&iucv_reboot_notifier); |
2044 | out_cpu: | 2044 | out_cpu: |
2045 | unregister_hotcpu_notifier(&iucv_cpu_notifier); | 2045 | unregister_hotcpu_notifier(&iucv_cpu_notifier); |
2046 | out_free: | 2046 | out_free: |
2047 | for_each_possible_cpu(cpu) { | 2047 | for_each_possible_cpu(cpu) { |
2048 | kfree(iucv_param_irq[cpu]); | 2048 | kfree(iucv_param_irq[cpu]); |
2049 | iucv_param_irq[cpu] = NULL; | 2049 | iucv_param_irq[cpu] = NULL; |
2050 | kfree(iucv_param[cpu]); | 2050 | kfree(iucv_param[cpu]); |
2051 | iucv_param[cpu] = NULL; | 2051 | iucv_param[cpu] = NULL; |
2052 | kfree(iucv_irq_data[cpu]); | 2052 | kfree(iucv_irq_data[cpu]); |
2053 | iucv_irq_data[cpu] = NULL; | 2053 | iucv_irq_data[cpu] = NULL; |
2054 | } | 2054 | } |
2055 | root_device_unregister(iucv_root); | 2055 | root_device_unregister(iucv_root); |
2056 | out_int: | 2056 | out_int: |
2057 | unregister_external_interrupt(0x4000, iucv_external_interrupt); | 2057 | unregister_external_interrupt(0x4000, iucv_external_interrupt); |
2058 | out: | 2058 | out: |
2059 | return rc; | 2059 | return rc; |
2060 | } | 2060 | } |
2061 | 2061 | ||
2062 | /** | 2062 | /** |
2063 | * iucv_exit | 2063 | * iucv_exit |
2064 | * | 2064 | * |
2065 | * Frees everything allocated from iucv_init. | 2065 | * Frees everything allocated from iucv_init. |
2066 | */ | 2066 | */ |
2067 | static void __exit iucv_exit(void) | 2067 | static void __exit iucv_exit(void) |
2068 | { | 2068 | { |
2069 | struct iucv_irq_list *p, *n; | 2069 | struct iucv_irq_list *p, *n; |
2070 | int cpu; | 2070 | int cpu; |
2071 | 2071 | ||
2072 | spin_lock_irq(&iucv_queue_lock); | 2072 | spin_lock_irq(&iucv_queue_lock); |
2073 | list_for_each_entry_safe(p, n, &iucv_task_queue, list) | 2073 | list_for_each_entry_safe(p, n, &iucv_task_queue, list) |
2074 | kfree(p); | 2074 | kfree(p); |
2075 | list_for_each_entry_safe(p, n, &iucv_work_queue, list) | 2075 | list_for_each_entry_safe(p, n, &iucv_work_queue, list) |
2076 | kfree(p); | 2076 | kfree(p); |
2077 | spin_unlock_irq(&iucv_queue_lock); | 2077 | spin_unlock_irq(&iucv_queue_lock); |
2078 | unregister_reboot_notifier(&iucv_reboot_notifier); | 2078 | unregister_reboot_notifier(&iucv_reboot_notifier); |
2079 | unregister_hotcpu_notifier(&iucv_cpu_notifier); | 2079 | unregister_hotcpu_notifier(&iucv_cpu_notifier); |
2080 | for_each_possible_cpu(cpu) { | 2080 | for_each_possible_cpu(cpu) { |
2081 | kfree(iucv_param_irq[cpu]); | 2081 | kfree(iucv_param_irq[cpu]); |
2082 | iucv_param_irq[cpu] = NULL; | 2082 | iucv_param_irq[cpu] = NULL; |
2083 | kfree(iucv_param[cpu]); | 2083 | kfree(iucv_param[cpu]); |
2084 | iucv_param[cpu] = NULL; | 2084 | iucv_param[cpu] = NULL; |
2085 | kfree(iucv_irq_data[cpu]); | 2085 | kfree(iucv_irq_data[cpu]); |
2086 | iucv_irq_data[cpu] = NULL; | 2086 | iucv_irq_data[cpu] = NULL; |
2087 | } | 2087 | } |
2088 | root_device_unregister(iucv_root); | 2088 | root_device_unregister(iucv_root); |
2089 | bus_unregister(&iucv_bus); | 2089 | bus_unregister(&iucv_bus); |
2090 | unregister_external_interrupt(0x4000, iucv_external_interrupt); | 2090 | unregister_external_interrupt(0x4000, iucv_external_interrupt); |
2091 | } | 2091 | } |
2092 | 2092 | ||
2093 | subsys_initcall(iucv_init); | 2093 | subsys_initcall(iucv_init); |
2094 | module_exit(iucv_exit); | 2094 | module_exit(iucv_exit); |
2095 | 2095 | ||
2096 | MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)"); | 2096 | MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)"); |
2097 | MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver"); | 2097 | MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver"); |
2098 | MODULE_LICENSE("GPL"); | 2098 | MODULE_LICENSE("GPL"); |
2099 | 2099 |