Commit 3441f04b4b62758a798f9fbbf2047dfedf0329a5

Authored by Anton Blanchard
Committed by Benjamin Herrenschmidt
1 parent 14ad0c58d5

powerpc/powernv: Create OPAL sglist helper functions and fix endian issues

We have two copies of code that creates an OPAL sg list. Consolidate
these into a common set of helpers and fix the endian issues.

The flash interface embedded a version number in the num_entries
field, whereas the dump interface did not. Since versioning
wasn't added to the flash interface and it is impossible to add
this in a backwards-compatible way, just remove it.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

Showing 4 changed files with 76 additions and 188 deletions

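For context on the consolidation, here is a minimal sketch of what the shared helper pair declared at the end of opal.h (opal_vmalloc_to_sg_list() and opal_free_sg_list()) could look like. The actual bodies land in opal.c, which is one of the four changed files but is not reproduced in this excerpt, so treat this as an illustration of the chained sg-list layout and of the endian fix (data, length and next stored with cpu_to_be64()), not as the committed code.

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>
    #include <asm/opal.h>

    /*
     * Sketch only: walk a vmalloc'd buffer page by page and pack it into
     * chained opal_sg_list nodes, one PAGE_SIZE node at a time. All fields
     * handed to firmware are big-endian, hence the cpu_to_be64() calls.
     */
    struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
                                                 unsigned long vmalloc_size)
    {
            struct opal_sg_list *sg, *first = NULL;
            unsigned long i = 0;

            sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
            if (!sg)
                    goto nomem;
            first = sg;

            while (vmalloc_size > 0) {
                    uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
                    uint64_t length = min(vmalloc_size, PAGE_SIZE);

                    sg->entry[i].data = cpu_to_be64(data);
                    sg->entry[i].length = cpu_to_be64(length);
                    i++;

                    if (i >= SG_ENTRIES_PER_NODE) {
                            struct opal_sg_list *next;

                            next = kzalloc(PAGE_SIZE, GFP_KERNEL);
                            if (!next)
                                    goto nomem;

                            /* 16 bytes of header (length + next) precede the entries */
                            sg->length = cpu_to_be64(
                                            i * sizeof(struct opal_sg_entry) + 16);
                            i = 0;
                            sg->next = cpu_to_be64(__pa(next));
                            sg = next;
                    }

                    vmalloc_addr += length;
                    vmalloc_size -= length;
            }

            sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);
            return first;

    nomem:
            pr_err("%s: Failed to allocate memory\n", __func__);
            opal_free_sg_list(first);
            return NULL;
    }

    /* Walk the physical-address chain and free each node. */
    void opal_free_sg_list(struct opal_sg_list *sg)
    {
            while (sg) {
                    uint64_t next = be64_to_cpu(sg->next);

                    kfree(sg);
                    sg = next ? __va(next) : NULL;
            }
    }

Callers such as the flash and dump code would then build the list with opal_vmalloc_to_sg_list(), pass __pa() of the first node to firmware, and release it with opal_free_sg_list() once the operation completes.
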
arch/powerpc/include/asm/opal.h
1 /* 1 /*
2 * PowerNV OPAL definitions. 2 * PowerNV OPAL definitions.
3 * 3 *
4 * Copyright 2011 IBM Corp. 4 * Copyright 2011 IBM Corp.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12 #ifndef __OPAL_H 12 #ifndef __OPAL_H
13 #define __OPAL_H 13 #define __OPAL_H
14 14
15 /****** Takeover interface ********/ 15 /****** Takeover interface ********/
16 16
17 /* PAPR H-Call used to querty the HAL existence and/or instanciate 17 /* PAPR H-Call used to querty the HAL existence and/or instanciate
18 * it from within pHyp (tech preview only). 18 * it from within pHyp (tech preview only).
19 * 19 *
20 * This is exclusively used in prom_init.c 20 * This is exclusively used in prom_init.c
21 */ 21 */
22 22
23 #ifndef __ASSEMBLY__ 23 #ifndef __ASSEMBLY__
24 24
25 struct opal_takeover_args { 25 struct opal_takeover_args {
26 u64 k_image; /* r4 */ 26 u64 k_image; /* r4 */
27 u64 k_size; /* r5 */ 27 u64 k_size; /* r5 */
28 u64 k_entry; /* r6 */ 28 u64 k_entry; /* r6 */
29 u64 k_entry2; /* r7 */ 29 u64 k_entry2; /* r7 */
30 u64 hal_addr; /* r8 */ 30 u64 hal_addr; /* r8 */
31 u64 rd_image; /* r9 */ 31 u64 rd_image; /* r9 */
32 u64 rd_size; /* r10 */ 32 u64 rd_size; /* r10 */
33 u64 rd_loc; /* r11 */ 33 u64 rd_loc; /* r11 */
34 }; 34 };
35 35
36 /* 36 /*
37 * SG entry 37 * SG entry
38 * 38 *
39 * WARNING: The current implementation requires each entry 39 * WARNING: The current implementation requires each entry
40 * to represent a block that is 4k aligned *and* each block 40 * to represent a block that is 4k aligned *and* each block
41 * size except the last one in the list to be as well. 41 * size except the last one in the list to be as well.
42 */ 42 */
43 struct opal_sg_entry { 43 struct opal_sg_entry {
44 void *data; 44 __be64 data;
45 long length; 45 __be64 length;
46 }; 46 };
47 47
48 /* sg list */ 48 /* SG list */
49 struct opal_sg_list { 49 struct opal_sg_list {
50 unsigned long num_entries; 50 __be64 length;
51 struct opal_sg_list *next; 51 __be64 next;
52 struct opal_sg_entry entry[]; 52 struct opal_sg_entry entry[];
53 }; 53 };
54 54
55 /* We calculate number of sg entries based on PAGE_SIZE */ 55 /* We calculate number of sg entries based on PAGE_SIZE */
56 #define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry)) 56 #define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
57 57
58 extern long opal_query_takeover(u64 *hal_size, u64 *hal_align); 58 extern long opal_query_takeover(u64 *hal_size, u64 *hal_align);
59 59
60 extern long opal_do_takeover(struct opal_takeover_args *args); 60 extern long opal_do_takeover(struct opal_takeover_args *args);
61 61
62 struct rtas_args; 62 struct rtas_args;
63 extern int opal_enter_rtas(struct rtas_args *args, 63 extern int opal_enter_rtas(struct rtas_args *args,
64 unsigned long data, 64 unsigned long data,
65 unsigned long entry); 65 unsigned long entry);
66 66
67 #endif /* __ASSEMBLY__ */ 67 #endif /* __ASSEMBLY__ */
68 68
69 /****** OPAL APIs ******/ 69 /****** OPAL APIs ******/
70 70
71 /* Return codes */ 71 /* Return codes */
72 #define OPAL_SUCCESS 0 72 #define OPAL_SUCCESS 0
73 #define OPAL_PARAMETER -1 73 #define OPAL_PARAMETER -1
74 #define OPAL_BUSY -2 74 #define OPAL_BUSY -2
75 #define OPAL_PARTIAL -3 75 #define OPAL_PARTIAL -3
76 #define OPAL_CONSTRAINED -4 76 #define OPAL_CONSTRAINED -4
77 #define OPAL_CLOSED -5 77 #define OPAL_CLOSED -5
78 #define OPAL_HARDWARE -6 78 #define OPAL_HARDWARE -6
79 #define OPAL_UNSUPPORTED -7 79 #define OPAL_UNSUPPORTED -7
80 #define OPAL_PERMISSION -8 80 #define OPAL_PERMISSION -8
81 #define OPAL_NO_MEM -9 81 #define OPAL_NO_MEM -9
82 #define OPAL_RESOURCE -10 82 #define OPAL_RESOURCE -10
83 #define OPAL_INTERNAL_ERROR -11 83 #define OPAL_INTERNAL_ERROR -11
84 #define OPAL_BUSY_EVENT -12 84 #define OPAL_BUSY_EVENT -12
85 #define OPAL_HARDWARE_FROZEN -13 85 #define OPAL_HARDWARE_FROZEN -13
86 #define OPAL_WRONG_STATE -14 86 #define OPAL_WRONG_STATE -14
87 #define OPAL_ASYNC_COMPLETION -15 87 #define OPAL_ASYNC_COMPLETION -15
88 88
89 /* API Tokens (in r0) */ 89 /* API Tokens (in r0) */
90 #define OPAL_INVALID_CALL -1 90 #define OPAL_INVALID_CALL -1
91 #define OPAL_CONSOLE_WRITE 1 91 #define OPAL_CONSOLE_WRITE 1
92 #define OPAL_CONSOLE_READ 2 92 #define OPAL_CONSOLE_READ 2
93 #define OPAL_RTC_READ 3 93 #define OPAL_RTC_READ 3
94 #define OPAL_RTC_WRITE 4 94 #define OPAL_RTC_WRITE 4
95 #define OPAL_CEC_POWER_DOWN 5 95 #define OPAL_CEC_POWER_DOWN 5
96 #define OPAL_CEC_REBOOT 6 96 #define OPAL_CEC_REBOOT 6
97 #define OPAL_READ_NVRAM 7 97 #define OPAL_READ_NVRAM 7
98 #define OPAL_WRITE_NVRAM 8 98 #define OPAL_WRITE_NVRAM 8
99 #define OPAL_HANDLE_INTERRUPT 9 99 #define OPAL_HANDLE_INTERRUPT 9
100 #define OPAL_POLL_EVENTS 10 100 #define OPAL_POLL_EVENTS 10
101 #define OPAL_PCI_SET_HUB_TCE_MEMORY 11 101 #define OPAL_PCI_SET_HUB_TCE_MEMORY 11
102 #define OPAL_PCI_SET_PHB_TCE_MEMORY 12 102 #define OPAL_PCI_SET_PHB_TCE_MEMORY 12
103 #define OPAL_PCI_CONFIG_READ_BYTE 13 103 #define OPAL_PCI_CONFIG_READ_BYTE 13
104 #define OPAL_PCI_CONFIG_READ_HALF_WORD 14 104 #define OPAL_PCI_CONFIG_READ_HALF_WORD 14
105 #define OPAL_PCI_CONFIG_READ_WORD 15 105 #define OPAL_PCI_CONFIG_READ_WORD 15
106 #define OPAL_PCI_CONFIG_WRITE_BYTE 16 106 #define OPAL_PCI_CONFIG_WRITE_BYTE 16
107 #define OPAL_PCI_CONFIG_WRITE_HALF_WORD 17 107 #define OPAL_PCI_CONFIG_WRITE_HALF_WORD 17
108 #define OPAL_PCI_CONFIG_WRITE_WORD 18 108 #define OPAL_PCI_CONFIG_WRITE_WORD 18
109 #define OPAL_SET_XIVE 19 109 #define OPAL_SET_XIVE 19
110 #define OPAL_GET_XIVE 20 110 #define OPAL_GET_XIVE 20
111 #define OPAL_GET_COMPLETION_TOKEN_STATUS 21 /* obsolete */ 111 #define OPAL_GET_COMPLETION_TOKEN_STATUS 21 /* obsolete */
112 #define OPAL_REGISTER_OPAL_EXCEPTION_HANDLER 22 112 #define OPAL_REGISTER_OPAL_EXCEPTION_HANDLER 22
113 #define OPAL_PCI_EEH_FREEZE_STATUS 23 113 #define OPAL_PCI_EEH_FREEZE_STATUS 23
114 #define OPAL_PCI_SHPC 24 114 #define OPAL_PCI_SHPC 24
115 #define OPAL_CONSOLE_WRITE_BUFFER_SPACE 25 115 #define OPAL_CONSOLE_WRITE_BUFFER_SPACE 25
116 #define OPAL_PCI_EEH_FREEZE_CLEAR 26 116 #define OPAL_PCI_EEH_FREEZE_CLEAR 26
117 #define OPAL_PCI_PHB_MMIO_ENABLE 27 117 #define OPAL_PCI_PHB_MMIO_ENABLE 27
118 #define OPAL_PCI_SET_PHB_MEM_WINDOW 28 118 #define OPAL_PCI_SET_PHB_MEM_WINDOW 28
119 #define OPAL_PCI_MAP_PE_MMIO_WINDOW 29 119 #define OPAL_PCI_MAP_PE_MMIO_WINDOW 29
120 #define OPAL_PCI_SET_PHB_TABLE_MEMORY 30 120 #define OPAL_PCI_SET_PHB_TABLE_MEMORY 30
121 #define OPAL_PCI_SET_PE 31 121 #define OPAL_PCI_SET_PE 31
122 #define OPAL_PCI_SET_PELTV 32 122 #define OPAL_PCI_SET_PELTV 32
123 #define OPAL_PCI_SET_MVE 33 123 #define OPAL_PCI_SET_MVE 33
124 #define OPAL_PCI_SET_MVE_ENABLE 34 124 #define OPAL_PCI_SET_MVE_ENABLE 34
125 #define OPAL_PCI_GET_XIVE_REISSUE 35 125 #define OPAL_PCI_GET_XIVE_REISSUE 35
126 #define OPAL_PCI_SET_XIVE_REISSUE 36 126 #define OPAL_PCI_SET_XIVE_REISSUE 36
127 #define OPAL_PCI_SET_XIVE_PE 37 127 #define OPAL_PCI_SET_XIVE_PE 37
128 #define OPAL_GET_XIVE_SOURCE 38 128 #define OPAL_GET_XIVE_SOURCE 38
129 #define OPAL_GET_MSI_32 39 129 #define OPAL_GET_MSI_32 39
130 #define OPAL_GET_MSI_64 40 130 #define OPAL_GET_MSI_64 40
131 #define OPAL_START_CPU 41 131 #define OPAL_START_CPU 41
132 #define OPAL_QUERY_CPU_STATUS 42 132 #define OPAL_QUERY_CPU_STATUS 42
133 #define OPAL_WRITE_OPPANEL 43 133 #define OPAL_WRITE_OPPANEL 43
134 #define OPAL_PCI_MAP_PE_DMA_WINDOW 44 134 #define OPAL_PCI_MAP_PE_DMA_WINDOW 44
135 #define OPAL_PCI_MAP_PE_DMA_WINDOW_REAL 45 135 #define OPAL_PCI_MAP_PE_DMA_WINDOW_REAL 45
136 #define OPAL_PCI_RESET 49 136 #define OPAL_PCI_RESET 49
137 #define OPAL_PCI_GET_HUB_DIAG_DATA 50 137 #define OPAL_PCI_GET_HUB_DIAG_DATA 50
138 #define OPAL_PCI_GET_PHB_DIAG_DATA 51 138 #define OPAL_PCI_GET_PHB_DIAG_DATA 51
139 #define OPAL_PCI_FENCE_PHB 52 139 #define OPAL_PCI_FENCE_PHB 52
140 #define OPAL_PCI_REINIT 53 140 #define OPAL_PCI_REINIT 53
141 #define OPAL_PCI_MASK_PE_ERROR 54 141 #define OPAL_PCI_MASK_PE_ERROR 54
142 #define OPAL_SET_SLOT_LED_STATUS 55 142 #define OPAL_SET_SLOT_LED_STATUS 55
143 #define OPAL_GET_EPOW_STATUS 56 143 #define OPAL_GET_EPOW_STATUS 56
144 #define OPAL_SET_SYSTEM_ATTENTION_LED 57 144 #define OPAL_SET_SYSTEM_ATTENTION_LED 57
145 #define OPAL_RESERVED1 58 145 #define OPAL_RESERVED1 58
146 #define OPAL_RESERVED2 59 146 #define OPAL_RESERVED2 59
147 #define OPAL_PCI_NEXT_ERROR 60 147 #define OPAL_PCI_NEXT_ERROR 60
148 #define OPAL_PCI_EEH_FREEZE_STATUS2 61 148 #define OPAL_PCI_EEH_FREEZE_STATUS2 61
149 #define OPAL_PCI_POLL 62 149 #define OPAL_PCI_POLL 62
150 #define OPAL_PCI_MSI_EOI 63 150 #define OPAL_PCI_MSI_EOI 63
151 #define OPAL_PCI_GET_PHB_DIAG_DATA2 64 151 #define OPAL_PCI_GET_PHB_DIAG_DATA2 64
152 #define OPAL_XSCOM_READ 65 152 #define OPAL_XSCOM_READ 65
153 #define OPAL_XSCOM_WRITE 66 153 #define OPAL_XSCOM_WRITE 66
154 #define OPAL_LPC_READ 67 154 #define OPAL_LPC_READ 67
155 #define OPAL_LPC_WRITE 68 155 #define OPAL_LPC_WRITE 68
156 #define OPAL_RETURN_CPU 69 156 #define OPAL_RETURN_CPU 69
157 #define OPAL_ELOG_READ 71 157 #define OPAL_ELOG_READ 71
158 #define OPAL_ELOG_WRITE 72 158 #define OPAL_ELOG_WRITE 72
159 #define OPAL_ELOG_ACK 73 159 #define OPAL_ELOG_ACK 73
160 #define OPAL_ELOG_RESEND 74 160 #define OPAL_ELOG_RESEND 74
161 #define OPAL_ELOG_SIZE 75 161 #define OPAL_ELOG_SIZE 75
162 #define OPAL_FLASH_VALIDATE 76 162 #define OPAL_FLASH_VALIDATE 76
163 #define OPAL_FLASH_MANAGE 77 163 #define OPAL_FLASH_MANAGE 77
164 #define OPAL_FLASH_UPDATE 78 164 #define OPAL_FLASH_UPDATE 78
165 #define OPAL_RESYNC_TIMEBASE 79 165 #define OPAL_RESYNC_TIMEBASE 79
166 #define OPAL_DUMP_INIT 81 166 #define OPAL_DUMP_INIT 81
167 #define OPAL_DUMP_INFO 82 167 #define OPAL_DUMP_INFO 82
168 #define OPAL_DUMP_READ 83 168 #define OPAL_DUMP_READ 83
169 #define OPAL_DUMP_ACK 84 169 #define OPAL_DUMP_ACK 84
170 #define OPAL_GET_MSG 85 170 #define OPAL_GET_MSG 85
171 #define OPAL_CHECK_ASYNC_COMPLETION 86 171 #define OPAL_CHECK_ASYNC_COMPLETION 86
172 #define OPAL_SYNC_HOST_REBOOT 87 172 #define OPAL_SYNC_HOST_REBOOT 87
173 #define OPAL_SENSOR_READ 88 173 #define OPAL_SENSOR_READ 88
174 #define OPAL_GET_PARAM 89 174 #define OPAL_GET_PARAM 89
175 #define OPAL_SET_PARAM 90 175 #define OPAL_SET_PARAM 90
176 #define OPAL_DUMP_RESEND 91 176 #define OPAL_DUMP_RESEND 91
177 #define OPAL_DUMP_INFO2 94 177 #define OPAL_DUMP_INFO2 94
178 178
179 #ifndef __ASSEMBLY__ 179 #ifndef __ASSEMBLY__
180 180
181 #include <linux/notifier.h> 181 #include <linux/notifier.h>
182 182
183 /* Other enums */ 183 /* Other enums */
184 enum OpalVendorApiTokens { 184 enum OpalVendorApiTokens {
185 OPAL_START_VENDOR_API_RANGE = 1000, OPAL_END_VENDOR_API_RANGE = 1999 185 OPAL_START_VENDOR_API_RANGE = 1000, OPAL_END_VENDOR_API_RANGE = 1999
186 }; 186 };
187 187
188 enum OpalFreezeState { 188 enum OpalFreezeState {
189 OPAL_EEH_STOPPED_NOT_FROZEN = 0, 189 OPAL_EEH_STOPPED_NOT_FROZEN = 0,
190 OPAL_EEH_STOPPED_MMIO_FREEZE = 1, 190 OPAL_EEH_STOPPED_MMIO_FREEZE = 1,
191 OPAL_EEH_STOPPED_DMA_FREEZE = 2, 191 OPAL_EEH_STOPPED_DMA_FREEZE = 2,
192 OPAL_EEH_STOPPED_MMIO_DMA_FREEZE = 3, 192 OPAL_EEH_STOPPED_MMIO_DMA_FREEZE = 3,
193 OPAL_EEH_STOPPED_RESET = 4, 193 OPAL_EEH_STOPPED_RESET = 4,
194 OPAL_EEH_STOPPED_TEMP_UNAVAIL = 5, 194 OPAL_EEH_STOPPED_TEMP_UNAVAIL = 5,
195 OPAL_EEH_STOPPED_PERM_UNAVAIL = 6 195 OPAL_EEH_STOPPED_PERM_UNAVAIL = 6
196 }; 196 };
197 197
198 enum OpalEehFreezeActionToken { 198 enum OpalEehFreezeActionToken {
199 OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO = 1, 199 OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO = 1,
200 OPAL_EEH_ACTION_CLEAR_FREEZE_DMA = 2, 200 OPAL_EEH_ACTION_CLEAR_FREEZE_DMA = 2,
201 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL = 3 201 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL = 3
202 }; 202 };
203 203
204 enum OpalPciStatusToken { 204 enum OpalPciStatusToken {
205 OPAL_EEH_NO_ERROR = 0, 205 OPAL_EEH_NO_ERROR = 0,
206 OPAL_EEH_IOC_ERROR = 1, 206 OPAL_EEH_IOC_ERROR = 1,
207 OPAL_EEH_PHB_ERROR = 2, 207 OPAL_EEH_PHB_ERROR = 2,
208 OPAL_EEH_PE_ERROR = 3, 208 OPAL_EEH_PE_ERROR = 3,
209 OPAL_EEH_PE_MMIO_ERROR = 4, 209 OPAL_EEH_PE_MMIO_ERROR = 4,
210 OPAL_EEH_PE_DMA_ERROR = 5 210 OPAL_EEH_PE_DMA_ERROR = 5
211 }; 211 };
212 212
213 enum OpalPciErrorSeverity { 213 enum OpalPciErrorSeverity {
214 OPAL_EEH_SEV_NO_ERROR = 0, 214 OPAL_EEH_SEV_NO_ERROR = 0,
215 OPAL_EEH_SEV_IOC_DEAD = 1, 215 OPAL_EEH_SEV_IOC_DEAD = 1,
216 OPAL_EEH_SEV_PHB_DEAD = 2, 216 OPAL_EEH_SEV_PHB_DEAD = 2,
217 OPAL_EEH_SEV_PHB_FENCED = 3, 217 OPAL_EEH_SEV_PHB_FENCED = 3,
218 OPAL_EEH_SEV_PE_ER = 4, 218 OPAL_EEH_SEV_PE_ER = 4,
219 OPAL_EEH_SEV_INF = 5 219 OPAL_EEH_SEV_INF = 5
220 }; 220 };
221 221
222 enum OpalShpcAction { 222 enum OpalShpcAction {
223 OPAL_SHPC_GET_LINK_STATE = 0, 223 OPAL_SHPC_GET_LINK_STATE = 0,
224 OPAL_SHPC_GET_SLOT_STATE = 1 224 OPAL_SHPC_GET_SLOT_STATE = 1
225 }; 225 };
226 226
227 enum OpalShpcLinkState { 227 enum OpalShpcLinkState {
228 OPAL_SHPC_LINK_DOWN = 0, 228 OPAL_SHPC_LINK_DOWN = 0,
229 OPAL_SHPC_LINK_UP = 1 229 OPAL_SHPC_LINK_UP = 1
230 }; 230 };
231 231
232 enum OpalMmioWindowType { 232 enum OpalMmioWindowType {
233 OPAL_M32_WINDOW_TYPE = 1, 233 OPAL_M32_WINDOW_TYPE = 1,
234 OPAL_M64_WINDOW_TYPE = 2, 234 OPAL_M64_WINDOW_TYPE = 2,
235 OPAL_IO_WINDOW_TYPE = 3 235 OPAL_IO_WINDOW_TYPE = 3
236 }; 236 };
237 237
238 enum OpalShpcSlotState { 238 enum OpalShpcSlotState {
239 OPAL_SHPC_DEV_NOT_PRESENT = 0, 239 OPAL_SHPC_DEV_NOT_PRESENT = 0,
240 OPAL_SHPC_DEV_PRESENT = 1 240 OPAL_SHPC_DEV_PRESENT = 1
241 }; 241 };
242 242
243 enum OpalExceptionHandler { 243 enum OpalExceptionHandler {
244 OPAL_MACHINE_CHECK_HANDLER = 1, 244 OPAL_MACHINE_CHECK_HANDLER = 1,
245 OPAL_HYPERVISOR_MAINTENANCE_HANDLER = 2, 245 OPAL_HYPERVISOR_MAINTENANCE_HANDLER = 2,
246 OPAL_SOFTPATCH_HANDLER = 3 246 OPAL_SOFTPATCH_HANDLER = 3
247 }; 247 };
248 248
249 enum OpalPendingState { 249 enum OpalPendingState {
250 OPAL_EVENT_OPAL_INTERNAL = 0x1, 250 OPAL_EVENT_OPAL_INTERNAL = 0x1,
251 OPAL_EVENT_NVRAM = 0x2, 251 OPAL_EVENT_NVRAM = 0x2,
252 OPAL_EVENT_RTC = 0x4, 252 OPAL_EVENT_RTC = 0x4,
253 OPAL_EVENT_CONSOLE_OUTPUT = 0x8, 253 OPAL_EVENT_CONSOLE_OUTPUT = 0x8,
254 OPAL_EVENT_CONSOLE_INPUT = 0x10, 254 OPAL_EVENT_CONSOLE_INPUT = 0x10,
255 OPAL_EVENT_ERROR_LOG_AVAIL = 0x20, 255 OPAL_EVENT_ERROR_LOG_AVAIL = 0x20,
256 OPAL_EVENT_ERROR_LOG = 0x40, 256 OPAL_EVENT_ERROR_LOG = 0x40,
257 OPAL_EVENT_EPOW = 0x80, 257 OPAL_EVENT_EPOW = 0x80,
258 OPAL_EVENT_LED_STATUS = 0x100, 258 OPAL_EVENT_LED_STATUS = 0x100,
259 OPAL_EVENT_PCI_ERROR = 0x200, 259 OPAL_EVENT_PCI_ERROR = 0x200,
260 OPAL_EVENT_DUMP_AVAIL = 0x400, 260 OPAL_EVENT_DUMP_AVAIL = 0x400,
261 OPAL_EVENT_MSG_PENDING = 0x800, 261 OPAL_EVENT_MSG_PENDING = 0x800,
262 }; 262 };
263 263
264 enum OpalMessageType { 264 enum OpalMessageType {
265 OPAL_MSG_ASYNC_COMP = 0, /* params[0] = token, params[1] = rc, 265 OPAL_MSG_ASYNC_COMP = 0, /* params[0] = token, params[1] = rc,
266 * additional params function-specific 266 * additional params function-specific
267 */ 267 */
268 OPAL_MSG_MEM_ERR, 268 OPAL_MSG_MEM_ERR,
269 OPAL_MSG_EPOW, 269 OPAL_MSG_EPOW,
270 OPAL_MSG_SHUTDOWN, 270 OPAL_MSG_SHUTDOWN,
271 OPAL_MSG_TYPE_MAX, 271 OPAL_MSG_TYPE_MAX,
272 }; 272 };
273 273
274 /* Machine check related definitions */ 274 /* Machine check related definitions */
275 enum OpalMCE_Version { 275 enum OpalMCE_Version {
276 OpalMCE_V1 = 1, 276 OpalMCE_V1 = 1,
277 }; 277 };
278 278
279 enum OpalMCE_Severity { 279 enum OpalMCE_Severity {
280 OpalMCE_SEV_NO_ERROR = 0, 280 OpalMCE_SEV_NO_ERROR = 0,
281 OpalMCE_SEV_WARNING = 1, 281 OpalMCE_SEV_WARNING = 1,
282 OpalMCE_SEV_ERROR_SYNC = 2, 282 OpalMCE_SEV_ERROR_SYNC = 2,
283 OpalMCE_SEV_FATAL = 3, 283 OpalMCE_SEV_FATAL = 3,
284 }; 284 };
285 285
286 enum OpalMCE_Disposition { 286 enum OpalMCE_Disposition {
287 OpalMCE_DISPOSITION_RECOVERED = 0, 287 OpalMCE_DISPOSITION_RECOVERED = 0,
288 OpalMCE_DISPOSITION_NOT_RECOVERED = 1, 288 OpalMCE_DISPOSITION_NOT_RECOVERED = 1,
289 }; 289 };
290 290
291 enum OpalMCE_Initiator { 291 enum OpalMCE_Initiator {
292 OpalMCE_INITIATOR_UNKNOWN = 0, 292 OpalMCE_INITIATOR_UNKNOWN = 0,
293 OpalMCE_INITIATOR_CPU = 1, 293 OpalMCE_INITIATOR_CPU = 1,
294 }; 294 };
295 295
296 enum OpalMCE_ErrorType { 296 enum OpalMCE_ErrorType {
297 OpalMCE_ERROR_TYPE_UNKNOWN = 0, 297 OpalMCE_ERROR_TYPE_UNKNOWN = 0,
298 OpalMCE_ERROR_TYPE_UE = 1, 298 OpalMCE_ERROR_TYPE_UE = 1,
299 OpalMCE_ERROR_TYPE_SLB = 2, 299 OpalMCE_ERROR_TYPE_SLB = 2,
300 OpalMCE_ERROR_TYPE_ERAT = 3, 300 OpalMCE_ERROR_TYPE_ERAT = 3,
301 OpalMCE_ERROR_TYPE_TLB = 4, 301 OpalMCE_ERROR_TYPE_TLB = 4,
302 }; 302 };
303 303
304 enum OpalMCE_UeErrorType { 304 enum OpalMCE_UeErrorType {
305 OpalMCE_UE_ERROR_INDETERMINATE = 0, 305 OpalMCE_UE_ERROR_INDETERMINATE = 0,
306 OpalMCE_UE_ERROR_IFETCH = 1, 306 OpalMCE_UE_ERROR_IFETCH = 1,
307 OpalMCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2, 307 OpalMCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
308 OpalMCE_UE_ERROR_LOAD_STORE = 3, 308 OpalMCE_UE_ERROR_LOAD_STORE = 3,
309 OpalMCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4, 309 OpalMCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4,
310 }; 310 };
311 311
312 enum OpalMCE_SlbErrorType { 312 enum OpalMCE_SlbErrorType {
313 OpalMCE_SLB_ERROR_INDETERMINATE = 0, 313 OpalMCE_SLB_ERROR_INDETERMINATE = 0,
314 OpalMCE_SLB_ERROR_PARITY = 1, 314 OpalMCE_SLB_ERROR_PARITY = 1,
315 OpalMCE_SLB_ERROR_MULTIHIT = 2, 315 OpalMCE_SLB_ERROR_MULTIHIT = 2,
316 }; 316 };
317 317
318 enum OpalMCE_EratErrorType { 318 enum OpalMCE_EratErrorType {
319 OpalMCE_ERAT_ERROR_INDETERMINATE = 0, 319 OpalMCE_ERAT_ERROR_INDETERMINATE = 0,
320 OpalMCE_ERAT_ERROR_PARITY = 1, 320 OpalMCE_ERAT_ERROR_PARITY = 1,
321 OpalMCE_ERAT_ERROR_MULTIHIT = 2, 321 OpalMCE_ERAT_ERROR_MULTIHIT = 2,
322 }; 322 };
323 323
324 enum OpalMCE_TlbErrorType { 324 enum OpalMCE_TlbErrorType {
325 OpalMCE_TLB_ERROR_INDETERMINATE = 0, 325 OpalMCE_TLB_ERROR_INDETERMINATE = 0,
326 OpalMCE_TLB_ERROR_PARITY = 1, 326 OpalMCE_TLB_ERROR_PARITY = 1,
327 OpalMCE_TLB_ERROR_MULTIHIT = 2, 327 OpalMCE_TLB_ERROR_MULTIHIT = 2,
328 }; 328 };
329 329
330 enum OpalThreadStatus { 330 enum OpalThreadStatus {
331 OPAL_THREAD_INACTIVE = 0x0, 331 OPAL_THREAD_INACTIVE = 0x0,
332 OPAL_THREAD_STARTED = 0x1, 332 OPAL_THREAD_STARTED = 0x1,
333 OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */ 333 OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
334 }; 334 };
335 335
336 enum OpalPciBusCompare { 336 enum OpalPciBusCompare {
337 OpalPciBusAny = 0, /* Any bus number match */ 337 OpalPciBusAny = 0, /* Any bus number match */
338 OpalPciBus3Bits = 2, /* Match top 3 bits of bus number */ 338 OpalPciBus3Bits = 2, /* Match top 3 bits of bus number */
339 OpalPciBus4Bits = 3, /* Match top 4 bits of bus number */ 339 OpalPciBus4Bits = 3, /* Match top 4 bits of bus number */
340 OpalPciBus5Bits = 4, /* Match top 5 bits of bus number */ 340 OpalPciBus5Bits = 4, /* Match top 5 bits of bus number */
341 OpalPciBus6Bits = 5, /* Match top 6 bits of bus number */ 341 OpalPciBus6Bits = 5, /* Match top 6 bits of bus number */
342 OpalPciBus7Bits = 6, /* Match top 7 bits of bus number */ 342 OpalPciBus7Bits = 6, /* Match top 7 bits of bus number */
343 OpalPciBusAll = 7, /* Match bus number exactly */ 343 OpalPciBusAll = 7, /* Match bus number exactly */
344 }; 344 };
345 345
346 enum OpalDeviceCompare { 346 enum OpalDeviceCompare {
347 OPAL_IGNORE_RID_DEVICE_NUMBER = 0, 347 OPAL_IGNORE_RID_DEVICE_NUMBER = 0,
348 OPAL_COMPARE_RID_DEVICE_NUMBER = 1 348 OPAL_COMPARE_RID_DEVICE_NUMBER = 1
349 }; 349 };
350 350
351 enum OpalFuncCompare { 351 enum OpalFuncCompare {
352 OPAL_IGNORE_RID_FUNCTION_NUMBER = 0, 352 OPAL_IGNORE_RID_FUNCTION_NUMBER = 0,
353 OPAL_COMPARE_RID_FUNCTION_NUMBER = 1 353 OPAL_COMPARE_RID_FUNCTION_NUMBER = 1
354 }; 354 };
355 355
356 enum OpalPeAction { 356 enum OpalPeAction {
357 OPAL_UNMAP_PE = 0, 357 OPAL_UNMAP_PE = 0,
358 OPAL_MAP_PE = 1 358 OPAL_MAP_PE = 1
359 }; 359 };
360 360
361 enum OpalPeltvAction { 361 enum OpalPeltvAction {
362 OPAL_REMOVE_PE_FROM_DOMAIN = 0, 362 OPAL_REMOVE_PE_FROM_DOMAIN = 0,
363 OPAL_ADD_PE_TO_DOMAIN = 1 363 OPAL_ADD_PE_TO_DOMAIN = 1
364 }; 364 };
365 365
366 enum OpalMveEnableAction { 366 enum OpalMveEnableAction {
367 OPAL_DISABLE_MVE = 0, 367 OPAL_DISABLE_MVE = 0,
368 OPAL_ENABLE_MVE = 1 368 OPAL_ENABLE_MVE = 1
369 }; 369 };
370 370
371 enum OpalPciResetScope { 371 enum OpalPciResetScope {
372 OPAL_PHB_COMPLETE = 1, OPAL_PCI_LINK = 2, OPAL_PHB_ERROR = 3, 372 OPAL_PHB_COMPLETE = 1, OPAL_PCI_LINK = 2, OPAL_PHB_ERROR = 3,
373 OPAL_PCI_HOT_RESET = 4, OPAL_PCI_FUNDAMENTAL_RESET = 5, 373 OPAL_PCI_HOT_RESET = 4, OPAL_PCI_FUNDAMENTAL_RESET = 5,
374 OPAL_PCI_IODA_TABLE_RESET = 6, 374 OPAL_PCI_IODA_TABLE_RESET = 6,
375 }; 375 };
376 376
377 enum OpalPciReinitScope { 377 enum OpalPciReinitScope {
378 OPAL_REINIT_PCI_DEV = 1000 378 OPAL_REINIT_PCI_DEV = 1000
379 }; 379 };
380 380
381 enum OpalPciResetState { 381 enum OpalPciResetState {
382 OPAL_DEASSERT_RESET = 0, 382 OPAL_DEASSERT_RESET = 0,
383 OPAL_ASSERT_RESET = 1 383 OPAL_ASSERT_RESET = 1
384 }; 384 };
385 385
386 enum OpalPciMaskAction { 386 enum OpalPciMaskAction {
387 OPAL_UNMASK_ERROR_TYPE = 0, 387 OPAL_UNMASK_ERROR_TYPE = 0,
388 OPAL_MASK_ERROR_TYPE = 1 388 OPAL_MASK_ERROR_TYPE = 1
389 }; 389 };
390 390
391 enum OpalSlotLedType { 391 enum OpalSlotLedType {
392 OPAL_SLOT_LED_ID_TYPE = 0, 392 OPAL_SLOT_LED_ID_TYPE = 0,
393 OPAL_SLOT_LED_FAULT_TYPE = 1 393 OPAL_SLOT_LED_FAULT_TYPE = 1
394 }; 394 };
395 395
396 enum OpalLedAction { 396 enum OpalLedAction {
397 OPAL_TURN_OFF_LED = 0, 397 OPAL_TURN_OFF_LED = 0,
398 OPAL_TURN_ON_LED = 1, 398 OPAL_TURN_ON_LED = 1,
399 OPAL_QUERY_LED_STATE_AFTER_BUSY = 2 399 OPAL_QUERY_LED_STATE_AFTER_BUSY = 2
400 }; 400 };
401 401
402 enum OpalEpowStatus { 402 enum OpalEpowStatus {
403 OPAL_EPOW_NONE = 0, 403 OPAL_EPOW_NONE = 0,
404 OPAL_EPOW_UPS = 1, 404 OPAL_EPOW_UPS = 1,
405 OPAL_EPOW_OVER_AMBIENT_TEMP = 2, 405 OPAL_EPOW_OVER_AMBIENT_TEMP = 2,
406 OPAL_EPOW_OVER_INTERNAL_TEMP = 3 406 OPAL_EPOW_OVER_INTERNAL_TEMP = 3
407 }; 407 };
408 408
409 /* 409 /*
410 * Address cycle types for LPC accesses. These also correspond 410 * Address cycle types for LPC accesses. These also correspond
411 * to the content of the first cell of the "reg" property for 411 * to the content of the first cell of the "reg" property for
412 * device nodes on the LPC bus 412 * device nodes on the LPC bus
413 */ 413 */
414 enum OpalLPCAddressType { 414 enum OpalLPCAddressType {
415 OPAL_LPC_MEM = 0, 415 OPAL_LPC_MEM = 0,
416 OPAL_LPC_IO = 1, 416 OPAL_LPC_IO = 1,
417 OPAL_LPC_FW = 2, 417 OPAL_LPC_FW = 2,
418 }; 418 };
419 419
420 /* System parameter permission */ 420 /* System parameter permission */
421 enum OpalSysparamPerm { 421 enum OpalSysparamPerm {
422 OPAL_SYSPARAM_READ = 0x1, 422 OPAL_SYSPARAM_READ = 0x1,
423 OPAL_SYSPARAM_WRITE = 0x2, 423 OPAL_SYSPARAM_WRITE = 0x2,
424 OPAL_SYSPARAM_RW = (OPAL_SYSPARAM_READ | OPAL_SYSPARAM_WRITE), 424 OPAL_SYSPARAM_RW = (OPAL_SYSPARAM_READ | OPAL_SYSPARAM_WRITE),
425 }; 425 };
426 426
427 struct opal_msg { 427 struct opal_msg {
428 __be32 msg_type; 428 __be32 msg_type;
429 __be32 reserved; 429 __be32 reserved;
430 __be64 params[8]; 430 __be64 params[8];
431 }; 431 };
432 432
433 struct opal_machine_check_event { 433 struct opal_machine_check_event {
434 enum OpalMCE_Version version:8; /* 0x00 */ 434 enum OpalMCE_Version version:8; /* 0x00 */
435 uint8_t in_use; /* 0x01 */ 435 uint8_t in_use; /* 0x01 */
436 enum OpalMCE_Severity severity:8; /* 0x02 */ 436 enum OpalMCE_Severity severity:8; /* 0x02 */
437 enum OpalMCE_Initiator initiator:8; /* 0x03 */ 437 enum OpalMCE_Initiator initiator:8; /* 0x03 */
438 enum OpalMCE_ErrorType error_type:8; /* 0x04 */ 438 enum OpalMCE_ErrorType error_type:8; /* 0x04 */
439 enum OpalMCE_Disposition disposition:8; /* 0x05 */ 439 enum OpalMCE_Disposition disposition:8; /* 0x05 */
440 uint8_t reserved_1[2]; /* 0x06 */ 440 uint8_t reserved_1[2]; /* 0x06 */
441 uint64_t gpr3; /* 0x08 */ 441 uint64_t gpr3; /* 0x08 */
442 uint64_t srr0; /* 0x10 */ 442 uint64_t srr0; /* 0x10 */
443 uint64_t srr1; /* 0x18 */ 443 uint64_t srr1; /* 0x18 */
444 union { /* 0x20 */ 444 union { /* 0x20 */
445 struct { 445 struct {
446 enum OpalMCE_UeErrorType ue_error_type:8; 446 enum OpalMCE_UeErrorType ue_error_type:8;
447 uint8_t effective_address_provided; 447 uint8_t effective_address_provided;
448 uint8_t physical_address_provided; 448 uint8_t physical_address_provided;
449 uint8_t reserved_1[5]; 449 uint8_t reserved_1[5];
450 uint64_t effective_address; 450 uint64_t effective_address;
451 uint64_t physical_address; 451 uint64_t physical_address;
452 uint8_t reserved_2[8]; 452 uint8_t reserved_2[8];
453 } ue_error; 453 } ue_error;
454 454
455 struct { 455 struct {
456 enum OpalMCE_SlbErrorType slb_error_type:8; 456 enum OpalMCE_SlbErrorType slb_error_type:8;
457 uint8_t effective_address_provided; 457 uint8_t effective_address_provided;
458 uint8_t reserved_1[6]; 458 uint8_t reserved_1[6];
459 uint64_t effective_address; 459 uint64_t effective_address;
460 uint8_t reserved_2[16]; 460 uint8_t reserved_2[16];
461 } slb_error; 461 } slb_error;
462 462
463 struct { 463 struct {
464 enum OpalMCE_EratErrorType erat_error_type:8; 464 enum OpalMCE_EratErrorType erat_error_type:8;
465 uint8_t effective_address_provided; 465 uint8_t effective_address_provided;
466 uint8_t reserved_1[6]; 466 uint8_t reserved_1[6];
467 uint64_t effective_address; 467 uint64_t effective_address;
468 uint8_t reserved_2[16]; 468 uint8_t reserved_2[16];
469 } erat_error; 469 } erat_error;
470 470
471 struct { 471 struct {
472 enum OpalMCE_TlbErrorType tlb_error_type:8; 472 enum OpalMCE_TlbErrorType tlb_error_type:8;
473 uint8_t effective_address_provided; 473 uint8_t effective_address_provided;
474 uint8_t reserved_1[6]; 474 uint8_t reserved_1[6];
475 uint64_t effective_address; 475 uint64_t effective_address;
476 uint8_t reserved_2[16]; 476 uint8_t reserved_2[16];
477 } tlb_error; 477 } tlb_error;
478 } u; 478 } u;
479 }; 479 };
480 480
481 /* FSP memory errors handling */ 481 /* FSP memory errors handling */
482 enum OpalMemErr_Version { 482 enum OpalMemErr_Version {
483 OpalMemErr_V1 = 1, 483 OpalMemErr_V1 = 1,
484 }; 484 };
485 485
486 enum OpalMemErrType { 486 enum OpalMemErrType {
487 OPAL_MEM_ERR_TYPE_RESILIENCE = 0, 487 OPAL_MEM_ERR_TYPE_RESILIENCE = 0,
488 OPAL_MEM_ERR_TYPE_DYN_DALLOC, 488 OPAL_MEM_ERR_TYPE_DYN_DALLOC,
489 OPAL_MEM_ERR_TYPE_SCRUB, 489 OPAL_MEM_ERR_TYPE_SCRUB,
490 }; 490 };
491 491
492 /* Memory Reilience error type */ 492 /* Memory Reilience error type */
493 enum OpalMemErr_ResilErrType { 493 enum OpalMemErr_ResilErrType {
494 OPAL_MEM_RESILIENCE_CE = 0, 494 OPAL_MEM_RESILIENCE_CE = 0,
495 OPAL_MEM_RESILIENCE_UE, 495 OPAL_MEM_RESILIENCE_UE,
496 OPAL_MEM_RESILIENCE_UE_SCRUB, 496 OPAL_MEM_RESILIENCE_UE_SCRUB,
497 }; 497 };
498 498
499 /* Dynamic Memory Deallocation type */ 499 /* Dynamic Memory Deallocation type */
500 enum OpalMemErr_DynErrType { 500 enum OpalMemErr_DynErrType {
501 OPAL_MEM_DYNAMIC_DEALLOC = 0, 501 OPAL_MEM_DYNAMIC_DEALLOC = 0,
502 }; 502 };
503 503
504 /* OpalMemoryErrorData->flags */ 504 /* OpalMemoryErrorData->flags */
505 #define OPAL_MEM_CORRECTED_ERROR 0x0001 505 #define OPAL_MEM_CORRECTED_ERROR 0x0001
506 #define OPAL_MEM_THRESHOLD_EXCEEDED 0x0002 506 #define OPAL_MEM_THRESHOLD_EXCEEDED 0x0002
507 #define OPAL_MEM_ACK_REQUIRED 0x8000 507 #define OPAL_MEM_ACK_REQUIRED 0x8000
508 508
509 struct OpalMemoryErrorData { 509 struct OpalMemoryErrorData {
510 enum OpalMemErr_Version version:8; /* 0x00 */ 510 enum OpalMemErr_Version version:8; /* 0x00 */
511 enum OpalMemErrType type:8; /* 0x01 */ 511 enum OpalMemErrType type:8; /* 0x01 */
512 uint16_t flags; /* 0x02 */ 512 uint16_t flags; /* 0x02 */
513 uint8_t reserved_1[4]; /* 0x04 */ 513 uint8_t reserved_1[4]; /* 0x04 */
514 514
515 union { 515 union {
516 /* Memory Resilience corrected/uncorrected error info */ 516 /* Memory Resilience corrected/uncorrected error info */
517 struct { 517 struct {
518 enum OpalMemErr_ResilErrType resil_err_type:8; 518 enum OpalMemErr_ResilErrType resil_err_type:8;
519 uint8_t reserved_1[7]; 519 uint8_t reserved_1[7];
520 uint64_t physical_address_start; 520 uint64_t physical_address_start;
521 uint64_t physical_address_end; 521 uint64_t physical_address_end;
522 } resilience; 522 } resilience;
523 /* Dynamic memory deallocation error info */ 523 /* Dynamic memory deallocation error info */
524 struct { 524 struct {
525 enum OpalMemErr_DynErrType dyn_err_type:8; 525 enum OpalMemErr_DynErrType dyn_err_type:8;
526 uint8_t reserved_1[7]; 526 uint8_t reserved_1[7];
527 uint64_t physical_address_start; 527 uint64_t physical_address_start;
528 uint64_t physical_address_end; 528 uint64_t physical_address_end;
529 } dyn_dealloc; 529 } dyn_dealloc;
530 } u; 530 } u;
531 }; 531 };
532 532
533 enum { 533 enum {
534 OPAL_P7IOC_DIAG_TYPE_NONE = 0, 534 OPAL_P7IOC_DIAG_TYPE_NONE = 0,
535 OPAL_P7IOC_DIAG_TYPE_RGC = 1, 535 OPAL_P7IOC_DIAG_TYPE_RGC = 1,
536 OPAL_P7IOC_DIAG_TYPE_BI = 2, 536 OPAL_P7IOC_DIAG_TYPE_BI = 2,
537 OPAL_P7IOC_DIAG_TYPE_CI = 3, 537 OPAL_P7IOC_DIAG_TYPE_CI = 3,
538 OPAL_P7IOC_DIAG_TYPE_MISC = 4, 538 OPAL_P7IOC_DIAG_TYPE_MISC = 4,
539 OPAL_P7IOC_DIAG_TYPE_I2C = 5, 539 OPAL_P7IOC_DIAG_TYPE_I2C = 5,
540 OPAL_P7IOC_DIAG_TYPE_LAST = 6 540 OPAL_P7IOC_DIAG_TYPE_LAST = 6
541 }; 541 };
542 542
543 struct OpalIoP7IOCErrorData { 543 struct OpalIoP7IOCErrorData {
544 uint16_t type; 544 uint16_t type;
545 545
546 /* GEM */ 546 /* GEM */
547 uint64_t gemXfir; 547 uint64_t gemXfir;
548 uint64_t gemRfir; 548 uint64_t gemRfir;
549 uint64_t gemRirqfir; 549 uint64_t gemRirqfir;
550 uint64_t gemMask; 550 uint64_t gemMask;
551 uint64_t gemRwof; 551 uint64_t gemRwof;
552 552
553 /* LEM */ 553 /* LEM */
554 uint64_t lemFir; 554 uint64_t lemFir;
555 uint64_t lemErrMask; 555 uint64_t lemErrMask;
556 uint64_t lemAction0; 556 uint64_t lemAction0;
557 uint64_t lemAction1; 557 uint64_t lemAction1;
558 uint64_t lemWof; 558 uint64_t lemWof;
559 559
560 union { 560 union {
561 struct OpalIoP7IOCRgcErrorData { 561 struct OpalIoP7IOCRgcErrorData {
562 uint64_t rgcStatus; /* 3E1C10 */ 562 uint64_t rgcStatus; /* 3E1C10 */
563 uint64_t rgcLdcp; /* 3E1C18 */ 563 uint64_t rgcLdcp; /* 3E1C18 */
564 }rgc; 564 }rgc;
565 struct OpalIoP7IOCBiErrorData { 565 struct OpalIoP7IOCBiErrorData {
566 uint64_t biLdcp0; /* 3C0100, 3C0118 */ 566 uint64_t biLdcp0; /* 3C0100, 3C0118 */
567 uint64_t biLdcp1; /* 3C0108, 3C0120 */ 567 uint64_t biLdcp1; /* 3C0108, 3C0120 */
568 uint64_t biLdcp2; /* 3C0110, 3C0128 */ 568 uint64_t biLdcp2; /* 3C0110, 3C0128 */
569 uint64_t biFenceStatus; /* 3C0130, 3C0130 */ 569 uint64_t biFenceStatus; /* 3C0130, 3C0130 */
570 570
571 uint8_t biDownbound; /* BI Downbound or Upbound */ 571 uint8_t biDownbound; /* BI Downbound or Upbound */
572 }bi; 572 }bi;
573 struct OpalIoP7IOCCiErrorData { 573 struct OpalIoP7IOCCiErrorData {
574 uint64_t ciPortStatus; /* 3Dn008 */ 574 uint64_t ciPortStatus; /* 3Dn008 */
575 uint64_t ciPortLdcp; /* 3Dn010 */ 575 uint64_t ciPortLdcp; /* 3Dn010 */
576 576
577 uint8_t ciPort; /* Index of CI port: 0/1 */ 577 uint8_t ciPort; /* Index of CI port: 0/1 */
578 }ci; 578 }ci;
579 }; 579 };
580 }; 580 };
581 581
582 /** 582 /**
583 * This structure defines the overlay which will be used to store PHB error 583 * This structure defines the overlay which will be used to store PHB error
584 * data upon request. 584 * data upon request.
585 */ 585 */
586 enum { 586 enum {
587 OPAL_PHB_ERROR_DATA_VERSION_1 = 1, 587 OPAL_PHB_ERROR_DATA_VERSION_1 = 1,
588 }; 588 };
589 589
590 enum { 590 enum {
591 OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1, 591 OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
592 OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2 592 OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2
593 }; 593 };
594 594
595 enum { 595 enum {
596 OPAL_P7IOC_NUM_PEST_REGS = 128, 596 OPAL_P7IOC_NUM_PEST_REGS = 128,
597 OPAL_PHB3_NUM_PEST_REGS = 256 597 OPAL_PHB3_NUM_PEST_REGS = 256
598 }; 598 };
599 599
600 struct OpalIoPhbErrorCommon { 600 struct OpalIoPhbErrorCommon {
601 uint32_t version; 601 uint32_t version;
602 uint32_t ioType; 602 uint32_t ioType;
603 uint32_t len; 603 uint32_t len;
604 }; 604 };
605 605
606 struct OpalIoP7IOCPhbErrorData { 606 struct OpalIoP7IOCPhbErrorData {
607 struct OpalIoPhbErrorCommon common; 607 struct OpalIoPhbErrorCommon common;
608 608
609 uint32_t brdgCtl; 609 uint32_t brdgCtl;
610 610
611 // P7IOC utl regs 611 // P7IOC utl regs
612 uint32_t portStatusReg; 612 uint32_t portStatusReg;
613 uint32_t rootCmplxStatus; 613 uint32_t rootCmplxStatus;
614 uint32_t busAgentStatus; 614 uint32_t busAgentStatus;
615 615
616 // P7IOC cfg regs 616 // P7IOC cfg regs
617 uint32_t deviceStatus; 617 uint32_t deviceStatus;
618 uint32_t slotStatus; 618 uint32_t slotStatus;
619 uint32_t linkStatus; 619 uint32_t linkStatus;
620 uint32_t devCmdStatus; 620 uint32_t devCmdStatus;
621 uint32_t devSecStatus; 621 uint32_t devSecStatus;
622 622
623 // cfg AER regs 623 // cfg AER regs
624 uint32_t rootErrorStatus; 624 uint32_t rootErrorStatus;
625 uint32_t uncorrErrorStatus; 625 uint32_t uncorrErrorStatus;
626 uint32_t corrErrorStatus; 626 uint32_t corrErrorStatus;
627 uint32_t tlpHdr1; 627 uint32_t tlpHdr1;
628 uint32_t tlpHdr2; 628 uint32_t tlpHdr2;
629 uint32_t tlpHdr3; 629 uint32_t tlpHdr3;
630 uint32_t tlpHdr4; 630 uint32_t tlpHdr4;
631 uint32_t sourceId; 631 uint32_t sourceId;
632 632
633 uint32_t rsv3; 633 uint32_t rsv3;
634 634
635 // Record data about the call to allocate a buffer. 635 // Record data about the call to allocate a buffer.
636 uint64_t errorClass; 636 uint64_t errorClass;
637 uint64_t correlator; 637 uint64_t correlator;
638 638
639 //P7IOC MMIO Error Regs 639 //P7IOC MMIO Error Regs
640 uint64_t p7iocPlssr; // n120 640 uint64_t p7iocPlssr; // n120
641 uint64_t p7iocCsr; // n110 641 uint64_t p7iocCsr; // n110
642 uint64_t lemFir; // nC00 642 uint64_t lemFir; // nC00
643 uint64_t lemErrorMask; // nC18 643 uint64_t lemErrorMask; // nC18
644 uint64_t lemWOF; // nC40 644 uint64_t lemWOF; // nC40
645 uint64_t phbErrorStatus; // nC80 645 uint64_t phbErrorStatus; // nC80
646 uint64_t phbFirstErrorStatus; // nC88 646 uint64_t phbFirstErrorStatus; // nC88
647 uint64_t phbErrorLog0; // nCC0 647 uint64_t phbErrorLog0; // nCC0
648 uint64_t phbErrorLog1; // nCC8 648 uint64_t phbErrorLog1; // nCC8
649 uint64_t mmioErrorStatus; // nD00 649 uint64_t mmioErrorStatus; // nD00
650 uint64_t mmioFirstErrorStatus; // nD08 650 uint64_t mmioFirstErrorStatus; // nD08
651 uint64_t mmioErrorLog0; // nD40 651 uint64_t mmioErrorLog0; // nD40
652 uint64_t mmioErrorLog1; // nD48 652 uint64_t mmioErrorLog1; // nD48
653 uint64_t dma0ErrorStatus; // nD80 653 uint64_t dma0ErrorStatus; // nD80
654 uint64_t dma0FirstErrorStatus; // nD88 654 uint64_t dma0FirstErrorStatus; // nD88
655 uint64_t dma0ErrorLog0; // nDC0 655 uint64_t dma0ErrorLog0; // nDC0
656 uint64_t dma0ErrorLog1; // nDC8 656 uint64_t dma0ErrorLog1; // nDC8
657 uint64_t dma1ErrorStatus; // nE00 657 uint64_t dma1ErrorStatus; // nE00
658 uint64_t dma1FirstErrorStatus; // nE08 658 uint64_t dma1FirstErrorStatus; // nE08
659 uint64_t dma1ErrorLog0; // nE40 659 uint64_t dma1ErrorLog0; // nE40
660 uint64_t dma1ErrorLog1; // nE48 660 uint64_t dma1ErrorLog1; // nE48
661 uint64_t pestA[OPAL_P7IOC_NUM_PEST_REGS]; 661 uint64_t pestA[OPAL_P7IOC_NUM_PEST_REGS];
662 uint64_t pestB[OPAL_P7IOC_NUM_PEST_REGS]; 662 uint64_t pestB[OPAL_P7IOC_NUM_PEST_REGS];
663 }; 663 };
664 664
665 struct OpalIoPhb3ErrorData { 665 struct OpalIoPhb3ErrorData {
666 struct OpalIoPhbErrorCommon common; 666 struct OpalIoPhbErrorCommon common;
667 667
668 uint32_t brdgCtl; 668 uint32_t brdgCtl;
669 669
670 /* PHB3 UTL regs */ 670 /* PHB3 UTL regs */
671 uint32_t portStatusReg; 671 uint32_t portStatusReg;
672 uint32_t rootCmplxStatus; 672 uint32_t rootCmplxStatus;
673 uint32_t busAgentStatus; 673 uint32_t busAgentStatus;
674 674
675 /* PHB3 cfg regs */ 675 /* PHB3 cfg regs */
676 uint32_t deviceStatus; 676 uint32_t deviceStatus;
677 uint32_t slotStatus; 677 uint32_t slotStatus;
678 uint32_t linkStatus; 678 uint32_t linkStatus;
679 uint32_t devCmdStatus; 679 uint32_t devCmdStatus;
680 uint32_t devSecStatus; 680 uint32_t devSecStatus;
681 681
682 /* cfg AER regs */ 682 /* cfg AER regs */
683 uint32_t rootErrorStatus; 683 uint32_t rootErrorStatus;
684 uint32_t uncorrErrorStatus; 684 uint32_t uncorrErrorStatus;
685 uint32_t corrErrorStatus; 685 uint32_t corrErrorStatus;
686 uint32_t tlpHdr1; 686 uint32_t tlpHdr1;
687 uint32_t tlpHdr2; 687 uint32_t tlpHdr2;
688 uint32_t tlpHdr3; 688 uint32_t tlpHdr3;
689 uint32_t tlpHdr4; 689 uint32_t tlpHdr4;
690 uint32_t sourceId; 690 uint32_t sourceId;
691 691
692 uint32_t rsv3; 692 uint32_t rsv3;
693 693
694 /* Record data about the call to allocate a buffer */ 694 /* Record data about the call to allocate a buffer */
695 uint64_t errorClass; 695 uint64_t errorClass;
696 uint64_t correlator; 696 uint64_t correlator;
697 697
698 uint64_t nFir; /* 000 */ 698 uint64_t nFir; /* 000 */
699 uint64_t nFirMask; /* 003 */ 699 uint64_t nFirMask; /* 003 */
700 uint64_t nFirWOF; /* 008 */ 700 uint64_t nFirWOF; /* 008 */
701 701
702 /* PHB3 MMIO Error Regs */ 702 /* PHB3 MMIO Error Regs */
703 uint64_t phbPlssr; /* 120 */ 703 uint64_t phbPlssr; /* 120 */
704 uint64_t phbCsr; /* 110 */ 704 uint64_t phbCsr; /* 110 */
705 uint64_t lemFir; /* C00 */ 705 uint64_t lemFir; /* C00 */
706 uint64_t lemErrorMask; /* C18 */ 706 uint64_t lemErrorMask; /* C18 */
707 uint64_t lemWOF; /* C40 */ 707 uint64_t lemWOF; /* C40 */
708 uint64_t phbErrorStatus; /* C80 */ 708 uint64_t phbErrorStatus; /* C80 */
709 uint64_t phbFirstErrorStatus; /* C88 */ 709 uint64_t phbFirstErrorStatus; /* C88 */
710 uint64_t phbErrorLog0; /* CC0 */ 710 uint64_t phbErrorLog0; /* CC0 */
711 uint64_t phbErrorLog1; /* CC8 */ 711 uint64_t phbErrorLog1; /* CC8 */
712 uint64_t mmioErrorStatus; /* D00 */ 712 uint64_t mmioErrorStatus; /* D00 */
713 uint64_t mmioFirstErrorStatus; /* D08 */ 713 uint64_t mmioFirstErrorStatus; /* D08 */
714 uint64_t mmioErrorLog0; /* D40 */ 714 uint64_t mmioErrorLog0; /* D40 */
715 uint64_t mmioErrorLog1; /* D48 */ 715 uint64_t mmioErrorLog1; /* D48 */
716 uint64_t dma0ErrorStatus; /* D80 */ 716 uint64_t dma0ErrorStatus; /* D80 */
717 uint64_t dma0FirstErrorStatus; /* D88 */ 717 uint64_t dma0FirstErrorStatus; /* D88 */
718 uint64_t dma0ErrorLog0; /* DC0 */ 718 uint64_t dma0ErrorLog0; /* DC0 */
719 uint64_t dma0ErrorLog1; /* DC8 */ 719 uint64_t dma0ErrorLog1; /* DC8 */
720 uint64_t dma1ErrorStatus; /* E00 */ 720 uint64_t dma1ErrorStatus; /* E00 */
721 uint64_t dma1FirstErrorStatus; /* E08 */ 721 uint64_t dma1FirstErrorStatus; /* E08 */
722 uint64_t dma1ErrorLog0; /* E40 */ 722 uint64_t dma1ErrorLog0; /* E40 */
723 uint64_t dma1ErrorLog1; /* E48 */ 723 uint64_t dma1ErrorLog1; /* E48 */
724 uint64_t pestA[OPAL_PHB3_NUM_PEST_REGS]; 724 uint64_t pestA[OPAL_PHB3_NUM_PEST_REGS];
725 uint64_t pestB[OPAL_PHB3_NUM_PEST_REGS]; 725 uint64_t pestB[OPAL_PHB3_NUM_PEST_REGS];
726 }; 726 };
727 727
728 typedef struct oppanel_line { 728 typedef struct oppanel_line {
729 const char * line; 729 const char * line;
730 uint64_t line_len; 730 uint64_t line_len;
731 } oppanel_line_t; 731 } oppanel_line_t;
732 732
733 /* /sys/firmware/opal */ 733 /* /sys/firmware/opal */
734 extern struct kobject *opal_kobj; 734 extern struct kobject *opal_kobj;
735 735
736 /* /ibm,opal */ 736 /* /ibm,opal */
737 extern struct device_node *opal_node; 737 extern struct device_node *opal_node;
738 738
739 /* API functions */ 739 /* API functions */
740 int64_t opal_invalid_call(void); 740 int64_t opal_invalid_call(void);
741 int64_t opal_console_write(int64_t term_number, __be64 *length, 741 int64_t opal_console_write(int64_t term_number, __be64 *length,
742 const uint8_t *buffer); 742 const uint8_t *buffer);
743 int64_t opal_console_read(int64_t term_number, __be64 *length, 743 int64_t opal_console_read(int64_t term_number, __be64 *length,
744 uint8_t *buffer); 744 uint8_t *buffer);
745 int64_t opal_console_write_buffer_space(int64_t term_number, 745 int64_t opal_console_write_buffer_space(int64_t term_number,
746 __be64 *length); 746 __be64 *length);
747 int64_t opal_rtc_read(__be32 *year_month_day, 747 int64_t opal_rtc_read(__be32 *year_month_day,
748 __be64 *hour_minute_second_millisecond); 748 __be64 *hour_minute_second_millisecond);
749 int64_t opal_rtc_write(uint32_t year_month_day, 749 int64_t opal_rtc_write(uint32_t year_month_day,
750 uint64_t hour_minute_second_millisecond); 750 uint64_t hour_minute_second_millisecond);
751 int64_t opal_cec_power_down(uint64_t request); 751 int64_t opal_cec_power_down(uint64_t request);
752 int64_t opal_cec_reboot(void); 752 int64_t opal_cec_reboot(void);
753 int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset); 753 int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
754 int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset); 754 int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
755 int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask); 755 int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask);
756 int64_t opal_poll_events(__be64 *outstanding_event_mask); 756 int64_t opal_poll_events(__be64 *outstanding_event_mask);
757 int64_t opal_pci_set_hub_tce_memory(uint64_t hub_id, uint64_t tce_mem_addr, 757 int64_t opal_pci_set_hub_tce_memory(uint64_t hub_id, uint64_t tce_mem_addr,
758 uint64_t tce_mem_size); 758 uint64_t tce_mem_size);
759 int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr, 759 int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr,
760 uint64_t tce_mem_size); 760 uint64_t tce_mem_size);
761 int64_t opal_pci_config_read_byte(uint64_t phb_id, uint64_t bus_dev_func, 761 int64_t opal_pci_config_read_byte(uint64_t phb_id, uint64_t bus_dev_func,
762 uint64_t offset, uint8_t *data); 762 uint64_t offset, uint8_t *data);
763 int64_t opal_pci_config_read_half_word(uint64_t phb_id, uint64_t bus_dev_func, 763 int64_t opal_pci_config_read_half_word(uint64_t phb_id, uint64_t bus_dev_func,
764 uint64_t offset, __be16 *data); 764 uint64_t offset, __be16 *data);
765 int64_t opal_pci_config_read_word(uint64_t phb_id, uint64_t bus_dev_func, 765 int64_t opal_pci_config_read_word(uint64_t phb_id, uint64_t bus_dev_func,
766 uint64_t offset, __be32 *data); 766 uint64_t offset, __be32 *data);
767 int64_t opal_pci_config_write_byte(uint64_t phb_id, uint64_t bus_dev_func, 767 int64_t opal_pci_config_write_byte(uint64_t phb_id, uint64_t bus_dev_func,
768 uint64_t offset, uint8_t data); 768 uint64_t offset, uint8_t data);
769 int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func, 769 int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func,
770 uint64_t offset, uint16_t data); 770 uint64_t offset, uint16_t data);
771 int64_t opal_pci_config_write_word(uint64_t phb_id, uint64_t bus_dev_func, 771 int64_t opal_pci_config_write_word(uint64_t phb_id, uint64_t bus_dev_func,
772 uint64_t offset, uint32_t data); 772 uint64_t offset, uint32_t data);
773 int64_t opal_set_xive(uint32_t isn, uint16_t server, uint8_t priority); 773 int64_t opal_set_xive(uint32_t isn, uint16_t server, uint8_t priority);
774 int64_t opal_get_xive(uint32_t isn, __be16 *server, uint8_t *priority); 774 int64_t opal_get_xive(uint32_t isn, __be16 *server, uint8_t *priority);
775 int64_t opal_register_exception_handler(uint64_t opal_exception, 775 int64_t opal_register_exception_handler(uint64_t opal_exception,
776 uint64_t handler_address, 776 uint64_t handler_address,
777 uint64_t glue_cache_line); 777 uint64_t glue_cache_line);
778 int64_t opal_pci_eeh_freeze_status(uint64_t phb_id, uint64_t pe_number, 778 int64_t opal_pci_eeh_freeze_status(uint64_t phb_id, uint64_t pe_number,
779 uint8_t *freeze_state, 779 uint8_t *freeze_state,
780 __be16 *pci_error_type, 780 __be16 *pci_error_type,
781 __be64 *phb_status); 781 __be64 *phb_status);
782 int64_t opal_pci_eeh_freeze_clear(uint64_t phb_id, uint64_t pe_number, 782 int64_t opal_pci_eeh_freeze_clear(uint64_t phb_id, uint64_t pe_number,
783 uint64_t eeh_action_token); 783 uint64_t eeh_action_token);
784 int64_t opal_pci_shpc(uint64_t phb_id, uint64_t shpc_action, uint8_t *state); 784 int64_t opal_pci_shpc(uint64_t phb_id, uint64_t shpc_action, uint8_t *state);
785 785
786 786
787 787
788 int64_t opal_pci_phb_mmio_enable(uint64_t phb_id, uint16_t window_type, 788 int64_t opal_pci_phb_mmio_enable(uint64_t phb_id, uint16_t window_type,
789 uint16_t window_num, uint16_t enable); 789 uint16_t window_num, uint16_t enable);
790 int64_t opal_pci_set_phb_mem_window(uint64_t phb_id, uint16_t window_type, 790 int64_t opal_pci_set_phb_mem_window(uint64_t phb_id, uint16_t window_type,
791 uint16_t window_num, 791 uint16_t window_num,
792 uint64_t starting_real_address, 792 uint64_t starting_real_address,
793 uint64_t starting_pci_address, 793 uint64_t starting_pci_address,
794 uint16_t segment_size); 794 uint16_t segment_size);
795 int64_t opal_pci_map_pe_mmio_window(uint64_t phb_id, uint16_t pe_number, 795 int64_t opal_pci_map_pe_mmio_window(uint64_t phb_id, uint16_t pe_number,
796 uint16_t window_type, uint16_t window_num, 796 uint16_t window_type, uint16_t window_num,
797 uint16_t segment_num); 797 uint16_t segment_num);
798 int64_t opal_pci_set_phb_table_memory(uint64_t phb_id, uint64_t rtt_addr, 798 int64_t opal_pci_set_phb_table_memory(uint64_t phb_id, uint64_t rtt_addr,
799 uint64_t ivt_addr, uint64_t ivt_len, 799 uint64_t ivt_addr, uint64_t ivt_len,
800 uint64_t reject_array_addr, 800 uint64_t reject_array_addr,
801 uint64_t peltv_addr); 801 uint64_t peltv_addr);
802 int64_t opal_pci_set_pe(uint64_t phb_id, uint64_t pe_number, uint64_t bus_dev_func, 802 int64_t opal_pci_set_pe(uint64_t phb_id, uint64_t pe_number, uint64_t bus_dev_func,
803 uint8_t bus_compare, uint8_t dev_compare, uint8_t func_compare, 803 uint8_t bus_compare, uint8_t dev_compare, uint8_t func_compare,
804 uint8_t pe_action); 804 uint8_t pe_action);
805 int64_t opal_pci_set_peltv(uint64_t phb_id, uint32_t parent_pe, uint32_t child_pe, 805 int64_t opal_pci_set_peltv(uint64_t phb_id, uint32_t parent_pe, uint32_t child_pe,
806 uint8_t state); 806 uint8_t state);
807 int64_t opal_pci_set_mve(uint64_t phb_id, uint32_t mve_number, uint32_t pe_number); 807 int64_t opal_pci_set_mve(uint64_t phb_id, uint32_t mve_number, uint32_t pe_number);
808 int64_t opal_pci_set_mve_enable(uint64_t phb_id, uint32_t mve_number, 808 int64_t opal_pci_set_mve_enable(uint64_t phb_id, uint32_t mve_number,
809 uint32_t state); 809 uint32_t state);
810 int64_t opal_pci_get_xive_reissue(uint64_t phb_id, uint32_t xive_number, 810 int64_t opal_pci_get_xive_reissue(uint64_t phb_id, uint32_t xive_number,
811 uint8_t *p_bit, uint8_t *q_bit); 811 uint8_t *p_bit, uint8_t *q_bit);
812 int64_t opal_pci_set_xive_reissue(uint64_t phb_id, uint32_t xive_number, 812 int64_t opal_pci_set_xive_reissue(uint64_t phb_id, uint32_t xive_number,
813 uint8_t p_bit, uint8_t q_bit); 813 uint8_t p_bit, uint8_t q_bit);
814 int64_t opal_pci_msi_eoi(uint64_t phb_id, uint32_t hw_irq); 814 int64_t opal_pci_msi_eoi(uint64_t phb_id, uint32_t hw_irq);
815 int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number, 815 int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number,
816 uint32_t xive_num); 816 uint32_t xive_num);
817 int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num, 817 int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num,
818 __be32 *interrupt_source_number); 818 __be32 *interrupt_source_number);
819 int64_t opal_get_msi_32(uint64_t phb_id, uint32_t mve_number, uint32_t xive_num, 819 int64_t opal_get_msi_32(uint64_t phb_id, uint32_t mve_number, uint32_t xive_num,
820 uint8_t msi_range, __be32 *msi_address, 820 uint8_t msi_range, __be32 *msi_address,
821 __be32 *message_data); 821 __be32 *message_data);
822 int64_t opal_get_msi_64(uint64_t phb_id, uint32_t mve_number, 822 int64_t opal_get_msi_64(uint64_t phb_id, uint32_t mve_number,
823 uint32_t xive_num, uint8_t msi_range, 823 uint32_t xive_num, uint8_t msi_range,
824 __be64 *msi_address, __be32 *message_data); 824 __be64 *msi_address, __be32 *message_data);
825 int64_t opal_start_cpu(uint64_t thread_number, uint64_t start_address); 825 int64_t opal_start_cpu(uint64_t thread_number, uint64_t start_address);
826 int64_t opal_query_cpu_status(uint64_t thread_number, uint8_t *thread_status); 826 int64_t opal_query_cpu_status(uint64_t thread_number, uint8_t *thread_status);
827 int64_t opal_write_oppanel(oppanel_line_t *lines, uint64_t num_lines); 827 int64_t opal_write_oppanel(oppanel_line_t *lines, uint64_t num_lines);
828 int64_t opal_pci_map_pe_dma_window(uint64_t phb_id, uint16_t pe_number, uint16_t window_id, 828 int64_t opal_pci_map_pe_dma_window(uint64_t phb_id, uint16_t pe_number, uint16_t window_id,
829 uint16_t tce_levels, uint64_t tce_table_addr, 829 uint16_t tce_levels, uint64_t tce_table_addr,
830 uint64_t tce_table_size, uint64_t tce_page_size); 830 uint64_t tce_table_size, uint64_t tce_page_size);
831 int64_t opal_pci_map_pe_dma_window_real(uint64_t phb_id, uint16_t pe_number, 831 int64_t opal_pci_map_pe_dma_window_real(uint64_t phb_id, uint16_t pe_number,
832 uint16_t dma_window_number, uint64_t pci_start_addr, 832 uint16_t dma_window_number, uint64_t pci_start_addr,
833 uint64_t pci_mem_size); 833 uint64_t pci_mem_size);
834 int64_t opal_pci_reset(uint64_t phb_id, uint8_t reset_scope, uint8_t assert_state); 834 int64_t opal_pci_reset(uint64_t phb_id, uint8_t reset_scope, uint8_t assert_state);
835 835
836 int64_t opal_pci_get_hub_diag_data(uint64_t hub_id, void *diag_buffer, 836 int64_t opal_pci_get_hub_diag_data(uint64_t hub_id, void *diag_buffer,
837 uint64_t diag_buffer_len); 837 uint64_t diag_buffer_len);
838 int64_t opal_pci_get_phb_diag_data(uint64_t phb_id, void *diag_buffer, 838 int64_t opal_pci_get_phb_diag_data(uint64_t phb_id, void *diag_buffer,
839 uint64_t diag_buffer_len); 839 uint64_t diag_buffer_len);
840 int64_t opal_pci_get_phb_diag_data2(uint64_t phb_id, void *diag_buffer, 840 int64_t opal_pci_get_phb_diag_data2(uint64_t phb_id, void *diag_buffer,
841 uint64_t diag_buffer_len); 841 uint64_t diag_buffer_len);
842 int64_t opal_pci_fence_phb(uint64_t phb_id); 842 int64_t opal_pci_fence_phb(uint64_t phb_id);
843 int64_t opal_pci_reinit(uint64_t phb_id, uint64_t reinit_scope, uint64_t data); 843 int64_t opal_pci_reinit(uint64_t phb_id, uint64_t reinit_scope, uint64_t data);
844 int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action); 844 int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
845 int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action); 845 int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
846 int64_t opal_get_epow_status(__be64 *status); 846 int64_t opal_get_epow_status(__be64 *status);
847 int64_t opal_set_system_attention_led(uint8_t led_action); 847 int64_t opal_set_system_attention_led(uint8_t led_action);
848 int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe, 848 int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
849 uint16_t *pci_error_type, uint16_t *severity); 849 uint16_t *pci_error_type, uint16_t *severity);
850 int64_t opal_pci_poll(uint64_t phb_id); 850 int64_t opal_pci_poll(uint64_t phb_id);
851 int64_t opal_return_cpu(void); 851 int64_t opal_return_cpu(void);
852 852
853 int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val); 853 int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val);
854 int64_t opal_xscom_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val); 854 int64_t opal_xscom_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val);
855 855
856 int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, 856 int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
857 uint32_t addr, uint32_t data, uint32_t sz); 857 uint32_t addr, uint32_t data, uint32_t sz);
858 int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, 858 int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
859 uint32_t addr, __be32 *data, uint32_t sz); 859 uint32_t addr, __be32 *data, uint32_t sz);
860 860
861 int64_t opal_read_elog(uint64_t buffer, uint64_t size, uint64_t log_id); 861 int64_t opal_read_elog(uint64_t buffer, uint64_t size, uint64_t log_id);
862 int64_t opal_get_elog_size(__be64 *log_id, __be64 *size, __be64 *elog_type); 862 int64_t opal_get_elog_size(__be64 *log_id, __be64 *size, __be64 *elog_type);
863 int64_t opal_write_elog(uint64_t buffer, uint64_t size, uint64_t offset); 863 int64_t opal_write_elog(uint64_t buffer, uint64_t size, uint64_t offset);
864 int64_t opal_send_ack_elog(uint64_t log_id); 864 int64_t opal_send_ack_elog(uint64_t log_id);
865 void opal_resend_pending_logs(void); 865 void opal_resend_pending_logs(void);
866 866
867 int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result); 867 int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
868 int64_t opal_manage_flash(uint8_t op); 868 int64_t opal_manage_flash(uint8_t op);
869 int64_t opal_update_flash(uint64_t blk_list); 869 int64_t opal_update_flash(uint64_t blk_list);
870 int64_t opal_dump_init(uint8_t dump_type); 870 int64_t opal_dump_init(uint8_t dump_type);
871 int64_t opal_dump_info(uint32_t *dump_id, uint32_t *dump_size); 871 int64_t opal_dump_info(uint32_t *dump_id, uint32_t *dump_size);
872 int64_t opal_dump_info2(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type); 872 int64_t opal_dump_info2(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type);
873 int64_t opal_dump_read(uint32_t dump_id, uint64_t buffer); 873 int64_t opal_dump_read(uint32_t dump_id, uint64_t buffer);
874 int64_t opal_dump_ack(uint32_t dump_id); 874 int64_t opal_dump_ack(uint32_t dump_id);
875 int64_t opal_dump_resend_notification(void); 875 int64_t opal_dump_resend_notification(void);
876 876
877 int64_t opal_get_msg(uint64_t buffer, uint64_t size); 877 int64_t opal_get_msg(uint64_t buffer, uint64_t size);
878 int64_t opal_check_completion(uint64_t buffer, uint64_t size, uint64_t token); 878 int64_t opal_check_completion(uint64_t buffer, uint64_t size, uint64_t token);
879 int64_t opal_sync_host_reboot(void); 879 int64_t opal_sync_host_reboot(void);
880 int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer, 880 int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer,
881 uint64_t length); 881 uint64_t length);
882 int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer, 882 int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer,
883 uint64_t length); 883 uint64_t length);
884 int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data); 884 int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
885 885
886 /* Internal functions */ 886 /* Internal functions */
887 extern int early_init_dt_scan_opal(unsigned long node, const char *uname, 887 extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
888 int depth, void *data); 888 int depth, void *data);
889 extern int early_init_dt_scan_recoverable_ranges(unsigned long node, 889 extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
890 const char *uname, int depth, void *data); 890 const char *uname, int depth, void *data);
891 891
892 extern int opal_get_chars(uint32_t vtermno, char *buf, int count); 892 extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
893 extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len); 893 extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
894 894
895 extern void hvc_opal_init_early(void); 895 extern void hvc_opal_init_early(void);
896 896
897 extern int opal_notifier_register(struct notifier_block *nb); 897 extern int opal_notifier_register(struct notifier_block *nb);
898 extern int opal_notifier_unregister(struct notifier_block *nb); 898 extern int opal_notifier_unregister(struct notifier_block *nb);
899 899
900 extern int opal_message_notifier_register(enum OpalMessageType msg_type, 900 extern int opal_message_notifier_register(enum OpalMessageType msg_type,
901 struct notifier_block *nb); 901 struct notifier_block *nb);
902 extern void opal_notifier_enable(void); 902 extern void opal_notifier_enable(void);
903 extern void opal_notifier_disable(void); 903 extern void opal_notifier_disable(void);
904 extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val); 904 extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
905 905
906 extern int __opal_async_get_token(void); 906 extern int __opal_async_get_token(void);
907 extern int opal_async_get_token_interruptible(void); 907 extern int opal_async_get_token_interruptible(void);
908 extern int __opal_async_release_token(int token); 908 extern int __opal_async_release_token(int token);
909 extern int opal_async_release_token(int token); 909 extern int opal_async_release_token(int token);
910 extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg); 910 extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg);
911 extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data); 911 extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
912 912
913 struct rtc_time; 913 struct rtc_time;
914 extern int opal_set_rtc_time(struct rtc_time *tm); 914 extern int opal_set_rtc_time(struct rtc_time *tm);
915 extern void opal_get_rtc_time(struct rtc_time *tm); 915 extern void opal_get_rtc_time(struct rtc_time *tm);
916 extern unsigned long opal_get_boot_time(void); 916 extern unsigned long opal_get_boot_time(void);
917 extern void opal_nvram_init(void); 917 extern void opal_nvram_init(void);
918 extern void opal_flash_init(void); 918 extern void opal_flash_init(void);
919 extern int opal_elog_init(void); 919 extern int opal_elog_init(void);
920 extern void opal_platform_dump_init(void); 920 extern void opal_platform_dump_init(void);
921 extern void opal_sys_param_init(void); 921 extern void opal_sys_param_init(void);
922 extern void opal_msglog_init(void); 922 extern void opal_msglog_init(void);
923 923
924 extern int opal_machine_check(struct pt_regs *regs); 924 extern int opal_machine_check(struct pt_regs *regs);
925 extern bool opal_mce_check_early_recovery(struct pt_regs *regs); 925 extern bool opal_mce_check_early_recovery(struct pt_regs *regs);
926 926
927 extern void opal_shutdown(void); 927 extern void opal_shutdown(void);
928 extern int opal_resync_timebase(void); 928 extern int opal_resync_timebase(void);
929 929
930 extern void opal_lpc_init(void); 930 extern void opal_lpc_init(void);
931
932 struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
933 unsigned long vmalloc_size);
934 void opal_free_sg_list(struct opal_sg_list *sg);
931 935
932 #endif /* __ASSEMBLY__ */ 936 #endif /* __ASSEMBLY__ */
933 937
934 #endif /* __OPAL_H */ 938 #endif /* __OPAL_H */
935 939
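The two prototypes added above are the whole driver-facing interface: hand a vmalloc'ed buffer to opal_vmalloc_to_sg_list(), pass the physical address of the returned list head to the OPAL call, then release the list with opal_free_sg_list(). A minimal caller sketch, condensed from the dump_read_data() and opal_flash_update() call sites later in this diff; the OPAL_BUSY retry loop used by the real callers is omitted, and unlike dump_read_data() this sketch frees its buffer before returning:

#include <linux/vmalloc.h>
#include <asm/opal.h>

/* Sketch only: mirrors dump_read_data() further down in this diff. */
static int64_t fetch_dump(uint32_t dump_id, size_t size)
{
	struct opal_sg_list *list;
	void *buf;
	int64_t rc;

	buf = vzalloc(PAGE_ALIGN(size));	/* helper expects vmalloc memory */
	if (!buf)
		return -ENOMEM;

	list = opal_vmalloc_to_sg_list(buf, size);
	if (!list) {
		vfree(buf);
		return -ENOMEM;
	}

	/* OPAL is handed the physical address of the first SG node. */
	rc = opal_dump_read(dump_id, __pa(list));

	opal_free_sg_list(list);
	vfree(buf);
	return rc;
}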
arch/powerpc/platforms/powernv/opal-dump.c
1 /* 1 /*
2 * PowerNV OPAL Dump Interface 2 * PowerNV OPAL Dump Interface
3 * 3 *
4 * Copyright 2013,2014 IBM Corp. 4 * Copyright 2013,2014 IBM Corp.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12 #include <linux/kobject.h> 12 #include <linux/kobject.h>
13 #include <linux/mm.h> 13 #include <linux/mm.h>
14 #include <linux/slab.h> 14 #include <linux/slab.h>
15 #include <linux/vmalloc.h> 15 #include <linux/vmalloc.h>
16 #include <linux/pagemap.h> 16 #include <linux/pagemap.h>
17 #include <linux/delay.h> 17 #include <linux/delay.h>
18 18
19 #include <asm/opal.h> 19 #include <asm/opal.h>
20 20
21 #define DUMP_TYPE_FSP 0x01 21 #define DUMP_TYPE_FSP 0x01
22 22
23 struct dump_obj { 23 struct dump_obj {
24 struct kobject kobj; 24 struct kobject kobj;
25 struct bin_attribute dump_attr; 25 struct bin_attribute dump_attr;
26 uint32_t id; /* becomes object name */ 26 uint32_t id; /* becomes object name */
27 uint32_t type; 27 uint32_t type;
28 uint32_t size; 28 uint32_t size;
29 char *buffer; 29 char *buffer;
30 }; 30 };
31 #define to_dump_obj(x) container_of(x, struct dump_obj, kobj) 31 #define to_dump_obj(x) container_of(x, struct dump_obj, kobj)
32 32
33 struct dump_attribute { 33 struct dump_attribute {
34 struct attribute attr; 34 struct attribute attr;
35 ssize_t (*show)(struct dump_obj *dump, struct dump_attribute *attr, 35 ssize_t (*show)(struct dump_obj *dump, struct dump_attribute *attr,
36 char *buf); 36 char *buf);
37 ssize_t (*store)(struct dump_obj *dump, struct dump_attribute *attr, 37 ssize_t (*store)(struct dump_obj *dump, struct dump_attribute *attr,
38 const char *buf, size_t count); 38 const char *buf, size_t count);
39 }; 39 };
40 #define to_dump_attr(x) container_of(x, struct dump_attribute, attr) 40 #define to_dump_attr(x) container_of(x, struct dump_attribute, attr)
41 41
42 static ssize_t dump_id_show(struct dump_obj *dump_obj, 42 static ssize_t dump_id_show(struct dump_obj *dump_obj,
43 struct dump_attribute *attr, 43 struct dump_attribute *attr,
44 char *buf) 44 char *buf)
45 { 45 {
46 return sprintf(buf, "0x%x\n", dump_obj->id); 46 return sprintf(buf, "0x%x\n", dump_obj->id);
47 } 47 }
48 48
49 static const char* dump_type_to_string(uint32_t type) 49 static const char* dump_type_to_string(uint32_t type)
50 { 50 {
51 switch (type) { 51 switch (type) {
52 case 0x01: return "SP Dump"; 52 case 0x01: return "SP Dump";
53 case 0x02: return "System/Platform Dump"; 53 case 0x02: return "System/Platform Dump";
54 case 0x03: return "SMA Dump"; 54 case 0x03: return "SMA Dump";
55 default: return "unknown"; 55 default: return "unknown";
56 } 56 }
57 } 57 }
58 58
59 static ssize_t dump_type_show(struct dump_obj *dump_obj, 59 static ssize_t dump_type_show(struct dump_obj *dump_obj,
60 struct dump_attribute *attr, 60 struct dump_attribute *attr,
61 char *buf) 61 char *buf)
62 { 62 {
63 63
64 return sprintf(buf, "0x%x %s\n", dump_obj->type, 64 return sprintf(buf, "0x%x %s\n", dump_obj->type,
65 dump_type_to_string(dump_obj->type)); 65 dump_type_to_string(dump_obj->type));
66 } 66 }
67 67
68 static ssize_t dump_ack_show(struct dump_obj *dump_obj, 68 static ssize_t dump_ack_show(struct dump_obj *dump_obj,
69 struct dump_attribute *attr, 69 struct dump_attribute *attr,
70 char *buf) 70 char *buf)
71 { 71 {
72 return sprintf(buf, "ack - acknowledge dump\n"); 72 return sprintf(buf, "ack - acknowledge dump\n");
73 } 73 }
74 74
75 /* 75 /*
76 * Send acknowledgement to OPAL 76 * Send acknowledgement to OPAL
77 */ 77 */
78 static int64_t dump_send_ack(uint32_t dump_id) 78 static int64_t dump_send_ack(uint32_t dump_id)
79 { 79 {
80 int rc; 80 int rc;
81 81
82 rc = opal_dump_ack(dump_id); 82 rc = opal_dump_ack(dump_id);
83 if (rc) 83 if (rc)
84 pr_warn("%s: Failed to send ack to Dump ID 0x%x (%d)\n", 84 pr_warn("%s: Failed to send ack to Dump ID 0x%x (%d)\n",
85 __func__, dump_id, rc); 85 __func__, dump_id, rc);
86 return rc; 86 return rc;
87 } 87 }
88 88
89 static ssize_t dump_ack_store(struct dump_obj *dump_obj, 89 static ssize_t dump_ack_store(struct dump_obj *dump_obj,
90 struct dump_attribute *attr, 90 struct dump_attribute *attr,
91 const char *buf, 91 const char *buf,
92 size_t count) 92 size_t count)
93 { 93 {
94 dump_send_ack(dump_obj->id); 94 dump_send_ack(dump_obj->id);
95 sysfs_remove_file_self(&dump_obj->kobj, &attr->attr); 95 sysfs_remove_file_self(&dump_obj->kobj, &attr->attr);
96 kobject_put(&dump_obj->kobj); 96 kobject_put(&dump_obj->kobj);
97 return count; 97 return count;
98 } 98 }
99 99
100 /* Attributes of a dump 100 /* Attributes of a dump
101 * The binary attribute of the dump itself is dynamic 101 * The binary attribute of the dump itself is dynamic
102 * due to the dynamic size of the dump 102 * due to the dynamic size of the dump
103 */ 103 */
104 static struct dump_attribute id_attribute = 104 static struct dump_attribute id_attribute =
105 __ATTR(id, 0666, dump_id_show, NULL); 105 __ATTR(id, 0666, dump_id_show, NULL);
106 static struct dump_attribute type_attribute = 106 static struct dump_attribute type_attribute =
107 __ATTR(type, 0666, dump_type_show, NULL); 107 __ATTR(type, 0666, dump_type_show, NULL);
108 static struct dump_attribute ack_attribute = 108 static struct dump_attribute ack_attribute =
109 __ATTR(acknowledge, 0660, dump_ack_show, dump_ack_store); 109 __ATTR(acknowledge, 0660, dump_ack_show, dump_ack_store);
110 110
111 static ssize_t init_dump_show(struct dump_obj *dump_obj, 111 static ssize_t init_dump_show(struct dump_obj *dump_obj,
112 struct dump_attribute *attr, 112 struct dump_attribute *attr,
113 char *buf) 113 char *buf)
114 { 114 {
115 return sprintf(buf, "1 - initiate dump\n"); 115 return sprintf(buf, "1 - initiate dump\n");
116 } 116 }
117 117
118 static int64_t dump_fips_init(uint8_t type) 118 static int64_t dump_fips_init(uint8_t type)
119 { 119 {
120 int rc; 120 int rc;
121 121
122 rc = opal_dump_init(type); 122 rc = opal_dump_init(type);
123 if (rc) 123 if (rc)
124 pr_warn("%s: Failed to initiate FipS dump (%d)\n", 124 pr_warn("%s: Failed to initiate FipS dump (%d)\n",
125 __func__, rc); 125 __func__, rc);
126 return rc; 126 return rc;
127 } 127 }
128 128
129 static ssize_t init_dump_store(struct dump_obj *dump_obj, 129 static ssize_t init_dump_store(struct dump_obj *dump_obj,
130 struct dump_attribute *attr, 130 struct dump_attribute *attr,
131 const char *buf, 131 const char *buf,
132 size_t count) 132 size_t count)
133 { 133 {
134 dump_fips_init(DUMP_TYPE_FSP); 134 dump_fips_init(DUMP_TYPE_FSP);
135 pr_info("%s: Initiated FSP dump\n", __func__); 135 pr_info("%s: Initiated FSP dump\n", __func__);
136 return count; 136 return count;
137 } 137 }
138 138
139 static struct dump_attribute initiate_attribute = 139 static struct dump_attribute initiate_attribute =
140 __ATTR(initiate_dump, 0600, init_dump_show, init_dump_store); 140 __ATTR(initiate_dump, 0600, init_dump_show, init_dump_store);
141 141
142 static struct attribute *initiate_attrs[] = { 142 static struct attribute *initiate_attrs[] = {
143 &initiate_attribute.attr, 143 &initiate_attribute.attr,
144 NULL, 144 NULL,
145 }; 145 };
146 146
147 static struct attribute_group initiate_attr_group = { 147 static struct attribute_group initiate_attr_group = {
148 .attrs = initiate_attrs, 148 .attrs = initiate_attrs,
149 }; 149 };
150 150
151 static struct kset *dump_kset; 151 static struct kset *dump_kset;
152 152
153 static ssize_t dump_attr_show(struct kobject *kobj, 153 static ssize_t dump_attr_show(struct kobject *kobj,
154 struct attribute *attr, 154 struct attribute *attr,
155 char *buf) 155 char *buf)
156 { 156 {
157 struct dump_attribute *attribute; 157 struct dump_attribute *attribute;
158 struct dump_obj *dump; 158 struct dump_obj *dump;
159 159
160 attribute = to_dump_attr(attr); 160 attribute = to_dump_attr(attr);
161 dump = to_dump_obj(kobj); 161 dump = to_dump_obj(kobj);
162 162
163 if (!attribute->show) 163 if (!attribute->show)
164 return -EIO; 164 return -EIO;
165 165
166 return attribute->show(dump, attribute, buf); 166 return attribute->show(dump, attribute, buf);
167 } 167 }
168 168
169 static ssize_t dump_attr_store(struct kobject *kobj, 169 static ssize_t dump_attr_store(struct kobject *kobj,
170 struct attribute *attr, 170 struct attribute *attr,
171 const char *buf, size_t len) 171 const char *buf, size_t len)
172 { 172 {
173 struct dump_attribute *attribute; 173 struct dump_attribute *attribute;
174 struct dump_obj *dump; 174 struct dump_obj *dump;
175 175
176 attribute = to_dump_attr(attr); 176 attribute = to_dump_attr(attr);
177 dump = to_dump_obj(kobj); 177 dump = to_dump_obj(kobj);
178 178
179 if (!attribute->store) 179 if (!attribute->store)
180 return -EIO; 180 return -EIO;
181 181
182 return attribute->store(dump, attribute, buf, len); 182 return attribute->store(dump, attribute, buf, len);
183 } 183 }
184 184
185 static const struct sysfs_ops dump_sysfs_ops = { 185 static const struct sysfs_ops dump_sysfs_ops = {
186 .show = dump_attr_show, 186 .show = dump_attr_show,
187 .store = dump_attr_store, 187 .store = dump_attr_store,
188 }; 188 };
189 189
190 static void dump_release(struct kobject *kobj) 190 static void dump_release(struct kobject *kobj)
191 { 191 {
192 struct dump_obj *dump; 192 struct dump_obj *dump;
193 193
194 dump = to_dump_obj(kobj); 194 dump = to_dump_obj(kobj);
195 vfree(dump->buffer); 195 vfree(dump->buffer);
196 kfree(dump); 196 kfree(dump);
197 } 197 }
198 198
199 static struct attribute *dump_default_attrs[] = { 199 static struct attribute *dump_default_attrs[] = {
200 &id_attribute.attr, 200 &id_attribute.attr,
201 &type_attribute.attr, 201 &type_attribute.attr,
202 &ack_attribute.attr, 202 &ack_attribute.attr,
203 NULL, 203 NULL,
204 }; 204 };
205 205
206 static struct kobj_type dump_ktype = { 206 static struct kobj_type dump_ktype = {
207 .sysfs_ops = &dump_sysfs_ops, 207 .sysfs_ops = &dump_sysfs_ops,
208 .release = &dump_release, 208 .release = &dump_release,
209 .default_attrs = dump_default_attrs, 209 .default_attrs = dump_default_attrs,
210 }; 210 };
211 211
212 static void free_dump_sg_list(struct opal_sg_list *list)
213 {
214 struct opal_sg_list *sg1;
215 while (list) {
216 sg1 = list->next;
217 kfree(list);
218 list = sg1;
219 }
220 list = NULL;
221 }
222
223 static struct opal_sg_list *dump_data_to_sglist(struct dump_obj *dump)
224 {
225 struct opal_sg_list *sg1, *list = NULL;
226 void *addr;
227 int64_t size;
228
229 addr = dump->buffer;
230 size = dump->size;
231
232 sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
233 if (!sg1)
234 goto nomem;
235
236 list = sg1;
237 sg1->num_entries = 0;
238 while (size > 0) {
239 /* Translate virtual address to physical address */
240 sg1->entry[sg1->num_entries].data =
241 (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
242
243 if (size > PAGE_SIZE)
244 sg1->entry[sg1->num_entries].length = PAGE_SIZE;
245 else
246 sg1->entry[sg1->num_entries].length = size;
247
248 sg1->num_entries++;
249 if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
250 sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
251 if (!sg1->next)
252 goto nomem;
253
254 sg1 = sg1->next;
255 sg1->num_entries = 0;
256 }
257 addr += PAGE_SIZE;
258 size -= PAGE_SIZE;
259 }
260 return list;
261
262 nomem:
263 pr_err("%s : Failed to allocate memory\n", __func__);
264 free_dump_sg_list(list);
265 return NULL;
266 }
267
268 static void sglist_to_phy_addr(struct opal_sg_list *list)
269 {
270 struct opal_sg_list *sg, *next;
271
272 for (sg = list; sg; sg = next) {
273 next = sg->next;
274 /* Don't translate NULL pointer for last entry */
275 if (sg->next)
276 sg->next = (struct opal_sg_list *)__pa(sg->next);
277 else
278 sg->next = NULL;
279
280 /* Convert num_entries to length */
281 sg->num_entries =
282 sg->num_entries * sizeof(struct opal_sg_entry) + 16;
283 }
284 }
285
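The two routines removed above, dump_data_to_sglist() and sglist_to_phy_addr(), together with the flash-side copies removed further down, are replaced by a single builder in opal.c that this excerpt does not show. The following is a sketch of what that consolidated helper plausibly looks like, reconstructed from the removed code with cpu_to_be64() conversions added for the endian fix; treat the details as assumptions rather than the verbatim implementation:

/* Sketch, not the verbatim opal.c implementation. */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
					     unsigned long vmalloc_size)
{
	struct opal_sg_list *sg, *first = NULL;
	unsigned long i = 0;

	sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sg)
		goto nomem;

	first = sg;

	while (vmalloc_size > 0) {
		uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
		uint64_t length = min_t(uint64_t, vmalloc_size, PAGE_SIZE);

		/* Entries are stored big-endian for OPAL. */
		sg->entry[i].data = cpu_to_be64(data);
		sg->entry[i].length = cpu_to_be64(length);
		i++;

		if (i >= SG_ENTRIES_PER_NODE) {
			struct opal_sg_list *next;

			next = kzalloc(PAGE_SIZE, GFP_KERNEL);
			if (!next)
				goto nomem;

			/* Node header holds its size in bytes: 16-byte header plus entries. */
			sg->length = cpu_to_be64(
					i * sizeof(struct opal_sg_entry) + 16);
			i = 0;
			/* Nodes are chained by physical address. */
			sg->next = cpu_to_be64(__pa(next));
			sg = next;
		}

		vmalloc_addr += length;
		vmalloc_size -= length;
	}

	sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);

	return first;

nomem:
	pr_err("%s : Failed to allocate memory\n", __func__);
	opal_free_sg_list(first);
	return NULL;
}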
286 static int64_t dump_read_info(uint32_t *id, uint32_t *size, uint32_t *type) 212 static int64_t dump_read_info(uint32_t *id, uint32_t *size, uint32_t *type)
287 { 213 {
288 int rc; 214 int rc;
289 *type = 0xffffffff; 215 *type = 0xffffffff;
290 216
291 rc = opal_dump_info2(id, size, type); 217 rc = opal_dump_info2(id, size, type);
292 218
293 if (rc == OPAL_PARAMETER) 219 if (rc == OPAL_PARAMETER)
294 rc = opal_dump_info(id, size); 220 rc = opal_dump_info(id, size);
295 221
296 if (rc) 222 if (rc)
297 pr_warn("%s: Failed to get dump info (%d)\n", 223 pr_warn("%s: Failed to get dump info (%d)\n",
298 __func__, rc); 224 __func__, rc);
299 return rc; 225 return rc;
300 } 226 }
301 227
302 static int64_t dump_read_data(struct dump_obj *dump) 228 static int64_t dump_read_data(struct dump_obj *dump)
303 { 229 {
304 struct opal_sg_list *list; 230 struct opal_sg_list *list;
305 uint64_t addr; 231 uint64_t addr;
306 int64_t rc; 232 int64_t rc;
307 233
308 /* Allocate memory */ 234 /* Allocate memory */
309 dump->buffer = vzalloc(PAGE_ALIGN(dump->size)); 235 dump->buffer = vzalloc(PAGE_ALIGN(dump->size));
310 if (!dump->buffer) { 236 if (!dump->buffer) {
311 pr_err("%s : Failed to allocate memory\n", __func__); 237 pr_err("%s : Failed to allocate memory\n", __func__);
312 rc = -ENOMEM; 238 rc = -ENOMEM;
313 goto out; 239 goto out;
314 } 240 }
315 241
316 /* Generate SG list */ 242 /* Generate SG list */
317 list = dump_data_to_sglist(dump); 243 list = opal_vmalloc_to_sg_list(dump->buffer, dump->size);
318 if (!list) { 244 if (!list) {
319 rc = -ENOMEM; 245 rc = -ENOMEM;
320 goto out; 246 goto out;
321 } 247 }
322 248
323 /* Translate sg list addr to real address */
324 sglist_to_phy_addr(list);
325
326 /* First entry address */ 249 /* First entry address */
327 addr = __pa(list); 250 addr = __pa(list);
328 251
329 /* Fetch data */ 252 /* Fetch data */
330 rc = OPAL_BUSY_EVENT; 253 rc = OPAL_BUSY_EVENT;
331 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { 254 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
332 rc = opal_dump_read(dump->id, addr); 255 rc = opal_dump_read(dump->id, addr);
333 if (rc == OPAL_BUSY_EVENT) { 256 if (rc == OPAL_BUSY_EVENT) {
334 opal_poll_events(NULL); 257 opal_poll_events(NULL);
335 msleep(20); 258 msleep(20);
336 } 259 }
337 } 260 }
338 261
339 if (rc != OPAL_SUCCESS && rc != OPAL_PARTIAL) 262 if (rc != OPAL_SUCCESS && rc != OPAL_PARTIAL)
340 pr_warn("%s: Extract dump failed for ID 0x%x\n", 263 pr_warn("%s: Extract dump failed for ID 0x%x\n",
341 __func__, dump->id); 264 __func__, dump->id);
342 265
343 /* Free SG list */ 266 /* Free SG list */
344 free_dump_sg_list(list); 267 opal_free_sg_list(list);
345 268
346 out: 269 out:
347 return rc; 270 return rc;
348 } 271 }
349 272
350 static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj, 273 static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj,
351 struct bin_attribute *bin_attr, 274 struct bin_attribute *bin_attr,
352 char *buffer, loff_t pos, size_t count) 275 char *buffer, loff_t pos, size_t count)
353 { 276 {
354 ssize_t rc; 277 ssize_t rc;
355 278
356 struct dump_obj *dump = to_dump_obj(kobj); 279 struct dump_obj *dump = to_dump_obj(kobj);
357 280
358 if (!dump->buffer) { 281 if (!dump->buffer) {
359 rc = dump_read_data(dump); 282 rc = dump_read_data(dump);
360 283
361 if (rc != OPAL_SUCCESS && rc != OPAL_PARTIAL) { 284 if (rc != OPAL_SUCCESS && rc != OPAL_PARTIAL) {
362 vfree(dump->buffer); 285 vfree(dump->buffer);
363 dump->buffer = NULL; 286 dump->buffer = NULL;
364 287
365 return -EIO; 288 return -EIO;
366 } 289 }
367 if (rc == OPAL_PARTIAL) { 290 if (rc == OPAL_PARTIAL) {
368 /* On a partial read, we just return EIO 291 /* On a partial read, we just return EIO
369 * and rely on userspace to ask us to try 292 * and rely on userspace to ask us to try
370 * again. 293 * again.
371 */ 294 */
372 pr_info("%s: Platform dump partially read. ID = 0x%x\n", 295 pr_info("%s: Platform dump partially read. ID = 0x%x\n",

373 __func__, dump->id); 296 __func__, dump->id);
374 return -EIO; 297 return -EIO;
375 } 298 }
376 } 299 }
377 300
378 memcpy(buffer, dump->buffer + pos, count); 301 memcpy(buffer, dump->buffer + pos, count);
379 302
380 /* You may think we could free the dump buffer now and retrieve 303 /* You may think we could free the dump buffer now and retrieve
381 * it again later if needed, but due to current firmware limitation, 304 * it again later if needed, but due to current firmware limitation,
382 * that's not the case. So, once read into userspace once, 305 * that's not the case. So, once read into userspace once,
383 * we keep the dump around until it's acknowledged by userspace. 306 * we keep the dump around until it's acknowledged by userspace.
384 */ 307 */
385 308
386 return count; 309 return count;
387 } 310 }
388 311
389 static struct dump_obj *create_dump_obj(uint32_t id, size_t size, 312 static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
390 uint32_t type) 313 uint32_t type)
391 { 314 {
392 struct dump_obj *dump; 315 struct dump_obj *dump;
393 int rc; 316 int rc;
394 317
395 dump = kzalloc(sizeof(*dump), GFP_KERNEL); 318 dump = kzalloc(sizeof(*dump), GFP_KERNEL);
396 if (!dump) 319 if (!dump)
397 return NULL; 320 return NULL;
398 321
399 dump->kobj.kset = dump_kset; 322 dump->kobj.kset = dump_kset;
400 323
401 kobject_init(&dump->kobj, &dump_ktype); 324 kobject_init(&dump->kobj, &dump_ktype);
402 325
403 sysfs_bin_attr_init(&dump->dump_attr); 326 sysfs_bin_attr_init(&dump->dump_attr);
404 327
405 dump->dump_attr.attr.name = "dump"; 328 dump->dump_attr.attr.name = "dump";
406 dump->dump_attr.attr.mode = 0400; 329 dump->dump_attr.attr.mode = 0400;
407 dump->dump_attr.size = size; 330 dump->dump_attr.size = size;
408 dump->dump_attr.read = dump_attr_read; 331 dump->dump_attr.read = dump_attr_read;
409 332
410 dump->id = id; 333 dump->id = id;
411 dump->size = size; 334 dump->size = size;
412 dump->type = type; 335 dump->type = type;
413 336
414 rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id); 337 rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id);
415 if (rc) { 338 if (rc) {
416 kobject_put(&dump->kobj); 339 kobject_put(&dump->kobj);
417 return NULL; 340 return NULL;
418 } 341 }
419 342
420 rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr); 343 rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr);
421 if (rc) { 344 if (rc) {
422 kobject_put(&dump->kobj); 345 kobject_put(&dump->kobj);
423 return NULL; 346 return NULL;
424 } 347 }
425 348
426 pr_info("%s: New platform dump. ID = 0x%x Size %u\n", 349 pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
427 __func__, dump->id, dump->size); 350 __func__, dump->id, dump->size);
428 351
429 kobject_uevent(&dump->kobj, KOBJ_ADD); 352 kobject_uevent(&dump->kobj, KOBJ_ADD);
430 353
431 return dump; 354 return dump;
432 } 355 }
433 356
434 static int process_dump(void) 357 static int process_dump(void)
435 { 358 {
436 int rc; 359 int rc;
437 uint32_t dump_id, dump_size, dump_type; 360 uint32_t dump_id, dump_size, dump_type;
438 struct dump_obj *dump; 361 struct dump_obj *dump;
439 char name[22]; 362 char name[22];
440 363
441 rc = dump_read_info(&dump_id, &dump_size, &dump_type); 364 rc = dump_read_info(&dump_id, &dump_size, &dump_type);
442 if (rc != OPAL_SUCCESS) 365 if (rc != OPAL_SUCCESS)
443 return rc; 366 return rc;
444 367
445 sprintf(name, "0x%x-0x%x", dump_type, dump_id); 368 sprintf(name, "0x%x-0x%x", dump_type, dump_id);
446 369
447 /* we may get notified twice, let's handle 370 /* we may get notified twice, let's handle
448 * that gracefully and not create two conflicting 371 * that gracefully and not create two conflicting
449 * entries. 372 * entries.
450 */ 373 */
451 if (kset_find_obj(dump_kset, name)) 374 if (kset_find_obj(dump_kset, name))
452 return 0; 375 return 0;
453 376
454 dump = create_dump_obj(dump_id, dump_size, dump_type); 377 dump = create_dump_obj(dump_id, dump_size, dump_type);
455 if (!dump) 378 if (!dump)
456 return -1; 379 return -1;
457 380
458 return 0; 381 return 0;
459 } 382 }
460 383
461 static void dump_work_fn(struct work_struct *work) 384 static void dump_work_fn(struct work_struct *work)
462 { 385 {
463 process_dump(); 386 process_dump();
464 } 387 }
465 388
466 static DECLARE_WORK(dump_work, dump_work_fn); 389 static DECLARE_WORK(dump_work, dump_work_fn);
467 390
468 static void schedule_process_dump(void) 391 static void schedule_process_dump(void)
469 { 392 {
470 schedule_work(&dump_work); 393 schedule_work(&dump_work);
471 } 394 }
472 395
473 /* 396 /*
474 * New dump available notification 397 * New dump available notification
475 * 398 *
476 * Once we get notification, we add sysfs entries for it. 399 * Once we get notification, we add sysfs entries for it.
477 * We only fetch the dump on demand, and create sysfs asynchronously. 400 * We only fetch the dump on demand, and create sysfs asynchronously.
478 */ 401 */
479 static int dump_event(struct notifier_block *nb, 402 static int dump_event(struct notifier_block *nb,
480 unsigned long events, void *change) 403 unsigned long events, void *change)
481 { 404 {
482 if (events & OPAL_EVENT_DUMP_AVAIL) 405 if (events & OPAL_EVENT_DUMP_AVAIL)
483 schedule_process_dump(); 406 schedule_process_dump();
484 407
485 return 0; 408 return 0;
486 } 409 }
487 410
488 static struct notifier_block dump_nb = { 411 static struct notifier_block dump_nb = {
489 .notifier_call = dump_event, 412 .notifier_call = dump_event,
490 .next = NULL, 413 .next = NULL,
491 .priority = 0 414 .priority = 0
492 }; 415 };
493 416
494 void __init opal_platform_dump_init(void) 417 void __init opal_platform_dump_init(void)
495 { 418 {
496 int rc; 419 int rc;
497 420
498 dump_kset = kset_create_and_add("dump", NULL, opal_kobj); 421 dump_kset = kset_create_and_add("dump", NULL, opal_kobj);
499 if (!dump_kset) { 422 if (!dump_kset) {
500 pr_warn("%s: Failed to create dump kset\n", __func__); 423 pr_warn("%s: Failed to create dump kset\n", __func__);
501 return; 424 return;
502 } 425 }
503 426
504 rc = sysfs_create_group(&dump_kset->kobj, &initiate_attr_group); 427 rc = sysfs_create_group(&dump_kset->kobj, &initiate_attr_group);
505 if (rc) { 428 if (rc) {
506 pr_warn("%s: Failed to create initiate dump attr group\n", 429 pr_warn("%s: Failed to create initiate dump attr group\n",
507 __func__); 430 __func__);
508 kobject_put(&dump_kset->kobj); 431 kobject_put(&dump_kset->kobj);
509 return; 432 return;
510 } 433 }
511 434
512 rc = opal_notifier_register(&dump_nb); 435 rc = opal_notifier_register(&dump_nb);
513 if (rc) { 436 if (rc) {
514 pr_warn("%s: Can't register OPAL event notifier (%d)\n", 437 pr_warn("%s: Can't register OPAL event notifier (%d)\n",
515 __func__, rc); 438 __func__, rc);
516 return; 439 return;
517 } 440 }
518 441
519 opal_dump_resend_notification(); 442 opal_dump_resend_notification();
520 } 443 }
521 444
arch/powerpc/platforms/powernv/opal-flash.c
1 /* 1 /*
2 * PowerNV OPAL Firmware Update Interface 2 * PowerNV OPAL Firmware Update Interface
3 * 3 *
4 * Copyright 2013 IBM Corp. 4 * Copyright 2013 IBM Corp.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12 #define DEBUG 12 #define DEBUG
13 13
14 #include <linux/kernel.h> 14 #include <linux/kernel.h>
15 #include <linux/reboot.h> 15 #include <linux/reboot.h>
16 #include <linux/init.h> 16 #include <linux/init.h>
17 #include <linux/kobject.h> 17 #include <linux/kobject.h>
18 #include <linux/sysfs.h> 18 #include <linux/sysfs.h>
19 #include <linux/slab.h> 19 #include <linux/slab.h>
20 #include <linux/mm.h> 20 #include <linux/mm.h>
21 #include <linux/vmalloc.h> 21 #include <linux/vmalloc.h>
22 #include <linux/pagemap.h> 22 #include <linux/pagemap.h>
23 23
24 #include <asm/opal.h> 24 #include <asm/opal.h>
25 25
26 /* FLASH status codes */ 26 /* FLASH status codes */
27 #define FLASH_NO_OP -1099 /* No operation initiated by user */ 27 #define FLASH_NO_OP -1099 /* No operation initiated by user */
28 #define FLASH_NO_AUTH -9002 /* Not a service authority partition */ 28 #define FLASH_NO_AUTH -9002 /* Not a service authority partition */
29 29
30 /* Validate image status values */ 30 /* Validate image status values */
31 #define VALIDATE_IMG_READY -1001 /* Image ready for validation */ 31 #define VALIDATE_IMG_READY -1001 /* Image ready for validation */
32 #define VALIDATE_IMG_INCOMPLETE -1002 /* User copied < VALIDATE_BUF_SIZE */ 32 #define VALIDATE_IMG_INCOMPLETE -1002 /* User copied < VALIDATE_BUF_SIZE */
33 33
34 /* Manage image status values */ 34 /* Manage image status values */
35 #define MANAGE_ACTIVE_ERR -9001 /* Cannot overwrite active img */ 35 #define MANAGE_ACTIVE_ERR -9001 /* Cannot overwrite active img */
36 36
37 /* Flash image status values */ 37 /* Flash image status values */
38 #define FLASH_IMG_READY 0 /* Img ready for flash on reboot */ 38 #define FLASH_IMG_READY 0 /* Img ready for flash on reboot */
39 #define FLASH_INVALID_IMG -1003 /* Flash image shorter than expected */ 39 #define FLASH_INVALID_IMG -1003 /* Flash image shorter than expected */
40 #define FLASH_IMG_NULL_DATA -1004 /* Bad data in sg list entry */ 40 #define FLASH_IMG_NULL_DATA -1004 /* Bad data in sg list entry */
41 #define FLASH_IMG_BAD_LEN -1005 /* Bad length in sg list entry */ 41 #define FLASH_IMG_BAD_LEN -1005 /* Bad length in sg list entry */
42 42
43 /* Manage operation tokens */ 43 /* Manage operation tokens */
44 #define FLASH_REJECT_TMP_SIDE 0 /* Reject temporary fw image */ 44 #define FLASH_REJECT_TMP_SIDE 0 /* Reject temporary fw image */
45 #define FLASH_COMMIT_TMP_SIDE 1 /* Commit temporary fw image */ 45 #define FLASH_COMMIT_TMP_SIDE 1 /* Commit temporary fw image */
46 46
47 /* Update tokens */ 47 /* Update tokens */
48 #define FLASH_UPDATE_CANCEL 0 /* Cancel update request */ 48 #define FLASH_UPDATE_CANCEL 0 /* Cancel update request */
49 #define FLASH_UPDATE_INIT 1 /* Initiate update */ 49 #define FLASH_UPDATE_INIT 1 /* Initiate update */
50 50
51 /* Validate image update result tokens */ 51 /* Validate image update result tokens */
52 #define VALIDATE_TMP_UPDATE 0 /* T side will be updated */ 52 #define VALIDATE_TMP_UPDATE 0 /* T side will be updated */
53 #define VALIDATE_FLASH_AUTH 1 /* Partition does not have authority */ 53 #define VALIDATE_FLASH_AUTH 1 /* Partition does not have authority */
54 #define VALIDATE_INVALID_IMG 2 /* Candidate image is not valid */ 54 #define VALIDATE_INVALID_IMG 2 /* Candidate image is not valid */
55 #define VALIDATE_CUR_UNKNOWN 3 /* Current fixpack level is unknown */ 55 #define VALIDATE_CUR_UNKNOWN 3 /* Current fixpack level is unknown */
56 /* 56 /*
57 * Current T side will be committed to P side before being replaced with new 57 * Current T side will be committed to P side before being replaced with new
58 * image, and the new image is downlevel from current image 58 * image, and the new image is downlevel from current image
59 */ 59 */
60 #define VALIDATE_TMP_COMMIT_DL 4 60 #define VALIDATE_TMP_COMMIT_DL 4
61 /* 61 /*
62 * Current T side will be committed to P side before being replaced with new 62 * Current T side will be committed to P side before being replaced with new
63 * image 63 * image
64 */ 64 */
65 #define VALIDATE_TMP_COMMIT 5 65 #define VALIDATE_TMP_COMMIT 5
66 /* 66 /*
67 * T side will be updated with a downlevel image 67 * T side will be updated with a downlevel image
68 */ 68 */
69 #define VALIDATE_TMP_UPDATE_DL 6 69 #define VALIDATE_TMP_UPDATE_DL 6
70 /* 70 /*
71 * The candidate image's release date is later than the system's firmware 71 * The candidate image's release date is later than the system's firmware
72 * service entitlement date - service warranty period has expired 72 * service entitlement date - service warranty period has expired
73 */ 73 */
74 #define VALIDATE_OUT_OF_WRNTY 7 74 #define VALIDATE_OUT_OF_WRNTY 7
75 75
76 /* Validate buffer size */ 76 /* Validate buffer size */
77 #define VALIDATE_BUF_SIZE 4096 77 #define VALIDATE_BUF_SIZE 4096
78 78
79 /* XXX: Assume candidate image size is <= 1GB */ 79 /* XXX: Assume candidate image size is <= 1GB */
80 #define MAX_IMAGE_SIZE 0x40000000 80 #define MAX_IMAGE_SIZE 0x40000000
81 81
82 /* Flash sg list version */
83 #define SG_LIST_VERSION (1UL)
84
85 /* Image status */ 82 /* Image status */
86 enum { 83 enum {
87 IMAGE_INVALID, 84 IMAGE_INVALID,
88 IMAGE_LOADING, 85 IMAGE_LOADING,
89 IMAGE_READY, 86 IMAGE_READY,
90 }; 87 };
91 88
92 /* Candidate image data */ 89 /* Candidate image data */
93 struct image_data_t { 90 struct image_data_t {
94 int status; 91 int status;
95 void *data; 92 void *data;
96 uint32_t size; 93 uint32_t size;
97 }; 94 };
98 95
99 /* Candidate image header */ 96 /* Candidate image header */
100 struct image_header_t { 97 struct image_header_t {
101 uint16_t magic; 98 uint16_t magic;
102 uint16_t version; 99 uint16_t version;
103 uint32_t size; 100 uint32_t size;
104 }; 101 };
105 102
106 struct validate_flash_t { 103 struct validate_flash_t {
107 int status; /* Return status */ 104 int status; /* Return status */
108 void *buf; /* Candidate image buffer */ 105 void *buf; /* Candidate image buffer */
109 uint32_t buf_size; /* Image size */ 106 uint32_t buf_size; /* Image size */
110 uint32_t result; /* Update results token */ 107 uint32_t result; /* Update results token */
111 }; 108 };
112 109
113 struct manage_flash_t { 110 struct manage_flash_t {
114 int status; /* Return status */ 111 int status; /* Return status */
115 }; 112 };
116 113
117 struct update_flash_t { 114 struct update_flash_t {
118 int status; /* Return status */ 115 int status; /* Return status */
119 }; 116 };
120 117
121 static struct image_header_t image_header; 118 static struct image_header_t image_header;
122 static struct image_data_t image_data; 119 static struct image_data_t image_data;
123 static struct validate_flash_t validate_flash_data; 120 static struct validate_flash_t validate_flash_data;
124 static struct manage_flash_t manage_flash_data; 121 static struct manage_flash_t manage_flash_data;
125 static struct update_flash_t update_flash_data; 122 static struct update_flash_t update_flash_data;
126 123
127 static DEFINE_MUTEX(image_data_mutex); 124 static DEFINE_MUTEX(image_data_mutex);
128 125
129 /* 126 /*
130 * Validate candidate image 127 * Validate candidate image
131 */ 128 */
132 static inline void opal_flash_validate(void) 129 static inline void opal_flash_validate(void)
133 { 130 {
134 long ret; 131 long ret;
135 void *buf = validate_flash_data.buf; 132 void *buf = validate_flash_data.buf;
136 __be32 size, result; 133 __be32 size, result;
137 134
138 ret = opal_validate_flash(__pa(buf), &size, &result); 135 ret = opal_validate_flash(__pa(buf), &size, &result);
139 136
140 validate_flash_data.status = ret; 137 validate_flash_data.status = ret;
141 validate_flash_data.buf_size = be32_to_cpu(size); 138 validate_flash_data.buf_size = be32_to_cpu(size);
142 validate_flash_data.result = be32_to_cpu(result); 139 validate_flash_data.result = be32_to_cpu(result);
143 } 140 }
144 141
145 /* 142 /*
146 * Validate output format: 143 * Validate output format:
147 * validate result token 144 * validate result token
148 * current image version details 145 * current image version details
149 * new image version details 146 * new image version details
150 */ 147 */
151 static ssize_t validate_show(struct kobject *kobj, 148 static ssize_t validate_show(struct kobject *kobj,
152 struct kobj_attribute *attr, char *buf) 149 struct kobj_attribute *attr, char *buf)
153 { 150 {
154 struct validate_flash_t *args_buf = &validate_flash_data; 151 struct validate_flash_t *args_buf = &validate_flash_data;
155 int len; 152 int len;
156 153
157 /* Candidate image is not validated */ 154 /* Candidate image is not validated */
158 if (args_buf->status < VALIDATE_TMP_UPDATE) { 155 if (args_buf->status < VALIDATE_TMP_UPDATE) {
159 len = sprintf(buf, "%d\n", args_buf->status); 156 len = sprintf(buf, "%d\n", args_buf->status);
160 goto out; 157 goto out;
161 } 158 }
162 159
163 /* Result token */ 160 /* Result token */
164 len = sprintf(buf, "%d\n", args_buf->result); 161 len = sprintf(buf, "%d\n", args_buf->result);
165 162
166 /* Current and candidate image version details */ 163 /* Current and candidate image version details */
167 if ((args_buf->result != VALIDATE_TMP_UPDATE) && 164 if ((args_buf->result != VALIDATE_TMP_UPDATE) &&
168 (args_buf->result < VALIDATE_CUR_UNKNOWN)) 165 (args_buf->result < VALIDATE_CUR_UNKNOWN))
169 goto out; 166 goto out;
170 167
171 if (args_buf->buf_size > (VALIDATE_BUF_SIZE - len)) { 168 if (args_buf->buf_size > (VALIDATE_BUF_SIZE - len)) {
172 memcpy(buf + len, args_buf->buf, VALIDATE_BUF_SIZE - len); 169 memcpy(buf + len, args_buf->buf, VALIDATE_BUF_SIZE - len);
173 len = VALIDATE_BUF_SIZE; 170 len = VALIDATE_BUF_SIZE;
174 } else { 171 } else {
175 memcpy(buf + len, args_buf->buf, args_buf->buf_size); 172 memcpy(buf + len, args_buf->buf, args_buf->buf_size);
176 len += args_buf->buf_size; 173 len += args_buf->buf_size;
177 } 174 }
178 out: 175 out:
179 /* Set status to default */ 176 /* Set status to default */
180 args_buf->status = FLASH_NO_OP; 177 args_buf->status = FLASH_NO_OP;
181 return len; 178 return len;
182 } 179 }
183 180
184 /* 181 /*
185 * Validate candidate firmware image 182 * Validate candidate firmware image
186 * 183 *
187 * Note: 184 * Note:
188 * We are only interested in first 4K bytes of the 185 * We are only interested in first 4K bytes of the
189 * candidate image. 186 * candidate image.
190 */ 187 */
191 static ssize_t validate_store(struct kobject *kobj, 188 static ssize_t validate_store(struct kobject *kobj,
192 struct kobj_attribute *attr, 189 struct kobj_attribute *attr,
193 const char *buf, size_t count) 190 const char *buf, size_t count)
194 { 191 {
195 struct validate_flash_t *args_buf = &validate_flash_data; 192 struct validate_flash_t *args_buf = &validate_flash_data;
196 193
197 if (buf[0] != '1') 194 if (buf[0] != '1')
198 return -EINVAL; 195 return -EINVAL;
199 196
200 mutex_lock(&image_data_mutex); 197 mutex_lock(&image_data_mutex);
201 198
202 if (image_data.status != IMAGE_READY || 199 if (image_data.status != IMAGE_READY ||
203 image_data.size < VALIDATE_BUF_SIZE) { 200 image_data.size < VALIDATE_BUF_SIZE) {
204 args_buf->result = VALIDATE_INVALID_IMG; 201 args_buf->result = VALIDATE_INVALID_IMG;
205 args_buf->status = VALIDATE_IMG_INCOMPLETE; 202 args_buf->status = VALIDATE_IMG_INCOMPLETE;
206 goto out; 203 goto out;
207 } 204 }
208 205
209 /* Copy first 4k bytes of candidate image */ 206 /* Copy first 4k bytes of candidate image */
210 memcpy(args_buf->buf, image_data.data, VALIDATE_BUF_SIZE); 207 memcpy(args_buf->buf, image_data.data, VALIDATE_BUF_SIZE);
211 208
212 args_buf->status = VALIDATE_IMG_READY; 209 args_buf->status = VALIDATE_IMG_READY;
213 args_buf->buf_size = VALIDATE_BUF_SIZE; 210 args_buf->buf_size = VALIDATE_BUF_SIZE;
214 211
215 /* Validate candidate image */ 212 /* Validate candidate image */
216 opal_flash_validate(); 213 opal_flash_validate();
217 214
218 out: 215 out:
219 mutex_unlock(&image_data_mutex); 216 mutex_unlock(&image_data_mutex);
220 return count; 217 return count;
221 } 218 }
222 219
223 /* 220 /*
224 * Manage flash routine 221 * Manage flash routine
225 */ 222 */
226 static inline void opal_flash_manage(uint8_t op) 223 static inline void opal_flash_manage(uint8_t op)
227 { 224 {
228 struct manage_flash_t *const args_buf = &manage_flash_data; 225 struct manage_flash_t *const args_buf = &manage_flash_data;
229 226
230 args_buf->status = opal_manage_flash(op); 227 args_buf->status = opal_manage_flash(op);
231 } 228 }
232 229
233 /* 230 /*
234 * Show manage flash status 231 * Show manage flash status
235 */ 232 */
236 static ssize_t manage_show(struct kobject *kobj, 233 static ssize_t manage_show(struct kobject *kobj,
237 struct kobj_attribute *attr, char *buf) 234 struct kobj_attribute *attr, char *buf)
238 { 235 {
239 struct manage_flash_t *const args_buf = &manage_flash_data; 236 struct manage_flash_t *const args_buf = &manage_flash_data;
240 int rc; 237 int rc;
241 238
242 rc = sprintf(buf, "%d\n", args_buf->status); 239 rc = sprintf(buf, "%d\n", args_buf->status);
243 /* Set status to default*/ 240 /* Set status to default*/
244 args_buf->status = FLASH_NO_OP; 241 args_buf->status = FLASH_NO_OP;
245 return rc; 242 return rc;
246 } 243 }
247 244
248 /* 245 /*
249 * Manage operations: 246 * Manage operations:
250 * 0 - Reject 247 * 0 - Reject
251 * 1 - Commit 248 * 1 - Commit
252 */ 249 */
253 static ssize_t manage_store(struct kobject *kobj, 250 static ssize_t manage_store(struct kobject *kobj,
254 struct kobj_attribute *attr, 251 struct kobj_attribute *attr,
255 const char *buf, size_t count) 252 const char *buf, size_t count)
256 { 253 {
257 uint8_t op; 254 uint8_t op;
258 switch (buf[0]) { 255 switch (buf[0]) {
259 case '0': 256 case '0':
260 op = FLASH_REJECT_TMP_SIDE; 257 op = FLASH_REJECT_TMP_SIDE;
261 break; 258 break;
262 case '1': 259 case '1':
263 op = FLASH_COMMIT_TMP_SIDE; 260 op = FLASH_COMMIT_TMP_SIDE;
264 break; 261 break;
265 default: 262 default:
266 return -EINVAL; 263 return -EINVAL;
267 } 264 }
268 265
269 /* commit/reject temporary image */ 266 /* commit/reject temporary image */
270 opal_flash_manage(op); 267 opal_flash_manage(op);
271 return count; 268 return count;
272 } 269 }
273 270
274 /* 271 /*
275 * Free sg list
276 */
277 static void free_sg_list(struct opal_sg_list *list)
278 {
279 struct opal_sg_list *sg1;
280 while (list) {
281 sg1 = list->next;
282 kfree(list);
283 list = sg1;
284 }
285 list = NULL;
286 }
287
288 /*
289 * Build candidate image scatter gather list
290 *
291 * list format:
292 * -----------------------------------
293 * | VER (8) | Entry length in bytes |
294 * -----------------------------------
295 * | Pointer to next entry |
296 * -----------------------------------
297 * | Address of memory area 1 |
298 * -----------------------------------
299 * | Length of memory area 1 |
300 * -----------------------------------
301 * | ......... |
302 * -----------------------------------
303 * | ......... |
304 * -----------------------------------
305 * | Address of memory area N |
306 * -----------------------------------
307 * | Length of memory area N |
308 * -----------------------------------
309 */
310 static struct opal_sg_list *image_data_to_sglist(void)
311 {
312 struct opal_sg_list *sg1, *list = NULL;
313 void *addr;
314 int size;
315
316 addr = image_data.data;
317 size = image_data.size;
318
319 sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
320 if (!sg1)
321 return NULL;
322
323 list = sg1;
324 sg1->num_entries = 0;
325 while (size > 0) {
326 /* Translate virtual address to physical address */
327 sg1->entry[sg1->num_entries].data =
328 (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
329
330 if (size > PAGE_SIZE)
331 sg1->entry[sg1->num_entries].length = PAGE_SIZE;
332 else
333 sg1->entry[sg1->num_entries].length = size;
334
335 sg1->num_entries++;
336 if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
337 sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
338 if (!sg1->next) {
339 pr_err("%s : Failed to allocate memory\n",
340 __func__);
341 goto nomem;
342 }
343
344 sg1 = sg1->next;
345 sg1->num_entries = 0;
346 }
347 addr += PAGE_SIZE;
348 size -= PAGE_SIZE;
349 }
350 return list;
351 nomem:
352 free_sg_list(list);
353 return NULL;
354 }
355
356 /*
357 * OPAL update flash 272 * OPAL update flash
358 */ 273 */
359 static int opal_flash_update(int op) 274 static int opal_flash_update(int op)
360 { 275 {
361 struct opal_sg_list *sg, *list, *next; 276 struct opal_sg_list *list;
362 unsigned long addr; 277 unsigned long addr;
363 int64_t rc = OPAL_PARAMETER; 278 int64_t rc = OPAL_PARAMETER;
364 279
365 if (op == FLASH_UPDATE_CANCEL) { 280 if (op == FLASH_UPDATE_CANCEL) {
366 pr_alert("FLASH: Image update cancelled\n"); 281 pr_alert("FLASH: Image update cancelled\n");
367 addr = '\0'; 282 addr = '\0';
368 goto flash; 283 goto flash;
369 } 284 }
370 285
371 list = image_data_to_sglist(); 286 list = opal_vmalloc_to_sg_list(image_data.data, image_data.size);
372 if (!list) 287 if (!list)
373 goto invalid_img; 288 goto invalid_img;
374 289
375 /* First entry address */ 290 /* First entry address */
376 addr = __pa(list); 291 addr = __pa(list);
377
378 /* Translate sg list address to absolute */
379 for (sg = list; sg; sg = next) {
380 next = sg->next;
381 /* Don't translate NULL pointer for last entry */
382 if (sg->next)
383 sg->next = (struct opal_sg_list *)__pa(sg->next);
384 else
385 sg->next = NULL;
386
387 /*
388 * Convert num_entries to version/length format
389 * to satisfy OPAL.
390 */
391 sg->num_entries = (SG_LIST_VERSION << 56) |
392 (sg->num_entries * sizeof(struct opal_sg_entry) + 16);
393 }
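For concreteness about what this removed encoding produced (assuming 4 KB pages purely for illustration; PowerNV kernels often use 64 KB pages): the old opal_sg_entry is 16 bytes, so SG_ENTRIES_PER_NODE = (4096 - 16) / 16 = 255, and a full node's header word became (1UL << 56) | (255 * 16 + 16) = 0x0100000000001000. The consolidated helper drops the version in the top byte and stores only the byte length (0x1000 here), which is consistent with the removal of SG_LIST_VERSION earlier in this file.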
394 292
395 pr_alert("FLASH: Image is %u bytes\n", image_data.size); 293 pr_alert("FLASH: Image is %u bytes\n", image_data.size);
396 pr_alert("FLASH: Image update requested\n"); 294 pr_alert("FLASH: Image update requested\n");
397 pr_alert("FLASH: Image will be updated during system reboot\n"); 295 pr_alert("FLASH: Image will be updated during system reboot\n");
398 pr_alert("FLASH: This will take several minutes. Do not power off!\n"); 296 pr_alert("FLASH: This will take several minutes. Do not power off!\n");
399 297
400 flash: 298 flash:
401 rc = opal_update_flash(addr); 299 rc = opal_update_flash(addr);
402 300
403 invalid_img: 301 invalid_img:
404 return rc; 302 return rc;
405 } 303 }
406 304
407 /* 305 /*
408 * Show candidate image status 306 * Show candidate image status
409 */ 307 */
410 static ssize_t update_show(struct kobject *kobj, 308 static ssize_t update_show(struct kobject *kobj,
411 struct kobj_attribute *attr, char *buf) 309 struct kobj_attribute *attr, char *buf)
412 { 310 {
413 struct update_flash_t *const args_buf = &update_flash_data; 311 struct update_flash_t *const args_buf = &update_flash_data;
414 return sprintf(buf, "%d\n", args_buf->status); 312 return sprintf(buf, "%d\n", args_buf->status);
415 } 313 }
416 314
417 /* 315 /*
418 * Set update image flag 316 * Set update image flag
419 * 1 - Flash new image 317 * 1 - Flash new image
420 * 0 - Cancel flash request 318 * 0 - Cancel flash request
421 */ 319 */
422 static ssize_t update_store(struct kobject *kobj, 320 static ssize_t update_store(struct kobject *kobj,
423 struct kobj_attribute *attr, 321 struct kobj_attribute *attr,
424 const char *buf, size_t count) 322 const char *buf, size_t count)
425 { 323 {
426 struct update_flash_t *const args_buf = &update_flash_data; 324 struct update_flash_t *const args_buf = &update_flash_data;
427 int rc = count; 325 int rc = count;
428 326
429 mutex_lock(&image_data_mutex); 327 mutex_lock(&image_data_mutex);
430 328
431 switch (buf[0]) { 329 switch (buf[0]) {
432 case '0': 330 case '0':
433 if (args_buf->status == FLASH_IMG_READY) 331 if (args_buf->status == FLASH_IMG_READY)
434 opal_flash_update(FLASH_UPDATE_CANCEL); 332 opal_flash_update(FLASH_UPDATE_CANCEL);
435 args_buf->status = FLASH_NO_OP; 333 args_buf->status = FLASH_NO_OP;
436 break; 334 break;
437 case '1': 335 case '1':
438 /* Image is loaded? */ 336 /* Image is loaded? */
439 if (image_data.status == IMAGE_READY) 337 if (image_data.status == IMAGE_READY)
440 args_buf->status = 338 args_buf->status =
441 opal_flash_update(FLASH_UPDATE_INIT); 339 opal_flash_update(FLASH_UPDATE_INIT);
442 else 340 else
443 args_buf->status = FLASH_INVALID_IMG; 341 args_buf->status = FLASH_INVALID_IMG;
444 break; 342 break;
445 default: 343 default:
446 rc = -EINVAL; 344 rc = -EINVAL;
447 } 345 }
448 346
449 mutex_unlock(&image_data_mutex); 347 mutex_unlock(&image_data_mutex);
450 return rc; 348 return rc;
451 } 349 }
452 350
453 /* 351 /*
454 * Free image buffer 352 * Free image buffer
455 */ 353 */
456 static void free_image_buf(void) 354 static void free_image_buf(void)
457 { 355 {
458 void *addr; 356 void *addr;
459 int size; 357 int size;
460 358
461 addr = image_data.data; 359 addr = image_data.data;
462 size = PAGE_ALIGN(image_data.size); 360 size = PAGE_ALIGN(image_data.size);
463 while (size > 0) { 361 while (size > 0) {
464 ClearPageReserved(vmalloc_to_page(addr)); 362 ClearPageReserved(vmalloc_to_page(addr));
465 addr += PAGE_SIZE; 363 addr += PAGE_SIZE;
466 size -= PAGE_SIZE; 364 size -= PAGE_SIZE;
467 } 365 }
468 vfree(image_data.data); 366 vfree(image_data.data);
469 image_data.data = NULL; 367 image_data.data = NULL;
470 image_data.status = IMAGE_INVALID; 368 image_data.status = IMAGE_INVALID;
471 } 369 }
472 370
473 /* 371 /*
474 * Allocate image buffer. 372 * Allocate image buffer.
475 */ 373 */
476 static int alloc_image_buf(char *buffer, size_t count) 374 static int alloc_image_buf(char *buffer, size_t count)
477 { 375 {
478 void *addr; 376 void *addr;
479 int size; 377 int size;
480 378
481 if (count < sizeof(struct image_header_t)) { 379 if (count < sizeof(struct image_header_t)) {
482 pr_warn("FLASH: Invalid candidate image\n"); 380 pr_warn("FLASH: Invalid candidate image\n");
483 return -EINVAL; 381 return -EINVAL;
484 } 382 }
485 383
486 memcpy(&image_header, (void *)buffer, sizeof(struct image_header_t)); 384 memcpy(&image_header, (void *)buffer, sizeof(struct image_header_t));
487 image_data.size = be32_to_cpu(image_header.size); 385 image_data.size = be32_to_cpu(image_header.size);
488 pr_debug("FLASH: Candidate image size = %u\n", image_data.size); 386 pr_debug("FLASH: Candidate image size = %u\n", image_data.size);
489 387
490 if (image_data.size > MAX_IMAGE_SIZE) { 388 if (image_data.size > MAX_IMAGE_SIZE) {
491 pr_warn("FLASH: Too large image\n"); 389 pr_warn("FLASH: Too large image\n");
492 return -EINVAL; 390 return -EINVAL;
493 } 391 }
494 if (image_data.size < VALIDATE_BUF_SIZE) { 392 if (image_data.size < VALIDATE_BUF_SIZE) {
495 pr_warn("FLASH: Image is shorter than expected\n"); 393 pr_warn("FLASH: Image is shorter than expected\n");
496 return -EINVAL; 394 return -EINVAL;
497 } 395 }
498 396
499 image_data.data = vzalloc(PAGE_ALIGN(image_data.size)); 397 image_data.data = vzalloc(PAGE_ALIGN(image_data.size));
500 if (!image_data.data) { 398 if (!image_data.data) {
501 pr_err("%s : Failed to allocate memory\n", __func__); 399 pr_err("%s : Failed to allocate memory\n", __func__);
502 return -ENOMEM; 400 return -ENOMEM;
503 } 401 }
504 402
505 /* Pin memory */ 403 /* Pin memory */
506 addr = image_data.data; 404 addr = image_data.data;
507 size = PAGE_ALIGN(image_data.size); 405 size = PAGE_ALIGN(image_data.size);
508 while (size > 0) { 406 while (size > 0) {
509 SetPageReserved(vmalloc_to_page(addr)); 407 SetPageReserved(vmalloc_to_page(addr));
510 addr += PAGE_SIZE; 408 addr += PAGE_SIZE;
511 size -= PAGE_SIZE; 409 size -= PAGE_SIZE;
512 } 410 }
513 411
514 image_data.status = IMAGE_LOADING; 412 image_data.status = IMAGE_LOADING;
515 return 0; 413 return 0;
516 } 414 }
517 415
518 /* 416 /*
519 * Copy candidate image 417 * Copy candidate image
520 * 418 *
521 * Parse candidate image header to get total image size 419 * Parse candidate image header to get total image size
522 * and pre-allocate required memory. 420 * and pre-allocate required memory.
523 */ 421 */
524 static ssize_t image_data_write(struct file *filp, struct kobject *kobj, 422 static ssize_t image_data_write(struct file *filp, struct kobject *kobj,
525 struct bin_attribute *bin_attr, 423 struct bin_attribute *bin_attr,
526 char *buffer, loff_t pos, size_t count) 424 char *buffer, loff_t pos, size_t count)
527 { 425 {
528 int rc; 426 int rc;
529 427
530 mutex_lock(&image_data_mutex); 428 mutex_lock(&image_data_mutex);
531 429
532 /* New image ? */ 430 /* New image ? */
533 if (pos == 0) { 431 if (pos == 0) {
534 /* Free memory, if already allocated */ 432 /* Free memory, if already allocated */
535 if (image_data.data) 433 if (image_data.data)
536 free_image_buf(); 434 free_image_buf();
537 435
538 /* Cancel outstanding image update request */ 436 /* Cancel outstanding image update request */
539 if (update_flash_data.status == FLASH_IMG_READY) 437 if (update_flash_data.status == FLASH_IMG_READY)
540 opal_flash_update(FLASH_UPDATE_CANCEL); 438 opal_flash_update(FLASH_UPDATE_CANCEL);
541 439
542 /* Allocate memory */ 440 /* Allocate memory */
543 rc = alloc_image_buf(buffer, count); 441 rc = alloc_image_buf(buffer, count);
544 if (rc) 442 if (rc)
545 goto out; 443 goto out;
546 } 444 }
547 445
548 if (image_data.status != IMAGE_LOADING) { 446 if (image_data.status != IMAGE_LOADING) {
549 rc = -ENOMEM; 447 rc = -ENOMEM;
550 goto out; 448 goto out;
551 } 449 }
552 450
553 if ((pos + count) > image_data.size) { 451 if ((pos + count) > image_data.size) {
554 rc = -EINVAL; 452 rc = -EINVAL;
555 goto out; 453 goto out;
556 } 454 }
557 455
558 memcpy(image_data.data + pos, (void *)buffer, count); 456 memcpy(image_data.data + pos, (void *)buffer, count);
559 rc = count; 457 rc = count;
560 458
561 /* Set image status */ 459 /* Set image status */
562 if ((pos + count) == image_data.size) { 460 if ((pos + count) == image_data.size) {
563 pr_debug("FLASH: Candidate image loaded....\n"); 461 pr_debug("FLASH: Candidate image loaded....\n");
564 image_data.status = IMAGE_READY; 462 image_data.status = IMAGE_READY;
565 } 463 }
566 464
567 out: 465 out:
568 mutex_unlock(&image_data_mutex); 466 mutex_unlock(&image_data_mutex);
569 return rc; 467 return rc;
570 } 468 }
571 469
572 /* 470 /*
573 * sysfs interface : 471 * sysfs interface :
574 * OPAL uses the sysfs files below for code update. 472 * OPAL uses the sysfs files below for code update.
575 * We create these files under /sys/firmware/opal. 473 * We create these files under /sys/firmware/opal.
576 * 474 *
577 * image : Interface to load candidate firmware image 475 * image : Interface to load candidate firmware image
578 * validate_flash : Validate firmware image 476 * validate_flash : Validate firmware image
579 * manage_flash : Commit/Reject firmware image 477 * manage_flash : Commit/Reject firmware image
580 * update_flash : Flash new firmware image 478 * update_flash : Flash new firmware image
581 * 479 *
582 */ 480 */
583 static struct bin_attribute image_data_attr = { 481 static struct bin_attribute image_data_attr = {
584 .attr = {.name = "image", .mode = 0200}, 482 .attr = {.name = "image", .mode = 0200},
585 .size = MAX_IMAGE_SIZE, /* Limit image size */ 483 .size = MAX_IMAGE_SIZE, /* Limit image size */
586 .write = image_data_write, 484 .write = image_data_write,
587 }; 485 };
588 486
589 static struct kobj_attribute validate_attribute = 487 static struct kobj_attribute validate_attribute =
590 __ATTR(validate_flash, 0600, validate_show, validate_store); 488 __ATTR(validate_flash, 0600, validate_show, validate_store);
591 489
592 static struct kobj_attribute manage_attribute = 490 static struct kobj_attribute manage_attribute =
593 __ATTR(manage_flash, 0600, manage_show, manage_store); 491 __ATTR(manage_flash, 0600, manage_show, manage_store);
594 492
595 static struct kobj_attribute update_attribute = 493 static struct kobj_attribute update_attribute =
596 __ATTR(update_flash, 0600, update_show, update_store); 494 __ATTR(update_flash, 0600, update_show, update_store);
597 495
598 static struct attribute *image_op_attrs[] = { 496 static struct attribute *image_op_attrs[] = {
599 &validate_attribute.attr, 497 &validate_attribute.attr,
600 &manage_attribute.attr, 498 &manage_attribute.attr,
601 &update_attribute.attr, 499 &update_attribute.attr,
602 NULL /* need to NULL terminate the list of attributes */ 500 NULL /* need to NULL terminate the list of attributes */
603 }; 501 };
604 502
605 static struct attribute_group image_op_attr_group = { 503 static struct attribute_group image_op_attr_group = {
606 .attrs = image_op_attrs, 504 .attrs = image_op_attrs,
607 }; 505 };
608 506
609 void __init opal_flash_init(void) 507 void __init opal_flash_init(void)
610 { 508 {
611 int ret; 509 int ret;
612 510
613 /* Allocate validate image buffer */ 511 /* Allocate validate image buffer */
614 validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL); 512 validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL);
615 if (!validate_flash_data.buf) { 513 if (!validate_flash_data.buf) {
616 pr_err("%s : Failed to allocate memory\n", __func__); 514 pr_err("%s : Failed to allocate memory\n", __func__);
617 return; 515 return;
618 } 516 }
619 517
620 /* Make sure /sys/firmware/opal directory is created */ 518 /* Make sure /sys/firmware/opal directory is created */
621 if (!opal_kobj) { 519 if (!opal_kobj) {
622 pr_warn("FLASH: opal kobject is not available\n"); 520 pr_warn("FLASH: opal kobject is not available\n");
623 goto nokobj; 521 goto nokobj;
624 } 522 }
625 523
626 /* Create the sysfs files */ 524 /* Create the sysfs files */
627 ret = sysfs_create_group(opal_kobj, &image_op_attr_group); 525 ret = sysfs_create_group(opal_kobj, &image_op_attr_group);
628 if (ret) { 526 if (ret) {
629 pr_warn("FLASH: Failed to create sysfs files\n"); 527 pr_warn("FLASH: Failed to create sysfs files\n");
630 goto nokobj; 528 goto nokobj;
631 } 529 }
632 530
633 ret = sysfs_create_bin_file(opal_kobj, &image_data_attr); 531 ret = sysfs_create_bin_file(opal_kobj, &image_data_attr);
634 if (ret) { 532 if (ret) {
635 pr_warn("FLASH: Failed to create sysfs files\n"); 533 pr_warn("FLASH: Failed to create sysfs files\n");
636 goto nosysfs_file; 534 goto nosysfs_file;
637 } 535 }
638 536
639 /* Set default status */ 537 /* Set default status */
640 validate_flash_data.status = FLASH_NO_OP; 538 validate_flash_data.status = FLASH_NO_OP;
641 manage_flash_data.status = FLASH_NO_OP; 539 manage_flash_data.status = FLASH_NO_OP;
642 update_flash_data.status = FLASH_NO_OP; 540 update_flash_data.status = FLASH_NO_OP;
643 image_data.status = IMAGE_INVALID; 541 image_data.status = IMAGE_INVALID;
644 return; 542 return;
645 543
646 nosysfs_file: 544 nosysfs_file:
647 sysfs_remove_group(opal_kobj, &image_op_attr_group); 545 sysfs_remove_group(opal_kobj, &image_op_attr_group);
648 546
649 nokobj: 547 nokobj:
650 kfree(validate_flash_data.buf); 548 kfree(validate_flash_data.buf);
651 return; 549 return;
652 } 550 }
653 551
arch/powerpc/platforms/powernv/opal.c
1 /* 1 /*
2 * PowerNV OPAL high level interfaces 2 * PowerNV OPAL high level interfaces
3 * 3 *
4 * Copyright 2011 IBM Corp. 4 * Copyright 2011 IBM Corp.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12 #undef DEBUG 12 #undef DEBUG
13 13
14 #include <linux/types.h> 14 #include <linux/types.h>
15 #include <linux/of.h> 15 #include <linux/of.h>
16 #include <linux/of_fdt.h> 16 #include <linux/of_fdt.h>
17 #include <linux/of_platform.h> 17 #include <linux/of_platform.h>
18 #include <linux/interrupt.h> 18 #include <linux/interrupt.h>
19 #include <linux/notifier.h> 19 #include <linux/notifier.h>
20 #include <linux/slab.h> 20 #include <linux/slab.h>
21 #include <linux/sched.h> 21 #include <linux/sched.h>
22 #include <linux/kobject.h> 22 #include <linux/kobject.h>
23 #include <linux/delay.h> 23 #include <linux/delay.h>
24 #include <linux/memblock.h> 24 #include <linux/memblock.h>
25 #include <asm/opal.h> 25 #include <asm/opal.h>
26 #include <asm/firmware.h> 26 #include <asm/firmware.h>
27 #include <asm/mce.h> 27 #include <asm/mce.h>
28 28
29 #include "powernv.h" 29 #include "powernv.h"
30 30
31 /* /sys/firmware/opal */ 31 /* /sys/firmware/opal */
32 struct kobject *opal_kobj; 32 struct kobject *opal_kobj;
33 33
34 struct opal { 34 struct opal {
35 u64 base; 35 u64 base;
36 u64 entry; 36 u64 entry;
37 u64 size; 37 u64 size;
38 } opal; 38 } opal;
39 39
40 struct mcheck_recoverable_range { 40 struct mcheck_recoverable_range {
41 u64 start_addr; 41 u64 start_addr;
42 u64 end_addr; 42 u64 end_addr;
43 u64 recover_addr; 43 u64 recover_addr;
44 }; 44 };
45 45
46 static struct mcheck_recoverable_range *mc_recoverable_range; 46 static struct mcheck_recoverable_range *mc_recoverable_range;
47 static int mc_recoverable_range_len; 47 static int mc_recoverable_range_len;
48 48
49 struct device_node *opal_node; 49 struct device_node *opal_node;
50 static DEFINE_SPINLOCK(opal_write_lock); 50 static DEFINE_SPINLOCK(opal_write_lock);
51 extern u64 opal_mc_secondary_handler[]; 51 extern u64 opal_mc_secondary_handler[];
52 static unsigned int *opal_irqs; 52 static unsigned int *opal_irqs;
53 static unsigned int opal_irq_count; 53 static unsigned int opal_irq_count;
54 static ATOMIC_NOTIFIER_HEAD(opal_notifier_head); 54 static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);
55 static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX]; 55 static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
56 static DEFINE_SPINLOCK(opal_notifier_lock); 56 static DEFINE_SPINLOCK(opal_notifier_lock);
57 static uint64_t last_notified_mask = 0x0ul; 57 static uint64_t last_notified_mask = 0x0ul;
58 static atomic_t opal_notifier_hold = ATOMIC_INIT(0); 58 static atomic_t opal_notifier_hold = ATOMIC_INIT(0);
59 59
60 int __init early_init_dt_scan_opal(unsigned long node, 60 int __init early_init_dt_scan_opal(unsigned long node,
61 const char *uname, int depth, void *data) 61 const char *uname, int depth, void *data)
62 { 62 {
63 const void *basep, *entryp, *sizep; 63 const void *basep, *entryp, *sizep;
64 unsigned long basesz, entrysz, runtimesz; 64 unsigned long basesz, entrysz, runtimesz;
65 65
66 if (depth != 1 || strcmp(uname, "ibm,opal") != 0) 66 if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
67 return 0; 67 return 0;
68 68
69 basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz); 69 basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
70 entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz); 70 entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
71 sizep = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz); 71 sizep = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz);
72 72
73 if (!basep || !entryp || !sizep) 73 if (!basep || !entryp || !sizep)
74 return 1; 74 return 1;
75 75
76 opal.base = of_read_number(basep, basesz/4); 76 opal.base = of_read_number(basep, basesz/4);
77 opal.entry = of_read_number(entryp, entrysz/4); 77 opal.entry = of_read_number(entryp, entrysz/4);
78 opal.size = of_read_number(sizep, runtimesz/4); 78 opal.size = of_read_number(sizep, runtimesz/4);
79 79
80 pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%ld)\n", 80 pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%ld)\n",
81 opal.base, basep, basesz); 81 opal.base, basep, basesz);
82 pr_debug("OPAL Entry = 0x%llx (entryp=%p entrysz=%ld)\n", 82 pr_debug("OPAL Entry = 0x%llx (entryp=%p entrysz=%ld)\n",
83 opal.entry, entryp, entrysz); 83 opal.entry, entryp, entrysz);
84 pr_debug("OPAL Size = 0x%llx (sizep=%p runtimesz=%ld)\n", 84 pr_debug("OPAL Size = 0x%llx (sizep=%p runtimesz=%ld)\n",
85 opal.size, sizep, runtimesz); 85 opal.size, sizep, runtimesz);
86 86
87 powerpc_firmware_features |= FW_FEATURE_OPAL; 87 powerpc_firmware_features |= FW_FEATURE_OPAL;
88 if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) { 88 if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
89 powerpc_firmware_features |= FW_FEATURE_OPALv2; 89 powerpc_firmware_features |= FW_FEATURE_OPALv2;
90 powerpc_firmware_features |= FW_FEATURE_OPALv3; 90 powerpc_firmware_features |= FW_FEATURE_OPALv3;
91 printk("OPAL V3 detected !\n"); 91 printk("OPAL V3 detected !\n");
92 } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) { 92 } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
93 powerpc_firmware_features |= FW_FEATURE_OPALv2; 93 powerpc_firmware_features |= FW_FEATURE_OPALv2;
94 printk("OPAL V2 detected !\n"); 94 printk("OPAL V2 detected !\n");
95 } else { 95 } else {
96 printk("OPAL V1 detected !\n"); 96 printk("OPAL V1 detected !\n");
97 } 97 }
98 98
99 return 1; 99 return 1;
100 } 100 }
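
The scan above sizes each property's cell count from its length (basesz/4 and friends) and folds the cells with of_read_number(). A minimal sketch of that folding under the usual big-endian cell encoding; the helper name read_cells is illustrative, of_read_number() is the real helper used above.

#include <linux/types.h>
#include <asm/byteorder.h>

/* Fold 'cells' big-endian 32-bit device-tree cells into one 64-bit value,
 * which is what of_read_number() does for e.g. "opal-base-address".
 */
static u64 read_cells(const __be32 *prop, int cells)
{
	u64 val = 0;

	while (cells--)
		val = (val << 32) | be32_to_cpup(prop++);
	return val;
}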
101 101
102 int __init early_init_dt_scan_recoverable_ranges(unsigned long node, 102 int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
103 const char *uname, int depth, void *data) 103 const char *uname, int depth, void *data)
104 { 104 {
105 unsigned long i, psize, size; 105 unsigned long i, psize, size;
106 const __be32 *prop; 106 const __be32 *prop;
107 107
108 if (depth != 1 || strcmp(uname, "ibm,opal") != 0) 108 if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
109 return 0; 109 return 0;
110 110
111 prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize); 111 prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);
112 112
113 if (!prop) 113 if (!prop)
114 return 1; 114 return 1;
115 115
116 pr_debug("Found machine check recoverable ranges.\n"); 116 pr_debug("Found machine check recoverable ranges.\n");
117 117
118 /* 118 /*
119 * Calculate number of available entries. 119 * Calculate number of available entries.
120 * 120 *
121 * Each recoverable address range entry is (start address, len, 121 * Each recoverable address range entry is (start address, len,
122 * recovery address), 2 cells each for start and recovery address, 122 * recovery address), 2 cells each for start and recovery address,
123 * 1 cell for len, totalling 5 cells per entry. 123 * 1 cell for len, totalling 5 cells per entry.
124 */ 124 */
125 mc_recoverable_range_len = psize / (sizeof(*prop) * 5); 125 mc_recoverable_range_len = psize / (sizeof(*prop) * 5);
126 126
127 /* Sanity check */ 127 /* Sanity check */
128 if (!mc_recoverable_range_len) 128 if (!mc_recoverable_range_len)
129 return 1; 129 return 1;
130 130
131 /* Size required to hold all the entries. */ 131 /* Size required to hold all the entries. */
132 size = mc_recoverable_range_len * 132 size = mc_recoverable_range_len *
133 sizeof(struct mcheck_recoverable_range); 133 sizeof(struct mcheck_recoverable_range);
134 134
135 /* 135 /*
136 * Allocate a buffer to hold the MC recoverable ranges. We would be 136 * Allocate a buffer to hold the MC recoverable ranges. We would be
137 * accessing them in real mode, hence it needs to be within 137 * accessing them in real mode, hence it needs to be within
138 * RMO region. 138 * RMO region.
139 */ 139 */
140 mc_recoverable_range = __va(memblock_alloc_base(size, __alignof__(u64), 140 mc_recoverable_range = __va(memblock_alloc_base(size, __alignof__(u64),
141 ppc64_rma_size)); 141 ppc64_rma_size));
142 memset(mc_recoverable_range, 0, size); 142 memset(mc_recoverable_range, 0, size);
143 143
144 for (i = 0; i < mc_recoverable_range_len; i++) { 144 for (i = 0; i < mc_recoverable_range_len; i++) {
145 mc_recoverable_range[i].start_addr = 145 mc_recoverable_range[i].start_addr =
146 of_read_number(prop + (i * 5) + 0, 2); 146 of_read_number(prop + (i * 5) + 0, 2);
147 mc_recoverable_range[i].end_addr = 147 mc_recoverable_range[i].end_addr =
148 mc_recoverable_range[i].start_addr + 148 mc_recoverable_range[i].start_addr +
149 of_read_number(prop + (i * 5) + 2, 1); 149 of_read_number(prop + (i * 5) + 2, 1);
150 mc_recoverable_range[i].recover_addr = 150 mc_recoverable_range[i].recover_addr =
151 of_read_number(prop + (i * 5) + 3, 2); 151 of_read_number(prop + (i * 5) + 3, 2);
152 152
153 pr_debug("Machine check recoverable range: %llx..%llx: %llx\n", 153 pr_debug("Machine check recoverable range: %llx..%llx: %llx\n",
154 mc_recoverable_range[i].start_addr, 154 mc_recoverable_range[i].start_addr,
155 mc_recoverable_range[i].end_addr, 155 mc_recoverable_range[i].end_addr,
156 mc_recoverable_range[i].recover_addr); 156 mc_recoverable_range[i].recover_addr);
157 } 157 }
158 return 1; 158 return 1;
159 } 159 }
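
To make the 5-cell layout described in the comment above concrete, here is a small sketch of decoding one entry; struct mc_range and decode_mc_range are illustrative names, but the offsets match the loop above.

#include <linux/of.h>
#include <linux/types.h>

struct mc_range {
	u64 start, end, recover;
};

/* Decode entry i of "mcheck-recoverable-ranges": 2 cells of start address,
 * 1 cell of length, 2 cells of recovery address, as parsed above.
 */
static void decode_mc_range(const __be32 *prop, int i, struct mc_range *r)
{
	r->start   = of_read_number(prop + i * 5 + 0, 2);
	r->end     = r->start + of_read_number(prop + i * 5 + 2, 1);
	r->recover = of_read_number(prop + i * 5 + 3, 2);
}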
160 160
161 static int __init opal_register_exception_handlers(void) 161 static int __init opal_register_exception_handlers(void)
162 { 162 {
163 #ifdef __BIG_ENDIAN__ 163 #ifdef __BIG_ENDIAN__
164 u64 glue; 164 u64 glue;
165 165
166 if (!(powerpc_firmware_features & FW_FEATURE_OPAL)) 166 if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
167 return -ENODEV; 167 return -ENODEV;
168 168
169 /* Hook up some exception handlers, except machine check. We use the 169 /* Hook up some exception handlers, except machine check. We use the
170 * fwnmi area at 0x7000 to provide the glue space to OPAL 170 * fwnmi area at 0x7000 to provide the glue space to OPAL
171 */ 171 */
172 glue = 0x7000; 172 glue = 0x7000;
173 opal_register_exception_handler(OPAL_HYPERVISOR_MAINTENANCE_HANDLER, 173 opal_register_exception_handler(OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
174 0, glue); 174 0, glue);
175 glue += 128; 175 glue += 128;
176 opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue); 176 opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
177 #endif 177 #endif
178 178
179 return 0; 179 return 0;
180 } 180 }
181 181
182 early_initcall(opal_register_exception_handlers); 182 early_initcall(opal_register_exception_handlers);
183 183
184 int opal_notifier_register(struct notifier_block *nb) 184 int opal_notifier_register(struct notifier_block *nb)
185 { 185 {
186 if (!nb) { 186 if (!nb) {
187 pr_warning("%s: Invalid argument (%p)\n", 187 pr_warning("%s: Invalid argument (%p)\n",
188 __func__, nb); 188 __func__, nb);
189 return -EINVAL; 189 return -EINVAL;
190 } 190 }
191 191
192 atomic_notifier_chain_register(&opal_notifier_head, nb); 192 atomic_notifier_chain_register(&opal_notifier_head, nb);
193 return 0; 193 return 0;
194 } 194 }
195 EXPORT_SYMBOL_GPL(opal_notifier_register); 195 EXPORT_SYMBOL_GPL(opal_notifier_register);
196 196
197 int opal_notifier_unregister(struct notifier_block *nb) 197 int opal_notifier_unregister(struct notifier_block *nb)
198 { 198 {
199 if (!nb) { 199 if (!nb) {
200 pr_warning("%s: Invalid argument (%p)\n", 200 pr_warning("%s: Invalid argument (%p)\n",
201 __func__, nb); 201 __func__, nb);
202 return -EINVAL; 202 return -EINVAL;
203 } 203 }
204 204
205 atomic_notifier_chain_unregister(&opal_notifier_head, nb); 205 atomic_notifier_chain_unregister(&opal_notifier_head, nb);
206 return 0; 206 return 0;
207 } 207 }
208 EXPORT_SYMBOL_GPL(opal_notifier_unregister); 208 EXPORT_SYMBOL_GPL(opal_notifier_unregister);
209 209
210 static void opal_do_notifier(uint64_t events) 210 static void opal_do_notifier(uint64_t events)
211 { 211 {
212 unsigned long flags; 212 unsigned long flags;
213 uint64_t changed_mask; 213 uint64_t changed_mask;
214 214
215 if (atomic_read(&opal_notifier_hold)) 215 if (atomic_read(&opal_notifier_hold))
216 return; 216 return;
217 217
218 spin_lock_irqsave(&opal_notifier_lock, flags); 218 spin_lock_irqsave(&opal_notifier_lock, flags);
219 changed_mask = last_notified_mask ^ events; 219 changed_mask = last_notified_mask ^ events;
220 last_notified_mask = events; 220 last_notified_mask = events;
221 spin_unlock_irqrestore(&opal_notifier_lock, flags); 221 spin_unlock_irqrestore(&opal_notifier_lock, flags);
222 222
223 /* 223 /*
224 * Pass both the event bits and the changed bits so the 224 * Pass both the event bits and the changed bits so the
225 * callback has enough information. 225 * callback has enough information.
226 */ 226 */
227 atomic_notifier_call_chain(&opal_notifier_head, 227 atomic_notifier_call_chain(&opal_notifier_head,
228 events, (void *)changed_mask); 228 events, (void *)changed_mask);
229 } 229 }
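
For reference, a hedged sketch of a subscriber to this chain: the callback receives the full event mask as 'events' and the changed bits through the data pointer, exactly as passed by the call above. All names here are illustrative.

#include <linux/notifier.h>
#include <linux/printk.h>
#include <asm/opal.h>

/* Illustrative subscriber: 'events' is the current OPAL event mask,
 * 'data' carries the bits that changed since the last notification.
 */
static int example_opal_event(struct notifier_block *nb,
			      unsigned long events, void *data)
{
	uint64_t changed = (unsigned long)data;

	if (changed & events & OPAL_EVENT_MSG_PENDING)
		pr_info("OPAL message pending\n");
	return 0;
}

static struct notifier_block example_opal_nb = {
	.notifier_call = example_opal_event,
};

static int __init example_init(void)
{
	return opal_notifier_register(&example_opal_nb);
}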
230 230
231 void opal_notifier_update_evt(uint64_t evt_mask, 231 void opal_notifier_update_evt(uint64_t evt_mask,
232 uint64_t evt_val) 232 uint64_t evt_val)
233 { 233 {
234 unsigned long flags; 234 unsigned long flags;
235 235
236 spin_lock_irqsave(&opal_notifier_lock, flags); 236 spin_lock_irqsave(&opal_notifier_lock, flags);
237 last_notified_mask &= ~evt_mask; 237 last_notified_mask &= ~evt_mask;
238 last_notified_mask |= evt_val; 238 last_notified_mask |= evt_val;
239 spin_unlock_irqrestore(&opal_notifier_lock, flags); 239 spin_unlock_irqrestore(&opal_notifier_lock, flags);
240 } 240 }
241 241
242 void opal_notifier_enable(void) 242 void opal_notifier_enable(void)
243 { 243 {
244 int64_t rc; 244 int64_t rc;
245 __be64 evt = 0; 245 __be64 evt = 0;
246 246
247 atomic_set(&opal_notifier_hold, 0); 247 atomic_set(&opal_notifier_hold, 0);
248 248
249 /* Process pending events */ 249 /* Process pending events */
250 rc = opal_poll_events(&evt); 250 rc = opal_poll_events(&evt);
251 if (rc == OPAL_SUCCESS && evt) 251 if (rc == OPAL_SUCCESS && evt)
252 opal_do_notifier(be64_to_cpu(evt)); 252 opal_do_notifier(be64_to_cpu(evt));
253 } 253 }
254 254
255 void opal_notifier_disable(void) 255 void opal_notifier_disable(void)
256 { 256 {
257 atomic_set(&opal_notifier_hold, 1); 257 atomic_set(&opal_notifier_hold, 1);
258 } 258 }
259 259
260 /* 260 /*
261 * Opal message notifier based on message type. Allow subscribers to get 261 * Opal message notifier based on message type. Allow subscribers to get
262 * notified for a specific message type. 262 * notified for a specific message type.
263 */ 263 */
264 int opal_message_notifier_register(enum OpalMessageType msg_type, 264 int opal_message_notifier_register(enum OpalMessageType msg_type,
265 struct notifier_block *nb) 265 struct notifier_block *nb)
266 { 266 {
267 if (!nb) { 267 if (!nb) {
268 pr_warning("%s: Invalid argument (%p)\n", 268 pr_warning("%s: Invalid argument (%p)\n",
269 __func__, nb); 269 __func__, nb);
270 return -EINVAL; 270 return -EINVAL;
271 } 271 }
272 if (msg_type >= OPAL_MSG_TYPE_MAX) { 272 if (msg_type >= OPAL_MSG_TYPE_MAX) {
273 pr_warning("%s: Invalid message type argument (%d)\n", 273 pr_warning("%s: Invalid message type argument (%d)\n",
274 __func__, msg_type); 274 __func__, msg_type);
275 return -EINVAL; 275 return -EINVAL;
276 } 276 }
277 return atomic_notifier_chain_register( 277 return atomic_notifier_chain_register(
278 &opal_msg_notifier_head[msg_type], nb); 278 &opal_msg_notifier_head[msg_type], nb);
279 } 279 }
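
A minimal sketch of a per-type subscriber, assuming a message type such as OPAL_MSG_ASYNC_COMP is defined in opal.h; the callback gets the message type plus a pointer to the struct opal_msg fetched by opal_handle_message() below. The names are illustrative.

#include <linux/notifier.h>
#include <linux/printk.h>
#include <asm/opal.h>

/* Illustrative consumer of one OPAL message type. */
static int example_msg_notify(struct notifier_block *nb,
			      unsigned long msg_type, void *msg)
{
	pr_info("got OPAL message of type %lu\n", msg_type);
	return 0;
}

static struct notifier_block example_msg_nb = {
	.notifier_call = example_msg_notify,
};

static int __init example_msg_init(void)
{
	return opal_message_notifier_register(OPAL_MSG_ASYNC_COMP,
					      &example_msg_nb);
}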
280 280
281 static void opal_message_do_notify(uint32_t msg_type, void *msg) 281 static void opal_message_do_notify(uint32_t msg_type, void *msg)
282 { 282 {
283 /* notify subscribers */ 283 /* notify subscribers */
284 atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type], 284 atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
285 msg_type, msg); 285 msg_type, msg);
286 } 286 }
287 287
288 static void opal_handle_message(void) 288 static void opal_handle_message(void)
289 { 289 {
290 s64 ret; 290 s64 ret;
291 /* 291 /*
292 * TODO: pre-allocate a message buffer depending on opal-msg-size 292 * TODO: pre-allocate a message buffer depending on opal-msg-size
293 * value in /proc/device-tree. 293 * value in /proc/device-tree.
294 */ 294 */
295 static struct opal_msg msg; 295 static struct opal_msg msg;
296 u32 type; 296 u32 type;
297 297
298 ret = opal_get_msg(__pa(&msg), sizeof(msg)); 298 ret = opal_get_msg(__pa(&msg), sizeof(msg));
299 /* No opal message pending. */ 299 /* No opal message pending. */
300 if (ret == OPAL_RESOURCE) 300 if (ret == OPAL_RESOURCE)
301 return; 301 return;
302 302
303 /* check for errors. */ 303 /* check for errors. */
304 if (ret) { 304 if (ret) {
305 pr_warning("%s: Failed to retrieve opal message, err=%lld\n", 305 pr_warning("%s: Failed to retrieve opal message, err=%lld\n",
306 __func__, ret); 306 __func__, ret);
307 return; 307 return;
308 } 308 }
309 309
310 type = be32_to_cpu(msg.msg_type); 310 type = be32_to_cpu(msg.msg_type);
311 311
312 /* Sanity check */ 312 /* Sanity check */
313 if (type >= OPAL_MSG_TYPE_MAX) { 313 if (type >= OPAL_MSG_TYPE_MAX) {
314 pr_warning("%s: Unknown message type: %u\n", __func__, type); 314 pr_warning("%s: Unknown message type: %u\n", __func__, type);
315 return; 315 return;
316 } 316 }
317 opal_message_do_notify(type, (void *)&msg); 317 opal_message_do_notify(type, (void *)&msg);
318 } 318 }
319 319
320 static int opal_message_notify(struct notifier_block *nb, 320 static int opal_message_notify(struct notifier_block *nb,
321 unsigned long events, void *change) 321 unsigned long events, void *change)
322 { 322 {
323 if (events & OPAL_EVENT_MSG_PENDING) 323 if (events & OPAL_EVENT_MSG_PENDING)
324 opal_handle_message(); 324 opal_handle_message();
325 return 0; 325 return 0;
326 } 326 }
327 327
328 static struct notifier_block opal_message_nb = { 328 static struct notifier_block opal_message_nb = {
329 .notifier_call = opal_message_notify, 329 .notifier_call = opal_message_notify,
330 .next = NULL, 330 .next = NULL,
331 .priority = 0, 331 .priority = 0,
332 }; 332 };
333 333
334 static int __init opal_message_init(void) 334 static int __init opal_message_init(void)
335 { 335 {
336 int ret, i; 336 int ret, i;
337 337
338 for (i = 0; i < OPAL_MSG_TYPE_MAX; i++) 338 for (i = 0; i < OPAL_MSG_TYPE_MAX; i++)
339 ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]); 339 ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]);
340 340
341 ret = opal_notifier_register(&opal_message_nb); 341 ret = opal_notifier_register(&opal_message_nb);
342 if (ret) { 342 if (ret) {
343 pr_err("%s: Can't register OPAL event notifier (%d)\n", 343 pr_err("%s: Can't register OPAL event notifier (%d)\n",
344 __func__, ret); 344 __func__, ret);
345 return ret; 345 return ret;
346 } 346 }
347 return 0; 347 return 0;
348 } 348 }
349 early_initcall(opal_message_init); 349 early_initcall(opal_message_init);
350 350
351 int opal_get_chars(uint32_t vtermno, char *buf, int count) 351 int opal_get_chars(uint32_t vtermno, char *buf, int count)
352 { 352 {
353 s64 rc; 353 s64 rc;
354 __be64 evt, len; 354 __be64 evt, len;
355 355
356 if (!opal.entry) 356 if (!opal.entry)
357 return -ENODEV; 357 return -ENODEV;
358 opal_poll_events(&evt); 358 opal_poll_events(&evt);
359 if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0) 359 if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
360 return 0; 360 return 0;
361 len = cpu_to_be64(count); 361 len = cpu_to_be64(count);
362 rc = opal_console_read(vtermno, &len, buf); 362 rc = opal_console_read(vtermno, &len, buf);
363 if (rc == OPAL_SUCCESS) 363 if (rc == OPAL_SUCCESS)
364 return be64_to_cpu(len); 364 return be64_to_cpu(len);
365 return 0; 365 return 0;
366 } 366 }
367 367
368 int opal_put_chars(uint32_t vtermno, const char *data, int total_len) 368 int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
369 { 369 {
370 int written = 0; 370 int written = 0;
371 __be64 olen; 371 __be64 olen;
372 s64 len, rc; 372 s64 len, rc;
373 unsigned long flags; 373 unsigned long flags;
374 __be64 evt; 374 __be64 evt;
375 375
376 if (!opal.entry) 376 if (!opal.entry)
377 return -ENODEV; 377 return -ENODEV;
378 378
379 /* We want put_chars to be atomic to avoid mangling of hvsi 379 /* We want put_chars to be atomic to avoid mangling of hvsi
380 * packets. To do that, we first test for room and return 380 * packets. To do that, we first test for room and return
381 * -EAGAIN if there isn't enough. 381 * -EAGAIN if there isn't enough.
382 * 382 *
383 * Unfortunately, opal_console_write_buffer_space() doesn't 383 * Unfortunately, opal_console_write_buffer_space() doesn't
384 * appear to work on opal v1, so we just assume there is 384 * appear to work on opal v1, so we just assume there is
385 * enough room and be done with it 385 * enough room and be done with it
386 */ 386 */
387 spin_lock_irqsave(&opal_write_lock, flags); 387 spin_lock_irqsave(&opal_write_lock, flags);
388 if (firmware_has_feature(FW_FEATURE_OPALv2)) { 388 if (firmware_has_feature(FW_FEATURE_OPALv2)) {
389 rc = opal_console_write_buffer_space(vtermno, &olen); 389 rc = opal_console_write_buffer_space(vtermno, &olen);
390 len = be64_to_cpu(olen); 390 len = be64_to_cpu(olen);
391 if (rc || len < total_len) { 391 if (rc || len < total_len) {
392 spin_unlock_irqrestore(&opal_write_lock, flags); 392 spin_unlock_irqrestore(&opal_write_lock, flags);
393 /* Closed -> drop characters */ 393 /* Closed -> drop characters */
394 if (rc) 394 if (rc)
395 return total_len; 395 return total_len;
396 opal_poll_events(NULL); 396 opal_poll_events(NULL);
397 return -EAGAIN; 397 return -EAGAIN;
398 } 398 }
399 } 399 }
400 400
401 /* We still try to handle partial completions, though they 401 /* We still try to handle partial completions, though they
402 * should no longer happen. 402 * should no longer happen.
403 */ 403 */
404 rc = OPAL_BUSY; 404 rc = OPAL_BUSY;
405 while(total_len > 0 && (rc == OPAL_BUSY || 405 while(total_len > 0 && (rc == OPAL_BUSY ||
406 rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) { 406 rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
407 olen = cpu_to_be64(total_len); 407 olen = cpu_to_be64(total_len);
408 rc = opal_console_write(vtermno, &olen, data); 408 rc = opal_console_write(vtermno, &olen, data);
409 len = be64_to_cpu(olen); 409 len = be64_to_cpu(olen);
410 410
411 /* Closed or other error drop */ 411 /* Closed or other error drop */
412 if (rc != OPAL_SUCCESS && rc != OPAL_BUSY && 412 if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
413 rc != OPAL_BUSY_EVENT) { 413 rc != OPAL_BUSY_EVENT) {
414 written = total_len; 414 written = total_len;
415 break; 415 break;
416 } 416 }
417 if (rc == OPAL_SUCCESS) { 417 if (rc == OPAL_SUCCESS) {
418 total_len -= len; 418 total_len -= len;
419 data += len; 419 data += len;
420 written += len; 420 written += len;
421 } 421 }
422 /* This is a bit nasty but we need that for the console to 422 /* This is a bit nasty but we need that for the console to
423 * flush when there aren't any interrupts. We will clean 423 * flush when there aren't any interrupts. We will clean
424 * things a bit later to limit that to synchronous path 424 * things a bit later to limit that to synchronous path
425 * such as the kernel console and xmon/udbg 425 * such as the kernel console and xmon/udbg
426 */ 426 */
427 do 427 do
428 opal_poll_events(&evt); 428 opal_poll_events(&evt);
429 while(rc == OPAL_SUCCESS && 429 while(rc == OPAL_SUCCESS &&
430 (be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT)); 430 (be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
431 } 431 }
432 spin_unlock_irqrestore(&opal_write_lock, flags); 432 spin_unlock_irqrestore(&opal_write_lock, flags);
433 return written; 433 return written;
434 } 434 }
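
Because opal_put_chars() reports a full firmware buffer as -EAGAIN rather than writing a partial packet (see the comment at the top of the function), callers are expected to retry. A hedged sketch of such a retry loop; the 1 ms back-off is an arbitrary choice, not taken from this code.

#include <linux/delay.h>
#include <linux/errno.h>
#include <asm/opal.h>

/* Illustrative: keep retrying a console write until OPAL accepts it. */
static void example_console_write(uint32_t vtermno,
				  const char *buf, int len)
{
	int rc;

	do {
		rc = opal_put_chars(vtermno, buf, len);
		if (rc == -EAGAIN)
			mdelay(1);	/* give firmware time to drain */
	} while (rc == -EAGAIN);
}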
435 435
436 static int opal_recover_mce(struct pt_regs *regs, 436 static int opal_recover_mce(struct pt_regs *regs,
437 struct machine_check_event *evt) 437 struct machine_check_event *evt)
438 { 438 {
439 int recovered = 0; 439 int recovered = 0;
440 uint64_t ea = get_mce_fault_addr(evt); 440 uint64_t ea = get_mce_fault_addr(evt);
441 441
442 if (!(regs->msr & MSR_RI)) { 442 if (!(regs->msr & MSR_RI)) {
443 /* If MSR_RI isn't set, we cannot recover */ 443 /* If MSR_RI isn't set, we cannot recover */
444 recovered = 0; 444 recovered = 0;
445 } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) { 445 } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
446 /* Platform corrected itself */ 446 /* Platform corrected itself */
447 recovered = 1; 447 recovered = 1;
448 } else if (ea && !is_kernel_addr(ea)) { 448 } else if (ea && !is_kernel_addr(ea)) {
449 /* 449 /*
450 * Faulting address is not in kernel text. We should be fine. 450 * Faulting address is not in kernel text. We should be fine.
451 * We need to find which process uses this address. 451 * We need to find which process uses this address.
452 * For now, kill the task if we received the exception while 452 * For now, kill the task if we received the exception while
453 * in userspace. 453 * in userspace.
454 * 454 *
455 * TODO: Queue up this address for hwpoisoning later. 455 * TODO: Queue up this address for hwpoisoning later.
456 */ 456 */
457 if (user_mode(regs) && !is_global_init(current)) { 457 if (user_mode(regs) && !is_global_init(current)) {
458 _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); 458 _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
459 recovered = 1; 459 recovered = 1;
460 } else 460 } else
461 recovered = 0; 461 recovered = 0;
462 } else if (user_mode(regs) && !is_global_init(current) && 462 } else if (user_mode(regs) && !is_global_init(current) &&
463 evt->severity == MCE_SEV_ERROR_SYNC) { 463 evt->severity == MCE_SEV_ERROR_SYNC) {
464 /* 464 /*
465 * If we have received a synchronous error when in userspace 465 * If we have received a synchronous error when in userspace
466 * kill the task. 466 * kill the task.
467 */ 467 */
468 _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); 468 _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
469 recovered = 1; 469 recovered = 1;
470 } 470 }
471 return recovered; 471 return recovered;
472 } 472 }
473 473
474 int opal_machine_check(struct pt_regs *regs) 474 int opal_machine_check(struct pt_regs *regs)
475 { 475 {
476 struct machine_check_event evt; 476 struct machine_check_event evt;
477 477
478 if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) 478 if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
479 return 0; 479 return 0;
480 480
481 /* Print things out */ 481 /* Print things out */
482 if (evt.version != MCE_V1) { 482 if (evt.version != MCE_V1) {
483 pr_err("Machine Check Exception, Unknown event version %d !\n", 483 pr_err("Machine Check Exception, Unknown event version %d !\n",
484 evt.version); 484 evt.version);
485 return 0; 485 return 0;
486 } 486 }
487 machine_check_print_event_info(&evt); 487 machine_check_print_event_info(&evt);
488 488
489 if (opal_recover_mce(regs, &evt)) 489 if (opal_recover_mce(regs, &evt))
490 return 1; 490 return 1;
491 return 0; 491 return 0;
492 } 492 }
493 493
494 static uint64_t find_recovery_address(uint64_t nip) 494 static uint64_t find_recovery_address(uint64_t nip)
495 { 495 {
496 int i; 496 int i;
497 497
498 for (i = 0; i < mc_recoverable_range_len; i++) 498 for (i = 0; i < mc_recoverable_range_len; i++)
499 if ((nip >= mc_recoverable_range[i].start_addr) && 499 if ((nip >= mc_recoverable_range[i].start_addr) &&
500 (nip < mc_recoverable_range[i].end_addr)) 500 (nip < mc_recoverable_range[i].end_addr))
501 return mc_recoverable_range[i].recover_addr; 501 return mc_recoverable_range[i].recover_addr;
502 return 0; 502 return 0;
503 } 503 }
504 504
505 bool opal_mce_check_early_recovery(struct pt_regs *regs) 505 bool opal_mce_check_early_recovery(struct pt_regs *regs)
506 { 506 {
507 uint64_t recover_addr = 0; 507 uint64_t recover_addr = 0;
508 508
509 if (!opal.base || !opal.size) 509 if (!opal.base || !opal.size)
510 goto out; 510 goto out;
511 511
512 if ((regs->nip >= opal.base) && 512 if ((regs->nip >= opal.base) &&
513 (regs->nip <= (opal.base + opal.size))) 513 (regs->nip <= (opal.base + opal.size)))
514 recover_addr = find_recovery_address(regs->nip); 514 recover_addr = find_recovery_address(regs->nip);
515 515
516 /* 516 /*
517 * Setup regs->nip to rfi into fixup address. 517 * Setup regs->nip to rfi into fixup address.
518 */ 518 */
519 if (recover_addr) 519 if (recover_addr)
520 regs->nip = recover_addr; 520 regs->nip = recover_addr;
521 521
522 out: 522 out:
523 return !!recover_addr; 523 return !!recover_addr;
524 } 524 }
525 525
526 static irqreturn_t opal_interrupt(int irq, void *data) 526 static irqreturn_t opal_interrupt(int irq, void *data)
527 { 527 {
528 __be64 events; 528 __be64 events;
529 529
530 opal_handle_interrupt(virq_to_hw(irq), &events); 530 opal_handle_interrupt(virq_to_hw(irq), &events);
531 531
532 opal_do_notifier(be64_to_cpu(events)); 532 opal_do_notifier(be64_to_cpu(events));
533 533
534 return IRQ_HANDLED; 534 return IRQ_HANDLED;
535 } 535 }
536 536
537 static int opal_sysfs_init(void) 537 static int opal_sysfs_init(void)
538 { 538 {
539 opal_kobj = kobject_create_and_add("opal", firmware_kobj); 539 opal_kobj = kobject_create_and_add("opal", firmware_kobj);
540 if (!opal_kobj) { 540 if (!opal_kobj) {
541 pr_warn("kobject_create_and_add opal failed\n"); 541 pr_warn("kobject_create_and_add opal failed\n");
542 return -ENOMEM; 542 return -ENOMEM;
543 } 543 }
544 544
545 return 0; 545 return 0;
546 } 546 }
547 547
548 static int __init opal_init(void) 548 static int __init opal_init(void)
549 { 549 {
550 struct device_node *np, *consoles; 550 struct device_node *np, *consoles;
551 const __be32 *irqs; 551 const __be32 *irqs;
552 int rc, i, irqlen; 552 int rc, i, irqlen;
553 553
554 opal_node = of_find_node_by_path("/ibm,opal"); 554 opal_node = of_find_node_by_path("/ibm,opal");
555 if (!opal_node) { 555 if (!opal_node) {
556 pr_warn("opal: Node not found\n"); 556 pr_warn("opal: Node not found\n");
557 return -ENODEV; 557 return -ENODEV;
558 } 558 }
559 559
560 /* Register OPAL consoles if any ports */ 560 /* Register OPAL consoles if any ports */
561 if (firmware_has_feature(FW_FEATURE_OPALv2)) 561 if (firmware_has_feature(FW_FEATURE_OPALv2))
562 consoles = of_find_node_by_path("/ibm,opal/consoles"); 562 consoles = of_find_node_by_path("/ibm,opal/consoles");
563 else 563 else
564 consoles = of_node_get(opal_node); 564 consoles = of_node_get(opal_node);
565 if (consoles) { 565 if (consoles) {
566 for_each_child_of_node(consoles, np) { 566 for_each_child_of_node(consoles, np) {
567 if (strcmp(np->name, "serial")) 567 if (strcmp(np->name, "serial"))
568 continue; 568 continue;
569 of_platform_device_create(np, NULL, NULL); 569 of_platform_device_create(np, NULL, NULL);
570 } 570 }
571 of_node_put(consoles); 571 of_node_put(consoles);
572 } 572 }
573 573
574 /* Find all OPAL interrupts and request them */ 574 /* Find all OPAL interrupts and request them */
575 irqs = of_get_property(opal_node, "opal-interrupts", &irqlen); 575 irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
576 pr_debug("opal: Found %d interrupts reserved for OPAL\n", 576 pr_debug("opal: Found %d interrupts reserved for OPAL\n",
577 irqs ? (irqlen / 4) : 0); 577 irqs ? (irqlen / 4) : 0);
578 opal_irq_count = irqlen / 4; 578 opal_irq_count = irqlen / 4;
579 opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL); 579 opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
580 for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) { 580 for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
581 unsigned int hwirq = be32_to_cpup(irqs); 581 unsigned int hwirq = be32_to_cpup(irqs);
582 unsigned int irq = irq_create_mapping(NULL, hwirq); 582 unsigned int irq = irq_create_mapping(NULL, hwirq);
583 if (irq == NO_IRQ) { 583 if (irq == NO_IRQ) {
584 pr_warning("opal: Failed to map irq 0x%x\n", hwirq); 584 pr_warning("opal: Failed to map irq 0x%x\n", hwirq);
585 continue; 585 continue;
586 } 586 }
587 rc = request_irq(irq, opal_interrupt, 0, "opal", NULL); 587 rc = request_irq(irq, opal_interrupt, 0, "opal", NULL);
588 if (rc) 588 if (rc)
589 pr_warning("opal: Error %d requesting irq %d" 589 pr_warning("opal: Error %d requesting irq %d"
590 " (0x%x)\n", rc, irq, hwirq); 590 " (0x%x)\n", rc, irq, hwirq);
591 opal_irqs[i] = irq; 591 opal_irqs[i] = irq;
592 } 592 }
593 593
594 /* Create "opal" kobject under /sys/firmware */ 594 /* Create "opal" kobject under /sys/firmware */
595 rc = opal_sysfs_init(); 595 rc = opal_sysfs_init();
596 if (rc == 0) { 596 if (rc == 0) {
597 /* Setup error log interface */ 597 /* Setup error log interface */
598 rc = opal_elog_init(); 598 rc = opal_elog_init();
599 /* Setup code update interface */ 599 /* Setup code update interface */
600 opal_flash_init(); 600 opal_flash_init();
601 /* Setup platform dump extract interface */ 601 /* Setup platform dump extract interface */
602 opal_platform_dump_init(); 602 opal_platform_dump_init();
603 /* Setup system parameters interface */ 603 /* Setup system parameters interface */
604 opal_sys_param_init(); 604 opal_sys_param_init();
605 /* Setup message log interface. */ 605 /* Setup message log interface. */
606 opal_msglog_init(); 606 opal_msglog_init();
607 } 607 }
608 608
609 return 0; 609 return 0;
610 } 610 }
611 subsys_initcall(opal_init); 611 subsys_initcall(opal_init);
612 612
613 void opal_shutdown(void) 613 void opal_shutdown(void)
614 { 614 {
615 unsigned int i; 615 unsigned int i;
616 long rc = OPAL_BUSY; 616 long rc = OPAL_BUSY;
617 617
618 /* First free interrupts, which will also mask them */ 618 /* First free interrupts, which will also mask them */
619 for (i = 0; i < opal_irq_count; i++) { 619 for (i = 0; i < opal_irq_count; i++) {
620 if (opal_irqs[i]) 620 if (opal_irqs[i])
621 free_irq(opal_irqs[i], NULL); 621 free_irq(opal_irqs[i], NULL);
622 opal_irqs[i] = 0; 622 opal_irqs[i] = 0;
623 } 623 }
624 624
625 /* 625 /*
626 * Then sync with OPAL, which ensures that anything that 626 * Then sync with OPAL, which ensures that anything that
627 * can potentially write to our memory, such as an ongoing 627 * can potentially write to our memory, such as an ongoing
628 * dump retrieval, has completed 628 * dump retrieval, has completed
629 */ 629 */
630 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { 630 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
631 rc = opal_sync_host_reboot(); 631 rc = opal_sync_host_reboot();
632 if (rc == OPAL_BUSY) 632 if (rc == OPAL_BUSY)
633 opal_poll_events(NULL); 633 opal_poll_events(NULL);
634 else 634 else
635 mdelay(10); 635 mdelay(10);
636 } 636 }
637 } 637 }
638 638
639 /* Export this so that test modules can use it */ 639 /* Export this so that test modules can use it */
640 EXPORT_SYMBOL_GPL(opal_invalid_call); 640 EXPORT_SYMBOL_GPL(opal_invalid_call);
641
642 /* Convert a region of vmalloc memory to an opal sg list */
643 struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
644 unsigned long vmalloc_size)
645 {
646 struct opal_sg_list *sg, *first = NULL;
647 unsigned long i = 0;
648
649 sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
650 if (!sg)
651 goto nomem;
652
653 first = sg;
654
655 while (vmalloc_size > 0) {
656 uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
657 uint64_t length = min(vmalloc_size, PAGE_SIZE);
658
659 sg->entry[i].data = cpu_to_be64(data);
660 sg->entry[i].length = cpu_to_be64(length);
661 i++;
662
663 if (i >= SG_ENTRIES_PER_NODE) {
664 struct opal_sg_list *next;
665
666 next = kzalloc(PAGE_SIZE, GFP_KERNEL);
667 if (!next)
668 goto nomem;
669
670 sg->length = cpu_to_be64(
671 i * sizeof(struct opal_sg_entry) + 16);
672 i = 0;
673 sg->next = cpu_to_be64(__pa(next));
674 sg = next;
675 }
676
677 vmalloc_addr += length;
678 vmalloc_size -= length;
679 }
680
681 sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);
682
683 return first;
684
685 nomem:
686 pr_err("%s : Failed to allocate memory\n", __func__);
687 opal_free_sg_list(first);
688 return NULL;
689 }
690
691 void opal_free_sg_list(struct opal_sg_list *sg)
692 {
693 while (sg) {
694 uint64_t next = be64_to_cpu(sg->next);
695
696 kfree(sg);
697
698 if (next)
699 sg = __va(next);
700 else
701 sg = NULL;
702 }
703 }
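
A short usage sketch for the new helpers, mirroring what the flash and dump code now do: build the list over a vmalloc'd buffer, pass its physical address to the firmware call that consumes it, then free the nodes. The size and the consuming call here are placeholders.

#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <asm/opal.h>

/* Illustrative: build an SG list for a vmalloc'd buffer, hand it to
 * firmware by physical address, then free the list nodes.
 */
static int example_send_buffer(unsigned long size)
{
	struct opal_sg_list *list;
	void *buf;
	int rc = -ENOMEM;

	buf = vmalloc(size);
	if (!buf)
		return rc;

	list = opal_vmalloc_to_sg_list(buf, size);
	if (list) {
		/* e.g. the flash driver passes __pa(list) to opal_update_flash() */
		rc = 0;
		opal_free_sg_list(list);
	}

	vfree(buf);
	return rc;
}

Once SG_ENTRIES_PER_NODE entries fill up, opal_vmalloc_to_sg_list() chains another PAGE_SIZE node itself, so callers only ever keep the head pointer.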
641 704