Commit 838b4c02ada1b82600ce600fc3f202043898f463

Authored by Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

Pull powerpc fixes from Ben Herrenschmidt:
 "Here is a bunch of post-merge window fixes that have been accumulating
  in patchwork while I was on vacation or buried under other stuff last
  week.

  We have the now usual batch of LE fixes from Anton (sadly, some new
  stuff that went into this merge window had endian issues; we'll try to
  make sure we do better next time)

  Some fixes and cleanups to the new 24x7 performance monitoring stuff
  (mostly typos and cleaning up printk's)

  A series of fixes for an issue with our runlatch bit, which wasn't set
  properly for offlined threads/cores and under KVM, potentially causing
  some counters to misbehave along with possible power management issues
  (a short sketch of the pattern follows this summary).

  A fix for a nasty kexec race where the new kernel wouldn't "see" in
  time that the secondary processors had reached back into firmware.

  And finally a few other misc (and pretty simple) bug fixes"
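
The runlatch fixes follow a simple pattern; below is a minimal sketch in C,
mirroring the powernv/smp.c hunk later in this diff and using the helpers
from <asm/runlatch.h> (illustrative only, not the full offline loop):

    /* Sketch: an offlined thread drops the runlatch before napping so
     * it isn't counted as running, and raises it again on wakeup. */
    while (!generic_check_cpu_restart(cpu)) {
            ppc64_runlatch_off();
            power7_nap();
            ppc64_runlatch_on();
    }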

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (33 commits)
  powerpc/4xx: Fix section mismatch in ppc4xx_pci.c
  ppc/kvm: Clear the runlatch bit of a vcpu before napping
  ppc/kvm: Set the runlatch bit of a CPU just before starting guest
  ppc/powernv: Set the runlatch bits correctly for offline cpus
  powerpc/pseries: Protect remove_memory() with device hotplug lock
  powerpc: Fix error return in rtas_flash module init
  powerpc: Bump BOOT_COMMAND_LINE_SIZE to 2048
  powerpc: Bump COMMAND_LINE_SIZE to 2048
  powerpc: Rename duplicate COMMAND_LINE_SIZE define
  powerpc/perf/hv-24x7: Catalog version number is be64, not be32
  powerpc/perf/hv-24x7: Remove [static 4096], sparse chokes on it
  powerpc/perf/hv-24x7: Use (unsigned long) not (u32) values when calling plpar_hcall_norets()
  powerpc/perf/hv-gpci: Make device attr static
  powerpc/perf/hv_gpci: Probe failures use pr_debug(), and padding reduced
  powerpc/perf/hv_24x7: Probe errors changed to pr_debug(), padding fixed
  powerpc/mm: Fix tlbie to add AVAL fields for 64K pages
  powerpc/powernv: Fix little endian issues in OPAL dump code
  powerpc/powernv: Create OPAL sglist helper functions and fix endian issues
  powerpc/powernv: Fix little endian issues in OPAL error log code
  powerpc/powernv: Fix little endian issues with opal_do_notifier calls
  ...

Showing 22 changed files

arch/powerpc/boot/main.c
... ... @@ -139,18 +139,18 @@
139 139 * edit the command line passed to vmlinux (by setting /chosen/bootargs).
140 140 * The buffer is put in it's own section so that tools may locate it easier.
141 141 */
142   -static char cmdline[COMMAND_LINE_SIZE]
  142 +static char cmdline[BOOT_COMMAND_LINE_SIZE]
143 143 __attribute__((__section__("__builtin_cmdline")));
144 144  
145 145 static void prep_cmdline(void *chosen)
146 146 {
147 147 if (cmdline[0] == '\0')
148   - getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1);
  148 + getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);
149 149  
150 150 printf("\n\rLinux/PowerPC load: %s", cmdline);
151 151 /* If possible, edit the command line */
152 152 if (console_ops.edit_cmdline)
153   - console_ops.edit_cmdline(cmdline, COMMAND_LINE_SIZE);
  153 + console_ops.edit_cmdline(cmdline, BOOT_COMMAND_LINE_SIZE);
154 154 printf("\n\r");
155 155  
156 156 /* Put the command line back into the devtree for the kernel */
... ... @@ -174,7 +174,7 @@
174 174 * built-in command line wasn't set by an external tool */
175 175 if ((loader_info.cmdline_len > 0) && (cmdline[0] == '\0'))
176 176 memmove(cmdline, loader_info.cmdline,
177   - min(loader_info.cmdline_len, COMMAND_LINE_SIZE-1));
  177 + min(loader_info.cmdline_len, BOOT_COMMAND_LINE_SIZE-1));
178 178  
179 179 if (console_ops.open && (console_ops.open() < 0))
180 180 exit();
arch/powerpc/boot/ops.h
... ... @@ -15,7 +15,7 @@
15 15 #include "types.h"
16 16 #include "string.h"
17 17  
18   -#define COMMAND_LINE_SIZE 512
  18 +#define BOOT_COMMAND_LINE_SIZE 2048
19 19 #define MAX_PATH_LEN 256
20 20 #define MAX_PROP_LEN 256 /* What should this be? */
21 21  
arch/powerpc/boot/ps3.c
... ... @@ -47,13 +47,13 @@
47 47 * The buffer is put in it's own section so that tools may locate it easier.
48 48 */
49 49  
50   -static char cmdline[COMMAND_LINE_SIZE]
  50 +static char cmdline[BOOT_COMMAND_LINE_SIZE]
51 51 __attribute__((__section__("__builtin_cmdline")));
52 52  
53 53 static void prep_cmdline(void *chosen)
54 54 {
55 55 if (cmdline[0] == '\0')
56   - getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1);
  56 + getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);
57 57 else
58 58 setprop_str(chosen, "bootargs", cmdline);
59 59  
arch/powerpc/include/asm/opal.h
... ... @@ -41,14 +41,14 @@
41 41 * size except the last one in the list to be as well.
42 42 */
43 43 struct opal_sg_entry {
44   - void *data;
45   - long length;
  44 + __be64 data;
  45 + __be64 length;
46 46 };
47 47  
48   -/* sg list */
  48 +/* SG list */
49 49 struct opal_sg_list {
50   - unsigned long num_entries;
51   - struct opal_sg_list *next;
  50 + __be64 length;
  51 + __be64 next;
52 52 struct opal_sg_entry entry[];
53 53 };
54 54  
... ... @@ -858,8 +858,8 @@
858 858 int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
859 859 uint32_t addr, __be32 *data, uint32_t sz);
860 860  
861   -int64_t opal_read_elog(uint64_t buffer, size_t size, uint64_t log_id);
862   -int64_t opal_get_elog_size(uint64_t *log_id, size_t *size, uint64_t *elog_type);
  861 +int64_t opal_read_elog(uint64_t buffer, uint64_t size, uint64_t log_id);
  862 +int64_t opal_get_elog_size(__be64 *log_id, __be64 *size, __be64 *elog_type);
863 863 int64_t opal_write_elog(uint64_t buffer, uint64_t size, uint64_t offset);
864 864 int64_t opal_send_ack_elog(uint64_t log_id);
865 865 void opal_resend_pending_logs(void);
866 866  
867 867  
868 868  
869 869  
... ... @@ -868,23 +868,24 @@
868 868 int64_t opal_manage_flash(uint8_t op);
869 869 int64_t opal_update_flash(uint64_t blk_list);
870 870 int64_t opal_dump_init(uint8_t dump_type);
871   -int64_t opal_dump_info(uint32_t *dump_id, uint32_t *dump_size);
872   -int64_t opal_dump_info2(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type);
  871 +int64_t opal_dump_info(__be32 *dump_id, __be32 *dump_size);
  872 +int64_t opal_dump_info2(__be32 *dump_id, __be32 *dump_size, __be32 *dump_type);
873 873 int64_t opal_dump_read(uint32_t dump_id, uint64_t buffer);
874 874 int64_t opal_dump_ack(uint32_t dump_id);
875 875 int64_t opal_dump_resend_notification(void);
876 876  
877   -int64_t opal_get_msg(uint64_t buffer, size_t size);
878   -int64_t opal_check_completion(uint64_t buffer, size_t size, uint64_t token);
  877 +int64_t opal_get_msg(uint64_t buffer, uint64_t size);
  878 +int64_t opal_check_completion(uint64_t buffer, uint64_t size, uint64_t token);
879 879 int64_t opal_sync_host_reboot(void);
880 880 int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer,
881   - size_t length);
  881 + uint64_t length);
882 882 int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer,
883   - size_t length);
  883 + uint64_t length);
884 884 int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
885 885  
886 886 /* Internal functions */
887   -extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
  887 +extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
  888 + int depth, void *data);
888 889 extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
889 890 const char *uname, int depth, void *data);
890 891  
... ... @@ -893,10 +894,6 @@
893 894  
894 895 extern void hvc_opal_init_early(void);
895 896  
896   -/* Internal functions */
897   -extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
898   - int depth, void *data);
899   -
900 897 extern int opal_notifier_register(struct notifier_block *nb);
901 898 extern int opal_notifier_unregister(struct notifier_block *nb);
902 899  
... ... @@ -906,9 +903,6 @@
906 903 extern void opal_notifier_disable(void);
907 904 extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
908 905  
909   -extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
910   -extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
911   -
912 906 extern int __opal_async_get_token(void);
913 907 extern int opal_async_get_token_interruptible(void);
914 908 extern int __opal_async_release_token(int token);
... ... @@ -916,8 +910,6 @@
916 910 extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg);
917 911 extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
918 912  
919   -extern void hvc_opal_init_early(void);
920   -
921 913 struct rtc_time;
922 914 extern int opal_set_rtc_time(struct rtc_time *tm);
923 915 extern void opal_get_rtc_time(struct rtc_time *tm);
... ... @@ -936,6 +928,10 @@
936 928 extern int opal_resync_timebase(void);
937 929  
938 930 extern void opal_lpc_init(void);
  931 +
  932 +struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
  933 + unsigned long vmalloc_size);
  934 +void opal_free_sg_list(struct opal_sg_list *sg);
939 935  
940 936 #endif /* __ASSEMBLY__ */
941 937  
arch/powerpc/include/uapi/asm/setup.h
1   -#include <asm-generic/setup.h>
  1 +#ifndef _UAPI_ASM_POWERPC_SETUP_H
  2 +#define _UAPI_ASM_POWERPC_SETUP_H
  3 +
  4 +#define COMMAND_LINE_SIZE 2048
  5 +
  6 +#endif /* _UAPI_ASM_POWERPC_SETUP_H */
arch/powerpc/kernel/ppc_ksyms.c
... ... @@ -120,6 +120,7 @@
120 120 EXPORT_SYMBOL(flush_instruction_cache);
121 121 #endif
122 122 EXPORT_SYMBOL(flush_dcache_range);
  123 +EXPORT_SYMBOL(flush_icache_range);
123 124  
124 125 #ifdef CONFIG_SMP
125 126 #ifdef CONFIG_PPC32
arch/powerpc/kernel/rtas_flash.c
... ... @@ -705,7 +705,7 @@
705 705 if (rtas_token("ibm,update-flash-64-and-reboot") ==
706 706 RTAS_UNKNOWN_SERVICE) {
707 707 pr_info("rtas_flash: no firmware flash support\n");
708   - return 1;
  708 + return -EINVAL;
709 709 }
710 710  
711 711 rtas_validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL);
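
The rtas_flash change above reflects a general convention: a module init
function returns 0 on success or a negative errno on failure, never a bare
positive value. A hedged sketch of the idiom (illustrative names only):

    /* Assumes <linux/module.h>, <linux/printk.h> and <asm/rtas.h>. */
    static int __init example_flash_init(void)
    {
            if (rtas_token("ibm,update-flash-64-and-reboot") ==
                RTAS_UNKNOWN_SERVICE) {
                    pr_info("example_flash: no firmware flash support\n");
                    return -EINVAL;        /* not "return 1" */
            }
            return 0;
    }
    module_init(example_flash_init);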
arch/powerpc/kvm/book3s_hv_rmhandlers.S
... ... @@ -242,6 +242,12 @@
242 242 */
243 243 .globl kvm_start_guest
244 244 kvm_start_guest:
  245 +
  246 + /* Set runlatch bit the minute you wake up from nap */
  247 + mfspr r1, SPRN_CTRLF
  248 + ori r1, r1, 1
  249 + mtspr SPRN_CTRLT, r1
  250 +
245 251 ld r2,PACATOC(r13)
246 252  
247 253 li r0,KVM_HWTHREAD_IN_KVM
... ... @@ -309,6 +315,11 @@
309 315 li r0, KVM_HWTHREAD_IN_NAP
310 316 stb r0, HSTATE_HWTHREAD_STATE(r13)
311 317 kvm_do_nap:
  318 + /* Clear the runlatch bit before napping */
  319 + mfspr r2, SPRN_CTRLF
  320 + clrrdi r2, r2, 1
  321 + mtspr SPRN_CTRLT, r2
  322 +
312 323 li r3, LPCR_PECE0
313 324 mfspr r4, SPRN_LPCR
314 325 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
315 326  
... ... @@ -1999,8 +2010,13 @@
1999 2010  
2000 2011 /*
2001 2012 * Take a nap until a decrementer or external or doobell interrupt
2002   - * occurs, with PECE1, PECE0 and PECEDP set in LPCR
  2013 + * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
  2014 + * runlatch bit before napping.
2003 2015 */
  2016 + mfspr r2, SPRN_CTRLF
  2017 + clrrdi r2, r2, 1
  2018 + mtspr SPRN_CTRLT, r2
  2019 +
2004 2020 li r0,1
2005 2021 stb r0,HSTATE_HWTHREAD_REQ(r13)
2006 2022 mfspr r5,SPRN_LPCR
arch/powerpc/mm/hash_native_64.c
... ... @@ -82,17 +82,14 @@
82 82 va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
83 83 va |= penc << 12;
84 84 va |= ssize << 8;
85   - /* Add AVAL part */
86   - if (psize != apsize) {
87   - /*
88   - * MPSS, 64K base page size and 16MB parge page size
89   - * We don't need all the bits, but rest of the bits
90   - * must be ignored by the processor.
91   - * vpn cover upto 65 bits of va. (0...65) and we need
92   - * 58..64 bits of va.
93   - */
94   - va |= (vpn & 0xfe);
95   - }
  85 + /*
  86 + * AVAL bits:
  87 + * We don't need all the bits, but rest of the bits
  88 + * must be ignored by the processor.
  89 + * vpn cover upto 65 bits of va. (0...65) and we need
  90 + * 58..64 bits of va.
  91 + */
  92 + va |= (vpn & 0xfe); /* AVAL */
96 93 va |= 1; /* L */
97 94 asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
98 95 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
... ... @@ -133,17 +130,14 @@
133 130 va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
134 131 va |= penc << 12;
135 132 va |= ssize << 8;
136   - /* Add AVAL part */
137   - if (psize != apsize) {
138   - /*
139   - * MPSS, 64K base page size and 16MB parge page size
140   - * We don't need all the bits, but rest of the bits
141   - * must be ignored by the processor.
142   - * vpn cover upto 65 bits of va. (0...65) and we need
143   - * 58..64 bits of va.
144   - */
145   - va |= (vpn & 0xfe);
146   - }
  133 + /*
  134 + * AVAL bits:
  135 + * We don't need all the bits, but rest of the bits
  136 + * must be ignored by the processor.
  137 + * vpn cover upto 65 bits of va. (0...65) and we need
  138 + * 58..64 bits of va.
  139 + */
  140 + va |= (vpn & 0xfe);
147 141 va |= 1; /* L */
148 142 asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
149 143 : : "r"(va) : "memory");
arch/powerpc/perf/hv-24x7.c
... ... @@ -155,16 +155,28 @@
155 155 return copy_len;
156 156 }
157 157  
158   -static unsigned long h_get_24x7_catalog_page(char page[static 4096],
159   - u32 version, u32 index)
  158 +static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
  159 + unsigned long version,
  160 + unsigned long index)
160 161 {
161   - WARN_ON(!IS_ALIGNED((unsigned long)page, 4096));
  162 + pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
  163 + phys_4096,
  164 + version,
  165 + index);
  166 + WARN_ON(!IS_ALIGNED(phys_4096, 4096));
162 167 return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
163   - virt_to_phys(page),
  168 + phys_4096,
164 169 version,
165 170 index);
166 171 }
167 172  
  173 +static unsigned long h_get_24x7_catalog_page(char page[],
  174 + u64 version, u32 index)
  175 +{
  176 + return h_get_24x7_catalog_page_(virt_to_phys(page),
  177 + version, index);
  178 +}
  179 +
168 180 static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
169 181 struct bin_attribute *bin_attr, char *buf,
170 182 loff_t offset, size_t count)
... ... @@ -173,7 +185,7 @@
173 185 ssize_t ret = 0;
174 186 size_t catalog_len = 0, catalog_page_len = 0, page_count = 0;
175 187 loff_t page_offset = 0;
176   - uint32_t catalog_version_num = 0;
  188 + uint64_t catalog_version_num = 0;
177 189 void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
178 190 struct hv_24x7_catalog_page_0 *page_0 = page;
179 191 if (!page)
... ... @@ -185,7 +197,7 @@
185 197 goto e_free;
186 198 }
187 199  
188   - catalog_version_num = be32_to_cpu(page_0->version);
  200 + catalog_version_num = be64_to_cpu(page_0->version);
189 201 catalog_page_len = be32_to_cpu(page_0->length);
190 202 catalog_len = catalog_page_len * 4096;
191 203  
... ... @@ -208,8 +220,9 @@
208 220 page, 4096, page_offset * 4096);
209 221 e_free:
210 222 if (hret)
211   - pr_err("h_get_24x7_catalog_page(ver=%d, page=%lld) failed: rc=%ld\n",
212   - catalog_version_num, page_offset, hret);
  223 + pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:"
  224 + " rc=%ld\n",
  225 + catalog_version_num, page_offset, hret);
213 226 kfree(page);
214 227  
215 228 pr_devel("catalog_read: offset=%lld(%lld) count=%zu(%zu) catalog_len=%zu(%zu) => %zd\n",
... ... @@ -243,7 +256,7 @@
243 256 static DEVICE_ATTR_RO(_name)
244 257  
245 258 PAGE_0_ATTR(catalog_version, "%lld\n",
246   - (unsigned long long)be32_to_cpu(page_0->version));
  259 + (unsigned long long)be64_to_cpu(page_0->version));
247 260 PAGE_0_ATTR(catalog_len, "%lld\n",
248 261 (unsigned long long)be32_to_cpu(page_0->length) * 4096);
249 262 static BIN_ATTR_RO(catalog, 0/* real length varies */);
250 263  
... ... @@ -485,13 +498,13 @@
485 498 struct hv_perf_caps caps;
486 499  
487 500 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
488   - pr_info("not a virtualized system, not enabling\n");
  501 + pr_debug("not a virtualized system, not enabling\n");
489 502 return -ENODEV;
490 503 }
491 504  
492 505 hret = hv_perf_caps_get(&caps);
493 506 if (hret) {
494   - pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n",
  507 + pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
495 508 hret);
496 509 return -ENODEV;
497 510 }
arch/powerpc/perf/hv-gpci.c
... ... @@ -78,7 +78,7 @@
78 78 return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
79 79 }
80 80  
81   -DEVICE_ATTR_RO(kernel_version);
  81 +static DEVICE_ATTR_RO(kernel_version);
82 82 HV_CAPS_ATTR(version, "0x%x\n");
83 83 HV_CAPS_ATTR(ga, "%d\n");
84 84 HV_CAPS_ATTR(expanded, "%d\n");
85 85  
... ... @@ -273,13 +273,13 @@
273 273 struct hv_perf_caps caps;
274 274  
275 275 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
276   - pr_info("not a virtualized system, not enabling\n");
  276 + pr_debug("not a virtualized system, not enabling\n");
277 277 return -ENODEV;
278 278 }
279 279  
280 280 hret = hv_perf_caps_get(&caps);
281 281 if (hret) {
282   - pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n",
  282 + pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
283 283 hret);
284 284 return -ENODEV;
285 285 }
arch/powerpc/platforms/powernv/opal-dump.c
... ... @@ -209,90 +209,21 @@
209 209 .default_attrs = dump_default_attrs,
210 210 };
211 211  
212   -static void free_dump_sg_list(struct opal_sg_list *list)
  212 +static int64_t dump_read_info(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type)
213 213 {
214   - struct opal_sg_list *sg1;
215   - while (list) {
216   - sg1 = list->next;
217   - kfree(list);
218   - list = sg1;
219   - }
220   - list = NULL;
221   -}
222   -
223   -static struct opal_sg_list *dump_data_to_sglist(struct dump_obj *dump)
224   -{
225   - struct opal_sg_list *sg1, *list = NULL;
226   - void *addr;
227   - int64_t size;
228   -
229   - addr = dump->buffer;
230   - size = dump->size;
231   -
232   - sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
233   - if (!sg1)
234   - goto nomem;
235   -
236   - list = sg1;
237   - sg1->num_entries = 0;
238   - while (size > 0) {
239   - /* Translate virtual address to physical address */
240   - sg1->entry[sg1->num_entries].data =
241   - (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
242   -
243   - if (size > PAGE_SIZE)
244   - sg1->entry[sg1->num_entries].length = PAGE_SIZE;
245   - else
246   - sg1->entry[sg1->num_entries].length = size;
247   -
248   - sg1->num_entries++;
249   - if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
250   - sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
251   - if (!sg1->next)
252   - goto nomem;
253   -
254   - sg1 = sg1->next;
255   - sg1->num_entries = 0;
256   - }
257   - addr += PAGE_SIZE;
258   - size -= PAGE_SIZE;
259   - }
260   - return list;
261   -
262   -nomem:
263   - pr_err("%s : Failed to allocate memory\n", __func__);
264   - free_dump_sg_list(list);
265   - return NULL;
266   -}
267   -
268   -static void sglist_to_phy_addr(struct opal_sg_list *list)
269   -{
270   - struct opal_sg_list *sg, *next;
271   -
272   - for (sg = list; sg; sg = next) {
273   - next = sg->next;
274   - /* Don't translate NULL pointer for last entry */
275   - if (sg->next)
276   - sg->next = (struct opal_sg_list *)__pa(sg->next);
277   - else
278   - sg->next = NULL;
279   -
280   - /* Convert num_entries to length */
281   - sg->num_entries =
282   - sg->num_entries * sizeof(struct opal_sg_entry) + 16;
283   - }
284   -}
285   -
286   -static int64_t dump_read_info(uint32_t *id, uint32_t *size, uint32_t *type)
287   -{
  214 + __be32 id, size, type;
288 215 int rc;
289   - *type = 0xffffffff;
290 216  
291   - rc = opal_dump_info2(id, size, type);
  217 + type = cpu_to_be32(0xffffffff);
292 218  
  219 + rc = opal_dump_info2(&id, &size, &type);
293 220 if (rc == OPAL_PARAMETER)
294   - rc = opal_dump_info(id, size);
  221 + rc = opal_dump_info(&id, &size);
295 222  
  223 + *dump_id = be32_to_cpu(id);
  224 + *dump_size = be32_to_cpu(size);
  225 + *dump_type = be32_to_cpu(type);
  226 +
296 227 if (rc)
297 228 pr_warn("%s: Failed to get dump info (%d)\n",
298 229 __func__, rc);
299 230  
... ... @@ -314,15 +245,12 @@
314 245 }
315 246  
316 247 /* Generate SG list */
317   - list = dump_data_to_sglist(dump);
  248 + list = opal_vmalloc_to_sg_list(dump->buffer, dump->size);
318 249 if (!list) {
319 250 rc = -ENOMEM;
320 251 goto out;
321 252 }
322 253  
323   - /* Translate sg list addr to real address */
324   - sglist_to_phy_addr(list);
325   -
326 254 /* First entry address */
327 255 addr = __pa(list);
328 256  
... ... @@ -341,7 +269,7 @@
341 269 __func__, dump->id);
342 270  
343 271 /* Free SG list */
344   - free_dump_sg_list(list);
  272 + opal_free_sg_list(list);
345 273  
346 274 out:
347 275 return rc;
arch/powerpc/platforms/powernv/opal-elog.c
... ... @@ -238,17 +238,24 @@
238 238  
239 239 static void elog_work_fn(struct work_struct *work)
240 240 {
241   - size_t elog_size;
  241 + __be64 size;
  242 + __be64 id;
  243 + __be64 type;
  244 + uint64_t elog_size;
242 245 uint64_t log_id;
243 246 uint64_t elog_type;
244 247 int rc;
245 248 char name[2+16+1];
246 249  
247   - rc = opal_get_elog_size(&log_id, &elog_size, &elog_type);
  250 + rc = opal_get_elog_size(&id, &size, &type);
248 251 if (rc != OPAL_SUCCESS) {
249 252 pr_err("ELOG: Opal log read failed\n");
250 253 return;
251 254 }
  255 +
  256 + elog_size = be64_to_cpu(size);
  257 + log_id = be64_to_cpu(id);
  258 + elog_type = be64_to_cpu(type);
252 259  
253 260 BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
254 261  
arch/powerpc/platforms/powernv/opal-flash.c
... ... @@ -79,9 +79,6 @@
79 79 /* XXX: Assume candidate image size is <= 1GB */
80 80 #define MAX_IMAGE_SIZE 0x40000000
81 81  
82   -/* Flash sg list version */
83   -#define SG_LIST_VERSION (1UL)
84   -
85 82 /* Image status */
86 83 enum {
87 84 IMAGE_INVALID,
88 85  
... ... @@ -131,11 +128,15 @@
131 128 */
132 129 static inline void opal_flash_validate(void)
133 130 {
134   - struct validate_flash_t *args_buf = &validate_flash_data;
  131 + long ret;
  132 + void *buf = validate_flash_data.buf;
  133 + __be32 size, result;
135 134  
136   - args_buf->status = opal_validate_flash(__pa(args_buf->buf),
137   - &(args_buf->buf_size),
138   - &(args_buf->result));
  135 + ret = opal_validate_flash(__pa(buf), &size, &result);
  136 +
  137 + validate_flash_data.status = ret;
  138 + validate_flash_data.buf_size = be32_to_cpu(size);
  139 + validate_flash_data.result = be32_to_cpu(result);
139 140 }
140 141  
141 142 /*
142 143  
... ... @@ -268,93 +269,11 @@
268 269 }
269 270  
270 271 /*
271   - * Free sg list
272   - */
273   -static void free_sg_list(struct opal_sg_list *list)
274   -{
275   - struct opal_sg_list *sg1;
276   - while (list) {
277   - sg1 = list->next;
278   - kfree(list);
279   - list = sg1;
280   - }
281   - list = NULL;
282   -}
283   -
284   -/*
285   - * Build candidate image scatter gather list
286   - *
287   - * list format:
288   - * -----------------------------------
289   - * | VER (8) | Entry length in bytes |
290   - * -----------------------------------
291   - * | Pointer to next entry |
292   - * -----------------------------------
293   - * | Address of memory area 1 |
294   - * -----------------------------------
295   - * | Length of memory area 1 |
296   - * -----------------------------------
297   - * | ......... |
298   - * -----------------------------------
299   - * | ......... |
300   - * -----------------------------------
301   - * | Address of memory area N |
302   - * -----------------------------------
303   - * | Length of memory area N |
304   - * -----------------------------------
305   - */
306   -static struct opal_sg_list *image_data_to_sglist(void)
307   -{
308   - struct opal_sg_list *sg1, *list = NULL;
309   - void *addr;
310   - int size;
311   -
312   - addr = image_data.data;
313   - size = image_data.size;
314   -
315   - sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
316   - if (!sg1)
317   - return NULL;
318   -
319   - list = sg1;
320   - sg1->num_entries = 0;
321   - while (size > 0) {
322   - /* Translate virtual address to physical address */
323   - sg1->entry[sg1->num_entries].data =
324   - (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
325   -
326   - if (size > PAGE_SIZE)
327   - sg1->entry[sg1->num_entries].length = PAGE_SIZE;
328   - else
329   - sg1->entry[sg1->num_entries].length = size;
330   -
331   - sg1->num_entries++;
332   - if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
333   - sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
334   - if (!sg1->next) {
335   - pr_err("%s : Failed to allocate memory\n",
336   - __func__);
337   - goto nomem;
338   - }
339   -
340   - sg1 = sg1->next;
341   - sg1->num_entries = 0;
342   - }
343   - addr += PAGE_SIZE;
344   - size -= PAGE_SIZE;
345   - }
346   - return list;
347   -nomem:
348   - free_sg_list(list);
349   - return NULL;
350   -}
351   -
352   -/*
353 272 * OPAL update flash
354 273 */
355 274 static int opal_flash_update(int op)
356 275 {
357   - struct opal_sg_list *sg, *list, *next;
  276 + struct opal_sg_list *list;
358 277 unsigned long addr;
359 278 int64_t rc = OPAL_PARAMETER;
360 279  
361 280  
... ... @@ -364,29 +283,12 @@
364 283 goto flash;
365 284 }
366 285  
367   - list = image_data_to_sglist();
  286 + list = opal_vmalloc_to_sg_list(image_data.data, image_data.size);
368 287 if (!list)
369 288 goto invalid_img;
370 289  
371 290 /* First entry address */
372 291 addr = __pa(list);
373   -
374   - /* Translate sg list address to absolute */
375   - for (sg = list; sg; sg = next) {
376   - next = sg->next;
377   - /* Don't translate NULL pointer for last entry */
378   - if (sg->next)
379   - sg->next = (struct opal_sg_list *)__pa(sg->next);
380   - else
381   - sg->next = NULL;
382   -
383   - /*
384   - * Convert num_entries to version/length format
385   - * to satisfy OPAL.
386   - */
387   - sg->num_entries = (SG_LIST_VERSION << 56) |
388   - (sg->num_entries * sizeof(struct opal_sg_entry) + 16);
389   - }
390 292  
391 293 pr_alert("FLASH: Image is %u bytes\n", image_data.size);
392 294 pr_alert("FLASH: Image update requested\n");
arch/powerpc/platforms/powernv/opal-sysparam.c
... ... @@ -39,10 +39,11 @@
39 39 struct kobj_attribute kobj_attr;
40 40 };
41 41  
42   -static int opal_get_sys_param(u32 param_id, u32 length, void *buffer)
  42 +static ssize_t opal_get_sys_param(u32 param_id, u32 length, void *buffer)
43 43 {
44 44 struct opal_msg msg;
45   - int ret, token;
  45 + ssize_t ret;
  46 + int token;
46 47  
47 48 token = opal_async_get_token_interruptible();
48 49 if (token < 0) {
... ... @@ -59,7 +60,7 @@
59 60  
60 61 ret = opal_async_wait_response(token, &msg);
61 62 if (ret) {
62   - pr_err("%s: Failed to wait for the async response, %d\n",
  63 + pr_err("%s: Failed to wait for the async response, %zd\n",
63 64 __func__, ret);
64 65 goto out_token;
65 66 }
... ... @@ -111,7 +112,7 @@
111 112 {
112 113 struct param_attr *attr = container_of(kobj_attr, struct param_attr,
113 114 kobj_attr);
114   - int ret;
  115 + ssize_t ret;
115 116  
116 117 mutex_lock(&opal_sysparam_mutex);
117 118 ret = opal_get_sys_param(attr->param_id, attr->param_size,
118 119  
... ... @@ -121,9 +122,10 @@
121 122  
122 123 memcpy(buf, param_data_buf, attr->param_size);
123 124  
  125 + ret = attr->param_size;
124 126 out:
125 127 mutex_unlock(&opal_sysparam_mutex);
126   - return ret ? ret : attr->param_size;
  128 + return ret;
127 129 }
128 130  
129 131 static ssize_t sys_param_store(struct kobject *kobj,
130 132  
131 133  
... ... @@ -131,14 +133,20 @@
131 133 {
132 134 struct param_attr *attr = container_of(kobj_attr, struct param_attr,
133 135 kobj_attr);
134   - int ret;
  136 + ssize_t ret;
135 137  
  138 + /* MAX_PARAM_DATA_LEN is sizeof(param_data_buf) */
  139 + if (count > MAX_PARAM_DATA_LEN)
  140 + count = MAX_PARAM_DATA_LEN;
  141 +
136 142 mutex_lock(&opal_sysparam_mutex);
137 143 memcpy(param_data_buf, buf, count);
138 144 ret = opal_set_sys_param(attr->param_id, attr->param_size,
139 145 param_data_buf);
140 146 mutex_unlock(&opal_sysparam_mutex);
141   - return ret ? ret : count;
  147 + if (!ret)
  148 + ret = count;
  149 + return ret;
142 150 }
143 151  
144 152 void __init opal_sys_param_init(void)
145 153  
... ... @@ -214,13 +222,13 @@
214 222 }
215 223  
216 224 if (of_property_read_u32_array(sysparam, "param-len", size, count)) {
217   - pr_err("SYSPARAM: Missing propery param-len in the DT\n");
  225 + pr_err("SYSPARAM: Missing property param-len in the DT\n");
218 226 goto out_free_perm;
219 227 }
220 228  
221 229  
222 230 if (of_property_read_u8_array(sysparam, "param-perm", perm, count)) {
223   - pr_err("SYSPARAM: Missing propery param-perm in the DT\n");
  231 + pr_err("SYSPARAM: Missing property param-perm in the DT\n");
224 232 goto out_free_perm;
225 233 }
226 234  
... ... @@ -233,6 +241,12 @@
233 241  
234 242 /* For each of the parameters, populate the parameter attributes */
235 243 for (i = 0; i < count; i++) {
  244 + if (size[i] > MAX_PARAM_DATA_LEN) {
  245 + pr_warn("SYSPARAM: Not creating parameter %d as size "
  246 + "exceeds buffer length\n", i);
  247 + continue;
  248 + }
  249 +
236 250 sysfs_attr_init(&attr[i].kobj_attr.attr);
237 251 attr[i].param_id = id[i];
238 252 attr[i].param_size = size[i];
arch/powerpc/platforms/powernv/opal.c
... ... @@ -242,14 +242,14 @@
242 242 void opal_notifier_enable(void)
243 243 {
244 244 int64_t rc;
245   - uint64_t evt = 0;
  245 + __be64 evt = 0;
246 246  
247 247 atomic_set(&opal_notifier_hold, 0);
248 248  
249 249 /* Process pending events */
250 250 rc = opal_poll_events(&evt);
251 251 if (rc == OPAL_SUCCESS && evt)
252   - opal_do_notifier(evt);
  252 + opal_do_notifier(be64_to_cpu(evt));
253 253 }
254 254  
255 255 void opal_notifier_disable(void)
... ... @@ -529,7 +529,7 @@
529 529  
530 530 opal_handle_interrupt(virq_to_hw(irq), &events);
531 531  
532   - opal_do_notifier(events);
  532 + opal_do_notifier(be64_to_cpu(events));
533 533  
534 534 return IRQ_HANDLED;
535 535 }
... ... @@ -638,4 +638,67 @@
638 638  
639 639 /* Export this so that test modules can use it */
640 640 EXPORT_SYMBOL_GPL(opal_invalid_call);
  641 +
  642 +/* Convert a region of vmalloc memory to an opal sg list */
  643 +struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
  644 + unsigned long vmalloc_size)
  645 +{
  646 + struct opal_sg_list *sg, *first = NULL;
  647 + unsigned long i = 0;
  648 +
  649 + sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
  650 + if (!sg)
  651 + goto nomem;
  652 +
  653 + first = sg;
  654 +
  655 + while (vmalloc_size > 0) {
  656 + uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
  657 + uint64_t length = min(vmalloc_size, PAGE_SIZE);
  658 +
  659 + sg->entry[i].data = cpu_to_be64(data);
  660 + sg->entry[i].length = cpu_to_be64(length);
  661 + i++;
  662 +
  663 + if (i >= SG_ENTRIES_PER_NODE) {
  664 + struct opal_sg_list *next;
  665 +
  666 + next = kzalloc(PAGE_SIZE, GFP_KERNEL);
  667 + if (!next)
  668 + goto nomem;
  669 +
  670 + sg->length = cpu_to_be64(
  671 + i * sizeof(struct opal_sg_entry) + 16);
  672 + i = 0;
  673 + sg->next = cpu_to_be64(__pa(next));
  674 + sg = next;
  675 + }
  676 +
  677 + vmalloc_addr += length;
  678 + vmalloc_size -= length;
  679 + }
  680 +
  681 + sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);
  682 +
  683 + return first;
  684 +
  685 +nomem:
  686 + pr_err("%s : Failed to allocate memory\n", __func__);
  687 + opal_free_sg_list(first);
  688 + return NULL;
  689 +}
  690 +
  691 +void opal_free_sg_list(struct opal_sg_list *sg)
  692 +{
  693 + while (sg) {
  694 + uint64_t next = be64_to_cpu(sg->next);
  695 +
  696 + kfree(sg);
  697 +
  698 + if (next)
  699 + sg = __va(next);
  700 + else
  701 + sg = NULL;
  702 + }
  703 +}
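
The two helpers added above are what the dump and flash code earlier in
this diff switch to; a condensed usage sketch (names taken from the
opal-dump.c hunk, error handling trimmed):

    /* Build an OPAL SG list describing a vmalloc'd buffer, pass the
     * physical address of its first node to firmware, then free it. */
    struct opal_sg_list *list;
    int64_t rc;

    list = opal_vmalloc_to_sg_list(dump->buffer, dump->size);
    if (!list)
            return -ENOMEM;

    rc = opal_dump_read(dump->id, __pa(list));   /* first entry address */

    opal_free_sg_list(list);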
arch/powerpc/platforms/powernv/pci-ioda.c
... ... @@ -343,7 +343,6 @@
343 343 pci_name(dev));
344 344 continue;
345 345 }
346   - pci_dev_get(dev);
347 346 pdn->pcidev = dev;
348 347 pdn->pe_number = pe->pe_number;
349 348 pe->dma_weight += pnv_ioda_dma_weight(dev);
... ... @@ -462,7 +461,7 @@
462 461  
463 462 pe = &phb->ioda.pe_array[pdn->pe_number];
464 463 WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
465   - set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
  464 + set_iommu_table_base(&pdev->dev, &pe->tce32_table);
466 465 }
467 466  
468 467 static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
arch/powerpc/platforms/powernv/setup.c
... ... @@ -162,18 +162,62 @@
162 162 }
163 163  
164 164 #ifdef CONFIG_KEXEC
  165 +static void pnv_kexec_wait_secondaries_down(void)
  166 +{
  167 + int my_cpu, i, notified = -1;
  168 +
  169 + my_cpu = get_cpu();
  170 +
  171 + for_each_online_cpu(i) {
  172 + uint8_t status;
  173 + int64_t rc;
  174 +
  175 + if (i == my_cpu)
  176 + continue;
  177 +
  178 + for (;;) {
  179 + rc = opal_query_cpu_status(get_hard_smp_processor_id(i),
  180 + &status);
  181 + if (rc != OPAL_SUCCESS || status != OPAL_THREAD_STARTED)
  182 + break;
  183 + barrier();
  184 + if (i != notified) {
  185 + printk(KERN_INFO "kexec: waiting for cpu %d "
  186 + "(physical %d) to enter OPAL\n",
  187 + i, paca[i].hw_cpu_id);
  188 + notified = i;
  189 + }
  190 + }
  191 + }
  192 +}
  193 +
165 194 static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
166 195 {
167 196 xics_kexec_teardown_cpu(secondary);
168 197  
169   - /* Return secondary CPUs to firmware on OPAL v3 */
170   - if (firmware_has_feature(FW_FEATURE_OPALv3) && secondary) {
  198 + /* On OPAL v3, we return all CPUs to firmware */
  199 +
  200 + if (!firmware_has_feature(FW_FEATURE_OPALv3))
  201 + return;
  202 +
  203 + if (secondary) {
  204 + /* Return secondary CPUs to firmware on OPAL v3 */
171 205 mb();
172 206 get_paca()->kexec_state = KEXEC_STATE_REAL_MODE;
173 207 mb();
174 208  
175 209 /* Return the CPU to OPAL */
176 210 opal_return_cpu();
  211 + } else if (crash_shutdown) {
  212 + /*
  213 + * On crash, we don't wait for secondaries to go
  214 + * down as they might be unreachable or hung, so
  215 + * instead we just wait a bit and move on.
  216 + */
  217 + mdelay(1);
  218 + } else {
  219 + /* Primary waits for the secondaries to have reached OPAL */
  220 + pnv_kexec_wait_secondaries_down();
177 221 }
178 222 }
179 223 #endif /* CONFIG_KEXEC */
arch/powerpc/platforms/powernv/smp.c
... ... @@ -30,6 +30,7 @@
30 30 #include <asm/cputhreads.h>
31 31 #include <asm/xics.h>
32 32 #include <asm/opal.h>
  33 +#include <asm/runlatch.h>
33 34  
34 35 #include "powernv.h"
35 36  
36 37  
... ... @@ -156,7 +157,9 @@
156 157 */
157 158 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
158 159 while (!generic_check_cpu_restart(cpu)) {
  160 + ppc64_runlatch_off();
159 161 power7_nap();
  162 + ppc64_runlatch_on();
160 163 if (!generic_check_cpu_restart(cpu)) {
161 164 DBG("CPU%d Unexpected exit while offline !\n", cpu);
162 165 /* We may be getting an IPI, so we re-enable
arch/powerpc/platforms/pseries/hotplug-cpu.c
... ... @@ -88,12 +88,13 @@
88 88  
89 89 static void rtas_stop_self(void)
90 90 {
91   - struct rtas_args args = {
92   - .token = cpu_to_be32(rtas_stop_self_token),
  91 + static struct rtas_args args = {
93 92 .nargs = 0,
94 93 .nret = 1,
95 94 .rets = &args.args[0],
96 95 };
  96 +
  97 + args.token = cpu_to_be32(rtas_stop_self_token);
97 98  
98 99 local_irq_disable();
99 100  
arch/powerpc/platforms/pseries/hotplug-memory.c
... ... @@ -100,11 +100,11 @@
100 100  
101 101 start_pfn = base >> PAGE_SHIFT;
102 102  
103   - if (!pfn_valid(start_pfn)) {
104   - memblock_remove(base, memblock_size);
105   - return 0;
106   - }
  103 + lock_device_hotplug();
107 104  
  105 + if (!pfn_valid(start_pfn))
  106 + goto out;
  107 +
108 108 block_sz = memory_block_size_bytes();
109 109 sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
110 110 nid = memory_add_physaddr_to_nid(base);
111 111  
... ... @@ -114,8 +114,10 @@
114 114 base += MIN_MEMORY_BLOCK_SIZE;
115 115 }
116 116  
  117 +out:
117 118 /* Update memory regions for memory remove */
118 119 memblock_remove(base, memblock_size);
  120 + unlock_device_hotplug();
119 121 return 0;
120 122 }
121 123  
arch/powerpc/sysdev/ppc4xx_pci.c
... ... @@ -1058,7 +1058,7 @@
1058 1058 return 1;
1059 1059 }
1060 1060  
1061   -static int apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
  1061 +static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1062 1062 {
1063 1063 u32 val;
1064 1064