Commit 6ee7e78e7c78d871409ad4df30551c9355be7d0e

Authored by Linus Torvalds

Merge branch 'release' of master.kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of master.kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] replace kmalloc+memset with kzalloc
  [IA64] resolve name clash by renaming is_available_memory()
  [IA64] Need export for csum_ipv6_magic
  [IA64] Fix DISCONTIGMEM without VIRTUAL_MEM_MAP
  [PATCH] Add support for type argument in PAL_GET_PSTATE
  [IA64] tidy up return value of ip_fast_csum
  [IA64] implement csum_ipv6_magic for ia64.
  [IA64] More Itanium PAL spec updates
  [IA64] Update processor_info features
  [IA64] Add se bit to Processor State Parameter structure
  [IA64] Add dp bit to cache and bus check structs
  [IA64] SN: Correctly update smp_affinty mask
  [IA64] sparse cleanups
  [IA64] IA64 Kexec/kdump

Showing 34 changed files

arch/ia64/Kconfig
... ... @@ -434,6 +434,29 @@
434 434  
435 435 source "drivers/sn/Kconfig"
436 436  
  437 +config KEXEC
  438 + bool "kexec system call (EXPERIMENTAL)"
  439 + depends on EXPERIMENTAL && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
  440 + help
  441 + kexec is a system call that implements the ability to shut down your
  442 + current kernel, and to start another kernel. It is like a reboot
  443 + but it is independent of the system firmware. And like a reboot
  444 + you can start any kernel with it, not just Linux.
  445 +
  446 + The name comes from the similarity to the exec system call.
  447 +
  448 + It is an ongoing process to be certain the hardware in a machine
  449 + is properly shut down, so do not be surprised if this code does not
  450 + initially work for you. It may help to enable device hotplugging
  451 + support. As of this writing the exact hardware interface is
  452 + strongly in flux, so no good recommendation can be made.
  453 +
  454 +config CRASH_DUMP
  455 + bool "kernel crash dumps (EXPERIMENTAL)"
  456 + depends on EXPERIMENTAL && IA64_MCA_RECOVERY && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
  457 + help
  458 + Generate crash dump after being started by kexec.
  459 +
437 460 source "drivers/firmware/Kconfig"
438 461  
439 462 source "fs/Kconfig.binfmt"
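
For context on what the new KEXEC option enables: userspace loads a new kernel through the kexec_load system call (wired up in entry.S below) and then jumps into it via reboot(). A minimal sketch of that flow, assuming the libc headers expose SYS_kexec_load, with error handling elided; in practice the kexec-tools package builds the segment list from the new kernel image:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <sys/reboot.h>
    #include <linux/kexec.h>
    #include <linux/reboot.h>

    int main(void)
    {
        struct kexec_segment seg[1] = {{ 0 }};
        unsigned long entry = 0;    /* entry point of the loaded image */

        /* ... fill seg[] and entry from the new kernel image ... */
        if (syscall(SYS_kexec_load, entry, 1UL, seg, KEXEC_ARCH_DEFAULT))
            perror("kexec_load");

        /* Boot the loaded kernel; does not return on success. */
        reboot(LINUX_REBOOT_CMD_KEXEC);
        return 1;
    }
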
arch/ia64/hp/common/sba_iommu.c
... ... @@ -1672,15 +1672,13 @@
1672 1672 * SAC (single address cycle) addressable, so allocate a
1673 1673 * pseudo-device to enforce that.
1674 1674 */
1675   - sac = kmalloc(sizeof(*sac), GFP_KERNEL);
  1675 + sac = kzalloc(sizeof(*sac), GFP_KERNEL);
1676 1676 if (!sac)
1677 1677 panic(PFX "Couldn't allocate struct pci_dev");
1678   - memset(sac, 0, sizeof(*sac));
1679 1678  
1680   - controller = kmalloc(sizeof(*controller), GFP_KERNEL);
  1679 + controller = kzalloc(sizeof(*controller), GFP_KERNEL);
1681 1680 if (!controller)
1682 1681 panic(PFX "Couldn't allocate struct pci_controller");
1683   - memset(controller, 0, sizeof(*controller));
1684 1682  
1685 1683 controller->iommu = ioc;
1686 1684 sac->sysdata = controller;
1687 1685  
... ... @@ -1737,11 +1735,9 @@
1737 1735 struct ioc *ioc;
1738 1736 struct ioc_iommu *info;
1739 1737  
1740   - ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
  1738 + ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
1741 1739 if (!ioc)
1742 1740 return NULL;
1743   -
1744   - memset(ioc, 0, sizeof(*ioc));
1745 1741  
1746 1742 ioc->next = ioc_list;
1747 1743 ioc_list = ioc;
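
The conversions in this file are typical of the "replace kmalloc+memset with kzalloc" item: kzalloc(size, flags) behaves exactly like kmalloc(size, flags) followed by memset(ptr, 0, size), so each pair collapses to one call. Schematically:

    /* Before: two calls, with the zeroing easy to forget or misplace. */
    ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
    if (!ioc)
        return NULL;
    memset(ioc, 0, sizeof(*ioc));

    /* After: one call that returns zeroed memory (or NULL on failure). */
    ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
    if (!ioc)
        return NULL;
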
arch/ia64/hp/sim/simserial.c
... ... @@ -684,12 +684,11 @@
684 684 *ret_info = sstate->info;
685 685 return 0;
686 686 }
687   - info = kmalloc(sizeof(struct async_struct), GFP_KERNEL);
  687 + info = kzalloc(sizeof(struct async_struct), GFP_KERNEL);
688 688 if (!info) {
689 689 sstate->count--;
690 690 return -ENOMEM;
691 691 }
692   - memset(info, 0, sizeof(struct async_struct));
693 692 init_waitqueue_head(&info->open_wait);
694 693 init_waitqueue_head(&info->close_wait);
695 694 init_waitqueue_head(&info->delta_msr_wait);
arch/ia64/kernel/Makefile
... ... @@ -28,6 +28,7 @@
28 28 obj-$(CONFIG_CPU_FREQ) += cpufreq/
29 29 obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
30 30 obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
  31 +obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
31 32 obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
32 33 obj-$(CONFIG_AUDIT) += audit.o
33 34 obj-$(CONFIG_PCI_MSI) += msi_ia64.o
arch/ia64/kernel/cpufreq/acpi-cpufreq.c
... ... @@ -68,7 +68,8 @@
68 68  
69 69 dprintk("processor_get_pstate\n");
70 70  
71   - retval = ia64_pal_get_pstate(&pstate_index);
  71 + retval = ia64_pal_get_pstate(&pstate_index,
  72 + PAL_GET_PSTATE_TYPE_INSTANT);
72 73 *value = (u32) pstate_index;
73 74  
74 75 if (retval)
... ... @@ -91,7 +92,7 @@
91 92 dprintk("extract_clock\n");
92 93  
93 94 for (i = 0; i < data->acpi_data.state_count; i++) {
94   - if (value >= data->acpi_data.states[i].control)
  95 + if (value == data->acpi_data.states[i].status)
95 96 return data->acpi_data.states[i].core_frequency;
96 97 }
97 98 return data->acpi_data.states[i-1].core_frequency;
... ... @@ -117,11 +118,7 @@
117 118 goto migrate_end;
118 119 }
119 120  
120   - /*
121   - * processor_get_pstate gets the average frequency since the
122   - * last get. So, do two PAL_get_freq()...
123   - */
124   - ret = processor_get_pstate(&value);
  121 + /* processor_get_pstate gets the instantaneous frequency */
125 122 ret = processor_get_pstate(&value);
126 123  
127 124 if (ret) {
arch/ia64/kernel/crash.c
  1 +/*
  2 + * arch/ia64/kernel/crash.c
  3 + *
  4 + * Architecture specific (ia64) functions for kexec based crash dumps.
  5 + *
  6 + * Created by: Khalid Aziz <khalid.aziz@hp.com>
  7 + * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  8 + * Copyright (C) 2005 Intel Corp Zou Nan hai <nanhai.zou@intel.com>
  9 + *
  10 + */
  11 +#include <linux/smp.h>
  12 +#include <linux/delay.h>
  13 +#include <linux/crash_dump.h>
  14 +#include <linux/bootmem.h>
  15 +#include <linux/kexec.h>
  16 +#include <linux/elfcore.h>
  17 +#include <linux/sysctl.h>
  18 +#include <linux/init.h>
  19 +
  20 +#include <asm/kdebug.h>
  21 +#include <asm/mca.h>
  22 +#include <asm/uaccess.h>
  23 +
  24 +int kdump_status[NR_CPUS];
  25 +atomic_t kdump_cpu_freezed;
  26 +atomic_t kdump_in_progress;
  27 +int kdump_on_init = 1;
  28 +ssize_t
  29 +copy_oldmem_page(unsigned long pfn, char *buf,
  30 + size_t csize, unsigned long offset, int userbuf)
  31 +{
  32 + void *vaddr;
  33 +
  34 + if (!csize)
  35 + return 0;
  36 + vaddr = __va(pfn<<PAGE_SHIFT);
  37 + if (userbuf) {
  38 + if (copy_to_user(buf, (vaddr + offset), csize)) {
  39 + return -EFAULT;
  40 + }
  41 + } else
  42 + memcpy(buf, (vaddr + offset), csize);
  43 + return csize;
  44 +}
  45 +
  46 +static inline Elf64_Word
  47 +*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
  48 + size_t data_len)
  49 +{
  50 + struct elf_note *note = (struct elf_note *)buf;
  51 + note->n_namesz = strlen(name) + 1;
  52 + note->n_descsz = data_len;
  53 + note->n_type = type;
  54 + buf += (sizeof(*note) + 3)/4;
  55 + memcpy(buf, name, note->n_namesz);
  56 + buf += (note->n_namesz + 3)/4;
  57 + memcpy(buf, data, data_len);
  58 + buf += (data_len + 3)/4;
  59 + return buf;
  60 +}
  61 +
  62 +static void
  63 +final_note(void *buf)
  64 +{
  65 + memset(buf, 0, sizeof(struct elf_note));
  66 +}
  67 +
  68 +extern void ia64_dump_cpu_regs(void *);
  69 +
  70 +static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);
  71 +
  72 +void
   73 +crash_save_this_cpu(void)
  74 +{
  75 + void *buf;
  76 + unsigned long cfm, sof, sol;
  77 +
  78 + int cpu = smp_processor_id();
  79 + struct elf_prstatus *prstatus = &per_cpu(elf_prstatus, cpu);
  80 +
  81 + elf_greg_t *dst = (elf_greg_t *)&(prstatus->pr_reg);
  82 + memset(prstatus, 0, sizeof(*prstatus));
  83 + prstatus->pr_pid = current->pid;
  84 +
  85 + ia64_dump_cpu_regs(dst);
  86 + cfm = dst[43];
  87 + sol = (cfm >> 7) & 0x7f;
  88 + sof = cfm & 0x7f;
  89 + dst[46] = (unsigned long)ia64_rse_skip_regs((unsigned long *)dst[46],
  90 + sof - sol);
  91 +
  92 + buf = (u64 *) per_cpu_ptr(crash_notes, cpu);
  93 + if (!buf)
  94 + return;
  95 + buf = append_elf_note(buf, "CORE", NT_PRSTATUS, prstatus,
  96 + sizeof(*prstatus));
  97 + final_note(buf);
  98 +}
  99 +
  100 +static int
  101 +kdump_wait_cpu_freeze(void)
  102 +{
  103 + int cpu_num = num_online_cpus() - 1;
  104 + int timeout = 1000;
  105 + while(timeout-- > 0) {
  106 + if (atomic_read(&kdump_cpu_freezed) == cpu_num)
  107 + return 0;
  108 + udelay(1000);
  109 + }
  110 + return 1;
  111 +}
  112 +
  113 +void
  114 +machine_crash_shutdown(struct pt_regs *pt)
  115 +{
  116 + /* This function is only called after the system
  117 + * has panicked or is otherwise in a critical state.
  118 + * The minimum amount of code to allow a kexec'd kernel
  119 + * to run successfully needs to happen here.
  120 + *
  121 + * In practice this means shooting down the other cpus in
  122 + * an SMP system.
  123 + */
  124 + kexec_disable_iosapic();
  125 +#ifdef CONFIG_SMP
  126 + kdump_smp_send_stop();
  127 + if (kdump_wait_cpu_freeze() && kdump_on_init) {
  128 + /* not all cpus responded to the IPI; send INIT to freeze them */
  129 + kdump_smp_send_init();
  130 + }
  131 +#endif
  132 +}
  133 +
  134 +static void
  135 +machine_kdump_on_init(void)
  136 +{
  137 + local_irq_disable();
  138 + kexec_disable_iosapic();
  139 + machine_kexec(ia64_kimage);
  140 +}
  141 +
  142 +void
  143 +kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
  144 +{
  145 + int cpuid;
  146 + local_irq_disable();
  147 + cpuid = smp_processor_id();
  148 + crash_save_this_cpu();
  149 + current->thread.ksp = (__u64)info->sw - 16;
  150 + atomic_inc(&kdump_cpu_freezed);
  151 + kdump_status[cpuid] = 1;
  152 + mb();
  153 + if (cpuid == 0) {
  154 + for (;;)
  155 + cpu_relax();
  156 + } else
  157 + ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
  158 +}
  159 +
  160 +static int
  161 +kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
  162 +{
  163 + struct ia64_mca_notify_die *nd;
  164 + struct die_args *args = data;
  165 +
  166 + if (!kdump_on_init)
  167 + return NOTIFY_DONE;
  168 +
  169 + if (val != DIE_INIT_MONARCH_ENTER &&
  170 + val != DIE_INIT_SLAVE_ENTER &&
  171 + val != DIE_MCA_RENDZVOUS_LEAVE &&
  172 + val != DIE_MCA_MONARCH_LEAVE)
  173 + return NOTIFY_DONE;
  174 +
  175 + nd = (struct ia64_mca_notify_die *)args->err;
  176 + /* Reason code 1 means machine check rendezvous */
  177 + if ((val == DIE_INIT_MONARCH_ENTER || val == DIE_INIT_SLAVE_ENTER) &&
  178 + nd->sos->rv_rc == 1)
  179 + return NOTIFY_DONE;
  180 +
  181 + switch (val) {
  182 + case DIE_INIT_MONARCH_ENTER:
  183 + machine_kdump_on_init();
  184 + break;
  185 + case DIE_INIT_SLAVE_ENTER:
  186 + unw_init_running(kdump_cpu_freeze, NULL);
  187 + break;
  188 + case DIE_MCA_RENDZVOUS_LEAVE:
  189 + if (atomic_read(&kdump_in_progress))
  190 + unw_init_running(kdump_cpu_freeze, NULL);
  191 + break;
  192 + case DIE_MCA_MONARCH_LEAVE:
  193 + /* die_register->signr indicates whether the MCA is recoverable */
  194 + if (!args->signr)
  195 + machine_kdump_on_init();
  196 + break;
  197 + }
  198 + return NOTIFY_DONE;
  199 +}
  200 +
  201 +#ifdef CONFIG_SYSCTL
  202 +static ctl_table kdump_on_init_table[] = {
  203 + {
  204 + .ctl_name = CTL_UNNUMBERED,
  205 + .procname = "kdump_on_init",
  206 + .data = &kdump_on_init,
  207 + .maxlen = sizeof(int),
  208 + .mode = 0644,
  209 + .proc_handler = &proc_dointvec,
  210 + },
  211 + { .ctl_name = 0 }
  212 +};
  213 +
  214 +static ctl_table sys_table[] = {
  215 + {
  216 + .ctl_name = CTL_KERN,
  217 + .procname = "kernel",
  218 + .mode = 0555,
  219 + .child = kdump_on_init_table,
  220 + },
  221 + { .ctl_name = 0 }
  222 +};
  223 +#endif
  224 +
  225 +static int
  226 +machine_crash_setup(void)
  227 +{
  228 + char *from = strstr(saved_command_line, "elfcorehdr=");
  229 + static struct notifier_block kdump_init_notifier_nb = {
  230 + .notifier_call = kdump_init_notifier,
  231 + };
  232 + int ret;
  233 + if (from)
  234 + elfcorehdr_addr = memparse(from+11, &from);
  235 + saved_max_pfn = (unsigned long)-1;
  236 + if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
  237 + return ret;
  238 +#ifdef CONFIG_SYSCTL
  239 + register_sysctl_table(sys_table, 0);
  240 +#endif
  241 + return 0;
  242 +}
  243 +
  244 +__initcall(machine_crash_setup);
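
The note-packing arithmetic in append_elf_note() above is worth spelling out: buf is an Elf64_Word pointer, so each "(len + 3)/4" rounds a byte length up to a whole number of 4-byte words, giving the header/name/payload layout that ELF core notes require (the crash notes end up in the dump the kdump kernel exposes through /proc/vmcore). A standalone userspace model of the same layout, under the assumption that glibc's Elf64_Nhdr matches the kernel's struct elf_note (it does, field for field):

    #include <elf.h>
    #include <stdint.h>
    #include <string.h>

    /* Returns the next free word after the appended note. */
    static uint32_t *append_note(uint32_t *buf, const char *name,
                                 uint32_t type, const void *data, size_t len)
    {
        Elf64_Nhdr *note = (Elf64_Nhdr *)buf;

        note->n_namesz = strlen(name) + 1;
        note->n_descsz = len;
        note->n_type   = type;
        buf += (sizeof(*note) + 3) / 4;    /* 12-byte header -> 3 words */
        memcpy(buf, name, note->n_namesz);
        buf += (note->n_namesz + 3) / 4;   /* name plus NUL, word-padded */
        memcpy(buf, data, len);
        buf += (len + 3) / 4;              /* payload, word-padded */
        return buf;
    }
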
arch/ia64/kernel/efi.c
... ... @@ -26,6 +26,7 @@
26 26 #include <linux/types.h>
27 27 #include <linux/time.h>
28 28 #include <linux/efi.h>
  29 +#include <linux/kexec.h>
29 30  
30 31 #include <asm/io.h>
31 32 #include <asm/kregs.h>
... ... @@ -41,7 +42,7 @@
41 42 struct efi efi;
42 43 EXPORT_SYMBOL(efi);
43 44 static efi_runtime_services_t *runtime;
44   -static unsigned long mem_limit = ~0UL, max_addr = ~0UL;
  45 +static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
45 46  
46 47 #define efi_call_virt(f, args...) (*(f))(args)
47 48  
... ... @@ -224,7 +225,7 @@
224 225 }
225 226  
226 227 static int
227   -is_available_memory (efi_memory_desc_t *md)
  228 +is_memory_available (efi_memory_desc_t *md)
228 229 {
229 230 if (!(md->attribute & EFI_MEMORY_WB))
230 231 return 0;
... ... @@ -421,6 +422,8 @@
421 422 mem_limit = memparse(cp + 4, &cp);
422 423 } else if (memcmp(cp, "max_addr=", 9) == 0) {
423 424 max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
  425 + } else if (memcmp(cp, "min_addr=", 9) == 0) {
  426 + min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
424 427 } else {
425 428 while (*cp != ' ' && *cp)
426 429 ++cp;
... ... @@ -428,6 +431,8 @@
428 431 ++cp;
429 432 }
430 433 }
  434 + if (min_addr != 0UL)
  435 + printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >> 20);
431 436 if (max_addr != ~0UL)
432 437 printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);
433 438  
434 439  
... ... @@ -887,14 +892,15 @@
887 892 }
888 893 contig_high = GRANULEROUNDDOWN(contig_high);
889 894 }
890   - if (!is_available_memory(md) || md->type == EFI_LOADER_DATA)
  895 + if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)
891 896 continue;
892 897  
893 898 /* Round ends inward to granule boundaries */
894 899 as = max(contig_low, md->phys_addr);
895 900 ae = min(contig_high, efi_md_end(md));
896 901  
897   - /* keep within max_addr= command line arg */
  902 + /* keep within max_addr= and min_addr= command line arg */
  903 + as = max(as, min_addr);
898 904 ae = min(ae, max_addr);
899 905 if (ae <= as)
900 906 continue;
... ... @@ -962,7 +968,7 @@
962 968 }
963 969 contig_high = GRANULEROUNDDOWN(contig_high);
964 970 }
965   - if (!is_available_memory(md))
  971 + if (!is_memory_available(md))
966 972 continue;
967 973  
968 974 /*
... ... @@ -1004,7 +1010,8 @@
1004 1010 } else
1005 1011 ae = efi_md_end(md);
1006 1012  
1007   - /* keep within max_addr= command line arg */
  1013 + /* keep within max_addr= and min_addr= command line arg */
  1014 + as = max(as, min_addr);
1008 1015 ae = min(ae, max_addr);
1009 1016 if (ae <= as)
1010 1017 continue;
1011 1018  
... ... @@ -1116,7 +1123,59 @@
1116 1123 */
1117 1124 insert_resource(res, code_resource);
1118 1125 insert_resource(res, data_resource);
  1126 +#ifdef CONFIG_KEXEC
  1127 + insert_resource(res, &efi_memmap_res);
  1128 + insert_resource(res, &boot_param_res);
  1129 + if (crashk_res.end > crashk_res.start)
  1130 + insert_resource(res, &crashk_res);
  1131 +#endif
1119 1132 }
1120 1133 }
1121 1134 }
  1135 +
  1136 +#ifdef CONFIG_KEXEC
  1137 +/* Find a block of memory aligned to 64M, excluding reserved regions;
  1138 + * rsvd_regions must be sorted.
  1139 + */
  1140 +unsigned long
  1141 +kdump_find_rsvd_region (unsigned long size,
  1142 + struct rsvd_region *r, int n)
  1143 +{
  1144 + int i;
  1145 + u64 start, end;
  1146 + u64 alignment = 1UL << _PAGE_SIZE_64M;
  1147 + void *efi_map_start, *efi_map_end, *p;
  1148 + efi_memory_desc_t *md;
  1149 + u64 efi_desc_size;
  1150 +
  1151 + efi_map_start = __va(ia64_boot_param->efi_memmap);
  1152 + efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
  1153 + efi_desc_size = ia64_boot_param->efi_memdesc_size;
  1154 +
  1155 + for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
  1156 + md = p;
  1157 + if (!efi_wb(md))
  1158 + continue;
  1159 + start = ALIGN(md->phys_addr, alignment);
  1160 + end = efi_md_end(md);
  1161 + for (i = 0; i < n; i++) {
  1162 + if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
  1163 + if (__pa(r[i].start) > start + size)
  1164 + return start;
  1165 + start = ALIGN(__pa(r[i].end), alignment);
  1166 + if (i < n-1 && __pa(r[i+1].start) < start + size)
  1167 + continue;
  1168 + else
  1169 + break;
  1170 + }
  1171 + }
  1172 + if (end > start + size)
  1173 + return start;
  1174 + }
  1175 +
  1176 + printk(KERN_WARNING "Cannot reserve 0x%lx bytes of memory for crashdump\n",
  1177 + size);
  1178 + return ~0UL;
  1179 +}
  1180 +#endif
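
The double loop in kdump_find_rsvd_region() is dense; its job is: within each WB-cacheable EFI descriptor, find the first 64M-aligned start whose size-byte window clears every (sorted) reserved region, restarting just past any region it collides with. A simplified standalone model of that search, assuming half-open [start, end) ranges:

    #include <stdint.h>

    struct range { uint64_t start, end; };    /* [start, end) */

    /* Find an aligned size-byte window in [base, limit) avoiding every
     * region in rsvd[] (sorted by start); ~0 means nothing fits. */
    static uint64_t find_window(uint64_t base, uint64_t limit, uint64_t size,
                                uint64_t align, const struct range *rsvd, int n)
    {
        uint64_t start = (base + align - 1) & ~(align - 1);
        int i;

        for (i = 0; i < n; i++) {
            if (rsvd[i].end <= start)
                continue;                  /* region is behind the window */
            if (rsvd[i].start >= start + size)
                break;                     /* window fits before region i */
            /* Collision: restart just past the region, realigned. */
            start = (rsvd[i].end + align - 1) & ~(align - 1);
        }
        return (start + size <= limit) ? start : ~0ULL;
    }
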
arch/ia64/kernel/entry.S
... ... @@ -1575,7 +1575,7 @@
1575 1575 data8 sys_mq_timedreceive // 1265
1576 1576 data8 sys_mq_notify
1577 1577 data8 sys_mq_getsetattr
1578   - data8 sys_ni_syscall // reserved for kexec_load
  1578 + data8 sys_kexec_load
1579 1579 data8 sys_ni_syscall // reserved for vserver
1580 1580 data8 sys_waitid // 1270
1581 1581 data8 sys_add_key
arch/ia64/kernel/ia64_ksyms.c
... ... @@ -14,6 +14,7 @@
14 14  
15 15 #include <asm/checksum.h>
16 16 EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */
  17 +EXPORT_SYMBOL(csum_ipv6_magic);
17 18  
18 19 #include <asm/semaphore.h>
19 20 EXPORT_SYMBOL(__down);
arch/ia64/kernel/iosapic.c
... ... @@ -288,6 +288,27 @@
288 288 /* do nothing... */
289 289 }
290 290  
  291 +
  292 +#ifdef CONFIG_KEXEC
  293 +void
  294 +kexec_disable_iosapic(void)
  295 +{
  296 + struct iosapic_intr_info *info;
  297 + struct iosapic_rte_info *rte;
  298 + u8 vec = 0;
  299 + for (info = iosapic_intr_info; info <
  300 + iosapic_intr_info + IA64_NUM_VECTORS; ++info, ++vec) {
  301 + list_for_each_entry(rte, &info->rtes,
  302 + rte_list) {
  303 + iosapic_write(rte->addr,
  304 + IOSAPIC_RTE_LOW(rte->rte_index),
  305 + IOSAPIC_MASK|vec);
  306 + iosapic_eoi(rte->addr, vec);
  307 + }
  308 + }
  309 +}
  310 +#endif
  311 +
291 312 static void
292 313 mask_irq (unsigned int irq)
293 314 {
arch/ia64/kernel/kprobes.c
... ... @@ -851,7 +851,7 @@
851 851 return;
852 852 }
853 853 } while (unw_unwind(info) >= 0);
854   - lp->bsp = 0;
  854 + lp->bsp = NULL;
855 855 lp->cfm = 0;
856 856 return;
857 857 }
arch/ia64/kernel/machine_kexec.c
  1 +/*
  2 + * arch/ia64/kernel/machine_kexec.c
  3 + *
  4 + * Handle transition of Linux booting another kernel
   5 + * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  6 + * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
  7 + * Copyright (C) 2006 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
  8 + *
  9 + * This source code is licensed under the GNU General Public License,
  10 + * Version 2. See the file COPYING for more details.
  11 + */
  12 +
  13 +#include <linux/mm.h>
  14 +#include <linux/kexec.h>
  15 +#include <linux/cpu.h>
  16 +#include <linux/irq.h>
  17 +#include <asm/mmu_context.h>
  18 +#include <asm/setup.h>
  19 +#include <asm/delay.h>
  20 +#include <asm/meminit.h>
  21 +
  22 +typedef void (*relocate_new_kernel_t)(unsigned long, unsigned long,
  23 + struct ia64_boot_param *, unsigned long);
  24 +
  25 +struct kimage *ia64_kimage;
  26 +
  27 +struct resource efi_memmap_res = {
  28 + .name = "EFI Memory Map",
  29 + .start = 0,
  30 + .end = 0,
  31 + .flags = IORESOURCE_BUSY | IORESOURCE_MEM
  32 +};
  33 +
  34 +struct resource boot_param_res = {
  35 + .name = "Boot parameter",
  36 + .start = 0,
  37 + .end = 0,
  38 + .flags = IORESOURCE_BUSY | IORESOURCE_MEM
  39 +};
  40 +
  41 +
  42 +/*
   43 + * Do whatever setup is needed on the image and the
  44 + * reboot code buffer to allow us to avoid allocations
  45 + * later.
  46 + */
  47 +int machine_kexec_prepare(struct kimage *image)
  48 +{
  49 + void *control_code_buffer;
  50 + const unsigned long *func;
  51 +
  52 + func = (unsigned long *)&relocate_new_kernel;
  53 + /* Pre-load control code buffer to minimize work in kexec path */
  54 + control_code_buffer = page_address(image->control_code_page);
  55 + memcpy((void *)control_code_buffer, (const void *)func[0],
  56 + relocate_new_kernel_size);
  57 + flush_icache_range((unsigned long)control_code_buffer,
  58 + (unsigned long)control_code_buffer + relocate_new_kernel_size);
  59 + ia64_kimage = image;
  60 +
  61 + return 0;
  62 +}
  63 +
  64 +void machine_kexec_cleanup(struct kimage *image)
  65 +{
  66 +}
  67 +
  68 +void machine_shutdown(void)
  69 +{
  70 + int cpu;
  71 +
  72 + for_each_online_cpu(cpu) {
  73 + if (cpu != smp_processor_id())
  74 + cpu_down(cpu);
  75 + }
  76 + kexec_disable_iosapic();
  77 +}
  78 +
  79 +/*
  80 + * Do not allocate memory (or fail in any way) in machine_kexec().
  81 + * We are past the point of no return, committed to rebooting now.
  82 + */
  83 +extern void *efi_get_pal_addr(void);
  84 +static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
  85 +{
  86 + struct kimage *image = arg;
  87 + relocate_new_kernel_t rnk;
  88 + void *pal_addr = efi_get_pal_addr();
  89 + unsigned long code_addr = (unsigned long)page_address(image->control_code_page);
  90 + unsigned long vector;
  91 + int ii;
  92 +
  93 + if (image->type == KEXEC_TYPE_CRASH) {
  94 + crash_save_this_cpu();
  95 + current->thread.ksp = (__u64)info->sw - 16;
  96 + }
  97 +
  98 + /* Interrupts aren't acceptable while we reboot */
  99 + local_irq_disable();
  100 +
  101 + /* Mask CMC and Performance Monitor interrupts */
  102 + ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
  103 + ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
  104 +
  105 + /* Mask ITV and Local Redirect Registers */
  106 + ia64_set_itv(1 << 16);
  107 + ia64_set_lrr0(1 << 16);
  108 + ia64_set_lrr1(1 << 16);
  109 +
  110 + /* terminate possible nested in-service interrupts */
  111 + for (ii = 0; ii < 16; ii++)
  112 + ia64_eoi();
  113 +
  114 + /* unmask TPR and clear any pending interrupts */
  115 + ia64_setreg(_IA64_REG_CR_TPR, 0);
  116 + ia64_srlz_d();
  117 + vector = ia64_get_ivr();
  118 + while (vector != IA64_SPURIOUS_INT_VECTOR) {
  119 + ia64_eoi();
  120 + vector = ia64_get_ivr();
  121 + }
  122 + platform_kernel_launch_event();
  123 + rnk = (relocate_new_kernel_t)&code_addr;
  124 + (*rnk)(image->head, image->start, ia64_boot_param,
  125 + GRANULEROUNDDOWN((unsigned long) pal_addr));
  126 + BUG();
  127 +}
  128 +
  129 +void machine_kexec(struct kimage *image)
  130 +{
  131 + unw_init_running(ia64_machine_kexec, image);
  132 + for(;;);
  133 +}
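
One line above deserves a note: "rnk = (relocate_new_kernel_t)&code_addr;". On ia64, calling through a function pointer dereferences a function descriptor rather than jumping to a raw address, so the cast must point at memory whose first 8 bytes hold the entry point. A hedged illustration of the convention (the struct name here is for explanation only):

    /* What an ia64 function pointer actually designates: */
    struct fdesc {
        unsigned long ip;    /* entry point of the code */
        unsigned long gp;    /* global pointer the callee expects */
    };

    /* code_addr is a local variable holding the entry point, so &code_addr
     * serves as a descriptor with a valid ip field. The bogus gp word that
     * happens to follow it on the stack is harmless only because
     * relocate_new_kernel is position-independent and never uses gp. */
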
arch/ia64/kernel/mca.c
... ... @@ -82,6 +82,7 @@
82 82 #include <asm/system.h>
83 83 #include <asm/sal.h>
84 84 #include <asm/mca.h>
  85 +#include <asm/kexec.h>
85 86  
86 87 #include <asm/irq.h>
87 88 #include <asm/hw_irq.h>
... ... @@ -1238,6 +1239,10 @@
1238 1239 } else {
1239 1240 /* Dump buffered message to console */
1240 1241 ia64_mlogbuf_finish(1);
  1242 +#ifdef CONFIG_CRASH_DUMP
  1243 + atomic_set(&kdump_in_progress, 1);
  1244 + monarch_cpu = -1;
  1245 +#endif
1241 1246 }
1242 1247 if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
1243 1248 == NOTIFY_STOP)
arch/ia64/kernel/palinfo.c
... ... @@ -16,6 +16,7 @@
16 16 * 02/05/2001 S.Eranian fixed module support
17 17 * 10/23/2001 S.Eranian updated pal_perf_mon_info bug fixes
18 18 * 03/24/2004 Ashok Raj updated to work with CPU Hotplug
  19 + * 10/26/2006 Russ Anderson updated processor features to rev 2.2 spec
19 20 */
20 21 #include <linux/types.h>
21 22 #include <linux/errno.h>
22 23  
... ... @@ -314,13 +315,20 @@
314 315 "Protection Key Registers(PKR) : %d\n"
315 316 "Implemented bits in PKR.key : %d\n"
316 317 "Hash Tag ID : 0x%x\n"
317   - "Size of RR.rid : %d\n",
  318 + "Size of RR.rid : %d\n"
  319 + "Max Purges : ",
318 320 vm_info_1.pal_vm_info_1_s.phys_add_size,
319 321 vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
320 322 vm_info_1.pal_vm_info_1_s.max_pkr+1,
321 323 vm_info_1.pal_vm_info_1_s.key_size,
322 324 vm_info_1.pal_vm_info_1_s.hash_tag_id,
323 325 vm_info_2.pal_vm_info_2_s.rid_size);
  326 + if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES)
  327 + p += sprintf(p, "unlimited\n");
  328 + else
  329 + p += sprintf(p, "%d\n",
  330 + vm_info_2.pal_vm_info_2_s.max_purges ?
  331 + vm_info_2.pal_vm_info_2_s.max_purges : 1);
324 332 }
325 333  
326 334 if (ia64_pal_mem_attrib(&attrib) == 0) {
... ... @@ -467,7 +475,11 @@
467 475 NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
468 476 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
469 477 NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,
470   - NULL,NULL,NULL,NULL,NULL,
  478 + "Unimplemented instruction address fault",
  479 + "INIT, PMI, and LINT pins",
  480 + "Simple unimplemented instr addresses",
  481 + "Variable P-state performance",
  482 + "Virtual machine features implemented",
471 483 "XIP,XPSR,XFS implemented",
472 484 "XR1-XR3 implemented",
473 485 "Disable dynamic predicate prediction",
... ... @@ -475,7 +487,11 @@
475 487 "Disable dynamic data cache prefetch",
476 488 "Disable dynamic inst cache prefetch",
477 489 "Disable dynamic branch prediction",
478   - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  490 + NULL, NULL, NULL, NULL,
  491 + "Disable P-states",
  492 + "Enable MCA on Data Poisoning",
  493 + "Enable vmsw instruction",
  494 + "Enable extern environmental notification",
479 495 "Disable BINIT on processor time-out",
480 496 "Disable dynamic power management (DPM)",
481 497 "Disable coherency",
arch/ia64/kernel/perfmon.c
... ... @@ -853,9 +853,8 @@
853 853 * allocate context descriptor
854 854 * must be able to free with interrupts disabled
855 855 */
856   - ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL);
  856 + ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
857 857 if (ctx) {
858   - memset(ctx, 0, sizeof(pfm_context_t));
859 858 DPRINT(("alloc ctx @%p\n", ctx));
860 859 }
861 860 return ctx;
arch/ia64/kernel/perfmon_montecito.h
... ... @@ -45,16 +45,16 @@
45 45 /* pmc29 */ { PFM_REG_NOTIMPL, },
46 46 /* pmc30 */ { PFM_REG_NOTIMPL, },
47 47 /* pmc31 */ { PFM_REG_NOTIMPL, },
48   -/* pmc32 */ { PFM_REG_CONFIG, 0, 0x30f01ffffffffff, 0x30f01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
49   -/* pmc33 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
50   -/* pmc34 */ { PFM_REG_CONFIG, 0, 0xf01ffffffffff, 0xf01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
51   -/* pmc35 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
  48 +/* pmc32 */ { PFM_REG_CONFIG, 0, 0x30f01ffffffffffUL, 0x30f01ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
  49 +/* pmc33 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
  50 +/* pmc34 */ { PFM_REG_CONFIG, 0, 0xf01ffffffffffUL, 0xf01ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
  51 +/* pmc35 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
52 52 /* pmc36 */ { PFM_REG_CONFIG, 0, 0xfffffff0, 0xf, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
53 53 /* pmc37 */ { PFM_REG_MONITOR, 4, 0x0, 0x3fff, NULL, pfm_mont_pmc_check, {RDEP_MONT_IEAR, 0, 0, 0}, {0, 0, 0, 0}},
54 54 /* pmc38 */ { PFM_REG_CONFIG, 0, 0xdb6, 0x2492, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
55 55 /* pmc39 */ { PFM_REG_MONITOR, 6, 0x0, 0xffcf, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
56 56 /* pmc40 */ { PFM_REG_MONITOR, 6, 0x2000000, 0xf01cf, NULL, pfm_mont_pmc_check, {RDEP_MONT_DEAR,0, 0, 0}, {0,0, 0, 0}},
57   -/* pmc41 */ { PFM_REG_CONFIG, 0, 0x00002078fefefefe, 0x1e00018181818, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
  57 +/* pmc41 */ { PFM_REG_CONFIG, 0, 0x00002078fefefefeUL, 0x1e00018181818UL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
58 58 /* pmc42 */ { PFM_REG_MONITOR, 6, 0x0, 0x7ff4f, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
59 59 { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */
60 60 };
... ... @@ -185,7 +185,7 @@
185 185 DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, tmpval, ctx->ctx_fl_using_dbreg, is_loaded));
186 186  
187 187 if (cnum == 41 && is_loaded
188   - && (tmpval & 0x1e00000000000) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
  188 + && (tmpval & 0x1e00000000000UL) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
189 189  
190 190 DPRINT(("pmc[%d]=0x%lx has active pmc41 settings, clearing dbr\n", cnum, tmpval));
191 191  
arch/ia64/kernel/relocate_kernel.S
  1 +/*
  2 + * arch/ia64/kernel/relocate_kernel.S
  3 + *
  4 + * Relocate kexec'able kernel and start it
  5 + *
  6 + * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  7 + * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
  8 + * Copyright (C) 2005 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
  9 + *
  10 + * This source code is licensed under the GNU General Public License,
  11 + * Version 2. See the file COPYING for more details.
  12 + */
  13 +#include <asm/asmmacro.h>
  14 +#include <asm/kregs.h>
  15 +#include <asm/page.h>
  16 +#include <asm/pgtable.h>
  17 +#include <asm/mca_asm.h>
  18 +
  19 + /* Must be relocatable PIC code callable as a C function
  20 + */
  21 +GLOBAL_ENTRY(relocate_new_kernel)
  22 + .prologue
  23 + alloc r31=ar.pfs,4,0,0,0
  24 + .body
  25 +.reloc_entry:
  26 +{
  27 + rsm psr.i| psr.ic
  28 + mov r2=ip
  29 +}
  30 + ;;
  31 +{
  32 + flushrs // must be first insn in group
  33 + srlz.i
  34 +}
  35 + ;;
  36 + dep r2=0,r2,61,3 //to physical address
  37 + ;;
  38 + //first switch to physical mode
  39 + add r3=1f-.reloc_entry, r2
  40 + movl r16 = IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_IC
  41 + mov ar.rsc=0 // put RSE in enforced lazy mode
  42 + ;;
  43 + add sp=(memory_stack_end - 16 - .reloc_entry),r2
  44 + add r8=(register_stack - .reloc_entry),r2
  45 + ;;
  46 + mov r18=ar.rnat
  47 + mov ar.bspstore=r8
  48 + ;;
  49 + mov cr.ipsr=r16
  50 + mov cr.iip=r3
  51 + mov cr.ifs=r0
  52 + srlz.i
  53 + ;;
  54 + mov ar.rnat=r18
  55 + rfi
  56 + ;;
  57 +1:
  58 + //physical mode code begin
  59 + mov b6=in1
  60 + dep r28=0,in2,61,3 //to physical address
  61 +
  62 + // purge all TC entries
  63 +#define O(member) IA64_CPUINFO_##member##_OFFSET
  64 + GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
  65 + ;;
  66 + addl r17=O(PTCE_STRIDE),r2
  67 + addl r2=O(PTCE_BASE),r2
  68 + ;;
  69 + ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base
  70 + ld4 r19=[r2],4 // r19=ptce_count[0]
  71 + ld4 r21=[r17],4 // r21=ptce_stride[0]
  72 + ;;
  73 + ld4 r20=[r2] // r20=ptce_count[1]
  74 + ld4 r22=[r17] // r22=ptce_stride[1]
  75 + mov r24=r0
  76 + ;;
  77 + adds r20=-1,r20
  78 + ;;
  79 +#undef O
  80 +2:
  81 + cmp.ltu p6,p7=r24,r19
  82 +(p7) br.cond.dpnt.few 4f
  83 + mov ar.lc=r20
  84 +3:
  85 + ptc.e r18
  86 + ;;
  87 + add r18=r22,r18
  88 + br.cloop.sptk.few 3b
  89 + ;;
  90 + add r18=r21,r18
  91 + add r24=1,r24
  92 + ;;
  93 + br.sptk.few 2b
  94 +4:
  95 + srlz.i
  96 + ;;
  97 + //purge TR entry for kernel text and data
  98 + movl r16=KERNEL_START
  99 + mov r18=KERNEL_TR_PAGE_SHIFT<<2
  100 + ;;
  101 + ptr.i r16, r18
  102 + ptr.d r16, r18
  103 + ;;
  104 + srlz.i
  105 + ;;
  106 +
  107 + // purge TR entry for percpu data
  108 + movl r16=PERCPU_ADDR
  109 + mov r18=PERCPU_PAGE_SHIFT<<2
  110 + ;;
  111 + ptr.d r16,r18
  112 + ;;
  113 + srlz.d
  114 + ;;
  115 +
  116 + // purge TR entry for pal code
  117 + mov r16=in3
  118 + mov r18=IA64_GRANULE_SHIFT<<2
  119 + ;;
  120 + ptr.i r16,r18
  121 + ;;
  122 + srlz.i
  123 + ;;
  124 +
  125 + // purge TR entry for stack
  126 + mov r16=IA64_KR(CURRENT_STACK)
  127 + ;;
  128 + shl r16=r16,IA64_GRANULE_SHIFT
  129 + movl r19=PAGE_OFFSET
  130 + ;;
  131 + add r16=r19,r16
  132 + mov r18=IA64_GRANULE_SHIFT<<2
  133 + ;;
  134 + ptr.d r16,r18
  135 + ;;
  136 + srlz.i
  137 + ;;
  138 +
  139 + //copy segments
  140 + movl r16=PAGE_MASK
  141 + mov r30=in0 // in0 is page_list
  142 + br.sptk.few .dest_page
  143 + ;;
  144 +.loop:
  145 + ld8 r30=[in0], 8;;
  146 +.dest_page:
  147 + tbit.z p0, p6=r30, 0;; // 0x1 dest page
  148 +(p6) and r17=r30, r16
  149 +(p6) br.cond.sptk.few .loop;;
  150 +
  151 + tbit.z p0, p6=r30, 1;; // 0x2 indirect page
  152 +(p6) and in0=r30, r16
  153 +(p6) br.cond.sptk.few .loop;;
  154 +
  155 + tbit.z p0, p6=r30, 2;; // 0x4 end flag
  156 +(p6) br.cond.sptk.few .end_loop;;
  157 +
  158 + tbit.z p6, p0=r30, 3;; // 0x8 source page
  159 +(p6) br.cond.sptk.few .loop
  160 +
  161 + and r18=r30, r16
  162 +
  163 + // simple copy page, may optimize later
  164 + movl r14=PAGE_SIZE/8 - 1;;
  165 + mov ar.lc=r14;;
  166 +1:
  167 + ld8 r14=[r18], 8;;
  168 + st8 [r17]=r14;;
  169 + fc.i r17
  170 + add r17=8, r17
  171 + br.ctop.sptk.few 1b
  172 + br.sptk.few .loop
  173 + ;;
  174 +
  175 +.end_loop:
  176 + sync.i // for fc.i
  177 + ;;
  178 + srlz.i
  179 + ;;
  180 + srlz.d
  181 + ;;
  182 + br.call.sptk.many b0=b6;;
  183 +
  184 +.align 32
  185 +memory_stack:
  186 + .fill 8192, 1, 0
  187 +memory_stack_end:
  188 +register_stack:
  189 + .fill 8192, 1, 0
  190 +register_stack_end:
  191 +relocate_new_kernel_end:
  192 +END(relocate_new_kernel)
  193 +
  194 +.global relocate_new_kernel_size
  195 +relocate_new_kernel_size:
  196 + data8 relocate_new_kernel_end - relocate_new_kernel
  197 +
  198 +GLOBAL_ENTRY(ia64_dump_cpu_regs)
  199 + .prologue
  200 + alloc loc0=ar.pfs,1,2,0,0
  201 + .body
  202 + mov ar.rsc=0 // put RSE in enforced lazy mode
  203 + add loc1=4*8, in0 // save r4 and r5 first
  204 + ;;
  205 +{
  206 + flushrs // flush dirty regs to backing store
  207 + srlz.i
  208 +}
  209 + st8 [loc1]=r4, 8
  210 + ;;
  211 + st8 [loc1]=r5, 8
  212 + ;;
  213 + add loc1=32*8, in0
  214 + mov r4=ar.rnat
  215 + ;;
  216 + st8 [in0]=r0, 8 // r0
  217 + st8 [loc1]=r4, 8 // rnat
  218 + mov r5=pr
  219 + ;;
  220 + st8 [in0]=r1, 8 // r1
  221 + st8 [loc1]=r5, 8 // pr
  222 + mov r4=b0
  223 + ;;
  224 + st8 [in0]=r2, 8 // r2
  225 + st8 [loc1]=r4, 8 // b0
  226 + mov r5=b1;
  227 + ;;
  228 + st8 [in0]=r3, 24 // r3
  229 + st8 [loc1]=r5, 8 // b1
  230 + mov r4=b2
  231 + ;;
  232 + st8 [in0]=r6, 8 // r6
  233 + st8 [loc1]=r4, 8 // b2
  234 + mov r5=b3
  235 + ;;
  236 + st8 [in0]=r7, 8 // r7
  237 + st8 [loc1]=r5, 8 // b3
  238 + mov r4=b4
  239 + ;;
  240 + st8 [in0]=r8, 8 // r8
  241 + st8 [loc1]=r4, 8 // b4
  242 + mov r5=b5
  243 + ;;
  244 + st8 [in0]=r9, 8 // r9
  245 + st8 [loc1]=r5, 8 // b5
  246 + mov r4=b6
  247 + ;;
  248 + st8 [in0]=r10, 8 // r10
  249 + st8 [loc1]=r4, 8 // b6
  250 + mov r5=b7
  251 + ;;
  252 + st8 [in0]=r11, 8 // r11
  253 + st8 [loc1]=r5, 8 // b7
  254 + mov r4=b0
  255 + ;;
  256 + st8 [in0]=r12, 8 // r12
  257 + st8 [loc1]=r4, 8 // ip
  258 + mov r5=loc0
  259 + ;;
  260 + st8 [in0]=r13, 8 // r13
  261 + extr.u r5=r5, 0, 38 // ar.pfs.pfm
  262 + mov r4=r0 // user mask
  263 + ;;
  264 + st8 [in0]=r14, 8 // r14
  265 + st8 [loc1]=r5, 8 // cfm
  266 + ;;
  267 + st8 [in0]=r15, 8 // r15
  268 + st8 [loc1]=r4, 8 // user mask
  269 + mov r5=ar.rsc
  270 + ;;
  271 + st8 [in0]=r16, 8 // r16
  272 + st8 [loc1]=r5, 8 // ar.rsc
  273 + mov r4=ar.bsp
  274 + ;;
  275 + st8 [in0]=r17, 8 // r17
  276 + st8 [loc1]=r4, 8 // ar.bsp
  277 + mov r5=ar.bspstore
  278 + ;;
  279 + st8 [in0]=r18, 8 // r18
  280 + st8 [loc1]=r5, 8 // ar.bspstore
  281 + mov r4=ar.rnat
  282 + ;;
  283 + st8 [in0]=r19, 8 // r19
  284 + st8 [loc1]=r4, 8 // ar.rnat
  285 + mov r5=ar.ccv
  286 + ;;
  287 + st8 [in0]=r20, 8 // r20
  288 + st8 [loc1]=r5, 8 // ar.ccv
  289 + mov r4=ar.unat
  290 + ;;
  291 + st8 [in0]=r21, 8 // r21
  292 + st8 [loc1]=r4, 8 // ar.unat
  293 + mov r5 = ar.fpsr
  294 + ;;
  295 + st8 [in0]=r22, 8 // r22
  296 + st8 [loc1]=r5, 8 // ar.fpsr
  297 + mov r4 = ar.unat
  298 + ;;
  299 + st8 [in0]=r23, 8 // r23
  300 + st8 [loc1]=r4, 8 // unat
  301 + mov r5 = ar.fpsr
  302 + ;;
  303 + st8 [in0]=r24, 8 // r24
  304 + st8 [loc1]=r5, 8 // fpsr
  305 + mov r4 = ar.pfs
  306 + ;;
  307 + st8 [in0]=r25, 8 // r25
  308 + st8 [loc1]=r4, 8 // ar.pfs
  309 + mov r5 = ar.lc
  310 + ;;
  311 + st8 [in0]=r26, 8 // r26
  312 + st8 [loc1]=r5, 8 // ar.lc
  313 + mov r4 = ar.ec
  314 + ;;
  315 + st8 [in0]=r27, 8 // r27
  316 + st8 [loc1]=r4, 8 // ar.ec
  317 + mov r5 = ar.csd
  318 + ;;
  319 + st8 [in0]=r28, 8 // r28
  320 + st8 [loc1]=r5, 8 // ar.csd
  321 + mov r4 = ar.ssd
  322 + ;;
  323 + st8 [in0]=r29, 8 // r29
  324 + st8 [loc1]=r4, 8 // ar.ssd
  325 + ;;
  326 + st8 [in0]=r30, 8 // r30
  327 + ;;
  328 + st8 [in0]=r31, 8 // r31
  329 + mov ar.pfs=loc0
  330 + ;;
  331 + br.ret.sptk.many rp
  332 +END(ia64_dump_cpu_regs)
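
The tbit tests in the copy loop above decode the generic kexec page list: each 8-byte entry is a page address with a flag in its low bits, using the values from include/linux/kexec.h (IND_DESTINATION 0x1, IND_INDIRECTION 0x2, IND_DONE 0x4, IND_SOURCE 0x8). A plain-C model of the walk the assembly performs (simplified; the real loop also flushes each destination line with fc.i):

    void walk_page_list(unsigned long *entry)
    {
        unsigned long *dest = NULL;

        for (;;) {
            unsigned long e = *entry++;

            if (e & 0x1)                  /* IND_DESTINATION */
                dest = (unsigned long *)(e & PAGE_MASK);
            else if (e & 0x2)             /* IND_INDIRECTION: next list page */
                entry = (unsigned long *)(e & PAGE_MASK);
            else if (e & 0x4)             /* IND_DONE */
                break;
            else if (e & 0x8) {           /* IND_SOURCE: copy one page */
                memcpy(dest, (void *)(e & PAGE_MASK), PAGE_SIZE);
                dest += PAGE_SIZE / sizeof(unsigned long);
            }
        }
    }
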
arch/ia64/kernel/setup.c
... ... @@ -43,6 +43,8 @@
43 43 #include <linux/initrd.h>
44 44 #include <linux/pm.h>
45 45 #include <linux/cpufreq.h>
  46 +#include <linux/kexec.h>
  47 +#include <linux/crash_dump.h>
46 48  
47 49 #include <asm/ia32.h>
48 50 #include <asm/machvec.h>
... ... @@ -252,6 +254,41 @@
252 254 efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
253 255 n++;
254 256  
  257 +#ifdef CONFIG_KEXEC
  258 + /* crashkernel=size@offset specifies the size to reserve for a crash
  259 + * kernel. (The offset is ignored, for compatibility with other archs.)
  260 + * By reserving this memory we guarantee that Linux never sets it
  261 + * up as a DMA target. Useful for holding code to do something
  262 + * appropriate after a kernel panic.
  263 + */
  264 + {
  265 + char *from = strstr(saved_command_line, "crashkernel=");
  266 + unsigned long base, size;
  267 + if (from) {
  268 + size = memparse(from + 12, &from);
  269 + if (size) {
  270 + sort_regions(rsvd_region, n);
  271 + base = kdump_find_rsvd_region(size,
  272 + rsvd_region, n);
  273 + if (base != ~0UL) {
  274 + rsvd_region[n].start =
  275 + (unsigned long)__va(base);
  276 + rsvd_region[n].end =
  277 + (unsigned long)__va(base + size);
  278 + n++;
  279 + crashk_res.start = base;
  280 + crashk_res.end = base + size - 1;
  281 + }
  282 + }
  283 + }
  284 + efi_memmap_res.start = ia64_boot_param->efi_memmap;
  285 + efi_memmap_res.end = efi_memmap_res.start +
  286 + ia64_boot_param->efi_memmap_size;
  287 + boot_param_res.start = __pa(ia64_boot_param);
  288 + boot_param_res.end = boot_param_res.start +
  289 + sizeof(*ia64_boot_param);
  290 + }
  291 +#endif
255 292 /* end of memory marker */
256 293 rsvd_region[n].start = ~0UL;
257 294 rsvd_region[n].end = ~0UL;
... ... @@ -262,6 +299,7 @@
262 299  
263 300 sort_regions(rsvd_region, num_rsvd_regions);
264 301 }
  302 +
265 303  
266 304 /**
267 305 * find_initrd - get initrd parameters from the boot parameter structure
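
The crashkernel= handling above parses the size with memparse(), which accepts the usual K/M/G suffixes, then asks kdump_find_rsvd_region() for a 64M-aligned hole; booting with, say, "crashkernel=256M" carves out a 256MB window and points crashk_res at it. A minimal model of the suffix parsing (the real helper lives in lib/cmdline.c):

    #include <stdlib.h>

    static unsigned long parse_size(const char *s, char **retptr)
    {
        unsigned long v = strtoul(s, retptr, 0);

        switch (**retptr) {
        case 'G': case 'g':
            v <<= 10;        /* fall through */
        case 'M': case 'm':
            v <<= 10;        /* fall through */
        case 'K': case 'k':
            v <<= 10;
            (*retptr)++;
        }
        return v;
    }
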
arch/ia64/kernel/smp.c
... ... @@ -30,6 +30,7 @@
30 30 #include <linux/delay.h>
31 31 #include <linux/efi.h>
32 32 #include <linux/bitops.h>
  33 +#include <linux/kexec.h>
33 34  
34 35 #include <asm/atomic.h>
35 36 #include <asm/current.h>
... ... @@ -66,6 +67,7 @@
66 67  
67 68 #define IPI_CALL_FUNC 0
68 69 #define IPI_CPU_STOP 1
  70 +#define IPI_KDUMP_CPU_STOP 3
69 71  
70 72 /* This needs to be cacheline aligned because it is written to by *other* CPUs. */
71 73 static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
... ... @@ -155,7 +157,11 @@
155 157 case IPI_CPU_STOP:
156 158 stop_this_cpu();
157 159 break;
158   -
  160 +#ifdef CONFIG_CRASH_DUMP
  161 + case IPI_KDUMP_CPU_STOP:
  162 + unw_init_running(kdump_cpu_freeze, NULL);
  163 + break;
  164 +#endif
159 165 default:
160 166 printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
161 167 break;
... ... @@ -213,6 +219,26 @@
213 219 send_IPI_single(smp_processor_id(), op);
214 220 }
215 221  
  222 +#ifdef CONFIG_CRASH_DUMP
  223 +void
  224 +kdump_smp_send_stop(void)
  225 +{
  226 + send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
  227 +}
  228 +
  229 +void
  230 +kdump_smp_send_init(void)
  231 +{
  232 + unsigned int cpu, self_cpu;
  233 + self_cpu = smp_processor_id();
  234 + for_each_online_cpu(cpu) {
  235 + if (cpu != self_cpu) {
  236 + if(kdump_status[cpu] == 0)
  237 + platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
  238 + }
  239 + }
  240 +}
  241 +#endif
216 242 /*
217 243 * Called with preemption disabled.
218 244 */
arch/ia64/lib/ip_fast_csum.S
... ... @@ -8,8 +8,8 @@
8 8 * in0: address of buffer to checksum (char *)
9 9 * in1: length of the buffer (int)
10 10 *
11   - * Copyright (C) 2002 Intel Corp.
12   - * Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com>
  11 + * Copyright (C) 2002, 2006 Intel Corp.
  12 + * Copyright (C) 2002, 2006 Ken Chen <kenneth.w.chen@intel.com>
13 13 */
14 14  
15 15 #include <asm/asmmacro.h>
... ... @@ -25,6 +25,9 @@
25 25  
26 26 #define in0 r32
27 27 #define in1 r33
  28 +#define in2 r34
  29 +#define in3 r35
  30 +#define in4 r36
28 31 #define ret0 r8
29 32  
30 33 GLOBAL_ENTRY(ip_fast_csum)
31 34  
... ... @@ -65,8 +68,9 @@
65 68 zxt2 r20=r20
66 69 ;;
67 70 add r20=ret0,r20
  71 + mov r9=0xffff
68 72 ;;
69   - andcm ret0=-1,r20
  73 + andcm ret0=r9,r20
70 74 .restore sp // reset frame state
71 75 br.ret.sptk.many b0
72 76 ;;
... ... @@ -88,4 +92,52 @@
88 92 mov b0=r34
89 93 br.ret.sptk.many b0
90 94 END(ip_fast_csum)
  95 +
  96 +GLOBAL_ENTRY(csum_ipv6_magic)
  97 + ld4 r20=[in0],4
  98 + ld4 r21=[in1],4
  99 + dep r15=in3,in2,32,16
  100 + ;;
  101 + ld4 r22=[in0],4
  102 + ld4 r23=[in1],4
  103 + mux1 r15=r15,@rev
  104 + ;;
  105 + ld4 r24=[in0],4
  106 + ld4 r25=[in1],4
  107 + shr.u r15=r15,16
  108 + add r16=r20,r21
  109 + add r17=r22,r23
  110 + ;;
  111 + ld4 r26=[in0],4
  112 + ld4 r27=[in1],4
  113 + add r18=r24,r25
  114 + add r8=r16,r17
  115 + ;;
  116 + add r19=r26,r27
  117 + add r8=r8,r18
  118 + ;;
  119 + add r8=r8,r19
  120 + add r15=r15,in4
  121 + ;;
  122 + add r8=r8,r15
  123 + ;;
  124 + shr.u r10=r8,32 // now fold sum into short
  125 + zxt4 r11=r8
  126 + ;;
  127 + add r8=r10,r11
  128 + ;;
  129 + shr.u r10=r8,16 // yeah, keep it rolling
  130 + zxt2 r11=r8
  131 + ;;
  132 + add r8=r10,r11
  133 + ;;
  134 + shr.u r10=r8,16 // three times lucky
  135 + zxt2 r11=r8
  136 + ;;
  137 + add r8=r10,r11
  138 + mov r9=0xffff
  139 + ;;
  140 + andcm r8=r9,r8
  141 + br.ret.sptk.many b0
  142 +END(csum_ipv6_magic)
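
csum_ipv6_magic above sums the IPv6 pseudo-header (source address, destination address, upper-layer length, next-header value), folds the 64-bit sum down to 16 bits, and complements it; the final "andcm r8=r9,r8" masks with 0xffff so only the low 16 bits come back, the same tidy-up applied to ip_fast_csum's return value. A plain-C reference of the computation, useful for checking the assembly against test vectors (byte-order bookkeeping simplified to big-endian 16-bit words):

    #include <stdint.h>

    static uint16_t ipv6_pseudo_csum(const uint8_t saddr[16],
                                     const uint8_t daddr[16],
                                     uint32_t len, uint8_t proto)
    {
        uint64_t sum = 0;
        int i;

        for (i = 0; i < 16; i += 2) {
            sum += (uint32_t)(saddr[i] << 8) | saddr[i + 1];
            sum += (uint32_t)(daddr[i] << 8) | daddr[i + 1];
        }
        sum += len;                        /* upper-layer packet length */
        sum += proto;                      /* next-header value */
        while (sum >> 16)                  /* fold carries into 16 bits */
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)(~sum & 0xffff);
    }
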
arch/ia64/pci/pci.c
... ... @@ -125,11 +125,10 @@
125 125 {
126 126 struct pci_controller *controller;
127 127  
128   - controller = kmalloc(sizeof(*controller), GFP_KERNEL);
  128 + controller = kzalloc(sizeof(*controller), GFP_KERNEL);
129 129 if (!controller)
130 130 return NULL;
131 131  
132   - memset(controller, 0, sizeof(*controller));
133 132 controller->segment = seg;
134 133 controller->node = -1;
135 134 return controller;
arch/ia64/sn/kernel/irq.c
... ... @@ -117,7 +117,10 @@
117 117 nasid_t nasid, int slice)
118 118 {
119 119 int vector;
  120 + int cpuid;
  121 +#ifdef CONFIG_SMP
120 122 int cpuphys;
  123 +#endif
121 124 int64_t bridge;
122 125 int local_widget, status;
123 126 nasid_t local_nasid;
... ... @@ -146,7 +149,6 @@
146 149 vector = sn_irq_info->irq_irq;
147 150 /* Free the old PROM new_irq_info structure */
148 151 sn_intr_free(local_nasid, local_widget, new_irq_info);
149   - /* Update kernels new_irq_info with new target info */
150 152 unregister_intr_pda(new_irq_info);
151 153  
152 154 /* allocate a new PROM new_irq_info struct */
... ... @@ -160,8 +162,10 @@
160 162 return NULL;
161 163 }
162 164  
163   - cpuphys = nasid_slice_to_cpuid(nasid, slice);
164   - new_irq_info->irq_cpuid = cpuphys;
  165 + /* Update kernels new_irq_info with new target info */
  166 + cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid,
  167 + new_irq_info->irq_slice);
  168 + new_irq_info->irq_cpuid = cpuid;
165 169 register_intr_pda(new_irq_info);
166 170  
167 171 pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
... ... @@ -180,6 +184,7 @@
180 184 call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
181 185  
182 186 #ifdef CONFIG_SMP
  187 + cpuphys = cpu_physical_id(cpuid);
183 188 set_irq_affinity_info((vector & 0xff), cpuphys, 0);
184 189 #endif
185 190  
... ... @@ -299,6 +304,9 @@
299 304 nasid_t nasid = sn_irq_info->irq_nasid;
300 305 int slice = sn_irq_info->irq_slice;
301 306 int cpu = nasid_slice_to_cpuid(nasid, slice);
  307 +#ifdef CONFIG_SMP
  308 + int cpuphys;
  309 +#endif
302 310  
303 311 pci_dev_get(pci_dev);
304 312 sn_irq_info->irq_cpuid = cpu;
... ... @@ -311,6 +319,10 @@
311 319 spin_unlock(&sn_irq_info_lock);
312 320  
313 321 register_intr_pda(sn_irq_info);
  322 +#ifdef CONFIG_SMP
  323 + cpuphys = cpu_physical_id(cpu);
  324 + set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
  325 +#endif
314 326 }
315 327  
316 328 void sn_irq_unfixup(struct pci_dev *pci_dev)
arch/ia64/sn/kernel/msi_sn.c
... ... @@ -136,10 +136,6 @@
136 136 */
137 137 msg.data = 0x100 + irq;
138 138  
139   -#ifdef CONFIG_SMP
140   - set_irq_affinity_info(irq, sn_irq_info->irq_cpuid, 0);
141   -#endif
142   -
143 139 write_msi_msg(irq, &msg);
144 140 set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
145 141  
arch/ia64/sn/kernel/setup.c
... ... @@ -769,5 +769,13 @@
769 769 return 0;
770 770 return test_bit(id, sn_prom_features);
771 771 }
  772 +
  773 +void
  774 +sn_kernel_launch_event(void)
  775 +{
  776 + /* ignore status until we understand possible failure, if any */
  777 + if (ia64_sn_kernel_launch_event())
  778 + printk(KERN_ERR "KEXEC is not supported in this PROM; please update the PROM.\n");
  779 +}
772 780 EXPORT_SYMBOL(sn_prom_feature_available);
include/asm-ia64/checksum.h
... ... @@ -70,5 +70,11 @@
70 70 return (__force __sum16)~sum;
71 71 }
72 72  
  73 +#define _HAVE_ARCH_IPV6_CSUM 1
  74 +struct in6_addr;
  75 +extern unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
  76 + struct in6_addr *daddr, __u32 len, unsigned short proto,
  77 + unsigned int csum);
  78 +
73 79 #endif /* _ASM_IA64_CHECKSUM_H */
include/asm-ia64/kexec.h
  1 +#ifndef _ASM_IA64_KEXEC_H
  2 +#define _ASM_IA64_KEXEC_H
  3 +
  4 +
  5 +/* Maximum physical address we can use pages from */
  6 +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
  7 +/* Maximum address we can reach in physical address mode */
  8 +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
  9 +/* Maximum address we can use for the control code buffer */
  10 +#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
  11 +
  12 +#define KEXEC_CONTROL_CODE_SIZE (8192 + 8192 + 4096)
  13 +
  14 +/* The native architecture */
  15 +#define KEXEC_ARCH KEXEC_ARCH_IA_64
  16 +
  17 +#define MAX_NOTE_BYTES 1024
  18 +
  19 +#define kexec_flush_icache_page(page) do { \
  20 + unsigned long page_addr = (unsigned long)page_address(page); \
  21 + flush_icache_range(page_addr, page_addr + PAGE_SIZE); \
  22 + } while(0)
  23 +
  24 +extern struct kimage *ia64_kimage;
  25 +DECLARE_PER_CPU(u64, ia64_mca_pal_base);
  26 +const extern unsigned int relocate_new_kernel_size;
  27 +extern void relocate_new_kernel(unsigned long, unsigned long,
  28 + struct ia64_boot_param *, unsigned long);
  29 +static inline void
  30 +crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
  31 +{
  32 +}
  33 +extern struct resource efi_memmap_res;
  34 +extern struct resource boot_param_res;
  35 +extern void kdump_smp_send_stop(void);
  36 +extern void kdump_smp_send_init(void);
  37 +extern void kexec_disable_iosapic(void);
  38 +extern void crash_save_this_cpu(void);
  39 +struct rsvd_region;
  40 +extern unsigned long kdump_find_rsvd_region(unsigned long size,
  41 + struct rsvd_region *rsvd_regions, int n);
  42 +extern void kdump_cpu_freeze(struct unw_frame_info *info, void *arg);
  43 +extern int kdump_status[];
  44 +extern atomic_t kdump_cpu_freezed;
  45 +extern atomic_t kdump_in_progress;
  46 +
  47 +#endif /* _ASM_IA64_KEXEC_H */
include/asm-ia64/machvec.h
... ... @@ -37,6 +37,7 @@
37 37 u8 size);
38 38 typedef void ia64_mv_migrate_t(struct task_struct * task);
39 39 typedef void ia64_mv_pci_fixup_bus_t (struct pci_bus *);
  40 +typedef void ia64_mv_kernel_launch_event_t(void);
40 41  
41 42 /* DMA-mapping interface: */
42 43 typedef void ia64_mv_dma_init (void);
... ... @@ -218,6 +219,7 @@
218 219 ia64_mv_setup_msi_irq_t *setup_msi_irq;
219 220 ia64_mv_teardown_msi_irq_t *teardown_msi_irq;
220 221 ia64_mv_pci_fixup_bus_t *pci_fixup_bus;
  222 + ia64_mv_kernel_launch_event_t *kernel_launch_event;
221 223 } __attribute__((__aligned__(16))); /* align attrib? see above comment */
222 224  
223 225 #define MACHVEC_INIT(name) \
... ... @@ -317,6 +319,9 @@
317 319 #endif
318 320 #ifndef platform_tlb_migrate_finish
319 321 # define platform_tlb_migrate_finish machvec_noop_mm
  322 +#endif
  323 +#ifndef platform_kernel_launch_event
  324 +# define platform_kernel_launch_event machvec_noop
320 325 #endif
321 326 #ifndef platform_dma_init
322 327 # define platform_dma_init swiotlb_init
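
The kernel_launch_event addition follows the standard ia64 machine-vector recipe: declare a typedef, add a slot to struct ia64_machvec, and default the slot to a no-op so platforms that don't care need no code (only sn2 overrides it below). Reduced to its core, the idiom looks like this hypothetical sketch:

    typedef void hook_t(void);

    static void machvec_noop(void) { }

    struct machvec {
        hook_t *kernel_launch_event;   /* never NULL: defaults to the noop */
    };

    /* Callers can then invoke mv->kernel_launch_event() unconditionally,
     * with no per-platform #ifdefs at the call sites. */
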
include/asm-ia64/machvec_sn2.h
... ... @@ -67,6 +67,7 @@
67 67 extern ia64_mv_dma_mapping_error sn_dma_mapping_error;
68 68 extern ia64_mv_dma_supported sn_dma_supported;
69 69 extern ia64_mv_migrate_t sn_migrate;
  70 +extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event;
70 71 extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq;
71 72 extern ia64_mv_teardown_msi_irq_t sn_teardown_msi_irq;
72 73 extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
... ... @@ -121,6 +122,7 @@
121 122 #define platform_dma_mapping_error sn_dma_mapping_error
122 123 #define platform_dma_supported sn_dma_supported
123 124 #define platform_migrate sn_migrate
  125 +#define platform_kernel_launch_event sn_kernel_launch_event
124 126 #ifdef CONFIG_PCI_MSI
125 127 #define platform_setup_msi_irq sn_setup_msi_irq
126 128 #define platform_teardown_msi_irq sn_teardown_msi_irq
include/asm-ia64/meminit.h
... ... @@ -15,11 +15,12 @@
15 15 * - initrd (optional)
16 16 * - command line string
17 17 * - kernel code & data
  18 + * - crash dumping code reserved region
18 19 * - Kernel memory map built from EFI memory map
19 20 *
20 21 * More could be added if necessary
21 22 */
22   -#define IA64_MAX_RSVD_REGIONS 6
  23 +#define IA64_MAX_RSVD_REGIONS 7
23 24  
24 25 struct rsvd_region {
25 26 unsigned long start; /* virtual address of beginning of element */
include/asm-ia64/page.h
... ... @@ -101,7 +101,7 @@
101 101  
102 102 #ifdef CONFIG_VIRTUAL_MEM_MAP
103 103 extern int ia64_pfn_valid (unsigned long pfn);
104   -#elif defined(CONFIG_FLATMEM)
  104 +#else
105 105 # define ia64_pfn_valid(pfn) 1
106 106 #endif
107 107  
108 108  
... ... @@ -110,12 +110,11 @@
110 110 #ifdef CONFIG_DISCONTIGMEM
111 111 # define page_to_pfn(page) ((unsigned long) (page - vmem_map))
112 112 # define pfn_to_page(pfn) (vmem_map + (pfn))
  113 +#else
  114 +# include <asm-generic/memory_model.h>
113 115 #endif
114   -#endif
115   -
116   -#if defined(CONFIG_FLATMEM) || defined(CONFIG_SPARSEMEM)
117   -/* FLATMEM always configures mem_map (mem_map = vmem_map if necessary) */
118   -#include <asm-generic/memory_model.h>
  116 +#else
  117 +# include <asm-generic/memory_model.h>
119 118 #endif
120 119  
121 120 #ifdef CONFIG_FLATMEM
include/asm-ia64/pal.h
... ... @@ -20,6 +20,8 @@
20 20 * 00/05/24 eranian Updated to latest PAL spec, fix structures bugs, added
21 21 * 00/05/25 eranian Support for stack calls, and static physical calls
22 22 * 00/06/18 eranian Support for stacked physical calls
  23 + * 06/10/26 rja Support for Intel Itanium Architecture Software Developer's
  24 + * Manual Rev 2.2 (Jan 2006)
23 25 */
24 26  
25 27 /*
... ... @@ -69,6 +71,8 @@
69 71 #define PAL_PREFETCH_VISIBILITY 41 /* Make Processor Prefetches Visible */
70 72 #define PAL_LOGICAL_TO_PHYSICAL 42 /* returns information on logical to physical processor mapping */
71 73 #define PAL_CACHE_SHARED_INFO 43 /* returns information on caches shared by logical processor */
  74 +#define PAL_GET_HW_POLICY 48 /* Get current hardware resource sharing policy */
  75 +#define PAL_SET_HW_POLICY 49 /* Set current hardware resource sharing policy */
72 76  
73 77 #define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */
74 78 #define PAL_HALT_INFO 257 /* return the low power capabilities of processor */
... ... @@ -80,6 +84,11 @@
80 84 #define PAL_SET_PSTATE 263 /* set the P-state */
81 85 #define PAL_BRAND_INFO 274 /* Processor branding information */
82 86  
  87 +#define PAL_GET_PSTATE_TYPE_LASTSET 0
  88 +#define PAL_GET_PSTATE_TYPE_AVGANDRESET 1
  89 +#define PAL_GET_PSTATE_TYPE_AVGNORESET 2
  90 +#define PAL_GET_PSTATE_TYPE_INSTANT 3
  91 +
83 92 #ifndef __ASSEMBLY__
84 93  
85 94 #include <linux/types.h>
... ... @@ -102,6 +111,7 @@
102 111 * cache without side effects
103 112 * and "restrict" was 1
104 113 */
  114 +#define PAL_STATUS_REQUIRES_MEMORY (-9) /* Call requires PAL memory buffer */
105 115  
106 116 /* Processor cache level in the hierarchy */
107 117 typedef u64 pal_cache_level_t;
... ... @@ -456,7 +466,9 @@
456 466 * by the processor
457 467 */
458 468  
459   - reserved2 : 11,
  469 + se : 1, /* Shared error. MCA in a
  470 + shared structure */
  471 + reserved2 : 10,
460 472 cc : 1, /* Cache check */
461 473 tc : 1, /* TLB check */
462 474 bc : 1, /* Bus check */
463 475  
... ... @@ -487,10 +499,12 @@
487 499 * error occurred
488 500 */
489 501 wiv : 1, /* Way field valid */
490   - reserved2 : 10,
  502 + reserved2 : 1,
  503 + dp : 1, /* Data poisoned on MBE */
  504 + reserved3 : 8,
491 505  
492 506 index : 20, /* Cache line index */
493   - reserved3 : 2,
  507 + reserved4 : 2,
494 508  
495 509 is : 1, /* instruction set (1 == ia32) */
496 510 iv : 1, /* instruction set field valid */
... ... @@ -557,7 +571,7 @@
557 571 type : 8, /* Bus xaction type*/
558 572 sev : 5, /* Bus error severity*/
559 573 hier : 2, /* Bus hierarchy level */
560   - reserved1 : 1,
  574 + dp : 1, /* Data poisoned on MBE */
561 575 bsi : 8, /* Bus error status
562 576 * info
563 577 */
... ... @@ -834,7 +848,9 @@
834 848 u64 pbf_req_bus_parking : 1;
835 849 u64 pbf_bus_lock_mask : 1;
836 850 u64 pbf_enable_half_xfer_rate : 1;
837   - u64 pbf_reserved2 : 22;
  851 + u64 pbf_reserved2 : 20;
  852 + u64 pbf_enable_shared_line_replace : 1;
  853 + u64 pbf_enable_exclusive_line_replace : 1;
838 854 u64 pbf_disable_xaction_queueing : 1;
839 855 u64 pbf_disable_resp_err_check : 1;
840 856 u64 pbf_disable_berr_check : 1;
... ... @@ -1077,6 +1093,24 @@
1077 1093 return iprv.status;
1078 1094 }
1079 1095  
  1096 +/*
  1097 + * Get the current hardware resource sharing policy of the processor
  1098 + */
  1099 +static inline s64
  1100 +ia64_pal_get_hw_policy (u64 proc_num, u64 *cur_policy, u64 *num_impacted,
  1101 + u64 *la)
  1102 +{
  1103 + struct ia64_pal_retval iprv;
  1104 + PAL_CALL(iprv, PAL_GET_HW_POLICY, proc_num, 0, 0);
  1105 + if (cur_policy)
  1106 + *cur_policy = iprv.v0;
  1107 + if (num_impacted)
  1108 + *num_impacted = iprv.v1;
  1109 + if (la)
  1110 + *la = iprv.v2;
  1111 + return iprv.status;
  1112 +}
  1113 +
1080 1114 /* Make the processor enter HALT or one of the implementation dependent low
1081 1115 * power states where prefetching and execution are suspended and cache and
1082 1116 * TLB coherency is not maintained.
1083 1117  
... ... @@ -1112,10 +1146,10 @@
1112 1146  
1113 1147 /* Get the current P-state information */
1114 1148 static inline s64
1115   -ia64_pal_get_pstate (u64 *pstate_index)
  1149 +ia64_pal_get_pstate (u64 *pstate_index, unsigned long type)
1116 1150 {
1117 1151 struct ia64_pal_retval iprv;
1118   - PAL_CALL_STK(iprv, PAL_GET_PSTATE, 0, 0, 0);
  1152 + PAL_CALL_STK(iprv, PAL_GET_PSTATE, type, 0, 0);
1119 1153 *pstate_index = iprv.v0;
1120 1154 return iprv.status;
1121 1155 }
... ... @@ -1401,6 +1435,17 @@
1401 1435 return iprv.status;
1402 1436 }
1403 1437  
  1438 +/*
  1439 + * Set the current hardware resource sharing policy of the processor
  1440 + */
  1441 +static inline s64
  1442 +ia64_pal_set_hw_policy (u64 policy)
  1443 +{
  1444 + struct ia64_pal_retval iprv;
  1445 + PAL_CALL(iprv, PAL_SET_HW_POLICY, policy, 0, 0);
  1446 + return iprv.status;
  1447 +}
  1448 +
1404 1449 /* Cause the processor to enter SHUTDOWN state, where prefetching and execution are
1405 1450 * suspended, but cause cache and TLB coherency to be maintained.
1406 1451 * This is usually called in IA-32 mode.
1407 1452  
... ... @@ -1524,12 +1569,15 @@
1524 1569 } pal_vm_info_1_s;
1525 1570 } pal_vm_info_1_u_t;
1526 1571  
  1572 +#define PAL_MAX_PURGES 0xFFFF /* all ones means unlimited */
  1573 +
1527 1574 typedef union pal_vm_info_2_u {
1528 1575 u64 pvi2_val;
1529 1576 struct {
1530 1577 u64 impl_va_msb : 8,
1531 1578 rid_size : 8,
1532   - reserved : 48;
  1579 + max_purges : 16,
  1580 + reserved : 32;
1533 1581 } pal_vm_info_2_s;
1534 1582 } pal_vm_info_2_u_t;
1535 1583  
include/asm-ia64/sn/sn_sal.h
... ... @@ -88,6 +88,8 @@
88 88 #define SN_SAL_INJECT_ERROR 0x02000067
89 89 #define SN_SAL_SET_CPU_NUMBER 0x02000068
90 90  
  91 +#define SN_SAL_KERNEL_LAUNCH_EVENT 0x02000069
  92 +
91 93 /*
92 94 * Service-specific constants
93 95 */
... ... @@ -1153,6 +1155,13 @@
1153 1155 struct ia64_sal_retval rv;
1154 1156  
1155 1157 SAL_CALL_NOLOCK(rv, SN_SAL_SET_CPU_NUMBER, cpu, 0, 0, 0, 0, 0, 0);
  1158 + return rv.status;
  1159 +}
  1160 +static inline int
  1161 +ia64_sn_kernel_launch_event(void)
  1162 +{
  1163 + struct ia64_sal_retval rv;
  1164 + SAL_CALL_NOLOCK(rv, SN_SAL_KERNEL_LAUNCH_EVENT, 0, 0, 0, 0, 0, 0, 0);
1156 1165 return rv.status;
1157 1166 }
1158 1167 #endif /* _ASM_IA64_SN_SN_SAL_H */
include/linux/kexec.h
... ... @@ -109,6 +109,10 @@
109 109 extern struct kimage *kexec_image;
110 110 extern struct kimage *kexec_crash_image;
111 111  
  112 +#ifndef kexec_flush_icache_page
  113 +#define kexec_flush_icache_page(page)
  114 +#endif
  115 +
112 116 #define KEXEC_ON_CRASH 0x00000001
113 117 #define KEXEC_ARCH_MASK 0xffff0000
114 118  
... ... @@ -133,6 +137,7 @@
133 137 extern struct resource crashk_res;
134 138 typedef u32 note_buf_t[MAX_NOTE_BYTES/4];
135 139 extern note_buf_t *crash_notes;
  140 +
136 141  
137 142 #else /* !CONFIG_KEXEC */
138 143 struct pt_regs;
kernel/kexec.c
... ... @@ -852,6 +852,7 @@
852 852 memset(ptr + uchunk, 0, mchunk - uchunk);
853 853 }
854 854 result = copy_from_user(ptr, buf, uchunk);
  855 + kexec_flush_icache_page(page);
855 856 kunmap(page);
856 857 if (result) {
857 858 result = (result < 0) ? result : -EIO;