Commit 9129d6ea475b7e9f216c8324ea05b7a0d8aba540
Exists in master and in 4 other branches
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Increase default nodes shift to 10, nr_cpus to 1024
  [IA64] remove redundant local_irq_save() calls from sn_sal.h
  [IA64] panic if topology_init kzalloc fails
  [IA64-SGI] Silent data corruption caused by XPC V2.
8 changed files (side-by-side diff):
arch/ia64/Kconfig
| ... | ... | @@ -258,7 +258,7 @@ |
| 258 | 258 | int "Maximum number of CPUs (2-1024)" |
| 259 | 259 | range 2 1024 |
| 260 | 260 | depends on SMP |
| 261 | - default "64" | |
| 261 | + default "1024" | |
| 262 | 262 | help |
| 263 | 263 | You should set this to the number of CPUs in your system, but |
| 264 | 264 | keep in mind that a kernel compiled for, e.g., 2 CPUs will boot but |
| ... | ... | @@ -354,7 +354,7 @@ |
| 354 | 354 | config NODES_SHIFT |
| 355 | 355 | int "Max num nodes shift(3-10)" |
| 356 | 356 | range 3 10 |
| 357 | - default "8" | |
| 357 | + default "10" | |
| 358 | 358 | depends on NEED_MULTIPLE_NODES |
| 359 | 359 | help |
| 360 | 360 | This option specifies the maximum number of nodes in your SSI system. |
arch/ia64/kernel/topology.c
| ... | ... | @@ -67,10 +67,8 @@ |
| 67 | 67 | #endif |
| 68 | 68 | |
| 69 | 69 | sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL); |
| 70 | - if (!sysfs_cpus) { | |
| 71 | - err = -ENOMEM; | |
| 72 | - goto out; | |
| 73 | - } | |
| 70 | + if (!sysfs_cpus) | |
| 71 | + panic("kzalloc in topology_init failed - NR_CPUS too big?"); | |
| 74 | 72 | |
| 75 | 73 | for_each_present_cpu(i) { |
| 76 | 74 | if((err = arch_register_cpu(i))) |
arch/ia64/sn/kernel/xpc_channel.c
| ... | ... | @@ -279,8 +279,8 @@ |
| 279 | 279 | return part->reason; |
| 280 | 280 | } |
| 281 | 281 | |
| 282 | - bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst), | |
| 283 | - (u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL); | |
| 282 | + bte_ret = xp_bte_copy((u64) src, (u64) dst, (u64) cnt, | |
| 283 | + (BTE_NORMAL | BTE_WACQUIRE), NULL); | |
| 284 | 284 | if (bte_ret == BTE_SUCCESS) { |
| 285 | 285 | return xpcSuccess; |
| 286 | 286 | } |
arch/ia64/sn/kernel/xpc_main.c
| ... | ... | @@ -1052,6 +1052,8 @@ |
| 1052 | 1052 | if (xpc_sysctl) { |
| 1053 | 1053 | unregister_sysctl_table(xpc_sysctl); |
| 1054 | 1054 | } |
| 1055 | + | |
| 1056 | + kfree(xpc_remote_copy_buffer_base); | |
| 1055 | 1057 | } |
| 1056 | 1058 | |
| 1057 | 1059 | |
| 1058 | 1060 | |
| 1059 | 1061 | |
| ... | ... | @@ -1212,25 +1214,21 @@ |
| 1212 | 1214 | partid_t partid; |
| 1213 | 1215 | struct xpc_partition *part; |
| 1214 | 1216 | pid_t pid; |
| 1217 | + size_t buf_size; | |
| 1215 | 1218 | |
| 1216 | 1219 | |
| 1217 | 1220 | if (!ia64_platform_is("sn2")) { |
| 1218 | 1221 | return -ENODEV; |
| 1219 | 1222 | } |
| 1220 | 1223 | |
| 1221 | - /* | |
| 1222 | - * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng | |
| 1223 | - * various portions of a partition's reserved page. Its size is based | |
| 1224 | - * on the size of the reserved page header and part_nasids mask. So we | |
| 1225 | - * need to ensure that the other items will fit as well. | |
| 1226 | - */ | |
| 1227 | - if (XPC_RP_VARS_SIZE > XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES) { | |
| 1228 | - dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n"); | |
| 1229 | - return -EPERM; | |
| 1230 | - } | |
| 1231 | - DBUG_ON((u64) xpc_remote_copy_buffer != | |
| 1232 | - L1_CACHE_ALIGN((u64) xpc_remote_copy_buffer)); | |
| 1233 | 1224 | |
| 1225 | + buf_size = max(XPC_RP_VARS_SIZE, | |
| 1226 | + XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES); | |
| 1227 | + xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size, | |
| 1228 | + GFP_KERNEL, &xpc_remote_copy_buffer_base); | |
| 1229 | + if (xpc_remote_copy_buffer == NULL) | |
| 1230 | + return -ENOMEM; | |
| 1231 | + | |
| 1234 | 1232 | snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part"); |
| 1235 | 1233 | snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan"); |
| 1236 | 1234 | |
| ... | ... | @@ -1293,6 +1291,8 @@ |
| 1293 | 1291 | if (xpc_sysctl) { |
| 1294 | 1292 | unregister_sysctl_table(xpc_sysctl); |
| 1295 | 1293 | } |
| 1294 | + | |
| 1295 | + kfree(xpc_remote_copy_buffer_base); | |
| 1296 | 1296 | return -EBUSY; |
| 1297 | 1297 | } |
| 1298 | 1298 | |
| ... | ... | @@ -1311,6 +1311,8 @@ |
| 1311 | 1311 | if (xpc_sysctl) { |
| 1312 | 1312 | unregister_sysctl_table(xpc_sysctl); |
| 1313 | 1313 | } |
| 1314 | + | |
| 1315 | + kfree(xpc_remote_copy_buffer_base); | |
| 1314 | 1316 | return -EBUSY; |
| 1315 | 1317 | } |
| 1316 | 1318 | |
| ... | ... | @@ -1362,6 +1364,8 @@ |
| 1362 | 1364 | if (xpc_sysctl) { |
| 1363 | 1365 | unregister_sysctl_table(xpc_sysctl); |
| 1364 | 1366 | } |
| 1367 | + | |
| 1368 | + kfree(xpc_remote_copy_buffer_base); | |
| 1365 | 1369 | return -EBUSY; |
| 1366 | 1370 | } |
| 1367 | 1371 |
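The hunks above all revolve around one pairing: the cacheline-aligned pointer returned by `xpc_kmalloc_cacheline_aligned()` is what the copy code uses, while the raw pointer handed back through `xpc_remote_copy_buffer_base` is the one every teardown and error path must `kfree()`. A condensed view of that pattern, pieced together from the hunks above rather than an additional change:

```c
/*
 * In xpc_init(): size the buffer for whichever is larger, the reserved-page
 * header plus part_nasids mask or the vars structure, then allocate it
 * cacheline aligned, remembering the raw kmalloc pointer for later freeing.
 */
buf_size = max(XPC_RP_VARS_SIZE, XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size, GFP_KERNEL,
					&xpc_remote_copy_buffer_base);
if (xpc_remote_copy_buffer == NULL)
	return -ENOMEM;

/* ... and on xpc_exit() and every error path: free the base pointer,
 * never the aligned pointer. */
kfree(xpc_remote_copy_buffer_base);
```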
arch/ia64/sn/kernel/xpc_partition.c
| ... | ... | @@ -71,19 +71,15 @@ |
| 71 | 71 | * Generic buffer used to store a local copy of portions of a remote |
| 72 | 72 | * partition's reserved page (either its header and part_nasids mask, |
| 73 | 73 | * or its vars). |
| 74 | - * | |
| 75 | - * xpc_discovery runs only once and is a seperate thread that is | |
| 76 | - * very likely going to be processing in parallel with receiving | |
| 77 | - * interrupts. | |
| 78 | 74 | */ |
| 79 | -char ____cacheline_aligned xpc_remote_copy_buffer[XPC_RP_HEADER_SIZE + | |
| 80 | - XP_NASID_MASK_BYTES]; | |
| 75 | +char *xpc_remote_copy_buffer; | |
| 76 | +void *xpc_remote_copy_buffer_base; | |
| 81 | 77 | |
| 82 | 78 | |
| 83 | 79 | /* |
| 84 | 80 | * Guarantee that the kmalloc'd memory is cacheline aligned. |
| 85 | 81 | */ |
| 86 | -static void * | |
| 82 | +void * | |
| 87 | 83 | xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) |
| 88 | 84 | { |
| 89 | 85 | /* see if kmalloc will give us cachline aligned memory by default */ |
| ... | ... | @@ -148,7 +144,7 @@ |
| 148 | 144 | } |
| 149 | 145 | } |
| 150 | 146 | |
| 151 | - bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_len, | |
| 147 | + bte_res = xp_bte_copy(rp_pa, buf, buf_len, | |
| 152 | 148 | (BTE_NOTIFY | BTE_WACQUIRE), NULL); |
| 153 | 149 | if (bte_res != BTE_SUCCESS) { |
| 154 | 150 | dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res); |
| ... | ... | @@ -447,7 +443,7 @@ |
| 447 | 443 | |
| 448 | 444 | /* pull the remote_hb cache line */ |
| 449 | 445 | bres = xp_bte_copy(part->remote_vars_pa, |
| 450 | - ia64_tpa((u64) remote_vars), | |
| 446 | + (u64) remote_vars, | |
| 451 | 447 | XPC_RP_VARS_SIZE, |
| 452 | 448 | (BTE_NOTIFY | BTE_WACQUIRE), NULL); |
| 453 | 449 | if (bres != BTE_SUCCESS) { |
| ... | ... | @@ -498,8 +494,7 @@ |
| 498 | 494 | |
| 499 | 495 | |
| 500 | 496 | /* pull over the reserved page header and part_nasids mask */ |
| 501 | - | |
| 502 | - bres = xp_bte_copy(*remote_rp_pa, ia64_tpa((u64) remote_rp), | |
| 497 | + bres = xp_bte_copy(*remote_rp_pa, (u64) remote_rp, | |
| 503 | 498 | XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes, |
| 504 | 499 | (BTE_NOTIFY | BTE_WACQUIRE), NULL); |
| 505 | 500 | if (bres != BTE_SUCCESS) { |
| 506 | 501 | |
| ... | ... | @@ -554,11 +549,8 @@ |
| 554 | 549 | return xpcVarsNotSet; |
| 555 | 550 | } |
| 556 | 551 | |
| 557 | - | |
| 558 | 552 | /* pull over the cross partition variables */ |
| 559 | - | |
| 560 | - bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars), | |
| 561 | - XPC_RP_VARS_SIZE, | |
| 553 | + bres = xp_bte_copy(remote_vars_pa, (u64) remote_vars, XPC_RP_VARS_SIZE, | |
| 562 | 554 | (BTE_NOTIFY | BTE_WACQUIRE), NULL); |
| 563 | 555 | if (bres != BTE_SUCCESS) { |
| 564 | 556 | return xpc_map_bte_errors(bres); |
| ... | ... | @@ -1239,7 +1231,7 @@ |
| 1239 | 1231 | |
| 1240 | 1232 | part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa); |
| 1241 | 1233 | |
| 1242 | - bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask), | |
| 1234 | + bte_res = xp_bte_copy(part_nasid_pa, (u64) nasid_mask, | |
| 1243 | 1235 | xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL); |
| 1244 | 1236 | |
| 1245 | 1237 | return xpc_map_bte_errors(bte_res); |
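The only change to `xpc_kmalloc_cacheline_aligned()` itself is dropping `static` so that xpc_main.c can call it. Its body is not part of this hunk; the following is only a rough sketch of how such a helper can work (consistent with the "see if kmalloc will give us cacheline aligned memory" comment retained above, but not reproduced verbatim from the tree):

```c
/*
 * Sketch: return an L1-cacheline-aligned buffer of at least 'size' bytes.
 * *base receives the raw kmalloc pointer, which is what the caller must
 * eventually pass to kfree().
 */
void *
xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kmalloc gives us cacheline-aligned memory by default */
	*base = kmalloc(size, flags);
	if (*base == NULL)
		return NULL;
	if ((u64) *base == L1_CACHE_ALIGN((u64) *base))
		return *base;
	kfree(*base);

	/* it did not, so pad the allocation and align the result by hand */
	*base = kmalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;
	return (void *) L1_CACHE_ALIGN((u64) *base);
}
```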
include/asm-ia64/sn/sn_sal.h
| ... | ... | @@ -706,12 +706,9 @@ |
| 706 | 706 | sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array) |
| 707 | 707 | { |
| 708 | 708 | struct ia64_sal_retval ret_stuff; |
| 709 | - unsigned long irq_flags; | |
| 710 | 709 | |
| 711 | - local_irq_save(irq_flags); | |
| 712 | 710 | ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_MEMPROTECT, paddr, len, |
| 713 | 711 | (u64)nasid_array, perms, 0, 0, 0); |
| 714 | - local_irq_restore(irq_flags); | |
| 715 | 712 | return ret_stuff.status; |
| 716 | 713 | } |
| 717 | 714 | #define SN_MEMPROT_ACCESS_CLASS_0 0x14a080 |
| 718 | 715 | |
| 719 | 716 | |
| ... | ... | @@ -1143,12 +1140,9 @@ |
| 1143 | 1140 | sn_inject_error(u64 paddr, u64 *data, u64 *ecc) |
| 1144 | 1141 | { |
| 1145 | 1142 | struct ia64_sal_retval ret_stuff; |
| 1146 | - unsigned long irq_flags; | |
| 1147 | 1143 | |
| 1148 | - local_irq_save(irq_flags); | |
| 1149 | 1144 | ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_INJECT_ERROR, paddr, (u64)data, |
| 1150 | 1145 | (u64)ecc, 0, 0, 0, 0); |
| 1151 | - local_irq_restore(irq_flags); | |
| 1152 | 1146 | return ret_stuff.status; |
| 1153 | 1147 | } |
| 1154 | 1148 |
include/asm-ia64/sn/xp.h
| ... | ... | @@ -60,23 +60,37 @@ |
| 60 | 60 | * the bte_copy() once in the hope that the failure was due to a temporary |
| 61 | 61 | * aberration (i.e., the link going down temporarily). |
| 62 | 62 | * |
| 63 | - * See bte_copy for definition of the input parameters. | |
| 63 | + * src - physical address of the source of the transfer. | |
| 64 | + * vdst - virtual address of the destination of the transfer. | |
| 65 | + * len - number of bytes to transfer from source to destination. | |
| 66 | + * mode - see bte_copy() for definition. | |
| 67 | + * notification - see bte_copy() for definition. | |
| 64 | 68 | * |
| 65 | 69 | * Note: xp_bte_copy() should never be called while holding a spinlock. |
| 66 | 70 | */ |
| 67 | 71 | static inline bte_result_t |
| 68 | -xp_bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification) | |
| 72 | +xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification) | |
| 69 | 73 | { |
| 70 | 74 | bte_result_t ret; |
| 75 | + u64 pdst = ia64_tpa(vdst); | |
| 71 | 76 | |
| 72 | 77 | |
| 73 | - ret = bte_copy(src, dest, len, mode, notification); | |
| 78 | + /* | |
| 79 | + * Ensure that the physically mapped memory is contiguous. | |
| 80 | + * | |
| 81 | + * We do this by ensuring that the memory is from region 7 only. | |
| 82 | + * If the need should arise to use memory from one of the other | |
| 83 | + * regions, then modify the BUG_ON() statement to ensure that the | |
| 84 | + * memory from that region is always physically contiguous. | |
| 85 | + */ | |
| 86 | + BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL); | |
| 74 | 87 | |
| 88 | + ret = bte_copy(src, pdst, len, mode, notification); | |
| 75 | 89 | if (ret != BTE_SUCCESS) { |
| 76 | 90 | if (!in_interrupt()) { |
| 77 | 91 | cond_resched(); |
| 78 | 92 | } |
| 79 | - ret = bte_copy(src, dest, len, mode, notification); | |
| 93 | + ret = bte_copy(src, pdst, len, mode, notification); | |
| 80 | 94 | } |
| 81 | 95 | |
| 82 | 96 | return ret; |
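Pieced together from the hunk above, the resulting inline reads as follows: callers now pass a kernel (region 7) virtual destination, and the `ia64_tpa()` translation happens in one place, right next to the contiguity check that guards against the silent corruption this series fixes.

```c
static inline bte_result_t
xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
{
	bte_result_t ret;
	u64 pdst = ia64_tpa(vdst);

	/*
	 * Only region 7 (RGN_KERNEL) addresses are guaranteed to be
	 * physically contiguous, so refuse anything else up front.
	 */
	BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL);

	ret = bte_copy(src, pdst, len, mode, notification);
	if (ret != BTE_SUCCESS) {
		/* retry once in case the failure was a transient link issue */
		if (!in_interrupt())
			cond_resched();
		ret = bte_copy(src, pdst, len, mode, notification);
	}
	return ret;
}
```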
include/asm-ia64/sn/xpc.h
| ... | ... | @@ -683,7 +683,9 @@ |
| 683 | 683 | extern struct xpc_rsvd_page *xpc_rsvd_page; |
| 684 | 684 | extern struct xpc_vars_part *xpc_vars_part; |
| 685 | 685 | extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; |
| 686 | -extern char xpc_remote_copy_buffer[]; | |
| 686 | +extern char *xpc_remote_copy_buffer; | |
| 687 | +extern void *xpc_remote_copy_buffer_base; | |
| 688 | +extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **); | |
| 687 | 689 | extern struct xpc_rsvd_page *xpc_rsvd_page_init(void); |
| 688 | 690 | extern void xpc_allow_IPI_ops(void); |
| 689 | 691 | extern void xpc_restrict_IPI_ops(void); |