Commit 19be9e8aa7820e4a8266d779476e057af7b798be

Authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux

Pull powerpc updates from Michael Ellerman:
 "There are some bug fixes and cleanups to facilitate fixes, a MAINTAINERS
  update, and a new syscall (bpf)"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux:
  powerpc/numa: ensure per-cpu NUMA mappings are correct on topology update
  powerpc/numa: use cached value of update->cpu in update_cpu_topology
  cxl: Fix PSL error due to duplicate segment table entries
  powerpc/mm: Use appropriate ESID mask in copro_calculate_slb()
  cxl: Refactor cxl_load_segment() and find_free_sste()
  cxl: Disable secondary hash in segment table
  Revert "powerpc/powernv: Fix endian bug in LPC bus debugfs accessors"
  powernv: Use _GLOBAL_TOC for opal wrappers
  powerpc: Wire up sys_bpf() syscall
  MAINTAINERS: nx-842 driver maintainer change
  powerpc/mm: Remove redundant #if case
  powerpc/mm: Fix build error with hugetlbfs disabled

Showing 12 changed files Side-by-side Diff

... ... @@ -4608,7 +4608,7 @@
4608 4608 F: drivers/crypto/nx/
4609 4609  
4610 4610 IBM Power 842 compression accelerator
4611   -M: Nathan Fontenot <nfont@linux.vnet.ibm.com>
  4611 +M: Dan Streetman <ddstreet@us.ibm.com>
4612 4612 S: Supported
4613 4613 F: drivers/crypto/nx/nx-842.c
4614 4614 F: include/linux/nx842.h
arch/powerpc/include/asm/hugetlb.h
... ... @@ -71,7 +71,7 @@
71 71  
72 72 void flush_dcache_icache_hugepage(struct page *page);
73 73  
74   -#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT)
  74 +#if defined(CONFIG_PPC_MM_SLICES)
75 75 int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
76 76 unsigned long len);
77 77 #else
arch/powerpc/include/asm/systbl.h
... ... @@ -365,4 +365,5 @@
365 365 SYSCALL_SPU(seccomp)
366 366 SYSCALL_SPU(getrandom)
367 367 SYSCALL_SPU(memfd_create)
  368 +SYSCALL_SPU(bpf)
arch/powerpc/include/asm/unistd.h
... ... @@ -12,7 +12,7 @@
12 12 #include <uapi/asm/unistd.h>
13 13  
14 14  
15   -#define __NR_syscalls 361
  15 +#define __NR_syscalls 362
16 16  
17 17 #define __NR__exit __NR_exit
18 18 #define NR_syscalls __NR_syscalls
arch/powerpc/include/uapi/asm/unistd.h
... ... @@ -383,6 +383,7 @@
383 383 #define __NR_seccomp 358
384 384 #define __NR_getrandom 359
385 385 #define __NR_memfd_create 360
  386 +#define __NR_bpf 361
386 387  
387 388 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
arch/powerpc/mm/copro_fault.c
... ... @@ -99,8 +99,6 @@
99 99 u64 vsid;
100 100 int psize, ssize;
101 101  
102   - slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
103   -
104 102 switch (REGION_ID(ea)) {
105 103 case USER_REGION_ID:
106 104 pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
... ... @@ -133,6 +131,7 @@
133 131 vsid |= mmu_psize_defs[psize].sllp |
134 132 ((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);
135 133  
  134 + slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
136 135 slb->vsid = vsid;
137 136  
138 137 return 0;
arch/powerpc/mm/numa.c
... ... @@ -1509,11 +1509,14 @@
1509 1509 cpu = smp_processor_id();
1510 1510  
1511 1511 for (update = data; update; update = update->next) {
  1512 + int new_nid = update->new_nid;
1512 1513 if (cpu != update->cpu)
1513 1514 continue;
1514 1515  
1515   - unmap_cpu_from_node(update->cpu);
1516   - map_cpu_to_node(update->cpu, update->new_nid);
  1516 + unmap_cpu_from_node(cpu);
  1517 + map_cpu_to_node(cpu, new_nid);
  1518 + set_cpu_numa_node(cpu, new_nid);
  1519 + set_cpu_numa_mem(cpu, local_memory_node(new_nid));
1517 1520 vdso_getcpu_init();
1518 1521 }
1519 1522  
arch/powerpc/mm/slice.c
... ... @@ -682,6 +682,7 @@
682 682 slice_convert(mm, mask, psize);
683 683 }
684 684  
  685 +#ifdef CONFIG_HUGETLB_PAGE
685 686 /*
686 687 * is_hugepage_only_range() is used by generic code to verify whether
687 688 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
... ... @@ -726,4 +727,5 @@
726 727 #endif
727 728 return !slice_check_fit(mask, available);
728 729 }
  730 +#endif
arch/powerpc/platforms/powernv/opal-lpc.c
... ... @@ -191,7 +191,6 @@
191 191 {
192 192 struct lpc_debugfs_entry *lpc = filp->private_data;
193 193 u32 data, pos, len, todo;
194   - __be32 bedata;
195 194 int rc;
196 195  
197 196 if (!access_ok(VERIFY_WRITE, ubuf, count))
198 197  
... ... @@ -214,10 +213,9 @@
214 213 len = 2;
215 214 }
216 215 rc = opal_lpc_read(opal_lpc_chip_id, lpc->lpc_type, pos,
217   - &bedata, len);
  216 + &data, len);
218 217 if (rc)
219 218 return -ENXIO;
220   - data = be32_to_cpu(bedata);
221 219 switch(len) {
222 220 case 4:
223 221 rc = __put_user((u32)data, (u32 __user *)ubuf);
arch/powerpc/platforms/powernv/opal-wrappers.S
... ... @@ -58,7 +58,7 @@
58 58 */
59 59  
60 60 #define OPAL_CALL(name, token) \
61   - _GLOBAL(name); \
  61 + _GLOBAL_TOC(name); \
62 62 mflr r0; \
63 63 std r0,16(r1); \
64 64 li r0,token; \
drivers/misc/cxl/fault.c
... ... @@ -21,60 +21,64 @@
21 21  
22 22 #include "cxl.h"
23 23  
24   -static struct cxl_sste* find_free_sste(struct cxl_sste *primary_group,
25   - bool sec_hash,
26   - struct cxl_sste *secondary_group,
27   - unsigned int *lru)
  24 +static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
28 25 {
29   - unsigned int i, entry;
30   - struct cxl_sste *sste, *group = primary_group;
  26 + return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
  27 + (sste->esid_data == cpu_to_be64(slb->esid)));
  28 +}
31 29  
32   - for (i = 0; i < 2; i++) {
33   - for (entry = 0; entry < 8; entry++) {
34   - sste = group + entry;
35   - if (!(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
36   - return sste;
37   - }
38   - if (!sec_hash)
39   - break;
40   - group = secondary_group;
  30 +/*
  31 + * This finds a free SSTE for the given SLB, or returns NULL if it's already in
  32 + * the segment table.
  33 + */
  34 +static struct cxl_sste* find_free_sste(struct cxl_context *ctx,
  35 + struct copro_slb *slb)
  36 +{
  37 + struct cxl_sste *primary, *sste, *ret = NULL;
  38 + unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
  39 + unsigned int entry;
  40 + unsigned int hash;
  41 +
  42 + if (slb->vsid & SLB_VSID_B_1T)
  43 + hash = (slb->esid >> SID_SHIFT_1T) & mask;
  44 + else /* 256M */
  45 + hash = (slb->esid >> SID_SHIFT) & mask;
  46 +
  47 + primary = ctx->sstp + (hash << 3);
  48 +
  49 + for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
  50 + if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
  51 + ret = sste;
  52 + if (sste_matches(sste, slb))
  53 + return NULL;
41 54 }
  55 + if (ret)
  56 + return ret;
  57 +
42 58 /* Nothing free, select an entry to cast out */
43   - if (sec_hash && (*lru & 0x8))
44   - sste = secondary_group + (*lru & 0x7);
45   - else
46   - sste = primary_group + (*lru & 0x7);
47   - *lru = (*lru + 1) & 0xf;
  59 + ret = primary + ctx->sst_lru;
  60 + ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;
48 61  
49   - return sste;
  62 + return ret;
50 63 }
51 64  
52 65 static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
53 66 {
54 67 /* mask is the group index, we search primary and secondary here. */
55   - unsigned int mask = (ctx->sst_size >> 7)-1; /* SSTP0[SegTableSize] */
56   - bool sec_hash = 1;
57 68 struct cxl_sste *sste;
58   - unsigned int hash;
59 69 unsigned long flags;
60 70  
61   -
62   - sec_hash = !!(cxl_p1n_read(ctx->afu, CXL_PSL_SR_An) & CXL_PSL_SR_An_SC);
63   -
64   - if (slb->vsid & SLB_VSID_B_1T)
65   - hash = (slb->esid >> SID_SHIFT_1T) & mask;
66   - else /* 256M */
67   - hash = (slb->esid >> SID_SHIFT) & mask;
68   -
69 71 spin_lock_irqsave(&ctx->sste_lock, flags);
70   - sste = find_free_sste(ctx->sstp + (hash << 3), sec_hash,
71   - ctx->sstp + ((~hash & mask) << 3), &ctx->sst_lru);
  72 + sste = find_free_sste(ctx, slb);
  73 + if (!sste)
  74 + goto out_unlock;
72 75  
73 76 pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
74 77 sste - ctx->sstp, slb->vsid, slb->esid);
75 78  
76 79 sste->vsid_data = cpu_to_be64(slb->vsid);
77 80 sste->esid_data = cpu_to_be64(slb->esid);
  81 +out_unlock:
78 82 spin_unlock_irqrestore(&ctx->sste_lock, flags);
79 83 }
80 84  
drivers/misc/cxl/native.c
... ... @@ -417,7 +417,7 @@
417 417 ctx->elem->haurp = 0; /* disable */
418 418 ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));
419 419  
420   - sr = CXL_PSL_SR_An_SC;
  420 + sr = 0;
421 421 if (ctx->master)
422 422 sr |= CXL_PSL_SR_An_MP;
423 423 if (mfspr(SPRN_LPCR) & LPCR_TC)
... ... @@ -508,7 +508,7 @@
508 508 u64 sr;
509 509 int rc;
510 510  
511   - sr = CXL_PSL_SR_An_SC;
  511 + sr = 0;
512 512 set_endian(sr);
513 513 if (ctx->master)
514 514 sr |= CXL_PSL_SR_An_MP;