Commit 3c43de0ffd58f1d2ada3e6804fe66fdf85ccb2e5

Authored by Linus Torvalds

Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
 - add the new bpf syscall to ARM
 - drop a redundant return statement in __iommu_alloc_remap()
 - fix a performance issue noticed by Thomas Petazzoni with
   kmap_atomic()
 - fix an issue with the L2 cache OF parsing code which caused it to
   incorrectly print warnings on each boot, and make the warning text
   more consistent with the rest of the code

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: 8180/1: mm: implement no-highmem fast path in kmap_atomic_pfn()
  ARM: 8183/1: l2c: Improve l2c310_of_parse() error message
  ARM: 8181/1: Drop extra return statement
  ARM: 8182/1: l2c: Make l2x0_cache_size_of_parse() return 'int'
  ARM: enable bpf syscall

Showing 5 changed files

arch/arm/include/uapi/asm/unistd.h
... ... @@ -412,6 +412,7 @@
412 412 #define __NR_seccomp (__NR_SYSCALL_BASE+383)
413 413 #define __NR_getrandom (__NR_SYSCALL_BASE+384)
414 414 #define __NR_memfd_create (__NR_SYSCALL_BASE+385)
  415 +#define __NR_bpf (__NR_SYSCALL_BASE+386)
415 416  
416 417 /*
417 418 * The following SWIs are ARM private.
arch/arm/kernel/calls.S
... ... @@ -395,6 +395,7 @@
395 395 CALL(sys_seccomp)
396 396 CALL(sys_getrandom)
397 397 /* 385 */ CALL(sys_memfd_create)
  398 + CALL(sys_bpf)
398 399 #ifndef syscalls_counted
399 400 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
400 401 #define syscalls_counted
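
The two hunks above reserve syscall number 386 for bpf(2) in the ARM UAPI header and route it to sys_bpf in the call table. As a rough illustration only (not part of this merge), a userspace probe of the newly wired entry point could look like the sketch below; there was no glibc wrapper at the time, so the call goes through syscall(2), and BPF_MAP_CREATE plus union bpf_attr are assumed from the generic bpf(2) UAPI rather than from these hunks.

```c
/*
 * Hedged userspace sketch (not part of this merge): probe whether the
 * ARM kernel wired up bpf(2).  __NR_bpf is assumed to come from
 * updated kernel headers; per the unistd.h hunk above it is
 * __NR_SYSCALL_BASE+386 on ARM.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>		/* union bpf_attr, BPF_MAP_CREATE */

int main(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));

	/*
	 * A zeroed attr is not a valid map request; the point is only
	 * to see whether the syscall exists.  ENOSYS means the table
	 * entry is missing, any other errno means sys_bpf answered.
	 */
	if (syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)) < 0 &&
	    errno == ENOSYS)
		printf("bpf(2) is not wired up on this kernel\n");
	else
		printf("bpf(2) syscall is reachable\n");

	return 0;
}
```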
arch/arm/mm/cache-l2x0.c
... ... @@ -956,7 +956,7 @@
956 956 * @associativity: variable to return the calculated associativity in
957 957 * @max_way_size: the maximum size in bytes for the cache ways
958 958 */
959   -static void __init l2x0_cache_size_of_parse(const struct device_node *np,
  959 +static int __init l2x0_cache_size_of_parse(const struct device_node *np,
960 960 u32 *aux_val, u32 *aux_mask,
961 961 u32 *associativity,
962 962 u32 max_way_size)
... ... @@ -974,7 +974,7 @@
974 974 of_property_read_u32(np, "cache-line-size", &line_size);
975 975  
976 976 if (!cache_size || !sets)
977   - return;
  977 + return -ENODEV;
978 978  
979 979 /* All these l2 caches have the same line = block size actually */
980 980 if (!line_size) {
... ... @@ -1009,7 +1009,7 @@
1009 1009  
1010 1010 if (way_size > max_way_size) {
1011 1011 pr_err("L2C OF: set size %dKB is too large\n", way_size);
1012   - return;
  1012 + return -EINVAL;
1013 1013 }
1014 1014  
1015 1015 pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
... ... @@ -1027,7 +1027,7 @@
1027 1027 if (way_size_bits < 1 || way_size_bits > 6) {
1028 1028 pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
1029 1029 way_size);
1030   - return;
  1030 + return -EINVAL;
1031 1031 }
1032 1032  
1033 1033 mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
... ... @@ -1036,6 +1036,8 @@
1036 1036 *aux_val &= ~mask;
1037 1037 *aux_val |= val;
1038 1038 *aux_mask &= ~mask;
  1039 +
  1040 + return 0;
1039 1041 }
1040 1042  
1041 1043 static void __init l2x0_of_parse(const struct device_node *np,
... ... @@ -1046,6 +1048,7 @@
1046 1048 u32 dirty = 0;
1047 1049 u32 val = 0, mask = 0;
1048 1050 u32 assoc;
  1051 + int ret;
1049 1052  
1050 1053 of_property_read_u32(np, "arm,tag-latency", &tag);
1051 1054 if (tag) {
... ... @@ -1068,7 +1071,10 @@
1068 1071 val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
1069 1072 }
1070 1073  
1071   - l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
  1074 + ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
  1075 + if (ret)
  1076 + return;
  1077 +
1072 1078 if (assoc > 8) {
1073 1079 pr_err("l2x0 of: cache setting yield too high associativity\n");
1074 1080 pr_err("l2x0 of: %d calculated, max 8\n", assoc);
... ... @@ -1125,6 +1131,7 @@
1125 1131 u32 tag[3] = { 0, 0, 0 };
1126 1132 u32 filter[2] = { 0, 0 };
1127 1133 u32 assoc;
  1134 + int ret;
1128 1135  
1129 1136 of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
1130 1137 if (tag[0] && tag[1] && tag[2])
... ... @@ -1152,7 +1159,10 @@
1152 1159 l2x0_base + L310_ADDR_FILTER_START);
1153 1160 }
1154 1161  
1155   - l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
  1162 + ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
  1163 + if (ret)
  1164 + return;
  1165 +
1156 1166 switch (assoc) {
1157 1167 case 16:
1158 1168 *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
... ... @@ -1164,8 +1174,8 @@
1164 1174 *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1165 1175 break;
1166 1176 default:
1167   - pr_err("PL310 OF: cache setting yield illegal associativity\n");
1168   - pr_err("PL310 OF: %d calculated, only 8 and 16 legal\n", assoc);
  1177 + pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
  1178 + assoc);
1169 1179 break;
1170 1180 }
1171 1181 }
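
For context on the spurious boot-time warnings: l2x0_cache_size_of_parse() used to return void and bail out silently when the optional "cache-size"/"cache-sets" properties were absent, leaving the caller's on-stack 'assoc' uninitialized, so the subsequent associativity check compared garbage against the 8/16-way limits on every boot. The condensed sketch below mirrors the caller pattern in the hunks above (illustrative, not a verbatim extract) to show how propagating the new 'int' return value avoids that.

```c
/*
 * Condensed sketch of the fixed caller pattern (illustrative, not a
 * verbatim extract of cache-l2x0.c).  A failed parse now makes the
 * caller keep the hardware defaults instead of reading an
 * uninitialized associativity value.
 */
static void __init example_of_parse(const struct device_node *np,
				    u32 *aux_val, u32 *aux_mask)
{
	u32 assoc;	/* only meaningful when the parse succeeds */
	int ret;

	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
	if (ret)
		return;		/* e.g. no "cache-size"/"cache-sets" in the DT */

	if (assoc > 8)		/* now only reached with a valid 'assoc' */
		pr_err("l2x0 of: %d calculated, max 8\n", assoc);
}
```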
arch/arm/mm/dma-mapping.c
... ... @@ -1198,7 +1198,6 @@
1198 1198 {
1199 1199 return dma_common_pages_remap(pages, size,
1200 1200 VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
1201   - return NULL;
1202 1201 }
1203 1202  
1204 1203 /*
arch/arm/mm/highmem.c
... ... @@ -127,8 +127,11 @@
127 127 {
128 128 unsigned long vaddr;
129 129 int idx, type;
  130 + struct page *page = pfn_to_page(pfn);
130 131  
131 132 pagefault_disable();
  133 + if (!PageHighMem(page))
  134 + return page_address(page);
132 135  
133 136 type = kmap_atomic_idx_push();
134 137 idx = type + KM_TYPE_NR * smp_processor_id();
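
The hunk above is the kmap_atomic_pfn() performance fix: for pfns that are not in highmem, the permanent lowmem mapping already exists, so returning page_address() directly skips the fixmap slot bookkeeping and the per-map TLB maintenance. A minimal caller sketch follows, assuming a hypothetical helper copy_from_pfn(); the calling convention is unchanged because kunmap_atomic() already handles lowmem addresses with a plain pagefault_enable().

```c
/*
 * Hedged usage sketch (not from this merge): a driver-style helper
 * copying out of a page identified only by its pfn.  With the fast
 * path added above, lowmem pfns resolve straight to page_address();
 * the helper name copy_from_pfn() is hypothetical.
 */
#include <linux/highmem.h>
#include <linux/string.h>

static void copy_from_pfn(unsigned long pfn, void *dst, size_t len)
{
	void *src = kmap_atomic_pfn(pfn);	/* cheap for !PageHighMem pages */

	memcpy(dst, src, len);
	kunmap_atomic(src);			/* still required; re-enables pagefaults */
}
```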