Commit 1ae1c1d09f220ded48ee9a7d91a65e94f95c4af1
Committed by: Linus Torvalds
Parent: 274023da1e
thp, s390: architecture backend for thp on s390
This implements the architecture backend for transparent hugepages
on s390.

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
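For orientation, the primitives added below are the hooks that the generic THP code in mm/huge_memory.c composes when installing and later tearing down a huge PMD. The fragment below is only an illustrative sketch of that usage, not part of the patch; the variables page, vma, haddr and pmdp stand for the usual context available in the generic code:

	pmd_t entry;

	/* build a segment-table entry from the compound page and the VMA protection */
	entry = mk_pmd(page, vma->vm_page_prot);
	entry = pmd_mkhuge(pmd_mkwrite(entry));
	/* install the huge PMD; on s390 set_pmd_at() is a plain store */
	set_pmd_at(vma->vm_mm, haddr, pmdp, entry);

	/* on zap or split, the entry is invalidated via idte and cleared */
	entry = pmdp_get_and_clear(vma->vm_mm, haddr, pmdp);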
Showing 6 changed files with 220 additions and 18 deletions
arch/s390/include/asm/hugetlb.h
@@ -78,25 +78,8 @@
 		"	csp %1,%3"
 		: "=m" (*pmdp)
 		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
-	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
 }
 
-static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
-{
-	unsigned long sto = (unsigned long) pmdp -
-				pmd_index(address) * sizeof(pmd_t);
-
-	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
-		asm volatile(
-			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
-			: "=m" (*pmdp)
-			: "m" (*pmdp), "a" (sto),
-			  "a" ((address & HPAGE_MASK))
-		);
-	}
-	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
-}
-
 static inline void huge_ptep_invalidate(struct mm_struct *mm,
 					unsigned long address, pte_t *ptep)
 {
@@ -106,6 +89,7 @@
 		__pmd_idte(address, pmdp);
 	else
 		__pmd_csp(pmdp);
+	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
 }
 
 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
arch/s390/include/asm/pgtable.h
@@ -350,6 +350,10 @@
 #define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number */
 #define _SEGMENT_ENTRY_SPLIT	(1UL << _SEGMENT_ENTRY_SPLIT_BIT)
 
+/* Set of bits not changed in pmd_modify */
+#define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
+				 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)
+
 /* Page status table bits for virtualization */
 #define RCP_ACC_BITS	0xf000000000000000UL
 #define RCP_FP_BIT	0x0800000000000000UL
@@ -512,6 +516,26 @@
 extern void pmdp_splitting_flush(struct vm_area_struct *vma,
 				 unsigned long addr, pmd_t *pmdp);
 
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pmd_t *pmdp,
+				 pmd_t entry, int dirty);
+
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+				  unsigned long address, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMD_WRITE
+static inline int pmd_write(pmd_t pmd)
+{
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0;
+}
+
+static inline int pmd_young(pmd_t pmd)
+{
+	return 0;
+}
+
 static inline int pte_none(pte_t pte)
 {
 	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
@@ -1165,6 +1189,22 @@
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_unmap(pte) do { } while (0)
 
+static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
+{
+	unsigned long sto = (unsigned long) pmdp -
+				pmd_index(address) * sizeof(pmd_t);
+
+	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
+		asm volatile(
+			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
+			: "=m" (*pmdp)
+			: "m" (*pmdp), "a" (sto),
+			  "a" ((address & HPAGE_MASK))
+			: "cc"
+		);
+	}
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
@@ -1175,6 +1215,156 @@
 static inline int pmd_trans_splitting(pmd_t pmd)
 {
 	return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+			      pmd_t *pmdp, pmd_t entry)
+{
+	*pmdp = entry;
+}
+
+static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
+{
+	unsigned long pgprot_pmd = 0;
+
+	if (pgprot_val(pgprot) & _PAGE_INVALID) {
+		if (pgprot_val(pgprot) & _PAGE_SWT)
+			pgprot_pmd |= _HPAGE_TYPE_NONE;
+		pgprot_pmd |= _SEGMENT_ENTRY_INV;
+	}
+	if (pgprot_val(pgprot) & _PAGE_RO)
+		pgprot_pmd |= _SEGMENT_ENTRY_RO;
+	return pgprot_pmd;
+}
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+	pmd_val(pmd) &= _SEGMENT_CHG_MASK;
+	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
+	return pmd;
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
+	return pmd;
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+	pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
+	return pmd;
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+	pmd_val(pmd) |= _SEGMENT_ENTRY_RO;
+	return pmd;
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+	/* No dirty bit in the segment table entry. */
+	return pmd;
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+	/* No referenced bit in the segment table entry. */
+	return pmd;
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+	/* No referenced bit in the segment table entry. */
+	return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address, pmd_t *pmdp)
+{
+	unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK;
+	long tmp, rc;
+	int counter;
+
+	rc = 0;
+	if (MACHINE_HAS_RRBM) {
+		counter = PTRS_PER_PTE >> 6;
+		asm volatile(
+			"0:	.insn	rre,0xb9ae0000,%0,%3\n"	/* rrbm */
+			"	ogr	%1,%0\n"
+			"	la	%3,0(%4,%3)\n"
+			"	brct	%2,0b\n"
+			: "=&d" (tmp), "+&d" (rc), "+d" (counter),
+			  "+a" (pmd_addr)
+			: "a" (64 * 4096UL) : "cc");
+		rc = !!rc;
+	} else {
+		counter = PTRS_PER_PTE;
+		asm volatile(
+			"0:	rrbe	0,%2\n"
+			"	la	%2,0(%3,%2)\n"
+			"	brc	12,1f\n"
+			"	lhi	%0,1\n"
+			"1:	brct	%1,0b\n"
+			: "+d" (rc), "+d" (counter), "+a" (pmd_addr)
+			: "a" (4096UL) : "cc");
+	}
+	return rc;
+}
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+				       unsigned long address, pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+
+	__pmd_idte(address, pmdp);
+	pmd_clear(pmdp);
+	return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
+static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
+				     unsigned long address, pmd_t *pmdp)
+{
+	return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
+}
+
+#define __HAVE_ARCH_PMDP_INVALIDATE
+static inline void pmdp_invalidate(struct vm_area_struct *vma,
+				   unsigned long address, pmd_t *pmdp)
+{
+	__pmd_idte(address, pmdp);
+}
+
+static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
+{
+	pmd_t __pmd;
+	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
+	return __pmd;
+}
+
+#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
+#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
+}
+
+static inline int has_transparent_hugepage(void)
+{
+	return MACHINE_HAS_HPAGE ? 1 : 0;
+}
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+	if (pmd_trans_huge(pmd))
+		return pmd_val(pmd) >> HPAGE_SHIFT;
+	else
+		return pmd_val(pmd) >> PAGE_SHIFT;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
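The reference-bit handling in pmdp_test_and_clear_young() above is the one place where the 1 MB segment has to be walked page by page, because s390 keeps the referenced bit in the storage key of each 4 KB frame rather than in the PMD. As a rough C-level sketch of what the two inline-assembly loops do (rrbm() and rrbe() are hypothetical helpers standing in for the instructions of the same name; the real code keeps everything in registers):

	int young = 0;
	unsigned long addr = pmd_val(*pmdp) & HPAGE_MASK;

	if (MACHINE_HAS_RRBM) {
		/* facility 66: reset the reference bits of 64 pages per instruction */
		for (int i = 0; i < PTRS_PER_PTE / 64; i++, addr += 64 * 4096UL)
			young |= (rrbm(addr) != 0);
	} else {
		/* fall back to resetting one 4 KB page per rrbe */
		for (int i = 0; i < PTRS_PER_PTE; i++, addr += 4096UL)
			young |= rrbe(addr);
	}
	return young;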
arch/s390/include/asm/setup.h
@@ -81,6 +81,7 @@
 #define MACHINE_FLAG_SPP	(1UL << 13)
 #define MACHINE_FLAG_TOPOLOGY	(1UL << 14)
 #define MACHINE_FLAG_TE		(1UL << 15)
+#define MACHINE_FLAG_RRBM	(1UL << 16)
 
 #define MACHINE_IS_VM		(S390_lowcore.machine_flags & MACHINE_FLAG_VM)
 #define MACHINE_IS_KVM		(S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -99,7 +100,8 @@
 #define MACHINE_HAS_PFMF	(0)
 #define MACHINE_HAS_SPP		(0)
 #define MACHINE_HAS_TOPOLOGY	(0)
-#define MACHINE_HAS_TE		(0)
+#define MACHINE_HAS_TE		(0)
+#define MACHINE_HAS_RRBM	(0)
 #else /* CONFIG_64BIT */
 #define MACHINE_HAS_IEEE	(1)
 #define MACHINE_HAS_CSP		(1)
@@ -112,6 +114,7 @@
 #define MACHINE_HAS_SPP		(S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
 #define MACHINE_HAS_TOPOLOGY	(S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
 #define MACHINE_HAS_TE		(S390_lowcore.machine_flags & MACHINE_FLAG_TE)
+#define MACHINE_HAS_RRBM	(S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
 #endif /* CONFIG_64BIT */
 
 #define ZFCPDUMP_HSA_SIZE	(32UL<<20)
arch/s390/include/asm/tlb.h
@@ -137,6 +137,7 @@
 #define tlb_start_vma(tlb, vma)			do { } while (0)
 #define tlb_end_vma(tlb, vma)			do { } while (0)
 #define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
+#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr)	do { } while (0)
 #define tlb_migrate_finish(mm)			do { } while (0)
 
 #endif /* _S390_TLB_H */
arch/s390/kernel/early.c
@@ -388,6 +388,8 @@
 		S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
 	if (test_facility(50) && test_facility(73))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
+	if (test_facility(66))
+		S390_lowcore.machine_flags |= MACHINE_FLAG_RRBM;
 #endif
 }
 
arch/s390/mm/pgtable.c
@@ -898,6 +898,28 @@
 #endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
+			   pmd_t *pmdp)
+{
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	/* No need to flush TLB
+	 * On s390 reference bits are in storage key and never in TLB */
+	return pmdp_test_and_clear_young(vma, address, pmdp);
+}
+
+int pmdp_set_access_flags(struct vm_area_struct *vma,
+			  unsigned long address, pmd_t *pmdp,
+			  pmd_t entry, int dirty)
+{
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+	if (pmd_same(*pmdp, entry))
+		return 0;
+	pmdp_invalidate(vma, address, pmdp);
+	set_pmd_at(vma->vm_mm, address, pmdp, entry);
+	return 1;
+}
+
 static void pmdp_splitting_flush_sync(void *arg)
 {
 	/* Simply deliver the interrupt */