Commit a5d6e63323fe7799eb0e6fd0a41fbfad10fca258

Authored by Linus Torvalds

Merge branch 'akpm' (fixes from Andrew)

Merge patches from Andrew Morton:
 "13 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: place page->pmd_huge_pte to right union
  MAINTAINERS: add keyboard driver to Hyper-V file list
  x86, mm: do not leak page->ptl for pmd page tables
  ipc,shm: correct error return value in shmctl (SHM_UNLOCK)
  mm, mempolicy: silence gcc warning
  block/partitions/efi.c: fix bound check
  ARM: drivers/rtc/rtc-at91rm9200.c: disable interrupts at shutdown
  mm: hugetlbfs: fix hugetlbfs optimization
  kernel: remove CONFIG_USE_GENERIC_SMP_HELPERS cleanly
  ipc,shm: fix shm_file deletion races
  mm: thp: give transparent hugepage code a separate copy_page
  checkpatch: fix "Use of uninitialized value" warnings
  configfs: fix race between dentry put and lookup

Showing 16 changed files

Documentation/vm/split_page_table_lock
... ... @@ -63,9 +63,9 @@
63 63 PMD split lock enabling requires pgtable_pmd_page_ctor() call on PMD table
64 64 allocation and pgtable_pmd_page_dtor() on freeing.
65 65  
66   -Allocation usually happens in pmd_alloc_one(), freeing in pmd_free(), but
67   -make sure you cover all PMD table allocation / freeing paths: i.e X86_PAE
68   -preallocate few PMDs on pgd_alloc().
  66 +Allocation usually happens in pmd_alloc_one(), freeing in pmd_free() and
  67 +pmd_free_tlb(), but make sure you cover all PMD table allocation / freeing
  68 +paths: i.e X86_PAE preallocate few PMDs on pgd_alloc().
69 69  
70 70 With everything in place you can set CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK.
71 71  
MAINTAINERS
... ... @@ -4065,6 +4065,7 @@
4065 4065 F: arch/x86/kernel/cpu/mshyperv.c
4066 4066 F: drivers/hid/hid-hyperv.c
4067 4067 F: drivers/hv/
  4068 +F: drivers/input/serio/hyperv-keyboard.c
4068 4069 F: drivers/net/hyperv/
4069 4070 F: drivers/scsi/storvsc_drv.c
4070 4071 F: drivers/video/hyperv_fb.c
arch/x86/mm/pgtable.c
... ... @@ -61,6 +61,7 @@
61 61 #if PAGETABLE_LEVELS > 2
62 62 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
63 63 {
  64 + struct page *page = virt_to_page(pmd);
64 65 paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
65 66 /*
66 67 * NOTE! For PAE, any changes to the top page-directory-pointer-table
... ... @@ -69,7 +70,8 @@
69 70 #ifdef CONFIG_X86_PAE
70 71 tlb->need_flush_all = 1;
71 72 #endif
72   - tlb_remove_page(tlb, virt_to_page(pmd));
  73 + pgtable_pmd_page_dtor(page);
  74 + tlb_remove_page(tlb, page);
73 75 }
74 76  
75 77 #if PAGETABLE_LEVELS > 3
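
The leak being fixed: pgtable_pmd_page_ctor(), run when a PMD table is allocated, may dynamically allocate a spinlock and store it in the page (this happens when spinlocks are too large to embed, e.g. under lockdep). Freeing the table through tlb_remove_page() alone never runs the matching dtor, so that allocation leaks. A minimal sketch of what any architecture's free-via-TLB path should look like after this change ("my_arch___pmd_free_tlb" is a hypothetical name, not a real hook):

    static inline void my_arch___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
    {
            struct page *page = virt_to_page(pmd);

            pgtable_pmd_page_dtor(page);    /* release the ptl, undoing the ctor */
            tlb_remove_page(tlb, page);     /* only now hand the page back */
    }

This is the pairing rule the Documentation/vm/split_page_table_lock hunk above now spells out for pmd_free_tlb().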
block/partitions/efi.c
... ... @@ -96,6 +96,7 @@
96 96 * - Code works, detects all the partitions.
97 97 *
98 98 ************************************************************/
  99 +#include <linux/kernel.h>
99 100 #include <linux/crc32.h>
100 101 #include <linux/ctype.h>
101 102 #include <linux/math64.h>
... ... @@ -715,8 +716,8 @@
715 716 efi_guid_unparse(&ptes[i].unique_partition_guid, info->uuid);
716 717  
717 718 /* Naively convert UTF16-LE to 7 bits. */
718   - label_max = min(sizeof(info->volname) - 1,
719   - sizeof(ptes[i].partition_name));
  719 + label_max = min(ARRAY_SIZE(info->volname) - 1,
  720 + ARRAY_SIZE(ptes[i].partition_name));
720 721 info->volname[label_max] = 0;
721 722 while (label_count < label_max) {
722 723 u8 c = ptes[i].partition_name[label_count] & 0xff;
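
The bound bug: sizeof() yields a size in bytes, but label_count indexes array elements. info->volname holds bytes, while ptes[i].partition_name holds 16-bit UTF-16LE code units, so its byte size is double its element count and the old min() could let the loop index past the end of partition_name. ARRAY_SIZE() (provided by the newly included linux/kernel.h) compares element counts on both sides. A standalone demonstration of the pitfall (the array size is illustrative):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
            unsigned short name16[36];      /* 36 elements, 72 bytes */

            /* sizeof over-counts the loop bound by a factor of two... */
            printf("sizeof bound:     %zu\n", sizeof(name16));      /* 72 */
            /* ...ARRAY_SIZE gives the true element count */
            printf("ARRAY_SIZE bound: %zu\n", ARRAY_SIZE(name16));  /* 36 */
            return 0;
    }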
drivers/block/null_blk.c
... ... @@ -223,7 +223,7 @@
223 223 blk_end_request_all(rq, 0);
224 224 }
225 225  
226   -#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
  226 +#ifdef CONFIG_SMP
227 227  
228 228 static void null_ipi_cmd_end_io(void *data)
229 229 {
... ... @@ -260,7 +260,7 @@
260 260 put_cpu();
261 261 }
262 262  
263   -#endif /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
  263 +#endif /* CONFIG_SMP */
264 264  
265 265 static inline void null_handle_cmd(struct nullb_cmd *cmd)
266 266 {
... ... @@ -270,7 +270,7 @@
270 270 end_cmd(cmd);
271 271 break;
272 272 case NULL_IRQ_SOFTIRQ:
273   -#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
  273 +#ifdef CONFIG_SMP
274 274 null_cmd_end_ipi(cmd);
275 275 #else
276 276 end_cmd(cmd);
... ... @@ -571,7 +571,7 @@
571 571 {
572 572 unsigned int i;
573 573  
574   -#if !defined(CONFIG_SMP) || !defined(CONFIG_USE_GENERIC_SMP_HELPERS)
  574 +#if !defined(CONFIG_SMP)
575 575 if (irqmode == NULL_IRQ_SOFTIRQ) {
576 576 pr_warn("null_blk: softirq completions not available.\n");
577 577 pr_warn("null_blk: using direct completions.\n");
drivers/rtc/rtc-at91rm9200.c
... ... @@ -428,6 +428,14 @@
428 428 return 0;
429 429 }
430 430  
  431 +static void at91_rtc_shutdown(struct platform_device *pdev)
  432 +{
  433 + /* Disable all interrupts */
  434 + at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
  435 + AT91_RTC_SECEV | AT91_RTC_TIMEV |
  436 + AT91_RTC_CALEV);
  437 +}
  438 +
431 439 #ifdef CONFIG_PM_SLEEP
432 440  
433 441 /* AT91RM9200 RTC Power management control */
... ... @@ -466,6 +474,7 @@
466 474  
467 475 static struct platform_driver at91_rtc_driver = {
468 476 .remove = __exit_p(at91_rtc_remove),
  477 + .shutdown = at91_rtc_shutdown,
469 478 .driver = {
470 479 .name = "at91_rtc",
471 480 .owner = THIS_MODULE,
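
The point of the new hook: .shutdown runs at reboot/poweroff, and masking every RTC interrupt source there should keep a stale alarm or periodic event from firing into the next kernel before a handler is registered again. A generic sketch of the pattern, with hypothetical "foo" names standing in for a real driver:

    static int foo_probe(struct platform_device *pdev) { /* ... */ return 0; }

    static void foo_shutdown(struct platform_device *pdev)
    {
            struct foo_priv *priv = platform_get_drvdata(pdev);

            /* quiesce the device: no IRQ may stay armed past shutdown */
            writel(~0u, priv->base + FOO_IRQ_DISABLE_REG);
    }

    static struct platform_driver foo_driver = {
            .probe    = foo_probe,
            .shutdown = foo_shutdown,
            .driver   = { .name = "foo" },
    };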
fs/configfs/dir.c
... ... @@ -56,10 +56,19 @@
56 56 struct configfs_dirent *sd = dentry->d_fsdata;
57 57  
58 58 if (sd) {
59   - BUG_ON(sd->s_dentry != dentry);
60 59 /* Coordinate with configfs_readdir */
61 60 spin_lock(&configfs_dirent_lock);
62   - sd->s_dentry = NULL;
  61 + /* Coordinate with configfs_attach_attr where will increase
  62 + * sd->s_count and update sd->s_dentry to new allocated one.
  63 + * Only set sd->dentry to null when this dentry is the only
  64 + * sd owner.
  65 + * If not do so, configfs_d_iput may run just after
  66 + * configfs_attach_attr and set sd->s_dentry to null
  67 + * even it's still in use.
  68 + */
  69 + if (atomic_read(&sd->s_count) <= 2)
  70 + sd->s_dentry = NULL;
  71 +
63 72 spin_unlock(&configfs_dirent_lock);
64 73 configfs_put(sd);
65 74 }
66 75  
... ... @@ -416,8 +425,11 @@
416 425 struct configfs_attribute * attr = sd->s_element;
417 426 int error;
418 427  
  428 + spin_lock(&configfs_dirent_lock);
419 429 dentry->d_fsdata = configfs_get(sd);
420 430 sd->s_dentry = dentry;
  431 + spin_unlock(&configfs_dirent_lock);
  432 +
421 433 error = configfs_create(dentry, (attr->ca_mode & S_IALLUGO) | S_IFREG,
422 434 configfs_init_file);
423 435 if (error) {
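
The race in words: configfs_attach_attr() can point sd->s_dentry at a freshly allocated dentry (taking an extra s_count reference) while the old dentry's d_iput is still in flight; an unconditional "sd->s_dentry = NULL" in d_iput would wipe out the pointer the lookup just installed. Both sides therefore run under configfs_dirent_lock, and d_iput clears the field only when s_count shows no other dentry owner. A standalone analogue of the "clear the back-pointer only as its last owner" rule (hypothetical names, pthreads in place of kernel locks):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stddef.h>

    struct dirent_like {
            atomic_int refs;          /* plays the role of sd->s_count */
            void *cached_dentry;      /* plays the role of sd->s_dentry */
            pthread_mutex_t lock;     /* plays the role of configfs_dirent_lock */
    };

    void drop_dentry(struct dirent_like *sd)
    {
            pthread_mutex_lock(&sd->lock);
            /* refs > 2 means another dentry already owns sd, so the
             * cached pointer is theirs now, not ours to clear */
            if (atomic_load(&sd->refs) <= 2)
                    sd->cached_dentry = NULL;
            pthread_mutex_unlock(&sd->lock);
    }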
include/linux/hugetlb.h
... ... @@ -31,6 +31,7 @@
31 31 void hugepage_put_subpool(struct hugepage_subpool *spool);
32 32  
33 33 int PageHuge(struct page *page);
  34 +int PageHeadHuge(struct page *page_head);
34 35  
35 36 void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
36 37 int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
... ... @@ -69,7 +70,6 @@
69 70 bool isolate_huge_page(struct page *page, struct list_head *list);
70 71 void putback_active_hugepage(struct page *page);
71 72 bool is_hugepage_active(struct page *page);
72   -void copy_huge_page(struct page *dst, struct page *src);
73 73  
74 74 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
75 75 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
... ... @@ -104,6 +104,11 @@
104 104 return 0;
105 105 }
106 106  
  107 +static inline int PageHeadHuge(struct page *page_head)
  108 +{
  109 + return 0;
  110 +}
  111 +
107 112 static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
108 113 {
109 114 }
... ... @@ -140,9 +145,6 @@
140 145 #define isolate_huge_page(p, l) false
141 146 #define putback_active_hugepage(p) do {} while (0)
142 147 #define is_hugepage_active(x) false
143   -static inline void copy_huge_page(struct page *dst, struct page *src)
144   -{
145   -}
146 148  
147 149 static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
148 150 unsigned long address, unsigned long end, pgprot_t newprot)
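
The header uses the usual config-stub idiom: with CONFIG_HUGETLB_PAGE the new PageHeadHuge() is a real function (defined in mm/hugetlb.c below), and without it callers see a static inline that constant-folds to 0, so mm/swap.c needs no #ifdef. A minimal sketch of the idiom with placeholder names:

    #ifdef CONFIG_FOO
    int foo_check(struct page *page);        /* real version lives in foo.c */
    #else
    static inline int foo_check(struct page *page)
    {
            return 0;                        /* compiles away when !CONFIG_FOO */
    }
    #endif

Note that copy_huge_page() also leaves the public header here; its replacement becomes a static helper in mm/migrate.c further down.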
include/linux/mm_types.h
... ... @@ -65,9 +65,6 @@
65 65 * this page is only used to
66 66 * free other pages.
67 67 */
68   -#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
69   - pgtable_t pmd_huge_pte; /* protected by page->ptl */
70   -#endif
71 68 };
72 69  
73 70 union {
... ... @@ -135,6 +132,9 @@
135 132  
136 133 struct list_head list; /* slobs list of pages */
137 134 struct slab *slab_page; /* slab fields */
  135 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
  136 + pgtable_t pmd_huge_pte; /* protected by page->ptl */
  137 +#endif
138 138 };
139 139  
140 140 /* Remainder is not double word aligned */
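
Why the placement matters: struct page multiplexes a small object across many users via unions, so a field is only safe where every overlapping member is guaranteed not to be live at the same time. In its old spot, pmd_huge_pte shared storage with fields that can be in use on the very pages serving as PMD tables; in the new union its neighbours are never used for page-table pages. A standalone reminder of how silently union members clobber one another:

    #include <stdio.h>

    struct page_like {
            union {                  /* both members share the same bytes */
                    void *mapping;
                    void *pmd_huge_pte;
            };
    };

    int main(void)
    {
            struct page_like pg;
            int a, b;

            pg.mapping = &a;
            pg.pmd_huge_pte = &b;    /* silently overwrites .mapping too */
            printf("mapping: %p (no longer %p)\n", pg.mapping, (void *)&a);
            return 0;
    }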
ipc/shm.c
... ... @@ -208,15 +208,18 @@
208 208 */
209 209 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
210 210 {
  211 + struct file *shm_file;
  212 +
  213 + shm_file = shp->shm_file;
  214 + shp->shm_file = NULL;
211 215 ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
212 216 shm_rmid(ns, shp);
213 217 shm_unlock(shp);
214   - if (!is_file_hugepages(shp->shm_file))
215   - shmem_lock(shp->shm_file, 0, shp->mlock_user);
  218 + if (!is_file_hugepages(shm_file))
  219 + shmem_lock(shm_file, 0, shp->mlock_user);
216 220 else if (shp->mlock_user)
217   - user_shm_unlock(file_inode(shp->shm_file)->i_size,
218   - shp->mlock_user);
219   - fput (shp->shm_file);
  221 + user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user);
  222 + fput(shm_file);
220 223 ipc_rcu_putref(shp, shm_rcu_free);
221 224 }
... ... @@ -974,15 +977,25 @@
974 977 ipc_lock_object(&shp->shm_perm);
975 978 if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
976 979 kuid_t euid = current_euid();
977   - err = -EPERM;
978 980 if (!uid_eq(euid, shp->shm_perm.uid) &&
979   - !uid_eq(euid, shp->shm_perm.cuid))
  981 + !uid_eq(euid, shp->shm_perm.cuid)) {
  982 + err = -EPERM;
980 983 goto out_unlock0;
981   - if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
  984 + }
  985 + if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
  986 + err = -EPERM;
982 987 goto out_unlock0;
  988 + }
983 989 }
984 990  
985 991 shm_file = shp->shm_file;
  992 +
  993 + /* check if shm_destroy() is tearing down shp */
  994 + if (shm_file == NULL) {
  995 + err = -EIDRM;
  996 + goto out_unlock0;
  997 + }
  998 +
986 999 if (is_file_hugepages(shm_file))
987 1000 goto out_unlock0;
988 1001  
... ... @@ -1101,6 +1114,14 @@
1101 1114 goto out_unlock;
1102 1115  
1103 1116 ipc_lock_object(&shp->shm_perm);
  1117 +
  1118 + /* check if shm_destroy() is tearing down shp */
  1119 + if (shp->shm_file == NULL) {
  1120 + ipc_unlock_object(&shp->shm_perm);
  1121 + err = -EIDRM;
  1122 + goto out_unlock;
  1123 + }
  1124 +
1104 1125 path = shp->shm_file->f_path;
1105 1126 path_get(&path);
1106 1127 shp->shm_nattch++;
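
Both halves of this fix follow one handshake: shm_destroy() (earlier hunk) detaches shp->shm_file and NULLs it while the object lock is still held, deferring the expensive fput() until after the unlock, and every later path that takes ipc_lock_object() re-validates shm_file before touching it, bailing out with -EIDRM if teardown has begun. A standalone analogue of the handshake (hypothetical names, pthreads in place of ipc locks):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    struct seg {
            pthread_mutex_t lock;
            FILE *file;                     /* stands in for shp->shm_file */
    };

    void seg_destroy(struct seg *s)
    {
            pthread_mutex_lock(&s->lock);
            FILE *f = s->file;
            s->file = NULL;                 /* mark: teardown in progress */
            pthread_mutex_unlock(&s->lock);
            if (f)
                    fclose(f);              /* heavy work outside the lock */
    }

    int seg_use(struct seg *s)
    {
            pthread_mutex_lock(&s->lock);
            if (s->file == NULL) {          /* like the -EIDRM checks above */
                    pthread_mutex_unlock(&s->lock);
                    return -EIDRM;
            }
            /* ...s->file is safe to use while the lock is held... */
            pthread_mutex_unlock(&s->lock);
            return 0;
    }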
mm/hugetlb.c
... ... @@ -476,40 +476,6 @@
476 476 return 0;
477 477 }
478 478  
479   -static void copy_gigantic_page(struct page *dst, struct page *src)
480   -{
481   - int i;
482   - struct hstate *h = page_hstate(src);
483   - struct page *dst_base = dst;
484   - struct page *src_base = src;
485   -
486   - for (i = 0; i < pages_per_huge_page(h); ) {
487   - cond_resched();
488   - copy_highpage(dst, src);
489   -
490   - i++;
491   - dst = mem_map_next(dst, dst_base, i);
492   - src = mem_map_next(src, src_base, i);
493   - }
494   -}
495   -
496   -void copy_huge_page(struct page *dst, struct page *src)
497   -{
498   - int i;
499   - struct hstate *h = page_hstate(src);
500   -
501   - if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
502   - copy_gigantic_page(dst, src);
503   - return;
504   - }
505   -
506   - might_sleep();
507   - for (i = 0; i < pages_per_huge_page(h); i++) {
508   - cond_resched();
509   - copy_highpage(dst + i, src + i);
510   - }
511   -}
512   -
513 479 static void enqueue_huge_page(struct hstate *h, struct page *page)
514 480 {
515 481 int nid = page_to_nid(page);
... ... @@ -735,6 +701,23 @@
735 701 return dtor == free_huge_page;
736 702 }
737 703 EXPORT_SYMBOL_GPL(PageHuge);
  704 +
  705 +/*
  706 + * PageHeadHuge() only returns true for hugetlbfs head page, but not for
  707 + * normal or transparent huge pages.
  708 + */
  709 +int PageHeadHuge(struct page *page_head)
  710 +{
  711 + compound_page_dtor *dtor;
  712 +
  713 + if (!PageHead(page_head))
  714 + return 0;
  715 +
  716 + dtor = get_compound_page_dtor(page_head);
  717 +
  718 + return dtor == free_huge_page;
  719 +}
  720 +EXPORT_SYMBOL_GPL(PageHeadHuge);
738 721  
739 722 pgoff_t __basepage_index(struct page *page)
740 723 {
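
PageHuge() answers "is this page part of a hugetlbfs page?" and so applies compound_head() first; PageHeadHuge() deliberately skips that walk and is true only for the head itself. Roughly (hedged illustration, not from the patch), for a hugetlbfs head H with tail T:

    PageHuge(H)     == 1        PageHeadHuge(H) == 1
    PageHuge(T)     == 1        PageHeadHuge(T) == 0

The head-only form is what mm/swap.c below needs: it already holds a candidate head pointer and must classify it without re-walking tail linkage that a racing split could invalidate.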
mm/mempolicy.c
... ... @@ -2950,7 +2950,7 @@
2950 2950 return;
2951 2951 }
2952 2952  
2953   - p += snprintf(p, maxlen, policy_modes[mode]);
  2953 + p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2954 2954  
2955 2955 if (flags & MPOL_MODE_FLAGS) {
2956 2956 p += snprintf(p, buffer + maxlen - p, "=");
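
The silenced warning is gcc's -Wformat-security: a non-literal string passed to snprintf() as the format argument. The current mode names contain no '%', so the bug was latent rather than live, but any '%' in the data would be parsed as a conversion. A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
            char buf[32];
            const char *mode = "50%% done";   /* imagine a '%' in the data */

            snprintf(buf, sizeof(buf), mode);         /* data used as format */
            printf("as format: %s\n", buf);           /* prints "50% done"   */

            snprintf(buf, sizeof(buf), "%s", mode);   /* the patch's form    */
            printf("as data:   %s\n", buf);           /* prints "50%% done"  */
            return 0;
    }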
mm/migrate.c
... ... @@ -442,6 +442,54 @@
442 442 }
443 443  
444 444 /*
  445 + * Gigantic pages are so large that we do not guarantee that page++ pointer
  446 + * arithmetic will work across the entire page. We need something more
  447 + * specialized.
  448 + */
  449 +static void __copy_gigantic_page(struct page *dst, struct page *src,
  450 + int nr_pages)
  451 +{
  452 + int i;
  453 + struct page *dst_base = dst;
  454 + struct page *src_base = src;
  455 +
  456 + for (i = 0; i < nr_pages; ) {
  457 + cond_resched();
  458 + copy_highpage(dst, src);
  459 +
  460 + i++;
  461 + dst = mem_map_next(dst, dst_base, i);
  462 + src = mem_map_next(src, src_base, i);
  463 + }
  464 +}
  465 +
  466 +static void copy_huge_page(struct page *dst, struct page *src)
  467 +{
  468 + int i;
  469 + int nr_pages;
  470 +
  471 + if (PageHuge(src)) {
  472 + /* hugetlbfs page */
  473 + struct hstate *h = page_hstate(src);
  474 + nr_pages = pages_per_huge_page(h);
  475 +
  476 + if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
  477 + __copy_gigantic_page(dst, src, nr_pages);
  478 + return;
  479 + }
  480 + } else {
  481 + /* thp page */
  482 + BUG_ON(!PageTransHuge(src));
  483 + nr_pages = hpage_nr_pages(src);
  484 + }
  485 +
  486 + for (i = 0; i < nr_pages; i++) {
  487 + cond_resched();
  488 + copy_highpage(dst + i, src + i);
  489 + }
  490 +}
  491 +
  492 +/*
445 493 * Copy the page to its new location
446 494 */
447 495 void migrate_page_copy(struct page *newpage, struct page *page)
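
The two iteration strategies in the new copy_huge_page() exist because, under sparse memory models, struct pages are only guaranteed virtually contiguous within a MAX_ORDER block: "dst + i" is fine for THP and ordinary hugetlbfs sizes, but a gigantic page spans several blocks, so __copy_gigantic_page() steps with mem_map_next(), re-deriving the pointer from the pfn at block boundaries. The always-safe form is essentially pfn arithmetic (a hedged sketch, equivalent in spirit to the kernel's nth_page() macro, not the helper used above):

    static inline struct page *nth_subpage(struct page *head, unsigned long n)
    {
            return pfn_to_page(page_to_pfn(head) + n);  /* valid across blocks */
    }

Unlike the hugetlb.c version it replaces, this copy also handles THP (the hpage_nr_pages() branch), which is what lets migration drop the hugetlbfs-only export.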
mm/swap.c
... ... @@ -82,19 +82,6 @@
82 82  
83 83 static void put_compound_page(struct page *page)
84 84 {
85   - /*
86   - * hugetlbfs pages cannot be split from under us. If this is a
87   - * hugetlbfs page, check refcount on head page and release the page if
88   - * the refcount becomes zero.
89   - */
90   - if (PageHuge(page)) {
91   - page = compound_head(page);
92   - if (put_page_testzero(page))
93   - __put_compound_page(page);
94   -
95   - return;
96   - }
97   -
98 85 if (unlikely(PageTail(page))) {
99 86 /* __split_huge_page_refcount can run under us */
100 87 struct page *page_head = compound_trans_head(page);
... ... @@ -111,14 +98,31 @@
111 98 * still hot on arches that do not support
112 99 * this_cpu_cmpxchg_double().
113 100 */
114   - if (PageSlab(page_head)) {
115   - if (PageTail(page)) {
  101 + if (PageSlab(page_head) || PageHeadHuge(page_head)) {
  102 + if (likely(PageTail(page))) {
  103 + /*
  104 + * __split_huge_page_refcount
  105 + * cannot race here.
  106 + */
  107 + VM_BUG_ON(!PageHead(page_head));
  108 + atomic_dec(&page->_mapcount);
116 109 if (put_page_testzero(page_head))
117 110 VM_BUG_ON(1);
118   -
119   - atomic_dec(&page->_mapcount);
120   - goto skip_lock_tail;
  111 + if (put_page_testzero(page_head))
  112 + __put_compound_page(page_head);
  113 + return;
121 114 } else
  115 + /*
  116 + * __split_huge_page_refcount
  117 + * run before us, "page" was a
  118 + * THP tail. The split
  119 + * page_head has been freed
  120 + * and reallocated as slab or
  121 + * hugetlbfs page of smaller
  122 + * order (only possible if
  123 + * reallocated as slab on
  124 + * x86).
  125 + */
122 126 goto skip_lock;
123 127 }
124 128 /*
... ... @@ -132,8 +136,27 @@
132 136 /* __split_huge_page_refcount run before us */
133 137 compound_unlock_irqrestore(page_head, flags);
134 138 skip_lock:
135   - if (put_page_testzero(page_head))
136   - __put_single_page(page_head);
  139 + if (put_page_testzero(page_head)) {
  140 + /*
  141 + * The head page may have been
  142 + * freed and reallocated as a
  143 + * compound page of smaller
  144 + * order and then freed again.
  145 + * All we know is that it
  146 + * cannot have become: a THP
  147 + * page, a compound page of
  148 + * higher order, a tail page.
  149 + * That is because we still
  150 + * hold the refcount of the
  151 + * split THP tail and
  152 + * page_head was the THP head
  153 + * before the split.
  154 + */
  155 + if (PageHead(page_head))
  156 + __put_compound_page(page_head);
  157 + else
  158 + __put_single_page(page_head);
  159 + }
137 160 out_put_single:
138 161 if (put_page_testzero(page))
139 162 __put_single_page(page);
... ... @@ -155,7 +178,6 @@
155 178 VM_BUG_ON(atomic_read(&page->_count) != 0);
156 179 compound_unlock_irqrestore(page_head, flags);
157 180  
158   -skip_lock_tail:
159 181 if (put_page_testzero(page_head)) {
160 182 if (PageHead(page_head))
161 183 __put_compound_page(page_head);
... ... @@ -198,51 +220,52 @@
198 220 * proper PT lock that already serializes against
199 221 * split_huge_page().
200 222 */
  223 + unsigned long flags;
201 224 bool got = false;
202   - struct page *page_head;
  225 + struct page *page_head = compound_trans_head(page);
203 226  
204   - /*
205   - * If this is a hugetlbfs page it cannot be split under us. Simply
206   - * increment refcount for the head page.
207   - */
208   - if (PageHuge(page)) {
209   - page_head = compound_head(page);
210   - atomic_inc(&page_head->_count);
211   - got = true;
212   - } else {
213   - unsigned long flags;
214   -
215   - page_head = compound_trans_head(page);
216   - if (likely(page != page_head &&
217   - get_page_unless_zero(page_head))) {
218   -
219   - /* Ref to put_compound_page() comment. */
220   - if (PageSlab(page_head)) {
221   - if (likely(PageTail(page))) {
222   - __get_page_tail_foll(page, false);
223   - return true;
224   - } else {
225   - put_page(page_head);
226   - return false;
227   - }
228   - }
229   -
230   - /*
231   - * page_head wasn't a dangling pointer but it
232   - * may not be a head page anymore by the time
233   - * we obtain the lock. That is ok as long as it
234   - * can't be freed from under us.
235   - */
236   - flags = compound_lock_irqsave(page_head);
237   - /* here __split_huge_page_refcount won't run anymore */
  227 + if (likely(page != page_head && get_page_unless_zero(page_head))) {
  228 + /* Ref to put_compound_page() comment. */
  229 + if (PageSlab(page_head) || PageHeadHuge(page_head)) {
238 230 if (likely(PageTail(page))) {
  231 + /*
  232 + * This is a hugetlbfs page or a slab
  233 + * page. __split_huge_page_refcount
  234 + * cannot race here.
  235 + */
  236 + VM_BUG_ON(!PageHead(page_head));
239 237 __get_page_tail_foll(page, false);
240   - got = true;
241   - }
242   - compound_unlock_irqrestore(page_head, flags);
243   - if (unlikely(!got))
  238 + return true;
  239 + } else {
  240 + /*
  241 + * __split_huge_page_refcount run
  242 + * before us, "page" was a THP
  243 + * tail. The split page_head has been
  244 + * freed and reallocated as slab or
  245 + * hugetlbfs page of smaller order
  246 + * (only possible if reallocated as
  247 + * slab on x86).
  248 + */
244 249 put_page(page_head);
  250 + return false;
  251 + }
245 252 }
  253 +
  254 + /*
  255 + * page_head wasn't a dangling pointer but it
  256 + * may not be a head page anymore by the time
  257 + * we obtain the lock. That is ok as long as it
  258 + * can't be freed from under us.
  259 + */
  260 + flags = compound_lock_irqsave(page_head);
  261 + /* here __split_huge_page_refcount won't run anymore */
  262 + if (likely(PageTail(page))) {
  263 + __get_page_tail_foll(page, false);
  264 + got = true;
  265 + }
  266 + compound_unlock_irqrestore(page_head, flags);
  267 + if (unlikely(!got))
  268 + put_page(page_head);
246 269 }
247 270 return got;
248 271 }
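
Common to both rewritten paths is a single pattern: take a speculative reference on what looked like the head via get_page_unless_zero(), then re-validate the property relied upon (under the compound lock for THP; for slab and hugetlbfs heads PageSlab()/PageHeadHuge() guarantee no split can race), and back the reference out if the world changed in the window. A standalone analogue with C11 atomics (hypothetical names):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct obj {
            atomic_int count;
            _Atomic bool still_tail;   /* property a racing split may clear */
    };

    static bool get_unless_zero(struct obj *o)
    {
            int c = atomic_load(&o->count);
            while (c != 0)
                    if (atomic_compare_exchange_weak(&o->count, &c, c + 1))
                            return true;   /* got a ref on a live object */
            return false;                  /* refcount already hit zero */
    }

    /* mirrors __get_page_tail(): speculative get, then revalidate */
    static bool get_tail_ref(struct obj *head, struct obj *tail)
    {
            if (!get_unless_zero(head))
                    return false;          /* candidate head was dangling */
            if (!atomic_load(&tail->still_tail)) {
                    atomic_fetch_sub(&head->count, 1);  /* back out our ref */
                    return false;          /* a split won the race */
            }
            return true;
    }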
net/Kconfig
... ... @@ -224,7 +224,7 @@
224 224  
225 225 config RPS
226 226 boolean
227   - depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
  227 + depends on SMP && SYSFS
228 228 default y
229 229  
230 230 config RFS_ACCEL
... ... @@ -235,7 +235,7 @@
235 235  
236 236 config XPS
237 237 boolean
238   - depends on SMP && USE_GENERIC_SMP_HELPERS
  238 + depends on SMP
239 239 default y
240 240  
241 241 config NETPRIO_CGROUP
scripts/checkpatch.pl
... ... @@ -3289,6 +3289,7 @@
3289 3289 }
3290 3290 }
3291 3291 if (!defined $suppress_whiletrailers{$linenr} &&
  3292 + defined($stat) && defined($cond) &&
3292 3293 $line =~ /\b(?:if|while|for)\s*\(/ && $line !~ /^.\s*#/) {
3293 3294 my ($s, $c) = ($stat, $cond);
3294 3295