Commit e533b227055598b1f7dc8503a3b4f36b14b9da8a
Exists in
master
and in
4 other branches
Merge branch 'core-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  do_generic_file_read: s/EINTR/EIO/ if lock_page_killable() fails
  softirq, warning fix: correct a format to avoid a warning
  softirqs, debug: preemption check
  x86, pci-hotplug, calgary / rio: fix EBDA ioremap()
  IO resources, x86: ioremap sanity check to catch mapping requests exceeding, fix
  IO resources, x86: ioremap sanity check to catch mapping requests exceeding the BAR sizes
  softlockup: Documentation/sysctl/kernel.txt: fix softlockup_thresh description
  dmi scan: warn about too early calls to dmi_check_system()
  generic: redefine resource_size_t as phys_addr_t
  generic: make PFN_PHYS explicitly return phys_addr_t
  generic: add phys_addr_t for holding physical addresses
  softirq: allocate less vectors
  IO resources: fix/remove printk
  printk: robustify printk, update comment
  printk: robustify printk, fix #2
  printk: robustify printk, fix
  printk: robustify printk

Fixed up conflicts in:
	arch/powerpc/include/asm/types.h
	arch/powerpc/platforms/Kconfig.cputype
manually.
Showing 26 changed files Side-by-side Diff
- arch/m32r/mm/discontig.c
- arch/powerpc/Kconfig
- arch/powerpc/include/asm/types.h
- arch/powerpc/platforms/Kconfig.cputype
- arch/powerpc/sysdev/ppc4xx_pci.c
- arch/x86/Kconfig
- arch/x86/kernel/e820.c
- arch/x86/mm/ioremap.c
- drivers/firmware/dmi_scan.c
- drivers/pci/hotplug/ibmphp_ebda.c
- drivers/pci/setup-bus.c
- include/asm-x86/page_32.h
- include/asm-x86/page_64.h
- include/asm-x86/xen/page.h
- include/linux/interrupt.h
- include/linux/ioport.h
- include/linux/kernel.h
- include/linux/pfn.h
- include/linux/types.h
- kernel/printk.c
- kernel/resource.c
- kernel/softirq.c
- kernel/time/tick-sched.c
- kernel/timer.c
- mm/Kconfig
- mm/filemap.c
arch/m32r/mm/discontig.c
... | ... | @@ -111,9 +111,9 @@ |
111 | 111 | initrd_start, INITRD_SIZE); |
112 | 112 | } else { |
113 | 113 | printk("initrd extends beyond end of memory " |
114 | - "(0x%08lx > 0x%08lx)\ndisabling initrd\n", | |
114 | + "(0x%08lx > 0x%08llx)\ndisabling initrd\n", | |
115 | 115 | INITRD_START + INITRD_SIZE, |
116 | - PFN_PHYS(max_low_pfn)); | |
116 | + (unsigned long long)PFN_PHYS(max_low_pfn)); | |
117 | 117 | |
118 | 118 | initrd_start = 0; |
119 | 119 | } |
arch/powerpc/Kconfig
arch/powerpc/include/asm/types.h
... | ... | @@ -48,13 +48,6 @@ |
48 | 48 | |
49 | 49 | typedef __vector128 vector128; |
50 | 50 | |
51 | -/* Physical address used by some IO functions */ | |
52 | -#if defined(CONFIG_PPC64) || defined(CONFIG_PHYS_64BIT) | |
53 | -typedef u64 phys_addr_t; | |
54 | -#else | |
55 | -typedef u32 phys_addr_t; | |
56 | -#endif | |
57 | - | |
58 | 51 | #if defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT) |
59 | 52 | typedef u64 dma_addr_t; |
60 | 53 | #else |
arch/powerpc/platforms/Kconfig.cputype
... | ... | @@ -135,7 +135,6 @@ |
135 | 135 | config PHYS_64BIT |
136 | 136 | bool 'Large physical address support' if E500 || PPC_86xx |
137 | 137 | depends on (44x || E500 || PPC_86xx) && !PPC_83xx && !PPC_82xx |
138 | - select RESOURCES_64BIT | |
139 | 138 | ---help--- |
140 | 139 | This option enables kernel support for larger than 32-bit physical |
141 | 140 | addresses. This feature may not be available on all cores. |
arch/powerpc/sysdev/ppc4xx_pci.c
... | ... | @@ -39,13 +39,10 @@ |
39 | 39 | #define U64_TO_U32_LOW(val) ((u32)((val) & 0x00000000ffffffffULL)) |
40 | 40 | #define U64_TO_U32_HIGH(val) ((u32)((val) >> 32)) |
41 | 41 | |
42 | -#ifdef CONFIG_RESOURCES_64BIT | |
43 | -#define RES_TO_U32_LOW(val) U64_TO_U32_LOW(val) | |
44 | -#define RES_TO_U32_HIGH(val) U64_TO_U32_HIGH(val) | |
45 | -#else | |
46 | -#define RES_TO_U32_LOW(val) (val) | |
47 | -#define RES_TO_U32_HIGH(val) (0) | |
48 | -#endif | |
42 | +#define RES_TO_U32_LOW(val) \ | |
43 | + ((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_LOW(val) : (val)) | |
44 | +#define RES_TO_U32_HIGH(val) \ | |
45 | + ((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_HIGH(val) : (0)) | |
49 | 46 | |
50 | 47 | static inline int ppc440spe_revA(void) |
51 | 48 | { |
52 | 49 | |
53 | 50 | |
... | ... | @@ -144,12 +141,11 @@ |
144 | 141 | |
145 | 142 | /* Use that */ |
146 | 143 | res->start = pci_addr; |
147 | -#ifndef CONFIG_RESOURCES_64BIT | |
148 | 144 | /* Beware of 32 bits resources */ |
149 | - if ((pci_addr + size) > 0x100000000ull) | |
145 | + if (sizeof(resource_size_t) == sizeof(u32) && | |
146 | + (pci_addr + size) > 0x100000000ull) | |
150 | 147 | res->end = 0xffffffff; |
151 | 148 | else |
152 | -#endif | |
153 | 149 | res->end = res->start + size - 1; |
154 | 150 | break; |
155 | 151 | } |
arch/x86/Kconfig
... | ... | @@ -935,12 +935,14 @@ |
935 | 935 | config X86_PAE |
936 | 936 | bool "PAE (Physical Address Extension) Support" |
937 | 937 | depends on X86_32 && !HIGHMEM4G |
938 | - select RESOURCES_64BIT | |
939 | 938 | help |
940 | 939 | PAE is required for NX support, and furthermore enables |
941 | 940 | larger swapspace support for non-overcommit purposes. It |
942 | 941 | has the cost of more pagetable lookup overhead, and also |
943 | 942 | consumes more pagetable space per process. |
943 | + | |
944 | +config ARCH_PHYS_ADDR_T_64BIT | |
945 | + def_bool X86_64 || X86_PAE | |
944 | 946 | |
945 | 947 | # Common NUMA Features |
946 | 948 | config NUMA |
arch/x86/kernel/e820.c
... | ... | @@ -1282,12 +1282,10 @@ |
1282 | 1282 | e820_res = res; |
1283 | 1283 | for (i = 0; i < e820.nr_map; i++) { |
1284 | 1284 | end = e820.map[i].addr + e820.map[i].size - 1; |
1285 | -#ifndef CONFIG_RESOURCES_64BIT | |
1286 | - if (end > 0x100000000ULL) { | |
1285 | + if (end != (resource_size_t)end) { | |
1287 | 1286 | res++; |
1288 | 1287 | continue; |
1289 | 1288 | } |
1290 | -#endif | |
1291 | 1289 | res->name = e820_type_to_string(e820.map[i].type); |
1292 | 1290 | res->start = e820.map[i].addr; |
1293 | 1291 | res->end = end; |
arch/x86/mm/ioremap.c
... | ... | @@ -220,6 +220,12 @@ |
220 | 220 | return (__force void __iomem *)phys_to_virt(phys_addr); |
221 | 221 | |
222 | 222 | /* |
223 | + * Check if the request spans more than any BAR in the iomem resource | |
224 | + * tree. | |
225 | + */ | |
226 | + WARN_ON(iomem_map_sanity_check(phys_addr, size)); | |
227 | + | |
228 | + /* | |
223 | 229 | * Don't allow anybody to remap normal RAM that we're using.. |
224 | 230 | */ |
225 | 231 | for (pfn = phys_addr >> PAGE_SHIFT; |
drivers/firmware/dmi_scan.c
... | ... | @@ -15,6 +15,11 @@ |
15 | 15 | */ |
16 | 16 | static char dmi_empty_string[] = " "; |
17 | 17 | |
18 | +/* | |
19 | + * Catch too early calls to dmi_check_system(): | |
20 | + */ | |
21 | +static int dmi_initialized; | |
22 | + | |
18 | 23 | static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s) |
19 | 24 | { |
20 | 25 | const u8 *bp = ((u8 *) dm) + dm->length; |
... | ... | @@ -366,7 +371,7 @@ |
366 | 371 | |
367 | 372 | if (efi_enabled) { |
368 | 373 | if (efi.smbios == EFI_INVALID_TABLE_ADDR) |
369 | - goto out; | |
374 | + goto error; | |
370 | 375 | |
371 | 376 | /* This is called as a core_initcall() because it isn't |
372 | 377 | * needed during early boot. This also means we can |
373 | 378 | |
... | ... | @@ -374,13 +379,13 @@ |
374 | 379 | */ |
375 | 380 | p = dmi_ioremap(efi.smbios, 32); |
376 | 381 | if (p == NULL) |
377 | - goto out; | |
382 | + goto error; | |
378 | 383 | |
379 | 384 | rc = dmi_present(p + 0x10); /* offset of _DMI_ string */ |
380 | 385 | dmi_iounmap(p, 32); |
381 | 386 | if (!rc) { |
382 | 387 | dmi_available = 1; |
383 | - return; | |
388 | + goto out; | |
384 | 389 | } |
385 | 390 | } |
386 | 391 | else { |
387 | 392 | |
388 | 393 | |
... | ... | @@ -391,19 +396,22 @@ |
391 | 396 | */ |
392 | 397 | p = dmi_ioremap(0xF0000, 0x10000); |
393 | 398 | if (p == NULL) |
394 | - goto out; | |
399 | + goto error; | |
395 | 400 | |
396 | 401 | for (q = p; q < p + 0x10000; q += 16) { |
397 | 402 | rc = dmi_present(q); |
398 | 403 | if (!rc) { |
399 | 404 | dmi_available = 1; |
400 | 405 | dmi_iounmap(p, 0x10000); |
401 | - return; | |
406 | + goto out; | |
402 | 407 | } |
403 | 408 | } |
404 | 409 | dmi_iounmap(p, 0x10000); |
405 | 410 | } |
406 | - out: printk(KERN_INFO "DMI not present or invalid.\n"); | |
411 | + error: | |
412 | + printk(KERN_INFO "DMI not present or invalid.\n"); | |
413 | + out: | |
414 | + dmi_initialized = 1; | |
407 | 415 | } |
408 | 416 | |
409 | 417 | /** |
... | ... | @@ -423,6 +431,8 @@ |
423 | 431 | { |
424 | 432 | int i, count = 0; |
425 | 433 | const struct dmi_system_id *d = list; |
434 | + | |
435 | + WARN(!dmi_initialized, KERN_ERR "dmi check: not initialized yet.\n"); | |
426 | 436 | |
427 | 437 | while (d->ident) { |
428 | 438 | for (i = 0; i < ARRAY_SIZE(d->matches); i++) { |
drivers/pci/hotplug/ibmphp_ebda.c
drivers/pci/setup-bus.c
... | ... | @@ -378,11 +378,10 @@ |
378 | 378 | align = 0; |
379 | 379 | min_align = 0; |
380 | 380 | for (order = 0; order <= max_order; order++) { |
381 | -#ifdef CONFIG_RESOURCES_64BIT | |
382 | - resource_size_t align1 = 1ULL << (order + 20); | |
383 | -#else | |
384 | - resource_size_t align1 = 1U << (order + 20); | |
385 | -#endif | |
381 | + resource_size_t align1 = 1; | |
382 | + | |
383 | + align1 <<= (order + 20); | |
384 | + | |
386 | 385 | if (!align) |
387 | 386 | min_align = align1; |
388 | 387 | else if (ALIGN(align + min_align, min_align) < align1) |
include/asm-x86/page_32.h
... | ... | @@ -39,7 +39,6 @@ |
39 | 39 | typedef u64 pudval_t; |
40 | 40 | typedef u64 pgdval_t; |
41 | 41 | typedef u64 pgprotval_t; |
42 | -typedef u64 phys_addr_t; | |
43 | 42 | |
44 | 43 | typedef union { |
45 | 44 | struct { |
... | ... | @@ -60,7 +59,6 @@ |
60 | 59 | typedef unsigned long pudval_t; |
61 | 60 | typedef unsigned long pgdval_t; |
62 | 61 | typedef unsigned long pgprotval_t; |
63 | -typedef unsigned long phys_addr_t; | |
64 | 62 | |
65 | 63 | typedef union { |
66 | 64 | pteval_t pte; |
include/asm-x86/page_64.h
include/asm-x86/xen/page.h
... | ... | @@ -76,13 +76,13 @@ |
76 | 76 | static inline xmaddr_t phys_to_machine(xpaddr_t phys) |
77 | 77 | { |
78 | 78 | unsigned offset = phys.paddr & ~PAGE_MASK; |
79 | - return XMADDR(PFN_PHYS((u64)pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset); | |
79 | + return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset); | |
80 | 80 | } |
81 | 81 | |
82 | 82 | static inline xpaddr_t machine_to_phys(xmaddr_t machine) |
83 | 83 | { |
84 | 84 | unsigned offset = machine.maddr & ~PAGE_MASK; |
85 | - return XPADDR(PFN_PHYS((u64)mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset); | |
85 | + return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset); | |
86 | 86 | } |
87 | 87 | |
88 | 88 | /* |
include/linux/interrupt.h
include/linux/ioport.h
... | ... | @@ -174,6 +174,7 @@ |
174 | 174 | |
175 | 175 | extern void __devm_release_region(struct device *dev, struct resource *parent, |
176 | 176 | resource_size_t start, resource_size_t n); |
177 | +extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size); | |
177 | 178 | |
178 | 179 | #endif /* __ASSEMBLY__ */ |
179 | 180 | #endif /* _LINUX_IOPORT_H */ |
include/linux/kernel.h
include/linux/pfn.h
1 | 1 | #ifndef _LINUX_PFN_H_ |
2 | 2 | #define _LINUX_PFN_H_ |
3 | 3 | |
4 | +#ifndef __ASSEMBLY__ | |
5 | +#include <linux/types.h> | |
6 | +#endif | |
7 | + | |
4 | 8 | #define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK) |
5 | 9 | #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) |
6 | 10 | #define PFN_DOWN(x) ((x) >> PAGE_SHIFT) |
7 | -#define PFN_PHYS(x) ((x) << PAGE_SHIFT) | |
11 | +#define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT) | |
8 | 12 | |
9 | 13 | #endif |
include/linux/types.h
... | ... | @@ -191,11 +191,13 @@ |
191 | 191 | #ifdef __KERNEL__ |
192 | 192 | typedef unsigned __bitwise__ gfp_t; |
193 | 193 | |
194 | -#ifdef CONFIG_RESOURCES_64BIT | |
195 | -typedef u64 resource_size_t; | |
194 | +#ifdef CONFIG_PHYS_ADDR_T_64BIT | |
195 | +typedef u64 phys_addr_t; | |
196 | 196 | #else |
197 | -typedef u32 resource_size_t; | |
197 | +typedef u32 phys_addr_t; | |
198 | 198 | #endif |
199 | + | |
200 | +typedef phys_addr_t resource_size_t; | |
199 | 201 | |
200 | 202 | struct ustat { |
201 | 203 | __kernel_daddr_t f_tfree; |
kernel/printk.c
... | ... | @@ -577,9 +577,6 @@ |
577 | 577 | * @fmt: format string |
578 | 578 | * |
579 | 579 | * This is printk(). It can be called from any context. We want it to work. |
580 | - * Be aware of the fact that if oops_in_progress is not set, we might try to | |
581 | - * wake klogd up which could deadlock on runqueue lock if printk() is called | |
582 | - * from scheduler code. | |
583 | 580 | * |
584 | 581 | * We try to grab the console_sem. If we succeed, it's easy - we log the output and |
585 | 582 | * call the console drivers. If we fail to get the semaphore we place the output |
586 | 583 | |
587 | 584 | |
... | ... | @@ -984,10 +981,25 @@ |
984 | 981 | return console_locked; |
985 | 982 | } |
986 | 983 | |
987 | -void wake_up_klogd(void) | |
984 | +static DEFINE_PER_CPU(int, printk_pending); | |
985 | + | |
986 | +void printk_tick(void) | |
988 | 987 | { |
989 | - if (!oops_in_progress && waitqueue_active(&log_wait)) | |
988 | + if (__get_cpu_var(printk_pending)) { | |
989 | + __get_cpu_var(printk_pending) = 0; | |
990 | 990 | wake_up_interruptible(&log_wait); |
991 | + } | |
992 | +} | |
993 | + | |
994 | +int printk_needs_cpu(int cpu) | |
995 | +{ | |
996 | + return per_cpu(printk_pending, cpu); | |
997 | +} | |
998 | + | |
999 | +void wake_up_klogd(void) | |
1000 | +{ | |
1001 | + if (waitqueue_active(&log_wait)) | |
1002 | + __raw_get_cpu_var(printk_pending) = 1; | |
991 | 1003 | } |
992 | 1004 | |
993 | 1005 | /** |
kernel/resource.c
... | ... | @@ -38,10 +38,6 @@ |
38 | 38 | |
39 | 39 | static DEFINE_RWLOCK(resource_lock); |
40 | 40 | |
41 | -#ifdef CONFIG_PROC_FS | |
42 | - | |
43 | -enum { MAX_IORES_LEVEL = 5 }; | |
44 | - | |
45 | 41 | static void *r_next(struct seq_file *m, void *v, loff_t *pos) |
46 | 42 | { |
47 | 43 | struct resource *p = v; |
... | ... | @@ -53,6 +49,10 @@ |
53 | 49 | return p->sibling; |
54 | 50 | } |
55 | 51 | |
52 | +#ifdef CONFIG_PROC_FS | |
53 | + | |
54 | +enum { MAX_IORES_LEVEL = 5 }; | |
55 | + | |
56 | 56 | static void *r_start(struct seq_file *m, loff_t *pos) |
57 | 57 | __acquires(resource_lock) |
58 | 58 | { |
59 | 59 | |
... | ... | @@ -549,13 +549,9 @@ |
549 | 549 | } |
550 | 550 | |
551 | 551 | if (!res) { |
552 | - printk(KERN_DEBUG " __reserve_region_with_split: (%s) [%llx, %llx], res: (%s) [%llx, %llx]\n", | |
553 | - conflict->name, conflict->start, conflict->end, | |
554 | - name, start, end); | |
555 | - | |
556 | 552 | /* failed, split and try again */ |
557 | 553 | |
558 | - /* conflict coverred whole area */ | |
554 | + /* conflict covered whole area */ | |
559 | 555 | if (conflict->start <= start && conflict->end >= end) |
560 | 556 | return; |
561 | 557 | |
... | ... | @@ -832,4 +828,41 @@ |
832 | 828 | } |
833 | 829 | |
834 | 830 | __setup("reserve=", reserve_setup); |
831 | + | |
832 | +/* | |
833 | + * Check if the requested addr and size spans more than any slot in the | |
834 | + * iomem resource tree. | |
835 | + */ | |
836 | +int iomem_map_sanity_check(resource_size_t addr, unsigned long size) | |
837 | +{ | |
838 | + struct resource *p = &iomem_resource; | |
839 | + int err = 0; | |
840 | + loff_t l; | |
841 | + | |
842 | + read_lock(&resource_lock); | |
843 | + for (p = p->child; p ; p = r_next(NULL, p, &l)) { | |
844 | + /* | |
845 | + * We can probably skip the resources without | |
846 | + * IORESOURCE_IO attribute? | |
847 | + */ | |
848 | + if (p->start >= addr + size) | |
849 | + continue; | |
850 | + if (p->end < addr) | |
851 | + continue; | |
852 | + if (p->start <= addr && (p->end >= addr + size - 1)) | |
853 | + continue; | |
854 | + printk(KERN_WARNING "resource map sanity check conflict: " | |
855 | + "0x%llx 0x%llx 0x%llx 0x%llx %s\n", | |
856 | + (unsigned long long)addr, | |
857 | + (unsigned long long)(addr + size - 1), | |
858 | + (unsigned long long)p->start, | |
859 | + (unsigned long long)p->end, | |
860 | + p->name); | |
861 | + err = -1; | |
862 | + break; | |
863 | + } | |
864 | + read_unlock(&resource_lock); | |
865 | + | |
866 | + return err; | |
867 | +} |
kernel/softirq.c
... | ... | @@ -46,7 +46,7 @@ |
46 | 46 | EXPORT_SYMBOL(irq_stat); |
47 | 47 | #endif |
48 | 48 | |
49 | -static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp; | |
49 | +static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp; | |
50 | 50 | |
51 | 51 | static DEFINE_PER_CPU(struct task_struct *, ksoftirqd); |
52 | 52 | |
53 | 53 | |
... | ... | @@ -205,7 +205,18 @@ |
205 | 205 | |
206 | 206 | do { |
207 | 207 | if (pending & 1) { |
208 | + int prev_count = preempt_count(); | |
209 | + | |
208 | 210 | h->action(h); |
211 | + | |
212 | + if (unlikely(prev_count != preempt_count())) { | |
213 | + printk(KERN_ERR "huh, entered softirq %d %p" | |
214 | + "with preempt_count %08x," | |
215 | + " exited with %08x?\n", h - softirq_vec, | |
216 | + h->action, prev_count, preempt_count()); | |
217 | + preempt_count() = prev_count; | |
218 | + } | |
219 | + | |
209 | 220 | rcu_bh_qsctr_inc(cpu); |
210 | 221 | } |
211 | 222 | h++; |
kernel/time/tick-sched.c
... | ... | @@ -270,7 +270,7 @@ |
270 | 270 | next_jiffies = get_next_timer_interrupt(last_jiffies); |
271 | 271 | delta_jiffies = next_jiffies - last_jiffies; |
272 | 272 | |
273 | - if (rcu_needs_cpu(cpu)) | |
273 | + if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu)) | |
274 | 274 | delta_jiffies = 1; |
275 | 275 | /* |
276 | 276 | * Do not stop the tick, if we are only one off |
kernel/timer.c
mm/Kconfig
mm/filemap.c
... | ... | @@ -1100,8 +1100,9 @@ |
1100 | 1100 | |
1101 | 1101 | page_not_up_to_date: |
1102 | 1102 | /* Get exclusive access to the page ... */ |
1103 | - if (lock_page_killable(page)) | |
1104 | - goto readpage_eio; | |
1103 | + error = lock_page_killable(page); | |
1104 | + if (unlikely(error)) | |
1105 | + goto readpage_error; | |
1105 | 1106 | |
1106 | 1107 | page_not_up_to_date_locked: |
1107 | 1108 | /* Did it get truncated before we got the lock? */ |
... | ... | @@ -1130,8 +1131,9 @@ |
1130 | 1131 | } |
1131 | 1132 | |
1132 | 1133 | if (!PageUptodate(page)) { |
1133 | - if (lock_page_killable(page)) | |
1134 | - goto readpage_eio; | |
1134 | + error = lock_page_killable(page); | |
1135 | + if (unlikely(error)) | |
1136 | + goto readpage_error; | |
1135 | 1137 | if (!PageUptodate(page)) { |
1136 | 1138 | if (page->mapping == NULL) { |
1137 | 1139 | /* |
1138 | 1140 | |
... | ... | @@ -1143,15 +1145,14 @@ |
1143 | 1145 | } |
1144 | 1146 | unlock_page(page); |
1145 | 1147 | shrink_readahead_size_eio(filp, ra); |
1146 | - goto readpage_eio; | |
1148 | + error = -EIO; | |
1149 | + goto readpage_error; | |
1147 | 1150 | } |
1148 | 1151 | unlock_page(page); |
1149 | 1152 | } |
1150 | 1153 | |
1151 | 1154 | goto page_ok; |
1152 | 1155 | |
1153 | -readpage_eio: | |
1154 | - error = -EIO; | |
1155 | 1156 | readpage_error: |
1156 | 1157 | /* UHHUH! A synchronous read error occurred. Report it */ |
1157 | 1158 | desc->error = error; |