Commit 72414d3f1d22fc3e311b162fca95c430048d38ce

Authored by Maneesh Soni
Committed by Linus Torvalds
1 parent 4f339ecb30

[PATCH] kexec code cleanup

o The following patch provides purely cosmetic changes and corrects
  CodingStyle guideline issues like the ones below in the kexec-related
  files (a short before/after sketch follows the change notes below):

  o braces around one-line "if" statement and "for" loop bodies,
  o lines more than 80 columns wide,
  o no space after the "while", "for" and "switch" keywords

o Changes:
  o take-2: removed the extra tab before "case" keywords.
  o take-3: put operators at the end of broken lines and a space before "*/"
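
For readers unfamiliar with these conventions, here is a minimal
before/after sketch. The function and its parameters are hypothetical,
not taken from the patch; only the style rules mirror it:

	/* Before: no space after "for", braces around one-line bodies,
	 * and a prototype running past 80 columns.
	 */
	static int any_segment_in_range_old(unsigned long *mem, unsigned long nr_segments, unsigned long lo, unsigned long hi)
	{
		unsigned long i;
		for(i = 0; i < nr_segments; i++) {
			if ((mem[i] >= lo) && (mem[i] <= hi)) {
				return 1;
			}
		}
		return 0;
	}

	/* After: space after "for", braces dropped from one-line bodies,
	 * a blank line after the declarations, the prototype wrapped
	 * under 80 columns, and the "&&" operator left at the end of
	 * the broken condition line.
	 */
	static int any_segment_in_range(unsigned long *mem,
					unsigned long nr_segments,
					unsigned long lo, unsigned long hi)
	{
		unsigned long i;

		for (i = 0; i < nr_segments; i++)
			if ((mem[i] >= lo) &&
			    (mem[i] <= hi))
				return 1;

		return 0;
	}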

Signed-off-by: Maneesh Soni <maneesh@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 10 changed files with 243 additions and 211 deletions

arch/i386/kernel/crash.c
... ... @@ -31,10 +31,11 @@
31 31 /* This keeps a track of which one is crashing cpu. */
32 32 static int crashing_cpu;
33 33  
34   -static u32 *append_elf_note(u32 *buf,
35   - char *name, unsigned type, void *data, size_t data_len)
  34 +static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
  35 + size_t data_len)
36 36 {
37 37 struct elf_note note;
  38 +
38 39 note.n_namesz = strlen(name) + 1;
39 40 note.n_descsz = data_len;
40 41 note.n_type = type;
... ... @@ -44,26 +45,28 @@
44 45 buf += (note.n_namesz + 3)/4;
45 46 memcpy(buf, data, note.n_descsz);
46 47 buf += (note.n_descsz + 3)/4;
  48 +
47 49 return buf;
48 50 }
49 51  
50 52 static void final_note(u32 *buf)
51 53 {
52 54 struct elf_note note;
  55 +
53 56 note.n_namesz = 0;
54 57 note.n_descsz = 0;
55 58 note.n_type = 0;
56 59 memcpy(buf, &note, sizeof(note));
57 60 }
58 61  
59   -
60 62 static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
61 63 {
62 64 struct elf_prstatus prstatus;
63 65 u32 *buf;
64   - if ((cpu < 0) || (cpu >= NR_CPUS)) {
  66 +
  67 + if ((cpu < 0) || (cpu >= NR_CPUS))
65 68 return;
66   - }
  69 +
67 70 /* Using ELF notes here is opportunistic.
68 71 * I need a well defined structure format
69 72 * for the data I pass, and I need tags
... ... @@ -75,9 +78,8 @@
75 78 memset(&prstatus, 0, sizeof(prstatus));
76 79 prstatus.pr_pid = current->pid;
77 80 elf_core_copy_regs(&prstatus.pr_reg, regs);
78   - buf = append_elf_note(buf, "CORE", NT_PRSTATUS,
79   - &prstatus, sizeof(prstatus));
80   -
  81 + buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
  82 + sizeof(prstatus));
81 83 final_note(buf);
82 84 }
83 85  
... ... @@ -119,8 +121,8 @@
119 121 {
120 122 struct pt_regs regs;
121 123 int cpu;
122   - cpu = smp_processor_id();
123 124  
  125 + cpu = smp_processor_id();
124 126 if (saved_regs)
125 127 crash_setup_regs(&regs, saved_regs);
126 128 else
... ... @@ -153,6 +155,7 @@
153 155 /* Assume hlt works */
154 156 __asm__("hlt");
155 157 for(;;);
  158 +
156 159 return 1;
157 160 }
158 161  
... ... @@ -169,8 +172,8 @@
169 172 static void nmi_shootdown_cpus(void)
170 173 {
171 174 unsigned long msecs;
172   - atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
173 175  
  176 + atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
174 177 /* Would it be better to replace the trap vector here? */
175 178 set_nmi_callback(crash_nmi_callback);
176 179 /* Ensure the new callback function is set before sending
arch/i386/kernel/machine_kexec.c
... ... @@ -80,7 +80,8 @@
80 80 /* Identity map the page table entry */
81 81 pgtable_level1[level1_index] = address | L0_ATTR;
82 82 pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;
83   - set_64bit(&pgtable_level3[level3_index], __pa(pgtable_level2) | L2_ATTR);
  83 + set_64bit(&pgtable_level3[level3_index],
  84 + __pa(pgtable_level2) | L2_ATTR);
84 85  
85 86 /* Flush the tlb so the new mapping takes effect.
86 87 * Global tlb entries are not flushed but that is not an issue.
... ... @@ -139,8 +140,10 @@
139 140 }
140 141  
141 142 typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)(
142   - unsigned long indirection_page, unsigned long reboot_code_buffer,
143   - unsigned long start_address, unsigned int has_pae) ATTRIB_NORET;
  143 + unsigned long indirection_page,
  144 + unsigned long reboot_code_buffer,
  145 + unsigned long start_address,
  146 + unsigned int has_pae) ATTRIB_NORET;
144 147  
145 148 const extern unsigned char relocate_new_kernel[];
146 149 extern void relocate_new_kernel_end(void);
... ... @@ -180,20 +183,23 @@
180 183 {
181 184 unsigned long page_list;
182 185 unsigned long reboot_code_buffer;
  186 +
183 187 relocate_new_kernel_t rnk;
184 188  
185 189 /* Interrupts aren't acceptable while we reboot */
186 190 local_irq_disable();
187 191  
188 192 /* Compute some offsets */
189   - reboot_code_buffer = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
  193 + reboot_code_buffer = page_to_pfn(image->control_code_page)
  194 + << PAGE_SHIFT;
190 195 page_list = image->head;
191 196  
192 197 /* Set up an identity mapping for the reboot_code_buffer */
193 198 identity_map_page(reboot_code_buffer);
194 199  
195 200 /* copy it out */
196   - memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size);
  201 + memcpy((void *)reboot_code_buffer, relocate_new_kernel,
  202 + relocate_new_kernel_size);
197 203  
198 204 /* The segment registers are funny things, they are
199 205 * automatically loaded from a table, in memory wherever you
arch/ppc/kernel/machine_kexec.c
... ... @@ -21,24 +21,23 @@
21 21 #include <asm/machdep.h>
22 22  
23 23 typedef NORET_TYPE void (*relocate_new_kernel_t)(
24   - unsigned long indirection_page, unsigned long reboot_code_buffer,
25   - unsigned long start_address) ATTRIB_NORET;
  24 + unsigned long indirection_page,
  25 + unsigned long reboot_code_buffer,
  26 + unsigned long start_address) ATTRIB_NORET;
26 27  
27 28 const extern unsigned char relocate_new_kernel[];
28 29 const extern unsigned int relocate_new_kernel_size;
29 30  
30 31 void machine_shutdown(void)
31 32 {
32   - if (ppc_md.machine_shutdown) {
  33 + if (ppc_md.machine_shutdown)
33 34 ppc_md.machine_shutdown();
34   - }
35 35 }
36 36  
37 37 void machine_crash_shutdown(struct pt_regs *regs)
38 38 {
39   - if (ppc_md.machine_crash_shutdown) {
  39 + if (ppc_md.machine_crash_shutdown)
40 40 ppc_md.machine_crash_shutdown();
41   - }
42 41 }
43 42  
... ... @@ -48,9 +47,8 @@
48 47 */
49 48 int machine_kexec_prepare(struct kimage *image)
50 49 {
51   - if (ppc_md.machine_kexec_prepare) {
  50 + if (ppc_md.machine_kexec_prepare)
52 51 return ppc_md.machine_kexec_prepare(image);
53   - }
54 52 /*
55 53 * Fail if platform doesn't provide its own machine_kexec_prepare
56 54 * implementation.
... ... @@ -60,9 +58,8 @@
60 58  
61 59 void machine_kexec_cleanup(struct kimage *image)
62 60 {
63   - if (ppc_md.machine_kexec_cleanup) {
  61 + if (ppc_md.machine_kexec_cleanup)
64 62 ppc_md.machine_kexec_cleanup(image);
65   - }
66 63 }
67 64  
68 65 /*
... ... @@ -71,9 +68,9 @@
71 68 */
72 69 NORET_TYPE void machine_kexec(struct kimage *image)
73 70 {
74   - if (ppc_md.machine_kexec) {
  71 + if (ppc_md.machine_kexec)
75 72 ppc_md.machine_kexec(image);
76   - } else {
  73 + else {
77 74 /*
78 75 * Fall back to normal restart if platform doesn't provide
79 76 * its own kexec function, and user insist to kexec...
... ... @@ -83,7 +80,6 @@
83 80 for(;;);
84 81 }
85 82  
86   -
87 83 /*
88 84 * This is a generic machine_kexec function suitable at least for
89 85 * non-OpenFirmware embedded platforms.
... ... @@ -104,15 +100,15 @@
104 100  
105 101 /* we need both effective and real address here */
106 102 reboot_code_buffer =
107   - (unsigned long)page_address(image->control_code_page);
  103 + (unsigned long)page_address(image->control_code_page);
108 104 reboot_code_buffer_phys = virt_to_phys((void *)reboot_code_buffer);
109 105  
110 106 /* copy our kernel relocation code to the control code page */
111   - memcpy((void *)reboot_code_buffer,
112   - relocate_new_kernel, relocate_new_kernel_size);
  107 + memcpy((void *)reboot_code_buffer, relocate_new_kernel,
  108 + relocate_new_kernel_size);
113 109  
114 110 flush_icache_range(reboot_code_buffer,
115   - reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE);
  111 + reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE);
116 112 printk(KERN_INFO "Bye!\n");
117 113  
118 114 /* now call it */
arch/ppc64/kernel/machine_kexec.c
... ... @@ -58,7 +58,7 @@
58 58 * handle the virtual mode, we must make sure no destination
59 59 * overlaps kernel static data or bss.
60 60 */
61   - for(i = 0; i < image->nr_segments; i++)
  61 + for (i = 0; i < image->nr_segments; i++)
62 62 if (image->segment[i].mem < __pa(_end))
63 63 return -ETXTBSY;
64 64  
... ... @@ -76,7 +76,7 @@
76 76 low = __pa(htab_address);
77 77 high = low + (htab_hash_mask + 1) * HASH_GROUP_SIZE;
78 78  
79   - for(i = 0; i < image->nr_segments; i++) {
  79 + for (i = 0; i < image->nr_segments; i++) {
80 80 begin = image->segment[i].mem;
81 81 end = begin + image->segment[i].memsz;
82 82  
... ... @@ -98,7 +98,7 @@
98 98 low = *basep;
99 99 high = low + (*sizep);
100 100  
101   - for(i = 0; i < image->nr_segments; i++) {
  101 + for (i = 0; i < image->nr_segments; i++) {
102 102 begin = image->segment[i].mem;
103 103 end = begin + image->segment[i].memsz;
104 104  
... ... @@ -274,7 +274,8 @@
274 274  
275 275 /* Our assembly helper, in kexec_stub.S */
276 276 extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start,
277   - void *image, void *control, void (*clear_all)(void)) ATTRIB_NORET;
  277 + void *image, void *control,
  278 + void (*clear_all)(void)) ATTRIB_NORET;
278 279  
279 280 /* too late to fail here */
280 281 void machine_kexec(struct kimage *image)
arch/s390/kernel/machine_kexec.c
... ... @@ -67,7 +67,7 @@
67 67 ctl_clear_bit(0,28);
68 68  
69 69 on_each_cpu(kexec_halt_all_cpus, image, 0, 0);
70   - for(;;);
  70 + for (;;);
71 71 }
72 72  
73 73 static void
... ... @@ -85,7 +85,7 @@
85 85 for_each_online_cpu(cpu) {
86 86 if (cpu == smp_processor_id())
87 87 continue;
88   - while(!smp_cpu_not_running(cpu))
  88 + while (!smp_cpu_not_running(cpu))
89 89 cpu_relax();
90 90 }
91 91  
arch/x86_64/kernel/machine_kexec.c
... ... @@ -32,29 +32,31 @@
32 32 #define L2_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
33 33 #define L3_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
34 34  
35   -static void init_level2_page(
36   - u64 *level2p, unsigned long addr)
  35 +static void init_level2_page(u64 *level2p, unsigned long addr)
37 36 {
38 37 unsigned long end_addr;
  38 +
39 39 addr &= PAGE_MASK;
40 40 end_addr = addr + LEVEL2_SIZE;
41   - while(addr < end_addr) {
  41 + while (addr < end_addr) {
42 42 *(level2p++) = addr | L1_ATTR;
43 43 addr += LEVEL1_SIZE;
44 44 }
45 45 }
46 46  
47   -static int init_level3_page(struct kimage *image,
48   - u64 *level3p, unsigned long addr, unsigned long last_addr)
  47 +static int init_level3_page(struct kimage *image, u64 *level3p,
  48 + unsigned long addr, unsigned long last_addr)
49 49 {
50 50 unsigned long end_addr;
51 51 int result;
  52 +
52 53 result = 0;
53 54 addr &= PAGE_MASK;
54 55 end_addr = addr + LEVEL3_SIZE;
55   - while((addr < last_addr) && (addr < end_addr)) {
  56 + while ((addr < last_addr) && (addr < end_addr)) {
56 57 struct page *page;
57 58 u64 *level2p;
  59 +
58 60 page = kimage_alloc_control_pages(image, 0);
59 61 if (!page) {
60 62 result = -ENOMEM;
... ... @@ -66,7 +68,7 @@
66 68 addr += LEVEL2_SIZE;
67 69 }
68 70 /* clear the unused entries */
69   - while(addr < end_addr) {
  71 + while (addr < end_addr) {
70 72 *(level3p++) = 0;
71 73 addr += LEVEL2_SIZE;
72 74 }
... ... @@ -75,17 +77,19 @@
75 77 }
76 78  
77 79  
78   -static int init_level4_page(struct kimage *image,
79   - u64 *level4p, unsigned long addr, unsigned long last_addr)
  80 +static int init_level4_page(struct kimage *image, u64 *level4p,
  81 + unsigned long addr, unsigned long last_addr)
80 82 {
81 83 unsigned long end_addr;
82 84 int result;
  85 +
83 86 result = 0;
84 87 addr &= PAGE_MASK;
85 88 end_addr = addr + LEVEL4_SIZE;
86   - while((addr < last_addr) && (addr < end_addr)) {
  89 + while ((addr < last_addr) && (addr < end_addr)) {
87 90 struct page *page;
88 91 u64 *level3p;
  92 +
89 93 page = kimage_alloc_control_pages(image, 0);
90 94 if (!page) {
91 95 result = -ENOMEM;
... ... @@ -100,11 +104,11 @@
100 104 addr += LEVEL3_SIZE;
101 105 }
102 106 /* clear the unused entries */
103   - while(addr < end_addr) {
  107 + while (addr < end_addr) {
104 108 *(level4p++) = 0;
105 109 addr += LEVEL3_SIZE;
106 110 }
107   - out:
  111 +out:
108 112 return result;
109 113 }
110 114  
... ... @@ -113,7 +117,7 @@
113 117 {
114 118 u64 *level4p;
115 119 level4p = (u64 *)__va(start_pgtable);
116   - return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
  120 + return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
117 121 }
118 122  
119 123 static void set_idt(void *newidt, u16 limit)
... ... @@ -159,9 +163,10 @@
159 163 #undef __STR
160 164 }
161 165  
162   -typedef NORET_TYPE void (*relocate_new_kernel_t)(
163   - unsigned long indirection_page, unsigned long control_code_buffer,
164   - unsigned long start_address, unsigned long pgtable) ATTRIB_NORET;
  166 +typedef NORET_TYPE void (*relocate_new_kernel_t)(unsigned long indirection_page,
  167 + unsigned long control_code_buffer,
  168 + unsigned long start_address,
  169 + unsigned long pgtable) ATTRIB_NORET;
165 170  
166 171 const extern unsigned char relocate_new_kernel[];
167 172 const extern unsigned long relocate_new_kernel_size;
... ... @@ -172,17 +177,17 @@
172 177 int result;
173 178  
174 179 /* Calculate the offsets */
175   - start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
  180 + start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
176 181 control_code_buffer = start_pgtable + 4096UL;
177 182  
178 183 /* Setup the identity mapped 64bit page table */
179 184 result = init_pgtable(image, start_pgtable);
180   - if (result) {
  185 + if (result)
181 186 return result;
182   - }
183 187  
184 188 /* Place the code in the reboot code buffer */
185   - memcpy(__va(control_code_buffer), relocate_new_kernel, relocate_new_kernel_size);
  189 + memcpy(__va(control_code_buffer), relocate_new_kernel,
  190 + relocate_new_kernel_size);
186 191  
187 192 return 0;
188 193 }
... ... @@ -207,8 +212,8 @@
207 212 local_irq_disable();
208 213  
209 214 /* Calculate the offsets */
210   - page_list = image->head;
211   - start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
  215 + page_list = image->head;
  216 + start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
212 217 control_code_buffer = start_pgtable + 4096UL;
213 218  
214 219 /* Set the low half of the page table to my identity mapped
drivers/char/mem.c
... ... @@ -287,7 +287,7 @@
287 287 size_t read = 0, csize;
288 288 int rc = 0;
289 289  
290   - while(count) {
  290 + while (count) {
291 291 pfn = *ppos / PAGE_SIZE;
292 292 if (pfn > saved_max_pfn)
293 293 return read;
include/linux/kexec.h
... ... @@ -91,14 +91,17 @@
91 91 extern int machine_kexec_prepare(struct kimage *image);
92 92 extern void machine_kexec_cleanup(struct kimage *image);
93 93 extern asmlinkage long sys_kexec_load(unsigned long entry,
94   - unsigned long nr_segments, struct kexec_segment __user *segments,
95   - unsigned long flags);
  94 + unsigned long nr_segments,
  95 + struct kexec_segment __user *segments,
  96 + unsigned long flags);
96 97 #ifdef CONFIG_COMPAT
97 98 extern asmlinkage long compat_sys_kexec_load(unsigned long entry,
98   - unsigned long nr_segments, struct compat_kexec_segment __user *segments,
99   - unsigned long flags);
  99 + unsigned long nr_segments,
  100 + struct compat_kexec_segment __user *segments,
  101 + unsigned long flags);
100 102 #endif
101   -extern struct page *kimage_alloc_control_pages(struct kimage *image, unsigned int order);
  103 +extern struct page *kimage_alloc_control_pages(struct kimage *image,
  104 + unsigned int order);
102 105 extern void crash_kexec(struct pt_regs *);
103 106 int kexec_should_crash(struct task_struct *);
104 107 extern struct kimage *kexec_image;
include/linux/syscalls.h
... ... @@ -159,9 +159,9 @@
159 159 asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd,
160 160 void __user *arg);
161 161 asmlinkage long sys_restart_syscall(void);
162   -asmlinkage long sys_kexec_load(unsigned long entry,
163   - unsigned long nr_segments, struct kexec_segment __user *segments,
164   - unsigned long flags);
  162 +asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
  163 + struct kexec_segment __user *segments,
  164 + unsigned long flags);
165 165  
166 166 asmlinkage long sys_exit(int error_code);
167 167 asmlinkage void sys_exit_group(int error_code);
kernel/kexec.c
... ... @@ -87,12 +87,15 @@
87 87 */
88 88 #define KIMAGE_NO_DEST (-1UL)
89 89  
90   -static int kimage_is_destination_range(
91   - struct kimage *image, unsigned long start, unsigned long end);
92   -static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mask, unsigned long dest);
  90 +static int kimage_is_destination_range(struct kimage *image,
  91 + unsigned long start, unsigned long end);
  92 +static struct page *kimage_alloc_page(struct kimage *image,
  93 + unsigned int gfp_mask,
  94 + unsigned long dest);
93 95  
94 96 static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
95   - unsigned long nr_segments, struct kexec_segment __user *segments)
  97 + unsigned long nr_segments,
  98 + struct kexec_segment __user *segments)
96 99 {
97 100 size_t segment_bytes;
98 101 struct kimage *image;
... ... @@ -102,9 +105,9 @@
102 105 /* Allocate a controlling structure */
103 106 result = -ENOMEM;
104 107 image = kmalloc(sizeof(*image), GFP_KERNEL);
105   - if (!image) {
  108 + if (!image)
106 109 goto out;
107   - }
  110 +
108 111 memset(image, 0, sizeof(*image));
109 112 image->head = 0;
110 113 image->entry = &image->head;
... ... @@ -145,6 +148,7 @@
145 148 result = -EADDRNOTAVAIL;
146 149 for (i = 0; i < nr_segments; i++) {
147 150 unsigned long mstart, mend;
  151 +
148 152 mstart = image->segment[i].mem;
149 153 mend = mstart + image->segment[i].memsz;
150 154 if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
... ... @@ -159,12 +163,13 @@
159 163 * easy explanation as one segment stops on another.
160 164 */
161 165 result = -EINVAL;
162   - for(i = 0; i < nr_segments; i++) {
  166 + for (i = 0; i < nr_segments; i++) {
163 167 unsigned long mstart, mend;
164 168 unsigned long j;
  169 +
165 170 mstart = image->segment[i].mem;
166 171 mend = mstart + image->segment[i].memsz;
167   - for(j = 0; j < i; j++) {
  172 + for (j = 0; j < i; j++) {
168 173 unsigned long pstart, pend;
169 174 pstart = image->segment[j].mem;
170 175 pend = pstart + image->segment[j].memsz;
... ... @@ -180,25 +185,25 @@
180 185 * later on.
181 186 */
182 187 result = -EINVAL;
183   - for(i = 0; i < nr_segments; i++) {
  188 + for (i = 0; i < nr_segments; i++) {
184 189 if (image->segment[i].bufsz > image->segment[i].memsz)
185 190 goto out;
186 191 }
187 192  
188   -
189 193 result = 0;
190   - out:
191   - if (result == 0) {
  194 +out:
  195 + if (result == 0)
192 196 *rimage = image;
193   - } else {
  197 + else
194 198 kfree(image);
195   - }
  199 +
196 200 return result;
197 201  
198 202 }
199 203  
200 204 static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
201   - unsigned long nr_segments, struct kexec_segment __user *segments)
  205 + unsigned long nr_segments,
  206 + struct kexec_segment __user *segments)
202 207 {
203 208 int result;
204 209 struct kimage *image;
... ... @@ -206,9 +211,9 @@
206 211 /* Allocate and initialize a controlling structure */
207 212 image = NULL;
208 213 result = do_kimage_alloc(&image, entry, nr_segments, segments);
209   - if (result) {
  214 + if (result)
210 215 goto out;
211   - }
  216 +
212 217 *rimage = image;
213 218  
214 219 /*
... ... @@ -218,7 +223,7 @@
218 223 */
219 224 result = -ENOMEM;
220 225 image->control_code_page = kimage_alloc_control_pages(image,
221   - get_order(KEXEC_CONTROL_CODE_SIZE));
  226 + get_order(KEXEC_CONTROL_CODE_SIZE));
222 227 if (!image->control_code_page) {
223 228 printk(KERN_ERR "Could not allocate control_code_buffer\n");
224 229 goto out;
... ... @@ -226,16 +231,17 @@
226 231  
227 232 result = 0;
228 233 out:
229   - if (result == 0) {
  234 + if (result == 0)
230 235 *rimage = image;
231   - } else {
  236 + else
232 237 kfree(image);
233   - }
  238 +
234 239 return result;
235 240 }
236 241  
237 242 static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
238   - unsigned long nr_segments, struct kexec_segment *segments)
  243 + unsigned long nr_segments,
  244 + struct kexec_segment *segments)
239 245 {
240 246 int result;
241 247 struct kimage *image;
... ... @@ -250,9 +256,8 @@
250 256  
251 257 /* Allocate and initialize a controlling structure */
252 258 result = do_kimage_alloc(&image, entry, nr_segments, segments);
253   - if (result) {
  259 + if (result)
254 260 goto out;
255   - }
256 261  
257 262 /* Enable the special crash kernel control page
258 263 * allocation policy.
... ... @@ -272,6 +277,7 @@
272 277 result = -EADDRNOTAVAIL;
273 278 for (i = 0; i < nr_segments; i++) {
274 279 unsigned long mstart, mend;
  280 +
275 281 mstart = image->segment[i].mem;
276 282 mend = mstart + image->segment[i].memsz - 1;
277 283 /* Ensure we are within the crash kernel limits */
... ... @@ -279,7 +285,6 @@
279 285 goto out;
280 286 }
281 287  
282   -
283 288 /*
284 289 * Find a location for the control code buffer, and add
285 290 * the vector of segments so that it's pages will also be
... ... @@ -287,80 +292,84 @@
287 292 */
288 293 result = -ENOMEM;
289 294 image->control_code_page = kimage_alloc_control_pages(image,
290   - get_order(KEXEC_CONTROL_CODE_SIZE));
  295 + get_order(KEXEC_CONTROL_CODE_SIZE));
291 296 if (!image->control_code_page) {
292 297 printk(KERN_ERR "Could not allocate control_code_buffer\n");
293 298 goto out;
294 299 }
295 300  
296 301 result = 0;
297   - out:
298   - if (result == 0) {
  302 +out:
  303 + if (result == 0)
299 304 *rimage = image;
300   - } else {
  305 + else
301 306 kfree(image);
302   - }
  307 +
303 308 return result;
304 309 }
305 310  
306   -static int kimage_is_destination_range(
307   - struct kimage *image, unsigned long start, unsigned long end)
  311 +static int kimage_is_destination_range(struct kimage *image,
  312 + unsigned long start,
  313 + unsigned long end)
308 314 {
309 315 unsigned long i;
310 316  
311 317 for (i = 0; i < image->nr_segments; i++) {
312 318 unsigned long mstart, mend;
  319 +
313 320 mstart = image->segment[i].mem;
314   - mend = mstart + image->segment[i].memsz;
315   - if ((end > mstart) && (start < mend)) {
  321 + mend = mstart + image->segment[i].memsz;
  322 + if ((end > mstart) && (start < mend))
316 323 return 1;
317   - }
318 324 }
  325 +
319 326 return 0;
320 327 }
321 328  
322   -static struct page *kimage_alloc_pages(unsigned int gfp_mask, unsigned int order)
  329 +static struct page *kimage_alloc_pages(unsigned int gfp_mask,
  330 + unsigned int order)
323 331 {
324 332 struct page *pages;
  333 +
325 334 pages = alloc_pages(gfp_mask, order);
326 335 if (pages) {
327 336 unsigned int count, i;
328 337 pages->mapping = NULL;
329 338 pages->private = order;
330 339 count = 1 << order;
331   - for(i = 0; i < count; i++) {
  340 + for (i = 0; i < count; i++)
332 341 SetPageReserved(pages + i);
333   - }
334 342 }
  343 +
335 344 return pages;
336 345 }
337 346  
338 347 static void kimage_free_pages(struct page *page)
339 348 {
340 349 unsigned int order, count, i;
  350 +
341 351 order = page->private;
342 352 count = 1 << order;
343   - for(i = 0; i < count; i++) {
  353 + for (i = 0; i < count; i++)
344 354 ClearPageReserved(page + i);
345   - }
346 355 __free_pages(page, order);
347 356 }
348 357  
349 358 static void kimage_free_page_list(struct list_head *list)
350 359 {
351 360 struct list_head *pos, *next;
  361 +
352 362 list_for_each_safe(pos, next, list) {
353 363 struct page *page;
354 364  
355 365 page = list_entry(pos, struct page, lru);
356 366 list_del(&page->lru);
357   -
358 367 kimage_free_pages(page);
359 368 }
360 369 }
361 370  
362   -static struct page *kimage_alloc_normal_control_pages(
363   - struct kimage *image, unsigned int order)
  371 +static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
  372 + unsigned int order)
364 373 {
365 374 /* Control pages are special, they are the intermediaries
366 375 * that are needed while we copy the rest of the pages
... ... @@ -387,6 +396,7 @@
387 396 */
388 397 do {
389 398 unsigned long pfn, epfn, addr, eaddr;
  399 +
390 400 pages = kimage_alloc_pages(GFP_KERNEL, order);
391 401 if (!pages)
392 402 break;
... ... @@ -395,12 +405,12 @@
395 405 addr = pfn << PAGE_SHIFT;
396 406 eaddr = epfn << PAGE_SHIFT;
397 407 if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
398   - kimage_is_destination_range(image, addr, eaddr))
399   - {
  408 + kimage_is_destination_range(image, addr, eaddr)) {
400 409 list_add(&pages->lru, &extra_pages);
401 410 pages = NULL;
402 411 }
403   - } while(!pages);
  412 + } while (!pages);
  413 +
404 414 if (pages) {
405 415 /* Remember the allocated page... */
406 416 list_add(&pages->lru, &image->control_pages);
... ... @@ -420,12 +430,12 @@
420 430 * For now it is simpler to just free the pages.
421 431 */
422 432 kimage_free_page_list(&extra_pages);
423   - return pages;
424 433  
  434 + return pages;
425 435 }
426 436  
427   -static struct page *kimage_alloc_crash_control_pages(
428   - struct kimage *image, unsigned int order)
  437 +static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
  438 + unsigned int order)
429 439 {
430 440 /* Control pages are special, they are the intermediaries
431 441 * that are needed while we copy the rest of the pages
... ... @@ -450,21 +460,22 @@
450 460 */
451 461 unsigned long hole_start, hole_end, size;
452 462 struct page *pages;
  463 +
453 464 pages = NULL;
454 465 size = (1 << order) << PAGE_SHIFT;
455 466 hole_start = (image->control_page + (size - 1)) & ~(size - 1);
456 467 hole_end = hole_start + size - 1;
457   - while(hole_end <= crashk_res.end) {
  468 + while (hole_end <= crashk_res.end) {
458 469 unsigned long i;
459   - if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT) {
  470 +
  471 + if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
460 472 break;
461   - }
462   - if (hole_end > crashk_res.end) {
  473 + if (hole_end > crashk_res.end)
463 474 break;
464   - }
465 475 /* See if I overlap any of the segments */
466   - for(i = 0; i < image->nr_segments; i++) {
  476 + for (i = 0; i < image->nr_segments; i++) {
467 477 unsigned long mstart, mend;
  478 +
468 479 mstart = image->segment[i].mem;
469 480 mend = mstart + image->segment[i].memsz - 1;
470 481 if ((hole_end >= mstart) && (hole_start <= mend)) {
... ... @@ -480,18 +491,19 @@
480 491 break;
481 492 }
482 493 }
483   - if (pages) {
  494 + if (pages)
484 495 image->control_page = hole_end;
485   - }
  496 +
486 497 return pages;
487 498 }
488 499  
489 500  
490   -struct page *kimage_alloc_control_pages(
491   - struct kimage *image, unsigned int order)
  501 +struct page *kimage_alloc_control_pages(struct kimage *image,
  502 + unsigned int order)
492 503 {
493 504 struct page *pages = NULL;
494   - switch(image->type) {
  505 +
  506 + switch (image->type) {
495 507 case KEXEC_TYPE_DEFAULT:
496 508 pages = kimage_alloc_normal_control_pages(image, order);
497 509 break;
... ... @@ -499,43 +511,46 @@
499 511 pages = kimage_alloc_crash_control_pages(image, order);
500 512 break;
501 513 }
  514 +
502 515 return pages;
503 516 }
504 517  
505 518 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
506 519 {
507   - if (*image->entry != 0) {
  520 + if (*image->entry != 0)
508 521 image->entry++;
509   - }
  522 +
510 523 if (image->entry == image->last_entry) {
511 524 kimage_entry_t *ind_page;
512 525 struct page *page;
  526 +
513 527 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
514   - if (!page) {
  528 + if (!page)
515 529 return -ENOMEM;
516   - }
  530 +
517 531 ind_page = page_address(page);
518 532 *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
519 533 image->entry = ind_page;
520   - image->last_entry =
521   - ind_page + ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
  534 + image->last_entry = ind_page +
  535 + ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
522 536 }
523 537 *image->entry = entry;
524 538 image->entry++;
525 539 *image->entry = 0;
  540 +
526 541 return 0;
527 542 }
528 543  
529   -static int kimage_set_destination(
530   - struct kimage *image, unsigned long destination)
  544 +static int kimage_set_destination(struct kimage *image,
  545 + unsigned long destination)
531 546 {
532 547 int result;
533 548  
534 549 destination &= PAGE_MASK;
535 550 result = kimage_add_entry(image, destination | IND_DESTINATION);
536   - if (result == 0) {
  551 + if (result == 0)
537 552 image->destination = destination;
538   - }
  553 +
539 554 return result;
540 555 }
541 556  
... ... @@ -546,9 +561,9 @@
546 561  
547 562 page &= PAGE_MASK;
548 563 result = kimage_add_entry(image, page | IND_SOURCE);
549   - if (result == 0) {
  564 + if (result == 0)
550 565 image->destination += PAGE_SIZE;
551   - }
  566 +
552 567 return result;
553 568 }
554 569  
... ... @@ -564,10 +579,11 @@
564 579 }
565 580 static int kimage_terminate(struct kimage *image)
566 581 {
567   - if (*image->entry != 0) {
  582 + if (*image->entry != 0)
568 583 image->entry++;
569   - }
  584 +
570 585 *image->entry = IND_DONE;
  586 +
571 587 return 0;
572 588 }
573 589  
... ... @@ -591,26 +607,24 @@
591 607  
592 608 if (!image)
593 609 return;
  610 +
594 611 kimage_free_extra_pages(image);
595 612 for_each_kimage_entry(image, ptr, entry) {
596 613 if (entry & IND_INDIRECTION) {
597 614 /* Free the previous indirection page */
598   - if (ind & IND_INDIRECTION) {
  615 + if (ind & IND_INDIRECTION)
599 616 kimage_free_entry(ind);
600   - }
601 617 /* Save this indirection page until we are
602 618 * done with it.
603 619 */
604 620 ind = entry;
605 621 }
606   - else if (entry & IND_SOURCE) {
  622 + else if (entry & IND_SOURCE)
607 623 kimage_free_entry(entry);
608   - }
609 624 }
610 625 /* Free the final indirection page */
611   - if (ind & IND_INDIRECTION) {
  626 + if (ind & IND_INDIRECTION)
612 627 kimage_free_entry(ind);
613   - }
614 628  
615 629 /* Handle any machine specific cleanup */
616 630 machine_kexec_cleanup(image);
... ... @@ -620,26 +634,28 @@
620 634 kfree(image);
621 635 }
622 636  
623   -static kimage_entry_t *kimage_dst_used(struct kimage *image, unsigned long page)
  637 +static kimage_entry_t *kimage_dst_used(struct kimage *image,
  638 + unsigned long page)
624 639 {
625 640 kimage_entry_t *ptr, entry;
626 641 unsigned long destination = 0;
627 642  
628 643 for_each_kimage_entry(image, ptr, entry) {
629   - if (entry & IND_DESTINATION) {
  644 + if (entry & IND_DESTINATION)
630 645 destination = entry & PAGE_MASK;
631   - }
632 646 else if (entry & IND_SOURCE) {
633   - if (page == destination) {
  647 + if (page == destination)
634 648 return ptr;
635   - }
636 649 destination += PAGE_SIZE;
637 650 }
638 651 }
  652 +
639 653 return 0;
640 654 }
641 655  
642   -static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mask, unsigned long destination)
  656 +static struct page *kimage_alloc_page(struct kimage *image,
  657 + unsigned int gfp_mask,
  658 + unsigned long destination)
643 659 {
644 660 /*
645 661 * Here we implement safeguards to ensure that a source page
... ... @@ -679,11 +695,11 @@
679 695  
680 696 /* Allocate a page, if we run out of memory give up */
681 697 page = kimage_alloc_pages(gfp_mask, 0);
682   - if (!page) {
  698 + if (!page)
683 699 return 0;
684   - }
685 700 /* If the page cannot be used file it away */
686   - if (page_to_pfn(page) > (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
  701 + if (page_to_pfn(page) >
  702 + (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
687 703 list_add(&page->lru, &image->unuseable_pages);
688 704 continue;
689 705 }
... ... @@ -694,7 +710,8 @@
694 710 break;
695 711  
696 712 /* If the page is not a destination page use it */
697   - if (!kimage_is_destination_range(image, addr, addr + PAGE_SIZE))
  713 + if (!kimage_is_destination_range(image, addr,
  714 + addr + PAGE_SIZE))
698 715 break;
699 716  
700 717 /*
... ... @@ -727,11 +744,12 @@
727 744 list_add(&page->lru, &image->dest_pages);
728 745 }
729 746 }
  747 +
730 748 return page;
731 749 }
732 750  
733 751 static int kimage_load_normal_segment(struct kimage *image,
734   - struct kexec_segment *segment)
  752 + struct kexec_segment *segment)
735 753 {
736 754 unsigned long maddr;
737 755 unsigned long ubytes, mbytes;
... ... @@ -745,34 +763,36 @@
745 763 maddr = segment->mem;
746 764  
747 765 result = kimage_set_destination(image, maddr);
748   - if (result < 0) {
  766 + if (result < 0)
749 767 goto out;
750   - }
751   - while(mbytes) {
  768 +
  769 + while (mbytes) {
752 770 struct page *page;
753 771 char *ptr;
754 772 size_t uchunk, mchunk;
  773 +
755 774 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
756 775 if (page == 0) {
757 776 result = -ENOMEM;
758 777 goto out;
759 778 }
760   - result = kimage_add_page(image, page_to_pfn(page) << PAGE_SHIFT);
761   - if (result < 0) {
  779 + result = kimage_add_page(image, page_to_pfn(page)
  780 + << PAGE_SHIFT);
  781 + if (result < 0)
762 782 goto out;
763   - }
  783 +
764 784 ptr = kmap(page);
765 785 /* Start with a clear page */
766 786 memset(ptr, 0, PAGE_SIZE);
767 787 ptr += maddr & ~PAGE_MASK;
768 788 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
769   - if (mchunk > mbytes) {
  789 + if (mchunk > mbytes)
770 790 mchunk = mbytes;
771   - }
  791 +
772 792 uchunk = mchunk;
773   - if (uchunk > ubytes) {
  793 + if (uchunk > ubytes)
774 794 uchunk = ubytes;
775   - }
  795 +
776 796 result = copy_from_user(ptr, buf, uchunk);
777 797 kunmap(page);
778 798 if (result) {
... ... @@ -784,12 +804,12 @@
784 804 buf += mchunk;
785 805 mbytes -= mchunk;
786 806 }
787   - out:
  807 +out:
788 808 return result;
789 809 }
790 810  
791 811 static int kimage_load_crash_segment(struct kimage *image,
792   - struct kexec_segment *segment)
  812 + struct kexec_segment *segment)
793 813 {
794 814 /* For crash dumps kernels we simply copy the data from
795 815 * user space to it's destination.
... ... @@ -805,10 +825,11 @@
805 825 ubytes = segment->bufsz;
806 826 mbytes = segment->memsz;
807 827 maddr = segment->mem;
808   - while(mbytes) {
  828 + while (mbytes) {
809 829 struct page *page;
810 830 char *ptr;
811 831 size_t uchunk, mchunk;
  832 +
812 833 page = pfn_to_page(maddr >> PAGE_SHIFT);
813 834 if (page == 0) {
814 835 result = -ENOMEM;
... ... @@ -817,9 +838,9 @@
817 838 ptr = kmap(page);
818 839 ptr += maddr & ~PAGE_MASK;
819 840 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
820   - if (mchunk > mbytes) {
  841 + if (mchunk > mbytes)
821 842 mchunk = mbytes;
822   - }
  843 +
823 844 uchunk = mchunk;
824 845 if (uchunk > ubytes) {
825 846 uchunk = ubytes;
... ... @@ -837,15 +858,16 @@
837 858 buf += mchunk;
838 859 mbytes -= mchunk;
839 860 }
840   - out:
  861 +out:
841 862 return result;
842 863 }
843 864  
844 865 static int kimage_load_segment(struct kimage *image,
845   - struct kexec_segment *segment)
  866 + struct kexec_segment *segment)
846 867 {
847 868 int result = -ENOMEM;
848   - switch(image->type) {
  869 +
  870 + switch (image->type) {
849 871 case KEXEC_TYPE_DEFAULT:
850 872 result = kimage_load_normal_segment(image, segment);
851 873 break;
... ... @@ -853,6 +875,7 @@
853 875 result = kimage_load_crash_segment(image, segment);
854 876 break;
855 877 }
  878 +
856 879 return result;
857 880 }
858 881  
... ... @@ -885,9 +908,9 @@
885 908 */
886 909 static int kexec_lock = 0;
887 910  
888   -asmlinkage long sys_kexec_load(unsigned long entry,
889   - unsigned long nr_segments, struct kexec_segment __user *segments,
890   - unsigned long flags)
  911 +asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
  912 + struct kexec_segment __user *segments,
  913 + unsigned long flags)
891 914 {
892 915 struct kimage **dest_image, *image;
893 916 int locked;
... ... @@ -907,9 +930,7 @@
907 930 /* Verify we are on the appropriate architecture */
908 931 if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
909 932 ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
910   - {
911 933 return -EINVAL;
912   - }
913 934  
914 935 /* Put an artificial cap on the number
915 936 * of segments passed to kexec_load.
... ... @@ -929,58 +950,59 @@
929 950 * KISS: always take the mutex.
930 951 */
931 952 locked = xchg(&kexec_lock, 1);
932   - if (locked) {
  953 + if (locked)
933 954 return -EBUSY;
934   - }
  955 +
935 956 dest_image = &kexec_image;
936   - if (flags & KEXEC_ON_CRASH) {
  957 + if (flags & KEXEC_ON_CRASH)
937 958 dest_image = &kexec_crash_image;
938   - }
939 959 if (nr_segments > 0) {
940 960 unsigned long i;
  961 +
941 962 /* Loading another kernel to reboot into */
942   - if ((flags & KEXEC_ON_CRASH) == 0) {
943   - result = kimage_normal_alloc(&image, entry, nr_segments, segments);
944   - }
  963 + if ((flags & KEXEC_ON_CRASH) == 0)
  964 + result = kimage_normal_alloc(&image, entry,
  965 + nr_segments, segments);
945 966 /* Loading another kernel to switch to if this one crashes */
946 967 else if (flags & KEXEC_ON_CRASH) {
947 968 /* Free any current crash dump kernel before
948 969 * we corrupt it.
949 970 */
950 971 kimage_free(xchg(&kexec_crash_image, NULL));
951   - result = kimage_crash_alloc(&image, entry, nr_segments, segments);
  972 + result = kimage_crash_alloc(&image, entry,
  973 + nr_segments, segments);
952 974 }
953   - if (result) {
  975 + if (result)
954 976 goto out;
955   - }
  977 +
956 978 result = machine_kexec_prepare(image);
957   - if (result) {
  979 + if (result)
958 980 goto out;
959   - }
960   - for(i = 0; i < nr_segments; i++) {
  981 +
  982 + for (i = 0; i < nr_segments; i++) {
961 983 result = kimage_load_segment(image, &image->segment[i]);
962   - if (result) {
  984 + if (result)
963 985 goto out;
964   - }
965 986 }
966 987 result = kimage_terminate(image);
967   - if (result) {
  988 + if (result)
968 989 goto out;
969   - }
970 990 }
971 991 /* Install the new kernel, and Uninstall the old */
972 992 image = xchg(dest_image, image);
973 993  
974   - out:
  994 +out:
975 995 xchg(&kexec_lock, 0); /* Release the mutex */
976 996 kimage_free(image);
  997 +
977 998 return result;
978 999 }
979 1000  
980 1001 #ifdef CONFIG_COMPAT
981 1002 asmlinkage long compat_sys_kexec_load(unsigned long entry,
982   - unsigned long nr_segments, struct compat_kexec_segment __user *segments,
983   - unsigned long flags)
  1003 + unsigned long nr_segments,
  1004 + struct compat_kexec_segment __user *segments,
  1005 + unsigned long flags)
984 1006 {
985 1007 struct compat_kexec_segment in;
986 1008 struct kexec_segment out, __user *ksegments;
... ... @@ -989,20 +1011,17 @@
989 1011 /* Don't allow clients that don't understand the native
990 1012 * architecture to do anything.
991 1013 */
992   - if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT) {
  1014 + if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
993 1015 return -EINVAL;
994   - }
995 1016  
996   - if (nr_segments > KEXEC_SEGMENT_MAX) {
  1017 + if (nr_segments > KEXEC_SEGMENT_MAX)
997 1018 return -EINVAL;
998   - }
999 1019  
1000 1020 ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
1001 1021 for (i=0; i < nr_segments; i++) {
1002 1022 result = copy_from_user(&in, &segments[i], sizeof(in));
1003   - if (result) {
  1023 + if (result)
1004 1024 return -EFAULT;
1005   - }
1006 1025  
1007 1026 out.buf = compat_ptr(in.buf);
1008 1027 out.bufsz = in.bufsz;
... ... @@ -1010,9 +1029,8 @@
1010 1029 out.memsz = in.memsz;
1011 1030  
1012 1031 result = copy_to_user(&ksegments[i], &out, sizeof(out));
1013   - if (result) {
  1032 + if (result)
1014 1033 return -EFAULT;
1015   - }
1016 1034 }
1017 1035  
1018 1036 return sys_kexec_load(entry, nr_segments, ksegments, flags);