Commit ffe1b7e14e6b606bd84cab564aa2f481dbd4e418
Committed by: Paul Mackerras
1 parent: 38fcdcfe38
Exists in: master and 4 other branches
[PATCH] ppc64: Formatting cleanups in arch/ppc64/kernel/ItLpQueue.c
Just formatting cleanups:

 * rename some "nextLpEvent" variables to just "event"
 * make code fit in 80 columns
 * use brackets around if/else
 * use a temporary to make hvlpevent_clear_valid clearer

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Showing 1 changed file with 41 additions and 31 deletions
arch/ppc64/kernel/ItLpQueue.c
@@ -74,24 +74,27 @@
 
 static struct HvLpEvent * get_next_hvlpevent(void)
 {
-	struct HvLpEvent * nextLpEvent =
-		(struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
-	if (nextLpEvent->xFlags.xValid) {
+	struct HvLpEvent * event;
+	event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
+
+	if (event->xFlags.xValid) {
 		/* rmb() needed only for weakly consistent machines (regatta) */
 		rmb();
 		/* Set pointer to next potential event */
-		hvlpevent_queue.xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
-				LpEventAlign) /
-				LpEventAlign) *
-				LpEventAlign;
+		hvlpevent_queue.xSlicCurEventPtr += ((event->xSizeMinus1 +
+				LpEventAlign) / LpEventAlign) * LpEventAlign;
+
 		/* Wrap to beginning if no room at end */
-		if (hvlpevent_queue.xSlicCurEventPtr > hvlpevent_queue.xSlicLastValidEventPtr)
-			hvlpevent_queue.xSlicCurEventPtr = hvlpevent_queue.xSlicEventStackPtr;
+		if (hvlpevent_queue.xSlicCurEventPtr >
+				hvlpevent_queue.xSlicLastValidEventPtr) {
+			hvlpevent_queue.xSlicCurEventPtr =
+				hvlpevent_queue.xSlicEventStackPtr;
+		}
+	} else {
+		event = NULL;
 	}
-	else
-		nextLpEvent = NULL;
 
-	return nextLpEvent;
+	return event;
 }
 
 static unsigned long spread_lpevents = NR_CPUS;
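The pointer advance in this hunk uses a standard align-up idiom: because the queue stores xSizeMinus1 (the event size minus one), adding a full LpEventAlign before the divide rounds the true size up to the next alignment boundary. A minimal standalone sketch, assuming a 64-byte LpEventAlign purely for illustration (the real constant comes from the iSeries headers):

#include <stdio.h>

/* Illustrative alignment value; the kernel's LpEventAlign is defined
 * in the iSeries HvLpEvent headers. */
#define LpEventAlign 64

/* Mirror of the advance in get_next_hvlpevent(): size_minus_1 holds
 * size - 1, so (size_minus_1 + align) / align * align rounds the true
 * size up to the next multiple of align. */
static unsigned long lp_event_span(unsigned long size_minus_1)
{
	return ((size_minus_1 + LpEventAlign) / LpEventAlign) * LpEventAlign;
}

int main(void)
{
	printf("%lu\n", lp_event_span(63));	/* 64-byte event  -> 64  */
	printf("%lu\n", lp_event_span(99));	/* 100-byte event -> 128 */
	return 0;
}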
@@ -104,34 +107,41 @@
 		return 0;
 
 	next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
-	return next_event->xFlags.xValid | hvlpevent_queue.xPlicOverflowIntPending;
+
+	return next_event->xFlags.xValid |
+		hvlpevent_queue.xPlicOverflowIntPending;
 }
 
 static void hvlpevent_clear_valid(struct HvLpEvent * event)
 {
-	/* Clear the valid bit of the event
-	 * Also clear bits within this event that might
-	 * look like valid bits (on 64-byte boundaries)
+	/* Tell the Hypervisor that we're done with this event.
+	 * Also clear bits within this event that might look like valid bits.
+	 * ie. on 64-byte boundaries.
 	 */
+	struct HvLpEvent *tmp;
 	unsigned extra = ((event->xSizeMinus1 + LpEventAlign) /
 		LpEventAlign) - 1;
+
 	switch (extra) {
 	case 3:
-		((struct HvLpEvent*)((char*)event+3*LpEventAlign))->xFlags.xValid=0;
+		tmp = (struct HvLpEvent*)((char*)event + 3 * LpEventAlign);
+		tmp->xFlags.xValid = 0;
 	case 2:
-		((struct HvLpEvent*)((char*)event+2*LpEventAlign))->xFlags.xValid=0;
+		tmp = (struct HvLpEvent*)((char*)event + 2 * LpEventAlign);
+		tmp->xFlags.xValid = 0;
 	case 1:
-		((struct HvLpEvent*)((char*)event+1*LpEventAlign))->xFlags.xValid=0;
-	case 0:
-		;
+		tmp = (struct HvLpEvent*)((char*)event + 1 * LpEventAlign);
+		tmp->xFlags.xValid = 0;
 	}
+
 	mb();
+
 	event->xFlags.xValid = 0;
 }
 
 void process_hvlpevents(struct pt_regs *regs)
 {
-	struct HvLpEvent * nextLpEvent;
+	struct HvLpEvent * event;
 
 	/* If we have recursed, just return */
 	if ( !set_inUse() )
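The switch in hvlpevent_clear_valid relies on deliberate case fall-through: with no break statements, an event spanning four 64-byte slots (extra == 3) also executes the case 2 and case 1 bodies, clearing every trailing slot's valid-looking bit before the mb() and the final clear hand the first slot back. A standalone sketch of the pattern, using a stand-in struct rather than the real HvLpEvent layout:

#define LpEventAlign 64

/* Stand-in for struct HvLpEvent: only the valid flag matters here, and
 * the padding forces exactly one element per 64-byte slot. */
struct fake_event {
	unsigned char valid;
	unsigned char pad[LpEventAlign - 1];
};

static void clear_trailing_valid(struct fake_event *event, unsigned extra)
{
	switch (extra) {
	case 3:
		event[3].valid = 0;	/* fall through */
	case 2:
		event[2].valid = 0;	/* fall through */
	case 1:
		event[1].valid = 0;
	}
	/* The kernel issues mb() at this point, so the hypervisor never
	 * sees the first slot freed before the trailing slots are clean. */
	event[0].valid = 0;
}

int main(void)
{
	struct fake_event q[4] = {
		{ .valid = 1 }, { .valid = 1 }, { .valid = 1 }, { .valid = 1 }
	};

	clear_trailing_valid(q, 3);	/* clears slots 3, 2, 1, then 0 */
	return 0;
}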
@@ -143,8 +153,8 @@
 		BUG();
 
 	for (;;) {
-		nextLpEvent = get_next_hvlpevent();
-		if (nextLpEvent) {
+		event = get_next_hvlpevent();
+		if (event) {
 			/* Call appropriate handler here, passing
 			 * a pointer to the LpEvent. The handler
 			 * must make a copy of the LpEvent if it
@@ -158,15 +168,15 @@
 			 * registered for, so no type check is necessary
 			 * here!
 			 */
-			if (nextLpEvent->xType < HvLpEvent_Type_NumTypes)
-				__get_cpu_var(hvlpevent_counts)[nextLpEvent->xType]++;
-			if (nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
-					lpEventHandler[nextLpEvent->xType])
-				lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
+			if (event->xType < HvLpEvent_Type_NumTypes)
+				__get_cpu_var(hvlpevent_counts)[event->xType]++;
+			if (event->xType < HvLpEvent_Type_NumTypes &&
+					lpEventHandler[event->xType])
+				lpEventHandler[event->xType](event, regs);
 			else
-				printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );
+				printk(KERN_INFO "Unexpected Lp Event type=%d\n", event->xType );
 
-			hvlpevent_clear_valid(nextLpEvent);
+			hvlpevent_clear_valid(event);
 		} else if (hvlpevent_queue.xPlicOverflowIntPending)
 			/*
 			 * No more valid events. If overflow events are
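The dispatch in this last hunk is a plain table of function pointers indexed by event type, with a bounds check before the lookup. A self-contained sketch of that pattern; the handler and table contents below are hypothetical stand-ins, since the real lpEventHandler entries are registered elsewhere in the iSeries code:

#include <stdio.h>

enum { HvLpEvent_Type_NumTypes = 4 };	/* illustrative bound */

struct HvLpEvent { int xType; };
struct pt_regs;				/* opaque here, as in the sketch */

typedef void (*LpEventHandler)(struct HvLpEvent *, struct pt_regs *);

/* Hypothetical handler; real handlers copy the event and return. */
static void demo_handler(struct HvLpEvent *event, struct pt_regs *regs)
{
	(void)regs;
	printf("handled event type %d\n", event->xType);
}

static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes] = {
	[0] = demo_handler,
};

static void dispatch(struct HvLpEvent *event, struct pt_regs *regs)
{
	/* Bounds-check the type, then call the registered handler if any,
	 * mirroring the checks in process_hvlpevents(). */
	if (event->xType < HvLpEvent_Type_NumTypes &&
	    lpEventHandler[event->xType])
		lpEventHandler[event->xType](event, regs);
	else
		printf("Unexpected Lp Event type=%d\n", event->xType);
}

int main(void)
{
	struct HvLpEvent e = { .xType = 0 };
	dispatch(&e, NULL);
	return 0;
}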