Commit af61bdf035e2e4dd646b37b270bd558188a127c0
1 parent: fe0d42203c
Exists in: master and 6 other branches
ARM: vfp: rename last_VFP_context to vfp_current_hw_state
Rename the slightly confusing 'last_VFP_context' variable to be more descriptive of what it actually is. This variable stores a pointer to the current owner's vfpstate structure for the context held in the VFP hardware. Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Showing 2 changed files with 26 additions and 20 deletions (side-by-side diff)
arch/arm/vfp/vfphw.S
... | ... | @@ -77,9 +77,9 @@ |
77 | 77 | bne look_for_VFP_exceptions @ VFP is already enabled |
78 | 78 | |
79 | 79 | DBGSTR1 "enable %x", r10 |
80 | - ldr r3, last_VFP_context_address | |
80 | + ldr r3, vfp_current_hw_state_address | |
81 | 81 | orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set |
82 | - ldr r4, [r3, r11, lsl #2] @ last_VFP_context pointer | |
82 | + ldr r4, [r3, r11, lsl #2] @ vfp_current_hw_state pointer | |
83 | 83 | bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled |
84 | 84 | cmp r4, r10 |
85 | 85 | beq check_for_exception @ we are returning to the same |
... | ... | @@ -116,7 +116,7 @@ |
116 | 116 | |
117 | 117 | no_old_VFP_process: |
118 | 118 | DBGSTR1 "load state %p", r10 |
119 | - str r10, [r3, r11, lsl #2] @ update the last_VFP_context pointer | |
119 | + str r10, [r3, r11, lsl #2] @ update the vfp_current_hw_state pointer | |
120 | 120 | @ Load the saved state back into the VFP |
121 | 121 | VFPFLDMIA r10, r5 @ reload the working registers while |
122 | 122 | @ FPEXC is in a safe state |
... | ... | @@ -207,8 +207,8 @@ |
207 | 207 | ENDPROC(vfp_save_state) |
208 | 208 | |
209 | 209 | .align |
210 | -last_VFP_context_address: | |
211 | - .word last_VFP_context | |
210 | +vfp_current_hw_state_address: | |
211 | + .word vfp_current_hw_state | |
212 | 212 | |
213 | 213 | .macro tbl_branch, base, tmp, shift |
214 | 214 | #ifdef CONFIG_THUMB2_KERNEL |
arch/arm/vfp/vfpmodule.c
... | ... | @@ -33,9 +33,15 @@ |
33 | 33 | void vfp_null_entry(void); |
34 | 34 | |
35 | 35 | void (*vfp_vector)(void) = vfp_null_entry; |
36 | -union vfp_state *last_VFP_context[NR_CPUS]; | |
37 | 36 | |
38 | 37 | /* |
38 | + * The pointer to the vfpstate structure of the thread which currently | |
39 | + * owns the context held in the VFP hardware, or NULL if the hardware | |
40 | + * context is invalid. | |
41 | + */ | |
42 | +union vfp_state *vfp_current_hw_state[NR_CPUS]; | |
43 | + | |
44 | +/* | |
39 | 45 | * Dual-use variable. |
40 | 46 | * Used in startup: set to non-zero if VFP checks fail |
41 | 47 | * After startup, holds VFP architecture |
42 | 48 | |
... | ... | @@ -57,12 +63,12 @@ |
57 | 63 | |
58 | 64 | /* |
59 | 65 | * Disable VFP to ensure we initialize it first. We must ensure |
60 | - * that the modification of last_VFP_context[] and hardware disable | |
66 | + * that the modification of vfp_current_hw_state[] and hardware disable | |
61 | 67 | * are done for the same CPU and without preemption. |
62 | 68 | */ |
63 | 69 | cpu = get_cpu(); |
64 | - if (last_VFP_context[cpu] == vfp) | |
65 | - last_VFP_context[cpu] = NULL; | |
70 | + if (vfp_current_hw_state[cpu] == vfp) | |
71 | + vfp_current_hw_state[cpu] = NULL; | |
66 | 72 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); |
67 | 73 | put_cpu(); |
68 | 74 | } |
... | ... | @@ -73,8 +79,8 @@ |
73 | 79 | union vfp_state *vfp = &thread->vfpstate; |
74 | 80 | unsigned int cpu = get_cpu(); |
75 | 81 | |
76 | - if (last_VFP_context[cpu] == vfp) | |
77 | - last_VFP_context[cpu] = NULL; | |
82 | + if (vfp_current_hw_state[cpu] == vfp) | |
83 | + vfp_current_hw_state[cpu] = NULL; | |
78 | 84 | put_cpu(); |
79 | 85 | } |
80 | 86 | |
... | ... | @@ -129,9 +135,9 @@ |
129 | 135 | * case the thread migrates to a different CPU. The |
130 | 136 | * restoring is done lazily. |
131 | 137 | */ |
132 | - if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) { | |
133 | - vfp_save_state(last_VFP_context[cpu], fpexc); | |
134 | - last_VFP_context[cpu]->hard.cpu = cpu; | |
138 | + if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) { | |
139 | + vfp_save_state(vfp_current_hw_state[cpu], fpexc); | |
140 | + vfp_current_hw_state[cpu]->hard.cpu = cpu; | |
135 | 141 | } |
136 | 142 | /* |
137 | 143 | * Thread migration, just force the reloading of the |
... | ... | @@ -139,7 +145,7 @@ |
139 | 145 | * contain stale data. |
140 | 146 | */ |
141 | 147 | if (thread->vfpstate.hard.cpu != cpu) |
142 | - last_VFP_context[cpu] = NULL; | |
148 | + vfp_current_hw_state[cpu] = NULL; | |
143 | 149 | #endif |
144 | 150 | |
145 | 151 | /* |
... | ... | @@ -415,7 +421,7 @@ |
415 | 421 | } |
416 | 422 | |
417 | 423 | /* clear any information we had about last context state */ |
418 | - memset(last_VFP_context, 0, sizeof(last_VFP_context)); | |
424 | + memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state)); | |
419 | 425 | |
420 | 426 | return 0; |
421 | 427 | } |
... | ... | @@ -451,7 +457,7 @@ |
451 | 457 | * If the thread we're interested in is the current owner of the |
452 | 458 | * hardware VFP state, then we need to save its state. |
453 | 459 | */ |
454 | - if (last_VFP_context[cpu] == &thread->vfpstate) { | |
460 | + if (vfp_current_hw_state[cpu] == &thread->vfpstate) { | |
455 | 461 | u32 fpexc = fmrx(FPEXC); |
456 | 462 | |
457 | 463 | /* |
... | ... | @@ -473,7 +479,7 @@ |
473 | 479 | * If the thread we're interested in is the current owner of the |
474 | 480 | * hardware VFP state, then we need to save its state. |
475 | 481 | */ |
476 | - if (last_VFP_context[cpu] == &thread->vfpstate) { | |
482 | + if (vfp_current_hw_state[cpu] == &thread->vfpstate) { | |
477 | 483 | u32 fpexc = fmrx(FPEXC); |
478 | 484 | |
479 | 485 | fmxr(FPEXC, fpexc & ~FPEXC_EN); |
... | ... | @@ -482,7 +488,7 @@ |
482 | 488 | * Set the context to NULL to force a reload the next time |
483 | 489 | * the thread uses the VFP. |
484 | 490 | */ |
485 | - last_VFP_context[cpu] = NULL; | |
491 | + vfp_current_hw_state[cpu] = NULL; | |
486 | 492 | } |
487 | 493 | |
488 | 494 | #ifdef CONFIG_SMP |
... | ... | @@ -514,7 +520,7 @@ |
514 | 520 | { |
515 | 521 | if (action == CPU_DYING || action == CPU_DYING_FROZEN) { |
516 | 522 | unsigned int cpu = (long)hcpu; |
517 | - last_VFP_context[cpu] = NULL; | |
523 | + vfp_current_hw_state[cpu] = NULL; | |
518 | 524 | } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) |
519 | 525 | vfp_enable(NULL); |
520 | 526 | return NOTIFY_OK; |