Commit ae6aa2ea8973e200cb3d0564a64a1b441d233428

Authored by Martin Schwidefsky
Committed by Linus Torvalds
1 parent 4c139862b8

[PATCH] s390: machine check handler bugs

The new machine check handler still has a few bugs.

1) The system entry time has to be stored in the machine check handler,

2) the machine check return psw must not be stored at the usual place
   because it might overwrite the return psw of the interrupted context
   (see the sketch below),

3) the return address for the call to s390_handle_mcck in the i/o interrupt
   handler is not correct,

4) the system call cleanup has to take the different save area of the
   machine check handler into account,

5) the machine check handler must not call UPDATE_VTIME before
   CREATE_STACK_FRAME, and

6) the io leave path needs a critical section cleanup to make sure that the
   TIF_MCCK_PENDING bit is really checked before switching back to user space.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
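
A rough C illustration of point 2 (a sketch only, not kernel code; the struct
and the race timeline are invented, the two field names merely echo the lowcore
slots __LC_RETURN_PSW and __LC_RETURN_MCCK_PSW):

    struct psw { unsigned long mask, addr; };

    struct return_slots {
            struct psw return_psw;       /* filled by the svc/io exit paths  */
            struct psw return_mcck_psw;  /* filled only by mcck_return (new) */
    };

    /*
     * Race with a single slot:
     *   io_leave:    slots.return_psw = user_psw;   <- machine check hits here
     *   mcck_return: slots.return_psw = mcck_psw;   <- clobbers user_psw
     *   io_leave:    lpsw slots.return_psw;         <- resumes with the wrong PSW
     * With a separate return_mcck_psw slot the nested exit no longer touches
     * the value the interrupted io_leave is about to load.
     */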

Showing 4 changed files with 181 additions and 58 deletions

arch/s390/kernel/entry.S
... ... @@ -138,14 +138,14 @@
138 138 st %r12,__SF_BACKCHAIN(%r15) # clear back chain
139 139 .endm
140 140  
141   - .macro RESTORE_ALL sync
142   - mvc __LC_RETURN_PSW(8),SP_PSW(%r15) # move user PSW to lowcore
  141 + .macro RESTORE_ALL psworg,sync
  142 + mvc \psworg(8),SP_PSW(%r15) # move user PSW to lowcore
143 143 .if !\sync
144   - ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
  144 + ni \psworg+1,0xfd # clear wait state bit
145 145 .endif
146 146 lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
147 147 STORE_TIMER __LC_EXIT_TIMER
148   - lpsw __LC_RETURN_PSW # back to caller
  148 + lpsw \psworg # back to caller
149 149 .endm
150 150  
151 151 /*
... ... @@ -235,7 +235,7 @@
235 235 tm __TI_flags+3(%r9),_TIF_WORK_SVC
236 236 bnz BASED(sysc_work) # there is work to do (signals etc.)
237 237 sysc_leave:
238   - RESTORE_ALL 1
  238 + RESTORE_ALL __LC_RETURN_PSW,1
239 239  
240 240 #
241 241 # recheck if there is more work to do
... ... @@ -312,8 +312,6 @@
312 312 la %r14,BASED(sysc_return) # load adr. of system return
313 313 br %r1 # branch to do_single_step
314 314  
315   -__critical_end:
316   -
317 315 #
318 316 # call trace before and after sys_call
319 317 #
... ... @@ -571,7 +569,8 @@
571 569 tm __TI_flags+3(%r9),_TIF_WORK_INT
572 570 bnz BASED(io_work) # there is work to do (signals etc.)
573 571 io_leave:
574   - RESTORE_ALL 0
  572 + RESTORE_ALL __LC_RETURN_PSW,0
  573 +io_done:
575 574  
576 575 #ifdef CONFIG_PREEMPT
577 576 io_preempt:
... ... @@ -621,7 +620,7 @@
621 620 #
622 621 io_mcck_pending:
623 622 l %r1,BASED(.Ls390_handle_mcck)
624   - l %r14,BASED(io_work_loop)
  623 + la %r14,BASED(io_work_loop)
625 624 br %r1 # TIF bit will be cleared by handler
626 625  
627 626 #
... ... @@ -674,6 +673,8 @@
674 673 basr %r14,%r1
675 674 b BASED(io_return)
676 675  
  676 +__critical_end:
  677 +
677 678 /*
678 679 * Machine check handler routines
679 680 */
... ... @@ -681,6 +682,7 @@
681 682 .globl mcck_int_handler
682 683 mcck_int_handler:
683 684 spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
  685 + mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
684 686 lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
685 687 SAVE_ALL_BASE __LC_SAVE_AREA+32
686 688 la %r12,__LC_MCK_OLD_PSW
687 689  
... ... @@ -693,17 +695,8 @@
693 695 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_LAST_UPDATE_TIMER
694 696 mvc __LC_SYNC_ENTER_TIMER(8),__LC_LAST_UPDATE_TIMER
695 697 mvc __LC_EXIT_TIMER(8),__LC_LAST_UPDATE_TIMER
696   -0: tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
697   - bno BASED(mcck_no_vtime) # no -> skip cleanup critical
698   - tm __LC_MCK_OLD_PSW+1,0x01 # interrupting from user ?
699   - bz BASED(mcck_no_vtime)
700   - UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
701   - UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
702   - mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
703   -mcck_no_vtime:
704 698 #endif
705   -0:
706   - tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
  699 +0: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
707 700 bno BASED(mcck_int_main) # no -> skip cleanup critical
708 701 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
709 702 bnz BASED(mcck_int_main) # from user -> load async stack
... ... @@ -720,6 +713,16 @@
720 713 be BASED(0f)
721 714 l %r15,__LC_PANIC_STACK # load panic stack
722 715 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32
  716 +#ifdef CONFIG_VIRT_CPU_ACCOUNTING
  717 + tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
  718 + bno BASED(mcck_no_vtime) # no -> skip cleanup critical
  719 + tm __LC_MCK_OLD_PSW+1,0x01 # interrupting from user ?
  720 + bz BASED(mcck_no_vtime)
  721 + UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
  722 + UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
  723 + mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
  724 +mcck_no_vtime:
  725 +#endif
723 726 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
724 727 la %r2,SP_PTREGS(%r15) # load pt_regs
725 728 l %r1,BASED(.Ls390_mcck)
... ... @@ -737,7 +740,7 @@
737 740 l %r1,BASED(.Ls390_handle_mcck)
738 741 basr %r14,%r1 # call machine check handler
739 742 mcck_return:
740   - RESTORE_ALL 0
  743 + RESTORE_ALL __LC_RETURN_MCCK_PSW,0
741 744  
742 745 #ifdef CONFIG_SMP
743 746 /*
... ... @@ -803,6 +806,10 @@
803 806 .long sysc_leave + 0x80000000, sysc_work_loop + 0x80000000
804 807 cleanup_table_sysc_work_loop:
805 808 .long sysc_work_loop + 0x80000000, sysc_reschedule + 0x80000000
  809 +cleanup_table_io_leave:
  810 + .long io_leave + 0x80000000, io_done + 0x80000000
  811 +cleanup_table_io_work_loop:
  812 + .long io_work_loop + 0x80000000, io_mcck_pending + 0x80000000
806 813  
807 814 cleanup_critical:
808 815 clc 4(4,%r12),BASED(cleanup_table_system_call)
809 816  
... ... @@ -825,10 +832,26 @@
825 832 clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
826 833 bl BASED(cleanup_sysc_return)
827 834 0:
  835 + clc 4(4,%r12),BASED(cleanup_table_io_leave)
  836 + bl BASED(0f)
  837 + clc 4(4,%r12),BASED(cleanup_table_io_leave+4)
  838 + bl BASED(cleanup_io_leave)
  839 +0:
  840 + clc 4(4,%r12),BASED(cleanup_table_io_work_loop)
  841 + bl BASED(0f)
  842 + clc 4(4,%r12),BASED(cleanup_table_io_work_loop+4)
  843 + bl BASED(cleanup_io_return)
  844 +0:
828 845 br %r14
829 846  
830 847 cleanup_system_call:
831 848 mvc __LC_RETURN_PSW(8),0(%r12)
  849 + c %r12,BASED(.Lmck_old_psw)
  850 + be BASED(0f)
  851 + la %r12,__LC_SAVE_AREA+16
  852 + b BASED(1f)
  853 +0: la %r12,__LC_SAVE_AREA+32
  854 +1:
832 855 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
833 856 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
834 857 bh BASED(0f)
835 858  
... ... @@ -838,11 +861,13 @@
838 861 #endif
839 862 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
840 863 bh BASED(0f)
841   - mvc __LC_SAVE_AREA(16),__LC_SAVE_AREA+16
842   -0: st %r13,__LC_SAVE_AREA+20
  864 + mvc __LC_SAVE_AREA(16),0(%r12)
  865 +0: st %r13,4(%r12)
  866 + st %r12,__LC_SAVE_AREA+48 # argh
843 867 SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
844 868 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
845   - st %r15,__LC_SAVE_AREA+28
  869 + l %r12,__LC_SAVE_AREA+48 # argh
  870 + st %r15,12(%r12)
846 871 lh %r7,0x8a
847 872 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
848 873 cleanup_vtime:
849 874  
850 875  
851 876  
... ... @@ -879,17 +904,21 @@
879 904  
880 905 cleanup_sysc_leave:
881 906 clc 4(4,%r12),BASED(cleanup_sysc_leave_insn)
882   - be BASED(0f)
  907 + be BASED(2f)
883 908 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
884 909 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
885 910 clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4)
886   - be BASED(0f)
  911 + be BASED(2f)
887 912 #endif
888 913 mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
889   - mvc __LC_SAVE_AREA+16(16),SP_R12(%r15)
890   - lm %r0,%r11,SP_R0(%r15)
  914 + c %r12,BASED(.Lmck_old_psw)
  915 + bne BASED(0f)
  916 + mvc __LC_SAVE_AREA+32(16),SP_R12(%r15)
  917 + b BASED(1f)
  918 +0: mvc __LC_SAVE_AREA+16(16),SP_R12(%r15)
  919 +1: lm %r0,%r11,SP_R0(%r15)
891 920 l %r15,SP_R15(%r15)
892   -0: la %r12,__LC_RETURN_PSW
  921 +2: la %r12,__LC_RETURN_PSW
893 922 br %r14
894 923 cleanup_sysc_leave_insn:
895 924 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
... ... @@ -897,6 +926,36 @@
897 926 #endif
898 927 .long sysc_leave + 10 + 0x80000000
899 928  
  929 +cleanup_io_return:
  930 + mvc __LC_RETURN_PSW(4),0(%r12)
  931 + mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop)
  932 + la %r12,__LC_RETURN_PSW
  933 + br %r14
  934 +
  935 +cleanup_io_leave:
  936 + clc 4(4,%r12),BASED(cleanup_io_leave_insn)
  937 + be BASED(2f)
  938 +#ifdef CONFIG_VIRT_CPU_ACCOUNTING
  939 + mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
  940 + clc 4(4,%r12),BASED(cleanup_io_leave_insn+4)
  941 + be BASED(2f)
  942 +#endif
  943 + mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
  944 + c %r12,BASED(.Lmck_old_psw)
  945 + bne BASED(0f)
  946 + mvc __LC_SAVE_AREA+32(16),SP_R12(%r15)
  947 + b BASED(1f)
  948 +0: mvc __LC_SAVE_AREA+16(16),SP_R12(%r15)
  949 +1: lm %r0,%r11,SP_R0(%r15)
  950 + l %r15,SP_R15(%r15)
  951 +2: la %r12,__LC_RETURN_PSW
  952 + br %r14
  953 +cleanup_io_leave_insn:
  954 +#ifdef CONFIG_VIRT_CPU_ACCOUNTING
  955 + .long io_leave + 18 + 0x80000000
  956 +#endif
  957 + .long io_leave + 14 + 0x80000000
  958 +
900 959 /*
901 960 * Integer constants
902 961 */
... ... @@ -918,6 +977,7 @@
918 977 .Ls390_mcck: .long s390_do_machine_check
919 978 .Ls390_handle_mcck:
920 979 .long s390_handle_mcck
  980 +.Lmck_old_psw: .long __LC_MCK_OLD_PSW
921 981 .Ldo_IRQ: .long do_IRQ
922 982 .Ldo_extint: .long do_extint
923 983 .Ldo_signal: .long do_signal
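
The new cleanup_table_io_leave/cleanup_table_io_work_loop entries above extend
cleanup_critical so that a machine check hitting the I/O exit path is repaired
before TIF_MCCK_PENDING is evaluated (point 6 of the commit message).
Conceptually the table lookup works like the following C sketch (illustration
only; the helper and type names are invented, the real code is the clc/bl
sequence in cleanup_critical):

    #include <stddef.h>
    #include <stdint.h>

    typedef void (*cleanup_fn)(void);

    struct cleanup_range {
            uintptr_t start, end;   /* half-open [start, end) range of code addresses */
            cleanup_fn fix;         /* e.g. cleanup_io_leave or cleanup_io_return     */
    };

    /* Run the cleanup whose range contains the interrupted PSW address. */
    static void run_cleanup(const struct cleanup_range *tab, size_t n, uintptr_t addr)
    {
            size_t i;

            for (i = 0; i < n; i++) {
                    if (addr >= tab[i].start && addr < tab[i].end) {
                            tab[i].fix();
                            return;
                    }
            }
    }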
arch/s390/kernel/entry64.S
... ... @@ -131,14 +131,14 @@
131 131 stg %r12,__SF_BACKCHAIN(%r15)
132 132 .endm
133 133  
134   - .macro RESTORE_ALL sync
135   - mvc __LC_RETURN_PSW(16),SP_PSW(%r15) # move user PSW to lowcore
  134 + .macro RESTORE_ALL psworg,sync
  135 + mvc \psworg(16),SP_PSW(%r15) # move user PSW to lowcore
136 136 .if !\sync
137   - ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
  137 + ni \psworg+1,0xfd # clear wait state bit
138 138 .endif
139 139 lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
140 140 STORE_TIMER __LC_EXIT_TIMER
141   - lpswe __LC_RETURN_PSW # back to caller
  141 + lpswe \psworg # back to caller
142 142 .endm
143 143  
144 144 /*
... ... @@ -233,7 +233,7 @@
233 233 tm __TI_flags+7(%r9),_TIF_WORK_SVC
234 234 jnz sysc_work # there is work to do (signals etc.)
235 235 sysc_leave:
236   - RESTORE_ALL 1
  236 + RESTORE_ALL __LC_RETURN_PSW,1
237 237  
238 238 #
239 239 # recheck if there is more work to do
... ... @@ -308,8 +308,6 @@
308 308 jg do_single_step # branch to do_sigtrap
309 309  
310 310  
311   -__critical_end:
312   -
313 311 #
314 312 # call syscall_trace before and after system call
315 313 # special linkage: %r12 contains the return address for trace_svc
... ... @@ -612,7 +610,8 @@
612 610 tm __TI_flags+7(%r9),_TIF_WORK_INT
613 611 jnz io_work # there is work to do (signals etc.)
614 612 io_leave:
615   - RESTORE_ALL 0
  613 + RESTORE_ALL __LC_RETURN_PSW,0
  614 +io_done:
616 615  
617 616 #ifdef CONFIG_PREEMPT
618 617 io_preempt:
... ... @@ -711,6 +710,8 @@
711 710 brasl %r14,do_extint
712 711 j io_return
713 712  
  713 +__critical_end:
  714 +
714 715 /*
715 716 * Machine check handler routines
716 717 */
... ... @@ -718,6 +719,7 @@
718 719 mcck_int_handler:
719 720 la %r1,4095 # revalidate r1
720 721 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
  722 + mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r1)
721 723 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
722 724 SAVE_ALL_BASE __LC_SAVE_AREA+64
723 725 la %r12,__LC_MCK_OLD_PSW
724 726  
... ... @@ -730,17 +732,8 @@
730 732 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_LAST_UPDATE_TIMER
731 733 mvc __LC_SYNC_ENTER_TIMER(8),__LC_LAST_UPDATE_TIMER
732 734 mvc __LC_EXIT_TIMER(8),__LC_LAST_UPDATE_TIMER
733   -0: tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
734   - jno mcck_no_vtime # no -> no timer update
735   - tm __LC_MCK_OLD_PSW+1,0x01 # interrupting from user ?
736   - jz mcck_no_vtime
737   - UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
738   - UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
739   - mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
740   -mcck_no_vtime:
741 735 #endif
742   -0:
743   - tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
  736 +0: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
744 737 jno mcck_int_main # no -> skip cleanup critical
745 738 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
746 739 jnz mcck_int_main # from user -> load kernel stack
... ... @@ -756,6 +749,16 @@
756 749 jz 0f
757 750 lg %r15,__LC_PANIC_STACK # load panic stack
758 751 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64
  752 +#ifdef CONFIG_VIRT_CPU_ACCOUNTING
  753 + tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
  754 + jno mcck_no_vtime # no -> no timer update
  755 + tm __LC_MCK_OLD_PSW+1,0x01 # interrupting from user ?
  756 + jz mcck_no_vtime
  757 + UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
  758 + UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
  759 + mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
  760 +mcck_no_vtime:
  761 +#endif
759 762 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
760 763 la %r2,SP_PTREGS(%r15) # load pt_regs
761 764 brasl %r14,s390_do_machine_check
... ... @@ -771,7 +774,7 @@
771 774 jno mcck_return
772 775 brasl %r14,s390_handle_mcck
773 776 mcck_return:
774   - RESTORE_ALL 0
  777 + RESTORE_ALL __LC_RETURN_MCCK_PSW,0
775 778  
776 779 #ifdef CONFIG_SMP
777 780 /*
... ... @@ -833,6 +836,10 @@
833 836 .quad sysc_leave, sysc_work_loop
834 837 cleanup_table_sysc_work_loop:
835 838 .quad sysc_work_loop, sysc_reschedule
  839 +cleanup_table_io_leave:
  840 + .quad io_leave, io_done
  841 +cleanup_table_io_work_loop:
  842 + .quad io_work_loop, io_mcck_pending
836 843  
837 844 cleanup_critical:
838 845 clc 8(8,%r12),BASED(cleanup_table_system_call)
839 846  
... ... @@ -855,10 +862,26 @@
855 862 clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
856 863 jl cleanup_sysc_return
857 864 0:
  865 + clc 8(8,%r12),BASED(cleanup_table_io_leave)
  866 + jl 0f
  867 + clc 8(8,%r12),BASED(cleanup_table_io_leave+8)
  868 + jl cleanup_io_leave
  869 +0:
  870 + clc 8(8,%r12),BASED(cleanup_table_io_work_loop)
  871 + jl 0f
  872 + clc 8(8,%r12),BASED(cleanup_table_io_work_loop+8)
  873 + jl cleanup_io_return
  874 +0:
858 875 br %r14
859 876  
860 877 cleanup_system_call:
861 878 mvc __LC_RETURN_PSW(16),0(%r12)
  879 + cghi %r12,__LC_MCK_OLD_PSW
  880 + je 0f
  881 + la %r12,__LC_SAVE_AREA+32
  882 + j 1f
  883 +0: la %r12,__LC_SAVE_AREA+64
  884 +1:
862 885 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
863 886 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
864 887 jh 0f
865 888  
... ... @@ -868,11 +891,13 @@
868 891 #endif
869 892 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
870 893 jh 0f
871   - mvc __LC_SAVE_AREA(32),__LC_SAVE_AREA+32
872   -0: stg %r13,__LC_SAVE_AREA+40
  894 + mvc __LC_SAVE_AREA(32),0(%r12)
  895 +0: stg %r13,8(%r12)
  896 + stg %r12,__LC_SAVE_AREA+96 # argh
873 897 SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
874 898 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
875   - stg %r15,__LC_SAVE_AREA+56
  899 + lg %r12,__LC_SAVE_AREA+96 # argh
  900 + stg %r15,24(%r12)
876 901 llgh %r7,__LC_SVC_INT_CODE
877 902 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
878 903 cleanup_vtime:
879 904  
880 905  
881 906  
882 907  
... ... @@ -909,23 +934,57 @@
909 934  
910 935 cleanup_sysc_leave:
911 936 clc 8(8,%r12),BASED(cleanup_sysc_leave_insn)
912   - je 0f
  937 + je 2f
913 938 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
914 939 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
915 940 clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8)
916   - je 0f
  941 + je 2f
917 942 #endif
918 943 mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
919   - mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
920   - lmg %r0,%r11,SP_R0(%r15)
  944 + cghi %r12,__LC_MCK_OLD_PSW
  945 + jne 0f
  946 + mvc __LC_SAVE_AREA+64(32),SP_R12(%r15)
  947 + j 1f
  948 +0: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
  949 +1: lmg %r0,%r11,SP_R0(%r15)
921 950 lg %r15,SP_R15(%r15)
922   -0: la %r12,__LC_RETURN_PSW
  951 +2: la %r12,__LC_RETURN_PSW
923 952 br %r14
924 953 cleanup_sysc_leave_insn:
925 954 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
926 955 .quad sysc_leave + 16
927 956 #endif
928 957 .quad sysc_leave + 12
  958 +
  959 +cleanup_io_return:
  960 + mvc __LC_RETURN_PSW(8),0(%r12)
  961 + mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop)
  962 + la %r12,__LC_RETURN_PSW
  963 + br %r14
  964 +
  965 +cleanup_io_leave:
  966 + clc 8(8,%r12),BASED(cleanup_io_leave_insn)
  967 + je 2f
  968 +#ifdef CONFIG_VIRT_CPU_ACCOUNTING
  969 + mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
  970 + clc 8(8,%r12),BASED(cleanup_io_leave_insn+8)
  971 + je 2f
  972 +#endif
  973 + mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
  974 + cghi %r12,__LC_MCK_OLD_PSW
  975 + jne 0f
  976 + mvc __LC_SAVE_AREA+64(32),SP_R12(%r15)
  977 + j 1f
  978 +0: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
  979 +1: lmg %r0,%r11,SP_R0(%r15)
  980 + lg %r15,SP_R15(%r15)
  981 +2: la %r12,__LC_RETURN_PSW
  982 + br %r14
  983 +cleanup_io_leave_insn:
  984 +#ifdef CONFIG_VIRT_CPU_ACCOUNTING
  985 + .quad io_leave + 20
  986 +#endif
  987 + .quad io_leave + 16
929 988  
930 989 /*
931 990 * Integer constants
drivers/s390/s390mach.c
... ... @@ -240,7 +240,7 @@
240 240 * Floating point control register can't be restored.
241 241 * Task will be terminated.
242 242 */
243   - asm volatile ("lfpc 0(%0)" : : "a" (&zero));
  243 + asm volatile ("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
244 244 kill_task = 1;
245 245  
246 246 }
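
The one-line change above adds an "m" (zero) operand to the inline assembly.
Without it the compiler only sees the address of zero being passed in a
register and is allowed to assume the asm never reads the variable's memory,
so the store of 0 could be dropped or reordered and lfpc would load stale
data. A minimal sketch of the pattern (hypothetical helper, not the driver
code):

    /* Load a known-good (all zero) floating point control register. */
    static inline void load_zero_fpc(void)
    {
            unsigned int zero = 0;

            asm volatile ("lfpc 0(%0)"
                          : /* no outputs */
                          : "a" (&zero),    /* address of the operand         */
                            "m" (zero));    /* its memory, marked as an input */
    }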
include/asm-s390/lowcore.h
... ... @@ -68,6 +68,7 @@
68 68 #define __LC_SYSTEM_TIMER 0x270
69 69 #define __LC_LAST_UPDATE_CLOCK 0x278
70 70 #define __LC_STEAL_CLOCK 0x280
  71 +#define __LC_RETURN_MCCK_PSW 0x288
71 72 #define __LC_KERNEL_STACK 0xC40
72 73 #define __LC_THREAD_INFO 0xC44
73 74 #define __LC_ASYNC_STACK 0xC48
... ... @@ -90,6 +91,7 @@
90 91 #define __LC_SYSTEM_TIMER 0x278
91 92 #define __LC_LAST_UPDATE_CLOCK 0x280
92 93 #define __LC_STEAL_CLOCK 0x288
  94 +#define __LC_RETURN_MCCK_PSW 0x290
93 95 #define __LC_KERNEL_STACK 0xD40
94 96 #define __LC_THREAD_INFO 0xD48
95 97 #define __LC_ASYNC_STACK 0xD50
... ... @@ -196,7 +198,8 @@
196 198 __u64 system_timer; /* 0x270 */
197 199 __u64 last_update_clock; /* 0x278 */
198 200 __u64 steal_clock; /* 0x280 */
199   - __u8 pad8[0xc00-0x288]; /* 0x288 */
  201 + psw_t return_mcck_psw; /* 0x288 */
  202 + __u8 pad8[0xc00-0x290]; /* 0x290 */
200 203  
201 204 /* System info area */
202 205 __u32 save_area[16]; /* 0xc00 */
... ... @@ -285,7 +288,8 @@
285 288 __u64 system_timer; /* 0x278 */
286 289 __u64 last_update_clock; /* 0x280 */
287 290 __u64 steal_clock; /* 0x288 */
288   - __u8 pad8[0xc00-0x290]; /* 0x290 */
  291 + psw_t return_mcck_psw; /* 0x290 */
  292 + __u8 pad8[0xc00-0x2a0]; /* 0x2a0 */
289 293 /* System info area */
290 294 __u64 save_area[16]; /* 0xc00 */
291 295 __u8 pad9[0xd40-0xc80]; /* 0xc80 */
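
The pad8 recalculation above keeps the system info area (save_area) at offset
0xc00 in both layouts: the 31-bit psw_t is 8 bytes, so padding now starts at
0x290, while the 64-bit psw_t is 16 bytes and padding starts at 0x2a0. A
standalone, runnable check of the 31-bit arithmetic (sketch only; the fragment
struct and names are invented, only the offsets come from the patch):

    #include <assert.h>
    #include <stdint.h>

    typedef struct { uint32_t mask; uint32_t addr; } psw31_t;  /* 8 bytes on 31-bit */

    struct lowcore_tail {                   /* fragment starting at 0x280 */
            uint64_t steal_clock;           /* 0x280 */
            psw31_t  return_mcck_psw;       /* 0x288, new in this patch   */
            uint8_t  pad8[0xc00 - 0x290];   /* 0x290 up to save_area      */
    };

    int main(void)
    {
            /* save_area must still land at 0xc00, i.e. 0xc00 - 0x280 bytes
             * after the start of this fragment. */
            assert(sizeof(struct lowcore_tail) == 0xc00 - 0x280);
            return 0;
    }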