@@ -742,6 +742,73 @@ void restore_tm_state(struct pt_regs *regs)
 #define __switch_to_tm(prev)
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
+static inline void save_sprs(struct thread_struct *t)
+{
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		t->vrsave = mfspr(SPRN_VRSAVE);
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (cpu_has_feature(CPU_FTR_DSCR))
+		t->dscr = mfspr(SPRN_DSCR);
+
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		t->bescr = mfspr(SPRN_BESCR);
+		t->ebbhr = mfspr(SPRN_EBBHR);
+		t->ebbrr = mfspr(SPRN_EBBRR);
+
+		t->fscr = mfspr(SPRN_FSCR);
+
+		/*
+		 * Note that the TAR is not available for use in the kernel.
+		 * (To provide this, the TAR should be backed up/restored on
+		 * exception entry/exit instead, and be in pt_regs. FIXME,
+		 * this should be in pt_regs anyway (for debug).)
+		 */
+		t->tar = mfspr(SPRN_TAR);
+	}
+#endif
+}
+
+static inline void restore_sprs(struct thread_struct *old_thread,
+				struct thread_struct *new_thread)
+{
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+	    old_thread->vrsave != new_thread->vrsave)
+		mtspr(SPRN_VRSAVE, new_thread->vrsave);
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (cpu_has_feature(CPU_FTR_DSCR)) {
+		u64 dscr = get_paca()->dscr_default;
+		u64 fscr = old_thread->fscr & ~FSCR_DSCR;
+
+		if (new_thread->dscr_inherit) {
+			dscr = new_thread->dscr;
+			fscr |= FSCR_DSCR;
+		}
+
+		if (old_thread->dscr != dscr)
+			mtspr(SPRN_DSCR, dscr);
+
+		if (old_thread->fscr != fscr)
+			mtspr(SPRN_FSCR, fscr);
+	}
+
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		if (old_thread->bescr != new_thread->bescr)
+			mtspr(SPRN_BESCR, new_thread->bescr);
+		if (old_thread->ebbhr != new_thread->ebbhr)
+			mtspr(SPRN_EBBHR, new_thread->ebbhr);
+		if (old_thread->ebbrr != new_thread->ebbrr)
+			mtspr(SPRN_EBBRR, new_thread->ebbrr);
+
+		if (old_thread->tar != new_thread->tar)
+			mtspr(SPRN_TAR, new_thread->tar);
+	}
+#endif
+}
+
 struct task_struct *__switch_to(struct task_struct *prev,
 	struct task_struct *new)
 {
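Editor's note (not part of the commit): restore_sprs() above compares the outgoing thread's cached value with the incoming thread's value and only issues an mtspr when they differ, since writing an SPR is typically more expensive than a compare. This is also why the helper takes both old_thread and new_thread rather than just the incoming thread. A minimal, standalone sketch of that compare-before-write idea in plain userspace C; struct regs_cache, fake_mtspr() and the register fields are hypothetical stand-ins, not kernel APIs:

/* Illustrative only: models the compare-before-write pattern of restore_sprs()
 * in userspace C. fake_mtspr() and struct regs_cache are hypothetical. */
#include <stdint.h>
#include <stdio.h>

struct regs_cache {
	uint64_t dscr;
	uint64_t tar;
	uint64_t vrsave;
};

static void fake_mtspr(const char *name, uint64_t val)
{
	/* Stand-in for the comparatively slow mtspr instruction. */
	printf("mtspr %-6s <- 0x%llx\n", name, (unsigned long long)val);
}

static void restore_regs(const struct regs_cache *old, const struct regs_cache *new)
{
	/* Write a register only when the incoming task's value differs. */
	if (old->dscr != new->dscr)
		fake_mtspr("DSCR", new->dscr);
	if (old->tar != new->tar)
		fake_mtspr("TAR", new->tar);
	if (old->vrsave != new->vrsave)
		fake_mtspr("VRSAVE", new->vrsave);
}

int main(void)
{
	struct regs_cache prev = { .dscr = 0x10, .tar = 0x1000, .vrsave = 0xff };
	struct regs_cache next = { .dscr = 0x10, .tar = 0x2000, .vrsave = 0xff };

	restore_regs(&prev, &next);	/* only TAR is written */
	return 0;
}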
@@ -751,17 +818,16 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	struct ppc64_tlb_batch *batch;
 #endif
 
+	new_thread = &new->thread;
+	old_thread = &current->thread;
+
 	WARN_ON(!irqs_disabled());
 
-	/* Back up the TAR and DSCR across context switches.
-	 * Note that the TAR is not available for use in the kernel.  (To
-	 * provide this, the TAR should be backed up/restored on exception
-	 * entry/exit instead, and be in pt_regs.  FIXME, this should be in
-	 * pt_regs anyway (for debug).)
-	 * Save the TAR and DSCR here before we do treclaim/trecheckpoint as
-	 * these will change them.
+	/*
+	 * We need to save SPRs before treclaim/trecheckpoint as these will
+	 * change a number of them.
 	 */
-	save_early_sprs(&prev->thread);
+	save_sprs(&prev->thread);
 
 	__switch_to_tm(prev);
 
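Editor's note (not part of the commit): as both the old and new comments say, save_sprs() has to run before the TM reclaim/recheckpoint paths reached via __switch_to_tm(), because those paths change a number of SPRs; the live values must be captured into prev->thread first. A tiny userspace model of that save-before-clobber ordering; live_dscr and fake_treclaim() are hypothetical stand-ins:

/* Illustrative only: why the live value is cached before a step that
 * clobbers it. live_dscr and fake_treclaim() are hypothetical. */
#include <assert.h>
#include <stdint.h>

static uint64_t live_dscr;		/* models the live SPR */

static void fake_treclaim(void)
{
	live_dscr = 0;			/* models TM reclaim trashing the SPR */
}

int main(void)
{
	uint64_t saved;

	live_dscr = 0x1234;
	saved = live_dscr;		/* save_sprs() step: capture first... */
	fake_treclaim();		/* ...then let the clobbering step run */
	assert(saved == 0x1234);	/* the cached copy is what gets restored later */
	return 0;
}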
@@ -844,10 +910,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
 #endif
 
-
-	new_thread = &new->thread;
-	old_thread = &current->thread;
-
 #ifdef CONFIG_PPC64
 	/*
 	 * Collect processor utilization data per process
@@ -883,6 +945,10 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
 	last = _switch(old_thread, new_thread);
 
+	/* Need to recalculate these after calling _switch() */
+	old_thread = &last->thread;
+	new_thread = &current->thread;
+
 #ifdef CONFIG_PPC_BOOK3S_64
 	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
 		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
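Editor's note (not part of the commit): the recalculation added here matters because _switch() performs the low-level context switch; by the time it returns, execution is in the incoming task's context, `current` refers to that task, and the returned `last` is whichever task was running immediately before it got the CPU. The locals computed before the call therefore describe a stale view, so they are recomputed before restore_sprs() is called in the next hunk. A standalone model of that pointer recomputation; fake_switch(), struct task and current_task are hypothetical stand-ins for _switch(), task_struct and current:

/* Illustrative only: recomputing old/new thread pointers after a switch. */
#include <assert.h>
#include <stdio.h>

struct thread { int dummy; };
struct task {
	const char *name;
	struct thread thread;
};

static struct task A = { "A" }, B = { "B" };
static struct task *current_task = &A;		/* models `current` */

/* Models _switch(): makes `next` the running task and returns the task that
 * was running just before it. In the real kernel the call also swaps stacks,
 * and arbitrarily many switches can happen before it "returns" here, so
 * locals computed before the call cannot be trusted afterwards. */
static struct task *fake_switch(struct task *next)
{
	struct task *last = current_task;

	current_task = next;
	return last;
}

int main(void)
{
	struct thread *old_thread, *new_thread;
	struct task *last;

	/* Before the switch: pointers computed from the outgoing task. */
	old_thread = &current_task->thread;	/* A's thread */
	new_thread = &B.thread;

	last = fake_switch(&B);

	/* After the switch: recompute from `last` and the new `current`,
	 * exactly as the hunk above does, before restoring SPRs. */
	old_thread = &last->thread;
	new_thread = &current_task->thread;

	assert(old_thread == &A.thread);
	assert(new_thread == &B.thread);
	printf("switched from %s to %s\n", last->name, current_task->name);
	return 0;
}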
@@ -891,6 +957,8 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	}
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
+	restore_sprs(old_thread, new_thread);
+
 	return last;
 }
 