Skip to content
Permalink
Browse files
ppc/fpu: Add generic FPU api similar to x86
- Add kernel_fpu_begin & kernel_fpu_end APIs, similar to x86
- Add logic similar to x86 to ensure FPU
  begin/end call correctness
- Add kernel_fpu_enabled to report whether the FPU is enabled

Signed-off-by: Anson Jacob <Anson.Jacob@amd.com>
  • Loading branch information
Anson Jacob authored and intel-lab-lkp committed Jul 20, 2021
1 parent e73f0f0 commit 5cd65b03db50b99961fa08fdc6aa8ca919cb05d4
Show file tree
Hide file tree
Showing 2 changed files with 137 additions and 22 deletions.
@@ -41,10 +41,7 @@ extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void giveup_fpu(struct task_struct *);
extern void save_fpu(struct task_struct *);
static inline void disable_kernel_fp(void)
{
msr_check_and_clear(MSR_FP);
}
extern void disable_kernel_fp(void);
#else
static inline void save_fpu(struct task_struct *t) { }
static inline void flush_fp_to_thread(struct task_struct *t) { }
@@ -55,10 +52,7 @@ extern void enable_kernel_altivec(void);
extern void flush_altivec_to_thread(struct task_struct *);
extern void giveup_altivec(struct task_struct *);
extern void save_altivec(struct task_struct *);
static inline void disable_kernel_altivec(void)
{
msr_check_and_clear(MSR_VEC);
}
extern void disable_kernel_altivec(void);
#else
static inline void save_altivec(struct task_struct *t) { }
static inline void __giveup_altivec(struct task_struct *t) { }
@@ -67,20 +61,7 @@ static inline void __giveup_altivec(struct task_struct *t) { }
#ifdef CONFIG_VSX
extern void enable_kernel_vsx(void);
extern void flush_vsx_to_thread(struct task_struct *);
static inline void disable_kernel_vsx(void)
{
msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
#else
static inline void enable_kernel_vsx(void)
{
BUILD_BUG();
}

static inline void disable_kernel_vsx(void)
{
BUILD_BUG();
}
extern void disable_kernel_vsx(void);
#endif

#ifdef CONFIG_SPE
@@ -114,4 +95,8 @@ static inline void clear_task_ebb(struct task_struct *t)

extern int set_thread_tidr(struct task_struct *t);

bool kernel_fpu_enabled(void);
void kernel_fpu_begin(void);
void kernel_fpu_end(void);

#endif /* _ASM_POWERPC_SWITCH_TO_H */
@@ -75,6 +75,17 @@
#define TM_DEBUG(x...) do { } while(0)
#endif

/*
 * Track whether the kernel currently owns the FPU (or VMX/VSX)
 * state on this CPU.
 *
 * This flag is used to check:
 *
 * - kernel_fpu_begin()/end() call-pairing correctness
 * - kernel_fpu_enabled() queries
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

extern unsigned long _get_SP(void);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -212,6 +223,9 @@ void enable_kernel_fp(void)
unsigned long cpumsr;

WARN_ON(preemptible());
WARN_ON_ONCE(this_cpu_read(in_kernel_fpu));

this_cpu_write(in_kernel_fpu, true);

cpumsr = msr_check_and_set(MSR_FP);

@@ -231,6 +245,15 @@ void enable_kernel_fp(void)
}
}
EXPORT_SYMBOL(enable_kernel_fp);

/*
 * disable_kernel_fp - drop kernel ownership of the scalar FPU.
 *
 * Counterpart to enable_kernel_fp(): warns if called without a matching
 * enable (per-CPU in_kernel_fpu not set), clears the flag, then clears
 * MSR_FP so the kernel no longer claims the FP unit on this CPU.
 */
void disable_kernel_fp(void)
{
	WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, false);
	msr_check_and_clear(MSR_FP);
}
EXPORT_SYMBOL(disable_kernel_fp);
#else
static inline void __giveup_fpu(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_FPU */
@@ -263,6 +286,9 @@ void enable_kernel_altivec(void)
unsigned long cpumsr;

WARN_ON(preemptible());
WARN_ON_ONCE(this_cpu_read(in_kernel_fpu));

this_cpu_write(in_kernel_fpu, true);

cpumsr = msr_check_and_set(MSR_VEC);

@@ -283,6 +309,14 @@ void enable_kernel_altivec(void)
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * disable_kernel_altivec - drop kernel ownership of the VMX/AltiVec unit.
 *
 * Counterpart to enable_kernel_altivec(): warns on unbalanced
 * begin/end usage (per-CPU in_kernel_fpu not set), clears the flag,
 * then clears MSR_VEC on this CPU.
 */
void disable_kernel_altivec(void)
{
	WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, false);
	msr_check_and_clear(MSR_VEC);
}
EXPORT_SYMBOL(disable_kernel_altivec);
/*
* Make sure the VMX/Altivec register state in the
* the thread_struct is up to date for task tsk.
@@ -333,6 +367,9 @@ void enable_kernel_vsx(void)
unsigned long cpumsr;

WARN_ON(preemptible());
WARN_ON_ONCE(this_cpu_read(in_kernel_fpu));

this_cpu_write(in_kernel_fpu, true);

cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);

@@ -354,6 +391,15 @@ void enable_kernel_vsx(void)
}
EXPORT_SYMBOL(enable_kernel_vsx);

/*
 * disable_kernel_vsx - drop kernel ownership of the VSX unit.
 *
 * Counterpart to enable_kernel_vsx(): warns on unbalanced begin/end
 * usage, clears the per-CPU in_kernel_fpu flag, then clears all three
 * MSR bits (FP, VEC, VSX) that enable_kernel_vsx() set together.
 */
void disable_kernel_vsx(void)
{
	WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, false);
	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
EXPORT_SYMBOL(disable_kernel_vsx);

void flush_vsx_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
@@ -406,6 +452,90 @@ void flush_spe_to_thread(struct task_struct *tsk)
}
#endif /* CONFIG_SPE */

/*
 * fpu_support - report whether any floating-point facility is usable
 * on this CPU: VSX, AltiVec, or the scalar FPU (i.e. the FPU is not
 * flagged unavailable).
 */
static bool fpu_support(void)
{
	return cpu_has_feature(CPU_FTR_VSX_COMP) ||
	       cpu_has_feature(CPU_FTR_ALTIVEC_COMP) ||
	       !cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE);
}

/*
 * kernel_fpu_enabled - report whether the kernel currently owns the
 * FPU/VMX/VSX state on this CPU, i.e. whether we are inside a
 * kernel_fpu_begin()/kernel_fpu_end() section.
 */
bool kernel_fpu_enabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}
EXPORT_SYMBOL(kernel_fpu_enabled);

/*
 * kernel_fpu_begin - allow the kernel to use FP/VMX/VSX registers.
 *
 * Disables preemption for the duration of the section (re-enabled by
 * kernel_fpu_end()) and enables the widest available facility: VSX
 * first (which sets MSR_FP|MSR_VEC|MSR_VSX), then AltiVec, then the
 * scalar FPU.  Must be paired with kernel_fpu_end().
 *
 * NOTE(review): if fpu_support() is true at runtime but the matching
 * CONFIG_* option is not built in, we return with preemption disabled
 * and no facility enabled; kernel_fpu_end() still rebalances the
 * preempt count, but no FP state is usable in between.  Presumably the
 * CPU_FTR_* bits cannot be set without their CONFIG_* options -- confirm.
 */
void kernel_fpu_begin(void)
{
	/* Warn (and bail) on hardware with no usable FP facility at all. */
	if (!fpu_support()) {
		WARN_ON_ONCE(1);
		return;
	}

	/* Matching preempt_enable() is in kernel_fpu_end(). */
	preempt_disable();

#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
		enable_kernel_vsx();
		return;
	}
#endif

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
		enable_kernel_altivec();
		return;
	}
#endif

#ifdef CONFIG_PPC_FPU
	if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
		enable_kernel_fp();
		return;
	}
#endif
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

/*
 * kernel_fpu_end - release the FP/VMX/VSX registers claimed by
 * kernel_fpu_begin().
 *
 * Disables whichever facility kernel_fpu_begin() enabled (same
 * VSX -> AltiVec -> FPU selection order), then re-enables preemption.
 *
 * NOTE(review): if none of CONFIG_VSX/CONFIG_ALTIVEC/CONFIG_PPC_FPU is
 * set, the "done" label has no goto referencing it and may trigger an
 * unused-label warning; the final "goto done" before the label is also
 * redundant.  Worth confirming against the supported config matrix.
 */
void kernel_fpu_end(void)
{
	/* Mirror the guard in kernel_fpu_begin(): no facility, no work. */
	if (!fpu_support()) {
		WARN_ON_ONCE(1);
		return;
	}

#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
		disable_kernel_vsx();
		goto done;
	}
#endif

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
		disable_kernel_altivec();
		goto done;
	}
#endif

#ifdef CONFIG_PPC_FPU
	if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
		disable_kernel_fp();
		goto done;
	}
#endif

done:
	/* Balances the preempt_disable() in kernel_fpu_begin(). */
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);

static unsigned long msr_all_available;

static int __init init_msr_all_available(void)

0 comments on commit 5cd65b0

Please sign in to comment.