diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index da71d41227ff9a32c5ff46b125a76d699705a31b..12acbb32a5613f641d4c31afd69960d639cc7010 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -446,19 +446,19 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
 	return 0;
 }
 
-static inline void __copy_kernel_to_fpregs(struct fpu *fpu)
+static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
 {
 	if (use_xsave()) {
-		copy_kernel_to_xregs(&fpu->state.xsave, -1);
+		copy_kernel_to_xregs(&fpstate->xsave, -1);
 	} else {
 		if (use_fxsr())
-			copy_kernel_to_fxregs(&fpu->state.fxsave);
+			copy_kernel_to_fxregs(&fpstate->fxsave);
 		else
-			copy_kernel_to_fregs(&fpu->state.fsave);
+			copy_kernel_to_fregs(&fpstate->fsave);
 	}
 }
 
-static inline void copy_kernel_to_fpregs(struct fpu *fpu)
+static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
 {
 	/*
 	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
@@ -470,10 +470,10 @@ static inline void copy_kernel_to_fpregs(struct fpu *fpu)
 			"fnclex\n\t"
 			"emms\n\t"
 			"fildl %P[addr]"	/* set F?P to defined value */
-			: : [addr] "m" (fpu->fpregs_active));
+			: : [addr] "m" (fpstate));
 	}
 
-	__copy_kernel_to_fpregs(fpu);
+	__copy_kernel_to_fpregs(fpstate);
 }
 
 extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
@@ -642,7 +642,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
 {
 	if (fpu_switch.preload)
-		copy_kernel_to_fpregs(new_fpu);
+		copy_kernel_to_fpregs(&new_fpu->state);
 }
 
 /*
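
The internal.h hunks above move __copy_kernel_to_fpregs() and copy_kernel_to_fpregs() from a struct fpu argument to a bare union fpregs_state pointer, so callers such as switch_fpu_finish() now pass &fpu->state. The following is a minimal user-space sketch (not kernel code) of the type relationship this relies on: the union/struct nesting and the fsave/fxsave/xsave member names come from the hunks, while the struct bodies and sizes are stand-ins.

#include <stdio.h>

/* Stand-in register-image types; only the member names in the union matter here. */
struct fregs_state  { unsigned char image[128]; };	/* mock, not the real layout */
struct fxregs_state { unsigned char image[512]; };	/* mock, not the real layout */
struct xregs_state  { unsigned char image[1024]; };	/* mock, not the real layout */

union fpregs_state {
	struct fregs_state	fsave;
	struct fxregs_state	fxsave;
	struct xregs_state	xsave;
};

struct fpu {
	union fpregs_state	state;
	/* ... fpregs_active, counter, etc. in the real struct ... */
};

/* New-style helper: needs only a register-state buffer, not a struct fpu. */
static void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	/* The kernel picks XRSTOR/FXRSTOR/FRSTOR here; just report the buffer. */
	printf("would restore registers from %p\n", (void *)&fpstate->xsave);
}

int main(void)
{
	struct fpu fpu;

	/*
	 * A caller holding a struct fpu passes &fpu->state: the same storage
	 * the old code reached through fpu->state.xsave / .fxsave / .fsave.
	 */
	copy_kernel_to_fpregs(&fpu.state);
	return 0;
}

Decoupling the helpers from struct fpu in this way is what the KVM hunk further down relies on when it restores the guest image directly from vcpu->arch.guest_fpu.state.
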
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 8470df44c06d1b45b928ad9e77b01214b53bca4c..79de954626fd971f1d24553078bed1f199d52267 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -127,7 +127,7 @@ void __kernel_fpu_end(void)
 	struct fpu *fpu = &current->thread.fpu;
 
 	if (fpu->fpregs_active)
-		copy_kernel_to_fpregs(fpu);
+		copy_kernel_to_fpregs(&fpu->state);
 	else
 		__fpregs_deactivate_hw();
 
@@ -368,7 +368,7 @@ void fpu__restore(struct fpu *fpu)
 	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
 	kernel_fpu_disable();
 	fpregs_activate(fpu);
-	copy_kernel_to_fpregs(fpu);
+	copy_kernel_to_fpregs(&fpu->state);
 	fpu->counter++;
 	kernel_fpu_enable();
 }
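
Both core.c call sites restore the registers of the task's own FPU context, so the conversion is mechanical: the struct fpu argument becomes &fpu->state. Below is a rough user-space sketch of the __kernel_fpu_begin()/__kernel_fpu_end() bracket that the first hunk touches; the save-on-entry behaviour, the current_fpu global, and the printf bodies are illustrative assumptions, and only the "restore from &fpu->state when fpregs_active" step is taken from the patch.

#include <stdio.h>

union fpregs_state { unsigned char image[512]; };	/* mock layout */

struct fpu {
	int			fpregs_active;
	union fpregs_state	state;
};

/* Stands in for current->thread.fpu. */
static struct fpu current_fpu = { .fpregs_active = 1 };

static void copy_fpregs_to_fpstate(struct fpu *fpu)
{
	(void)fpu;
	printf("save user registers into fpu->state\n");
}

static void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	(void)fpstate;
	printf("restore user registers from the saved buffer\n");
}

static void __fpregs_deactivate_hw(void)
{
	printf("leave the FPU deactivated for user space\n");
}

/* Assumed shape of the bracket: park the user state on entry ... */
static void __kernel_fpu_begin(void)
{
	if (current_fpu.fpregs_active)
		copy_fpregs_to_fpstate(&current_fpu);
}

/* ... and put it back on exit, as in the hunk above, from &fpu->state. */
static void __kernel_fpu_end(void)
{
	if (current_fpu.fpregs_active)
		copy_kernel_to_fpregs(&current_fpu.state);
	else
		__fpregs_deactivate_hw();
}

int main(void)
{
	__kernel_fpu_begin();
	printf("... use FPU/SIMD registers in kernel context ...\n");
	__kernel_fpu_end();
	return 0;
}
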
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 66871f4937fa3f624f7aff2944ac8c10fdf5baf5..26eaeb522cab214bed15cba35f5be945722d70ae 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7030,7 +7030,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 	kvm_put_guest_xcr0(vcpu);
 	vcpu->guest_fpu_loaded = 1;
 	__kernel_fpu_begin();
-	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu);
+	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
 	trace_kvm_fpu(1);
 }
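
In the KVM hunk the register image being restored does not belong to current->thread.fpu: the guest state sits in vcpu->arch.guest_fpu and is loaded with the double-underscore helper, now from &vcpu->arch.guest_fpu.state. A short sketch of that shape, using a hypothetical flattened vcpu_mock type, to show that the reworked helpers only need a register-state buffer, not the owning task's FPU context:

#include <stdio.h>

union fpregs_state { unsigned char image[512]; };	/* mock layout */

struct fpu {
	union fpregs_state	state;
};

/* Hypothetical flattened vcpu: the guest's FPU context lives outside current->thread.fpu. */
struct vcpu_mock {
	int		guest_fpu_loaded;
	struct fpu	guest_fpu;
};

static void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	(void)fpstate;
	printf("load hardware registers straight from the given buffer\n");
}

static void kvm_load_guest_fpu(struct vcpu_mock *vcpu)
{
	/* The real code brackets this with __kernel_fpu_begin() and tracing. */
	vcpu->guest_fpu_loaded = 1;
	__copy_kernel_to_fpregs(&vcpu->guest_fpu.state);
}

int main(void)
{
	struct vcpu_mock vcpu = { 0 };

	kvm_load_guest_fpu(&vcpu);
	return 0;
}
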