Merge tag 'v5.4.107' into 5.4
This is the 5.4.107 stable release
xanmod committed Mar 20, 2021
2 parents aa581f5 + a65e788 commit 7b6f384
Showing 44 changed files with 919 additions and 546 deletions.
Makefile — 1 addition, 1 deletion
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 106
+SUBLEVEL = 107
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus

arch/arm64/include/asm/kvm_hyp.h — 3 additions, 0 deletions
@@ -71,6 +71,9 @@ void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
 
 void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
 void __debug_switch_to_host(struct kvm_vcpu *vcpu);
+void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
+void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
+
 
 void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
 void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
arch/arm64/kvm/hyp/debug-sr.c — 15 additions, 9 deletions
@@ -168,20 +168,28 @@ static void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
 	write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
 }
 
+void __hyp_text __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Non-VHE: Disable and flush SPE data generation
+	 * VHE: The vcpu can run, but it can't hide.
+	 */
+	__debug_save_spe_nvhe(&vcpu->arch.host_debug_state.pmscr_el1);
+
+}
+
+void __hyp_text __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
+{
+	__debug_restore_spe_nvhe(vcpu->arch.host_debug_state.pmscr_el1);
+}
+
 void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_cpu_context *guest_ctxt;
 	struct kvm_guest_debug_arch *host_dbg;
 	struct kvm_guest_debug_arch *guest_dbg;
 
-	/*
-	 * Non-VHE: Disable and flush SPE data generation
-	 * VHE: The vcpu can run, but it can't hide.
-	 */
-	if (!has_vhe())
-		__debug_save_spe_nvhe(&vcpu->arch.host_debug_state.pmscr_el1);
-
 	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
 		return;
 
@@ -201,8 +209,6 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
 	struct kvm_guest_debug_arch *host_dbg;
 	struct kvm_guest_debug_arch *guest_dbg;
 
-	if (!has_vhe())
-		__debug_restore_spe_nvhe(vcpu->arch.host_debug_state.pmscr_el1);
-
 	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
 		return;
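
Why these helpers were hoisted out of the debug-switch path: on a non-VHE system, SPE (the Statistical Profiling Extension) writes its buffer through the EL1&0 translation regime, so the host's buffer must be disabled and fully drained while the host's stage-1 tables are still installed, not at the later point where the rest of the debug state is switched. As a rough sketch, the save side has to do something like the following (paraphrased from the existing __debug_save_spe_nvhe() helper; the register and barrier names are the real arm64 ones, but this is an illustration, not the verbatim 5.4 source):

/* Stop SPE, then drain it, while the host's stage-1 is still mapped. */
static void save_spe_sketch(u64 *pmscr_el1)
{
	/* If the host never enabled the profiling buffer, nothing to do. */
	if (!(read_sysreg_s(SYS_PMBLIMITR_EL1) & BIT(0)))	/* E bit */
		return;

	/* Save the control register and stop new data generation. */
	*pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
	write_sysreg_s(0, SYS_PMSCR_EL1);
	isb();		/* the disable must take effect first */

	psb_csync();	/* complete all outstanding SPE writes... */
	dsb(nsh);	/* ...and make them visible in memory */
}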
arch/arm64/kvm/hyp/switch.c — 12 additions, 1 deletion
@@ -682,6 +682,15 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
 
 	__sysreg_save_state_nvhe(host_ctxt);
 
+	/*
+	 * We must flush and disable the SPE buffer for nVHE, as
+	 * the translation regime(EL1&0) is going to be loaded with
+	 * that of the guest. And we must do this before we change the
+	 * translation regime to EL2 (via MDCR_EL2_EPB == 0) and
+	 * before we load guest Stage1.
+	 */
+	__debug_save_host_buffers_nvhe(vcpu);
+
 	__activate_vm(kern_hyp_va(vcpu->kvm));
 	__activate_traps(vcpu);
 
@@ -720,11 +729,13 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
 		__fpsimd_save_fpexc32(vcpu);
 
+	__debug_switch_to_host(vcpu);
+
 	/*
 	 * This must come after restoring the host sysregs, since a non-VHE
 	 * system may enable SPE here and make use of the TTBRs.
 	 */
-	__debug_switch_to_host(vcpu);
+	__debug_restore_host_buffers_nvhe(vcpu);
 
 	if (pmu_switch_needed)
 		__pmu_switch_to_host(host_ctxt);
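
The ordering constraint both hunks enforce is easy to state: stop the SPE writer before you swap the address space it writes through, and only let it restart after that address space is back. A toy model of the invariant, purely for illustration (hypothetical names, plain C, compiles standalone):

#include <stdio.h>

enum regime { HOST_EL1, GUEST_EL1 };

static enum regime active = HOST_EL1;
static int spe_enabled = 1;

static void spe_drain_and_disable(void) { spe_enabled = 0; }
static void spe_reenable(void)          { spe_enabled = 1; }

static void load_regime(enum regime r)
{
	/* The bug this patch fixes: getting here with SPE still enabled
	 * lets profiling data land in the wrong translation regime. */
	if (r == GUEST_EL1 && spe_enabled)
		fprintf(stderr, "BUG: SPE live across regime switch\n");
	active = r;
}

int main(void)
{
	spe_drain_and_disable();   /* __debug_save_host_buffers_nvhe()    */
	load_regime(GUEST_EL1);    /* __activate_vm()                     */
	/* ... guest runs ... */
	load_regime(HOST_EL1);     /* host sysregs restored               */
	spe_reenable();            /* __debug_restore_host_buffers_nvhe() */
	return active == HOST_EL1 ? 0 : 1;
}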
arch/x86/crypto/aesni-intel_asm.S — 81 additions, 56 deletions
@@ -319,7 +319,7 @@ _initial_blocks_\@:
 
 	# Main loop - Encrypt/Decrypt remaining blocks
 
-	cmp $0, %r13
+	test %r13, %r13
 	je _zero_cipher_left_\@
 	sub $64, %r13
 	je _four_cipher_left_\@
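
This and the following hunks are a mechanical conversion: `test %rN, %rN` ANDs a register with itself and sets ZF and SF (and clears OF/CF) exactly as `cmp $0, %rN` does, while saving the immediate byte in the encoding, so both the `je` (ZF) and the later `jl` (SF/OF) branches behave identically. A quick standalone self-check on x86-64 with GCC or Clang (my own demo, not kernel code):

#include <assert.h>
#include <stdint.h>

/* Return SF (bit 7) and ZF (bit 6) of RFLAGS after `test v, v`. */
static uint64_t flags_after_test(int64_t v)
{
	uint64_t f;
	__asm__ ("test %1, %1; pushfq; popq %0" : "=r"(f) : "r"(v) : "cc");
	return f & 0xc0;
}

/* Same, after `cmp $0, v`. */
static uint64_t flags_after_cmp(int64_t v)
{
	uint64_t f;
	__asm__ ("cmpq $0, %1; pushfq; popq %0" : "=r"(f) : "r"(v) : "cc");
	return f & 0xc0;
}

int main(void)
{
	int64_t probes[] = { 0, 1, -1, INT64_MIN, INT64_MAX };

	for (int i = 0; i < 5; i++)
		assert(flags_after_test(probes[i]) == flags_after_cmp(probes[i]));
	return 0;
}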
@@ -438,7 +438,7 @@ _multiple_of_16_bytes_\@:
 
 	mov PBlockLen(%arg2), %r12
 
-	cmp $0, %r12
+	test %r12, %r12
 	je _partial_done\@
 
 	GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
@@ -475,15 +475,15 @@ _T_8_\@:
 	add $8, %r10
 	sub $8, %r11
 	psrldq $8, %xmm0
-	cmp $0, %r11
+	test %r11, %r11
 	je _return_T_done_\@
 _T_4_\@:
 	movd %xmm0, %eax
 	mov %eax, (%r10)
 	add $4, %r10
 	sub $4, %r11
 	psrldq $4, %xmm0
-	cmp $0, %r11
+	test %r11, %r11
 	je _return_T_done_\@
 _T_123_\@:
 	movd %xmm0, %eax
@@ -620,7 +620,7 @@ _get_AAD_blocks\@:
 
 	/* read the last <16B of AAD */
 _get_AAD_rest\@:
-	cmp $0, %r11
+	test %r11, %r11
 	je _get_AAD_done\@
 
 	READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7
@@ -641,7 +641,7 @@ _get_AAD_done\@:
 .macro PARTIAL_BLOCK CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \
 	AAD_HASH operation
 	mov PBlockLen(%arg2), %r13
-	cmp $0, %r13
+	test %r13, %r13
 	je _partial_block_done_\@	# Leave Macro if no partial blocks
 	# Read in input data without over reading
 	cmp $16, \PLAIN_CYPH_LEN
@@ -693,7 +693,7 @@ _no_extra_mask_1_\@:
 	PSHUFB_XMM %xmm2, %xmm3
 	pxor %xmm3, \AAD_HASH
 
-	cmp $0, %r10
+	test %r10, %r10
 	jl _partial_incomplete_1_\@
 
 	# GHASH computation for the last <16 Byte block
@@ -728,7 +728,7 @@ _no_extra_mask_2_\@:
 	PSHUFB_XMM %xmm2, %xmm9
 	pxor %xmm9, \AAD_HASH
 
-	cmp $0, %r10
+	test %r10, %r10
 	jl _partial_incomplete_2_\@
 
 	# GHASH computation for the last <16 Byte block
@@ -748,7 +748,7 @@ _encode_done_\@:
 	PSHUFB_XMM %xmm2, %xmm9
 .endif
 	# output encrypted Bytes
-	cmp $0, %r10
+	test %r10, %r10
 	jl _partial_fill_\@
 	mov %r13, %r12
 	mov $16, %r13
@@ -1946,7 +1946,7 @@ ENTRY(aesni_set_key)
 ENDPROC(aesni_set_key)
 
 /*
- * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+ * void aesni_enc(const void *ctx, u8 *dst, const u8 *src)
 */
 ENTRY(aesni_enc)
 	FRAME_BEGIN
@@ -2137,7 +2137,7 @@ _aesni_enc4:
 ENDPROC(_aesni_enc4)
 
 /*
- * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+ * void aesni_dec (const void *ctx, u8 *dst, const u8 *src)
 */
 ENTRY(aesni_dec)
 	FRAME_BEGIN
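
The aesni_enc/aesni_dec comment updates track a prototype regularization on the C side: the context argument is typed const void * so that every single-block cipher routine fits one function-pointer type and can be called through common glue code without casts. A sketch of the idea (the typedef name matches the kernel's glue helper; whether the 5.4 tree spells the argument const void * or void * is not visible in this hunk, so treat the exact signature as illustrative):

#include <stdint.h>

typedef uint8_t u8;

/* One pointer type fits every single-block routine, because the context
 * is passed as an opaque pointer rather than a concrete struct type. */
typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src);

struct toy_ctx { u8 key; };		/* hypothetical cipher context */

static void toy_enc(const void *ctx, u8 *dst, const u8 *src)
{
	const struct toy_ctx *c = ctx;

	*dst = *src ^ c->key;		/* stand-in for a real block cipher */
}

int main(void)
{
	struct toy_ctx c = { .key = 0xaa };
	u8 in = 0x55, out;
	common_glue_func_t fn = toy_enc;	/* no cast needed */

	fn(&c, &out, &in);
	return out == 0xff ? 0 : 1;	/* exit 0 on the expected result */
}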
@@ -2726,25 +2726,18 @@ ENDPROC(aesni_ctr_enc)
 	pxor CTR, IV;
 
 /*
- * void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
- *			 bool enc, u8 *iv)
+ * void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst,
+ *			  const u8 *src, unsigned int len, le128 *iv)
 */
-ENTRY(aesni_xts_crypt8)
+ENTRY(aesni_xts_encrypt)
 	FRAME_BEGIN
-	cmpb $0, %cl
-	movl $0, %ecx
-	movl $240, %r10d
-	leaq _aesni_enc4, %r11
-	leaq _aesni_dec4, %rax
-	cmovel %r10d, %ecx
-	cmoveq %rax, %r11
 
 	movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK
 	movups (IVP), IV
 
 	mov 480(KEYP), KLEN
-	addq %rcx, KEYP
 
+.Lxts_enc_loop4:
 	movdqa IV, STATE1
 	movdqu 0x00(INP), INC
 	pxor INC, STATE1
@@ -2768,71 +2761,103 @@ ENTRY(aesni_xts_crypt8)
 	pxor INC, STATE4
 	movdqu IV, 0x30(OUTP)
 
-	CALL_NOSPEC %r11
+	call _aesni_enc4
 
 	movdqu 0x00(OUTP), INC
 	pxor INC, STATE1
 	movdqu STATE1, 0x00(OUTP)
 
-	_aesni_gf128mul_x_ble()
-	movdqa IV, STATE1
-	movdqu 0x40(INP), INC
-	pxor INC, STATE1
-	movdqu IV, 0x40(OUTP)
-
 	movdqu 0x10(OUTP), INC
 	pxor INC, STATE2
 	movdqu STATE2, 0x10(OUTP)
 
-	_aesni_gf128mul_x_ble()
-	movdqa IV, STATE2
-	movdqu 0x50(INP), INC
-	pxor INC, STATE2
-	movdqu IV, 0x50(OUTP)
-
 	movdqu 0x20(OUTP), INC
 	pxor INC, STATE3
 	movdqu STATE3, 0x20(OUTP)
 
-	_aesni_gf128mul_x_ble()
-	movdqa IV, STATE3
-	movdqu 0x60(INP), INC
-	pxor INC, STATE3
-	movdqu IV, 0x60(OUTP)
-
 	movdqu 0x30(OUTP), INC
 	pxor INC, STATE4
 	movdqu STATE4, 0x30(OUTP)
 
-	_aesni_gf128mul_x_ble()
-	movdqa IV, STATE4
-	movdqu 0x70(INP), INC
-	pxor INC, STATE4
-	movdqu IV, 0x70(OUTP)
-
 	_aesni_gf128mul_x_ble()
+	add $64, INP
+	add $64, OUTP
+	sub $64, LEN
+	ja .Lxts_enc_loop4
 
 	movups IV, (IVP)
 
-	CALL_NOSPEC %r11
+	FRAME_END
+	ret
+ENDPROC(aesni_xts_encrypt)
+
+/*
+ * void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst,
+ *			  const u8 *src, unsigned int len, le128 *iv)
+ */
+ENTRY(aesni_xts_decrypt)
+	FRAME_BEGIN
+
+	movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK
+	movups (IVP), IV
+
+	mov 480(KEYP), KLEN
+	add $240, KEYP
 
-	movdqu 0x40(OUTP), INC
+.Lxts_dec_loop4:
+	movdqa IV, STATE1
+	movdqu 0x00(INP), INC
 	pxor INC, STATE1
-	movdqu STATE1, 0x40(OUTP)
+	movdqu IV, 0x00(OUTP)
 
-	movdqu 0x50(OUTP), INC
+	_aesni_gf128mul_x_ble()
+	movdqa IV, STATE2
+	movdqu 0x10(INP), INC
 	pxor INC, STATE2
+	movdqu IV, 0x10(OUTP)
+
+	_aesni_gf128mul_x_ble()
+	movdqa IV, STATE3
+	movdqu 0x20(INP), INC
+	pxor INC, STATE3
+	movdqu IV, 0x20(OUTP)
+
+	_aesni_gf128mul_x_ble()
+	movdqa IV, STATE4
+	movdqu 0x30(INP), INC
+	pxor INC, STATE4
+	movdqu IV, 0x30(OUTP)
+
+	call _aesni_dec4
+
+	movdqu 0x00(OUTP), INC
+	pxor INC, STATE1
+	movdqu STATE1, 0x00(OUTP)
+
+	movdqu 0x10(OUTP), INC
+	pxor INC, STATE2
-	movdqu STATE2, 0x50(OUTP)
+	movdqu STATE2, 0x10(OUTP)
 
-	movdqu 0x60(OUTP), INC
+	movdqu 0x20(OUTP), INC
 	pxor INC, STATE3
-	movdqu STATE3, 0x60(OUTP)
+	movdqu STATE3, 0x20(OUTP)
 
-	movdqu 0x70(OUTP), INC
+	movdqu 0x30(OUTP), INC
 	pxor INC, STATE4
-	movdqu STATE4, 0x70(OUTP)
+	movdqu STATE4, 0x30(OUTP)
 
+	_aesni_gf128mul_x_ble()
+
+	add $64, INP
+	add $64, OUTP
+	sub $64, LEN
+	ja .Lxts_dec_loop4
+
+	movups IV, (IVP)
 
 	FRAME_END
 	ret
-ENDPROC(aesni_xts_crypt8)
+ENDPROC(aesni_xts_decrypt)
 
 #endif
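
Both new routines process four blocks per loop iteration (hence the add $64 / sub $64 / ja stride) and advance the XTS tweak with _aesni_gf128mul_x_ble(), a multiply by x in GF(2^128) in the low-bit-first ("ble") convention XTS uses. In portable C the same tweak update looks roughly like this (the standard XTS doubling, shown for illustration; this is not the kernel's helper):

#include <stdint.h>
#include <stdio.h>

/* Multiply a 16-byte XTS tweak by x in GF(2^128): shift left one bit in
 * little-endian byte order and fold the carry back in with the
 * reduction polynomial x^128 + x^7 + x^2 + x + 1 (0x87). */
static void xts_mul_x(uint8_t t[16])
{
	uint8_t carry = 0;

	for (int i = 0; i < 16; i++) {
		uint8_t next = t[i] >> 7;

		t[i] = (uint8_t)(t[i] << 1) | carry;
		carry = next;
	}
	if (carry)
		t[0] ^= 0x87;
}

int main(void)
{
	uint8_t tweak[16] = { 0x80, 0, 0, 0, 0, 0, 0, 0,
			      0, 0, 0, 0, 0, 0, 0, 0x80 };

	xts_mul_x(tweak);	/* top bit of byte 15 wraps into 0x87 */
	for (int i = 0; i < 16; i++)
		printf("%02x", tweak[i]);
	printf("\n");		/* prints 87010000...00 */
	return 0;
}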
