Merge tag 'v5.10.120' into v5.10-rt
Signed-off-by: Luis Claudio R. Goncalves <lgoncalv@redhat.com>
lclaudio committed Jun 10, 2022
2 parents f5dc729 + 70dd2d1 commit a788888
Showing 58 changed files with 459 additions and 308 deletions.
2 changes: 1 addition & 1 deletion Documentation/process/submitting-patches.rst
@@ -71,7 +71,7 @@ as you intend it to.

The maintainer will thank you if you write your patch description in a
form which can be easily pulled into Linux's source code management
- system, ``git``, as a "commit log". See :ref:`explicit_in_reply_to`.
+ system, ``git``, as a "commit log". See :ref:`the_canonical_patch_format`.

Solve only one problem per patch. If your description starts to get
long, that's a sign that you probably need to split up your patch.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
- SUBLEVEL = 119
+ SUBLEVEL = 120
EXTRAVERSION =
NAME = Dare mighty things

2 changes: 1 addition & 1 deletion arch/arm/boot/dts/s5pv210-aries.dtsi
@@ -896,7 +896,7 @@
device-wakeup-gpios = <&gpg3 4 GPIO_ACTIVE_HIGH>;
interrupt-parent = <&gph2>;
interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "host-wake";
interrupt-names = "host-wakeup";
};
};

8 changes: 5 additions & 3 deletions arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -359,13 +359,15 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
struct kvm *kvm, unsigned long *gfn)
{
- struct kvmppc_uvmem_slot *p;
+ struct kvmppc_uvmem_slot *p = NULL, *iter;
bool ret = false;
unsigned long i;

- list_for_each_entry(p, &kvm->arch.uvmem_pfns, list)
- if (*gfn >= p->base_pfn && *gfn < p->base_pfn + p->nr_pfns)
+ list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
+ if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
+ p = iter;
break;
+ }
if (!p)
return ret;
/*
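Note on the hunk above: with list_for_each_entry(), the loop cursor is never NULL when the list is exhausted; it ends up pointing at an address derived from the list head itself, so the old "if (!p)" test could never fire. The fix walks the list with a dedicated iter and only assigns p on a match. A minimal userspace analogue of the corrected pattern, with a hypothetical struct slot standing in for kvmppc_uvmem_slot (not the kernel API):

#include <stdio.h>

/* Hypothetical stand-in for kvmppc_uvmem_slot. */
struct slot {
    unsigned long base, len;
    struct slot *next;
};

/* Search with a dedicated result pointer: NULL unambiguously means
 * "not found", regardless of where the loop cursor stopped. */
static struct slot *find_slot(struct slot *head, unsigned long gfn)
{
    struct slot *found = NULL;

    for (struct slot *iter = head; iter; iter = iter->next)
        if (gfn >= iter->base && gfn < iter->base + iter->len) {
            found = iter;
            break;
        }
    return found;
}

int main(void)
{
    struct slot b = { .base = 100, .len = 50, .next = 0 };
    struct slot a = { .base = 0, .len = 10, .next = &b };

    printf("%d\n", find_slot(&a, 120) == &b); /* 1: found */
    printf("%d\n", find_slot(&a, 60) == 0);   /* 1: not found */
    return 0;
}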
41 changes: 27 additions & 14 deletions arch/x86/kernel/kvm.c
@@ -188,7 +188,7 @@ void kvm_async_pf_task_wake(u32 token)
{
u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
- struct kvm_task_sleep_node *n;
+ struct kvm_task_sleep_node *n, *dummy = NULL;

if (token == ~0) {
apf_task_wake_all();
@@ -200,28 +200,41 @@ void kvm_async_pf_task_wake(u32 token)
n = _find_apf_task(b, token);
if (!n) {
/*
- * async PF was not yet handled.
- * Add dummy entry for the token.
+ * Async #PF not yet handled, add a dummy entry for the token.
+ * Allocating the token must be done outside of the raw lock
+ * as the allocator is preemptible on PREEMPT_RT kernels.
*/
- n = kzalloc(sizeof(*n), GFP_ATOMIC);
- if (!n) {
+ if (!dummy) {
+ raw_spin_unlock(&b->lock);
+ dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC);

/*
- * Allocation failed! Busy wait while other cpu
- * handles async PF.
+ * Continue looping on allocation failure, eventually
+ * the async #PF will be handled and allocating a new
+ * node will be unnecessary.
*/
- raw_spin_unlock(&b->lock);
- cpu_relax();
+ if (!dummy)
+ cpu_relax();
+
+ /*
+ * Recheck for async #PF completion before enqueueing
+ * the dummy token to avoid duplicate list entries.
+ */
goto again;
}
- n->token = token;
- n->cpu = smp_processor_id();
- init_swait_queue_head(&n->wq);
- hlist_add_head(&n->link, &b->list);
+ dummy->token = token;
+ dummy->cpu = smp_processor_id();
+ init_swait_queue_head(&dummy->wq);
+ hlist_add_head(&dummy->link, &b->list);
+ dummy = NULL;
} else {
apf_task_wake_one(n);
}
raw_spin_unlock(&b->lock);
- return;
+
+ /* A dummy token might be allocated and ultimately not used. */
+ if (dummy)
+ kfree(dummy);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

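For context on the kvm_async_pf_task_wake() rework above: b->lock is a raw spinlock, and on PREEMPT_RT raw spinlocks disable preemption while kzalloc() may reach a preemptible, sleeping allocator, so the allocation can no longer happen with the lock held. The fix follows a classic shape: drop the lock, allocate, re-take the lock, and re-validate the shared state. A hedged userspace sketch of that shape, with a pthread mutex and calloc standing in for the raw spinlock and kzalloc (names hypothetical, not the kernel API):

#include <pthread.h>
#include <stdlib.h>

struct node { int token; struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list;

static void add_token(int token)
{
    struct node *dummy = NULL;

again:
    pthread_mutex_lock(&lock);
    /* A real implementation would first search `list` for `token`
     * and wake the waiter instead of queueing, as the kernel does. */
    if (!dummy) {
        /* Drop the lock before allocating: the allocator may sleep. */
        pthread_mutex_unlock(&lock);
        dummy = calloc(1, sizeof(*dummy));
        /* Loop on failure too, mirroring the kernel's cpu_relax()
         * busy-wait; re-take the lock and re-check either way. */
        goto again;
    }
    dummy->token = token;
    dummy->next = list;
    list = dummy;
    pthread_mutex_unlock(&lock);
}

In the real patch, "dummy = NULL;" after hlist_add_head() marks that ownership has moved to the list, and the final kfree(dummy) outside the lock covers the race where another CPU handled the token while the lock was dropped and the allocation went unused.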
31 changes: 19 additions & 12 deletions arch/x86/kvm/x86.c
@@ -7295,7 +7295,7 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);

- static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
+ static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, int *r)
{
if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
(vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
@@ -7364,25 +7364,23 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
}

/*
- * Decode to be emulated instruction. Return EMULATION_OK if success.
+ * Decode an instruction for emulation. The caller is responsible for handling
+ * code breakpoints. Note, manually detecting code breakpoints is unnecessary
+ * (and wrong) when emulating on an intercepted fault-like exception[*], as
+ * code breakpoints have higher priority and thus have already been checked by
+ * hardware.
+ *
+ * [*] Except #MC, which is higher priority, but KVM should never emulate in
+ *     response to a machine check.
*/
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
void *insn, int insn_len)
{
- int r = EMULATION_OK;
struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
+ int r;

init_emulate_ctxt(vcpu);

- /*
- * We will reenter on the same instruction since we do not set
- * complete_userspace_io. This does not handle watchpoints yet,
- * those would be handled in the emulate_ops.
- */
- if (!(emulation_type & EMULTYPE_SKIP) &&
- kvm_vcpu_check_breakpoint(vcpu, &r))
- return r;
-
ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;

r = x86_decode_insn(ctxt, insn, insn_len);
@@ -7417,6 +7415,15 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
kvm_clear_exception_queue(vcpu);

+ /*
+ * Return immediately if RIP hits a code breakpoint; such #DBs
+ * are fault-like and are higher priority than any faults on
+ * the code fetch itself.
+ */
+ if (!(emulation_type & EMULTYPE_SKIP) &&
+ kvm_vcpu_check_code_breakpoint(vcpu, &r))
+ return r;
+
r = x86_decode_emulated_instruction(vcpu, emulation_type,
insn, insn_len);
if (r != EMULATION_OK) {
2 changes: 0 additions & 2 deletions crypto/Kconfig
@@ -1941,5 +1941,3 @@ source "crypto/asymmetric_keys/Kconfig"
source "certs/Kconfig"

endif # if CRYPTO
-
- source "lib/crypto/Kconfig"
110 changes: 44 additions & 66 deletions crypto/drbg.c
@@ -1035,17 +1035,38 @@ static const struct drbg_state_ops drbg_hash_ops = {
******************************************************************/

static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed,
- int reseed)
+ int reseed, enum drbg_seed_state new_seed_state)
{
int ret = drbg->d_ops->update(drbg, seed, reseed);

if (ret)
return ret;

- drbg->seeded = true;
+ drbg->seeded = new_seed_state;
/* 10.1.1.2 / 10.1.1.3 step 5 */
drbg->reseed_ctr = 1;

+ switch (drbg->seeded) {
+ case DRBG_SEED_STATE_UNSEEDED:
+ /* Impossible, but handle it to silence compiler warnings. */
+ fallthrough;
+ case DRBG_SEED_STATE_PARTIAL:
+ /*
+ * Require frequent reseeds until the seed source is
+ * fully initialized.
+ */
+ drbg->reseed_threshold = 50;
+ break;
+
+ case DRBG_SEED_STATE_FULL:
+ /*
+ * Seed source has become fully initialized, frequent
+ * reseeds no longer required.
+ */
+ drbg->reseed_threshold = drbg_max_requests(drbg);
+ break;
+ }

return ret;
}

@@ -1065,12 +1086,10 @@ static inline int drbg_get_random_bytes(struct drbg_state *drbg,
return 0;
}

- static void drbg_async_seed(struct work_struct *work)
+ static int drbg_seed_from_random(struct drbg_state *drbg)
{
struct drbg_string data;
LIST_HEAD(seedlist);
- struct drbg_state *drbg = container_of(work, struct drbg_state,
- seed_work);
unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
unsigned char entropy[32];
int ret;
@@ -1081,26 +1100,15 @@ static void drbg_async_seed(struct work_struct *work)
drbg_string_fill(&data, entropy, entropylen);
list_add_tail(&data.list, &seedlist);

- mutex_lock(&drbg->drbg_mutex);
-
ret = drbg_get_random_bytes(drbg, entropy, entropylen);
if (ret)
- goto unlock;
-
- /* Set seeded to false so that if __drbg_seed fails the
- * next generate call will trigger a reseed.
- */
- drbg->seeded = false;
-
- __drbg_seed(drbg, &seedlist, true);
-
- if (drbg->seeded)
- drbg->reseed_threshold = drbg_max_requests(drbg);
+ goto out;

- unlock:
- mutex_unlock(&drbg->drbg_mutex);
+ ret = __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL);

+ out:
memzero_explicit(entropy, entropylen);
+ return ret;
}

/*
@@ -1122,6 +1130,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
struct drbg_string data1;
LIST_HEAD(seedlist);
+ enum drbg_seed_state new_seed_state = DRBG_SEED_STATE_FULL;

/* 9.1 / 9.2 / 9.3.1 step 3 */
if (pers && pers->len > (drbg_max_addtl(drbg))) {
@@ -1149,6 +1158,9 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
BUG_ON((entropylen * 2) > sizeof(entropy));

/* Get seed from in-kernel /dev/urandom */
+ if (!rng_is_initialized())
+ new_seed_state = DRBG_SEED_STATE_PARTIAL;

ret = drbg_get_random_bytes(drbg, entropy, entropylen);
if (ret)
goto out;
@@ -1205,7 +1217,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
memset(drbg->C, 0, drbg_statelen(drbg));
}

- ret = __drbg_seed(drbg, &seedlist, reseed);
+ ret = __drbg_seed(drbg, &seedlist, reseed, new_seed_state);

out:
memzero_explicit(entropy, entropylen * 2);
@@ -1385,19 +1397,25 @@ static int drbg_generate(struct drbg_state *drbg,
* here. The spec is a bit convoluted here, we make it simpler.
*/
if (drbg->reseed_threshold < drbg->reseed_ctr)
- drbg->seeded = false;
+ drbg->seeded = DRBG_SEED_STATE_UNSEEDED;

- if (drbg->pr || !drbg->seeded) {
+ if (drbg->pr || drbg->seeded == DRBG_SEED_STATE_UNSEEDED) {
pr_devel("DRBG: reseeding before generation (prediction "
"resistance: %s, state %s)\n",
drbg->pr ? "true" : "false",
- drbg->seeded ? "seeded" : "unseeded");
+ (drbg->seeded == DRBG_SEED_STATE_FULL ?
+ "seeded" : "unseeded"));
/* 9.3.1 steps 7.1 through 7.3 */
len = drbg_seed(drbg, addtl, true);
if (len)
goto err;
/* 9.3.1 step 7.4 */
addtl = NULL;
+ } else if (rng_is_initialized() &&
+ drbg->seeded == DRBG_SEED_STATE_PARTIAL) {
+ len = drbg_seed_from_random(drbg);
+ if (len)
+ goto err;
}

if (addtl && 0 < addtl->len)
@@ -1490,50 +1508,15 @@ static int drbg_generate_long(struct drbg_state *drbg,
return 0;
}

- static int drbg_schedule_async_seed(struct notifier_block *nb, unsigned long action, void *data)
- {
- struct drbg_state *drbg = container_of(nb, struct drbg_state,
- random_ready);
-
- schedule_work(&drbg->seed_work);
- return 0;
- }
-
static int drbg_prepare_hrng(struct drbg_state *drbg)
{
- int err;
-
/* We do not need an HRNG in test mode. */
if (list_empty(&drbg->test_data.list))
return 0;

drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);

- INIT_WORK(&drbg->seed_work, drbg_async_seed);
-
- drbg->random_ready.notifier_call = drbg_schedule_async_seed;
- err = register_random_ready_notifier(&drbg->random_ready);
-
- switch (err) {
- case 0:
- break;
-
- case -EALREADY:
- err = 0;
- fallthrough;
-
- default:
- drbg->random_ready.notifier_call = NULL;
- return err;
- }
-
- /*
- * Require frequent reseeds until the seed source is fully
- * initialized.
- */
- drbg->reseed_threshold = 50;
-
- return err;
+ return 0;
}

/*
Expand Down Expand Up @@ -1576,7 +1559,7 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
if (!drbg->core) {
drbg->core = &drbg_cores[coreref];
drbg->pr = pr;
- drbg->seeded = false;
+ drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
drbg->reseed_threshold = drbg_max_requests(drbg);

ret = drbg_alloc_state(drbg);
@@ -1627,11 +1610,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
*/
static int drbg_uninstantiate(struct drbg_state *drbg)
{
- if (drbg->random_ready.notifier_call) {
- unregister_random_ready_notifier(&drbg->random_ready);
- cancel_work_sync(&drbg->seed_work);
- }
-
if (!IS_ERR_OR_NULL(drbg->jent))
crypto_free_rng(drbg->jent);
drbg->jent = NULL;
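The drbg.c hunks above replace the old boolean drbg->seeded with three-state tracking of seed quality. The enum itself lives in include/crypto/drbg.h, which is among the 58 changed files but is not shown in this excerpt; judging from the constants used above, it presumably reads:

/* Sketch of the assumed definition from include/crypto/drbg.h. */
enum drbg_seed_state {
    DRBG_SEED_STATE_UNSEEDED,
    DRBG_SEED_STATE_PARTIAL,  /* seeded while !rng_is_initialized() */
    DRBG_SEED_STATE_FULL,
};

The tri-state distinguishes "never seeded" (must reseed before generating) from "seeded early from a not-yet-initialized RNG" (reseed synchronously from get_random_bytes() once rng_is_initialized() turns true, via the new drbg_seed_from_random() path in drbg_generate()). That synchronous path is what makes the random_ready notifier and the async seed work removable in the rest of the file.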
